Diffstat (limited to 'tools')
-rw-r--r--  tools/arch/x86/include/asm/msr-index.h | 1
-rw-r--r--  tools/arch/x86/kcpuid/kcpuid.c | 47
-rw-r--r--  tools/arch/x86/lib/x86-opcode-map.txt | 54
-rw-r--r--  tools/bpf/bpftool/cgroup.c | 14
-rw-r--r--  tools/bpf/bpftool/common.c | 3
-rw-r--r--  tools/bpf/bpftool/main.c | 6
-rw-r--r--  tools/bpf/bpftool/net.c | 15
-rw-r--r--  tools/bpf/bpftool/prog.c | 1
-rw-r--r--  tools/bpf/resolve_btfids/Makefile | 2
-rw-r--r--  tools/build/Makefile.build | 6
-rw-r--r--  tools/cgroup/memcg_slabinfo.py | 4
-rw-r--r--  tools/hv/hv_fcopy_uio_daemon.c | 128
-rw-r--r--  tools/include/linux/kallsyms.h | 4
-rw-r--r--  tools/include/nolibc/std.h | 4
-rw-r--r--  tools/include/nolibc/types.h | 4
-rw-r--r--  tools/include/uapi/linux/bpf.h | 3
-rw-r--r--  tools/lib/bpf/bpf_core_read.h | 6
-rw-r--r--  tools/lib/bpf/btf.c | 18
-rw-r--r--  tools/lib/bpf/btf_dump.c | 3
-rw-r--r--  tools/lib/bpf/libbpf.c | 91
-rw-r--r--  tools/lib/bpf/linker.c | 4
-rw-r--r--  tools/lib/bpf/nlattr.c | 15
-rw-r--r--  tools/lib/subcmd/help.c | 12
-rwxr-xr-x  tools/net/ynl/ethtool.py | 22
-rw-r--r--  tools/net/ynl/lib/ynl.c | 2
-rwxr-xr-x  tools/net/ynl/ynl-gen-c.py | 3
-rw-r--r--  tools/objtool/check.c | 62
-rw-r--r--  tools/perf/.gitignore | 2
-rw-r--r--  tools/perf/Makefile.config | 2
-rw-r--r--  tools/perf/Makefile.perf | 3
-rw-r--r--  tools/perf/builtin-record.c | 2
-rw-r--r--  tools/perf/builtin-sched.c | 99
-rw-r--r--  tools/perf/builtin-trace.c | 9
-rwxr-xr-x  tools/perf/scripts/python/exported-sql-viewer.py | 5
-rw-r--r--  tools/perf/tests/bp_account.c | 1
-rw-r--r--  tools/perf/tests/switch-tracking.c | 2
-rw-r--r--  tools/perf/tests/tests-scripts.c | 1
-rw-r--r--  tools/perf/ui/browsers/hists.c | 2
-rw-r--r--  tools/perf/util/build-id.c | 2
-rw-r--r--  tools/perf/util/evsel.c | 11
-rw-r--r--  tools/perf/util/evsel.h | 2
-rw-r--r--  tools/perf/util/intel-pt.c | 205
-rw-r--r--  tools/perf/util/machine.c | 6
-rw-r--r--  tools/perf/util/print-events.c | 1
-rw-r--r--  tools/perf/util/symbol-minimal.c | 160
-rw-r--r--  tools/perf/util/symbol.c | 1
-rw-r--r--  tools/perf/util/thread.c | 8
-rw-r--r--  tools/perf/util/thread.h | 2
-rw-r--r--  tools/power/cpupower/utils/idle_monitor/mperf_monitor.c | 4
-rw-r--r--  tools/power/x86/turbostat/turbostat.8 | 1
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 68
-rw-r--r--  tools/scripts/Makefile.include | 4
-rwxr-xr-x  tools/testing/ktest/ktest.pl | 5
-rw-r--r--  tools/testing/kunit/qemu_configs/sparc.py | 7
-rw-r--r--  tools/testing/kunit/qemu_configs/x86_64.py | 4
-rw-r--r--  tools/testing/selftests/Makefile | 3
-rw-r--r--  tools/testing/selftests/alsa/utimer-test.c | 1
-rw-r--r--  tools/testing/selftests/arm64/fp/sve-ptrace.c | 5
-rw-r--r--  tools/testing/selftests/bpf/network_helpers.c | 33
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_nf.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/ringbuf.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c | 1
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockmap_listen.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/token.c | 19
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/user_ringbuf.c | 10
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c | 44
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c | 8
-rw-r--r--  tools/testing/selftests/bpf/progs/test_global_map_resize.c | 16
-rw-r--r--  tools/testing/selftests/bpf/progs/test_ringbuf_write.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c | 7
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_arena_large.c | 110
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_precision.c | 53
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_unpriv.c | 2
-rw-r--r--  tools/testing/selftests/bpf/test_loader.c | 14
-rw-r--r--  tools/testing/selftests/bpf/test_lru_map.c | 105
-rw-r--r--  tools/testing/selftests/bpf/veristat.c | 1
-rw-r--r--  tools/testing/selftests/breakpoints/step_after_suspend_test.c | 41
-rwxr-xr-x  tools/testing/selftests/cpufreq/cpufreq.sh | 3
-rw-r--r--  tools/testing/selftests/drivers/net/lib/py/env.py | 2
-rw-r--r--  tools/testing/selftests/drivers/net/lib/py/load.py | 23
-rw-r--r--  tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc | 28
-rw-r--r--  tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc | 2
-rw-r--r--  tools/testing/selftests/futex/include/futextest.h | 11
-rw-r--r--  tools/testing/selftests/iommu/iommufd.c | 34
-rw-r--r--  tools/testing/selftests/mincore/mincore_selftest.c | 3
-rw-r--r--  tools/testing/selftests/mm/compaction_test.c | 19
-rw-r--r--  tools/testing/selftests/mm/pkey-powerpc.h | 12
-rwxr-xr-x  tools/testing/selftests/net/forwarding/bridge_mdb.sh | 2
-rw-r--r--  tools/testing/selftests/net/forwarding/lib.sh | 113
-rwxr-xr-x  tools/testing/selftests/net/gro.sh | 3
-rw-r--r--  tools/testing/selftests/net/lib.sh | 115
-rw-r--r--  tools/testing/selftests/net/mptcp/Makefile | 3
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh | 5
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh | 5
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh | 5
-rw-r--r--  tools/testing/selftests/net/ncdevmem.c | 394
-rw-r--r--  tools/testing/selftests/net/netfilter/config | 2
-rwxr-xr-x  tools/testing/selftests/net/rtnetlink.sh | 6
-rwxr-xr-x  tools/testing/selftests/net/udpgro.sh | 8
-rw-r--r--  tools/testing/selftests/perf_events/.gitignore | 1
-rw-r--r--  tools/testing/selftests/perf_events/Makefile | 2
-rw-r--r--  tools/testing/selftests/perf_events/mmap.c | 236
-rw-r--r--  tools/testing/selftests/sched_ext/exit.c | 8
-rw-r--r--  tools/testing/selftests/seccomp/seccomp_bpf.c | 13
-rw-r--r--  tools/testing/selftests/syscall_user_dispatch/sud_test.c | 50
-rwxr-xr-x  tools/testing/selftests/ublk/test_stripe_04.sh | 24
-rw-r--r--  tools/testing/selftests/vDSO/vdso_test_chacha.c | 3
-rw-r--r--  tools/testing/selftests/vDSO/vdso_test_getrandom.c | 6
-rw-r--r--  tools/testing/selftests/x86/Makefile | 2
-rw-r--r--  tools/testing/selftests/x86/bugs/Makefile | 3
-rw-r--r--  tools/testing/selftests/x86/bugs/common.py | 164
-rw-r--r--  tools/testing/selftests/x86/bugs/its_indirect_alignment.py | 150
-rw-r--r--  tools/testing/selftests/x86/bugs/its_permutations.py | 109
-rw-r--r--  tools/testing/selftests/x86/bugs/its_ret_alignment.py | 139
-rw-r--r--  tools/testing/selftests/x86/bugs/its_sysfs.py | 65
-rw-r--r--  tools/testing/selftests/x86/sigtrap_loop.c | 101
-rw-r--r--  tools/testing/vma/vma_internal.h | 2
-rw-r--r--  tools/testing/vsock/vsock_test.c | 28
-rw-r--r--  tools/verification/rv/src/in_kernel.c | 4
119 files changed, 2723 insertions(+), 827 deletions(-)
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 3ae84c3b8e6d..3deb6c11f134 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -612,6 +612,7 @@
#define MSR_AMD64_OSVW_STATUS 0xc0010141
#define MSR_AMD_PPIN_CTL 0xc00102f0
#define MSR_AMD_PPIN 0xc00102f1
+#define MSR_AMD64_CPUID_FN_7 0xc0011002
#define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
diff --git a/tools/arch/x86/kcpuid/kcpuid.c b/tools/arch/x86/kcpuid/kcpuid.c
index 1b25c0a95d3f..40a9e59c2fd5 100644
--- a/tools/arch/x86/kcpuid/kcpuid.c
+++ b/tools/arch/x86/kcpuid/kcpuid.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
-#include <stdio.h>
+#include <err.h>
+#include <getopt.h>
#include <stdbool.h>
+#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <getopt.h>
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define min(a, b) (((a) < (b)) ? (a) : (b))
@@ -145,14 +146,14 @@ static bool cpuid_store(struct cpuid_range *range, u32 f, int subleaf,
if (!func->leafs) {
func->leafs = malloc(sizeof(struct subleaf));
if (!func->leafs)
- perror("malloc func leaf");
+ err(EXIT_FAILURE, NULL);
func->nr = 1;
} else {
s = func->nr;
func->leafs = realloc(func->leafs, (s + 1) * sizeof(*leaf));
if (!func->leafs)
- perror("realloc f->leafs");
+ err(EXIT_FAILURE, NULL);
func->nr++;
}
@@ -211,7 +212,7 @@ struct cpuid_range *setup_cpuid_range(u32 input_eax)
range = malloc(sizeof(struct cpuid_range));
if (!range)
- perror("malloc range");
+ err(EXIT_FAILURE, NULL);
if (input_eax & 0x80000000)
range->is_ext = true;
@@ -220,7 +221,7 @@ struct cpuid_range *setup_cpuid_range(u32 input_eax)
range->funcs = malloc(sizeof(struct cpuid_func) * idx_func);
if (!range->funcs)
- perror("malloc range->funcs");
+ err(EXIT_FAILURE, NULL);
range->nr = idx_func;
memset(range->funcs, 0, sizeof(struct cpuid_func) * idx_func);
@@ -395,8 +396,8 @@ static int parse_line(char *line)
return 0;
err_exit:
- printf("Warning: wrong line format:\n");
- printf("\tline[%d]: %s\n", flines, line);
+ warnx("Wrong line format:\n"
+ "\tline[%d]: %s", flines, line);
return -1;
}
@@ -418,10 +419,8 @@ static void parse_text(void)
file = fopen("./cpuid.csv", "r");
}
- if (!file) {
- printf("Fail to open '%s'\n", filename);
- return;
- }
+ if (!file)
+ err(EXIT_FAILURE, "%s", filename);
while (1) {
ret = getline(&line, &len, file);
@@ -530,7 +529,7 @@ static inline struct cpuid_func *index_to_func(u32 index)
func_idx = index & 0xffff;
if ((func_idx + 1) > (u32)range->nr) {
- printf("ERR: invalid input index (0x%x)\n", index);
+ warnx("Invalid input index (0x%x)", index);
return NULL;
}
return &range->funcs[func_idx];
@@ -562,7 +561,7 @@ static void show_info(void)
return;
}
- printf("ERR: invalid input subleaf (0x%x)\n", user_sub);
+ warnx("Invalid input subleaf (0x%x)", user_sub);
}
show_func(func);
@@ -593,15 +592,15 @@ static void setup_platform_cpuid(void)
static void usage(void)
{
- printf("kcpuid [-abdfhr] [-l leaf] [-s subleaf]\n"
- "\t-a|--all Show both bit flags and complex bit fields info\n"
- "\t-b|--bitflags Show boolean flags only\n"
- "\t-d|--detail Show details of the flag/fields (default)\n"
- "\t-f|--flags Specify the cpuid csv file\n"
- "\t-h|--help Show usage info\n"
- "\t-l|--leaf=index Specify the leaf you want to check\n"
- "\t-r|--raw Show raw cpuid data\n"
- "\t-s|--subleaf=sub Specify the subleaf you want to check\n"
+ warnx("kcpuid [-abdfhr] [-l leaf] [-s subleaf]\n"
+ "\t-a|--all Show both bit flags and complex bit fields info\n"
+ "\t-b|--bitflags Show boolean flags only\n"
+ "\t-d|--detail Show details of the flag/fields (default)\n"
+ "\t-f|--flags Specify the CPUID CSV file\n"
+ "\t-h|--help Show usage info\n"
+ "\t-l|--leaf=index Specify the leaf you want to check\n"
+ "\t-r|--raw Show raw CPUID data\n"
+ "\t-s|--subleaf=sub Specify the subleaf you want to check"
);
}
@@ -652,7 +651,7 @@ static int parse_options(int argc, char *argv[])
user_sub = strtoul(optarg, NULL, 0);
break;
default:
- printf("%s: Invalid option '%c'\n", argv[0], optopt);
+ warnx("Invalid option '%c'", optopt);
return -1;
}
diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
index caedb3ef6688..cd3fd5155f6e 100644
--- a/tools/arch/x86/lib/x86-opcode-map.txt
+++ b/tools/arch/x86/lib/x86-opcode-map.txt
@@ -35,7 +35,7 @@
# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
# - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
#
-# REX2 Prefix
+# REX2 Prefix Superscripts
# - (!REX2): REX2 is not allowed
# - (REX2): REX2 variant e.g. JMPABS
@@ -286,10 +286,10 @@ df: ESC
# Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
# in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation
# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
-e0: LOOPNE/LOOPNZ Jb (f64) (!REX2)
-e1: LOOPE/LOOPZ Jb (f64) (!REX2)
-e2: LOOP Jb (f64) (!REX2)
-e3: JrCXZ Jb (f64) (!REX2)
+e0: LOOPNE/LOOPNZ Jb (f64),(!REX2)
+e1: LOOPE/LOOPZ Jb (f64),(!REX2)
+e2: LOOP Jb (f64),(!REX2)
+e3: JrCXZ Jb (f64),(!REX2)
e4: IN AL,Ib (!REX2)
e5: IN eAX,Ib (!REX2)
e6: OUT Ib,AL (!REX2)
@@ -298,10 +298,10 @@ e7: OUT Ib,eAX (!REX2)
# in "near" jumps and calls is 16-bit. For CALL,
# push of return address is 16-bit wide, RSP is decremented by 2
# but is not truncated to 16 bits, unlike RIP.
-e8: CALL Jz (f64) (!REX2)
-e9: JMP-near Jz (f64) (!REX2)
-ea: JMP-far Ap (i64) (!REX2)
-eb: JMP-short Jb (f64) (!REX2)
+e8: CALL Jz (f64),(!REX2)
+e9: JMP-near Jz (f64),(!REX2)
+ea: JMP-far Ap (i64),(!REX2)
+eb: JMP-short Jb (f64),(!REX2)
ec: IN AL,DX (!REX2)
ed: IN eAX,DX (!REX2)
ee: OUT DX,AL (!REX2)
@@ -478,22 +478,22 @@ AVXcode: 1
7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
# 0x0f 0x80-0x8f
# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
-80: JO Jz (f64) (!REX2)
-81: JNO Jz (f64) (!REX2)
-82: JB/JC/JNAE Jz (f64) (!REX2)
-83: JAE/JNB/JNC Jz (f64) (!REX2)
-84: JE/JZ Jz (f64) (!REX2)
-85: JNE/JNZ Jz (f64) (!REX2)
-86: JBE/JNA Jz (f64) (!REX2)
-87: JA/JNBE Jz (f64) (!REX2)
-88: JS Jz (f64) (!REX2)
-89: JNS Jz (f64) (!REX2)
-8a: JP/JPE Jz (f64) (!REX2)
-8b: JNP/JPO Jz (f64) (!REX2)
-8c: JL/JNGE Jz (f64) (!REX2)
-8d: JNL/JGE Jz (f64) (!REX2)
-8e: JLE/JNG Jz (f64) (!REX2)
-8f: JNLE/JG Jz (f64) (!REX2)
+80: JO Jz (f64),(!REX2)
+81: JNO Jz (f64),(!REX2)
+82: JB/JC/JNAE Jz (f64),(!REX2)
+83: JAE/JNB/JNC Jz (f64),(!REX2)
+84: JE/JZ Jz (f64),(!REX2)
+85: JNE/JNZ Jz (f64),(!REX2)
+86: JBE/JNA Jz (f64),(!REX2)
+87: JA/JNBE Jz (f64),(!REX2)
+88: JS Jz (f64),(!REX2)
+89: JNS Jz (f64),(!REX2)
+8a: JP/JPE Jz (f64),(!REX2)
+8b: JNP/JPO Jz (f64),(!REX2)
+8c: JL/JNGE Jz (f64),(!REX2)
+8d: JNL/JGE Jz (f64),(!REX2)
+8e: JLE/JNG Jz (f64),(!REX2)
+8f: JNLE/JG Jz (f64),(!REX2)
# 0x0f 0x90-0x9f
90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
@@ -996,8 +996,8 @@ AVXcode: 4
83: Grp1 Ev,Ib (1A),(es)
# CTESTSCC instructions are: CTESTB, CTESTBE, CTESTF, CTESTL, CTESTLE, CTESTNB, CTESTNBE, CTESTNL,
# CTESTNLE, CTESTNO, CTESTNS, CTESTNZ, CTESTO, CTESTS, CTESTT, CTESTZ
-84: CTESTSCC (ev)
-85: CTESTSCC (es) | CTESTSCC (66),(es)
+84: CTESTSCC Eb,Gb (ev)
+85: CTESTSCC Ev,Gv (es) | CTESTSCC Ev,Gv (66),(es)
88: POPCNT Gv,Ev (es) | POPCNT Gv,Ev (66),(es)
8f: POP2 Bq,Rq (000),(11B),(ev)
a5: SHLD Ev,Gv,CL (es) | SHLD Ev,Gv,CL (66),(es)
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index 9af426d43299..4189c9d74fb0 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -221,7 +221,7 @@ static int cgroup_has_attached_progs(int cgroup_fd)
for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++) {
int count = count_attached_bpf_progs(cgroup_fd, cgroup_attach_types[i]);
- if (count < 0)
+ if (count < 0 && errno != EINVAL)
return -1;
if (count > 0) {
@@ -318,11 +318,11 @@ static int show_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
static int do_show(int argc, char **argv)
{
- enum bpf_attach_type type;
int has_attached_progs;
const char *path;
int cgroup_fd;
int ret = -1;
+ unsigned int i;
query_flags = 0;
@@ -370,14 +370,14 @@ static int do_show(int argc, char **argv)
"AttachFlags", "Name");
btf_vmlinux = libbpf_find_kernel_btf();
- for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+ for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++) {
/*
* Not all attach types may be supported, so it's expected,
* that some requests will fail.
* If we were able to get the show for at least one
* attach type, let's return 0.
*/
- if (show_bpf_progs(cgroup_fd, type, 0) == 0)
+ if (show_bpf_progs(cgroup_fd, cgroup_attach_types[i], 0) == 0)
ret = 0;
}
@@ -400,9 +400,9 @@ exit:
static int do_show_tree_fn(const char *fpath, const struct stat *sb,
int typeflag, struct FTW *ftw)
{
- enum bpf_attach_type type;
int has_attached_progs;
int cgroup_fd;
+ unsigned int i;
if (typeflag != FTW_D)
return 0;
@@ -434,8 +434,8 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb,
}
btf_vmlinux = libbpf_find_kernel_btf();
- for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
- show_bpf_progs(cgroup_fd, type, ftw->level);
+ for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++)
+ show_bpf_progs(cgroup_fd, cgroup_attach_types[i], ftw->level);
if (errno == EINVAL)
/* Last attach type does not support query.
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 9b75639434b8..0a764426d935 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -461,10 +461,11 @@ int get_fd_type(int fd)
p_err("can't read link type: %s", strerror(errno));
return -1;
}
- if (n == sizeof(path)) {
+ if (n == sizeof(buf)) {
p_err("can't read link type: path too long!");
return -1;
}
+ buf[n] = '\0';
if (strstr(buf, "bpf-map"))
return BPF_OBJ_MAP;
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 08d0ac543c67..a0536528dfde 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -534,9 +534,9 @@ int main(int argc, char **argv)
usage();
if (version_requested)
- return do_version(argc, argv);
-
- ret = cmd_select(commands, argc, argv, do_help);
+ ret = do_version(argc, argv);
+ else
+ ret = cmd_select(commands, argc, argv, do_help);
if (json_output)
jsonw_destroy(&json_wtr);
diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
index d2242d9f8441..39f208928cdb 100644
--- a/tools/bpf/bpftool/net.c
+++ b/tools/bpf/bpftool/net.c
@@ -366,17 +366,18 @@ static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb)
{
struct bpf_netdev_t *netinfo = cookie;
struct ifinfomsg *ifinfo = msg;
+ struct ip_devname_ifindex *tmp;
if (netinfo->filter_idx > 0 && netinfo->filter_idx != ifinfo->ifi_index)
return 0;
if (netinfo->used_len == netinfo->array_len) {
- netinfo->devices = realloc(netinfo->devices,
- (netinfo->array_len + 16) *
- sizeof(struct ip_devname_ifindex));
- if (!netinfo->devices)
+ tmp = realloc(netinfo->devices,
+ (netinfo->array_len + 16) * sizeof(struct ip_devname_ifindex));
+ if (!tmp)
return -ENOMEM;
+ netinfo->devices = tmp;
netinfo->array_len += 16;
}
netinfo->devices[netinfo->used_len].ifindex = ifinfo->ifi_index;
@@ -395,6 +396,7 @@ static int dump_class_qdisc_nlmsg(void *cookie, void *msg, struct nlattr **tb)
{
struct bpf_tcinfo_t *tcinfo = cookie;
struct tcmsg *info = msg;
+ struct tc_kind_handle *tmp;
if (tcinfo->is_qdisc) {
/* skip clsact qdisc */
@@ -406,11 +408,12 @@ static int dump_class_qdisc_nlmsg(void *cookie, void *msg, struct nlattr **tb)
}
if (tcinfo->used_len == tcinfo->array_len) {
- tcinfo->handle_array = realloc(tcinfo->handle_array,
+ tmp = realloc(tcinfo->handle_array,
(tcinfo->array_len + 16) * sizeof(struct tc_kind_handle));
- if (!tcinfo->handle_array)
+ if (!tmp)
return -ENOMEM;
+ tcinfo->handle_array = tmp;
tcinfo->array_len += 16;
}
tcinfo->handle_array[tcinfo->used_len].handle = info->tcm_handle;
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index e71be67f1d86..52ffb74ae4e8 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1928,6 +1928,7 @@ static int do_loader(int argc, char **argv)
obj = bpf_object__open_file(file, &open_opts);
if (!obj) {
+ err = -1;
p_err("failed to open object file");
goto err_close_obj;
}
diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
index 4b8079f294f6..b0072e64b010 100644
--- a/tools/bpf/resolve_btfids/Makefile
+++ b/tools/bpf/resolve_btfids/Makefile
@@ -19,7 +19,7 @@ endif
# Overrides for the prepare step libraries.
HOST_OVERRIDES := AR="$(HOSTAR)" CC="$(HOSTCC)" LD="$(HOSTLD)" ARCH="$(HOSTARCH)" \
- CROSS_COMPILE="" EXTRA_CFLAGS="$(HOSTCFLAGS)"
+ CROSS_COMPILE="" CLANG_CROSS_FLAGS="" EXTRA_CFLAGS="$(HOSTCFLAGS)"
RM ?= rm
HOSTCC ?= gcc
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index 5fb3fb3d97e0..ffe988867703 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -149,6 +149,10 @@ objprefix := $(subst ./,,$(OUTPUT)$(dir)/)
obj-y := $(addprefix $(objprefix),$(obj-y))
subdir-obj-y := $(addprefix $(objprefix),$(subdir-obj-y))
+# Separate out test log files from real build objects.
+test-y := $(filter %_log, $(obj-y))
+obj-y := $(filter-out %_log, $(obj-y))
+
# Final '$(obj)-in.o' object
in-target := $(objprefix)$(obj)-in.o
@@ -159,7 +163,7 @@ $(subdir-y):
$(sort $(subdir-obj-y)): $(subdir-y) ;
-$(in-target): $(obj-y) FORCE
+$(in-target): $(obj-y) $(test-y) FORCE
$(call rule_mkdir)
$(call if_changed,$(host)ld_multi)
diff --git a/tools/cgroup/memcg_slabinfo.py b/tools/cgroup/memcg_slabinfo.py
index 270c28a0d098..6bf4bde77903 100644
--- a/tools/cgroup/memcg_slabinfo.py
+++ b/tools/cgroup/memcg_slabinfo.py
@@ -146,11 +146,11 @@ def detect_kernel_config():
def for_each_slab(prog):
- PGSlab = ~prog.constant('PG_slab')
+ slabtype = prog.constant('PGTY_slab')
for page in for_each_page(prog):
try:
- if page.page_type.value_() == PGSlab:
+ if (page.page_type.value_() >> 24) == slabtype:
yield cast('struct slab *', page)
except FaultError:
pass
diff --git a/tools/hv/hv_fcopy_uio_daemon.c b/tools/hv/hv_fcopy_uio_daemon.c
index 12743d7f164f..e68a824d67b2 100644
--- a/tools/hv/hv_fcopy_uio_daemon.c
+++ b/tools/hv/hv_fcopy_uio_daemon.c
@@ -35,7 +35,10 @@
#define WIN8_SRV_MINOR 1
#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
-#define FCOPY_UIO "/sys/bus/vmbus/devices/eb765408-105f-49b6-b4aa-c123b64d17d4/uio"
+#define FCOPY_DEVICE_PATH(subdir) \
+ "/sys/bus/vmbus/devices/eb765408-105f-49b6-b4aa-c123b64d17d4/" #subdir
+#define FCOPY_UIO_PATH FCOPY_DEVICE_PATH(uio)
+#define FCOPY_CHANNELS_PATH FCOPY_DEVICE_PATH(channels)
#define FCOPY_VER_COUNT 1
static const int fcopy_versions[] = {
@@ -47,9 +50,62 @@ static const int fw_versions[] = {
UTIL_FW_VERSION
};
-#define HV_RING_SIZE 0x4000 /* 16KB ring buffer size */
+static uint32_t get_ring_buffer_size(void)
+{
+ char ring_path[PATH_MAX];
+ DIR *dir;
+ struct dirent *entry;
+ struct stat st;
+ uint32_t ring_size = 0;
+ int retry_count = 0;
+
+ /* Find the channel directory */
+ dir = opendir(FCOPY_CHANNELS_PATH);
+ if (!dir) {
+ usleep(100 * 1000); /* Avoid race with kernel, wait 100ms and retry once */
+ dir = opendir(FCOPY_CHANNELS_PATH);
+ if (!dir) {
+ syslog(LOG_ERR, "Failed to open channels directory: %s", strerror(errno));
+ return 0;
+ }
+ }
+
+retry_once:
+ while ((entry = readdir(dir)) != NULL) {
+ if (entry->d_type == DT_DIR && strcmp(entry->d_name, ".") != 0 &&
+ strcmp(entry->d_name, "..") != 0) {
+ snprintf(ring_path, sizeof(ring_path), "%s/%s/ring",
+ FCOPY_CHANNELS_PATH, entry->d_name);
+
+ if (stat(ring_path, &st) == 0) {
+ /*
+ * stat returns size of Tx, Rx rings combined,
+ * so take half of it for individual ring size.
+ */
+ ring_size = (uint32_t)st.st_size / 2;
+ syslog(LOG_INFO, "Ring buffer size from %s: %u bytes",
+ ring_path, ring_size);
+ break;
+ }
+ }
+ }
+
+ if (!ring_size && retry_count == 0) {
+ retry_count = 1;
+ rewinddir(dir);
+ usleep(100 * 1000); /* Wait 100ms and retry once */
+ goto retry_once;
+ }
+
+ closedir(dir);
-static unsigned char desc[HV_RING_SIZE];
+ if (!ring_size)
+ syslog(LOG_ERR, "Could not determine ring size");
+
+ return ring_size;
+}
+
+static unsigned char *desc;
static int target_fd;
static char target_fname[PATH_MAX];
@@ -62,8 +118,11 @@ static int hv_fcopy_create_file(char *file_name, char *path_name, __u32 flags)
filesize = 0;
p = path_name;
- snprintf(target_fname, sizeof(target_fname), "%s/%s",
- path_name, file_name);
+ if (snprintf(target_fname, sizeof(target_fname), "%s/%s",
+ path_name, file_name) >= sizeof(target_fname)) {
+ syslog(LOG_ERR, "target file name is too long: %s/%s", path_name, file_name);
+ goto done;
+ }
/*
* Check to see if the path is already in place; if not,
@@ -270,7 +329,7 @@ static void wcstoutf8(char *dest, const __u16 *src, size_t dest_size)
{
size_t len = 0;
- while (len < dest_size) {
+ while (len < dest_size && *src) {
if (src[len] < 0x80)
dest[len++] = (char)(*src++);
else
@@ -282,27 +341,15 @@ static void wcstoutf8(char *dest, const __u16 *src, size_t dest_size)
static int hv_fcopy_start(struct hv_start_fcopy *smsg_in)
{
- setlocale(LC_ALL, "en_US.utf8");
- size_t file_size, path_size;
- char *file_name, *path_name;
- char *in_file_name = (char *)smsg_in->file_name;
- char *in_path_name = (char *)smsg_in->path_name;
-
- file_size = wcstombs(NULL, (const wchar_t *restrict)in_file_name, 0) + 1;
- path_size = wcstombs(NULL, (const wchar_t *restrict)in_path_name, 0) + 1;
-
- file_name = (char *)malloc(file_size * sizeof(char));
- path_name = (char *)malloc(path_size * sizeof(char));
-
- if (!file_name || !path_name) {
- free(file_name);
- free(path_name);
- syslog(LOG_ERR, "Can't allocate memory for file name and/or path name");
- return HV_E_FAIL;
- }
+ /*
+ * file_name and path_name should have same length with appropriate
+ * member of hv_start_fcopy.
+ */
+ char file_name[W_MAX_PATH], path_name[W_MAX_PATH];
- wcstoutf8(file_name, (__u16 *)in_file_name, file_size);
- wcstoutf8(path_name, (__u16 *)in_path_name, path_size);
+ setlocale(LC_ALL, "en_US.utf8");
+ wcstoutf8(file_name, smsg_in->file_name, W_MAX_PATH - 1);
+ wcstoutf8(path_name, smsg_in->path_name, W_MAX_PATH - 1);
return hv_fcopy_create_file(file_name, path_name, smsg_in->copy_flags);
}
@@ -406,7 +453,7 @@ int main(int argc, char *argv[])
int daemonize = 1, long_index = 0, opt, ret = -EINVAL;
struct vmbus_br txbr, rxbr;
void *ring;
- uint32_t len = HV_RING_SIZE;
+ uint32_t ring_size, len;
char uio_name[NAME_MAX] = {0};
char uio_dev_path[PATH_MAX] = {0};
@@ -437,7 +484,20 @@ int main(int argc, char *argv[])
openlog("HV_UIO_FCOPY", 0, LOG_USER);
syslog(LOG_INFO, "starting; pid is:%d", getpid());
- fcopy_get_first_folder(FCOPY_UIO, uio_name);
+ ring_size = get_ring_buffer_size();
+ if (!ring_size) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ desc = malloc(ring_size * sizeof(unsigned char));
+ if (!desc) {
+ syslog(LOG_ERR, "malloc failed for desc buffer");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ fcopy_get_first_folder(FCOPY_UIO_PATH, uio_name);
snprintf(uio_dev_path, sizeof(uio_dev_path), "/dev/%s", uio_name);
fcopy_fd = open(uio_dev_path, O_RDWR);
@@ -445,17 +505,17 @@ int main(int argc, char *argv[])
syslog(LOG_ERR, "open %s failed; error: %d %s",
uio_dev_path, errno, strerror(errno));
ret = fcopy_fd;
- goto exit;
+ goto free_desc;
}
- ring = vmbus_uio_map(&fcopy_fd, HV_RING_SIZE);
+ ring = vmbus_uio_map(&fcopy_fd, ring_size);
if (!ring) {
ret = errno;
syslog(LOG_ERR, "mmap ringbuffer failed; error: %d %s", ret, strerror(ret));
goto close;
}
- vmbus_br_setup(&txbr, ring, HV_RING_SIZE);
- vmbus_br_setup(&rxbr, (char *)ring + HV_RING_SIZE, HV_RING_SIZE);
+ vmbus_br_setup(&txbr, ring, ring_size);
+ vmbus_br_setup(&rxbr, (char *)ring + ring_size, ring_size);
rxbr.vbr->imask = 0;
@@ -470,7 +530,7 @@ int main(int argc, char *argv[])
continue;
}
- len = HV_RING_SIZE;
+ len = ring_size;
ret = rte_vmbus_chan_recv_raw(&rxbr, desc, &len);
if (unlikely(ret <= 0)) {
/* This indicates a failure to communicate (or worse) */
@@ -490,6 +550,8 @@ int main(int argc, char *argv[])
}
close:
close(fcopy_fd);
+free_desc:
+ free(desc);
exit:
return ret;
}
diff --git a/tools/include/linux/kallsyms.h b/tools/include/linux/kallsyms.h
index 5a37ccbec54f..f61a01dd7eb7 100644
--- a/tools/include/linux/kallsyms.h
+++ b/tools/include/linux/kallsyms.h
@@ -18,6 +18,7 @@ static inline const char *kallsyms_lookup(unsigned long addr,
return NULL;
}
+#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>
#include <stdlib.h>
static inline void print_ip_sym(const char *loglvl, unsigned long ip)
@@ -30,5 +31,8 @@ static inline void print_ip_sym(const char *loglvl, unsigned long ip)
free(name);
}
+#else
+static inline void print_ip_sym(const char *loglvl, unsigned long ip) {}
+#endif
#endif
diff --git a/tools/include/nolibc/std.h b/tools/include/nolibc/std.h
index 933bc0be7e1c..a9d8b5b51f37 100644
--- a/tools/include/nolibc/std.h
+++ b/tools/include/nolibc/std.h
@@ -20,6 +20,8 @@
#include "stdint.h"
+#include <linux/types.h>
+
/* those are commonly provided by sys/types.h */
typedef unsigned int dev_t;
typedef unsigned long ino_t;
@@ -31,6 +33,6 @@ typedef unsigned long nlink_t;
typedef signed long off_t;
typedef signed long blksize_t;
typedef signed long blkcnt_t;
-typedef signed long time_t;
+typedef __kernel_old_time_t time_t;
#endif /* _NOLIBC_STD_H */
diff --git a/tools/include/nolibc/types.h b/tools/include/nolibc/types.h
index b26a5d0c417c..9d606c7138a8 100644
--- a/tools/include/nolibc/types.h
+++ b/tools/include/nolibc/types.h
@@ -127,7 +127,7 @@ typedef struct {
int __fd = (fd); \
if (__fd >= 0) \
__set->fds[__fd / FD_SETIDXMASK] &= \
- ~(1U << (__fd & FX_SETBITMASK)); \
+ ~(1U << (__fd & FD_SETBITMASK)); \
} while (0)
#define FD_SET(fd, set) do { \
@@ -144,7 +144,7 @@ typedef struct {
int __r = 0; \
if (__fd >= 0) \
__r = !!(__set->fds[__fd / FD_SETIDXMASK] & \
-1U << (__fd & FD_SET_BITMASK)); \
+1U << (__fd & FD_SETBITMASK)); \
__r; \
})
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4a939c90dc2e..5a5cdb453935 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1206,6 +1206,7 @@ enum bpf_perf_event_type {
#define BPF_F_BEFORE (1U << 3)
#define BPF_F_AFTER (1U << 4)
#define BPF_F_ID (1U << 5)
+#define BPF_F_PREORDER (1U << 6)
#define BPF_F_LINK BPF_F_LINK /* 1 << 13 */
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
@@ -2034,6 +2035,7 @@ union bpf_attr {
* for updates resulting in a null checksum the value is set to
* **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
* the checksum is to be computed against a pseudo-header.
+ * Flag **BPF_F_IPV6** should be set for IPv6 packets.
*
* This helper works in combination with **bpf_csum_diff**\ (),
* which does not update the checksum in-place, but offers more
@@ -6048,6 +6050,7 @@ enum {
BPF_F_PSEUDO_HDR = (1ULL << 4),
BPF_F_MARK_MANGLED_0 = (1ULL << 5),
BPF_F_MARK_ENFORCE = (1ULL << 6),
+ BPF_F_IPV6 = (1ULL << 7),
};
/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index c0e13cdf9660..b997c68bd945 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -388,7 +388,13 @@ extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak;
#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)
+#if defined(__clang__) && (__clang_major__ >= 19)
+#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__))
+#elif defined(__GNUC__) && (__GNUC__ >= 14)
+#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__))
+#else
#define ___type(...) typeof(___arrow(__VA_ARGS__))
+#endif
#define ___read(read_fn, dst, src_type, src, accessor) \
read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 27e7bfae953b..b770702dab37 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -995,7 +995,7 @@ static struct btf *btf_new_empty(struct btf *base_btf)
if (base_btf) {
btf->base_btf = base_btf;
btf->start_id = btf__type_cnt(base_btf);
- btf->start_str_off = base_btf->hdr->str_len;
+ btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off;
btf->swapped_endian = base_btf->swapped_endian;
}
@@ -4176,6 +4176,19 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id
return true;
}
+static bool btf_dedup_identical_ptrs(struct btf_dedup *d, __u32 id1, __u32 id2)
+{
+ struct btf_type *t1, *t2;
+
+ t1 = btf_type_by_id(d->btf, id1);
+ t2 = btf_type_by_id(d->btf, id2);
+
+ if (!btf_is_ptr(t1) || !btf_is_ptr(t2))
+ return false;
+
+ return t1->type == t2->type;
+}
+
/*
* Check equivalence of BTF type graph formed by candidate struct/union (we'll
* call it "candidate graph" in this description for brevity) to a type graph
@@ -4308,6 +4321,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
*/
if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
return 1;
+ /* A similar case is again observed for PTRs. */
+ if (btf_dedup_identical_ptrs(d, hypot_type_id, cand_id))
+ return 1;
return 0;
}
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 46cce18c8308..12306b5de3ef 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -225,6 +225,9 @@ static void btf_dump_free_names(struct hashmap *map)
size_t bkt;
struct hashmap_entry *cur;
+ if (!map)
+ return;
+
hashmap__for_each_entry(map, cur, bkt)
free((void *)cur->pkey);
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 5ff643e60d09..e33cf3caf8b6 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -60,6 +60,8 @@
#define BPF_FS_MAGIC 0xcafe4a11
#endif
+#define MAX_EVENT_NAME_LEN 64
+
#define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"
#define BPF_INSN_SZ (sizeof(struct bpf_insn))
@@ -283,7 +285,7 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...)
old_errno = errno;
va_start(args, format);
- __libbpf_pr(level, format, args);
+ print_fn(level, format, args);
va_end(args);
errno = old_errno;
@@ -594,7 +596,7 @@ struct extern_desc {
int sym_idx;
int btf_id;
int sec_btf_id;
- const char *name;
+ char *name;
char *essent_name;
bool is_set;
bool is_weak;
@@ -724,7 +726,7 @@ struct bpf_object {
struct usdt_manager *usdt_man;
- struct bpf_map *arena_map;
+ int arena_map_idx;
void *arena_data;
size_t arena_data_sz;
@@ -887,7 +889,7 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
return -LIBBPF_ERRNO__FORMAT;
}
- if (sec_off + prog_sz > sec_sz) {
+ if (sec_off + prog_sz > sec_sz || sec_off + prog_sz < sec_off) {
pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
sec_name, sec_off);
return -LIBBPF_ERRNO__FORMAT;
@@ -1492,6 +1494,7 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.obj_buf_sz = obj_buf_sz;
obj->efile.btf_maps_shndx = -1;
obj->kconfig_map_idx = -1;
+ obj->arena_map_idx = -1;
obj->kern_version = get_kernel_version();
obj->loaded = false;
@@ -2074,7 +2077,7 @@ static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
}
len = strlen(value);
- if (value[len - 1] != '"') {
+ if (len < 2 || value[len - 1] != '"') {
pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
ext->name, value);
return -EINVAL;
@@ -2933,7 +2936,7 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
const long page_sz = sysconf(_SC_PAGE_SIZE);
size_t mmap_sz;
- mmap_sz = bpf_map_mmap_sz(obj->arena_map);
+ mmap_sz = bpf_map_mmap_sz(map);
if (roundup(data_sz, page_sz) > mmap_sz) {
pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
sec_name, mmap_sz, data_sz);
@@ -3007,12 +3010,12 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
if (map->def.type != BPF_MAP_TYPE_ARENA)
continue;
- if (obj->arena_map) {
+ if (obj->arena_map_idx >= 0) {
pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
- map->name, obj->arena_map->name);
+ map->name, obj->maps[obj->arena_map_idx].name);
return -EINVAL;
}
- obj->arena_map = map;
+ obj->arena_map_idx = i;
if (obj->efile.arena_data) {
err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
@@ -3022,7 +3025,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
return err;
}
}
- if (obj->efile.arena_data && !obj->arena_map) {
+ if (obj->efile.arena_data && obj->arena_map_idx < 0) {
pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
ARENA_SEC);
return -ENOENT;
@@ -4221,7 +4224,9 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
return ext->btf_id;
}
t = btf__type_by_id(obj->btf, ext->btf_id);
- ext->name = btf__name_by_offset(obj->btf, t->name_off);
+ ext->name = strdup(btf__name_by_offset(obj->btf, t->name_off));
+ if (!ext->name)
+ return -ENOMEM;
ext->sym_idx = i;
ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
@@ -4541,10 +4546,20 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
/* arena data relocation */
if (shdr_idx == obj->efile.arena_data_shndx) {
+ if (obj->arena_map_idx < 0) {
+ pr_warn("prog '%s': bad arena data relocation at insn %u, no arena maps defined\n",
+ prog->name, insn_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
reloc_desc->type = RELO_DATA;
reloc_desc->insn_idx = insn_idx;
- reloc_desc->map_idx = obj->arena_map - obj->maps;
+ reloc_desc->map_idx = obj->arena_map_idx;
reloc_desc->sym_off = sym->st_value;
+
+ map = &obj->maps[obj->arena_map_idx];
+ pr_debug("prog '%s': found arena map %d (%s, sec %d, off %zu) for insn %u\n",
+ prog->name, obj->arena_map_idx, map->name, map->sec_idx,
+ map->sec_offset, insn_idx);
return 0;
}
@@ -9060,8 +9075,10 @@ void bpf_object__close(struct bpf_object *obj)
zfree(&obj->btf_custom_path);
zfree(&obj->kconfig);
- for (i = 0; i < obj->nr_extern; i++)
+ for (i = 0; i < obj->nr_extern; i++) {
+ zfree(&obj->externs[i].name);
zfree(&obj->externs[i].essent_name);
+ }
zfree(&obj->externs);
obj->nr_extern = 0;
@@ -11039,16 +11056,16 @@ static const char *tracefs_available_filter_functions_addrs(void)
: TRACEFS"/available_filter_functions_addrs";
}
-static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
- const char *kfunc_name, size_t offset)
+static void gen_probe_legacy_event_name(char *buf, size_t buf_sz,
+ const char *name, size_t offset)
{
static int index = 0;
int i;
- snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
- __sync_fetch_and_add(&index, 1));
+ snprintf(buf, buf_sz, "libbpf_%u_%d_%s_0x%zx", getpid(),
+ __sync_fetch_and_add(&index, 1), name, offset);
- /* sanitize binary_path in the probe name */
+ /* sanitize name in the probe name */
for (i = 0; buf[i]; i++) {
if (!isalnum(buf[i]))
buf[i] = '_';
@@ -11174,9 +11191,9 @@ int probe_kern_syscall_wrapper(int token_fd)
return pfd >= 0 ? 1 : 0;
} else { /* legacy mode */
- char probe_name[128];
+ char probe_name[MAX_EVENT_NAME_LEN];
- gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
+ gen_probe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
return 0;
@@ -11233,10 +11250,10 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
func_name, offset,
-1 /* pid */, 0 /* ref_ctr_off */);
} else {
- char probe_name[256];
+ char probe_name[MAX_EVENT_NAME_LEN];
- gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
- func_name, offset);
+ gen_probe_legacy_event_name(probe_name, sizeof(probe_name),
+ func_name, offset);
legacy_probe = strdup(probe_name);
if (!legacy_probe)
@@ -11744,20 +11761,6 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru
return ret;
}
-static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
- const char *binary_path, uint64_t offset)
-{
- int i;
-
- snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
-
- /* sanitize binary_path in the probe name */
- for (i = 0; buf[i]; i++) {
- if (!isalnum(buf[i]))
- buf[i] = '_';
- }
-}
-
static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
const char *binary_path, size_t offset)
{
@@ -12173,13 +12176,14 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
func_offset, pid, ref_ctr_off);
} else {
- char probe_name[PATH_MAX + 64];
+ char probe_name[MAX_EVENT_NAME_LEN];
if (ref_ctr_off)
return libbpf_err_ptr(-EINVAL);
- gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
- binary_path, func_offset);
+ gen_probe_legacy_event_name(probe_name, sizeof(probe_name),
+ strrchr(binary_path, '/') ? : binary_path,
+ func_offset);
legacy_probe = strdup(probe_name);
if (!legacy_probe)
@@ -13256,7 +13260,6 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
attr.config = PERF_COUNT_SW_BPF_OUTPUT;
attr.type = PERF_TYPE_SOFTWARE;
attr.sample_type = PERF_SAMPLE_RAW;
- attr.sample_period = sample_period;
attr.wakeup_events = sample_period;
p.attr = &attr;
@@ -13983,6 +13986,12 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
}
link = map_skel->link;
+ if (!link) {
+ pr_warn("map '%s': BPF map skeleton link is uninitialized\n",
+ bpf_map__name(map));
+ continue;
+ }
+
if (*link)
continue;
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 179f6b31cbd6..d4ab9315afe7 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -1220,7 +1220,7 @@ static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj
} else {
if (!secs_match(dst_sec, src_sec)) {
pr_warn("ELF sections %s are incompatible\n", src_sec->sec_name);
- return -1;
+ return -EINVAL;
}
/* "license" and "version" sections are deduped */
@@ -2067,7 +2067,7 @@ static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *ob
}
} else if (!secs_match(dst_sec, src_sec)) {
pr_warn("sections %s are not compatible\n", src_sec->sec_name);
- return -1;
+ return -EINVAL;
}
/* shdr->sh_link points to SYMTAB */
diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
index 975e265eab3b..06663f9ea581 100644
--- a/tools/lib/bpf/nlattr.c
+++ b/tools/lib/bpf/nlattr.c
@@ -63,16 +63,16 @@ static int validate_nla(struct nlattr *nla, int maxtype,
minlen = nla_attr_minlen[pt->type];
if (libbpf_nla_len(nla) < minlen)
- return -1;
+ return -EINVAL;
if (pt->maxlen && libbpf_nla_len(nla) > pt->maxlen)
- return -1;
+ return -EINVAL;
if (pt->type == LIBBPF_NLA_STRING) {
char *data = libbpf_nla_data(nla);
if (data[libbpf_nla_len(nla) - 1] != '\0')
- return -1;
+ return -EINVAL;
}
return 0;
@@ -118,19 +118,18 @@ int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head,
if (policy) {
err = validate_nla(nla, maxtype, policy);
if (err < 0)
- goto errout;
+ return err;
}
- if (tb[type])
+ if (tb[type]) {
pr_warn("Attribute of type %#x found multiple times in message, "
"previous attribute is being ignored.\n", type);
+ }
tb[type] = nla;
}
- err = 0;
-errout:
- return err;
+ return 0;
}
/**
diff --git a/tools/lib/subcmd/help.c b/tools/lib/subcmd/help.c
index 8561b0f01a24..9ef569492560 100644
--- a/tools/lib/subcmd/help.c
+++ b/tools/lib/subcmd/help.c
@@ -9,6 +9,7 @@
#include <sys/stat.h>
#include <unistd.h>
#include <dirent.h>
+#include <assert.h>
#include "subcmd-util.h"
#include "help.h"
#include "exec-cmd.h"
@@ -82,10 +83,11 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
ci++;
cj++;
} else {
- zfree(&cmds->names[cj]);
- cmds->names[cj++] = cmds->names[ci++];
+ cmds->names[cj++] = cmds->names[ci];
+ cmds->names[ci++] = NULL;
}
} else if (cmp == 0) {
+ zfree(&cmds->names[ci]);
ci++;
ei++;
} else if (cmp > 0) {
@@ -94,12 +96,12 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
}
if (ci != cj) {
while (ci < cmds->cnt) {
- zfree(&cmds->names[cj]);
- cmds->names[cj++] = cmds->names[ci++];
+ cmds->names[cj++] = cmds->names[ci];
+ cmds->names[ci++] = NULL;
}
}
for (ci = cj; ci < cmds->cnt; ci++)
- zfree(&cmds->names[ci]);
+ assert(cmds->names[ci] == NULL);
cmds->cnt = cj;
}
diff --git a/tools/net/ynl/ethtool.py b/tools/net/ynl/ethtool.py
index 63c471f075ab..7e8342f91481 100755
--- a/tools/net/ynl/ethtool.py
+++ b/tools/net/ynl/ethtool.py
@@ -337,16 +337,24 @@ def main():
print('Capabilities:')
[print(f'\t{v}') for v in bits_to_dict(tsinfo['timestamping'])]
- print(f'PTP Hardware Clock: {tsinfo["phc-index"]}')
+ print(f'PTP Hardware Clock: {tsinfo.get("phc-index", "none")}')
- print('Hardware Transmit Timestamp Modes:')
- [print(f'\t{v}') for v in bits_to_dict(tsinfo['tx-types'])]
+ if 'tx-types' in tsinfo:
+ print('Hardware Transmit Timestamp Modes:')
+ [print(f'\t{v}') for v in bits_to_dict(tsinfo['tx-types'])]
+ else:
+ print('Hardware Transmit Timestamp Modes: none')
+
+ if 'rx-filters' in tsinfo:
+ print('Hardware Receive Filter Modes:')
+ [print(f'\t{v}') for v in bits_to_dict(tsinfo['rx-filters'])]
+ else:
+ print('Hardware Receive Filter Modes: none')
- print('Hardware Receive Filter Modes:')
- [print(f'\t{v}') for v in bits_to_dict(tsinfo['rx-filters'])]
+ if 'stats' in tsinfo and tsinfo['stats']:
+ print('Statistics:')
+ [print(f'\t{k}: {v}') for k, v in tsinfo['stats'].items()]
- print('Statistics:')
- [print(f'\t{k}: {v}') for k, v in tsinfo['stats'].items()]
return
print(f'Settings for {args.device}:')
diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c
index ce32cb35007d..c4da34048ef8 100644
--- a/tools/net/ynl/lib/ynl.c
+++ b/tools/net/ynl/lib/ynl.c
@@ -364,7 +364,7 @@ int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr)
"Invalid attribute (binary %s)", policy->name);
return -1;
case YNL_PT_NUL_STR:
- if ((!policy->len || len <= policy->len) && !data[len - 1])
+ if (len && (!policy->len || len <= policy->len) && !data[len - 1])
break;
yerr(yarg->ys, YNL_ERROR_ATTR_INVALID,
"Invalid attribute (string %s)", policy->name);
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
index 463f1394ab97..c78f1c1bca75 100755
--- a/tools/net/ynl/ynl-gen-c.py
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -2417,6 +2417,9 @@ def render_uapi(family, cw):
defines = []
for const in family['definitions']:
+ if const.get('header'):
+ continue
+
if const['type'] != 'const':
cw.writes_defines(defines)
defines = []
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index ce3ea0c2de04..d4d82bb9b551 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -216,12 +216,15 @@ static bool is_rust_noreturn(const struct symbol *func)
str_ends_with(func->name, "_4core9panicking14panic_explicit") ||
str_ends_with(func->name, "_4core9panicking14panic_nounwind") ||
str_ends_with(func->name, "_4core9panicking18panic_bounds_check") ||
+ str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt") ||
str_ends_with(func->name, "_4core9panicking19assert_failed_inner") ||
str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference") ||
str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") ||
+ str_ends_with(func->name, "_7___rustc17rust_begin_unwind") ||
strstr(func->name, "_4core9panicking13assert_failed") ||
strstr(func->name, "_4core9panicking11panic_const24panic_const_") ||
- (strstr(func->name, "_4core5slice5index24slice_") &&
+ (strstr(func->name, "_4core5slice5index") &&
+ strstr(func->name, "slice_") &&
str_ends_with(func->name, "_fail"));
}
@@ -1243,12 +1246,15 @@ static const char *uaccess_safe_builtin[] = {
"__ubsan_handle_load_invalid_value",
/* STACKLEAK */
"stackleak_track_stack",
+ /* TRACE_BRANCH_PROFILING */
+ "ftrace_likely_update",
+ /* STACKPROTECTOR */
+ "__stack_chk_fail",
/* misc */
"csum_partial_copy_generic",
"copy_mc_fragile",
"copy_mc_fragile_handle_tail",
"copy_mc_enhanced_fast_string",
- "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
"rep_stos_alternative",
"rep_movs_alternative",
"__copy_user_nocache",
@@ -1567,6 +1573,8 @@ static int add_jump_destinations(struct objtool_file *file)
unsigned long dest_off;
for_each_insn(file, insn) {
+ struct symbol *func = insn_func(insn);
+
if (insn->jump_dest) {
/*
* handle_group_alt() may have previously set
@@ -1590,7 +1598,7 @@ static int add_jump_destinations(struct objtool_file *file)
} else if (reloc->sym->return_thunk) {
add_return_call(file, insn, true);
continue;
- } else if (insn_func(insn)) {
+ } else if (func) {
/*
* External sibling call or internal sibling call with
* STT_FUNC reloc.
@@ -1623,6 +1631,15 @@ static int add_jump_destinations(struct objtool_file *file)
continue;
}
+ /*
+ * GCOV/KCOV dead code can jump to the end of the
+ * function/section.
+ */
+ if (file->ignore_unreachables && func &&
+ dest_sec == insn->sec &&
+ dest_off == func->offset + func->len)
+ continue;
+
WARN_INSN(insn, "can't find jump dest instruction at %s+0x%lx",
dest_sec->name, dest_off);
return -1;
@@ -1647,8 +1664,7 @@ static int add_jump_destinations(struct objtool_file *file)
/*
* Cross-function jump.
*/
- if (insn_func(insn) && insn_func(jump_dest) &&
- insn_func(insn) != insn_func(jump_dest)) {
+ if (func && insn_func(jump_dest) && func != insn_func(jump_dest)) {
/*
* For GCC 8+, create parent/child links for any cold
@@ -1665,10 +1681,10 @@ static int add_jump_destinations(struct objtool_file *file)
* case where the parent function's only reference to a
* subfunction is through a jump table.
*/
- if (!strstr(insn_func(insn)->name, ".cold") &&
+ if (!strstr(func->name, ".cold") &&
strstr(insn_func(jump_dest)->name, ".cold")) {
- insn_func(insn)->cfunc = insn_func(jump_dest);
- insn_func(jump_dest)->pfunc = insn_func(insn);
+ func->cfunc = insn_func(jump_dest);
+ insn_func(jump_dest)->pfunc = func;
}
}
@@ -3339,7 +3355,7 @@ static int handle_insn_ops(struct instruction *insn,
if (update_cfi_state(insn, next_insn, &state->cfi, op))
return 1;
- if (!insn->alt_group)
+ if (!opts.uaccess || !insn->alt_group)
continue;
if (op->dest.type == OP_DEST_PUSHF) {
@@ -3634,6 +3650,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
!strncmp(func->name, "__pfx_", 6))
return 0;
+ if (file->ignore_unreachables)
+ return 0;
+
WARN("%s() falls through to next function %s()",
func->name, insn_func(insn)->name);
return 1;
@@ -3803,6 +3822,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
return 0;
case INSN_STAC:
+ if (!opts.uaccess)
+ break;
+
if (state.uaccess) {
WARN_INSN(insn, "recursive UACCESS enable");
return 1;
@@ -3812,6 +3834,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
break;
case INSN_CLAC:
+ if (!opts.uaccess)
+ break;
+
if (!state.uaccess && func) {
WARN_INSN(insn, "redundant UACCESS disable");
return 1;
@@ -3853,6 +3878,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
if (!next_insn) {
if (state.cfi.cfa.base == CFI_UNDEFINED)
return 0;
+ if (file->ignore_unreachables)
+ return 0;
+
WARN("%s: unexpected end of section", sec->name);
return 1;
}
@@ -4005,6 +4033,9 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
break;
}
+ if (insn->dead_end)
+ return 0;
+
if (!next) {
WARN_INSN(insn, "teh end!");
return -1;
@@ -4281,7 +4312,8 @@ static int validate_symbol(struct objtool_file *file, struct section *sec,
if (!insn || insn->ignore || insn->visited)
return 0;
- state->uaccess = sym->uaccess_safe;
+ if (opts.uaccess)
+ state->uaccess = sym->uaccess_safe;
ret = validate_branch(file, insn_func(insn), insn, *state);
if (ret)
@@ -4728,8 +4760,10 @@ int check(struct objtool_file *file)
init_cfi_state(&force_undefined_cfi);
force_undefined_cfi.force_undefined = true;
- if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
+ if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) {
+ ret = -1;
goto out;
+ }
cfi_hash_add(&init_cfi);
cfi_hash_add(&func_cfi);
@@ -4746,7 +4780,7 @@ int check(struct objtool_file *file)
if (opts.retpoline) {
ret = validate_retpoline(file);
if (ret < 0)
- return ret;
+ goto out;
warnings += ret;
}
@@ -4782,7 +4816,7 @@ int check(struct objtool_file *file)
*/
ret = validate_unrets(file);
if (ret < 0)
- return ret;
+ goto out;
warnings += ret;
}
@@ -4845,7 +4879,7 @@ int check(struct objtool_file *file)
if (opts.prefix) {
ret = add_prefix_symbols(file);
if (ret < 0)
- return ret;
+ goto out;
warnings += ret;
}
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index f5b81d439387..1ef45d803024 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -48,8 +48,6 @@ libbpf/
libperf/
libsubcmd/
libsymbol/
-libtraceevent/
-libtraceevent_plugins/
fixdep
Documentation/doc.dep
python_ext_build/
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index b102a4c525e4..a2034fa18325 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -569,6 +569,8 @@ ifndef NO_LIBELF
ifeq ($(feature-libdebuginfod), 1)
CFLAGS += -DHAVE_DEBUGINFOD_SUPPORT
EXTLIBS += -ldebuginfod
+ else
+ $(warning No elfutils/debuginfod.h found, no debuginfo server support, please install libdebuginfod-dev/elfutils-debuginfod-client-devel or equivalent)
endif
endif
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 8ee59ecb1411..b61c355fbdee 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -1143,7 +1143,8 @@ install-tests: all install-gtk
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_probe'; \
$(INSTALL) tests/shell/base_probe/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_probe'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \
- $(INSTALL) tests/shell/base_probe/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \
+ $(INSTALL) tests/shell/base_report/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \
+ $(INSTALL) tests/shell/base_report/*.txt '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/coresight' ; \
$(INSTALL) tests/shell/coresight/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/coresight'
$(Q)$(MAKE) -C tests/shell/coresight install-tests
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index adbaf80b398c..ab9035573a15 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -3471,7 +3471,7 @@ static struct option __record_options[] = {
"sample selected machine registers on interrupt,"
" use '-I?' to list register names", parse_intr_regs),
OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
- "sample selected machine registers on interrupt,"
+ "sample selected machine registers in user space,"
" use '--user-regs=?' to list register names", parse_user_regs),
OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
"Record running/enabled time of read (:S) events"),
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 5981cc51abc8..64bf3ac237f2 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -999,7 +999,7 @@ thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
else if (cmp < 0)
node = node->rb_right;
else {
- BUG_ON(thread != atoms->thread);
+ BUG_ON(!RC_CHK_EQUAL(thread, atoms->thread));
return atoms;
}
}
@@ -1116,6 +1116,21 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
atoms->nb_atoms++;
}
+static void free_work_atoms(struct work_atoms *atoms)
+{
+ struct work_atom *atom, *tmp;
+
+ if (atoms == NULL)
+ return;
+
+ list_for_each_entry_safe(atom, tmp, &atoms->work_list, list) {
+ list_del(&atom->list);
+ free(atom);
+ }
+ thread__zput(atoms->thread);
+ free(atoms);
+}
+
static int latency_switch_event(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
@@ -1639,6 +1654,7 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
const char *color = PERF_COLOR_NORMAL;
char stimestamp[32];
const char *str;
+ int ret = -1;
BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);
@@ -1669,17 +1685,20 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
sched_in = map__findnew_thread(sched, machine, -1, next_pid);
sched_out = map__findnew_thread(sched, machine, -1, prev_pid);
if (sched_in == NULL || sched_out == NULL)
- return -1;
+ goto out;
tr = thread__get_runtime(sched_in);
- if (tr == NULL) {
- thread__put(sched_in);
- return -1;
- }
+ if (tr == NULL)
+ goto out;
+
+ thread__put(sched->curr_thread[this_cpu.cpu]);
+ thread__put(sched->curr_out_thread[this_cpu.cpu]);
sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);
sched->curr_out_thread[this_cpu.cpu] = thread__get(sched_out);
+ ret = 0;
+
str = thread__comm_str(sched_in);
new_shortname = 0;
if (!tr->shortname[0]) {
@@ -1774,12 +1793,10 @@ sched_out:
color_fprintf(stdout, color, "\n");
out:
- if (sched->map.task_name)
- thread__put(sched_out);
-
+ thread__put(sched_out);
thread__put(sched_in);
- return 0;
+ return ret;
}
static int process_sched_switch_event(const struct perf_tool *tool,
@@ -2023,6 +2040,16 @@ static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
return r->last_time[cpu];
}
+static void timehist__evsel_priv_destructor(void *priv)
+{
+ struct evsel_runtime *r = priv;
+
+ if (r) {
+ free(r->last_time);
+ free(r);
+ }
+}
+
static int comm_width = 30;
static char *timehist_get_commstr(struct thread *thread)
@@ -3276,6 +3303,8 @@ static int perf_sched__timehist(struct perf_sched *sched)
setup_pager();
+ evsel__set_priv_destructor(timehist__evsel_priv_destructor);
+
/* prefer sched_waking if it is captured */
if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
handlers[1].handler = timehist_sched_wakeup_ignore;
@@ -3376,13 +3405,13 @@ static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *d
this->total_runtime += data->total_runtime;
this->nb_atoms += data->nb_atoms;
this->total_lat += data->total_lat;
- list_splice(&data->work_list, &this->work_list);
+ list_splice_init(&data->work_list, &this->work_list);
if (this->max_lat < data->max_lat) {
this->max_lat = data->max_lat;
this->max_lat_start = data->max_lat_start;
this->max_lat_end = data->max_lat_end;
}
- zfree(&data);
+ free_work_atoms(data);
return;
}
}
@@ -3461,7 +3490,6 @@ static int perf_sched__lat(struct perf_sched *sched)
work_list = rb_entry(next, struct work_atoms, node);
output_lat_thread(sched, work_list);
next = rb_next(next);
- thread__zput(work_list->thread);
}
printf(" -----------------------------------------------------------------------------------------------------------------\n");
@@ -3475,6 +3503,13 @@ static int perf_sched__lat(struct perf_sched *sched)
rc = 0;
+ while ((next = rb_first_cached(&sched->sorted_atom_root))) {
+ struct work_atoms *data;
+
+ data = rb_entry(next, struct work_atoms, node);
+ rb_erase_cached(next, &sched->sorted_atom_root);
+ free_work_atoms(data);
+ }
out_free_cpus_switch_event:
free_cpus_switch_event(sched);
return rc;
@@ -3546,10 +3581,10 @@ static int perf_sched__map(struct perf_sched *sched)
sched->curr_out_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_out_thread)));
if (!sched->curr_out_thread)
- return rc;
+ goto out_free_curr_thread;
if (setup_cpus_switch_event(sched))
- goto out_free_curr_thread;
+ goto out_free_curr_out_thread;
if (setup_map_cpus(sched))
goto out_free_cpus_switch_event;
@@ -3580,7 +3615,14 @@ out_put_map_cpus:
out_free_cpus_switch_event:
free_cpus_switch_event(sched);
+out_free_curr_out_thread:
+ for (int i = 0; i < MAX_CPUS; i++)
+ thread__put(sched->curr_out_thread[i]);
+ zfree(&sched->curr_out_thread);
+
out_free_curr_thread:
+ for (int i = 0; i < MAX_CPUS; i++)
+ thread__put(sched->curr_thread[i]);
zfree(&sched->curr_thread);
return rc;
}
@@ -3887,13 +3929,15 @@ int cmd_sched(int argc, const char **argv)
if (!argc)
usage_with_options(sched_usage, sched_options);
+ thread__set_priv_destructor(free);
+
/*
* Aliased to 'perf script' for now:
*/
if (!strcmp(argv[0], "script")) {
- return cmd_script(argc, argv);
+ ret = cmd_script(argc, argv);
} else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
- return __cmd_record(argc, argv);
+ ret = __cmd_record(argc, argv);
} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
sched.tp_handler = &lat_ops;
if (argc > 1) {
@@ -3902,7 +3946,7 @@ int cmd_sched(int argc, const char **argv)
usage_with_options(latency_usage, latency_options);
}
setup_sorting(&sched, latency_options, latency_usage);
- return perf_sched__lat(&sched);
+ ret = perf_sched__lat(&sched);
} else if (!strcmp(argv[0], "map")) {
if (argc) {
argc = parse_options(argc, argv, map_options, map_usage, 0);
@@ -3913,13 +3957,14 @@ int cmd_sched(int argc, const char **argv)
sched.map.task_names = strlist__new(sched.map.task_name, NULL);
if (sched.map.task_names == NULL) {
fprintf(stderr, "Failed to parse task names\n");
- return -1;
+ ret = -1;
+ goto out;
}
}
}
sched.tp_handler = &map_ops;
setup_sorting(&sched, latency_options, latency_usage);
- return perf_sched__map(&sched);
+ ret = perf_sched__map(&sched);
} else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
sched.tp_handler = &replay_ops;
if (argc) {
@@ -3927,7 +3972,7 @@ int cmd_sched(int argc, const char **argv)
if (argc)
usage_with_options(replay_usage, replay_options);
}
- return perf_sched__replay(&sched);
+ ret = perf_sched__replay(&sched);
} else if (!strcmp(argv[0], "timehist")) {
if (argc) {
argc = parse_options(argc, argv, timehist_options,
@@ -3943,19 +3988,19 @@ int cmd_sched(int argc, const char **argv)
parse_options_usage(NULL, timehist_options, "w", true);
if (sched.show_next)
parse_options_usage(NULL, timehist_options, "n", true);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
ret = symbol__validate_sym_arguments();
- if (ret)
- return ret;
-
- return perf_sched__timehist(&sched);
+ if (!ret)
+ ret = perf_sched__timehist(&sched);
} else {
usage_with_options(sched_usage, sched_options);
}
+out:
/* free usage string allocated by parse_options_subcommand */
free((void *)sched_usage[0]);
- return 0;
+ return ret;
}
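
The cmd_sched() changes above convert every early "return" into "ret = ...;" followed by a fall-through to a single exit label, so the usage string allocated by parse_options_subcommand() is freed on every path. Below is a minimal standalone sketch of that single-exit pattern; do_work() and its modes are hypothetical stand-ins, not perf code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int do_work(const char *mode)
{
	int ret = -1;
	char *usage = strdup("usage: demo <mode>");	/* stands in for sched_usage[0] */

	if (usage == NULL)
		return -1;

	if (strcmp(mode, "ok") == 0)
		ret = 0;		/* success still falls through to cleanup */
	else if (strcmp(mode, "bad") == 0)
		ret = -22;		/* errors no longer return early */
	else
		goto out;		/* unknown mode keeps ret == -1 */
out:
	free(usage);			/* released exactly once on every path */
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", do_work("ok"), do_work("bad"), do_work("other"));
	return 0;
}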
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index ecd26e058baf..f77e4f4b6f03 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1327,7 +1327,7 @@ static const struct syscall_fmt syscall_fmts[] = {
.arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
[2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
[4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
- { .name = "rseq", .errpid = true,
+ { .name = "rseq",
.arg = { [0] = { .from_user = true /* rseq */, }, }, },
{ .name = "rt_sigaction",
.arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
@@ -1351,7 +1351,7 @@ static const struct syscall_fmt syscall_fmts[] = {
{ .name = "sendto",
.arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
[4] = SCA_SOCKADDR_FROM_USER(addr), }, },
- { .name = "set_robust_list", .errpid = true,
+ { .name = "set_robust_list",
.arg = { [0] = { .from_user = true /* head */, }, }, },
{ .name = "set_tid_address", .errpid = true, },
{ .name = "setitimer",
@@ -2873,8 +2873,8 @@ errno_print: {
else if (sc->fmt->errpid) {
struct thread *child = machine__find_thread(trace->host, ret, ret);
+ fprintf(trace->output, "%ld", ret);
if (child != NULL) {
- fprintf(trace->output, "%ld", ret);
if (thread__comm_set(child))
fprintf(trace->output, " (%s)", thread__comm_str(child));
thread__put(child);
@@ -3986,10 +3986,13 @@ static int trace__set_filter_loop_pids(struct trace *trace)
if (!strcmp(thread__comm_str(parent), "sshd") ||
strstarts(thread__comm_str(parent), "gnome-terminal")) {
pids[nr++] = thread__tid(parent);
+ thread__put(parent);
break;
}
+ thread__put(thread);
thread = parent;
}
+ thread__put(thread);
err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
if (!err && trace->filter_pids.map)
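
The added thread__put() calls above make the parent walk in trace__set_filter_loop_pids() drop every reference it takes: one for the node being left on each step and one for the node held when the loop stops. A standalone sketch of that discipline with a toy refcounted type follows; node_get()/node_put() are illustrative, not the perf API.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int refcnt;
	struct node *parent;
};

static struct node *node_get(struct node *n)
{
	if (n)
		n->refcnt++;
	return n;
}

static void node_put(struct node *n)
{
	if (n && --n->refcnt == 0)
		free(n);
}

int main(void)
{
	struct node *root = calloc(1, sizeof(*root));
	struct node *leaf = calloc(1, sizeof(*leaf));
	struct node *cur;

	root->refcnt = 1;			/* references held by main() */
	leaf->refcnt = 1;
	leaf->parent = root;

	cur = node_get(leaf);			/* the walk holds its own reference */
	while (cur->parent) {
		struct node *parent = node_get(cur->parent);

		node_put(cur);			/* drop the node we are leaving */
		cur = parent;
	}
	node_put(cur);				/* drop the last node we held */

	node_put(leaf);				/* main() drops its own references */
	node_put(root);
	return 0;
}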
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index 121cf61ba1b3..e0b2e7268ef6 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -680,7 +680,10 @@ class CallGraphModelBase(TreeModel):
s = value.replace("%", "\\%")
s = s.replace("_", "\\_")
# Translate * and ? into SQL LIKE pattern characters % and _
- trans = string.maketrans("*?", "%_")
+ if sys.version_info[0] == 3:
+ trans = str.maketrans("*?", "%_")
+ else:
+ trans = string.maketrans("*?", "%_")
match = " LIKE '" + str(s).translate(trans) + "'"
else:
match = " GLOB '" + str(value) + "'"
diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
index 4cb7d486b5c1..047433c977bc 100644
--- a/tools/perf/tests/bp_account.c
+++ b/tools/perf/tests/bp_account.c
@@ -104,6 +104,7 @@ static int bp_accounting(int wp_cnt, int share)
fd_wp = wp_event((void *)&the_var, &attr_new);
TEST_ASSERT_VAL("failed to create max wp\n", fd_wp != -1);
pr_debug("wp max created\n");
+ close(fd_wp);
}
for (i = 0; i < wp_cnt; i++)
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index 5cab17a1942e..ee43d8fa2ed6 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -258,7 +258,7 @@ static int compar(const void *a, const void *b)
const struct event_node *nodeb = b;
s64 cmp = nodea->event_time - nodeb->event_time;
- return cmp;
+ return cmp < 0 ? -1 : (cmp > 0 ? 1 : 0);
}
static int process_events(struct evlist *evlist,
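
The compar() fix above matters because qsort() comparators return int: handing back the raw s64 difference truncates it, and a large positive difference can wrap to a negative int, breaking the sort order. A standalone illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

static int bad_compar(int64_t a, int64_t b)
{
	int64_t cmp = a - b;

	return cmp;	/* truncated; typical implementations keep the low 32 bits */
}

static int good_compar(int64_t a, int64_t b)
{
	int64_t cmp = a - b;

	return cmp < 0 ? -1 : (cmp > 0 ? 1 : 0);
}

int main(void)
{
	int64_t a = 6442450944LL, b = 0;	/* a > b, but a - b does not fit in int */

	printf("bad: %d, good: %d\n", bad_compar(a, b), good_compar(a, b));
	return 0;
}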
diff --git a/tools/perf/tests/tests-scripts.c b/tools/perf/tests/tests-scripts.c
index ed114b044293..b6986d50dde6 100644
--- a/tools/perf/tests/tests-scripts.c
+++ b/tools/perf/tests/tests-scripts.c
@@ -255,6 +255,7 @@ static void append_scripts_in_dir(int dir_fd,
continue; /* Skip scripts that have a separate driver. */
fd = openat(dir_fd, ent->d_name, O_PATH);
append_scripts_in_dir(fd, result, result_sz);
+ close(fd);
}
for (i = 0; i < n_dirs; i++) /* Clean up */
zfree(&entlist[i]);
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 49ba82bf3391..3283b6313bab 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -3267,10 +3267,10 @@ do_hotkey: // key came straight from options ui__popup_menu()
/*
* No need to set actions->dso here since
* it's just to remove the current filter.
- * Ditto for thread below.
*/
do_zoom_dso(browser, actions);
} else if (top == &browser->hists->thread_filter) {
+ actions->thread = thread;
do_zoom_thread(browser, actions);
} else if (top == &browser->hists->socket_filter) {
do_zoom_socket(browser, actions);
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index e763e8d99a43..ee00313d5d7e 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -864,7 +864,7 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine,
char *allocated_name = NULL;
int ret = 0;
- if (!dso__has_build_id(dso))
+ if (!dso__has_build_id(dso) || !dso__hit(dso))
return 0;
if (dso__is_kcore(dso)) {
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index dbf9c8cee3c5..6d7249cc1a99 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1477,6 +1477,15 @@ static void evsel__free_config_terms(struct evsel *evsel)
free_config_terms(&evsel->config_terms);
}
+static void (*evsel__priv_destructor)(void *priv);
+
+void evsel__set_priv_destructor(void (*destructor)(void *priv))
+{
+ assert(evsel__priv_destructor == NULL);
+
+ evsel__priv_destructor = destructor;
+}
+
void evsel__exit(struct evsel *evsel)
{
assert(list_empty(&evsel->core.node));
@@ -1502,6 +1511,8 @@ void evsel__exit(struct evsel *evsel)
hashmap__free(evsel->per_pkg_mask);
evsel->per_pkg_mask = NULL;
zfree(&evsel->metric_events);
+ if (evsel__priv_destructor)
+ evsel__priv_destructor(evsel->priv);
perf_evsel__object.fini(evsel);
if (evsel__tool_event(evsel) == PERF_TOOL_SYSTEM_TIME ||
evsel__tool_event(evsel) == PERF_TOOL_USER_TIME)
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 15e745a9a798..26574a33a725 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -282,6 +282,8 @@ void evsel__init(struct evsel *evsel, struct perf_event_attr *attr, int idx);
void evsel__exit(struct evsel *evsel);
void evsel__delete(struct evsel *evsel);
+void evsel__set_priv_destructor(void (*destructor)(void *priv));
+
struct callchain_param;
void evsel__config(struct evsel *evsel, struct record_opts *opts,
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index fd2597613f3d..61f10578e121 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -127,6 +127,7 @@ struct intel_pt {
bool single_pebs;
bool sample_pebs;
+ int pebs_data_src_fmt;
struct evsel *pebs_evsel;
u64 evt_sample_type;
@@ -175,6 +176,7 @@ enum switch_state {
struct intel_pt_pebs_event {
struct evsel *evsel;
u64 id;
+ int data_src_fmt;
};
struct intel_pt_queue {
@@ -2232,7 +2234,146 @@ static void intel_pt_add_lbrs(struct branch_stack *br_stack,
}
}
-static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id)
+#define P(a, b) PERF_MEM_S(a, b)
+#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
+#define LEVEL(x) P(LVLNUM, x)
+#define REM P(REMOTE, REMOTE)
+#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
+
+#define PERF_PEBS_DATA_SOURCE_GRT_MAX 0x10
+#define PERF_PEBS_DATA_SOURCE_GRT_MASK (PERF_PEBS_DATA_SOURCE_GRT_MAX - 1)
+
+/* Based on kernel __intel_pmu_pebs_data_source_grt() and pebs_data_source */
+static const u64 pebs_data_source_grt[PERF_PEBS_DATA_SOURCE_GRT_MAX] = {
+ P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA), /* L3 miss|SNP N/A */
+ OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* L1 hit|SNP None */
+ OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* LFB/MAB hit|SNP None */
+ OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* L2 hit|SNP None */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE), /* L3 hit|SNP None */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT), /* L3 hit|SNP Hit */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* L3 hit|SNP HitM */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* L3 hit|SNP HitM */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD), /* L3 hit|SNP Fwd */
+ OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* Remote L3 hit|SNP HitM */
+ OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, HIT), /* RAM hit|SNP Hit */
+ OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT), /* Remote L3 hit|SNP Hit */
+ OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | SNOOP_NONE_MISS, /* RAM hit|SNP None or Miss */
+ OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* Remote RAM hit|SNP None or Miss */
+ OP_LH | P(LVL, IO) | LEVEL(NA) | P(SNOOP, NONE), /* I/O hit|SNP None */
+ OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* Uncached hit|SNP None */
+};
+
+/* Based on kernel __intel_pmu_pebs_data_source_cmt() and pebs_data_source */
+static const u64 pebs_data_source_cmt[PERF_PEBS_DATA_SOURCE_GRT_MAX] = {
+ P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA), /* L3 miss|SNP N/A */
+ OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* L1 hit|SNP None */
+ OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* LFB/MAB hit|SNP None */
+ OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* L2 hit|SNP None */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE), /* L3 hit|SNP None */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, MISS), /* L3 hit|SNP Hit */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT), /* L3 hit|SNP HitM */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD), /* L3 hit|SNP HitM */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* L3 hit|SNP Fwd */
+ OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* Remote L3 hit|SNP HitM */
+ OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, NONE), /* RAM hit|SNP Hit */
+ OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE), /* Remote L3 hit|SNP Hit */
+ OP_LH | LEVEL(RAM) | REM | P(SNOOPX, FWD), /* RAM hit|SNP None or Miss */
+ OP_LH | LEVEL(RAM) | REM | P(SNOOP, HITM), /* Remote RAM hit|SNP None or Miss */
+ OP_LH | P(LVL, IO) | LEVEL(NA) | P(SNOOP, NONE), /* I/O hit|SNP None */
+ OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* Uncached hit|SNP None */
+};
+
+/* Based on kernel pebs_set_tlb_lock() */
+static inline void pebs_set_tlb_lock(u64 *val, bool tlb, bool lock)
+{
+ /*
+ * TLB access
+ * 0 = did not miss 2nd level TLB
+ * 1 = missed 2nd level TLB
+ */
+ if (tlb)
+ *val |= P(TLB, MISS) | P(TLB, L2);
+ else
+ *val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);
+
+ /* locked prefix */
+ if (lock)
+ *val |= P(LOCK, LOCKED);
+}
+
+/* Based on kernel __grt_latency_data() */
+static u64 intel_pt_grt_latency_data(u8 dse, bool tlb, bool lock, bool blk,
+ const u64 *pebs_data_source)
+{
+ u64 val;
+
+ dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK;
+ val = pebs_data_source[dse];
+
+ pebs_set_tlb_lock(&val, tlb, lock);
+
+ if (blk)
+ val |= P(BLK, DATA);
+ else
+ val |= P(BLK, NA);
+
+ return val;
+}
+
+/* Default value for data source */
+#define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\
+ PERF_MEM_S(LVL, NA) |\
+ PERF_MEM_S(SNOOP, NA) |\
+ PERF_MEM_S(LOCK, NA) |\
+ PERF_MEM_S(TLB, NA) |\
+ PERF_MEM_S(LVLNUM, NA))
+
+enum DATA_SRC_FORMAT {
+ DATA_SRC_FORMAT_ERR = -1,
+ DATA_SRC_FORMAT_NA = 0,
+ DATA_SRC_FORMAT_GRT = 1,
+ DATA_SRC_FORMAT_CMT = 2,
+};
+
+/* Based on kernel grt_latency_data() and cmt_latency_data */
+static u64 intel_pt_get_data_src(u64 mem_aux_info, int data_src_fmt)
+{
+ switch (data_src_fmt) {
+ case DATA_SRC_FORMAT_GRT: {
+ union {
+ u64 val;
+ struct {
+ unsigned int dse:4;
+ unsigned int locked:1;
+ unsigned int stlb_miss:1;
+ unsigned int fwd_blk:1;
+ unsigned int reserved:25;
+ };
+ } x = {.val = mem_aux_info};
+ return intel_pt_grt_latency_data(x.dse, x.stlb_miss, x.locked, x.fwd_blk,
+ pebs_data_source_grt);
+ }
+ case DATA_SRC_FORMAT_CMT: {
+ union {
+ u64 val;
+ struct {
+ unsigned int dse:5;
+ unsigned int locked:1;
+ unsigned int stlb_miss:1;
+ unsigned int fwd_blk:1;
+ unsigned int reserved:24;
+ };
+ } x = {.val = mem_aux_info};
+ return intel_pt_grt_latency_data(x.dse, x.stlb_miss, x.locked, x.fwd_blk,
+ pebs_data_source_cmt);
+ }
+ default:
+ return PERF_MEM_NA;
+ }
+}
+
+static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel,
+ u64 id, int data_src_fmt)
{
const struct intel_pt_blk_items *items = &ptq->state->items;
struct perf_sample sample = { .ip = 0, };
@@ -2350,6 +2491,18 @@ static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evse
}
}
+ if (sample_type & PERF_SAMPLE_DATA_SRC) {
+ if (items->has_mem_aux_info && data_src_fmt) {
+ if (data_src_fmt < 0) {
+ pr_err("Intel PT missing data_src info\n");
+ return -1;
+ }
+ sample.data_src = intel_pt_get_data_src(items->mem_aux_info, data_src_fmt);
+ } else {
+ sample.data_src = PERF_MEM_NA;
+ }
+ }
+
if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
u64 ax = items->has_rax ? items->rax : 0;
/* Refer kernel's intel_hsw_transaction() */
@@ -2368,9 +2521,10 @@ static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
struct evsel *evsel = pt->pebs_evsel;
+ int data_src_fmt = pt->pebs_data_src_fmt;
u64 id = evsel->core.id[0];
- return intel_pt_do_synth_pebs_sample(ptq, evsel, id);
+ return intel_pt_do_synth_pebs_sample(ptq, evsel, id, data_src_fmt);
}
static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
@@ -2395,7 +2549,7 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
hw_id);
return intel_pt_synth_single_pebs_sample(ptq);
}
- err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id);
+ err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id, pe->data_src_fmt);
if (err)
return err;
}
@@ -3355,6 +3509,49 @@ static int intel_pt_process_itrace_start(struct intel_pt *pt,
event->itrace_start.tid);
}
+/*
+ * Events with data_src are identified by L1_Hit_Indication;
+ * refer to https://github.com/intel/perfmon
+ */
+static int intel_pt_data_src_fmt(struct intel_pt *pt, struct evsel *evsel)
+{
+ struct perf_env *env = pt->machine->env;
+ int fmt = DATA_SRC_FORMAT_NA;
+
+ if (!env->cpuid)
+ return DATA_SRC_FORMAT_ERR;
+
+ /*
+ * PEBS-via-PT is only supported on E-core non-hybrid. Of those only
+ * Gracemont and Crestmont have data_src. Check for:
+ * Alderlake N (Gracemont)
+ * Sierra Forest (Crestmont)
+ * Grand Ridge (Crestmont)
+ */
+
+ if (!strncmp(env->cpuid, "GenuineIntel,6,190,", 19))
+ fmt = DATA_SRC_FORMAT_GRT;
+
+ if (!strncmp(env->cpuid, "GenuineIntel,6,175,", 19) ||
+ !strncmp(env->cpuid, "GenuineIntel,6,182,", 19))
+ fmt = DATA_SRC_FORMAT_CMT;
+
+ if (fmt == DATA_SRC_FORMAT_NA)
+ return fmt;
+
+ /*
+ * The only events with data_src are:
+ * mem-loads event=0xd0,umask=0x5
+ * mem-stores event=0xd0,umask=0x6
+ */
+ if (evsel->core.attr.type == PERF_TYPE_RAW &&
+ ((evsel->core.attr.config & 0xffff) == 0x5d0 ||
+ (evsel->core.attr.config & 0xffff) == 0x6d0))
+ return fmt;
+
+ return DATA_SRC_FORMAT_NA;
+}
+
static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample)
@@ -3375,6 +3572,7 @@ static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt,
ptq->pebs[hw_id].evsel = evsel;
ptq->pebs[hw_id].id = sample->id;
+ ptq->pebs[hw_id].data_src_fmt = intel_pt_data_src_fmt(pt, evsel);
return 0;
}
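
The 0x5d0/0x6d0 constants matched in intel_pt_data_src_fmt() above follow the usual x86 raw event encoding, where the event select occupies config bits 0-7 and the umask bits 8-15; that encoding assumption is mine, the patch does not spell it out. A quick standalone check of the arithmetic:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long long mem_loads  = (0x5ULL << 8) | 0xd0;	/* umask=0x5, event=0xd0 */
	unsigned long long mem_stores = (0x6ULL << 8) | 0xd0;	/* umask=0x6, event=0xd0 */

	assert((mem_loads  & 0xffff) == 0x5d0);
	assert((mem_stores & 0xffff) == 0x6d0);
	printf("mem-loads=0x%llx mem-stores=0x%llx\n", mem_loads, mem_stores);
	return 0;
}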
@@ -3924,6 +4122,7 @@ static void intel_pt_setup_pebs_events(struct intel_pt *pt)
}
pt->single_pebs = true;
pt->sample_pebs = true;
+ pt->pebs_data_src_fmt = intel_pt_data_src_fmt(pt, evsel);
pt->pebs_evsel = evsel;
}
}
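
For reference, here is a standalone sketch of how the GRT-layout union in intel_pt_get_data_src() carves a raw mem_aux_info value into fields. The sample value is made up, and the bit order assumes the usual little-endian bitfield layout used by GCC/Clang on x86.

#include <stdint.h>
#include <stdio.h>

union grt_aux {
	uint64_t val;
	struct {
		unsigned int dse:4;		/* data source encoding */
		unsigned int locked:1;		/* locked transaction */
		unsigned int stlb_miss:1;	/* missed the 2nd-level TLB */
		unsigned int fwd_blk:1;		/* forward blocked */
		unsigned int reserved:25;
	};
};

int main(void)
{
	union grt_aux x = { .val = 0x35 };	/* dse=5, locked=1, stlb_miss=1, fwd_blk=0 */

	printf("dse=%u locked=%u stlb_miss=%u fwd_blk=%u\n",
	       x.dse, x.locked, x.stlb_miss, x.fwd_blk);
	return 0;
}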
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 9be2f4479f52..20fd742984e3 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1974,7 +1974,7 @@ static void ip__resolve_ams(struct thread *thread,
* Thus, we have to try consecutively until we find a match
* or else, the symbol is unknown
*/
- thread__find_cpumode_addr_location(thread, ip, &al);
+ thread__find_cpumode_addr_location(thread, ip, /*symbols=*/true, &al);
ams->addr = ip;
ams->al_addr = al.addr;
@@ -2076,7 +2076,7 @@ static int add_callchain_ip(struct thread *thread,
al.sym = NULL;
al.srcline = NULL;
if (!cpumode) {
- thread__find_cpumode_addr_location(thread, ip, &al);
+ thread__find_cpumode_addr_location(thread, ip, symbols, &al);
} else {
if (ip >= PERF_CONTEXT_MAX) {
switch (ip) {
@@ -2104,6 +2104,8 @@ static int add_callchain_ip(struct thread *thread,
}
if (symbols)
thread__find_symbol(thread, *cpumode, ip, &al);
+ else
+ thread__find_map(thread, *cpumode, ip, &al);
}
if (al.sym != NULL) {
diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
index 81e0135cddf0..a1c71d9793bd 100644
--- a/tools/perf/util/print-events.c
+++ b/tools/perf/util/print-events.c
@@ -282,6 +282,7 @@ bool is_event_supported(u8 type, u64 config)
ret = evsel__open(evsel, NULL, tmap) >= 0;
}
+ evsel__close(evsel);
evsel__delete(evsel);
}
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index c6f369b5d893..36c1d3090689 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -90,11 +90,23 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
{
FILE *fp;
int ret = -1;
- bool need_swap = false;
+ bool need_swap = false, elf32;
u8 e_ident[EI_NIDENT];
- size_t buf_size;
- void *buf;
int i;
+ union {
+ struct {
+ Elf32_Ehdr ehdr32;
+ Elf32_Phdr *phdr32;
+ };
+ struct {
+ Elf64_Ehdr ehdr64;
+ Elf64_Phdr *phdr64;
+ };
+ } hdrs;
+ void *phdr;
+ size_t phdr_size;
+ void *buf = NULL;
+ size_t buf_size = 0;
fp = fopen(filename, "r");
if (fp == NULL)
@@ -108,117 +120,79 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
goto out;
need_swap = check_need_swap(e_ident[EI_DATA]);
+ elf32 = e_ident[EI_CLASS] == ELFCLASS32;
- /* for simplicity */
- fseek(fp, 0, SEEK_SET);
-
- if (e_ident[EI_CLASS] == ELFCLASS32) {
- Elf32_Ehdr ehdr;
- Elf32_Phdr *phdr;
-
- if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
- goto out;
+ if (fread(elf32 ? (void *)&hdrs.ehdr32 : (void *)&hdrs.ehdr64,
+ elf32 ? sizeof(hdrs.ehdr32) : sizeof(hdrs.ehdr64),
+ 1, fp) != 1)
+ goto out;
- if (need_swap) {
- ehdr.e_phoff = bswap_32(ehdr.e_phoff);
- ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
- ehdr.e_phnum = bswap_16(ehdr.e_phnum);
+ if (need_swap) {
+ if (elf32) {
+ hdrs.ehdr32.e_phoff = bswap_32(hdrs.ehdr32.e_phoff);
+ hdrs.ehdr32.e_phentsize = bswap_16(hdrs.ehdr32.e_phentsize);
+ hdrs.ehdr32.e_phnum = bswap_16(hdrs.ehdr32.e_phnum);
+ } else {
+ hdrs.ehdr64.e_phoff = bswap_64(hdrs.ehdr64.e_phoff);
+ hdrs.ehdr64.e_phentsize = bswap_16(hdrs.ehdr64.e_phentsize);
+ hdrs.ehdr64.e_phnum = bswap_16(hdrs.ehdr64.e_phnum);
}
+ }
+ phdr_size = elf32 ? hdrs.ehdr32.e_phentsize * hdrs.ehdr32.e_phnum
+ : hdrs.ehdr64.e_phentsize * hdrs.ehdr64.e_phnum;
+ phdr = malloc(phdr_size);
+ if (phdr == NULL)
+ goto out;
- buf_size = ehdr.e_phentsize * ehdr.e_phnum;
- buf = malloc(buf_size);
- if (buf == NULL)
- goto out;
-
- fseek(fp, ehdr.e_phoff, SEEK_SET);
- if (fread(buf, buf_size, 1, fp) != 1)
- goto out_free;
-
- for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
- void *tmp;
- long offset;
-
- if (need_swap) {
- phdr->p_type = bswap_32(phdr->p_type);
- phdr->p_offset = bswap_32(phdr->p_offset);
- phdr->p_filesz = bswap_32(phdr->p_filesz);
- }
-
- if (phdr->p_type != PT_NOTE)
- continue;
-
- buf_size = phdr->p_filesz;
- offset = phdr->p_offset;
- tmp = realloc(buf, buf_size);
- if (tmp == NULL)
- goto out_free;
-
- buf = tmp;
- fseek(fp, offset, SEEK_SET);
- if (fread(buf, buf_size, 1, fp) != 1)
- goto out_free;
+ fseek(fp, elf32 ? hdrs.ehdr32.e_phoff : hdrs.ehdr64.e_phoff, SEEK_SET);
+ if (fread(phdr, phdr_size, 1, fp) != 1)
+ goto out_free;
- ret = read_build_id(buf, buf_size, bid, need_swap);
- if (ret == 0) {
- ret = bid->size;
- break;
- }
- }
- } else {
- Elf64_Ehdr ehdr;
- Elf64_Phdr *phdr;
+ if (elf32)
+ hdrs.phdr32 = phdr;
+ else
+ hdrs.phdr64 = phdr;
- if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
- goto out;
+ for (i = 0; i < (elf32 ? hdrs.ehdr32.e_phnum : hdrs.ehdr64.e_phnum); i++) {
+ size_t p_filesz;
if (need_swap) {
- ehdr.e_phoff = bswap_64(ehdr.e_phoff);
- ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
- ehdr.e_phnum = bswap_16(ehdr.e_phnum);
+ if (elf32) {
+ hdrs.phdr32[i].p_type = bswap_32(hdrs.phdr32[i].p_type);
+ hdrs.phdr32[i].p_offset = bswap_32(hdrs.phdr32[i].p_offset);
+ hdrs.phdr32[i].p_filesz = bswap_32(hdrs.phdr32[i].p_filesz);
+ } else {
+ hdrs.phdr64[i].p_type = bswap_32(hdrs.phdr64[i].p_type);
+ hdrs.phdr64[i].p_offset = bswap_64(hdrs.phdr64[i].p_offset);
+ hdrs.phdr64[i].p_filesz = bswap_64(hdrs.phdr64[i].p_filesz);
+ }
}
+ if ((elf32 ? hdrs.phdr32[i].p_type : hdrs.phdr64[i].p_type) != PT_NOTE)
+ continue;
- buf_size = ehdr.e_phentsize * ehdr.e_phnum;
- buf = malloc(buf_size);
- if (buf == NULL)
- goto out;
-
- fseek(fp, ehdr.e_phoff, SEEK_SET);
- if (fread(buf, buf_size, 1, fp) != 1)
- goto out_free;
-
- for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
+ p_filesz = elf32 ? hdrs.phdr32[i].p_filesz : hdrs.phdr64[i].p_filesz;
+ if (p_filesz > buf_size) {
void *tmp;
- long offset;
-
- if (need_swap) {
- phdr->p_type = bswap_32(phdr->p_type);
- phdr->p_offset = bswap_64(phdr->p_offset);
- phdr->p_filesz = bswap_64(phdr->p_filesz);
- }
- if (phdr->p_type != PT_NOTE)
- continue;
-
- buf_size = phdr->p_filesz;
- offset = phdr->p_offset;
+ buf_size = p_filesz;
tmp = realloc(buf, buf_size);
if (tmp == NULL)
goto out_free;
-
buf = tmp;
- fseek(fp, offset, SEEK_SET);
- if (fread(buf, buf_size, 1, fp) != 1)
- goto out_free;
+ }
+ fseek(fp, elf32 ? hdrs.phdr32[i].p_offset : hdrs.phdr64[i].p_offset, SEEK_SET);
+ if (fread(buf, p_filesz, 1, fp) != 1)
+ goto out_free;
- ret = read_build_id(buf, buf_size, bid, need_swap);
- if (ret == 0) {
- ret = bid->size;
- break;
- }
+ ret = read_build_id(buf, p_filesz, bid, need_swap);
+ if (ret == 0) {
+ ret = bid->size;
+ break;
}
}
out_free:
free(buf);
+ free(phdr);
out:
fclose(fp);
return ret;
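
The rewrite above folds the 32-bit and 64-bit paths into one PT_NOTE scan by keeping both header widths in a union and selecting fields with the elf32 flag. A minimal standalone sketch of that accessor pattern; ehdr_phnum() is an illustrative helper, not part of perf.

#include <elf.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

union ehdrs {
	Elf32_Ehdr ehdr32;
	Elf64_Ehdr ehdr64;
};

static unsigned int ehdr_phnum(const union ehdrs *h, bool elf32)
{
	return elf32 ? h->ehdr32.e_phnum : h->ehdr64.e_phnum;
}

int main(void)
{
	union ehdrs h;

	memset(&h, 0, sizeof(h));
	h.ehdr64.e_phnum = 9;		/* pretend we read a 64-bit ELF header */
	printf("phnum=%u\n", ehdr_phnum(&h, /*elf32=*/false));
	return 0;
}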
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 3bbf173ad822..c0ec5ed4f1aa 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1405,6 +1405,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
goto out_err;
}
}
+ map__zput(new_node->map);
free(new_node);
}
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 0ffdd52d86d7..309d573eac9a 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -406,7 +406,7 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bo
}
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
- struct addr_location *al)
+ bool symbols, struct addr_location *al)
{
size_t i;
const u8 cpumodes[] = {
@@ -417,7 +417,11 @@ void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
};
for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
- thread__find_symbol(thread, cpumodes[i], addr, al);
+ if (symbols)
+ thread__find_symbol(thread, cpumodes[i], addr, al);
+ else
+ thread__find_map(thread, cpumodes[i], addr, al);
+
if (al->map)
break;
}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 6cbf6eb2812e..1fb32e7d62a4 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -122,7 +122,7 @@ struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
u64 addr, struct addr_location *al);
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
- struct addr_location *al);
+ bool symbols, struct addr_location *al);
int thread__memcpy(struct thread *thread, struct machine *machine,
void *buf, u64 ip, int len, bool *is64bit);
diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
index 08a399b0be28..6ab9139f16af 100644
--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
@@ -240,9 +240,9 @@ static int mperf_stop(void)
int cpu;
for (cpu = 0; cpu < cpu_count; cpu++) {
- mperf_measure_stats(cpu);
- mperf_get_tsc(&tsc_at_measure_end[cpu]);
clock_gettime(CLOCK_REALTIME, &time_end[cpu]);
+ mperf_get_tsc(&tsc_at_measure_end[cpu]);
+ mperf_measure_stats(cpu);
}
return 0;
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index a3cf1d17163a..e4b00e13302b 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -199,6 +199,7 @@ The system configuration dump (if --quiet is not used) is followed by statistics
\fBUncMHz\fP per-package uncore MHz, instantaneous sample.
.PP
\fBUMHz1.0\fP per-package uncore MHz for domain=1 and fabric_cluster=0, instantaneous sample. System summary is the average of all packages.
+For the "--show" and "--hide" options, use "UncMHz" to operate on all UMHz*.* as a group.
.SH TOO MUCH INFORMATION EXAMPLE
By default, turbostat dumps all possible information -- a system configuration header, followed by columns for all counters.
This is ideal for remote debugging, use the "--out" option to save everything to a text file, and get that file to the expert helping you debug.
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 77ef60980ee5..b663a76d31f1 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -67,6 +67,7 @@
#include <stdbool.h>
#include <assert.h>
#include <linux/kernel.h>
+#include <limits.h>
#define UNUSED(x) (void)(x)
@@ -4491,6 +4492,38 @@ unsigned long pmt_read_counter(struct pmt_counter *ppmt, unsigned int domain_id)
return (value & value_mask) >> value_shift;
}
+
+/* RAPL domain enumeration helpers */
+static inline int get_rapl_num_domains(void)
+{
+ int num_packages = topo.max_package_id + 1;
+ int num_cores_per_package;
+ int num_cores;
+
+ if (!platform->has_per_core_rapl)
+ return num_packages;
+
+ num_cores_per_package = topo.max_core_id + 1;
+ num_cores = num_cores_per_package * num_packages;
+
+ return num_cores;
+}
+
+static inline int get_rapl_domain_id(int cpu)
+{
+ int nr_cores_per_package = topo.max_core_id + 1;
+ int rapl_core_id;
+
+ if (!platform->has_per_core_rapl)
+ return cpus[cpu].physical_package_id;
+
+ /* Compute the system-wide unique core-id for @cpu */
+ rapl_core_id = cpus[cpu].physical_core_id;
+ rapl_core_id += cpus[cpu].physical_package_id * nr_cores_per_package;
+
+ return rapl_core_id;
+}
+
/*
* get_counters(...)
* migrate to cpu
@@ -4544,7 +4577,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
goto done;
if (platform->has_per_core_rapl) {
- status = get_rapl_counters(cpu, c->core_id, c, p);
+ status = get_rapl_counters(cpu, get_rapl_domain_id(cpu), c, p);
if (status != 0)
return status;
}
@@ -4610,7 +4643,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
p->sys_lpi = cpuidle_cur_sys_lpi_us;
if (!platform->has_per_core_rapl) {
- status = get_rapl_counters(cpu, p->package_id, c, p);
+ status = get_rapl_counters(cpu, get_rapl_domain_id(cpu), c, p);
if (status != 0)
return status;
}
@@ -6213,8 +6246,16 @@ int check_for_cap_sys_rawio(void)
int ret = 0;
caps = cap_get_proc();
- if (caps == NULL)
+ if (caps == NULL) {
+ /*
+ * CONFIG_MULTIUSER=n kernels have no cap_get_proc()
+ * Allow them to continue and attempt to access MSRs
+ */
+ if (errno == ENOSYS)
+ return 0;
+
return 1;
+ }
if (cap_get_flag(caps, CAP_SYS_RAWIO, CAP_EFFECTIVE, &cap_flag_value)) {
ret = 1;
@@ -6377,7 +6418,8 @@ static void probe_intel_uncore_frequency_legacy(void)
sprintf(path_base, "/sys/devices/system/cpu/intel_uncore_frequency/package_%02d_die_%02d", i,
j);
- if (access(path_base, R_OK))
+ sprintf(path, "%s/current_freq_khz", path_base);
+ if (access(path, R_OK))
continue;
BIC_PRESENT(BIC_UNCORE_MHZ);
@@ -6445,7 +6487,18 @@ static void probe_intel_uncore_frequency_cluster(void)
sprintf(path, "%s/current_freq_khz", path_base);
sprintf(name_buf, "UMHz%d.%d", domain_id, cluster_id);
- add_counter(0, path, name_buf, 0, SCOPE_PACKAGE, COUNTER_K2M, FORMAT_AVERAGE, 0, package_id);
+ /*
+ * Once add_counter() is called, that counter is always read
+ * and reported -- so it is effectively (enabled & present).
+ * Only call add_counter() here if legacy BIC_UNCORE_MHZ (UncMHz)
+ * is (enabled). Since we are in this routine, we
+ * know we will not probe and set (present) the legacy counter.
+ *
+ * This allows "--show/--hide UncMHz" to be effective for
+ * the clustered MHz counters, as a group.
+ */
+ if (BIC_IS_ENABLED(BIC_UNCORE_MHZ))
+ add_counter(0, path, name_buf, 0, SCOPE_PACKAGE, COUNTER_K2M, FORMAT_AVERAGE, 0, package_id);
if (quiet)
continue;
@@ -7559,7 +7612,7 @@ void linux_perf_init(void)
void rapl_perf_init(void)
{
- const unsigned int num_domains = (platform->has_per_core_rapl ? topo.max_core_id : topo.max_package_id) + 1;
+ const unsigned int num_domains = get_rapl_num_domains();
bool *domain_visited = calloc(num_domains, sizeof(bool));
rapl_counter_info_perdomain = calloc(num_domains, sizeof(*rapl_counter_info_perdomain));
@@ -7600,8 +7653,7 @@ void rapl_perf_init(void)
continue;
/* Skip already seen and handled RAPL domains */
- next_domain =
- platform->has_per_core_rapl ? cpus[cpu].physical_core_id : cpus[cpu].physical_package_id;
+ next_domain = get_rapl_domain_id(cpu);
assert(next_domain < num_domains);
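
get_rapl_num_domains() and get_rapl_domain_id() above linearize (package, core) pairs into a single domain index when RAPL is per-core. A standalone check of that arithmetic with hypothetical topology numbers (2 packages, max_core_id 3):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int max_package_id = 1, max_core_id = 3;	/* hypothetical topology */
	int num_packages = max_package_id + 1;
	int cores_per_package = max_core_id + 1;
	int num_domains = num_packages * cores_per_package;

	int physical_package_id = 1, physical_core_id = 2;	/* example CPU */
	int domain_id = physical_package_id * cores_per_package + physical_core_id;

	assert(num_domains == 8);
	assert(domain_id == 6 && domain_id < num_domains);
	printf("domains=%d, cpu domain=%d\n", num_domains, domain_id);
	return 0;
}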
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 0aa4005017c7..5f2afd95de43 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -98,7 +98,9 @@ else ifneq ($(CROSS_COMPILE),)
# Allow userspace to override CLANG_CROSS_FLAGS to specify their own
# sysroots and flags or to avoid the GCC call in pure Clang builds.
ifeq ($(CLANG_CROSS_FLAGS),)
-CLANG_CROSS_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
+CLANG_TARGET := $(notdir $(CROSS_COMPILE:%-=%))
+CLANG_TARGET := $(subst s390-linux,s390x-linux,$(CLANG_TARGET))
+CLANG_CROSS_FLAGS := --target=$(CLANG_TARGET)
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)gcc 2>/dev/null))
ifneq ($(GCC_TOOLCHAIN_DIR),)
CLANG_CROSS_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 7e524601e01a..bad227ee1b5b 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -1371,7 +1371,10 @@ sub __eval_option {
# If a variable contains itself, use the default var
if (($var eq $name) && defined($opt{$var})) {
$o = $opt{$var};
- $retval = "$retval$o";
+ # Only append if the default doesn't contain itself
+ if ($o !~ m/\$\{$var\}/) {
+ $retval = "$retval$o";
+ }
} elsif (defined($opt{$o})) {
$o = $opt{$o};
$retval = "$retval$o";
diff --git a/tools/testing/kunit/qemu_configs/sparc.py b/tools/testing/kunit/qemu_configs/sparc.py
index e975c4331a7c..2019550a1b69 100644
--- a/tools/testing/kunit/qemu_configs/sparc.py
+++ b/tools/testing/kunit/qemu_configs/sparc.py
@@ -2,8 +2,11 @@ from ..qemu_config import QemuArchParams
QEMU_ARCH = QemuArchParams(linux_arch='sparc',
kconfig='''
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y''',
+CONFIG_KUNIT_FAULT_TEST=n
+CONFIG_SPARC32=y
+CONFIG_SERIAL_SUNZILOG=y
+CONFIG_SERIAL_SUNZILOG_CONSOLE=y
+''',
qemu_arch='sparc',
kernel_path='arch/sparc/boot/zImage',
kernel_command_line='console=ttyS0 mem=256M',
diff --git a/tools/testing/kunit/qemu_configs/x86_64.py b/tools/testing/kunit/qemu_configs/x86_64.py
index dc7949076863..4a6bf4e048f5 100644
--- a/tools/testing/kunit/qemu_configs/x86_64.py
+++ b/tools/testing/kunit/qemu_configs/x86_64.py
@@ -7,4 +7,6 @@ CONFIG_SERIAL_8250_CONSOLE=y''',
qemu_arch='x86_64',
kernel_path='arch/x86/boot/bzImage',
kernel_command_line='console=ttyS0',
- extra_qemu_params=[])
+ # qboot is faster than SeaBIOS and doesn't mess up
+ # the terminal.
+ extra_qemu_params=['-bios', 'qboot.rom'])
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 363d031a16f7..85c5f39131d3 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -115,6 +115,7 @@ TARGETS += user_events
TARGETS += vDSO
TARGETS += mm
TARGETS += x86
+TARGETS += x86/bugs
TARGETS += zram
#Please keep the TARGETS list alphabetically sorted
# Run "make quicktest=1 run_tests" or
@@ -195,7 +196,7 @@ export KHDR_INCLUDES
all:
@ret=1; \
- for TARGET in $(TARGETS); do \
+ for TARGET in $(TARGETS) $(INSTALL_DEP_TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
mkdir $$BUILD_TARGET -p; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET \
diff --git a/tools/testing/selftests/alsa/utimer-test.c b/tools/testing/selftests/alsa/utimer-test.c
index 32ee3ce57721..37964f311a33 100644
--- a/tools/testing/selftests/alsa/utimer-test.c
+++ b/tools/testing/selftests/alsa/utimer-test.c
@@ -135,6 +135,7 @@ TEST_F(timer_f, utimer) {
pthread_join(ticking_thread, NULL);
ASSERT_EQ(total_ticks, TICKS_COUNT);
pclose(rfp);
+ free(buf);
}
TEST(wrong_timers_test) {
diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c
index 6d61992fe8a0..408fb1c5c2f8 100644
--- a/tools/testing/selftests/arm64/fp/sve-ptrace.c
+++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c
@@ -168,7 +168,7 @@ static void ptrace_set_get_inherit(pid_t child, const struct vec_type *type)
memset(&sve, 0, sizeof(sve));
sve.size = sizeof(sve);
sve.vl = sve_vl_from_vq(SVE_VQ_MIN);
- sve.flags = SVE_PT_VL_INHERIT;
+ sve.flags = SVE_PT_VL_INHERIT | SVE_PT_REGS_SVE;
ret = set_sve(child, type, &sve);
if (ret != 0) {
ksft_test_result_fail("Failed to set %s SVE_PT_VL_INHERIT\n",
@@ -233,6 +233,7 @@ static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
/* Set the VL by doing a set with no register payload */
memset(&sve, 0, sizeof(sve));
sve.size = sizeof(sve);
+ sve.flags = SVE_PT_REGS_SVE;
sve.vl = vl;
ret = set_sve(child, type, &sve);
if (ret != 0) {
@@ -251,7 +252,7 @@ static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
return;
}
- ksft_test_result(new_sve->vl = prctl_vl, "Set %s VL %u\n",
+ ksft_test_result(new_sve->vl == prctl_vl, "Set %s VL %u\n",
type->name, vl);
free(new_sve);
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index 27784946b01b..af0ee70a53f9 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -771,12 +771,13 @@ static const char *pkt_type_str(u16 pkt_type)
return "Unknown";
}
+#define MAX_FLAGS_STRLEN 21
/* Show the information of the transport layer in the packet */
static void show_transport(const u_char *packet, u16 len, u32 ifindex,
const char *src_addr, const char *dst_addr,
u16 proto, bool ipv6, u8 pkt_type)
{
- char *ifname, _ifname[IF_NAMESIZE];
+ char *ifname, _ifname[IF_NAMESIZE], flags[MAX_FLAGS_STRLEN] = "";
const char *transport_str;
u16 src_port, dst_port;
struct udphdr *udp;
@@ -817,29 +818,21 @@ static void show_transport(const u_char *packet, u16 len, u32 ifindex,
/* TCP or UDP*/
- flockfile(stdout);
+ if (proto == IPPROTO_TCP)
+ snprintf(flags, MAX_FLAGS_STRLEN, "%s%s%s%s",
+ tcp->fin ? ", FIN" : "",
+ tcp->syn ? ", SYN" : "",
+ tcp->rst ? ", RST" : "",
+ tcp->ack ? ", ACK" : "");
+
if (ipv6)
- printf("%-7s %-3s IPv6 %s.%d > %s.%d: %s, length %d",
+ printf("%-7s %-3s IPv6 %s.%d > %s.%d: %s, length %d%s\n",
ifname, pkt_type_str(pkt_type), src_addr, src_port,
- dst_addr, dst_port, transport_str, len);
+ dst_addr, dst_port, transport_str, len, flags);
else
- printf("%-7s %-3s IPv4 %s:%d > %s:%d: %s, length %d",
+ printf("%-7s %-3s IPv4 %s:%d > %s:%d: %s, length %d%s\n",
ifname, pkt_type_str(pkt_type), src_addr, src_port,
- dst_addr, dst_port, transport_str, len);
-
- if (proto == IPPROTO_TCP) {
- if (tcp->fin)
- printf(", FIN");
- if (tcp->syn)
- printf(", SYN");
- if (tcp->rst)
- printf(", RST");
- if (tcp->ack)
- printf(", ACK");
- }
-
- printf("\n");
- funlockfile(stdout);
+ dst_addr, dst_port, transport_str, len, flags);
}
static void show_ipv6_packet(const u_char *packet, u32 ifindex, u8 pkt_type)
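
MAX_FLAGS_STRLEN above is sized for the worst case of the snprintf() format: the four optional substrings ", FIN", ", SYN", ", RST" and ", ACK" are five bytes each, so 4 * 5 = 20 characters plus the terminating NUL gives 21. A standalone check of that sizing:

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define MAX_FLAGS_STRLEN 21

int main(void)
{
	char flags[MAX_FLAGS_STRLEN];
	int n = snprintf(flags, sizeof(flags), "%s%s%s%s",
			 ", FIN", ", SYN", ", RST", ", ACK");

	assert(n == 20 && strlen(flags) == 20);	/* fits with room for the NUL */
	printf("\"%s\" (%d chars)\n", flags, n);
	return 0;
}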
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
index a4a1f93878d4..fad98f01e2c0 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
@@ -63,6 +63,12 @@ static void test_bpf_nf_ct(int mode)
.repeat = 1,
);
+ if (SYS_NOFAIL("iptables-legacy --version")) {
+ fprintf(stdout, "Missing required iptables-legacy tool\n");
+ test__skip();
+ return;
+ }
+
skel = test_bpf_nf__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_bpf_nf__open_and_load"))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
index da430df45aa4..d1e4cb28a72c 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
@@ -97,7 +97,7 @@ static void ringbuf_write_subtest(void)
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
- skel->maps.ringbuf.max_entries = 0x4000;
+ skel->maps.ringbuf.max_entries = 0x40000;
err = test_ringbuf_write_lskel__load(skel);
if (!ASSERT_OK(err, "skel_load"))
@@ -108,7 +108,7 @@ static void ringbuf_write_subtest(void)
mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
if (!ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos"))
goto cleanup;
- *mmap_ptr = 0x3000;
+ *mmap_ptr = 0x30000;
ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");
skel->bss->pid = getpid();
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
index 2d0796314862..0a99fd404f6d 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
@@ -68,7 +68,6 @@ static void test_sockmap_ktls_disconnect_after_delete(int family, int map)
goto close_cli;
err = disconnect(cli);
- ASSERT_OK(err, "disconnect");
close_cli:
close(cli);
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
index 4ee1148d22be..1cfed83156b0 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
@@ -924,6 +924,8 @@ static void redir_partial(int family, int sotype, int sock_map, int parser_map)
goto close;
n = xsend(c1, buf, sizeof(buf), 0);
+ if (n == -1)
+ goto close;
if (n < sizeof(buf))
FAIL("incomplete write");
diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c
index fe86e4fdb89c..c3ab9b6fb069 100644
--- a/tools/testing/selftests/bpf/prog_tests/token.c
+++ b/tools/testing/selftests/bpf/prog_tests/token.c
@@ -828,8 +828,12 @@ static int userns_obj_priv_btf_success(int mnt_fd, struct token_lsm *lsm_skel)
return validate_struct_ops_load(mnt_fd, true /* should succeed */);
}
+static const char *token_bpffs_custom_dir()
+{
+ return getenv("BPF_SELFTESTS_BPF_TOKEN_DIR") ?: "/tmp/bpf-token-fs";
+}
+
#define TOKEN_ENVVAR "LIBBPF_BPF_TOKEN_PATH"
-#define TOKEN_BPFFS_CUSTOM "/bpf-token-fs"
static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel)
{
@@ -892,6 +896,7 @@ static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel
static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *lsm_skel)
{
+ const char *custom_dir = token_bpffs_custom_dir();
LIBBPF_OPTS(bpf_object_open_opts, opts);
struct dummy_st_ops_success *skel;
int err;
@@ -909,10 +914,10 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l
* BPF token implicitly, unless pointed to it through
* LIBBPF_BPF_TOKEN_PATH envvar
*/
- rmdir(TOKEN_BPFFS_CUSTOM);
- if (!ASSERT_OK(mkdir(TOKEN_BPFFS_CUSTOM, 0777), "mkdir_bpffs_custom"))
+ rmdir(custom_dir);
+ if (!ASSERT_OK(mkdir(custom_dir, 0777), "mkdir_bpffs_custom"))
goto err_out;
- err = sys_move_mount(mnt_fd, "", AT_FDCWD, TOKEN_BPFFS_CUSTOM, MOVE_MOUNT_F_EMPTY_PATH);
+ err = sys_move_mount(mnt_fd, "", AT_FDCWD, custom_dir, MOVE_MOUNT_F_EMPTY_PATH);
if (!ASSERT_OK(err, "move_mount_bpffs"))
goto err_out;
@@ -925,7 +930,7 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l
goto err_out;
}
- err = setenv(TOKEN_ENVVAR, TOKEN_BPFFS_CUSTOM, 1 /*overwrite*/);
+ err = setenv(TOKEN_ENVVAR, custom_dir, 1 /*overwrite*/);
if (!ASSERT_OK(err, "setenv_token_path"))
goto err_out;
@@ -951,11 +956,11 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l
if (!ASSERT_ERR(err, "obj_empty_token_path_load"))
goto err_out;
- rmdir(TOKEN_BPFFS_CUSTOM);
+ rmdir(custom_dir);
unsetenv(TOKEN_ENVVAR);
return 0;
err_out:
- rmdir(TOKEN_BPFFS_CUSTOM);
+ rmdir(custom_dir);
unsetenv(TOKEN_ENVVAR);
return -EINVAL;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
index d424e7ecbd12..9fd3ae987321 100644
--- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
@@ -21,8 +21,7 @@
#include "../progs/test_user_ringbuf.h"
static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ;
-static const long c_ringbuf_size = 1 << 12; /* 1 small page */
-static const long c_max_entries = c_ringbuf_size / c_sample_size;
+static long c_ringbuf_size, c_max_entries;
static void drain_current_samples(void)
{
@@ -424,7 +423,9 @@ static void test_user_ringbuf_loop(void)
uint32_t remaining_samples = total_samples;
int err;
- BUILD_BUG_ON(total_samples <= c_max_entries);
+ if (!ASSERT_LT(c_max_entries, total_samples, "compare_c_max_entries"))
+ return;
+
err = load_skel_create_user_ringbuf(&skel, &ringbuf);
if (err)
return;
@@ -686,6 +687,9 @@ void test_user_ringbuf(void)
{
int i;
+ c_ringbuf_size = getpagesize(); /* 1 page */
+ c_max_entries = c_ringbuf_size / c_sample_size;
+
for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
if (!test__start_subtest(success_tests[i].test_name))
continue;
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
index 481626a875d1..df27535995af 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
@@ -2,35 +2,41 @@
#include <uapi/linux/bpf.h>
#include <linux/if_link.h>
#include <test_progs.h>
+#include <network_helpers.h>
#include "test_xdp_with_cpumap_frags_helpers.skel.h"
#include "test_xdp_with_cpumap_helpers.skel.h"
#define IFINDEX_LO 1
+#define TEST_NS "cpu_attach_ns"
static void test_xdp_with_cpumap_helpers(void)
{
- struct test_xdp_with_cpumap_helpers *skel;
+ struct test_xdp_with_cpumap_helpers *skel = NULL;
struct bpf_prog_info info = {};
__u32 len = sizeof(info);
struct bpf_cpumap_val val = {
.qsize = 192,
};
- int err, prog_fd, map_fd;
+ int err, prog_fd, prog_redir_fd, map_fd;
+ struct nstoken *nstoken = NULL;
__u32 idx = 0;
+ SYS(out_close, "ip netns add %s", TEST_NS);
+ nstoken = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto out_close;
+ SYS(out_close, "ip link set dev lo up");
+
skel = test_xdp_with_cpumap_helpers__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_xdp_with_cpumap_helpers__open_and_load"))
return;
- prog_fd = bpf_program__fd(skel->progs.xdp_redir_prog);
- err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL);
+ prog_redir_fd = bpf_program__fd(skel->progs.xdp_redir_prog);
+ err = bpf_xdp_attach(IFINDEX_LO, prog_redir_fd, XDP_FLAGS_SKB_MODE, NULL);
if (!ASSERT_OK(err, "Generic attach of program with 8-byte CPUMAP"))
goto out_close;
- err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
- ASSERT_OK(err, "XDP program detach");
-
prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm);
map_fd = bpf_map__fd(skel->maps.cpu_map);
err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
@@ -45,6 +51,26 @@ static void test_xdp_with_cpumap_helpers(void)
ASSERT_OK(err, "Read cpumap entry");
ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to cpumap entry prog_id");
+ /* send a packet to trigger any potential bugs in there */
+ char data[ETH_HLEN] = {};
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &data,
+ .data_size_in = sizeof(data),
+ .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+ .repeat = 1,
+ );
+ err = bpf_prog_test_run_opts(prog_redir_fd, &opts);
+ ASSERT_OK(err, "XDP test run");
+
+ /* wait for the packets to be flushed, then check that redirect has been
+ * performed
+ */
+ kern_sync_rcu();
+ ASSERT_NEQ(skel->bss->redirect_count, 0, "redirected packets");
+
+ err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
+ ASSERT_OK(err, "XDP program detach");
+
/* can not attach BPF_XDP_CPUMAP program to a device */
err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL);
if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_CPUMAP program"))
@@ -65,6 +91,8 @@ static void test_xdp_with_cpumap_helpers(void)
ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to cpumap entry");
out_close:
+ close_netns(nstoken);
+ SYS_NOFAIL("ip netns del %s", TEST_NS);
test_xdp_with_cpumap_helpers__destroy(skel);
}
@@ -111,7 +139,7 @@ out_close:
test_xdp_with_cpumap_frags_helpers__destroy(skel);
}
-void serial_test_xdp_cpumap_attach(void)
+void test_xdp_cpumap_attach(void)
{
if (test__start_subtest("CPUMAP with programs in entries"))
test_xdp_with_cpumap_helpers();
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
index 27ffed17d4be..461ab18705d5 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
@@ -23,7 +23,7 @@ static void test_xdp_with_devmap_helpers(void)
__u32 len = sizeof(info);
int err, dm_fd, dm_fd_redir, map_fd;
struct nstoken *nstoken = NULL;
- char data[10] = {};
+ char data[ETH_HLEN] = {};
__u32 idx = 0;
SYS(out_close, "ip netns add %s", TEST_NS);
@@ -58,7 +58,7 @@ static void test_xdp_with_devmap_helpers(void)
/* send a packet to trigger any potential bugs in there */
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &data,
- .data_size_in = 10,
+ .data_size_in = sizeof(data),
.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
.repeat = 1,
);
@@ -158,7 +158,7 @@ static void test_xdp_with_devmap_helpers_veth(void)
struct nstoken *nstoken = NULL;
__u32 len = sizeof(info);
int err, dm_fd, dm_fd_redir, map_fd, ifindex_dst;
- char data[10] = {};
+ char data[ETH_HLEN] = {};
__u32 idx = 0;
SYS(out_close, "ip netns add %s", TEST_NS);
@@ -208,7 +208,7 @@ static void test_xdp_with_devmap_helpers_veth(void)
/* send a packet to trigger any potential bugs in there */
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &data,
- .data_size_in = 10,
+ .data_size_in = sizeof(data),
.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
.repeat = 1,
);
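
The data[10] to data[ETH_HLEN] changes above presumably exist because an XDP test frame is expected to carry at least a complete Ethernet header (ETH_HLEN is 14 bytes, so 10 is too short); the exact kernel-side check is assumed here, not quoted. A standalone size check:

#include <assert.h>
#include <linux/if_ether.h>	/* ETH_HLEN */
#include <stdio.h>

int main(void)
{
	char data[ETH_HLEN] = {0};	/* zeroed frame, same shape as in the test */

	assert(ETH_HLEN == 14);
	assert(sizeof(data) >= ETH_HLEN && 10 < ETH_HLEN);
	printf("frame size %zu, minimum %d\n", sizeof(data), ETH_HLEN);
	return 0;
}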
diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
index a3f220ba7025..ee65bad0436d 100644
--- a/tools/testing/selftests/bpf/progs/test_global_map_resize.c
+++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
@@ -32,6 +32,16 @@ int my_int_last SEC(".data.array_not_last");
int percpu_arr[1] SEC(".data.percpu_arr");
+/* at least one extern is included, to ensure that a specific
+ * regression is tested whereby resizing resulted in a use-after-free
+ * bug after type information is invalidated by the resize operation.
+ *
+ * There isn't a particularly good API to test for this specific condition,
+ * but by having externs for the resizing tests it will cover this path.
+ */
+extern int LINUX_KERNEL_VERSION __kconfig;
+long version_sink;
+
SEC("tp/syscalls/sys_enter_getpid")
int bss_array_sum(void *ctx)
{
@@ -44,6 +54,9 @@ int bss_array_sum(void *ctx)
for (size_t i = 0; i < bss_array_len; ++i)
sum += array[i];
+ /* see above; ensure this is not optimized out */
+ version_sink = LINUX_KERNEL_VERSION;
+
return 0;
}
@@ -59,6 +72,9 @@ int data_array_sum(void *ctx)
for (size_t i = 0; i < data_array_len; ++i)
sum += my_array[i];
+ /* see above; ensure this is not optimized out */
+ version_sink = LINUX_KERNEL_VERSION;
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
index 350513c0e4c9..f063a0013f85 100644
--- a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
@@ -26,11 +26,11 @@ int test_ringbuf_write(void *ctx)
if (cur_pid != pid)
return 0;
- sample1 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0);
+ sample1 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0);
if (!sample1)
return 0;
/* first one can pass */
- sample2 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0);
+ sample2 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0);
if (!sample2) {
bpf_ringbuf_discard(sample1, 0);
__sync_fetch_and_add(&discarded, 1);
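
Plain arithmetic for the sizes used here and in the matching prog_tests/ringbuf.c change above: each reservation is 0x30000 bytes against a 0x40000-byte ring buffer, so a single reservation fits but two together exceed it, and the bogus consumer position written through the read-write mmap equals one reservation. A standalone check (arithmetic only, not the BPF behavior):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int ringbuf_size = 0x40000;	/* max_entries set by the test */
	unsigned int sample_size = 0x30000;	/* size passed to bpf_ringbuf_reserve() */
	unsigned int bogus_cons_pos = 0x30000;	/* written via the rw consumer-page mmap */

	assert(sample_size < ringbuf_size);		/* one reservation fits */
	assert(2 * sample_size > ringbuf_size);		/* two together exceed the buffer */
	assert(bogus_cons_pos == sample_size);
	printf("ring=%#x sample=%#x\n", ringbuf_size, sample_size);
	return 0;
}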
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
index 20ec6723df18..3619239b01b7 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
@@ -12,10 +12,12 @@ struct {
__uint(max_entries, 4);
} cpu_map SEC(".maps");
+__u32 redirect_count = 0;
+
SEC("xdp")
int xdp_redir_prog(struct xdp_md *ctx)
{
- return bpf_redirect_map(&cpu_map, 1, 0);
+ return bpf_redirect_map(&cpu_map, 0, 0);
}
SEC("xdp")
@@ -27,6 +29,9 @@ int xdp_dummy_prog(struct xdp_md *ctx)
SEC("xdp/cpumap")
int xdp_dummy_cm(struct xdp_md *ctx)
{
+ if (bpf_get_smp_processor_id() == 0)
+ redirect_count++;
+
if (ctx->ingress_ifindex == IFINDEX_LO)
return XDP_DROP;
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
index 6065f862d964..758b09a5eb88 100644
--- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c
+++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
@@ -29,12 +29,12 @@ int big_alloc1(void *ctx)
if (!page1)
return 1;
*page1 = 1;
- page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE,
+ page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE * 2,
1, NUMA_NO_NODE, 0);
if (!page2)
return 2;
*page2 = 2;
- no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE,
+ no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE,
1, NUMA_NO_NODE, 0);
if (no_page)
return 3;
@@ -66,4 +66,110 @@ int big_alloc1(void *ctx)
#endif
return 0;
}
+
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+#define PAGE_CNT 100
+__u8 __arena * __arena page[PAGE_CNT]; /* occupies the first page */
+__u8 __arena *base;
+
+/*
+ * Check that arena's range_tree algorithm allocates pages sequentially
+ * on the first pass and then fills in all gaps on the second pass.
+ */
+__noinline int alloc_pages(int page_cnt, int pages_atonce, bool first_pass,
+ int max_idx, int step)
+{
+ __u8 __arena *pg;
+ int i, pg_idx;
+
+ for (i = 0; i < page_cnt; i++) {
+ pg = bpf_arena_alloc_pages(&arena, NULL, pages_atonce,
+ NUMA_NO_NODE, 0);
+ if (!pg)
+ return step;
+ pg_idx = (unsigned long) (pg - base) / PAGE_SIZE;
+ if (first_pass) {
+ /* Pages must be allocated sequentially */
+ if (pg_idx != i)
+ return step + 100;
+ } else {
+ /* Allocator must fill into gaps */
+ if (pg_idx >= max_idx || (pg_idx & 1))
+ return step + 200;
+ }
+ *pg = pg_idx;
+ page[pg_idx] = pg;
+ cond_break;
+ }
+ return 0;
+}
+
+//SEC("syscall")
+//__success __retval(0)
+int big_alloc2(void *ctx)
+{
+ __u8 __arena *pg;
+ int i, err;
+
+ base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!base)
+ return 1;
+ bpf_arena_free_pages(&arena, (void __arena *)base, 1);
+
+ err = alloc_pages(PAGE_CNT, 1, true, PAGE_CNT, 2);
+ if (err)
+ return err;
+
+ /* Clear all even pages */
+ for (i = 0; i < PAGE_CNT; i += 2) {
+ pg = page[i];
+ if (*pg != i)
+ return 3;
+ bpf_arena_free_pages(&arena, (void __arena *)pg, 1);
+ page[i] = NULL;
+ cond_break;
+ }
+
+ /* Allocate into freed gaps */
+ err = alloc_pages(PAGE_CNT / 2, 1, false, PAGE_CNT, 4);
+ if (err)
+ return err;
+
+ /* Free pairs of pages */
+ for (i = 0; i < PAGE_CNT; i += 4) {
+ pg = page[i];
+ if (*pg != i)
+ return 5;
+ bpf_arena_free_pages(&arena, (void __arena *)pg, 2);
+ page[i] = NULL;
+ page[i + 1] = NULL;
+ cond_break;
+ }
+
+ /* Allocate 2 pages at a time into freed gaps */
+ err = alloc_pages(PAGE_CNT / 4, 2, false, PAGE_CNT, 6);
+ if (err)
+ return err;
+
+ /* Check pages without freeing */
+ for (i = 0; i < PAGE_CNT; i += 2) {
+ pg = page[i];
+ if (*pg != i)
+ return 7;
+ cond_break;
+ }
+
+ pg = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+
+ if (!pg)
+ return 8;
+ /*
+ * The first PAGE_CNT pages are occupied. The new page
+ * must be above.
+ */
+ if ((pg - base) / PAGE_SIZE < PAGE_CNT)
+ return 9;
+ return 0;
+}
+#endif
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c
index 6b564d4c0986..051d1962a4c7 100644
--- a/tools/testing/selftests/bpf/progs/verifier_precision.c
+++ b/tools/testing/selftests/bpf/progs/verifier_precision.c
@@ -130,4 +130,57 @@ __naked int state_loop_first_last_equal(void)
);
}
+__used __naked static void __bpf_cond_op_r10(void)
+{
+ asm volatile (
+ "r2 = 2314885393468386424 ll;"
+ "goto +0;"
+ "if r2 <= r10 goto +3;"
+ "if r1 >= -1835016 goto +0;"
+ "if r2 <= 8 goto +0;"
+ "if r3 <= 0 goto +0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("8: (bd) if r2 <= r10 goto pc+3")
+__msg("9: (35) if r1 >= 0xffe3fff8 goto pc+0")
+__msg("10: (b5) if r2 <= 0x8 goto pc+0")
+__msg("mark_precise: frame1: last_idx 10 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame1: regs=r2 stack= before 9: (35) if r1 >= 0xffe3fff8 goto pc+0")
+__msg("mark_precise: frame1: regs=r2 stack= before 8: (bd) if r2 <= r10 goto pc+3")
+__msg("mark_precise: frame1: regs=r2 stack= before 7: (05) goto pc+0")
+__naked void bpf_cond_op_r10(void)
+{
+ asm volatile (
+ "r3 = 0 ll;"
+ "call __bpf_cond_op_r10;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("3: (bf) r3 = r10")
+__msg("4: (bd) if r3 <= r2 goto pc+1")
+__msg("5: (b5) if r2 <= 0x8 goto pc+2")
+__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 4: (bd) if r3 <= r2 goto pc+1")
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
+__naked void bpf_cond_op_not_r10(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "r2 = 2314885393468386424 ll;"
+ "r3 = r10;"
+ "if r3 <= r2 goto +1;"
+ "if r2 <= 8 goto +2;"
+ "r0 = 2 ll;"
+ "exit;"
+ ::: __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
index 7ea535bfbacd..e4ef82a6ee38 100644
--- a/tools/testing/selftests/bpf/progs/verifier_unpriv.c
+++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
@@ -619,7 +619,7 @@ __naked void pass_pointer_to_tail_call(void)
SEC("socket")
__description("unpriv: cmp map pointer with zero")
-__success __failure_unpriv __msg_unpriv("R1 pointer comparison")
+__success __success_unpriv
__retval(0)
__naked void cmp_map_pointer_with_zero(void)
{
diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c
index 3e9b009580d4..7f69d7b5bd4d 100644
--- a/tools/testing/selftests/bpf/test_loader.c
+++ b/tools/testing/selftests/bpf/test_loader.c
@@ -970,6 +970,14 @@ void run_subtest(struct test_loader *tester,
emit_verifier_log(tester->log_buf, false /*force*/);
validate_msgs(tester->log_buf, &subspec->expect_msgs, emit_verifier_log);
+ /* Restore capabilities because the kernel will silently ignore requests
+ * for program info (such as xlated program text) if we are not
+ * bpf-capable. Also, for some reason test_verifier executes programs
+ * with all capabilities restored. Do the same here.
+ */
+ if (restore_capabilities(&caps))
+ goto tobj_cleanup;
+
if (subspec->expect_xlated.cnt) {
err = get_xlated_program_text(bpf_program__fd(tprog),
tester->log_buf, tester->log_buf_sz);
@@ -995,12 +1003,6 @@ void run_subtest(struct test_loader *tester,
}
if (should_do_test_run(spec, subspec)) {
- /* For some reason test_verifier executes programs
- * with all capabilities restored. Do the same here.
- */
- if (restore_capabilities(&caps))
- goto tobj_cleanup;
-
/* Do bpf_map__attach_struct_ops() for each struct_ops map.
* This should trigger bpf_struct_ops->reg callback on kernel side.
*/
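The capability restore is moved earlier because, per the new comment, the kernel silently ignores requests for program info such as the xlated text when the caller is not bpf-capable. As a rough illustration of the kind of query being protected — using libbpf's bpf_obj_get_info_by_fd(), which is not necessarily the helper test_loader.c calls — the sketch below asks for the translated instructions and reports how much text came back:

#include <bpf/bpf.h>
#include <string.h>

/* Sketch only: an unprivileged caller gets no error from this query,
 * just no instruction text, which is why capabilities must be restored
 * before the xlated-program comparison runs. */
static int query_xlated(int prog_fd, void *buf, __u32 buf_sz, __u32 *got)
{
	struct bpf_prog_info info;
	__u32 info_len = sizeof(info);
	int err;

	memset(&info, 0, sizeof(info));
	info.xlated_prog_insns = (__u64)(unsigned long)buf;
	info.xlated_prog_len = buf_sz;

	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
	if (!err)
		*got = info.xlated_prog_len;
	return err;
}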
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index fda7589c5023..0921939532c6 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -138,6 +138,18 @@ static int sched_next_online(int pid, int *next_to_try)
return ret;
}
+/* Derive target_free from map_size, same as bpf_common_lru_populate */
+static unsigned int __tgt_size(unsigned int map_size)
+{
+ return (map_size / nr_cpus) / 2;
+}
+
+/* Inverse of how bpf_common_lru_populate derives target_free from map_size. */
+static unsigned int __map_size(unsigned int tgt_free)
+{
+ return tgt_free * nr_cpus * 2;
+}
+
/* Size of the LRU map is 2
* Add key=1 (+1 key)
* Add key=2 (+1 key)
@@ -231,11 +243,11 @@ static void test_lru_sanity0(int map_type, int map_flags)
printf("Pass\n");
}
-/* Size of the LRU map is 1.5*tgt_free
- * Insert 1 to tgt_free (+tgt_free keys)
- * Lookup 1 to tgt_free/2
- * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys)
- * => 1+tgt_free/2 to LOCALFREE_TARGET will be removed by LRU
+/* Verify that unreferenced elements are recycled before referenced ones.
+ * Insert elements.
+ * Reference a subset of these.
+ * Insert more, enough to trigger recycling.
+ * Verify that unreferenced are recycled.
*/
static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
{
@@ -257,7 +269,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
batch_size = tgt_free / 2;
assert(batch_size * 2 == tgt_free);
- map_size = tgt_free + batch_size;
+ map_size = __map_size(tgt_free) + batch_size;
lru_map_fd = create_map(map_type, map_flags, map_size);
assert(lru_map_fd != -1);
@@ -266,13 +278,13 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
value[0] = 1234;
- /* Insert 1 to tgt_free (+tgt_free keys) */
- end_key = 1 + tgt_free;
+ /* Insert map_size - batch_size keys */
+ end_key = 1 + __map_size(tgt_free);
for (key = 1; key < end_key; key++)
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
- /* Lookup 1 to tgt_free/2 */
+ /* Lookup 1 to batch_size */
end_key = 1 + batch_size;
for (key = 1; key < end_key; key++) {
assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
@@ -280,12 +292,13 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
BPF_NOEXIST));
}
- /* Insert 1+tgt_free to 2*tgt_free
- * => 1+tgt_free/2 to LOCALFREE_TARGET will be
+ /* Insert another map_size - batch_size keys
+ * Map will contain 1 to batch_size plus these latest, i.e.,
+ * => previous 1+batch_size to map_size - batch_size will have been
* removed by LRU
*/
- key = 1 + tgt_free;
- end_key = key + tgt_free;
+ key = 1 + __map_size(tgt_free);
+ end_key = key + __map_size(tgt_free);
for (; key < end_key; key++) {
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
@@ -301,17 +314,8 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
printf("Pass\n");
}
-/* Size of the LRU map 1.5 * tgt_free
- * Insert 1 to tgt_free (+tgt_free keys)
- * Update 1 to tgt_free/2
- * => The original 1 to tgt_free/2 will be removed due to
- * the LRU shrink process
- * Re-insert 1 to tgt_free/2 again and do a lookup immeidately
- * Insert 1+tgt_free to tgt_free*3/2
- * Insert 1+tgt_free*3/2 to tgt_free*5/2
- * => Key 1+tgt_free to tgt_free*3/2
- * will be removed from LRU because it has never
- * been lookup and ref bit is not set
+/* Verify that insertions exceeding map size will recycle the oldest.
+ * Verify that unreferenced elements are recycled before referenced.
*/
static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
{
@@ -334,7 +338,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
batch_size = tgt_free / 2;
assert(batch_size * 2 == tgt_free);
- map_size = tgt_free + batch_size;
+ map_size = __map_size(tgt_free) + batch_size;
lru_map_fd = create_map(map_type, map_flags, map_size);
assert(lru_map_fd != -1);
@@ -343,8 +347,8 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
value[0] = 1234;
- /* Insert 1 to tgt_free (+tgt_free keys) */
- end_key = 1 + tgt_free;
+ /* Insert map_size - batch_size keys */
+ end_key = 1 + __map_size(tgt_free);
for (key = 1; key < end_key; key++)
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
@@ -357,8 +361,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
* shrink the inactive list to get tgt_free
* number of free nodes.
*
- * Hence, the oldest key 1 to tgt_free/2
- * are removed from the LRU list.
+ * Hence, the oldest key is removed from the LRU list.
*/
key = 1;
if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
@@ -370,8 +373,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
BPF_EXIST));
}
- /* Re-insert 1 to tgt_free/2 again and do a lookup
- * immeidately.
+ /* Re-insert 1 to batch_size again and do a lookup immediately.
*/
end_key = 1 + batch_size;
value[0] = 4321;
@@ -387,17 +389,18 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
value[0] = 1234;
- /* Insert 1+tgt_free to tgt_free*3/2 */
- end_key = 1 + tgt_free + batch_size;
- for (key = 1 + tgt_free; key < end_key; key++)
+ /* Insert batch_size new elements */
+ key = 1 + __map_size(tgt_free);
+ end_key = key + batch_size;
+ for (; key < end_key; key++)
/* These newly added but not referenced keys will be
* gone during the next LRU shrink.
*/
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
- /* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
- end_key = key + tgt_free;
+ /* Insert map_size - batch_size elements */
+ end_key += __map_size(tgt_free);
for (; key < end_key; key++) {
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
@@ -413,12 +416,12 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
printf("Pass\n");
}
-/* Size of the LRU map is 2*tgt_free
- * It is to test the active/inactive list rotation
- * Insert 1 to 2*tgt_free (+2*tgt_free keys)
- * Lookup key 1 to tgt_free*3/2
- * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
- * => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
+/* Test the active/inactive list rotation
+ *
+ * Fill the whole map, deplete the free list.
+ * Reference all except the last lru->target_free elements.
+ * Insert lru->target_free new elements. This triggers one shrink.
+ * Verify that the non-referenced elements are replaced.
*/
static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
{
@@ -437,8 +440,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
assert(sched_next_online(0, &next_cpu) != -1);
- batch_size = tgt_free / 2;
- assert(batch_size * 2 == tgt_free);
+ batch_size = __tgt_size(tgt_free);
map_size = tgt_free * 2;
lru_map_fd = create_map(map_type, map_flags, map_size);
@@ -449,23 +451,21 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
value[0] = 1234;
- /* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
- end_key = 1 + (2 * tgt_free);
+ /* Fill the map */
+ end_key = 1 + map_size;
for (key = 1; key < end_key; key++)
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
- /* Lookup key 1 to tgt_free*3/2 */
- end_key = tgt_free + batch_size;
+ /* Reference all but the last batch_size */
+ end_key = 1 + map_size - batch_size;
for (key = 1; key < end_key; key++) {
assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
assert(!bpf_map_update_elem(expected_map_fd, &key, value,
BPF_NOEXIST));
}
- /* Add 1+2*tgt_free to tgt_free*5/2
- * (+tgt_free/2 keys)
- */
+ /* Insert new batch_size: replaces the non-referenced elements */
key = 2 * tgt_free + 1;
end_key = key + batch_size;
for (; key < end_key; key++) {
@@ -500,7 +500,8 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
lru_map_fd = create_map(map_type, map_flags,
3 * tgt_free * nr_cpus);
else
- lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free);
+ lru_map_fd = create_map(map_type, map_flags,
+ 3 * __map_size(tgt_free));
assert(lru_map_fd != -1);
expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0,
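To make the relationship between the two new helpers concrete: with an illustrative nr_cpus of 4, __map_size(128) = 128 * 4 * 2 = 1024 and __tgt_size(1024) = (1024 / 4) / 2 = 128, so converting a per-CPU free target into a map size and back is lossless whenever the divisions are exact. A minimal standalone check of that round trip (values are examples, not taken from the test run):

#include <assert.h>

static unsigned int nr_cpus = 4;	/* illustrative value */

static unsigned int __tgt_size(unsigned int map_size)
{
	return (map_size / nr_cpus) / 2;
}

static unsigned int __map_size(unsigned int tgt_free)
{
	return tgt_free * nr_cpus * 2;
}

int main(void)
{
	assert(__map_size(128) == 1024);
	assert(__tgt_size(__map_size(128)) == 128);
	return 0;
}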
diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c
index 1ec5c4c47235..7b6b9c4cadb5 100644
--- a/tools/testing/selftests/bpf/veristat.c
+++ b/tools/testing/selftests/bpf/veristat.c
@@ -309,6 +309,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
fprintf(stderr, "invalid top N specifier: %s\n", arg);
argp_usage(state);
}
+ break;
case 'C':
env.comparison_mode = true;
break;
diff --git a/tools/testing/selftests/breakpoints/step_after_suspend_test.c b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
index 8d275f03e977..8d233ac95696 100644
--- a/tools/testing/selftests/breakpoints/step_after_suspend_test.c
+++ b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
@@ -127,22 +127,42 @@ int run_test(int cpu)
return KSFT_PASS;
}
+/*
+ * Reads the suspend success count from sysfs.
+ * Returns the count on success or exits on failure.
+ */
+static int get_suspend_success_count_or_fail(void)
+{
+ FILE *fp;
+ int val;
+
+ fp = fopen("/sys/power/suspend_stats/success", "r");
+ if (!fp)
+ ksft_exit_fail_msg(
+ "Failed to open suspend_stats/success: %s\n",
+ strerror(errno));
+
+ if (fscanf(fp, "%d", &val) != 1) {
+ fclose(fp);
+ ksft_exit_fail_msg(
+ "Failed to read suspend success count\n");
+ }
+
+ fclose(fp);
+ return val;
+}
+
void suspend(void)
{
- int power_state_fd;
int timerfd;
int err;
+ int count_before;
+ int count_after;
struct itimerspec spec = {};
if (getuid() != 0)
ksft_exit_skip("Please run the test as root - Exiting.\n");
- power_state_fd = open("/sys/power/state", O_RDWR);
- if (power_state_fd < 0)
- ksft_exit_fail_msg(
- "open(\"/sys/power/state\") failed %s)\n",
- strerror(errno));
-
timerfd = timerfd_create(CLOCK_BOOTTIME_ALARM, 0);
if (timerfd < 0)
ksft_exit_fail_msg("timerfd_create() failed\n");
@@ -152,14 +172,15 @@ void suspend(void)
if (err < 0)
ksft_exit_fail_msg("timerfd_settime() failed\n");
+ count_before = get_suspend_success_count_or_fail();
+
system("(echo mem > /sys/power/state) 2> /dev/null");
- timerfd_gettime(timerfd, &spec);
- if (spec.it_value.tv_sec != 0 || spec.it_value.tv_nsec != 0)
+ count_after = get_suspend_success_count_or_fail();
+ if (count_after <= count_before)
ksft_exit_fail_msg("Failed to enter Suspend state\n");
close(timerfd);
- close(power_state_fd);
}
int main(int argc, char **argv)
diff --git a/tools/testing/selftests/cpufreq/cpufreq.sh b/tools/testing/selftests/cpufreq/cpufreq.sh
index e350c521b467..3aad9db921b5 100755
--- a/tools/testing/selftests/cpufreq/cpufreq.sh
+++ b/tools/testing/selftests/cpufreq/cpufreq.sh
@@ -244,9 +244,10 @@ do_suspend()
printf "Failed to suspend using RTC wake alarm\n"
return 1
fi
+ else
+ echo $filename > $SYSFS/power/state
fi
- echo $filename > $SYSFS/power/state
printf "Came out of $1\n"
printf "Do basic tests after finishing $1 to verify cpufreq state\n\n"
diff --git a/tools/testing/selftests/drivers/net/lib/py/env.py b/tools/testing/selftests/drivers/net/lib/py/env.py
index 1ea9bb695e94..3f35faca6559 100644
--- a/tools/testing/selftests/drivers/net/lib/py/env.py
+++ b/tools/testing/selftests/drivers/net/lib/py/env.py
@@ -224,7 +224,7 @@ class NetDrvEpEnv:
if not self._require_cmd(comm, "local"):
raise KsftSkipEx("Test requires command: " + comm)
if remote:
- if not self._require_cmd(comm, "remote"):
+ if not self._require_cmd(comm, "remote", host=self.remote):
raise KsftSkipEx("Test requires (remote) command: " + comm)
def wait_hw_stats_settle(self):
diff --git a/tools/testing/selftests/drivers/net/lib/py/load.py b/tools/testing/selftests/drivers/net/lib/py/load.py
index d9c10613ae67..44151b7b1a24 100644
--- a/tools/testing/selftests/drivers/net/lib/py/load.py
+++ b/tools/testing/selftests/drivers/net/lib/py/load.py
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+import re
import time
from lib.py import ksft_pr, cmd, ip, rand_port, wait_port_listen
@@ -10,12 +11,11 @@ class GenerateTraffic:
self.env = env
- if port is None:
- port = rand_port()
- self._iperf_server = cmd(f"iperf3 -s -1 -p {port}", background=True)
- wait_port_listen(port)
+ self.port = rand_port() if port is None else port
+ self._iperf_server = cmd(f"iperf3 -s -1 -p {self.port}", background=True)
+ wait_port_listen(self.port)
time.sleep(0.1)
- self._iperf_client = cmd(f"iperf3 -c {env.addr} -P 16 -p {port} -t 86400",
+ self._iperf_client = cmd(f"iperf3 -c {env.addr} -P 16 -p {self.port} -t 86400",
background=True, host=env.remote)
# Wait for traffic to ramp up
@@ -56,3 +56,16 @@ class GenerateTraffic:
ksft_pr(">> Server:")
ksft_pr(self._iperf_server.stdout)
ksft_pr(self._iperf_server.stderr)
+ self._wait_client_stopped()
+
+ def _wait_client_stopped(self, sleep=0.005, timeout=5):
+ end = time.monotonic() + timeout
+
+ live_port_pattern = re.compile(fr":{self.port:04X} 0[^6] ")
+
+ while time.monotonic() < end:
+ data = cmd("cat /proc/net/tcp*", host=self.env.remote).stdout
+ if not live_port_pattern.search(data):
+ return
+ time.sleep(sleep)
+ raise Exception(f"Waiting for client to stop timed out after {timeout}s")
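The regular expression used by _wait_client_stopped() keys off /proc/net/tcp formatting: the port appears as four uppercase hex digits after a colon, and the adjacent two-digit column is the socket state, where 06 is TIME_WAIT, so the wait ends once no socket on the iperf3 port is in any other state. Purely to make that format concrete, here is a loose C rendering of the same check (not the selftest code, and the port value is illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* A port is still "live" if some /proc/net/tcp line shows it next to a
 * socket state other than 06 (TIME_WAIT). */
static bool port_still_live(FILE *fp, unsigned int port)
{
	char pat[8], line[512];

	snprintf(pat, sizeof(pat), ":%04X ", port);
	while (fgets(line, sizeof(line), fp)) {
		char *hit = strstr(line, pat);

		if (hit && hit[6] == '0' && hit[7] != '\0' &&
		    hit[7] != '6' && hit[8] == ' ')
			return true;
	}
	return false;
}

int main(void)
{
	FILE *fp = fopen("/proc/net/tcp", "r");

	if (fp) {
		printf("port 5201 live: %d\n", port_still_live(fp, 5201));
		fclose(fp);
	}
	return 0;
}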
diff --git a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
index b7c8f29c09a9..65916bb55dfb 100644
--- a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
+++ b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
@@ -14,11 +14,35 @@ fail() { #msg
exit_fail
}
+# As reading trace can last forever, simply look for 3 different
+# events then exit out of reading the file. If there's not 3 different
+# events, then the test has failed.
+check_unique() {
+ cat trace | grep -v '^#' | awk '
+ BEGIN { cnt = 0; }
+ {
+ for (i = 0; i < cnt; i++) {
+ if (event[i] == $5) {
+ break;
+ }
+ }
+ if (i == cnt) {
+ event[cnt++] = $5;
+ if (cnt > 2) {
+ exit;
+ }
+ }
+ }
+ END {
+ printf "%d", cnt;
+ }'
+}
+
echo 'sched:*' > set_event
yield
-count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`check_unique`
if [ $count -lt 3 ]; then
fail "at least fork, exec and exit events should be recorded"
fi
@@ -29,7 +53,7 @@ echo 1 > events/sched/enable
yield
-count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`check_unique`
if [ $count -lt 3 ]; then
fail "at least fork, exec and exit events should be recorded"
fi
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
index 4b994b6df5ac..ed81eaf2afd6 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
@@ -29,7 +29,7 @@ ftrace_filter_check 'schedule*' '^schedule.*$'
ftrace_filter_check '*pin*lock' '.*pin.*lock$'
# filter by start*mid*
-ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
+ftrace_filter_check 'mutex*unl*' '^mutex.*unl.*'
# Advanced full-glob matching feature is recently supported.
# Skip the tests if we are sure the kernel does not support it.
diff --git a/tools/testing/selftests/futex/include/futextest.h b/tools/testing/selftests/futex/include/futextest.h
index ddbcfc9b7bac..7a5fd1d5355e 100644
--- a/tools/testing/selftests/futex/include/futextest.h
+++ b/tools/testing/selftests/futex/include/futextest.h
@@ -47,6 +47,17 @@ typedef volatile u_int32_t futex_t;
FUTEX_PRIVATE_FLAG)
#endif
+/*
+ * SYS_futex is expected from system C library, in glibc some 32-bit
+ * architectures (e.g. RV32) are using 64-bit time_t, therefore it doesn't have
+ * SYS_futex defined but just SYS_futex_time64. Define SYS_futex as
+ * SYS_futex_time64 in this situation to ensure the compilation and the
+ * compatibility.
+ */
+#if !defined(SYS_futex) && defined(SYS_futex_time64)
+#define SYS_futex SYS_futex_time64
+#endif
+
/**
* futex() - SYS_futex syscall wrapper
* @uaddr: address of first futex
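For readers unfamiliar with the time64 split the new comment refers to: on 32-bit targets whose libc only exposes the 64-bit-time_t syscall number, the fallback define lets a raw-syscall wrapper keep compiling unchanged. A minimal standalone sketch of the same pattern — the wrapper name and argument list are illustrative, not the header's futex() helper itself:

#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Fall back to the 64-bit-time_t variant where that is all libc exposes
 * (e.g. rv32 glibc), as the header comment above describes. */
#if !defined(SYS_futex) && defined(SYS_futex_time64)
#define SYS_futex SYS_futex_time64
#endif

static inline long raw_futex(volatile uint32_t *uaddr, int op, uint32_t val,
			     void *timeout, volatile uint32_t *uaddr2, int val3)
{
	return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}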
diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
index 4927b9add5ad..a81c22d52007 100644
--- a/tools/testing/selftests/iommu/iommufd.c
+++ b/tools/testing/selftests/iommu/iommufd.c
@@ -289,6 +289,10 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested)
&test_hwpt_id);
test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
&test_hwpt_id);
+ test_err_hwpt_alloc(EOPNOTSUPP, self->device_id, self->ioas_id,
+ IOMMU_HWPT_ALLOC_NEST_PARENT |
+ IOMMU_HWPT_FAULT_ID_VALID,
+ &test_hwpt_id);
test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
IOMMU_HWPT_ALLOC_NEST_PARENT,
@@ -1744,6 +1748,7 @@ FIXTURE_VARIANT(iommufd_dirty_tracking)
FIXTURE_SETUP(iommufd_dirty_tracking)
{
+ size_t mmap_buffer_size;
unsigned long size;
int mmap_flags;
void *vrc;
@@ -1758,22 +1763,33 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
self->fd = open("/dev/iommu", O_RDWR);
ASSERT_NE(-1, self->fd);
- rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size);
- if (rc || !self->buffer) {
- SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
- variant->buffer_size, rc);
- }
-
mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
+ mmap_buffer_size = variant->buffer_size;
if (variant->hugepages) {
/*
* MAP_POPULATE will cause the kernel to fail mmap if THPs are
* not available.
*/
mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
+
+ /*
+ * Allocation must be aligned to the HUGEPAGE_SIZE, because the
+ * following mmap() will automatically align the length to be a
+ * multiple of the underlying huge page size. Failing to do the
+ * same at this allocation will result in a memory overwrite by
+ * the mmap().
+ */
+ if (mmap_buffer_size < HUGEPAGE_SIZE)
+ mmap_buffer_size = HUGEPAGE_SIZE;
+ }
+
+ rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, mmap_buffer_size);
+ if (rc || !self->buffer) {
+ SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
+ mmap_buffer_size, rc);
}
assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
- vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE,
+ vrc = mmap(self->buffer, mmap_buffer_size, PROT_READ | PROT_WRITE,
mmap_flags, -1, 0);
assert(vrc == self->buffer);
@@ -1802,8 +1818,8 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
FIXTURE_TEARDOWN(iommufd_dirty_tracking)
{
- munmap(self->buffer, variant->buffer_size);
- munmap(self->bitmap, DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE));
+ free(self->buffer);
+ free(self->bitmap);
teardown_iommufd(self->fd, _metadata);
}
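The reasoning in the new iommufd comment is that mmap(..., MAP_HUGETLB) rounds the mapping length up to a huge-page multiple, so a posix_memalign() backing allocation shorter than HUGEPAGE_SIZE would be partially overwritten by the MAP_FIXED map. A tiny sketch of the rounding that now happens before the allocation (the constant and helper name are illustrative):

#include <stdlib.h>

#define HUGEPAGE_SIZE (512UL * 4096)	/* illustrative: 2M huge pages */

/* Never hand mmap(MAP_FIXED | MAP_HUGETLB) a backing region smaller than
 * what the kernel will round the mapping length up to. */
static int alloc_dirty_buffer(void **buf, size_t size, int hugepages)
{
	if (hugepages && size < HUGEPAGE_SIZE)
		size = HUGEPAGE_SIZE;

	return posix_memalign(buf, HUGEPAGE_SIZE, size);
}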
diff --git a/tools/testing/selftests/mincore/mincore_selftest.c b/tools/testing/selftests/mincore/mincore_selftest.c
index e949a43a6145..efabfcbe0b49 100644
--- a/tools/testing/selftests/mincore/mincore_selftest.c
+++ b/tools/testing/selftests/mincore/mincore_selftest.c
@@ -261,9 +261,6 @@ TEST(check_file_mmap)
TH_LOG("No read-ahead pages found in memory");
}
- EXPECT_LT(i, vec_size) {
- TH_LOG("Read-ahead pages reached the end of the file");
- }
/*
* End of the readahead window. The rest of the pages shouldn't
* be in memory.
diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c
index 2c3a0eb6b22d..9bc4591c7b16 100644
--- a/tools/testing/selftests/mm/compaction_test.c
+++ b/tools/testing/selftests/mm/compaction_test.c
@@ -90,6 +90,8 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size,
int compaction_index = 0;
char nr_hugepages[20] = {0};
char init_nr_hugepages[24] = {0};
+ char target_nr_hugepages[24] = {0};
+ int slen;
snprintf(init_nr_hugepages, sizeof(init_nr_hugepages),
"%lu", initial_nr_hugepages);
@@ -106,11 +108,18 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size,
goto out;
}
- /* Request a large number of huge pages. The Kernel will allocate
- as much as it can */
- if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
- ksft_print_msg("Failed to write 100000 to /proc/sys/vm/nr_hugepages: %s\n",
- strerror(errno));
+ /*
+ * Request huge pages for about half of the free memory. The Kernel
+ * will allocate as much as it can, and we expect it will get at least 1/3
+ */
+ nr_hugepages_ul = mem_free / hugepage_size / 2;
+ snprintf(target_nr_hugepages, sizeof(target_nr_hugepages),
+ "%lu", nr_hugepages_ul);
+
+ slen = strlen(target_nr_hugepages);
+ if (write(fd, target_nr_hugepages, slen) != slen) {
+ ksft_print_msg("Failed to write %lu to /proc/sys/vm/nr_hugepages: %s\n",
+ nr_hugepages_ul, strerror(errno));
goto close_fd;
}
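As a concrete instance of the sizing above: with, say, 8 GiB of free memory and 2 MiB huge pages, mem_free / hugepage_size / 2 requests 2048 huge pages, roughly half of free memory, and the comment notes the test then only expects at least a third of that to be granted. A small sketch of how the request string is formed (the values are examples, not from a test run):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long mem_free = 8UL << 20;	/* KiB: 8 GiB free */
	unsigned long hugepage_size = 2048;	/* KiB: 2 MiB pages */
	unsigned long nr = mem_free / hugepage_size / 2;
	char buf[24];

	snprintf(buf, sizeof(buf), "%lu", nr);
	printf("echo %s > /proc/sys/vm/nr_hugepages (%zu bytes written)\n",
	       buf, strlen(buf));	/* prints 2048 here */
	return 0;
}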
diff --git a/tools/testing/selftests/mm/pkey-powerpc.h b/tools/testing/selftests/mm/pkey-powerpc.h
index 3d0c0bdae5bc..e90af82f8688 100644
--- a/tools/testing/selftests/mm/pkey-powerpc.h
+++ b/tools/testing/selftests/mm/pkey-powerpc.h
@@ -102,8 +102,18 @@ void expect_fault_on_read_execonly_key(void *p1, int pkey)
return;
}
+#define REPEAT_8(s) s s s s s s s s
+#define REPEAT_64(s) REPEAT_8(s) REPEAT_8(s) REPEAT_8(s) REPEAT_8(s) \
+ REPEAT_8(s) REPEAT_8(s) REPEAT_8(s) REPEAT_8(s)
+#define REPEAT_512(s) REPEAT_64(s) REPEAT_64(s) REPEAT_64(s) REPEAT_64(s) \
+ REPEAT_64(s) REPEAT_64(s) REPEAT_64(s) REPEAT_64(s)
+#define REPEAT_4096(s) REPEAT_512(s) REPEAT_512(s) REPEAT_512(s) REPEAT_512(s) \
+ REPEAT_512(s) REPEAT_512(s) REPEAT_512(s) REPEAT_512(s)
+#define REPEAT_16384(s) REPEAT_4096(s) REPEAT_4096(s) \
+ REPEAT_4096(s) REPEAT_4096(s)
+
/* 4-byte instructions * 16384 = 64K page */
-#define __page_o_noops() asm(".rept 16384 ; nop; .endr")
+#define __page_o_noops() asm(REPEAT_16384("nop\n"))
void *malloc_pkey_with_mprotect_subpage(long size, int prot, u16 pkey)
{
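The REPEAT_* macro stack is a preprocessor replacement for the assembler-level ".rept 16384" directive, presumably to avoid relying on .rept inside inline asm; either way it still expands to exactly 16384 4-byte nops, one 64K page, as the existing comment states. The same trick at a much smaller scale, purely as a hedged illustration:

/* Build 16 copies of a statement in the preprocessor, the same way
 * REPEAT_16384() builds 16384 nops without using ".rept". */
#define REPEAT_4(s)  s s s s
#define REPEAT_16(s) REPEAT_4(s) REPEAT_4(s) REPEAT_4(s) REPEAT_4(s)

static void sixteen_nops(void)
{
	asm volatile(REPEAT_16("nop\n"));
}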
diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb.sh b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
index d9d587454d20..8c1597ebc2d3 100755
--- a/tools/testing/selftests/net/forwarding/bridge_mdb.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
@@ -149,7 +149,7 @@ cfg_test_host_common()
check_err $? "Failed to add $name host entry"
bridge mdb replace dev br0 port br0 grp $grp $state vid 10 &> /dev/null
- check_fail $? "Managed to replace $name host entry"
+ check_err $? "Failed to replace $name host entry"
bridge mdb del dev br0 port br0 grp $grp $state vid 10
bridge mdb get dev br0 grp $grp vid 10 &> /dev/null
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index c992e385159c..195360082d94 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -48,7 +48,6 @@ declare -A NETIFS=(
: "${WAIT_TIME:=5}"
# Whether to pause on, respectively, after a failure and before cleanup.
-: "${PAUSE_ON_FAIL:=no}"
: "${PAUSE_ON_CLEANUP:=no}"
# Whether to create virtual interfaces, and what netdevice type they should be.
@@ -446,22 +445,6 @@ done
##############################################################################
# Helpers
-# Exit status to return at the end. Set in case one of the tests fails.
-EXIT_STATUS=0
-# Per-test return value. Clear at the beginning of each test.
-RET=0
-
-ret_set_ksft_status()
-{
- local ksft_status=$1; shift
- local msg=$1; shift
-
- RET=$(ksft_status_merge $RET $ksft_status)
- if (( $? )); then
- retmsg=$msg
- fi
-}
-
# Whether FAILs should be interpreted as XFAILs. Internal.
FAIL_TO_XFAIL=
@@ -535,102 +518,6 @@ xfail_on_veth()
fi
}
-log_test_result()
-{
- local test_name=$1; shift
- local opt_str=$1; shift
- local result=$1; shift
- local retmsg=$1; shift
-
- printf "TEST: %-60s [%s]\n" "$test_name $opt_str" "$result"
- if [[ $retmsg ]]; then
- printf "\t%s\n" "$retmsg"
- fi
-}
-
-pause_on_fail()
-{
- if [[ $PAUSE_ON_FAIL == yes ]]; then
- echo "Hit enter to continue, 'q' to quit"
- read a
- [[ $a == q ]] && exit 1
- fi
-}
-
-handle_test_result_pass()
-{
- local test_name=$1; shift
- local opt_str=$1; shift
-
- log_test_result "$test_name" "$opt_str" " OK "
-}
-
-handle_test_result_fail()
-{
- local test_name=$1; shift
- local opt_str=$1; shift
-
- log_test_result "$test_name" "$opt_str" FAIL "$retmsg"
- pause_on_fail
-}
-
-handle_test_result_xfail()
-{
- local test_name=$1; shift
- local opt_str=$1; shift
-
- log_test_result "$test_name" "$opt_str" XFAIL "$retmsg"
- pause_on_fail
-}
-
-handle_test_result_skip()
-{
- local test_name=$1; shift
- local opt_str=$1; shift
-
- log_test_result "$test_name" "$opt_str" SKIP "$retmsg"
-}
-
-log_test()
-{
- local test_name=$1
- local opt_str=$2
-
- if [[ $# -eq 2 ]]; then
- opt_str="($opt_str)"
- fi
-
- if ((RET == ksft_pass)); then
- handle_test_result_pass "$test_name" "$opt_str"
- elif ((RET == ksft_xfail)); then
- handle_test_result_xfail "$test_name" "$opt_str"
- elif ((RET == ksft_skip)); then
- handle_test_result_skip "$test_name" "$opt_str"
- else
- handle_test_result_fail "$test_name" "$opt_str"
- fi
-
- EXIT_STATUS=$(ksft_exit_status_merge $EXIT_STATUS $RET)
- return $RET
-}
-
-log_test_skip()
-{
- RET=$ksft_skip retmsg= log_test "$@"
-}
-
-log_test_xfail()
-{
- RET=$ksft_xfail retmsg= log_test "$@"
-}
-
-log_info()
-{
- local msg=$1
-
- echo "INFO: $msg"
-}
-
not()
{
"$@"
diff --git a/tools/testing/selftests/net/gro.sh b/tools/testing/selftests/net/gro.sh
index 02c21ff4ca81..aabd6e5480b8 100755
--- a/tools/testing/selftests/net/gro.sh
+++ b/tools/testing/selftests/net/gro.sh
@@ -100,5 +100,6 @@ trap cleanup EXIT
if [[ "${test}" == "all" ]]; then
run_all_tests
else
- run_test "${proto}" "${test}"
+ exit_code=$(run_test "${proto}" "${test}")
+ exit $exit_code
fi;
diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
index be8707bfb46e..bb4d2f8d50d6 100644
--- a/tools/testing/selftests/net/lib.sh
+++ b/tools/testing/selftests/net/lib.sh
@@ -6,6 +6,9 @@
: "${WAIT_TIMEOUT:=20}"
+# Whether to pause on after a failure.
+: "${PAUSE_ON_FAIL:=no}"
+
BUSYWAIT_TIMEOUT=$((WAIT_TIMEOUT * 1000)) # ms
# Kselftest framework constants.
@@ -17,6 +20,11 @@ ksft_skip=4
# namespace list created by setup_ns
NS_LIST=()
+# Exit status to return at the end. Set in case one of the tests fails.
+EXIT_STATUS=0
+# Per-test return value. Clear at the beginning of each test.
+RET=0
+
##############################################################################
# Helpers
@@ -233,3 +241,110 @@ tc_rule_handle_stats_get()
| jq ".[] | select(.options.handle == $handle) | \
.options.actions[0].stats$selector"
}
+
+ret_set_ksft_status()
+{
+ local ksft_status=$1; shift
+ local msg=$1; shift
+
+ RET=$(ksft_status_merge $RET $ksft_status)
+ if (( $? )); then
+ retmsg=$msg
+ fi
+}
+
+log_test_result()
+{
+ local test_name=$1; shift
+ local opt_str=$1; shift
+ local result=$1; shift
+ local retmsg=$1
+
+ printf "TEST: %-60s [%s]\n" "$test_name $opt_str" "$result"
+ if [[ $retmsg ]]; then
+ printf "\t%s\n" "$retmsg"
+ fi
+}
+
+pause_on_fail()
+{
+ if [[ $PAUSE_ON_FAIL == yes ]]; then
+ echo "Hit enter to continue, 'q' to quit"
+ read a
+ [[ $a == q ]] && exit 1
+ fi
+}
+
+handle_test_result_pass()
+{
+ local test_name=$1; shift
+ local opt_str=$1; shift
+
+ log_test_result "$test_name" "$opt_str" " OK "
+}
+
+handle_test_result_fail()
+{
+ local test_name=$1; shift
+ local opt_str=$1; shift
+
+ log_test_result "$test_name" "$opt_str" FAIL "$retmsg"
+ pause_on_fail
+}
+
+handle_test_result_xfail()
+{
+ local test_name=$1; shift
+ local opt_str=$1; shift
+
+ log_test_result "$test_name" "$opt_str" XFAIL "$retmsg"
+ pause_on_fail
+}
+
+handle_test_result_skip()
+{
+ local test_name=$1; shift
+ local opt_str=$1; shift
+
+ log_test_result "$test_name" "$opt_str" SKIP "$retmsg"
+}
+
+log_test()
+{
+ local test_name=$1
+ local opt_str=$2
+
+ if [[ $# -eq 2 ]]; then
+ opt_str="($opt_str)"
+ fi
+
+ if ((RET == ksft_pass)); then
+ handle_test_result_pass "$test_name" "$opt_str"
+ elif ((RET == ksft_xfail)); then
+ handle_test_result_xfail "$test_name" "$opt_str"
+ elif ((RET == ksft_skip)); then
+ handle_test_result_skip "$test_name" "$opt_str"
+ else
+ handle_test_result_fail "$test_name" "$opt_str"
+ fi
+
+ EXIT_STATUS=$(ksft_exit_status_merge $EXIT_STATUS $RET)
+ return $RET
+}
+
+log_test_skip()
+{
+ RET=$ksft_skip retmsg= log_test "$@"
+}
+
+log_test_xfail()
+{
+ RET=$ksft_xfail retmsg= log_test "$@"
+}
+
+log_info()
+{
+ local msg=$1
+
+ echo "INFO: $msg"
+}
diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
index 580610c46e5a..4524085784df 100644
--- a/tools/testing/selftests/net/mptcp/Makefile
+++ b/tools/testing/selftests/net/mptcp/Makefile
@@ -4,7 +4,8 @@ top_srcdir = ../../../../..
CFLAGS += -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
-TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \
+TEST_PROGS := mptcp_connect.sh mptcp_connect_mmap.sh mptcp_connect_sendfile.sh \
+ mptcp_connect_checksum.sh pm_netlink.sh mptcp_join.sh diag.sh \
simult_flows.sh mptcp_sockopt.sh userspace_pm.sh
TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh b/tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh
new file mode 100644
index 000000000000..ce93ec2f107f
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \
+ "$(dirname "${0}")/mptcp_connect.sh" -C "${@}"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh b/tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh
new file mode 100644
index 000000000000..5dd30f9394af
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \
+ "$(dirname "${0}")/mptcp_connect.sh" -m mmap "${@}"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh b/tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh
new file mode 100644
index 000000000000..1d16fb1cc9bb
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \
+ "$(dirname "${0}")/mptcp_connect.sh" -m sendfile "${@}"
diff --git a/tools/testing/selftests/net/ncdevmem.c b/tools/testing/selftests/net/ncdevmem.c
index 64d6805381c5..8617e6d7698d 100644
--- a/tools/testing/selftests/net/ncdevmem.c
+++ b/tools/testing/selftests/net/ncdevmem.c
@@ -62,7 +62,7 @@
*/
static char *server_ip = "192.168.1.4";
-static char *client_ip = "192.168.1.2";
+static char *client_ip;
static char *port = "5201";
static size_t do_validation;
static int start_queue = 8;
@@ -71,24 +71,107 @@ static char *ifname = "eth1";
static unsigned int ifindex;
static unsigned int dmabuf_id;
-void print_bytes(void *ptr, size_t size)
+struct memory_buffer {
+ int fd;
+ size_t size;
+
+ int devfd;
+ int memfd;
+ char *buf_mem;
+};
+
+struct memory_provider {
+ struct memory_buffer *(*alloc)(size_t size);
+ void (*free)(struct memory_buffer *ctx);
+ void (*memcpy_from_device)(void *dst, struct memory_buffer *src,
+ size_t off, int n);
+};
+
+static struct memory_buffer *udmabuf_alloc(size_t size)
{
- unsigned char *p = ptr;
- int i;
+ struct udmabuf_create create;
+ struct memory_buffer *ctx;
+ int ret;
- for (i = 0; i < size; i++)
- printf("%02hhX ", p[i]);
- printf("\n");
+ ctx = malloc(sizeof(*ctx));
+ if (!ctx)
+ error(1, ENOMEM, "malloc failed");
+
+ ctx->size = size;
+
+ ctx->devfd = open("/dev/udmabuf", O_RDWR);
+ if (ctx->devfd < 0)
+ error(1, errno,
+ "%s: [skip,no-udmabuf: Unable to access DMA buffer device file]\n",
+ TEST_PREFIX);
+
+ ctx->memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
+ if (ctx->memfd < 0)
+ error(1, errno, "%s: [skip,no-memfd]\n", TEST_PREFIX);
+
+ ret = fcntl(ctx->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
+ if (ret < 0)
+ error(1, errno, "%s: [skip,fcntl-add-seals]\n", TEST_PREFIX);
+
+ ret = ftruncate(ctx->memfd, size);
+ if (ret == -1)
+ error(1, errno, "%s: [FAIL,memfd-truncate]\n", TEST_PREFIX);
+
+ memset(&create, 0, sizeof(create));
+
+ create.memfd = ctx->memfd;
+ create.offset = 0;
+ create.size = size;
+ ctx->fd = ioctl(ctx->devfd, UDMABUF_CREATE, &create);
+ if (ctx->fd < 0)
+ error(1, errno, "%s: [FAIL, create udmabuf]\n", TEST_PREFIX);
+
+ ctx->buf_mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ ctx->fd, 0);
+ if (ctx->buf_mem == MAP_FAILED)
+ error(1, errno, "%s: [FAIL, map udmabuf]\n", TEST_PREFIX);
+
+ return ctx;
+}
+
+static void udmabuf_free(struct memory_buffer *ctx)
+{
+ munmap(ctx->buf_mem, ctx->size);
+ close(ctx->fd);
+ close(ctx->memfd);
+ close(ctx->devfd);
+ free(ctx);
}
-void print_nonzero_bytes(void *ptr, size_t size)
+static void udmabuf_memcpy_from_device(void *dst, struct memory_buffer *src,
+ size_t off, int n)
+{
+ struct dma_buf_sync sync = {};
+
+ sync.flags = DMA_BUF_SYNC_START;
+ ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);
+
+ memcpy(dst, src->buf_mem + off, n);
+
+ sync.flags = DMA_BUF_SYNC_END;
+ ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);
+}
+
+static struct memory_provider udmabuf_memory_provider = {
+ .alloc = udmabuf_alloc,
+ .free = udmabuf_free,
+ .memcpy_from_device = udmabuf_memcpy_from_device,
+};
+
+static struct memory_provider *provider = &udmabuf_memory_provider;
+
+static void print_nonzero_bytes(void *ptr, size_t size)
{
unsigned char *p = ptr;
unsigned int i;
for (i = 0; i < size; i++)
putchar(p[i]);
- printf("\n");
}
void validate_buffer(void *line, size_t size)
@@ -120,7 +203,7 @@ void validate_buffer(void *line, size_t size)
char command[256]; \
memset(command, 0, sizeof(command)); \
snprintf(command, sizeof(command), cmd, ##__VA_ARGS__); \
- printf("Running: %s\n", command); \
+ fprintf(stderr, "Running: %s\n", command); \
system(command); \
})
@@ -128,22 +211,22 @@ static int reset_flow_steering(void)
{
int ret = 0;
- ret = run_command("sudo ethtool -K %s ntuple off", ifname);
+ ret = run_command("sudo ethtool -K %s ntuple off >&2", ifname);
if (ret)
return ret;
- return run_command("sudo ethtool -K %s ntuple on", ifname);
+ return run_command("sudo ethtool -K %s ntuple on >&2", ifname);
}
static int configure_headersplit(bool on)
{
- return run_command("sudo ethtool -G %s tcp-data-split %s", ifname,
+ return run_command("sudo ethtool -G %s tcp-data-split %s >&2", ifname,
on ? "on" : "off");
}
static int configure_rss(void)
{
- return run_command("sudo ethtool -X %s equal %d", ifname, start_queue);
+ return run_command("sudo ethtool -X %s equal %d >&2", ifname, start_queue);
}
static int configure_channels(unsigned int rx, unsigned int tx)
@@ -151,10 +234,29 @@ static int configure_channels(unsigned int rx, unsigned int tx)
return run_command("sudo ethtool -L %s rx %u tx %u", ifname, rx, tx);
}
-static int configure_flow_steering(void)
+static int configure_flow_steering(struct sockaddr_in6 *server_sin)
{
- return run_command("sudo ethtool -N %s flow-type tcp4 src-ip %s dst-ip %s src-port %s dst-port %s queue %d",
- ifname, client_ip, server_ip, port, port, start_queue);
+ const char *type = "tcp6";
+ const char *server_addr;
+ char buf[40];
+
+ inet_ntop(AF_INET6, &server_sin->sin6_addr, buf, sizeof(buf));
+ server_addr = buf;
+
+ if (IN6_IS_ADDR_V4MAPPED(&server_sin->sin6_addr)) {
+ type = "tcp4";
+ server_addr = strrchr(server_addr, ':') + 1;
+ }
+
+ return run_command("sudo ethtool -N %s flow-type %s %s %s dst-ip %s %s %s dst-port %s queue %d >&2",
+ ifname,
+ type,
+ client_ip ? "src-ip" : "",
+ client_ip ?: "",
+ server_addr,
+ client_ip ? "src-port" : "",
+ client_ip ? port : "",
+ port, start_queue);
}
static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
@@ -187,7 +289,7 @@ static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
goto err_close;
}
- printf("got dmabuf id=%d\n", rsp->id);
+ fprintf(stderr, "got dmabuf id=%d\n", rsp->id);
dmabuf_id = rsp->id;
netdev_bind_rx_req_free(req);
@@ -202,66 +304,82 @@ err_close:
return -1;
}
-static void create_udmabuf(int *devfd, int *memfd, int *buf, size_t dmabuf_size)
+static void enable_reuseaddr(int fd)
{
- struct udmabuf_create create;
+ int opt = 1;
int ret;
- *devfd = open("/dev/udmabuf", O_RDWR);
- if (*devfd < 0) {
- error(70, 0,
- "%s: [skip,no-udmabuf: Unable to access DMA buffer device file]\n",
- TEST_PREFIX);
- }
+ ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));
+ if (ret)
+ error(1, errno, "%s: [FAIL, SO_REUSEPORT]\n", TEST_PREFIX);
- *memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
- if (*memfd < 0)
- error(70, 0, "%s: [skip,no-memfd]\n", TEST_PREFIX);
+ ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
+ if (ret)
+ error(1, errno, "%s: [FAIL, SO_REUSEADDR]\n", TEST_PREFIX);
+}
- /* Required for udmabuf */
- ret = fcntl(*memfd, F_ADD_SEALS, F_SEAL_SHRINK);
- if (ret < 0)
- error(73, 0, "%s: [skip,fcntl-add-seals]\n", TEST_PREFIX);
+static int parse_address(const char *str, int port, struct sockaddr_in6 *sin6)
+{
+ int ret;
- ret = ftruncate(*memfd, dmabuf_size);
- if (ret == -1)
- error(74, 0, "%s: [FAIL,memfd-truncate]\n", TEST_PREFIX);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = htons(port);
+
+ ret = inet_pton(sin6->sin6_family, str, &sin6->sin6_addr);
+ if (ret != 1) {
+ /* fallback to plain IPv4 */
+ ret = inet_pton(AF_INET, str, &sin6->sin6_addr.s6_addr32[3]);
+ if (ret != 1)
+ return -1;
+
+ /* add ::ffff prefix */
+ sin6->sin6_addr.s6_addr32[0] = 0;
+ sin6->sin6_addr.s6_addr32[1] = 0;
+ sin6->sin6_addr.s6_addr16[4] = 0;
+ sin6->sin6_addr.s6_addr16[5] = 0xffff;
+ }
- memset(&create, 0, sizeof(create));
+ return 0;
+}
- create.memfd = *memfd;
- create.offset = 0;
- create.size = dmabuf_size;
- *buf = ioctl(*devfd, UDMABUF_CREATE, &create);
- if (*buf < 0)
- error(75, 0, "%s: [FAIL, create udmabuf]\n", TEST_PREFIX);
+static struct netdev_queue_id *create_queues(void)
+{
+ struct netdev_queue_id *queues;
+ size_t i = 0;
+
+ queues = calloc(num_queues, sizeof(*queues));
+ for (i = 0; i < num_queues; i++) {
+ queues[i]._present.type = 1;
+ queues[i]._present.id = 1;
+ queues[i].type = NETDEV_QUEUE_TYPE_RX;
+ queues[i].id = start_queue + i;
+ }
+
+ return queues;
}
-int do_server(void)
+int do_server(struct memory_buffer *mem)
{
char ctrl_data[sizeof(int) * 20000];
struct netdev_queue_id *queues;
size_t non_page_aligned_frags = 0;
- struct sockaddr_in client_addr;
- struct sockaddr_in server_sin;
+ struct sockaddr_in6 client_addr;
+ struct sockaddr_in6 server_sin;
size_t page_aligned_frags = 0;
- int devfd, memfd, buf, ret;
size_t total_received = 0;
socklen_t client_addr_len;
bool is_devmem = false;
- char *buf_mem = NULL;
+ char *tmp_mem = NULL;
struct ynl_sock *ys;
- size_t dmabuf_size;
char iobuf[819200];
char buffer[256];
int socket_fd;
int client_fd;
- size_t i = 0;
- int opt = 1;
-
- dmabuf_size = getpagesize() * NUM_PAGES;
+ int ret;
- create_udmabuf(&devfd, &memfd, &buf, dmabuf_size);
+ ret = parse_address(server_ip, atoi(port), &server_sin);
+ if (ret < 0)
+ error(1, 0, "parse server address");
if (reset_flow_steering())
error(1, 0, "Failed to reset flow steering\n");
@@ -271,92 +389,65 @@ int do_server(void)
error(1, 0, "Failed to configure rss\n");
/* Flow steer our devmem flows to start_queue */
- if (configure_flow_steering())
+ if (configure_flow_steering(&server_sin))
error(1, 0, "Failed to configure flow steering\n");
sleep(1);
- queues = malloc(sizeof(*queues) * num_queues);
-
- for (i = 0; i < num_queues; i++) {
- queues[i]._present.type = 1;
- queues[i]._present.id = 1;
- queues[i].type = NETDEV_QUEUE_TYPE_RX;
- queues[i].id = start_queue + i;
- }
-
- if (bind_rx_queue(ifindex, buf, queues, num_queues, &ys))
+ if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
error(1, 0, "Failed to bind\n");
- buf_mem = mmap(NULL, dmabuf_size, PROT_READ | PROT_WRITE, MAP_SHARED,
- buf, 0);
- if (buf_mem == MAP_FAILED)
- error(1, 0, "mmap()");
-
- server_sin.sin_family = AF_INET;
- server_sin.sin_port = htons(atoi(port));
+ tmp_mem = malloc(mem->size);
+ if (!tmp_mem)
+ error(1, ENOMEM, "malloc failed");
- ret = inet_pton(server_sin.sin_family, server_ip, &server_sin.sin_addr);
- if (socket < 0)
- error(79, 0, "%s: [FAIL, create socket]\n", TEST_PREFIX);
+ socket_fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (socket_fd < 0)
+ error(1, errno, "%s: [FAIL, create socket]\n", TEST_PREFIX);
- socket_fd = socket(server_sin.sin_family, SOCK_STREAM, 0);
- if (socket < 0)
- error(errno, errno, "%s: [FAIL, create socket]\n", TEST_PREFIX);
+ enable_reuseaddr(socket_fd);
- ret = setsockopt(socket_fd, SOL_SOCKET, SO_REUSEPORT, &opt,
- sizeof(opt));
- if (ret)
- error(errno, errno, "%s: [FAIL, set sock opt]\n", TEST_PREFIX);
-
- ret = setsockopt(socket_fd, SOL_SOCKET, SO_REUSEADDR, &opt,
- sizeof(opt));
- if (ret)
- error(errno, errno, "%s: [FAIL, set sock opt]\n", TEST_PREFIX);
-
- printf("binding to address %s:%d\n", server_ip,
- ntohs(server_sin.sin_port));
+ fprintf(stderr, "binding to address %s:%d\n", server_ip,
+ ntohs(server_sin.sin6_port));
ret = bind(socket_fd, &server_sin, sizeof(server_sin));
if (ret)
- error(errno, errno, "%s: [FAIL, bind]\n", TEST_PREFIX);
+ error(1, errno, "%s: [FAIL, bind]\n", TEST_PREFIX);
ret = listen(socket_fd, 1);
if (ret)
- error(errno, errno, "%s: [FAIL, listen]\n", TEST_PREFIX);
+ error(1, errno, "%s: [FAIL, listen]\n", TEST_PREFIX);
client_addr_len = sizeof(client_addr);
- inet_ntop(server_sin.sin_family, &server_sin.sin_addr, buffer,
+ inet_ntop(AF_INET6, &server_sin.sin6_addr, buffer,
sizeof(buffer));
- printf("Waiting or connection on %s:%d\n", buffer,
- ntohs(server_sin.sin_port));
+ fprintf(stderr, "Waiting for connection on %s:%d\n", buffer,
+ ntohs(server_sin.sin6_port));
client_fd = accept(socket_fd, &client_addr, &client_addr_len);
- inet_ntop(client_addr.sin_family, &client_addr.sin_addr, buffer,
+ inet_ntop(AF_INET6, &client_addr.sin6_addr, buffer,
sizeof(buffer));
- printf("Got connection from %s:%d\n", buffer,
- ntohs(client_addr.sin_port));
+ fprintf(stderr, "Got connection from %s:%d\n", buffer,
+ ntohs(client_addr.sin6_port));
while (1) {
struct iovec iov = { .iov_base = iobuf,
.iov_len = sizeof(iobuf) };
struct dmabuf_cmsg *dmabuf_cmsg = NULL;
- struct dma_buf_sync sync = { 0 };
struct cmsghdr *cm = NULL;
struct msghdr msg = { 0 };
struct dmabuf_token token;
ssize_t ret;
is_devmem = false;
- printf("\n\n");
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
msg.msg_control = ctrl_data;
msg.msg_controllen = sizeof(ctrl_data);
ret = recvmsg(client_fd, &msg, MSG_SOCK_DEVMEM);
- printf("recvmsg ret=%ld\n", ret);
+ fprintf(stderr, "recvmsg ret=%ld\n", ret);
if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
continue;
if (ret < 0) {
@@ -364,16 +455,15 @@ int do_server(void)
continue;
}
if (ret == 0) {
- printf("client exited\n");
+ fprintf(stderr, "client exited\n");
goto cleanup;
}
- i++;
for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
if (cm->cmsg_level != SOL_SOCKET ||
(cm->cmsg_type != SCM_DEVMEM_DMABUF &&
cm->cmsg_type != SCM_DEVMEM_LINEAR)) {
- fprintf(stdout, "skipping non-devmem cmsg\n");
+ fprintf(stderr, "skipping non-devmem cmsg\n");
continue;
}
@@ -384,7 +474,7 @@ int do_server(void)
/* TODO: process data copied from skb's linear
* buffer.
*/
- fprintf(stdout,
+ fprintf(stderr,
"SCM_DEVMEM_LINEAR. dmabuf_cmsg->frag_size=%u\n",
dmabuf_cmsg->frag_size);
@@ -395,12 +485,13 @@ int do_server(void)
token.token_count = 1;
total_received += dmabuf_cmsg->frag_size;
- printf("received frag_page=%llu, in_page_offset=%llu, frag_offset=%llu, frag_size=%u, token=%u, total_received=%lu, dmabuf_id=%u\n",
- dmabuf_cmsg->frag_offset >> PAGE_SHIFT,
- dmabuf_cmsg->frag_offset % getpagesize(),
- dmabuf_cmsg->frag_offset, dmabuf_cmsg->frag_size,
- dmabuf_cmsg->frag_token, total_received,
- dmabuf_cmsg->dmabuf_id);
+ fprintf(stderr,
+ "received frag_page=%llu, in_page_offset=%llu, frag_offset=%llu, frag_size=%u, token=%u, total_received=%lu, dmabuf_id=%u\n",
+ dmabuf_cmsg->frag_offset >> PAGE_SHIFT,
+ dmabuf_cmsg->frag_offset % getpagesize(),
+ dmabuf_cmsg->frag_offset,
+ dmabuf_cmsg->frag_size, dmabuf_cmsg->frag_token,
+ total_received, dmabuf_cmsg->dmabuf_id);
if (dmabuf_cmsg->dmabuf_id != dmabuf_id)
error(1, 0,
@@ -411,22 +502,16 @@ int do_server(void)
else
page_aligned_frags++;
- sync.flags = DMA_BUF_SYNC_READ | DMA_BUF_SYNC_START;
- ioctl(buf, DMA_BUF_IOCTL_SYNC, &sync);
+ provider->memcpy_from_device(tmp_mem, mem,
+ dmabuf_cmsg->frag_offset,
+ dmabuf_cmsg->frag_size);
if (do_validation)
- validate_buffer(
- ((unsigned char *)buf_mem) +
- dmabuf_cmsg->frag_offset,
- dmabuf_cmsg->frag_size);
+ validate_buffer(tmp_mem,
+ dmabuf_cmsg->frag_size);
else
- print_nonzero_bytes(
- ((unsigned char *)buf_mem) +
- dmabuf_cmsg->frag_offset,
- dmabuf_cmsg->frag_size);
-
- sync.flags = DMA_BUF_SYNC_READ | DMA_BUF_SYNC_END;
- ioctl(buf, DMA_BUF_IOCTL_SYNC, &sync);
+ print_nonzero_bytes(tmp_mem,
+ dmabuf_cmsg->frag_size);
ret = setsockopt(client_fd, SOL_SOCKET,
SO_DEVMEM_DONTNEED, &token,
@@ -438,25 +523,22 @@ int do_server(void)
if (!is_devmem)
error(1, 0, "flow steering error\n");
- printf("total_received=%lu\n", total_received);
+ fprintf(stderr, "total_received=%lu\n", total_received);
}
- fprintf(stdout, "%s: ok\n", TEST_PREFIX);
+ fprintf(stderr, "%s: ok\n", TEST_PREFIX);
- fprintf(stdout, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
+ fprintf(stderr, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
page_aligned_frags, non_page_aligned_frags);
- fprintf(stdout, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
+ fprintf(stderr, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
page_aligned_frags, non_page_aligned_frags);
cleanup:
- munmap(buf_mem, dmabuf_size);
+ free(tmp_mem);
close(client_fd);
close(socket_fd);
- close(buf);
- close(memfd);
- close(devfd);
ynl_sock_destroy(ys);
return 0;
@@ -464,52 +546,33 @@ cleanup:
void run_devmem_tests(void)
{
- struct netdev_queue_id *queues;
- int devfd, memfd, buf;
+ struct memory_buffer *mem;
struct ynl_sock *ys;
- size_t dmabuf_size;
- size_t i = 0;
- dmabuf_size = getpagesize() * NUM_PAGES;
-
- create_udmabuf(&devfd, &memfd, &buf, dmabuf_size);
+ mem = provider->alloc(getpagesize() * NUM_PAGES);
/* Configure RSS to divert all traffic from our devmem queues */
if (configure_rss())
error(1, 0, "rss error\n");
- queues = calloc(num_queues, sizeof(*queues));
-
if (configure_headersplit(1))
error(1, 0, "Failed to configure header split\n");
- if (!bind_rx_queue(ifindex, buf, queues, num_queues, &ys))
+ if (!bind_rx_queue(ifindex, mem->fd,
+ calloc(num_queues, sizeof(struct netdev_queue_id)),
+ num_queues, &ys))
error(1, 0, "Binding empty queues array should have failed\n");
- for (i = 0; i < num_queues; i++) {
- queues[i]._present.type = 1;
- queues[i]._present.id = 1;
- queues[i].type = NETDEV_QUEUE_TYPE_RX;
- queues[i].id = start_queue + i;
- }
-
if (configure_headersplit(0))
error(1, 0, "Failed to configure header split\n");
- if (!bind_rx_queue(ifindex, buf, queues, num_queues, &ys))
+ if (!bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
error(1, 0, "Configure dmabuf with header split off should have failed\n");
if (configure_headersplit(1))
error(1, 0, "Failed to configure header split\n");
- for (i = 0; i < num_queues; i++) {
- queues[i]._present.type = 1;
- queues[i]._present.id = 1;
- queues[i].type = NETDEV_QUEUE_TYPE_RX;
- queues[i].id = start_queue + i;
- }
-
- if (bind_rx_queue(ifindex, buf, queues, num_queues, &ys))
+ if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
error(1, 0, "Failed to bind\n");
/* Deactivating a bound queue should not be legal */
@@ -518,11 +581,15 @@ void run_devmem_tests(void)
/* Closing the netlink socket does an implicit unbind */
ynl_sock_destroy(ys);
+
+ provider->free(mem);
}
int main(int argc, char *argv[])
{
+ struct memory_buffer *mem;
int is_server = 0, opt;
+ int ret;
while ((opt = getopt(argc, argv, "ls:c:p:v:q:t:f:")) != -1) {
switch (opt) {
@@ -551,7 +618,7 @@ int main(int argc, char *argv[])
ifname = optarg;
break;
case '?':
- printf("unknown option: %c\n", optopt);
+ fprintf(stderr, "unknown option: %c\n", optopt);
break;
}
}
@@ -559,12 +626,13 @@ int main(int argc, char *argv[])
ifindex = if_nametoindex(ifname);
for (; optind < argc; optind++)
- printf("extra arguments: %s\n", argv[optind]);
+ fprintf(stderr, "extra arguments: %s\n", argv[optind]);
run_devmem_tests();
- if (is_server)
- return do_server();
+ mem = provider->alloc(getpagesize() * NUM_PAGES);
+ ret = is_server ? do_server(mem) : 1;
+ provider->free(mem);
- return 0;
+ return ret;
}
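Among the ncdevmem.c conversions above, parse_address() is the piece most worth spelling out: every address is normalized into a sockaddr_in6, with plain IPv4 input stored as a V4-mapped address (::ffff:a.b.c.d), which is what lets configure_flow_steering() fall back to a tcp4 ethtool rule when IN6_IS_ADDR_V4MAPPED() is true. A self-contained sketch of that normalization (stand-alone and illustrative, not the test source):

#define _GNU_SOURCE
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

/* Normalize an IPv4 or IPv6 literal into a sockaddr_in6, storing IPv4
 * input as a V4-mapped address, mirroring parse_address() above. */
static int to_sin6(const char *str, int port, struct sockaddr_in6 *sin6)
{
	memset(sin6, 0, sizeof(*sin6));
	sin6->sin6_family = AF_INET6;
	sin6->sin6_port = htons(port);

	if (inet_pton(AF_INET6, str, &sin6->sin6_addr) == 1)
		return 0;
	if (inet_pton(AF_INET, str, &sin6->sin6_addr.s6_addr32[3]) != 1)
		return -1;
	sin6->sin6_addr.s6_addr16[5] = 0xffff;	/* ::ffff: prefix */
	return 0;
}

int main(void)
{
	struct sockaddr_in6 sin6;
	char buf[INET6_ADDRSTRLEN];

	to_sin6("192.168.1.4", 5201, &sin6);
	inet_ntop(AF_INET6, &sin6.sin6_addr, buf, sizeof(buf));
	printf("%s v4mapped=%d\n", buf, IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr));
	/* prints "::ffff:192.168.1.4 v4mapped=1" */
	return 0;
}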
diff --git a/tools/testing/selftests/net/netfilter/config b/tools/testing/selftests/net/netfilter/config
index 43d8b500d391..8cc6036f97dc 100644
--- a/tools/testing/selftests/net/netfilter/config
+++ b/tools/testing/selftests/net/netfilter/config
@@ -91,4 +91,4 @@ CONFIG_XFRM_STATISTICS=y
CONFIG_NET_PKTGEN=m
CONFIG_TUN=m
CONFIG_INET_DIAG=m
-CONFIG_SCTP_DIAG=m
+CONFIG_INET_SCTP_DIAG=m
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 87dce3efe31e..8a92432177d3 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -738,6 +738,11 @@ kci_test_ipsec_offload()
sysfsf=$sysfsd/ipsec
sysfsnet=/sys/bus/netdevsim/devices/netdevsim0/net/
probed=false
+ esp4_offload_probed_default=false
+
+ if lsmod | grep -q esp4_offload; then
+ esp4_offload_probed_default=true
+ fi
if ! mount | grep -q debugfs; then
mount -t debugfs none /sys/kernel/debug/ &> /dev/null
@@ -831,6 +836,7 @@ EOF
fi
# clean up any leftovers
+ ! "$esp4_offload_probed_default" && lsmod | grep -q esp4_offload && rmmod esp4_offload
echo 0 > /sys/bus/netdevsim/del_device
$probed && rmmod netdevsim
diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
index d5ffd8c9172e..799dbc2b4b01 100755
--- a/tools/testing/selftests/net/udpgro.sh
+++ b/tools/testing/selftests/net/udpgro.sh
@@ -48,7 +48,7 @@ run_one() {
cfg_veth
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} &
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 100 ${rx_args} &
local PID1=$!
wait_local_port_listen ${PEER_NS} 8000 udp
@@ -95,7 +95,7 @@ run_one_nat() {
# will land on the 'plain' one
ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 &
local PID1=$!
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} &
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 100 ${family} -b ${addr2%/*} ${rx_args} &
local PID2=$!
wait_local_port_listen "${PEER_NS}" 8000 udp
@@ -117,9 +117,9 @@ run_one_2sock() {
cfg_veth
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} -p 12345 &
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 100 ${rx_args} -p 12345 &
local PID1=$!
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} &
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 100 ${rx_args} &
local PID2=$!
wait_local_port_listen "${PEER_NS}" 12345 udp
diff --git a/tools/testing/selftests/perf_events/.gitignore b/tools/testing/selftests/perf_events/.gitignore
index ee93dc4969b8..4931b3b6bbd3 100644
--- a/tools/testing/selftests/perf_events/.gitignore
+++ b/tools/testing/selftests/perf_events/.gitignore
@@ -2,3 +2,4 @@
sigtrap_threads
remove_on_exec
watermark_signal
+mmap
diff --git a/tools/testing/selftests/perf_events/Makefile b/tools/testing/selftests/perf_events/Makefile
index 70e3ff211278..2e5d85770dfe 100644
--- a/tools/testing/selftests/perf_events/Makefile
+++ b/tools/testing/selftests/perf_events/Makefile
@@ -2,5 +2,5 @@
CFLAGS += -Wl,-no-as-needed -Wall $(KHDR_INCLUDES)
LDFLAGS += -lpthread
-TEST_GEN_PROGS := sigtrap_threads remove_on_exec watermark_signal
+TEST_GEN_PROGS := sigtrap_threads remove_on_exec watermark_signal mmap
include ../lib.mk
diff --git a/tools/testing/selftests/perf_events/mmap.c b/tools/testing/selftests/perf_events/mmap.c
new file mode 100644
index 000000000000..ea0427aac1f9
--- /dev/null
+++ b/tools/testing/selftests/perf_events/mmap.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define _GNU_SOURCE
+
+#include <dirent.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+
+#include <linux/perf_event.h>
+
+#include "../kselftest_harness.h"
+
+#define RB_SIZE 0x3000
+#define AUX_SIZE 0x10000
+#define AUX_OFFS 0x4000
+
+#define HOLE_SIZE 0x1000
+
+/* Reserve space for rb, aux with space for shrink-beyond-vma testing. */
+#define REGION_SIZE (2 * RB_SIZE + 2 * AUX_SIZE)
+#define REGION_AUX_OFFS (2 * RB_SIZE)
+
+#define MAP_BASE 1
+#define MAP_AUX 2
+
+#define EVENT_SRC_DIR "/sys/bus/event_source/devices"
+
+FIXTURE(perf_mmap)
+{
+ int fd;
+ void *ptr;
+ void *region;
+};
+
+FIXTURE_VARIANT(perf_mmap)
+{
+ bool aux;
+ unsigned long ptr_size;
+};
+
+FIXTURE_VARIANT_ADD(perf_mmap, rb)
+{
+ .aux = false,
+ .ptr_size = RB_SIZE,
+};
+
+FIXTURE_VARIANT_ADD(perf_mmap, aux)
+{
+ .aux = true,
+ .ptr_size = AUX_SIZE,
+};
+
+static bool read_event_type(struct dirent *dent, __u32 *type)
+{
+ char typefn[512];
+ FILE *fp;
+ int res;
+
+ snprintf(typefn, sizeof(typefn), "%s/%s/type", EVENT_SRC_DIR, dent->d_name);
+ fp = fopen(typefn, "r");
+ if (!fp)
+ return false;
+
+ res = fscanf(fp, "%u", type);
+ fclose(fp);
+ return res > 0;
+}
+
+FIXTURE_SETUP(perf_mmap)
+{
+ struct perf_event_attr attr = {
+ .size = sizeof(attr),
+ .disabled = 1,
+ .exclude_kernel = 1,
+ .exclude_hv = 1,
+ };
+ struct perf_event_attr attr_ok = {};
+ unsigned int eacces = 0, map = 0;
+ struct perf_event_mmap_page *rb;
+ struct dirent *dent;
+ void *aux, *region;
+ DIR *dir;
+
+ self->ptr = NULL;
+
+ dir = opendir(EVENT_SRC_DIR);
+ if (!dir)
+ SKIP(return, "perf not available.");
+
+ region = mmap(NULL, REGION_SIZE, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
+ ASSERT_NE(region, MAP_FAILED);
+ self->region = region;
+
+ // Try to find a suitable event on this system
+ while ((dent = readdir(dir))) {
+ int fd;
+
+ if (!read_event_type(dent, &attr.type))
+ continue;
+
+ fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
+ if (fd < 0) {
+ if (errno == EACCES)
+ eacces++;
+ continue;
+ }
+
+ // Check whether the event supports mmap()
+ rb = mmap(region, RB_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
+ if (rb == MAP_FAILED) {
+ close(fd);
+ continue;
+ }
+
+ if (!map) {
+ // Save the event in case no AUX-capable event is found
+ attr_ok = attr;
+ map = MAP_BASE;
+ }
+
+ if (!variant->aux)
+ continue;
+
+ rb->aux_offset = AUX_OFFS;
+ rb->aux_size = AUX_SIZE;
+
+ // Check whether it supports an AUX buffer
+ aux = mmap(region + REGION_AUX_OFFS, AUX_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_FIXED, fd, AUX_OFFS);
+ if (aux == MAP_FAILED) {
+ munmap(rb, RB_SIZE);
+ close(fd);
+ continue;
+ }
+
+ attr_ok = attr;
+ map = MAP_AUX;
+ munmap(aux, AUX_SIZE);
+ munmap(rb, RB_SIZE);
+ close(fd);
+ break;
+ }
+ closedir(dir);
+
+ if (!map) {
+ if (!eacces)
+ SKIP(return, "No mappable perf event found.");
+ else
+ SKIP(return, "No permissions for perf_event_open()");
+ }
+
+ self->fd = syscall(SYS_perf_event_open, &attr_ok, 0, -1, -1, 0);
+ ASSERT_NE(self->fd, -1);
+
+ rb = mmap(region, RB_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, self->fd, 0);
+ ASSERT_NE(rb, MAP_FAILED);
+
+ if (!variant->aux) {
+ self->ptr = rb;
+ return;
+ }
+
+ if (map != MAP_AUX)
+ SKIP(return, "No AUX event found.");
+
+ rb->aux_offset = AUX_OFFS;
+ rb->aux_size = AUX_SIZE;
+ aux = mmap(region + REGION_AUX_OFFS, AUX_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_FIXED, self->fd, AUX_OFFS);
+ ASSERT_NE(aux, MAP_FAILED);
+ self->ptr = aux;
+}
+
+FIXTURE_TEARDOWN(perf_mmap)
+{
+ ASSERT_EQ(munmap(self->region, REGION_SIZE), 0);
+ if (self->fd != -1)
+ ASSERT_EQ(close(self->fd), 0);
+}
+
+TEST_F(perf_mmap, remap)
+{
+ void *tmp, *ptr = self->ptr;
+ unsigned long size = variant->ptr_size;
+
+ // Test the invalid remaps
+ ASSERT_EQ(mremap(ptr, size, HOLE_SIZE, MREMAP_MAYMOVE), MAP_FAILED);
+ ASSERT_EQ(mremap(ptr + HOLE_SIZE, size, HOLE_SIZE, MREMAP_MAYMOVE), MAP_FAILED);
+ ASSERT_EQ(mremap(ptr + size - HOLE_SIZE, HOLE_SIZE, size, MREMAP_MAYMOVE), MAP_FAILED);
+ // Shrink the end of the mapping such that we only unmap past the end of the VMA,
+ // which should succeed and poke a hole into the PROT_NONE region
+ ASSERT_NE(mremap(ptr + size - HOLE_SIZE, size, HOLE_SIZE, MREMAP_MAYMOVE), MAP_FAILED);
+
+ // Remap the whole buffer to a new address
+ tmp = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(tmp, MAP_FAILED);
+
+ // Try remapping a split starting one hole size into the VMA; this should fail
+ ASSERT_EQ(mremap(ptr + HOLE_SIZE, size - HOLE_SIZE, size - HOLE_SIZE,
+ MREMAP_MAYMOVE | MREMAP_FIXED, tmp), MAP_FAILED);
+ // Remapping the whole buffer should succeed
+ ptr = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tmp);
+ ASSERT_EQ(ptr, tmp);
+ ASSERT_EQ(munmap(tmp, size), 0);
+}
+
+TEST_F(perf_mmap, unmap)
+{
+ unsigned long size = variant->ptr_size;
+
+ // Try to poke holes into the mappings
+ ASSERT_NE(munmap(self->ptr, HOLE_SIZE), 0);
+ ASSERT_NE(munmap(self->ptr + HOLE_SIZE, HOLE_SIZE), 0);
+ ASSERT_NE(munmap(self->ptr + size - HOLE_SIZE, HOLE_SIZE), 0);
+}
+
+TEST_F(perf_mmap, map)
+{
+ unsigned long size = variant->ptr_size;
+
+ // Try to poke holes into the mappings by mapping anonymous memory over them
+ ASSERT_EQ(mmap(self->ptr, HOLE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0), MAP_FAILED);
+ ASSERT_EQ(mmap(self->ptr + HOLE_SIZE, HOLE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0), MAP_FAILED);
+ ASSERT_EQ(mmap(self->ptr + size - HOLE_SIZE, HOLE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0), MAP_FAILED);
+}
+
+TEST_HARNESS_MAIN
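
For context, the fixture above follows the usual perf mmap() convention: the data ring buffer (one metadata page plus 2^n data pages) is mapped at file offset 0, and the AUX area's offset and size are announced through the metadata page before the AUX mapping is created. A minimal, hedged sketch of that convention (hypothetical helper, not part of the patch):

/* Sketch only: conventional data + AUX mapping for an already-opened event fd. */
#include <stddef.h>
#include <sys/mman.h>
#include <linux/perf_event.h>

static void *map_rb_and_aux(int fd, size_t rb_size, size_t aux_size)
{
	struct perf_event_mmap_page *meta;
	void *aux;

	/* Data buffer: 1 metadata page + 2^n data pages, at offset 0. */
	meta = mmap(NULL, rb_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (meta == MAP_FAILED)
		return NULL;

	/* AUX offset must be page aligned and at least the data mapping size. */
	meta->aux_offset = rb_size;
	meta->aux_size = aux_size;

	aux = mmap(NULL, aux_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, meta->aux_offset);
	if (aux == MAP_FAILED) {
		munmap(meta, rb_size);
		return NULL;
	}
	return aux;
}
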
diff --git a/tools/testing/selftests/sched_ext/exit.c b/tools/testing/selftests/sched_ext/exit.c
index 31bcd06e21cd..2c084ded2968 100644
--- a/tools/testing/selftests/sched_ext/exit.c
+++ b/tools/testing/selftests/sched_ext/exit.c
@@ -22,6 +22,14 @@ static enum scx_test_status run(void *ctx)
struct bpf_link *link;
char buf[16];
+ /*
+ * On single-CPU systems, ops.select_cpu() is never
+ * invoked, so skip this test to avoid getting stuck
+ * indefinitely.
+ */
+ if (tc == EXIT_SELECT_CPU && libbpf_num_possible_cpus() == 1)
+ continue;
+
skel = exit__open();
skel->rodata->exit_point = tc;
exit__load(skel);
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 8c3a73461475..60c84d935a2b 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1618,14 +1618,8 @@ void teardown_trace_fixture(struct __test_metadata *_metadata,
{
if (tracer) {
int status;
- /*
- * Extract the exit code from the other process and
- * adopt it for ourselves in case its asserts failed.
- */
ASSERT_EQ(0, kill(tracer, SIGUSR1));
ASSERT_EQ(tracer, waitpid(tracer, &status, 0));
- if (WEXITSTATUS(status))
- _metadata->exit_code = KSFT_FAIL;
}
}
@@ -3155,12 +3149,15 @@ TEST(syscall_restart)
ret = get_syscall(_metadata, child_pid);
#if defined(__arm__)
/*
- * FIXME:
* - native ARM registers do NOT expose true syscall.
* - compat ARM registers on ARM64 DO expose true syscall.
+ * - values of utsbuf.machine include 'armv8l' or 'armv8b'
+ * for ARM64 running in compat mode.
*/
ASSERT_EQ(0, uname(&utsbuf));
- if (strncmp(utsbuf.machine, "arm", 3) == 0) {
+ if ((strncmp(utsbuf.machine, "arm", 3) == 0) &&
+ (strncmp(utsbuf.machine, "armv8l", 6) != 0) &&
+ (strncmp(utsbuf.machine, "armv8b", 6) != 0)) {
EXPECT_EQ(__NR_nanosleep, ret);
} else
#endif
diff --git a/tools/testing/selftests/syscall_user_dispatch/sud_test.c b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
index d975a6767329..48cf01aeec3e 100644
--- a/tools/testing/selftests/syscall_user_dispatch/sud_test.c
+++ b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
@@ -79,6 +79,21 @@ TEST_SIGNAL(dispatch_trigger_sigsys, SIGSYS)
}
}
+static void prctl_valid(struct __test_metadata *_metadata,
+ unsigned long op, unsigned long off,
+ unsigned long size, void *sel)
+{
+ EXPECT_EQ(0, prctl(PR_SET_SYSCALL_USER_DISPATCH, op, off, size, sel));
+}
+
+static void prctl_invalid(struct __test_metadata *_metadata,
+ unsigned long op, unsigned long off,
+ unsigned long size, void *sel, int err)
+{
+ EXPECT_EQ(-1, prctl(PR_SET_SYSCALL_USER_DISPATCH, op, off, size, sel));
+ EXPECT_EQ(err, errno);
+}
+
TEST(bad_prctl_param)
{
char sel = SYSCALL_DISPATCH_FILTER_ALLOW;
@@ -86,57 +101,42 @@ TEST(bad_prctl_param)
/* Invalid op */
op = -1;
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0, 0, &sel);
- ASSERT_EQ(EINVAL, errno);
+ prctl_invalid(_metadata, op, 0, 0, &sel, EINVAL);
/* PR_SYS_DISPATCH_OFF */
op = PR_SYS_DISPATCH_OFF;
/* offset != 0 */
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x1, 0x0, 0);
- EXPECT_EQ(EINVAL, errno);
+ prctl_invalid(_metadata, op, 0x1, 0x0, 0, EINVAL);
/* len != 0 */
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0xff, 0);
- EXPECT_EQ(EINVAL, errno);
+ prctl_invalid(_metadata, op, 0x0, 0xff, 0, EINVAL);
/* sel != NULL */
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0x0, &sel);
- EXPECT_EQ(EINVAL, errno);
+ prctl_invalid(_metadata, op, 0x0, 0x0, &sel, EINVAL);
/* Valid parameter */
- errno = 0;
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0x0, 0x0);
- EXPECT_EQ(0, errno);
+ prctl_valid(_metadata, op, 0x0, 0x0, 0x0);
/* PR_SYS_DISPATCH_ON */
op = PR_SYS_DISPATCH_ON;
/* Dispatcher region is bad (offset > 0 && len == 0) */
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x1, 0x0, &sel);
- EXPECT_EQ(EINVAL, errno);
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, -1L, 0x0, &sel);
- EXPECT_EQ(EINVAL, errno);
+ prctl_invalid(_metadata, op, 0x1, 0x0, &sel, EINVAL);
+ prctl_invalid(_metadata, op, -1L, 0x0, &sel, EINVAL);
/* Invalid selector */
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0x1, (void *) -1);
- ASSERT_EQ(EFAULT, errno);
+ prctl_invalid(_metadata, op, 0x0, 0x1, (void *) -1, EFAULT);
/*
* Dispatcher range overflows unsigned long
*/
- prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, 1, -1L, &sel);
- ASSERT_EQ(EINVAL, errno) {
- TH_LOG("Should reject bad syscall range");
- }
+ prctl_invalid(_metadata, PR_SYS_DISPATCH_ON, 1, -1L, &sel, EINVAL);
/*
* Allowed range overflows unsigned long
*/
- prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, -1L, 0x1, &sel);
- ASSERT_EQ(EINVAL, errno) {
- TH_LOG("Should reject bad syscall range");
- }
+ prctl_invalid(_metadata, PR_SYS_DISPATCH_ON, -1L, 0x1, &sel, EINVAL);
}
/*
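
To complement the negative cases above, here is a hedged sketch (not from the patch) of what a valid PR_SYS_DISPATCH_ON call looks like: [off, off + len) marks the code that may keep issuing syscalls directly (typically the libc mapping), and the selector byte gates everything else once flipped to SYSCALL_DISPATCH_FILTER_BLOCK.

#include <err.h>
#include <sys/prctl.h>

#ifndef PR_SET_SYSCALL_USER_DISPATCH
# define PR_SET_SYSCALL_USER_DISPATCH	59
# define PR_SYS_DISPATCH_OFF		0
# define PR_SYS_DISPATCH_ON		1
# define SYSCALL_DISPATCH_FILTER_ALLOW	0
# define SYSCALL_DISPATCH_FILTER_BLOCK	1
#endif

static char sud_selector = SYSCALL_DISPATCH_FILTER_ALLOW;

/* Sketch only: enable dispatch with [off, off + len) as the allowed region. */
static void sud_enable(unsigned long off, unsigned long len)
{
	if (prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
		  off, len, &sud_selector))
		err(1, "PR_SET_SYSCALL_USER_DISPATCH");
	/* Setting sud_selector to SYSCALL_DISPATCH_FILTER_BLOCK now makes any
	 * syscall issued outside [off, off + len) raise SIGSYS. */
}
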
diff --git a/tools/testing/selftests/ublk/test_stripe_04.sh b/tools/testing/selftests/ublk/test_stripe_04.sh
new file mode 100755
index 000000000000..1f2b642381d1
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_stripe_04.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="stripe_04"
+ERR_CODE=0
+
+_prep_test "stripe" "mkfs & mount & umount on zero copy"
+
+backfile_0=$(_create_backfile 256M)
+backfile_1=$(_create_backfile 256M)
+dev_id=$(_add_ublk_dev -t stripe -z -q 2 "$backfile_0" "$backfile_1")
+_check_add_dev $TID $? "$backfile_0" "$backfile_1"
+
+_mkfs_mount_test /dev/ublkb"${dev_id}"
+ERR_CODE=$?
+
+_cleanup_test "stripe"
+
+_remove_backfile "$backfile_0"
+_remove_backfile "$backfile_1"
+
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/vDSO/vdso_test_chacha.c b/tools/testing/selftests/vDSO/vdso_test_chacha.c
index 8757f738b0b1..0aad682b12c8 100644
--- a/tools/testing/selftests/vDSO/vdso_test_chacha.c
+++ b/tools/testing/selftests/vDSO/vdso_test_chacha.c
@@ -76,7 +76,8 @@ static void reference_chacha20_blocks(uint8_t *dst_bytes, const uint32_t *key, u
void __weak __arch_chacha20_blocks_nostack(uint8_t *dst_bytes, const uint32_t *key, uint32_t *counter, size_t nblocks)
{
- ksft_exit_skip("Not implemented on architecture\n");
+ ksft_test_result_skip("Not implemented on architecture\n");
+ ksft_finished();
}
int main(int argc, char *argv[])
diff --git a/tools/testing/selftests/vDSO/vdso_test_getrandom.c b/tools/testing/selftests/vDSO/vdso_test_getrandom.c
index 95057f7567db..ff8d5675da2b 100644
--- a/tools/testing/selftests/vDSO/vdso_test_getrandom.c
+++ b/tools/testing/selftests/vDSO/vdso_test_getrandom.c
@@ -242,6 +242,7 @@ static void kselftest(void)
pid_t child;
ksft_print_header();
+ vgetrandom_init();
ksft_set_plan(2);
for (size_t i = 0; i < 1000; ++i) {
@@ -295,8 +296,6 @@ static void usage(const char *argv0)
int main(int argc, char *argv[])
{
- vgetrandom_init();
-
if (argc == 1) {
kselftest();
return 0;
@@ -306,6 +305,9 @@ int main(int argc, char *argv[])
usage(argv[0]);
return 1;
}
+
+ vgetrandom_init();
+
if (!strcmp(argv[1], "bench-single"))
bench_single();
else if (!strcmp(argv[1], "bench-multi"))
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index d51249f14e2f..5656e58a5380 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -12,7 +12,7 @@ CAN_BUILD_WITH_NOPIE := $(shell ./check_cc.sh "$(CC)" trivial_program.c -no-pie)
TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \
check_initial_reg_state sigreturn iopl ioperm \
- test_vsyscall mov_ss_trap \
+ test_vsyscall mov_ss_trap sigtrap_loop \
syscall_arg_fault fsgsbase_restore sigaltstack
TARGETS_C_BOTHBITS += nx_stack
TARGETS_C_32BIT_ONLY := entry_from_vm86 test_syscall_vdso unwind_vdso \
diff --git a/tools/testing/selftests/x86/bugs/Makefile b/tools/testing/selftests/x86/bugs/Makefile
new file mode 100644
index 000000000000..8ff2d7226c7f
--- /dev/null
+++ b/tools/testing/selftests/x86/bugs/Makefile
@@ -0,0 +1,3 @@
+TEST_PROGS := its_sysfs.py its_permutations.py its_indirect_alignment.py its_ret_alignment.py
+TEST_FILES := common.py
+include ../../lib.mk
diff --git a/tools/testing/selftests/x86/bugs/common.py b/tools/testing/selftests/x86/bugs/common.py
new file mode 100644
index 000000000000..2f9664a80617
--- /dev/null
+++ b/tools/testing/selftests/x86/bugs/common.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2025 Intel Corporation
+#
+# Common helper functions, adapted to the kselftest framework, for testing
+# x86 bug mitigations.
+
+import os, sys, re, shutil
+
+sys.path.insert(0, '../../kselftest')
+import ksft
+
+def read_file(path):
+ if not os.path.exists(path):
+ return None
+ with open(path, 'r') as file:
+ return file.read().strip()
+
+def cpuinfo_has(arg):
+ cpuinfo = read_file('/proc/cpuinfo')
+ if arg in cpuinfo:
+ return True
+ return False
+
+def cmdline_has(arg):
+ cmdline = read_file('/proc/cmdline')
+ if arg in cmdline:
+ return True
+ return False
+
+def cmdline_has_either(args):
+ cmdline = read_file('/proc/cmdline')
+ for arg in args:
+ if arg in cmdline:
+ return True
+ return False
+
+def cmdline_has_none(args):
+ return not cmdline_has_either(args)
+
+def cmdline_has_all(args):
+ cmdline = read_file('/proc/cmdline')
+ for arg in args:
+ if arg not in cmdline:
+ return False
+ return True
+
+def get_sysfs(bug):
+ return read_file("/sys/devices/system/cpu/vulnerabilities/" + bug)
+
+def sysfs_has(bug, mitigation):
+ status = get_sysfs(bug)
+ if mitigation in status:
+ return True
+ return False
+
+def sysfs_has_either(bugs, mitigations):
+ for bug in bugs:
+ for mitigation in mitigations:
+ if sysfs_has(bug, mitigation):
+ return True
+ return False
+
+def sysfs_has_none(bugs, mitigations):
+ return not sysfs_has_either(bugs, mitigations)
+
+def sysfs_has_all(bugs, mitigations):
+ for bug in bugs:
+ for mitigation in mitigations:
+ if not sysfs_has(bug, mitigation):
+ return False
+ return True
+
+def bug_check_pass(bug, found):
+ ksft.print_msg(f"\nFound: {found}")
+ # ksft.print_msg(f"\ncmdline: {read_file('/proc/cmdline')}")
+ ksft.test_result_pass(f'{bug}: {found}')
+
+def bug_check_fail(bug, found, expected):
+ ksft.print_msg(f'\nFound:\t {found}')
+ ksft.print_msg(f'Expected:\t {expected}')
+ ksft.print_msg(f"\ncmdline: {read_file('/proc/cmdline')}")
+ ksft.test_result_fail(f'{bug}: {found}')
+
+def bug_status_unknown(bug, found):
+ ksft.print_msg(f'\nUnknown status: {found}')
+ ksft.print_msg(f"\ncmdline: {read_file('/proc/cmdline')}")
+ ksft.test_result_fail(f'{bug}: {found}')
+
+def basic_checks_sufficient(bug, mitigation):
+ if not mitigation:
+ bug_status_unknown(bug, "None")
+ return True
+ elif mitigation == "Not affected":
+ ksft.test_result_pass(bug)
+ return True
+ elif mitigation == "Vulnerable":
+ if cmdline_has_either([f'{bug}=off', 'mitigations=off']):
+ bug_check_pass(bug, mitigation)
+ return True
+ return False
+
+def get_section_info(vmlinux, section_name):
+ from elftools.elf.elffile import ELFFile
+ with open(vmlinux, 'rb') as f:
+ elffile = ELFFile(f)
+ section = elffile.get_section_by_name(section_name)
+ if section is None:
+ ksft.print_msg("Available sections in vmlinux:")
+ for sec in elffile.iter_sections():
+ ksft.print_msg(sec.name)
+ raise ValueError(f"Section {section_name} not found in {vmlinux}")
+ return section['sh_addr'], section['sh_offset'], section['sh_size']
+
+def get_patch_sites(vmlinux, offset, size):
+ import struct
+ output = []
+ with open(vmlinux, 'rb') as f:
+ f.seek(offset)
+ i = 0
+ while i < size:
+ data = f.read(4) # s32
+ if not data:
+ break
+ sym_offset = struct.unpack('<i', data)[0] + i
+ i += 4
+ output.append(sym_offset)
+ return output
+
+def get_instruction_from_vmlinux(elffile, section, virtual_address, target_address):
+ from capstone import Cs, CS_ARCH_X86, CS_MODE_64
+ section_start = section['sh_addr']
+ section_end = section_start + section['sh_size']
+
+ if not (section_start <= target_address < section_end):
+ return None
+
+ offset = target_address - section_start
+ code = section.data()[offset:offset + 16]
+
+ cap = init_capstone()
+ for instruction in cap.disasm(code, target_address):
+ if instruction.address == target_address:
+ return instruction
+ return None
+
+def init_capstone():
+ from capstone import Cs, CS_ARCH_X86, CS_MODE_64, CS_OPT_SYNTAX_ATT
+ cap = Cs(CS_ARCH_X86, CS_MODE_64)
+ cap.syntax = CS_OPT_SYNTAX_ATT
+ return cap
+
+def get_runtime_kernel():
+ import drgn
+ return drgn.program_from_kernel()
+
+def check_dependencies_or_skip(modules, script_name="unknown test"):
+ for mod in modules:
+ try:
+ __import__(mod)
+ except ImportError:
+ ksft.test_result_skip(f"Skipping {script_name}: missing module '{mod}'")
+ ksft.finished()
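
get_patch_sites() above walks a patch-site section (.retpoline_sites / .return_sites) as an array of self-relative s32 entries: each entry, added to its own offset within the section, yields the address of a patch site. A hedged stand-alone illustration in C (made-up addresses and values):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch only: site address = address of the s32 entry + its signed value. */
static uint64_t resolve_site(uint64_t section_addr, size_t idx,
			     const int32_t *entries)
{
	return section_addr + idx * sizeof(int32_t) + entries[idx];
}

int main(void)
{
	const int32_t entries[] = { -0x1000, 0x2340 };	/* made-up values */
	uint64_t section_addr = 0xffffffff82aa0000ULL;	/* made-up address */

	for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		printf("site %zu: 0x%llx\n", i,
		       (unsigned long long)resolve_site(section_addr, i, entries));
	return 0;
}
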
diff --git a/tools/testing/selftests/x86/bugs/its_indirect_alignment.py b/tools/testing/selftests/x86/bugs/its_indirect_alignment.py
new file mode 100644
index 000000000000..cdc33ae6a91c
--- /dev/null
+++ b/tools/testing/selftests/x86/bugs/its_indirect_alignment.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2025 Intel Corporation
+#
+# Test for indirect target selection (ITS) mitigation.
+#
+# Test if indirect CALL/JMP are correctly patched by evaluating
+# the vmlinux .retpoline_sites in /proc/kcore.
+
+# Install dependencies
+# add-apt-repository ppa:michel-slm/kernel-utils
+# apt update
+# apt install -y python3-drgn python3-pyelftools python3-capstone
+#
+# Best to copy the vmlinux at a standard location:
+# mkdir -p /usr/lib/debug/lib/modules/$(uname -r)
+# cp $VMLINUX /usr/lib/debug/lib/modules/$(uname -r)/vmlinux
+#
+# Usage: ./its_indirect_alignment.py [vmlinux]
+
+import os, sys, argparse
+from pathlib import Path
+
+this_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, this_dir + '/../../kselftest')
+import ksft
+import common as c
+
+bug = "indirect_target_selection"
+
+mitigation = c.get_sysfs(bug)
+if not mitigation or "Aligned branch/return thunks" not in mitigation:
+ ksft.test_result_skip("Skipping its_indirect_alignment.py: Aligned branch/return thunks not enabled")
+ ksft.finished()
+
+if c.sysfs_has("spectre_v2", "Retpolines"):
+ ksft.test_result_skip("Skipping its_indirect_alignment.py: Retpolines deployed")
+ ksft.finished()
+
+c.check_dependencies_or_skip(['drgn', 'elftools', 'capstone'], script_name="its_indirect_alignment.py")
+
+from elftools.elf.elffile import ELFFile
+from drgn.helpers.common.memory import identify_address
+
+cap = c.init_capstone()
+
+if len(os.sys.argv) > 1:
+ arg_vmlinux = os.sys.argv[1]
+ if not os.path.exists(arg_vmlinux):
+ ksft.test_result_fail(f"its_indirect_alignment.py: vmlinux not found at argument path: {arg_vmlinux}")
+ ksft.exit_fail()
+ os.makedirs(f"/usr/lib/debug/lib/modules/{os.uname().release}", exist_ok=True)
+ os.system(f'cp {arg_vmlinux} /usr/lib/debug/lib/modules/$(uname -r)/vmlinux')
+
+vmlinux = f"/usr/lib/debug/lib/modules/{os.uname().release}/vmlinux"
+if not os.path.exists(vmlinux):
+ ksft.test_result_fail(f"its_indirect_alignment.py: vmlinux not found at {vmlinux}")
+ ksft.exit_fail()
+
+ksft.print_msg(f"Using vmlinux: {vmlinux}")
+
+retpolines_start_vmlinux, retpolines_sec_offset, size = c.get_section_info(vmlinux, '.retpoline_sites')
+ksft.print_msg(f"vmlinux: Section .retpoline_sites (0x{retpolines_start_vmlinux:x}) found at 0x{retpolines_sec_offset:x} with size 0x{size:x}")
+
+sites_offset = c.get_patch_sites(vmlinux, retpolines_sec_offset, size)
+total_retpoline_tests = len(sites_offset)
+ksft.print_msg(f"Found {total_retpoline_tests} retpoline sites")
+
+prog = c.get_runtime_kernel()
+retpolines_start_kcore = prog.symbol('__retpoline_sites').address
+ksft.print_msg(f'kcore: __retpoline_sites: 0x{retpolines_start_kcore:x}')
+
+x86_indirect_its_thunk_r15 = prog.symbol('__x86_indirect_its_thunk_r15').address
+ksft.print_msg(f'kcore: __x86_indirect_its_thunk_r15: 0x{x86_indirect_its_thunk_r15:x}')
+
+tests_passed = 0
+tests_failed = 0
+tests_unknown = 0
+
+with open(vmlinux, 'rb') as f:
+ elffile = ELFFile(f)
+ text_section = elffile.get_section_by_name('.text')
+
+ for i in range(0, len(sites_offset)):
+ site = retpolines_start_kcore + sites_offset[i]
+ vmlinux_site = retpolines_start_vmlinux + sites_offset[i]
+ passed = unknown = failed = False
+ try:
+ vmlinux_insn = c.get_instruction_from_vmlinux(elffile, text_section, text_section['sh_addr'], vmlinux_site)
+ kcore_insn = list(cap.disasm(prog.read(site, 16), site))[0]
+ operand = kcore_insn.op_str
+ insn_end = site + kcore_insn.size - 1 # TODO handle Jcc.32 __x86_indirect_thunk_\reg
+ safe_site = insn_end & 0x20
+ site_status = "" if safe_site else "(unsafe)"
+
+ ksft.print_msg(f"\nSite {i}: {identify_address(prog, site)} <0x{site:x}> {site_status}")
+ ksft.print_msg(f"\tvmlinux: 0x{vmlinux_insn.address:x}:\t{vmlinux_insn.mnemonic}\t{vmlinux_insn.op_str}")
+ ksft.print_msg(f"\tkcore: 0x{kcore_insn.address:x}:\t{kcore_insn.mnemonic}\t{kcore_insn.op_str}")
+
+ if (site & 0x20) ^ (insn_end & 0x20):
+ ksft.print_msg(f"\tSite at safe/unsafe boundary: {str(kcore_insn.bytes)} {kcore_insn.mnemonic} {operand}")
+ if safe_site:
+ tests_passed += 1
+ passed = True
+ ksft.print_msg(f"\tPASSED: At safe address")
+ continue
+
+ if operand.startswith('0xffffffff'):
+ thunk = int(operand, 16)
+ if thunk > x86_indirect_its_thunk_r15:
+ insn_at_thunk = list(cap.disasm(prog.read(thunk, 16), thunk))[0]
+ operand += ' -> ' + insn_at_thunk.mnemonic + ' ' + insn_at_thunk.op_str + ' <dynamic-thunk?>'
+ if 'jmp' in insn_at_thunk.mnemonic and thunk & 0x20:
+ ksft.print_msg(f"\tPASSED: Found {operand} at safe address")
+ passed = True
+ if not passed:
+ if kcore_insn.operands[0].type == capstone.CS_OP_IMM:
+ operand += ' <' + prog.symbol(int(operand, 16)) + '>'
+ if '__x86_indirect_its_thunk_' in operand:
+ ksft.print_msg(f"\tPASSED: Found {operand}")
+ else:
+ ksft.print_msg(f"\tPASSED: Found direct branch: {kcore_insn}, ITS thunk not required.")
+ passed = True
+ else:
+ unknown = True
+ if passed:
+ tests_passed += 1
+ elif unknown:
+ ksft.print_msg(f"UNKNOWN: unexpected operand: {kcore_insn}")
+ tests_unknown += 1
+ else:
+ ksft.print_msg(f'\t************* FAILED *************')
+ ksft.print_msg(f"\tFound {kcore_insn.bytes} {kcore_insn.mnemonic} {operand}")
+ ksft.print_msg(f'\t**********************************')
+ tests_failed += 1
+ except Exception as e:
+ ksft.print_msg(f"UNKNOWN: An unexpected error occurred: {e}")
+ tests_unknown += 1
+
+ksft.print_msg(f"\n\nSummary:")
+ksft.print_msg(f"PASS: \t{tests_passed} \t/ {total_retpoline_tests}")
+ksft.print_msg(f"FAIL: \t{tests_failed} \t/ {total_retpoline_tests}")
+ksft.print_msg(f"UNKNOWN: \t{tests_unknown} \t/ {total_retpoline_tests}")
+
+if tests_failed == 0:
+ ksft.test_result_pass("All ITS return thunk sites passed")
+else:
+ ksft.test_result_fail(f"{tests_failed} ITS return thunk sites failed")
+ksft.finished()
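
The magic 0x20 above encodes the "safe address" rule these scripts check: a site passes when the last byte of the instruction (or of the thunk it branches to) falls in the upper half of its 64-byte cacheline, which is what the aligned-thunk mitigation arranges. A hedged restatement of that predicate in C:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: mirrors the script's `insn_end & 0x20` test. */
static bool its_safe_address(uint64_t insn_addr, unsigned int insn_len)
{
	uint64_t last_byte = insn_addr + insn_len - 1;

	/* Bytes 0x20..0x3f of a 64-byte cacheline are the "safe" half. */
	return last_byte & 0x20;
}
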
diff --git a/tools/testing/selftests/x86/bugs/its_permutations.py b/tools/testing/selftests/x86/bugs/its_permutations.py
new file mode 100644
index 000000000000..3204f4728c62
--- /dev/null
+++ b/tools/testing/selftests/x86/bugs/its_permutations.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2025 Intel Corporation
+#
+# Test for indirect target selection (ITS) cmdline permutations with other bugs
+# like spectre_v2 and retbleed.
+
+import os, sys, subprocess, itertools, re, shutil
+
+test_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, test_dir + '/../../kselftest')
+import ksft
+import common as c
+
+bug = "indirect_target_selection"
+mitigation = c.get_sysfs(bug)
+
+if not mitigation or "Not affected" in mitigation:
+ ksft.test_result_skip("Skipping its_permutations.py: not applicable")
+ ksft.finished()
+
+if shutil.which('vng') is None:
+ ksft.test_result_skip("Skipping its_permutations.py: virtme-ng ('vng') not found in PATH.")
+ ksft.finished()
+
+TEST = f"{test_dir}/its_sysfs.py"
+default_kparam = ['clearcpuid=hypervisor', 'panic=5', 'panic_on_warn=1', 'oops=panic', 'nmi_watchdog=1', 'hung_task_panic=1']
+
+DEBUG = " -v "
+
+# Install dependencies
+# https://github.com/arighi/virtme-ng
+# apt install virtme-ng
+BOOT_CMD = f"vng --run {test_dir}/../../../../../arch/x86/boot/bzImage "
+#BOOT_CMD += DEBUG
+
+bug = "indirect_target_selection"
+
+input_options = {
+ 'indirect_target_selection' : ['off', 'on', 'stuff', 'vmexit'],
+ 'retbleed' : ['off', 'stuff', 'auto'],
+ 'spectre_v2' : ['off', 'on', 'eibrs', 'retpoline', 'ibrs', 'eibrs,retpoline'],
+}
+
+def pretty_print(output):
+ OKBLUE = '\033[94m'
+ OKGREEN = '\033[92m'
+ WARNING = '\033[93m'
+ FAIL = '\033[91m'
+ ENDC = '\033[0m'
+ BOLD = '\033[1m'
+
+ # Define patterns and their corresponding colors
+ patterns = {
+ r"^ok \d+": OKGREEN,
+ r"^not ok \d+": FAIL,
+ r"^# Testing .*": OKBLUE,
+ r"^# Found: .*": WARNING,
+ r"^# Totals: .*": BOLD,
+ r"pass:([1-9]\d*)": OKGREEN,
+ r"fail:([1-9]\d*)": FAIL,
+ r"skip:([1-9]\d*)": WARNING,
+ }
+
+ # Apply colors based on patterns
+ for pattern, color in patterns.items():
+ output = re.sub(pattern, lambda match: f"{color}{match.group(0)}{ENDC}", output, flags=re.MULTILINE)
+
+ print(output)
+
+combinations = list(itertools.product(*input_options.values()))
+ksft.print_header()
+ksft.set_plan(len(combinations))
+
+logs = ""
+
+for combination in combinations:
+ append = ""
+ log = ""
+ for p in default_kparam:
+ append += f' --append={p}'
+ command = BOOT_CMD + append
+ test_params = ""
+ for i, key in enumerate(input_options.keys()):
+ param = f'{key}={combination[i]}'
+ test_params += f' {param}'
+ command += f" --append={param}"
+ command += f" -- {TEST}"
+ test_name = f"{bug} {test_params}"
+ pretty_print(f'# Testing {test_name}')
+ t = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ t.wait()
+ output, _ = t.communicate()
+ if t.returncode == 0:
+ ksft.test_result_pass(test_name)
+ else:
+ ksft.test_result_fail(test_name)
+ output = output.decode()
+ log += f" {output}"
+ pretty_print(log)
+ logs += output + "\n"
+
+# Optionally use tappy to parse the output
+# apt install python3-tappy
+with open("logs.txt", "w") as f:
+ f.write(logs)
+
+ksft.finished()
diff --git a/tools/testing/selftests/x86/bugs/its_ret_alignment.py b/tools/testing/selftests/x86/bugs/its_ret_alignment.py
new file mode 100644
index 000000000000..f40078d9f6ff
--- /dev/null
+++ b/tools/testing/selftests/x86/bugs/its_ret_alignment.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2025 Intel Corporation
+#
+# Test for indirect target selection (ITS) mitigation.
+#
+# Tests if the RETs are correctly patched by evaluating the
+# vmlinux .return_sites in /proc/kcore.
+#
+# Install dependencies
+# add-apt-repository ppa:michel-slm/kernel-utils
+# apt update
+# apt install -y python3-drgn python3-pyelftools python3-capstone
+#
+# Run on target machine
+# mkdir -p /usr/lib/debug/lib/modules/$(uname -r)
+# cp $VMLINUX /usr/lib/debug/lib/modules/$(uname -r)/vmlinux
+#
+# Usage: ./its_ret_alignment.py
+
+import os, sys, argparse
+from pathlib import Path
+
+this_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, this_dir + '/../../kselftest')
+import ksft
+import common as c
+
+bug = "indirect_target_selection"
+mitigation = c.get_sysfs(bug)
+if not mitigation or "Aligned branch/return thunks" not in mitigation:
+ ksft.test_result_skip("Skipping its_ret_alignment.py: Aligned branch/return thunks not enabled")
+ ksft.finished()
+
+c.check_dependencies_or_skip(['drgn', 'elftools', 'capstone'], script_name="its_ret_alignment.py")
+
+from elftools.elf.elffile import ELFFile
+from drgn.helpers.common.memory import identify_address
+
+cap = c.init_capstone()
+
+if len(os.sys.argv) > 1:
+ arg_vmlinux = os.sys.argv[1]
+ if not os.path.exists(arg_vmlinux):
+ ksft.test_result_fail(f"its_ret_alignment.py: vmlinux not found at user-supplied path: {arg_vmlinux}")
+ ksft.exit_fail()
+ os.makedirs(f"/usr/lib/debug/lib/modules/{os.uname().release}", exist_ok=True)
+ os.system(f'cp {arg_vmlinux} /usr/lib/debug/lib/modules/$(uname -r)/vmlinux')
+
+vmlinux = f"/usr/lib/debug/lib/modules/{os.uname().release}/vmlinux"
+if not os.path.exists(vmlinux):
+ ksft.test_result_fail(f"its_ret_alignment.py: vmlinux not found at {vmlinux}")
+ ksft.exit_fail()
+
+ksft.print_msg(f"Using vmlinux: {vmlinux}")
+
+rethunks_start_vmlinux, rethunks_sec_offset, size = c.get_section_info(vmlinux, '.return_sites')
+ksft.print_msg(f"vmlinux: Section .return_sites (0x{rethunks_start_vmlinux:x}) found at 0x{rethunks_sec_offset:x} with size 0x{size:x}")
+
+sites_offset = c.get_patch_sites(vmlinux, rethunks_sec_offset, size)
+total_rethunk_tests = len(sites_offset)
+ksft.print_msg(f"Found {total_rethunk_tests} rethunk sites")
+
+prog = c.get_runtime_kernel()
+rethunks_start_kcore = prog.symbol('__return_sites').address
+ksft.print_msg(f'kcore: __return_sites: 0x{rethunks_start_kcore:x}')
+
+its_return_thunk = prog.symbol('its_return_thunk').address
+ksft.print_msg(f'kcore: its_return_thunk: 0x{its_return_thunk:x}')
+
+tests_passed = 0
+tests_failed = 0
+tests_unknown = 0
+tests_skipped = 0
+
+with open(vmlinux, 'rb') as f:
+ elffile = ELFFile(f)
+ text_section = elffile.get_section_by_name('.text')
+
+ for i in range(len(sites_offset)):
+ site = rethunks_start_kcore + sites_offset[i]
+ vmlinux_site = rethunks_start_vmlinux + sites_offset[i]
+ try:
+ passed = unknown = failed = skipped = False
+
+ symbol = identify_address(prog, site)
+ vmlinux_insn = c.get_instruction_from_vmlinux(elffile, text_section, text_section['sh_addr'], vmlinux_site)
+ kcore_insn = list(cap.disasm(prog.read(site, 16), site))[0]
+
+ insn_end = site + kcore_insn.size - 1
+
+ safe_site = insn_end & 0x20
+ site_status = "" if safe_site else "(unsafe)"
+
+ ksft.print_msg(f"\nSite {i}: {symbol} <0x{site:x}> {site_status}")
+ ksft.print_msg(f"\tvmlinux: 0x{vmlinux_insn.address:x}:\t{vmlinux_insn.mnemonic}\t{vmlinux_insn.op_str}")
+ ksft.print_msg(f"\tkcore: 0x{kcore_insn.address:x}:\t{kcore_insn.mnemonic}\t{kcore_insn.op_str}")
+
+ if safe_site:
+ tests_passed += 1
+ passed = True
+ ksft.print_msg(f"\tPASSED: At safe address")
+ continue
+
+ if "jmp" in kcore_insn.mnemonic:
+ passed = True
+ elif "ret" not in kcore_insn.mnemonic:
+ skipped = True
+
+ if passed:
+ ksft.print_msg(f"\tPASSED: Found {kcore_insn.mnemonic} {kcore_insn.op_str}")
+ tests_passed += 1
+ elif skipped:
+ ksft.print_msg(f"\tSKIPPED: Found '{kcore_insn.mnemonic}'")
+ tests_skipped += 1
+ elif unknown:
+ ksft.print_msg(f"UNKNOWN: An unknown instruction: {kcore_insn}")
+ tests_unknown += 1
+ else:
+ ksft.print_msg(f'\t************* FAILED *************')
+ ksft.print_msg(f"\tFound {kcore_insn.mnemonic} {kcore_insn.op_str}")
+ ksft.print_msg(f'\t**********************************')
+ tests_failed += 1
+ except Exception as e:
+ ksft.print_msg(f"UNKNOWN: An unexpected error occurred: {e}")
+ tests_unknown += 1
+
+ksft.print_msg(f"\n\nSummary:")
+ksft.print_msg(f"PASSED: \t{tests_passed} \t/ {total_rethunk_tests}")
+ksft.print_msg(f"FAILED: \t{tests_failed} \t/ {total_rethunk_tests}")
+ksft.print_msg(f"SKIPPED: \t{tests_skipped} \t/ {total_rethunk_tests}")
+ksft.print_msg(f"UNKNOWN: \t{tests_unknown} \t/ {total_rethunk_tests}")
+
+if tests_failed == 0:
+ ksft.test_result_pass("All ITS return thunk sites passed.")
+else:
+ ksft.test_result_fail(f"{tests_failed} failed sites need ITS return thunks.")
+ksft.finished()
diff --git a/tools/testing/selftests/x86/bugs/its_sysfs.py b/tools/testing/selftests/x86/bugs/its_sysfs.py
new file mode 100644
index 000000000000..7bca81f2f606
--- /dev/null
+++ b/tools/testing/selftests/x86/bugs/its_sysfs.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2025 Intel Corporation
+#
+# Test for Indirect Target Selection (ITS) mitigation sysfs status.
+
+import sys, os, re
+this_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, this_dir + '/../../kselftest')
+import ksft
+
+from common import *
+
+bug = "indirect_target_selection"
+mitigation = get_sysfs(bug)
+
+ITS_MITIGATION_ALIGNED_THUNKS = "Mitigation: Aligned branch/return thunks"
+ITS_MITIGATION_RETPOLINE_STUFF = "Mitigation: Retpolines, Stuffing RSB"
+ITS_MITIGATION_VMEXIT_ONLY = "Mitigation: Vulnerable, KVM: Not affected"
+ITS_MITIGATION_VULNERABLE = "Vulnerable"
+
+def check_mitigation():
+ if mitigation == ITS_MITIGATION_ALIGNED_THUNKS:
+ if cmdline_has(f'{bug}=stuff') and sysfs_has("spectre_v2", "Retpolines"):
+ bug_check_fail(bug, ITS_MITIGATION_ALIGNED_THUNKS, ITS_MITIGATION_RETPOLINE_STUFF)
+ return
+ if cmdline_has(f'{bug}=vmexit') and cpuinfo_has('its_native_only'):
+ bug_check_fail(bug, ITS_MITIGATION_ALIGNED_THUNKS, ITS_MITIGATION_VMEXIT_ONLY)
+ return
+ bug_check_pass(bug, ITS_MITIGATION_ALIGNED_THUNKS)
+ return
+
+ if mitigation == ITS_MITIGATION_RETPOLINE_STUFF:
+ if cmdline_has(f'{bug}=stuff') and sysfs_has("spectre_v2", "Retpolines"):
+ bug_check_pass(bug, ITS_MITIGATION_RETPOLINE_STUFF)
+ return
+ if sysfs_has('retbleed', 'Stuffing'):
+ bug_check_pass(bug, ITS_MITIGATION_RETPOLINE_STUFF)
+ return
+ bug_check_fail(bug, ITS_MITIGATION_RETPOLINE_STUFF, ITS_MITIGATION_ALIGNED_THUNKS)
+
+ if mitigation == ITS_MITIGATION_VMEXIT_ONLY:
+ if cmdline_has(f'{bug}=vmexit') and cpuinfo_has('its_native_only'):
+ bug_check_pass(bug, ITS_MITIGATION_VMEXIT_ONLY)
+ return
+ bug_check_fail(bug, ITS_MITIGATION_VMEXIT_ONLY, ITS_MITIGATION_ALIGNED_THUNKS)
+
+ if mitigation == ITS_MITIGATION_VULNERABLE:
+ if sysfs_has("spectre_v2", "Vulnerable"):
+ bug_check_pass(bug, ITS_MITIGATION_VULNERABLE)
+ else:
+ bug_check_fail(bug, "Mitigation", ITS_MITIGATION_VULNERABLE)
+
+ bug_status_unknown(bug, mitigation)
+ return
+
+ksft.print_header()
+ksft.set_plan(1)
+ksft.print_msg(f'{bug}: {mitigation} ...')
+
+if not basic_checks_sufficient(bug, mitigation):
+ check_mitigation()
+
+ksft.finished()
diff --git a/tools/testing/selftests/x86/sigtrap_loop.c b/tools/testing/selftests/x86/sigtrap_loop.c
new file mode 100644
index 000000000000..9d065479e89f
--- /dev/null
+++ b/tools/testing/selftests/x86/sigtrap_loop.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#define _GNU_SOURCE
+
+#include <err.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ucontext.h>
+
+#ifdef __x86_64__
+# define REG_IP REG_RIP
+#else
+# define REG_IP REG_EIP
+#endif
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), int flags)
+{
+ struct sigaction sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = handler;
+ sa.sa_flags = SA_SIGINFO | flags;
+ sigemptyset(&sa.sa_mask);
+
+ if (sigaction(sig, &sa, 0))
+ err(1, "sigaction");
+
+ return;
+}
+
+static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
+{
+ ucontext_t *ctx = (ucontext_t *)ctx_void;
+ static unsigned int loop_count_on_same_ip;
+ static unsigned long last_trap_ip;
+
+ if (last_trap_ip == ctx->uc_mcontext.gregs[REG_IP]) {
+ printf("\tTrapped at %016lx\n", last_trap_ip);
+
+ /*
+ * If the same IP is hit more than 10 times in a row, it is
+ * _considered_ an infinite loop.
+ */
+ if (++loop_count_on_same_ip > 10) {
+ printf("[FAIL]\tDetected SIGTRAP infinite loop\n");
+ exit(1);
+ }
+
+ return;
+ }
+
+ loop_count_on_same_ip = 0;
+ last_trap_ip = ctx->uc_mcontext.gregs[REG_IP];
+ printf("\tTrapped at %016lx\n", last_trap_ip);
+}
+
+int main(int argc, char *argv[])
+{
+ sethandler(SIGTRAP, sigtrap, 0);
+
+ /*
+ * Set the Trap Flag (TF) to single-step the test code, so that a
+ * SIGTRAP signal is raised after each instruction until the TF is
+ * cleared.
+ *
+ * Because the arithmetic flags are not significant here, the TF is
+ * set by pushing 0x302 onto the stack and then popping it into the
+ * flags register.
+ *
+ * Four instructions in the following asm code are executed with the
+ * TF set, thus the SIGTRAP handler is expected to run four times.
+ */
+ printf("[RUN]\tSIGTRAP infinite loop detection\n");
+ asm volatile(
+#ifdef __x86_64__
+ /*
+ * Avoid clobbering the redzone
+ *
+ * Equivalent to "sub $128, %rsp", but -128 can be encoded as a
+ * single-byte immediate, while 128 needs a 4-byte one.
+ */
+ "add $-128, %rsp\n\t"
+#endif
+ "push $0x302\n\t"
+ "popf\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "push $0x202\n\t"
+ "popf\n\t"
+#ifdef __x86_64__
+ "sub $-128, %rsp\n\t"
+#endif
+ );
+
+ printf("[OK]\tNo SIGTRAP infinite loop detected\n");
+ return 0;
+}
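
The two EFLAGS images pushed by the test decompose as follows; a hedged breakdown with the relevant bits defined locally (not taken from kernel headers):

#include <stdio.h>

#define X86_EFLAGS_FIXED	0x002UL	/* bit 1, reserved, always set */
#define X86_EFLAGS_TF		0x100UL	/* trap flag: single-step */
#define X86_EFLAGS_IF		0x200UL	/* interrupts enabled */

int main(void)
{
	/* popf 0x302 starts single-stepping, popf 0x202 ends it. */
	printf("0x302 = 0x%03lx (IF | TF | fixed)\n",
	       X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_FIXED);
	printf("0x202 = 0x%03lx (IF | fixed, TF clear)\n",
	       X86_EFLAGS_IF | X86_EFLAGS_FIXED);
	return 0;
}
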
diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
index c5b9da034511..1d5bbc8464f1 100644
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -735,6 +735,8 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
(void)adjust_next;
}
+static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}
+
static inline void vma_iter_free(struct vma_iterator *vmi)
{
mas_destroy(&vmi->mas);
diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
index 0b7f5bf546da..0c22ff7a8de2 100644
--- a/tools/testing/vsock/vsock_test.c
+++ b/tools/testing/vsock/vsock_test.c
@@ -1283,21 +1283,25 @@ static void test_unsent_bytes_client(const struct test_opts *opts, int type)
send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
control_expectln("RECEIVED");
- ret = ioctl(fd, SIOCOUTQ, &sock_bytes_unsent);
- if (ret < 0) {
- if (errno == EOPNOTSUPP) {
- fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n");
- } else {
+ /* SIOCOUTQ isn't guaranteed to instantly track sent data. Even though
+ * the "RECEIVED" message means that the other side has received the
+ * data, there can be a delay in our kernel before updating the "unsent
+ * bytes" counter. Repeat SIOCOUTQ until it returns 0.
+ */
+ timeout_begin(TIMEOUT);
+ do {
+ ret = ioctl(fd, SIOCOUTQ, &sock_bytes_unsent);
+ if (ret < 0) {
+ if (errno == EOPNOTSUPP) {
+ fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n");
+ break;
+ }
perror("ioctl");
exit(EXIT_FAILURE);
}
- } else if (ret == 0 && sock_bytes_unsent != 0) {
- fprintf(stderr,
- "Unexpected 'SIOCOUTQ' value, expected 0, got %i\n",
- sock_bytes_unsent);
- exit(EXIT_FAILURE);
- }
-
+ timeout_check("SIOCOUTQ");
+ } while (sock_bytes_unsent != 0);
+ timeout_end();
close(fd);
}
diff --git a/tools/verification/rv/src/in_kernel.c b/tools/verification/rv/src/in_kernel.c
index f04479ecc96c..ced72950cb1e 100644
--- a/tools/verification/rv/src/in_kernel.c
+++ b/tools/verification/rv/src/in_kernel.c
@@ -353,7 +353,7 @@ ikm_event_handler(struct trace_seq *s, struct tep_record *record,
if (config_has_id && (config_my_pid == id))
return 0;
- else if (config_my_pid && (config_my_pid == pid))
+ else if (config_my_pid == pid)
return 0;
tep_print_event(trace_event->tep, s, record, "%16s-%-8d ", TEP_PRINT_COMM, TEP_PRINT_PID);
@@ -595,7 +595,7 @@ static int parse_arguments(char *monitor_name, int argc, char **argv)
config_reactor = optarg;
break;
case 's':
- config_my_pid = 0;
+ config_my_pid = -1;
break;
case 't':
config_trace = 1;