Diffstat (limited to 'tools')
-rw-r--r--tools/arch/x86/include/asm/inat.h2
-rw-r--r--tools/arch/x86/include/asm/insn.h46
-rw-r--r--tools/arch/x86/include/asm/msr-index.h2
-rw-r--r--tools/arch/x86/include/asm/nops.h81
-rw-r--r--tools/arch/x86/kcpuid/Makefile24
-rw-r--r--tools/arch/x86/kcpuid/cpuid.csv400
-rw-r--r--tools/arch/x86/kcpuid/kcpuid.c657
-rw-r--r--tools/arch/x86/lib/inat.c2
-rw-r--r--tools/arch/x86/lib/insn.c230
-rw-r--r--tools/bpf/Makefile.helpers60
-rw-r--r--tools/bpf/bpf_dbg.c2
-rw-r--r--tools/bpf/bpf_exp.y14
-rw-r--r--tools/bpf/bpftool/.gitignore1
-rw-r--r--tools/bpf/bpftool/Documentation/Makefile11
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-gen.rst78
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool17
-rw-r--r--tools/bpf/bpftool/btf.c41
-rw-r--r--tools/bpf/bpftool/btf_dumper.c1
-rw-r--r--tools/bpf/bpftool/common.c1
-rw-r--r--tools/bpf/bpftool/feature.c4
-rw-r--r--tools/bpf/bpftool/gen.c72
-rw-r--r--tools/bpf/bpftool/main.c3
-rw-r--r--tools/bpf/bpftool/map.c2
-rw-r--r--tools/bpf/bpftool/net.c2
-rw-r--r--tools/bpf/bpftool/prog.c1
-rw-r--r--tools/bpf/bpftool/xlated_dumper.c3
-rw-r--r--tools/bpf/resolve_btfids/main.c11
-rw-r--r--tools/bpf/runqslower/Makefile9
-rw-r--r--tools/bpf/runqslower/runqslower.bpf.c33
-rw-r--r--tools/build/Build.include24
-rw-r--r--tools/build/Makefile.feature28
-rw-r--r--tools/build/feature/Makefile4
-rw-r--r--tools/build/feature/test-libtraceevent.c12
-rwxr-xr-xtools/debugging/kernel-chktaint2
-rw-r--r--tools/iio/Makefile1
-rw-r--r--tools/iio/iio_event_monitor.c69
-rw-r--r--tools/iio/iio_generic_buffer.c153
-rw-r--r--tools/iio/iio_utils.c18
-rw-r--r--tools/iio/iio_utils.h9
-rw-r--r--tools/include/asm-generic/hugetlb_encode.h3
-rw-r--r--tools/include/linux/kconfig.h73
-rw-r--r--tools/include/linux/math64.h75
-rw-r--r--tools/include/linux/static_call_types.h18
-rw-r--r--tools/include/linux/types.h3
-rw-r--r--tools/include/uapi/linux/bpf.h850
-rw-r--r--tools/include/uapi/linux/btf.h5
-rw-r--r--tools/include/uapi/linux/perf_event.h15
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/Makefile3
-rw-r--r--tools/lib/bpf/bpf_core_read.h16
-rw-r--r--tools/lib/bpf/bpf_helpers.h40
-rw-r--r--tools/lib/bpf/bpf_tracing.h58
-rw-r--r--tools/lib/bpf/btf.c768
-rw-r--r--tools/lib/bpf/btf.h9
-rw-r--r--tools/lib/bpf/btf_dump.c12
-rw-r--r--tools/lib/bpf/libbpf.c905
-rw-r--r--tools/lib/bpf/libbpf.h20
-rw-r--r--tools/lib/bpf/libbpf.map12
-rw-r--r--tools/lib/bpf/libbpf_internal.h85
-rw-r--r--tools/lib/bpf/libbpf_util.h47
-rw-r--r--tools/lib/bpf/linker.c2883
-rw-r--r--tools/lib/bpf/strset.c176
-rw-r--r--tools/lib/bpf/strset.h21
-rw-r--r--tools/lib/bpf/xsk.c258
-rw-r--r--tools/lib/bpf/xsk.h87
-rw-r--r--tools/lib/perf/Documentation/libperf.txt3
-rw-r--r--tools/lib/perf/evsel.c80
-rw-r--r--tools/lib/perf/include/internal/evsel.h1
-rw-r--r--tools/lib/perf/include/internal/mmap.h3
-rw-r--r--tools/lib/perf/include/internal/tests.h32
-rw-r--r--tools/lib/perf/include/internal/xyarray.h9
-rw-r--r--tools/lib/perf/include/perf/bpf_perf.h31
-rw-r--r--tools/lib/perf/include/perf/event.h7
-rw-r--r--tools/lib/perf/include/perf/evsel.h3
-rw-r--r--tools/lib/perf/libperf.map3
-rw-r--r--tools/lib/perf/mmap.c88
-rw-r--r--tools/lib/perf/tests/Makefile6
-rw-r--r--tools/lib/perf/tests/test-evsel.c66
-rw-r--r--tools/memory-model/Documentation/access-marking.txt479
-rw-r--r--tools/memory-model/Documentation/glossary.txt2
-rw-r--r--tools/memory-model/Documentation/simple.txt1
-rw-r--r--tools/objtool/arch/x86/decode.c424
-rw-r--r--tools/objtool/arch/x86/include/arch/cfi_regs.h12
-rw-r--r--tools/objtool/arch/x86/include/arch/special.h2
-rw-r--r--tools/objtool/builtin-check.c43
-rw-r--r--tools/objtool/builtin-orc.c5
-rw-r--r--tools/objtool/check.c236
-rw-r--r--tools/objtool/elf.c289
-rw-r--r--tools/objtool/include/objtool/arch.h5
-rw-r--r--tools/objtool/include/objtool/builtin.h5
-rw-r--r--tools/objtool/include/objtool/check.h3
-rw-r--r--tools/objtool/include/objtool/elf.h13
-rw-r--r--tools/objtool/include/objtool/objtool.h1
-rw-r--r--tools/objtool/objtool.c65
-rw-r--r--tools/objtool/orc_gen.c33
-rw-r--r--tools/objtool/special.c12
-rwxr-xr-xtools/objtool/sync-check.sh18
-rw-r--r--tools/perf/.gitignore1
-rw-r--r--tools/perf/Documentation/intel-hybrid.txt214
-rw-r--r--tools/perf/Documentation/perf-annotate.txt7
-rw-r--r--tools/perf/Documentation/perf-buildid-cache.txt2
-rw-r--r--tools/perf/Documentation/perf-config.txt11
-rw-r--r--tools/perf/Documentation/perf-data.txt5
-rw-r--r--tools/perf/Documentation/perf-iostat.txt88
-rw-r--r--tools/perf/Documentation/perf-record.txt1
-rw-r--r--tools/perf/Documentation/perf-report.txt10
-rw-r--r--tools/perf/Documentation/perf-stat.txt29
-rw-r--r--tools/perf/Documentation/perf-top.txt2
-rw-r--r--tools/perf/Documentation/perf.txt12
-rw-r--r--tools/perf/Documentation/topdown.txt18
-rw-r--r--tools/perf/MANIFEST2
-rw-r--r--tools/perf/Makefile5
-rw-r--r--tools/perf/Makefile.config31
-rw-r--r--tools/perf/Makefile.perf16
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c78
-rw-r--r--tools/perf/arch/arm64/util/Build1
-rw-r--r--tools/perf/arch/arm64/util/kvm-stat.c4
-rw-r--r--tools/perf/arch/arm64/util/machine.c6
-rw-r--r--tools/perf/arch/arm64/util/perf_regs.c2
-rw-r--r--tools/perf/arch/arm64/util/pmu.c25
-rw-r--r--tools/perf/arch/arm64/util/unwind-libunwind.c4
-rw-r--r--tools/perf/arch/mips/Makefile22
-rw-r--r--tools/perf/arch/mips/entry/syscalls/mksyscalltbl32
-rw-r--r--tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl358
-rw-r--r--tools/perf/arch/mips/include/dwarf-regs-table.h31
-rw-r--r--tools/perf/arch/mips/include/perf_regs.h84
-rw-r--r--tools/perf/arch/mips/util/Build3
-rw-r--r--tools/perf/arch/mips/util/dwarf-regs.c38
-rw-r--r--tools/perf/arch/mips/util/perf_regs.c6
-rw-r--r--tools/perf/arch/mips/util/unwind-libunwind.c22
-rw-r--r--tools/perf/arch/powerpc/util/Build2
-rw-r--r--tools/perf/arch/powerpc/util/event.c53
-rw-r--r--tools/perf/arch/powerpc/util/evsel.c8
-rw-r--r--tools/perf/arch/powerpc/util/kvm-stat.c2
-rw-r--r--tools/perf/arch/powerpc/util/utils_header.h2
-rw-r--r--tools/perf/arch/x86/tests/bp-modify.c2
-rw-r--r--tools/perf/arch/x86/tests/insn-x86.c9
-rw-r--r--tools/perf/arch/x86/util/Build1
-rw-r--r--tools/perf/arch/x86/util/archinsn.c9
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c6
-rw-r--r--tools/perf/arch/x86/util/iostat.c470
-rw-r--r--tools/perf/arch/x86/util/perf_regs.c4
-rw-r--r--tools/perf/bench/epoll-wait.c4
-rw-r--r--tools/perf/bench/inject-buildid.c2
-rw-r--r--tools/perf/bench/numa.c2
-rw-r--r--tools/perf/builtin-annotate.c41
-rw-r--r--tools/perf/builtin-daemon.c3
-rw-r--r--tools/perf/builtin-data.c26
-rw-r--r--tools/perf/builtin-diff.c2
-rw-r--r--tools/perf/builtin-lock.c2
-rw-r--r--tools/perf/builtin-record.c55
-rw-r--r--tools/perf/builtin-report.c43
-rw-r--r--tools/perf/builtin-sched.c2
-rw-r--r--tools/perf/builtin-script.c22
-rw-r--r--tools/perf/builtin-stat.c130
-rw-r--r--tools/perf/builtin-top.c22
-rwxr-xr-xtools/perf/check-headers.sh16
-rw-r--r--tools/perf/command-list.txt1
-rw-r--r--tools/perf/examples/bpf/augmented_raw_syscalls.c4
-rw-r--r--tools/perf/jvmti/jvmti_agent.c4
-rw-r--r--tools/perf/perf-iostat.sh12
-rw-r--r--tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json228
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/branch.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/bus.json62
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cache.json128
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cycle.json5
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/exception.json29
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/instruction.json131
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/memory.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/other.json188
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/pipeline.json194
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/sve.json110
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json233
-rw-r--r--tools/perf/pmu-events/arch/arm64/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/powerpc/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/cache.json47
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/floating_point.json7
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/frontend.json217
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/locks.json12
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/marked.json147
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/memory.json192
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/others.json297
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/pipeline.json297
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/pmc.json22
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/translation.json57
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/metrics.json12
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/metrics.json134
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/cache.json48
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/core.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/floating-point.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/memory.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/other.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/recommended.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/branch.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/cache.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/core.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/memory.json86
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/recommended.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/branch.json53
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/cache.json402
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/core.json137
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/data-fabric.json98
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/floating-point.json139
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/memory.json428
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/other.json103
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/recommended.json214
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv5
-rw-r--r--tools/perf/pmu-events/jevents.c4
-rw-r--r--tools/perf/scripts/python/netdev-times.py2
-rw-r--r--tools/perf/tests/attr.c4
-rw-r--r--tools/perf/tests/bp_signal.c6
-rw-r--r--tools/perf/tests/code-reading.c2
-rw-r--r--tools/perf/tests/demangle-ocaml-test.c8
-rw-r--r--tools/perf/tests/evsel-roundtrip-name.c19
-rw-r--r--tools/perf/tests/hists_cumulate.c4
-rw-r--r--tools/perf/tests/hists_filter.c14
-rw-r--r--tools/perf/tests/make22
-rw-r--r--tools/perf/tests/parse-events.c173
-rw-r--r--tools/perf/tests/parse-metric.c12
-rw-r--r--tools/perf/tests/perf-time-to-tsc.c12
-rw-r--r--tools/perf/tests/pmu-events.c83
-rwxr-xr-xtools/perf/tests/shell/buildid.sh65
-rwxr-xr-xtools/perf/tests/shell/daemon.sh54
-rwxr-xr-xtools/perf/tests/shell/stat+csv_summary.sh31
-rwxr-xr-xtools/perf/tests/shell/stat+shadow_stat.sh3
-rwxr-xr-xtools/perf/tests/shell/stat_bpf_counters.sh31
-rw-r--r--tools/perf/tests/switch-tracking.c6
-rw-r--r--tools/perf/tests/topology.c15
-rwxr-xr-xtools/perf/trace/beauty/fsconfig.sh7
-rw-r--r--tools/perf/trace/beauty/include/linux/socket.h2
-rwxr-xr-xtools/perf/trace/beauty/tracepoints/x86_msr.sh2
-rw-r--r--tools/perf/ui/browsers/annotate.c27
-rw-r--r--tools/perf/ui/browsers/hists.c19
-rw-r--r--tools/perf/ui/stdio/hist.c15
-rw-r--r--tools/perf/util/Build5
-rw-r--r--tools/perf/util/annotate.c46
-rw-r--r--tools/perf/util/annotate.h2
-rw-r--r--tools/perf/util/bpf-loader.c2
-rw-r--r--tools/perf/util/bpf_counter.c544
-rw-r--r--tools/perf/util/bpf_counter.h9
-rw-r--r--tools/perf/util/bpf_skel/bperf.h14
-rw-r--r--tools/perf/util/bpf_skel/bperf_follower.bpf.c69
-rw-r--r--tools/perf/util/bpf_skel/bperf_leader.bpf.c46
-rw-r--r--tools/perf/util/bpf_skel/bperf_u.h14
-rw-r--r--tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c3
-rw-r--r--tools/perf/util/call-path.h2
-rw-r--r--tools/perf/util/callchain.c2
-rw-r--r--tools/perf/util/config.c9
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c40
-rw-r--r--tools/perf/util/cs-etm.c285
-rw-r--r--tools/perf/util/cs-etm.h36
-rw-r--r--tools/perf/util/data-convert-bt.c4
-rw-r--r--tools/perf/util/data-convert-bt.h11
-rw-r--r--tools/perf/util/data-convert-json.c384
-rw-r--r--tools/perf/util/data-convert.h10
-rw-r--r--tools/perf/util/demangle-java.c4
-rw-r--r--tools/perf/util/demangle-ocaml.c12
-rw-r--r--tools/perf/util/dso.h2
-rw-r--r--tools/perf/util/dwarf-aux.c6
-rw-r--r--tools/perf/util/dwarf-aux.h2
-rw-r--r--tools/perf/util/dwarf-regs.c3
-rw-r--r--tools/perf/util/event.h3
-rw-r--r--tools/perf/util/events_stats.h15
-rw-r--r--tools/perf/util/evlist-hybrid.c88
-rw-r--r--tools/perf/util/evlist-hybrid.h14
-rw-r--r--tools/perf/util/evlist.c38
-rw-r--r--tools/perf/util/evlist.h2
-rw-r--r--tools/perf/util/evsel.c38
-rw-r--r--tools/perf/util/evsel.h34
-rw-r--r--tools/perf/util/expr.h2
-rw-r--r--tools/perf/util/header.c18
-rw-r--r--tools/perf/util/hist.c37
-rw-r--r--tools/perf/util/hist.h8
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c17
-rw-r--r--tools/perf/util/intel-pt.c2
-rw-r--r--tools/perf/util/iostat.c53
-rw-r--r--tools/perf/util/iostat.h47
-rw-r--r--tools/perf/util/jitdump.c30
-rw-r--r--tools/perf/util/levenshtein.c2
-rw-r--r--tools/perf/util/libunwind/arm64.c2
-rw-r--r--tools/perf/util/libunwind/x86_32.c2
-rw-r--r--tools/perf/util/llvm-utils.c2
-rw-r--r--tools/perf/util/machine.c12
-rw-r--r--tools/perf/util/map.h4
-rw-r--r--tools/perf/util/mem-events.h3
-rw-r--r--tools/perf/util/metricgroup.c14
-rw-r--r--tools/perf/util/metricgroup.h4
-rw-r--r--tools/perf/util/parse-events-hybrid.c178
-rw-r--r--tools/perf/util/parse-events-hybrid.h23
-rw-r--r--tools/perf/util/parse-events.c115
-rw-r--r--tools/perf/util/parse-events.h9
-rw-r--r--tools/perf/util/parse-events.l2
-rw-r--r--tools/perf/util/parse-events.y9
-rw-r--r--tools/perf/util/pmu-hybrid.c89
-rw-r--r--tools/perf/util/pmu-hybrid.h22
-rw-r--r--tools/perf/util/pmu.c73
-rw-r--r--tools/perf/util/pmu.h8
-rw-r--r--tools/perf/util/probe-event.c4
-rw-r--r--tools/perf/util/probe-finder.c6
-rw-r--r--tools/perf/util/python-ext-sources2
-rw-r--r--tools/perf/util/python.c6
-rw-r--r--tools/perf/util/s390-cpumsf.c10
-rw-r--r--tools/perf/util/s390-sample-raw.c4
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c2
-rw-r--r--tools/perf/util/session.c41
-rw-r--r--tools/perf/util/session.h3
-rw-r--r--tools/perf/util/sort.c60
-rw-r--r--tools/perf/util/sort.h2
-rw-r--r--tools/perf/util/stat-display.c64
-rw-r--r--tools/perf/util/stat-shadow.c21
-rw-r--r--tools/perf/util/stat.c3
-rw-r--r--tools/perf/util/stat.h8
-rw-r--r--tools/perf/util/strbuf.h2
-rw-r--r--tools/perf/util/strfilter.h4
-rw-r--r--tools/perf/util/symbol-elf.c2
-rw-r--r--tools/perf/util/symbol_fprintf.c2
-rw-r--r--tools/perf/util/synthetic-events.c4
-rw-r--r--tools/perf/util/syscalltbl.c4
-rw-r--r--tools/perf/util/target.h7
-rw-r--r--tools/perf/util/thread-stack.h1
-rw-r--r--tools/perf/util/tsc.c30
-rw-r--r--tools/perf/util/tsc.h4
-rw-r--r--tools/perf/util/units.c21
-rw-r--r--tools/perf/util/units.h1
-rw-r--r--tools/perf/util/unwind-libunwind-local.c2
-rw-r--r--tools/power/acpi/common/cmfsize.c2
-rwxr-xr-xtools/power/pm-graph/sleepgraph.py2
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c30
-rw-r--r--tools/power/x86/intel-speed-select/isst-display.c12
-rw-r--r--tools/scripts/Makefile.include15
-rw-r--r--tools/spi/Makefile5
-rw-r--r--tools/testing/ktest/examples/vmware.conf137
-rwxr-xr-xtools/testing/ktest/ktest.pl571
-rwxr-xr-xtools/testing/kunit/kunit.py4
-rw-r--r--tools/testing/kunit/kunit_kernel.py2
-rwxr-xr-xtools/testing/kunit/kunit_tool_test.py6
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/arm64/Makefile2
-rw-r--r--tools/testing/selftests/arm64/bti/.gitignore2
-rw-r--r--tools/testing/selftests/arm64/bti/Makefile61
-rw-r--r--tools/testing/selftests/arm64/bti/assembler.h80
-rw-r--r--tools/testing/selftests/arm64/bti/btitest.h23
-rw-r--r--tools/testing/selftests/arm64/bti/compiler.h21
-rw-r--r--tools/testing/selftests/arm64/bti/gen/.gitignore2
-rw-r--r--tools/testing/selftests/arm64/bti/signal.c37
-rw-r--r--tools/testing/selftests/arm64/bti/signal.h21
-rw-r--r--tools/testing/selftests/arm64/bti/start.S14
-rw-r--r--tools/testing/selftests/arm64/bti/syscall.S23
-rw-r--r--tools/testing/selftests/arm64/bti/system.c22
-rw-r--r--tools/testing/selftests/arm64/bti/system.h28
-rw-r--r--tools/testing/selftests/arm64/bti/test.c234
-rw-r--r--tools/testing/selftests/arm64/bti/teststubs.S39
-rw-r--r--tools/testing/selftests/arm64/bti/trampoline.S29
-rw-r--r--tools/testing/selftests/arm64/mte/Makefile15
-rw-r--r--tools/testing/selftests/arm64/mte/check_ksm_options.c5
-rw-r--r--tools/testing/selftests/arm64/mte/check_user_mem.c3
-rw-r--r--tools/testing/selftests/arm64/mte/mte_common_util.c39
-rw-r--r--tools/testing/selftests/bpf/.gitignore2
-rw-r--r--tools/testing/selftests/bpf/Makefile79
-rw-r--r--tools/testing/selftests/bpf/Makefile.docs82
-rw-r--r--tools/testing/selftests/bpf/README.rst71
-rw-r--r--tools/testing/selftests/bpf/bpf_tcp_helpers.h29
-rw-r--r--tools/testing/selftests/bpf/btf_helpers.c4
-rw-r--r--tools/testing/selftests/bpf/config2
-rw-r--r--tools/testing/selftests/bpf/get_cgroup_id_user.c6
-rw-r--r--tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c109
-rw-r--r--tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c158
-rw-r--r--tools/testing/selftests/bpf/prog_tests/attach_probe.c40
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf.c176
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dump.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_endian.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_link.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_reloc.c52
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fentry_test.c52
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c58
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_sleep.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_test.c52
-rw-r--r--tools/testing/selftests/bpf/prog_tests/for_each.c130
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfree_skb.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_call.c59
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_funcs.c42
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_maps.c30
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_vars.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_ptr.c15
-rw-r--r--tools/testing/selftests/bpf/prog_tests/mmap.c24
-rw-r--r--tools/testing/selftests/bpf/prog_tests/module_attach.c23
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c51
-rw-r--r--tools/testing/selftests/bpf/prog_tests/resolve_btfids.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ringbuf.c17
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c37
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_lookup.c83
-rw-r--r--tools/testing/selftests/bpf/prog_tests/snprintf.c125
-rw-r--r--tools/testing/selftests/bpf/prog_tests/snprintf_btf.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_basic.c40
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_listen.c144
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_sk.c65
-rw-r--r--tools/testing/selftests/bpf/prog_tests/static_linked.c40
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_local_storage.c92
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_ima.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_lsm.c61
-rw-r--r--tools/testing/selftests/bpf/progs/bind4_prog.c25
-rw-r--r--tools/testing/selftests/bpf/progs/bind6_prog.c25
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_cubic.c36
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_dctcp.c22
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c27
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c7
-rw-r--r--tools/testing/selftests/bpf/progs/core_reloc_types.h25
-rw-r--r--tools/testing/selftests/bpf/progs/fentry_test.c2
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_test.c4
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_array_map_elem.c61
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c95
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_test.c47
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c42
-rw-r--r--tools/testing/selftests/bpf/progs/linked_funcs1.c73
-rw-r--r--tools/testing/selftests/bpf/progs/linked_funcs2.c73
-rw-r--r--tools/testing/selftests/bpf/progs/linked_maps1.c82
-rw-r--r--tools/testing/selftests/bpf/progs/linked_maps2.c76
-rw-r--r--tools/testing/selftests/bpf/progs/linked_vars1.c54
-rw-r--r--tools/testing/selftests/bpf/progs/linked_vars2.c55
-rw-r--r--tools/testing/selftests/bpf/progs/loop6.c99
-rw-r--r--tools/testing/selftests/bpf/progs/map_ptr_kern.c4
-rw-r--r--tools/testing/selftests/bpf/progs/skb_pkt_end.c1
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c12
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_sk.c11
-rw-r--r--tools/testing/selftests/bpf/progs/task_local_storage.c64
-rw-r--r--tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c32
-rw-r--r--tools/testing/selftests/bpf/progs/task_ls_recursion.c70
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_size.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func10.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_mmap.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf_multi.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup.c62
-rw-r--r--tools/testing/selftests/bpf/progs/test_snprintf.c73
-rw-r--r--tools/testing/selftests/bpf/progs/test_snprintf_single.c20
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_listen.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_static_linked1.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_static_linked2.c31
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_tunnel.c113
-rw-r--r--tools/testing/selftests/bpf/progs/test_tunnel_kern.c2
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool_build.sh21
-rw-r--r--tools/testing/selftests/bpf/test_btf.h3
-rwxr-xr-xtools/testing/selftests/bpf/test_doc_build.sh13
-rw-r--r--tools/testing/selftests/bpf/test_progs.h63
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c2
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_tunnel.sh15
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c4
-rwxr-xr-xtools/testing/selftests/bpf/test_xsk.sh138
-rw-r--r--tools/testing/selftests/bpf/verifier/array_access.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_get_stack.c43
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c12
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c1
-rw-r--r--tools/testing/selftests/bpf/verifier/dead_code.c10
-rwxr-xr-xtools/testing/selftests/bpf/vmtest.sh59
-rw-r--r--tools/testing/selftests/bpf/xdpxceiver.c862
-rw-r--r--tools/testing/selftests/bpf/xdpxceiver.h98
-rwxr-xr-xtools/testing/selftests/bpf/xsk_prereqs.sh30
-rw-r--r--tools/testing/selftests/cgroup/test_kmem.c22
-rw-r--r--tools/testing/selftests/dma/dma_map_benchmark.c22
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh31
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh3
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/port_scale.sh6
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh82
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh4
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh7
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/q_in_vni_veto.sh77
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh4
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh4
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh6
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh21
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/tc_sample.sh657
-rw-r--r--tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh5
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh110
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/nexthop.sh620
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/psample.sh181
-rw-r--r--tools/testing/selftests/firmware/fw_namespace.c2
-rw-r--r--tools/testing/selftests/kvm/.gitignore2
-rw-r--r--tools/testing/selftests/kvm/Makefile6
-rw-r--r--tools/testing/selftests/kvm/aarch64/vgic_init.c551
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c69
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h13
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h21
-rw-r--r--tools/testing/selftests/kvm/kvm_page_table_test.c506
-rw-r--r--tools/testing/selftests/kvm/lib/assert.c4
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c138
-rw-r--r--tools/testing/selftests/kvm/lib/test_util.c163
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c61
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c2
-rw-r--r--tools/testing/selftests/landlock/.gitignore2
-rw-r--r--tools/testing/selftests/landlock/Makefile24
-rw-r--r--tools/testing/selftests/landlock/base_test.c266
-rw-r--r--tools/testing/selftests/landlock/common.h183
-rw-r--r--tools/testing/selftests/landlock/config7
-rw-r--r--tools/testing/selftests/landlock/fs_test.c2791
-rw-r--r--tools/testing/selftests/landlock/ptrace_test.c337
-rw-r--r--tools/testing/selftests/landlock/true.c5
-rw-r--r--tools/testing/selftests/lib.mk7
-rw-r--r--tools/testing/selftests/lkdtm/.gitignore1
-rw-r--r--tools/testing/selftests/lkdtm/Makefile1
-rwxr-xr-xtools/testing/selftests/lkdtm/stack-entropy.sh36
-rw-r--r--tools/testing/selftests/net/Makefile4
-rwxr-xr-xtools/testing/selftests/net/fib_nexthops.sh564
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh152
-rwxr-xr-xtools/testing/selftests/net/forwarding/dual_vxlan_bridge.sh366
-rw-r--r--tools/testing/selftests/net/forwarding/fib_offload_lib.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh361
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh14
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh2
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_lib.sh19
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_mpath_nh_res.sh400
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_police.sh56
-rw-r--r--tools/testing/selftests/net/mptcp/Makefile2
-rwxr-xr-xtools/testing/selftests/net/mptcp/diag.sh55
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_connect.c77
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh51
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh248
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_sockopt.sh276
-rwxr-xr-xtools/testing/selftests/net/mptcp/pm_netlink.sh6
-rw-r--r--tools/testing/selftests/net/mptcp/pm_nl_ctl.c34
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh13
-rw-r--r--tools/testing/selftests/net/settings1
-rw-r--r--tools/testing/selftests/net/so_txtime.c247
-rwxr-xr-xtools/testing/selftests/net/so_txtime.sh97
-rwxr-xr-xtools/testing/selftests/net/udpgro_fwd.sh251
-rwxr-xr-xtools/testing/selftests/net/veth.sh177
-rwxr-xr-xtools/testing/selftests/netfilter/nft_flowtable.sh82
-rw-r--r--tools/testing/selftests/perf_events/.gitignore3
-rw-r--r--tools/testing/selftests/perf_events/Makefile6
-rw-r--r--tools/testing/selftests/perf_events/config1
-rw-r--r--tools/testing/selftests/perf_events/remove_on_exec.c260
-rw-r--r--tools/testing/selftests/perf_events/settings1
-rw-r--r--tools/testing/selftests/perf_events/sigtrap_threads.c210
-rw-r--r--tools/testing/selftests/powerpc/alignment/alignment_handler.c11
-rw-r--r--tools/testing/selftests/powerpc/mm/Makefile1
-rwxr-xr-xtools/testing/selftests/powerpc/mm/stress_code_patching.sh49
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/ptrace/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/ptrace/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c635
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c79
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-perf-hwbreak.c659
-rw-r--r--tools/testing/selftests/powerpc/security/Makefile3
-rw-r--r--tools/testing/selftests/powerpc/security/entry_flush.c2
-rw-r--r--tools/testing/selftests/powerpc/security/flush_utils.c13
-rw-r--r--tools/testing/selftests/powerpc/security/flush_utils.h7
-rw-r--r--tools/testing/selftests/powerpc/security/rfi_flush.c2
-rw-r--r--tools/testing/selftests/powerpc/security/uaccess_flush.c158
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-trap.c4
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/cpus2use.sh1
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/jitter.sh20
-rw-r--r--tools/testing/selftests/rcutorture/bin/jitterstart.sh37
-rw-r--r--tools/testing/selftests/rcutorture/bin/jitterstop.sh23
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-again.sh199
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run-batch.sh67
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run-qemu.sh176
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh218
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-transform.sh54
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh98
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/torture.sh2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFLIST4
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-T (renamed from tools/testing/selftests/rcutorture/configs/rcu/SRCU-t)0
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-T.boot (renamed from tools/testing/selftests/rcutorture/configs/rcu/SRCU-t.boot)0
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-U (renamed from tools/testing/selftests/rcutorture/configs/rcu/SRCU-u)0
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-U.boot (renamed from tools/testing/selftests/rcutorture/configs/rcu/SRCU-u.boot)0
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/ver_functions.sh2
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/ver_functions.sh2
-rw-r--r--tools/testing/selftests/resctrl/.gitignore2
-rw-r--r--tools/testing/selftests/resctrl/Makefile2
-rw-r--r--tools/testing/selftests/resctrl/README4
-rw-r--r--tools/testing/selftests/resctrl/cache.c52
-rw-r--r--tools/testing/selftests/resctrl/cat_test.c57
-rw-r--r--tools/testing/selftests/resctrl/cmt_test.c (renamed from tools/testing/selftests/resctrl/cqm_test.c)75
-rw-r--r--tools/testing/selftests/resctrl/config2
-rw-r--r--tools/testing/selftests/resctrl/fill_buf.c4
-rw-r--r--tools/testing/selftests/resctrl/mba_test.c43
-rw-r--r--tools/testing/selftests/resctrl/mbm_test.c42
-rw-r--r--tools/testing/selftests/resctrl/resctrl.h29
-rw-r--r--tools/testing/selftests/resctrl/resctrl_tests.c163
-rw-r--r--tools/testing/selftests/resctrl/resctrl_val.c95
-rw-r--r--tools/testing/selftests/resctrl/resctrlfs.c134
-rw-r--r--tools/testing/selftests/sgx/defines.h2
-rw-r--r--tools/testing/selftests/sgx/load.c69
-rw-r--r--tools/testing/selftests/sgx/main.c26
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/police.json48
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/simple.json83
-rw-r--r--tools/testing/selftests/timens/gettime_perf.c8
-rw-r--r--tools/testing/selftests/timers/clocksource-switch.c4
-rw-r--r--tools/testing/selftests/timers/leap-a-day.c2
-rw-r--r--tools/testing/selftests/timers/leapcrash.c4
-rw-r--r--tools/testing/selftests/timers/threadtest.c2
-rw-r--r--tools/testing/selftests/vm/mremap_dontunmap.c52
-rwxr-xr-xtools/testing/selftests/vm/test_vmalloc.sh21
-rw-r--r--tools/testing/selftests/x86/thunks_32.S2
-rw-r--r--tools/thermal/tmon/Makefile2
-rw-r--r--tools/tracing/latency/latency-collector.c4
-rw-r--r--tools/usb/usbip/doc/usbip.842
-rw-r--r--tools/usb/usbip/doc/usbipd.826
-rw-r--r--tools/usb/usbip/libsrc/list.h10
-rw-r--r--tools/usb/usbip/src/usbip_list.c3
617 files changed, 37528 insertions, 4735 deletions
diff --git a/tools/arch/x86/include/asm/inat.h b/tools/arch/x86/include/asm/inat.h
index 877827b7c2c3..a61051400311 100644
--- a/tools/arch/x86/include/asm/inat.h
+++ b/tools/arch/x86/include/asm/inat.h
@@ -6,7 +6,7 @@
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
*/
-#include "inat_types.h"
+#include "inat_types.h" /* __ignore_sync_check__ */
/*
* Internal bits. Don't use bitmasks directly, because these bits are
diff --git a/tools/arch/x86/include/asm/insn.h b/tools/arch/x86/include/asm/insn.h
index cc777c185212..dc632b41f135 100644
--- a/tools/arch/x86/include/asm/insn.h
+++ b/tools/arch/x86/include/asm/insn.h
@@ -9,7 +9,7 @@
#include <asm/byteorder.h>
/* insn_attr_t is defined in inat.h */
-#include "inat.h"
+#include "inat.h" /* __ignore_sync_check__ */
#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
@@ -132,13 +132,25 @@ struct insn {
#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */
extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
-extern void insn_get_prefixes(struct insn *insn);
-extern void insn_get_opcode(struct insn *insn);
-extern void insn_get_modrm(struct insn *insn);
-extern void insn_get_sib(struct insn *insn);
-extern void insn_get_displacement(struct insn *insn);
-extern void insn_get_immediate(struct insn *insn);
-extern void insn_get_length(struct insn *insn);
+extern int insn_get_prefixes(struct insn *insn);
+extern int insn_get_opcode(struct insn *insn);
+extern int insn_get_modrm(struct insn *insn);
+extern int insn_get_sib(struct insn *insn);
+extern int insn_get_displacement(struct insn *insn);
+extern int insn_get_immediate(struct insn *insn);
+extern int insn_get_length(struct insn *insn);
+
+enum insn_mode {
+ INSN_MODE_32,
+ INSN_MODE_64,
+ /* Mode is determined by the current kernel build. */
+ INSN_MODE_KERN,
+ INSN_NUM_MODES,
+};
+
+extern int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m);
+
+#define insn_decode_kernel(_insn, _ptr) insn_decode((_insn), (_ptr), MAX_INSN_SIZE, INSN_MODE_KERN)
/* Attribute will be determined after getting ModRM (for opcode groups) */
static inline void insn_get_attribute(struct insn *insn)
@@ -149,17 +161,6 @@ static inline void insn_get_attribute(struct insn *insn)
/* Instruction uses RIP-relative addressing */
extern int insn_rip_relative(struct insn *insn);
-/* Init insn for kernel text */
-static inline void kernel_insn_init(struct insn *insn,
- const void *kaddr, int buf_len)
-{
-#ifdef CONFIG_X86_64
- insn_init(insn, kaddr, buf_len, 1);
-#else /* CONFIG_X86_32 */
- insn_init(insn, kaddr, buf_len, 0);
-#endif
-}
-
static inline int insn_is_avx(struct insn *insn)
{
if (!insn->prefixes.got)
@@ -179,13 +180,6 @@ static inline int insn_has_emulate_prefix(struct insn *insn)
return !!insn->emulate_prefix_size;
}
-/* Ensure this instruction is decoded completely */
-static inline int insn_complete(struct insn *insn)
-{
- return insn->opcode.got && insn->modrm.got && insn->sib.got &&
- insn->displacement.got && insn->immediate.got;
-}
-
static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
{
if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
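
For context, the insn.h change above collapses the old two-step flow (insn_init() followed by the insn_get_*() helpers and an insn_complete() check) into a single insn_decode() call that reports failure through its return value. A minimal sketch of a caller against this tools copy of the header; the helper name is illustrative, and the 0-on-success / negative-on-error convention is assumed from the new int-returning prototypes:

#include <stdio.h>

#include "insn.h"	/* tools/arch/x86/include/asm/insn.h */

/* Decode one instruction as 64-bit code and print its length. */
static int print_insn_length(const unsigned char *code, int size)
{
	struct insn insn;
	int ret;

	/* Assumption: returns 0 on success, negative on a bad instruction. */
	ret = insn_decode(&insn, code, size, INSN_MODE_64);
	if (ret < 0)
		return ret;

	printf("decoded %d byte(s)\n", insn.length);
	return 0;
}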
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 546d6ecf0a35..45029354e0a8 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -628,8 +628,6 @@
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
-#define MSR_IA32_TSCDEADLINE 0x000006e0
-
#define MSR_IA32_UCODE_WRITE 0x00000079
#define MSR_IA32_UCODE_REV 0x0000008b
diff --git a/tools/arch/x86/include/asm/nops.h b/tools/arch/x86/include/asm/nops.h
new file mode 100644
index 000000000000..c1e5e818ba16
--- /dev/null
+++ b/tools/arch/x86/include/asm/nops.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_NOPS_H
+#define _ASM_X86_NOPS_H
+
+/*
+ * Define nops for use with alternative() and for tracing.
+ */
+
+#ifndef CONFIG_64BIT
+
+/*
+ * Generic 32bit nops from GAS:
+ *
+ * 1: nop
+ * 2: movl %esi,%esi
+ * 3: leal 0x0(%esi),%esi
+ * 4: leal 0x0(%esi,%eiz,1),%esi
+ * 5: leal %ds:0x0(%esi,%eiz,1),%esi
+ * 6: leal 0x0(%esi),%esi
+ * 7: leal 0x0(%esi,%eiz,1),%esi
+ * 8: leal %ds:0x0(%esi,%eiz,1),%esi
+ *
+ * Except for 5 and 8, which are the DS-prefixed 4 and 7 respectively; for
+ * those, GAS would emit 2 nop instructions.
+ */
+#define BYTES_NOP1 0x90
+#define BYTES_NOP2 0x89,0xf6
+#define BYTES_NOP3 0x8d,0x76,0x00
+#define BYTES_NOP4 0x8d,0x74,0x26,0x00
+#define BYTES_NOP5 0x3e,BYTES_NOP4
+#define BYTES_NOP6 0x8d,0xb6,0x00,0x00,0x00,0x00
+#define BYTES_NOP7 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00
+#define BYTES_NOP8 0x3e,BYTES_NOP7
+
+#else
+
+/*
+ * Generic 64bit nops from GAS:
+ *
+ * 1: nop
+ * 2: osp nop
+ * 3: nopl (%eax)
+ * 4: nopl 0x00(%eax)
+ * 5: nopl 0x00(%eax,%eax,1)
+ * 6: osp nopl 0x00(%eax,%eax,1)
+ * 7: nopl 0x00000000(%eax)
+ * 8: nopl 0x00000000(%eax,%eax,1)
+ */
+#define BYTES_NOP1 0x90
+#define BYTES_NOP2 0x66,BYTES_NOP1
+#define BYTES_NOP3 0x0f,0x1f,0x00
+#define BYTES_NOP4 0x0f,0x1f,0x40,0x00
+#define BYTES_NOP5 0x0f,0x1f,0x44,0x00,0x00
+#define BYTES_NOP6 0x66,BYTES_NOP5
+#define BYTES_NOP7 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00
+#define BYTES_NOP8 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
+
+#endif /* CONFIG_64BIT */
+
+#ifdef __ASSEMBLY__
+#define _ASM_MK_NOP(x) .byte x
+#else
+#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
+#endif
+
+#define ASM_NOP1 _ASM_MK_NOP(BYTES_NOP1)
+#define ASM_NOP2 _ASM_MK_NOP(BYTES_NOP2)
+#define ASM_NOP3 _ASM_MK_NOP(BYTES_NOP3)
+#define ASM_NOP4 _ASM_MK_NOP(BYTES_NOP4)
+#define ASM_NOP5 _ASM_MK_NOP(BYTES_NOP5)
+#define ASM_NOP6 _ASM_MK_NOP(BYTES_NOP6)
+#define ASM_NOP7 _ASM_MK_NOP(BYTES_NOP7)
+#define ASM_NOP8 _ASM_MK_NOP(BYTES_NOP8)
+
+#define ASM_NOP_MAX 8
+
+#ifndef __ASSEMBLY__
+extern const unsigned char * const x86_nops[];
+#endif
+
+#endif /* _ASM_X86_NOPS_H */
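
Because each BYTES_NOPn macro expands to a comma-separated byte list, C code can drop it straight into an array initializer (the x86_nops[] table declared at the bottom of the header is typically populated this way). A small sketch; the array and function names are illustrative, and a 64-bit build (CONFIG_64BIT) is assumed for the byte values shown:

#include <string.h>

#include "nops.h"	/* tools/arch/x86/include/asm/nops.h */

/* One 5-byte NOP: 0x0f,0x1f,0x44,0x00,0x00 with the 64-bit definitions. */
static const unsigned char nop5[] = { BYTES_NOP5 };

/* Overwrite a 5-byte patch site (e.g. a rel32 call) with a single NOP. */
static void nop_out5(unsigned char *site)
{
	memcpy(site, nop5, sizeof(nop5));
}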
diff --git a/tools/arch/x86/kcpuid/Makefile b/tools/arch/x86/kcpuid/Makefile
new file mode 100644
index 000000000000..87b554fab14b
--- /dev/null
+++ b/tools/arch/x86/kcpuid/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for x86/kcpuid tool
+
+kcpuid : kcpuid.c
+
+CFLAGS = -Wextra
+
+BINDIR ?= /usr/sbin
+
+HWDATADIR ?= /usr/share/misc/
+
+override CFLAGS += -O2 -Wall -I../../../include
+
+%: %.c
+ $(CC) $(CFLAGS) -o $@ $< $(LDFLAGS)
+
+.PHONY : clean
+clean :
+ @rm -f kcpuid
+
+install : kcpuid
+ install -d $(DESTDIR)$(BINDIR)
+ install -m 755 -p kcpuid $(DESTDIR)$(BINDIR)/kcpuid
+ install -m 444 -p cpuid.csv $(HWDATADIR)/cpuid.csv
diff --git a/tools/arch/x86/kcpuid/cpuid.csv b/tools/arch/x86/kcpuid/cpuid.csv
new file mode 100644
index 000000000000..4f1c4b0c29e9
--- /dev/null
+++ b/tools/arch/x86/kcpuid/cpuid.csv
@@ -0,0 +1,400 @@
+# The basic row format is:
+# LEAF, SUBLEAF, register_name, bits, short_name, long_description
+
+# Leaf 00H
+ 0, 0, EAX, 31:0, max_basic_leafs, Maximum input value for basic CPUID leafs
+
+# Leaf 01H
+ 1, 0, EAX, 3:0, stepping, Stepping ID
+ 1, 0, EAX, 7:4, model, Model
+ 1, 0, EAX, 11:8, family, Family ID
+ 1, 0, EAX, 13:12, processor, Processor Type
+ 1, 0, EAX, 19:16, model_ext, Extended Model ID
+ 1, 0, EAX, 27:20, family_ext, Extended Family ID
+
+ 1, 0, EBX, 7:0, brand, Brand Index
+ 1, 0, EBX, 15:8, clflush_size, CLFLUSH line size (value * 8) in bytes
+ 1, 0, EBX, 23:16, max_cpu_id, Maximum number of addressable logical CPUs in this package
+ 1, 0, EBX, 31:24, apic_id, Initial APIC ID
+
+ 1, 0, ECX, 0, sse3, Streaming SIMD Extensions 3 (SSE3)
+ 1, 0, ECX, 1, pclmulqdq, PCLMULQDQ instruction supported
+ 1, 0, ECX, 2, dtes64, DS area uses 64-bit layout
+ 1, 0, ECX, 3, mwait, MONITOR/MWAIT supported
+ 1, 0, ECX, 4, ds_cpl, CPL Qualified Debug Store which allows for branch message storage qualified by CPL
+ 1, 0, ECX, 5, vmx, Virtual Machine Extensions supported
+ 1, 0, ECX, 6, smx, Safer Mode Extension supported
+ 1, 0, ECX, 7, eist, Enhanced Intel SpeedStep Technology
+ 1, 0, ECX, 8, tm2, Thermal Monitor 2
+ 1, 0, ECX, 9, ssse3, Supplemental Streaming SIMD Extensions 3 (SSSE3)
+ 1, 0, ECX, 10, l1_ctx_id, L1 data cache could be set to either adaptive mode or shared mode (check IA32_MISC_ENABLE bit 24 definition)
+ 1, 0, ECX, 11, sdbg, IA32_DEBUG_INTERFACE MSR for silicon debug supported
+ 1, 0, ECX, 12, fma, FMA extensions using YMM state supported
+ 1, 0, ECX, 13, cmpxchg16b, 'CMPXCHG16B - Compare and Exchange Bytes' supported
+ 1, 0, ECX, 14, xtpr_update, xTPR Update Control supported
+ 1, 0, ECX, 15, pdcm, Perfmon and Debug Capability present
+ 1, 0, ECX, 17, pcid, Process-Context Identifiers feature present
+ 1, 0, ECX, 18, dca, Prefetching data from a memory mapped device supported
+ 1, 0, ECX, 19, sse4_1, SSE4.1 feature present
+ 1, 0, ECX, 20, sse4_2, SSE4.2 feature present
+ 1, 0, ECX, 21, x2apic, x2APIC supported
+ 1, 0, ECX, 22, movbe, MOVBE instruction supported
+ 1, 0, ECX, 23, popcnt, POPCNT instruction supported
+ 1, 0, ECX, 24, tsc_deadline_timer, LAPIC supports one-shot operation using a TSC deadline value
+ 1, 0, ECX, 25, aesni, AESNI instruction supported
+ 1, 0, ECX, 26, xsave, XSAVE/XRSTOR processor extended states (XSETBV/XGETBV/XCR0)
+ 1, 0, ECX, 27, osxsave, OS has set CR4.OSXSAVE bit to enable XSETBV/XGETBV/XCR0
+ 1, 0, ECX, 28, avx, AVX instruction supported
+ 1, 0, ECX, 29, f16c, 16-bit floating-point conversion instruction supported
+ 1, 0, ECX, 30, rdrand, RDRAND instruction supported
+
+ 1, 0, EDX, 0, fpu, x87 FPU on chip
+ 1, 0, EDX, 1, vme, Virtual-8086 Mode Enhancement
+ 1, 0, EDX, 2, de, Debugging Extensions
+ 1, 0, EDX, 3, pse, Page Size Extensions
+ 1, 0, EDX, 4, tsc, Time Stamp Counter
+ 1, 0, EDX, 5, msr, RDMSR and WRMSR Support
+ 1, 0, EDX, 6, pae, Physical Address Extensions
+ 1, 0, EDX, 7, mce, Machine Check Exception
+ 1, 0, EDX, 8, cx8, CMPXCHG8B instr
+ 1, 0, EDX, 9, apic, APIC on Chip
+ 1, 0, EDX, 11, sep, SYSENTER and SYSEXIT instrs
+ 1, 0, EDX, 12, mtrr, Memory Type Range Registers
+ 1, 0, EDX, 13, pge, Page Global Bit
+ 1, 0, EDX, 14, mca, Machine Check Architecture
+ 1, 0, EDX, 15, cmov, Conditional Move Instrs
+ 1, 0, EDX, 16, pat, Page Attribute Table
+ 1, 0, EDX, 17, pse36, 36-Bit Page Size Extension
+ 1, 0, EDX, 18, psn, Processor Serial Number
+ 1, 0, EDX, 19, clflush, CLFLUSH instr
+# 1, 0, EDX, 20,
+ 1, 0, EDX, 21, ds, Debug Store
+ 1, 0, EDX, 22, acpi, Thermal Monitor and Software Controlled Clock Facilities
+ 1, 0, EDX, 23, mmx, Intel MMX Technology
+ 1, 0, EDX, 24, fxsr, XSAVE and FXRSTOR Instrs
+ 1, 0, EDX, 25, sse, SSE
+ 1, 0, EDX, 26, sse2, SSE2
+ 1, 0, EDX, 27, ss, Self Snoop
+ 1, 0, EDX, 28, htt, Max APIC IDs reserved field is valid (Hyper-Threading)
+ 1, 0, EDX, 29, tm, Thermal Monitor
+# 1, 0, EDX, 30,
+ 1, 0, EDX, 31, pbe, Pending Break Enable
+
+# Leaf 02H
+# cache and TLB descriptor info
+
+# Leaf 03H
+# Processor Serial Number, introduced on Pentium III, not valid on
+# later models
+
+# Leaf 04H
+# thread/core and cache topology
+ 4, 0, EAX, 4:0, cache_type, Cache type like instr/data or unified
+ 4, 0, EAX, 7:5, cache_level, Cache Level (starts at 1)
+ 4, 0, EAX, 8, cache_self_init, Cache Self Initialization
+ 4, 0, EAX, 9, fully_associate, Fully Associative cache
+# 4, 0, EAX, 13:10, resvd, resvd
+ 4, 0, EAX, 25:14, max_logical_id, Max number of addressable IDs for logical processors sharing the cache
+ 4, 0, EAX, 31:26, max_phy_id, Max number of addressable IDs for processors in phy package
+
+ 4, 0, EBX, 11:0, cache_linesize, Size of a cache line in bytes
+ 4, 0, EBX, 21:12, cache_partition, Physical Line partitions
+ 4, 0, EBX, 31:22, cache_ways, Ways of associativity
+ 4, 0, ECX, 31:0, cache_sets, Number of Sets - 1
+ 4, 0, EDX, 0, c_wbinvd, 1 means WBINVD/INVD is not guaranteed to act upon lower level caches of non-originating threads sharing this cache
+ 4, 0, EDX, 1, c_incl, Whether cache is inclusive of lower cache level
+ 4, 0, EDX, 2, c_comp_index, Complex Cache Indexing
+
+# Leaf 05H
+# MONITOR/MWAIT
+ 5, 0, EAX, 15:0, min_mon_size, Smallest monitor line size in bytes
+ 5, 0, EBX, 15:0, max_mon_size, Largest monitor line size in bytes
+ 5, 0, ECX, 0, mwait_ext, Enumeration of Monitor-MWAIT extensions supported
+ 5, 0, ECX, 1, mwait_irq_break, Interrupts as break-events for MWAIT supported, even when interrupts are disabled
+ 5, 0, EDX, 3:0, c0_sub_stats, Number of C0* sub C-states supported using MWAIT
+ 5, 0, EDX, 7:4, c1_sub_stats, Number of C1* sub C-states supported using MWAIT
+ 5, 0, EDX, 11:8, c2_sub_stats, Number of C2* sub C-states supported using MWAIT
+ 5, 0, EDX, 15:12, c3_sub_stats, Number of C3* sub C-states supported using MWAIT
+ 5, 0, EDX, 19:16, c4_sub_stats, Number of C4* sub C-states supported using MWAIT
+ 5, 0, EDX, 23:20, c5_sub_stats, Number of C5* sub C-states supported using MWAIT
+ 5, 0, EDX, 27:24, c6_sub_stats, Number of C6* sub C-states supported using MWAIT
+ 5, 0, EDX, 31:28, c7_sub_stats, Number of C7* sub C-states supported using MWAIT
+
+# Leaf 06H
+# Thermal & Power Management
+
+ 6, 0, EAX, 0, dig_temp, Digital temperature sensor supported
+ 6, 0, EAX, 1, turbo, Intel Turbo Boost
+ 6, 0, EAX, 2, arat, Always running APIC timer
+# 6, 0, EAX, 3, resv, Reserved
+ 6, 0, EAX, 4, pln, Power limit notifications supported
+ 6, 0, EAX, 5, ecmd, Clock modulation duty cycle extension supported
+ 6, 0, EAX, 6, ptm, Package thermal management supported
+ 6, 0, EAX, 7, hwp, HWP base register
+ 6, 0, EAX, 8, hwp_notify, HWP notification
+ 6, 0, EAX, 9, hwp_act_window, HWP activity window
+ 6, 0, EAX, 10, hwp_energy, HWP energy performance preference
+ 6, 0, EAX, 11, hwp_pkg_req, HWP package level request
+# 6, 0, EAX, 12, resv, Reserved
+ 6, 0, EAX, 13, hdc, HDC base registers supported
+ 6, 0, EAX, 14, turbo3, Turbo Boost Max 3.0
+ 6, 0, EAX, 15, hwp_cap, Highest Performance change supported
+ 6, 0, EAX, 16, hwp_peci, HWP PECI override is supported
+ 6, 0, EAX, 17, hwp_flex, Flexible HWP is supported
+ 6, 0, EAX, 18, hwp_fast, Fast access mode for the IA32_HWP_REQUEST MSR is supported
+# 6, 0, EAX, 19, resv, Reserved
+ 6, 0, EAX, 20, hwp_ignr, Ignoring Idle Logical Processor HWP request is supported
+
+ 6, 0, EBX, 3:0, therm_irq_thresh, Number of Interrupt Thresholds in Digital Thermal Sensor
+ 6, 0, ECX, 0, aperfmperf, Presence of IA32_MPERF and IA32_APERF
+ 6, 0, ECX, 3, energ_bias, Performance-energy bias preference supported
+
+# Leaf 07H
+# ECX == 0
+# AVX512 refers to https://en.wikipedia.org/wiki/AVX-512
+# XXX: Do we really need to enumerate each and every AVX512 sub-feature?
+
+ 7, 0, EBX, 0, fsgsbase, RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE supported
+ 7, 0, EBX, 1, tsc_adjust, TSC_ADJUST MSR supported
+ 7, 0, EBX, 2, sgx, Software Guard Extensions
+ 7, 0, EBX, 3, bmi1, BMI1
+ 7, 0, EBX, 4, hle, Hardware Lock Elision
+ 7, 0, EBX, 5, avx2, AVX2
+# 7, 0, EBX, 6, fdp_excp_only, x87 FPU Data Pointer updated only on x87 exceptions
+ 7, 0, EBX, 7, smep, Supervisor-Mode Execution Prevention
+ 7, 0, EBX, 8, bmi2, BMI2
+ 7, 0, EBX, 9, rep_movsb, Enhanced REP MOVSB/STOSB
+ 7, 0, EBX, 10, invpcid, INVPCID instruction
+ 7, 0, EBX, 11, rtm, Restricted Transactional Memory
+ 7, 0, EBX, 12, rdt_m, Intel RDT Monitoring capability
+ 7, 0, EBX, 13, depc_fpu_cs_ds, Deprecates FPU CS and FPU DS
+ 7, 0, EBX, 14, mpx, Memory Protection Extensions
+ 7, 0, EBX, 15, rdt_a, Intel RDT Allocation capability
+ 7, 0, EBX, 16, avx512f, AVX512 Foundation instr
+ 7, 0, EBX, 17, avx512dq, AVX512 Double and Quadword AVX512 instr
+ 7, 0, EBX, 18, rdseed, RDSEED instr
+ 7, 0, EBX, 19, adx, ADX instr
+ 7, 0, EBX, 20, smap, Supervisor Mode Access Prevention
+ 7, 0, EBX, 21, avx512ifma, AVX512 Integer Fused Multiply Add
+# 7, 0, EBX, 22, resvd, resvd
+ 7, 0, EBX, 23, clflushopt, CLFLUSHOPT instr
+ 7, 0, EBX, 24, clwb, CLWB instr
+ 7, 0, EBX, 25, intel_pt, Intel Processor Trace instr
+ 7, 0, EBX, 26, avx512pf, AVX512 Prefetch instr
+ 7, 0, EBX, 27, avx512er, AVX512 Exponential and Reciprocal instr
+ 7, 0, EBX, 28, avx512cd, AVX512 Conflict Detection instr
+ 7, 0, EBX, 29, sha, Intel Secure Hash Algorithm Extensions instr
+ 7, 0, EBX, 30, avx512bw, AVX512 Byte & Word instr
+ 7, 0, EBX, 31, avx512vl, AVX512 Vector Length Extensions (VL)
+ 7, 0, ECX, 0, prefetchwt1, PREFETCHWT1 instr
+ 7, 0, ECX, 1, avx512vbmi, AVX512 Vector Byte Manipulation Instructions
+ 7, 0, ECX, 2, umip, User-mode Instruction Prevention
+
+ 7, 0, ECX, 3, pku, Protection Keys for User-mode pages
+ 7, 0, ECX, 4, ospke, CR4 PKE set to enable protection keys
+# 7, 0, ECX, 16:5, resvd, resvd
+ 7, 0, ECX, 21:17, mawau, The value of MAWAU used by the BNDLDX and BNDSTX instructions in 64-bit mode
+ 7, 0, ECX, 22, rdpid, RDPID and IA32_TSC_AUX
+# 7, 0, ECX, 29:23, resvd, resvd
+ 7, 0, ECX, 30, sgx_lc, SGX Launch Configuration
+# 7, 0, ECX, 31, resvd, resvd
+
+# Leaf 08H
+#
+
+
+# Leaf 09H
+# Direct Cache Access (DCA) information
+ 9, 0, ECX, 31:0, dca_cap, The value of IA32_PLATFORM_DCA_CAP
+
+# Leaf 0AH
+# Architectural Performance Monitoring
+#
+# Do we really need to print out the PMU-related stuff?
+# Does a normal user really care about it?
+#
+ 0xA, 0, EAX, 7:0, pmu_ver, Performance Monitoring Unit version
+ 0xA, 0, EAX, 15:8, pmu_gp_cnt_num, Number of general-purpose PMU counters per logical CPU
+ 0xA, 0, EAX, 23:16, pmu_cnt_bits, Bit width of PMU counters
+ 0xA, 0, EAX, 31:24, pmu_ebx_bits, Length of EBX bit vector to enumerate PMU events
+
+ 0xA, 0, EBX, 0, pmu_no_core_cycle_evt, Core cycle event not available
+ 0xA, 0, EBX, 1, pmu_no_instr_ret_evt, Instruction retired event not available
+ 0xA, 0, EBX, 2, pmu_no_ref_cycle_evt, Reference cycles event not available
+ 0xA, 0, EBX, 3, pmu_no_llc_ref_evt, Last-level cache reference event not available
+ 0xA, 0, EBX, 4, pmu_no_llc_mis_evt, Last-level cache misses event not available
+ 0xA, 0, EBX, 5, pmu_no_br_instr_ret_evt, Branch instruction retired event not available
+ 0xA, 0, EBX, 6, pmu_no_br_mispredict_evt, Branch mispredict retired event not available
+
+ 0xA, 0, EDX, 4:0, pmu_fixed_cnt_num, Number of fixed-function performance counters
+ 0xA, 0, EDX, 12:5, pmu_fixed_cnt_bits, Bit width of fixed-function performance counters
+
+# Leaf 0BH
+# Extended Topology Enumeration Leaf
+#
+
+ 0xB, 0, EAX, 4:0, id_shift, Number of bits to shift right on x2APIC ID to get a unique topology ID of the next level type
+ 0xB, 0, EBX, 15:0, cpu_nr, Number of logical processors at this level type
+ 0xB, 0, ECX, 15:8, lvl_type, 0-Invalid 1-SMT 2-Core
+ 0xB, 0, EDX, 31:0, x2apic_id, x2APIC ID of the current logical processor
+
+
+# Leaf 0DH
+# Processor Extended State
+
+ 0xD, 0, EAX, 0, x87, X87 state
+ 0xD, 0, EAX, 1, sse, SSE state
+ 0xD, 0, EAX, 2, avx, AVX state
+ 0xD, 0, EAX, 4:3, mpx, MPX state
+ 0xD, 0, EAX, 7:5, avx512, AVX-512 state
+ 0xD, 0, EAX, 9, pkru, PKRU state
+
+ 0xD, 0, EBX, 31:0, max_sz_xcr0, Maximum size (bytes) required by enabled features in XCR0
+ 0xD, 0, ECX, 31:0, max_sz_xsave, Maximum size (bytes) of the XSAVE/XRSTOR save area
+
+ 0xD, 1, EAX, 0, xsaveopt, XSAVEOPT available
+ 0xD, 1, EAX, 1, xsavec, XSAVEC and compacted form supported
+ 0xD, 1, EAX, 2, xgetbv, XGETBV supported
+ 0xD, 1, EAX, 3, xsaves, XSAVES/XRSTORS and IA32_XSS supported
+
+ 0xD, 1, EBX, 31:0, max_sz_xcr0, Size (bytes) of the XSAVE area containing all states enabled by XCR0 | IA32_XSS
+ 0xD, 1, ECX, 8, pt, PT state
+ 0xD, 1, ECX, 11, cet_usr, CET user state
+ 0xD, 1, ECX, 12, cet_supv, CET supervisor state
+ 0xD, 1, ECX, 13, hdc, HDC state
+ 0xD, 1, ECX, 16, hwp, HWP state
+
+# Leaf 0FH
+# Intel RDT Monitoring
+
+ 0xF, 0, EBX, 31:0, rmid_range, Maximum range (zero-based) of RMID within this physical processor of all types
+ 0xF, 0, EDX, 1, l3c_rdt_mon, L3 Cache RDT Monitoring supported
+
+ 0xF, 1, ECX, 31:0, rmid_range, Maximum range (zero-based) of RMID of this resource type
+ 0xF, 1, EDX, 0, l3c_ocp_mon, L3 Cache occupancy Monitoring supported
+ 0xF, 1, EDX, 1, l3c_tbw_mon, L3 Cache Total Bandwidth Monitoring supported
+ 0xF, 1, EDX, 2, l3c_lbw_mon, L3 Cache Local Bandwidth Monitoring supported
+
+# Leaf 10H
+# Intel RDT Allocation
+
+ 0x10, 0, EBX, 1, l3c_rdt_alloc, L3 Cache Allocation supported
+ 0x10, 0, EBX, 2, l2c_rdt_alloc, L2 Cache Allocation supported
+ 0x10, 0, EBX, 3, mem_bw_alloc, Memory Bandwidth Allocation supported
+
+
+# Leaf 12H
+# SGX Capability
+#
+# Some detailed SGX features not added yet
+
+ 0x12, 0, EAX, 0, sgx1, SGX1 leaf functions supported
+ 0x12, 1, EAX, 0, sgx2, SGX2 leaf functions supported
+
+
+# Leaf 14H
+# Intel Processor Tracer
+#
+
+# Leaf 15H
+# Time Stamp Counter and Nominal Core Crystal Clock Information
+
+ 0x15, 0, EAX, 31:0, tsc_denominator, The denominator of the TSC/"core crystal clock" ratio
+ 0x15, 0, EBX, 31:0, tsc_numerator, The numerator of the TSC/"core crystal clock" ratio
+ 0x15, 0, ECX, 31:0, nom_freq, Nominal frequency of the core crystal clock in Hz
+
+# Leaf 16H
+# Processor Frequency Information
+
+ 0x16, 0, EAX, 15:0, cpu_base_freq, Processor Base Frequency in MHz
+ 0x16, 0, EBX, 15:0, cpu_max_freq, Maximum Frequency in MHz
+ 0x16, 0, ECX, 15:0, bus_freq, Bus (Reference) Frequency in MHz
+
+# Leaf 17H
+# System-On-Chip Vendor Attribute
+
+ 0x17, 0, EAX, 31:0, max_socid, Maximum input value of supported sub-leaf
+ 0x17, 0, EBX, 15:0, soc_vid, SOC Vendor ID
+ 0x17, 0, EBX, 16, std_vid, SOC Vendor ID is assigned via an industry standard scheme
+ 0x17, 0, ECX, 31:0, soc_pid, SOC Project ID assigned by vendor
+ 0x17, 0, EDX, 31:0, soc_sid, SOC Stepping ID
+
+# Leaf 18H
+# Deterministic Address Translation Parameters
+
+
+# Leaf 19H
+# Key Locker Leaf
+
+
+# Leaf 1AH
+# Hybrid Information
+
+ 0x1A, 0, EAX, 31:24, core_type, 20H-Intel_Atom 40H-Intel_Core
+
+
+# Leaf 1FH
+# V2 Extended Topology - A preferred superset to leaf 0BH
+
+
+# According to SDM
+# 40000000H - 4FFFFFFFH is invalid range
+
+
+# Leaf 80000001H
+# Extended Processor Signature and Feature Bits
+
+0x80000001, 0, ECX, 0, lahf_lm, LAHF/SAHF available in 64-bit mode
+0x80000001, 0, ECX, 5, lzcnt, LZCNT
+0x80000001, 0, ECX, 8, prefetchw, PREFETCHW
+
+0x80000001, 0, EDX, 11, sysret, SYSCALL/SYSRET supported
+0x80000001, 0, EDX, 20, exec_dis, Execute Disable Bit available
+0x80000001, 0, EDX, 26, 1gb_page, 1GB page supported
+0x80000001, 0, EDX, 27, rdtscp, RDTSCP and IA32_TSC_AUX are available
+#0x80000001, 0, EDX, 29, 64b, 64b Architecture supported
+
+# Leaf 80000002H/80000003H/80000004H
+# Processor Brand String
+
+# Leaf 80000005H
+# Reserved
+
+# Leaf 80000006H
+# Extended L2 Cache Features
+
+0x80000006, 0, ECX, 7:0, clsize, Cache Line size in bytes
+0x80000006, 0, ECX, 15:12, l2c_assoc, L2 Associativity
+0x80000006, 0, ECX, 31:16, csize, Cache size in 1K units
+
+
+# Leaf 80000007H
+
+0x80000007, 0, EDX, 8, nonstop_tsc, Invariant TSC available
+
+
+# Leaf 80000008H
+
+0x80000008, 0, EAX, 7:0, phy_adr_bits, Physical Address Bits
+0x80000008, 0, EAX, 15:8, lnr_adr_bits, Linear Address Bits
+0x80000008, 0, EBX, 9, wbnoinvd, WBNOINVD instruction supported
+
+# 0x8000001E
+# EAX: Extended APIC ID
+0x8000001E, 0, EAX, 31:0, extended_apic_id, Extended APIC ID
+# EBX: Core Identifiers
+0x8000001E, 0, EBX, 7:0, core_id, Identifies the logical core ID
+0x8000001E, 0, EBX, 15:8, threads_per_core, The number of threads per core is threads_per_core + 1
+# ECX: Node Identifiers
+0x8000001E, 0, ECX, 7:0, node_id, Node ID
+0x8000001E, 0, ECX, 10:8, nodes_per_processor, Nodes per processor { 0: 1 node, else reserved }
+
+# 8000001F: AMD Secure Encryption
+0x8000001F, 0, EAX, 0, sme, Secure Memory Encryption
+0x8000001F, 0, EAX, 1, sev, Secure Encrypted Virtualization
+0x8000001F, 0, EAX, 2, vmpgflush, VM Page Flush MSR
+0x8000001F, 0, EAX, 3, seves, SEV Encrypted State
+0x8000001F, 0, EBX, 5:0, c-bit, Page table bit number used to enable memory encryption
+0x8000001F, 0, EBX, 11:6, mem_encrypt_physaddr_width, Reduction of physical address space in bits with SME enabled
+0x8000001F, 0, ECX, 31:0, num_encrypted_guests, Maximum ASID value that may be used for an SEV-enabled guest
+0x8000001F, 0, EDX, 31:0, minimum_sev_asid, Minimum ASID value that must be used for an SEV-enabled, SEV-ES-disabled guest
diff --git a/tools/arch/x86/kcpuid/kcpuid.c b/tools/arch/x86/kcpuid/kcpuid.c
new file mode 100644
index 000000000000..dae75511fef7
--- /dev/null
+++ b/tools/arch/x86/kcpuid/kcpuid.c
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+typedef unsigned int u32;
+typedef unsigned long long u64;
+
+char *def_csv = "/usr/share/misc/cpuid.csv";
+char *user_csv;
+
+
+/* Cover both single-bit flag and multiple-bits fields */
+struct bits_desc {
+ /* start and end bits */
+ int start, end;
+ /* 0 or 1 for 1-bit flag */
+ int value;
+ char simp[32];
+ char detail[256];
+};
+
+/* descriptor info for eax/ebx/ecx/edx */
+struct reg_desc {
+ /* number of valid entries */
+ int nr;
+ struct bits_desc descs[32];
+};
+
+enum {
+ R_EAX = 0,
+ R_EBX,
+ R_ECX,
+ R_EDX,
+ NR_REGS
+};
+
+struct subleaf {
+ u32 index;
+ u32 sub;
+ u32 eax, ebx, ecx, edx;
+ struct reg_desc info[NR_REGS];
+};
+
+/* Represent one leaf (basic or extended) */
+struct cpuid_func {
+ /*
+	 * Array of subleafs for this func; if there are no subleafs,
+	 * leafs[0] is the main leaf.
+ */
+ struct subleaf *leafs;
+ int nr;
+};
+
+struct cpuid_range {
+ /* array of main leafs */
+ struct cpuid_func *funcs;
+ /* number of valid leafs */
+ int nr;
+ bool is_ext;
+};
+
+/*
+ * basic: basic functions range: [0... ]
+ * ext: extended functions range: [0x80000000... ]
+ */
+struct cpuid_range *leafs_basic, *leafs_ext;
+
+static int num_leafs;
+static bool is_amd;
+static bool show_details;
+static bool show_raw;
+static bool show_flags_only = true;
+static u32 user_index = 0xFFFFFFFF;
+static u32 user_sub = 0xFFFFFFFF;
+static int flines;
+
+static inline void cpuid(u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
+{
+ /* ecx is often an input as well as an output. */
+ asm volatile("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (*eax), "2" (*ecx));
+}
+
+static inline bool has_subleafs(u32 f)
+{
+ if (f == 0x7 || f == 0xd)
+ return true;
+
+ if (is_amd) {
+ if (f == 0x8000001d)
+ return true;
+ return false;
+ }
+
+ switch (f) {
+ case 0x4:
+ case 0xb:
+ case 0xf:
+ case 0x10:
+ case 0x14:
+ case 0x18:
+ case 0x1f:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void leaf_print_raw(struct subleaf *leaf)
+{
+ if (has_subleafs(leaf->index)) {
+ if (leaf->sub == 0)
+ printf("0x%08x: subleafs:\n", leaf->index);
+
+ printf(" %2d: EAX=0x%08x, EBX=0x%08x, ECX=0x%08x, EDX=0x%08x\n",
+ leaf->sub, leaf->eax, leaf->ebx, leaf->ecx, leaf->edx);
+ } else {
+ printf("0x%08x: EAX=0x%08x, EBX=0x%08x, ECX=0x%08x, EDX=0x%08x\n",
+ leaf->index, leaf->eax, leaf->ebx, leaf->ecx, leaf->edx);
+ }
+}
+
+/* Return true if the input eax/ebx/ecx/edx are all zero */
+static bool cpuid_store(struct cpuid_range *range, u32 f, int subleaf,
+ u32 a, u32 b, u32 c, u32 d)
+{
+ struct cpuid_func *func;
+ struct subleaf *leaf;
+ int s = 0;
+
+ if (a == 0 && b == 0 && c == 0 && d == 0)
+ return true;
+
+ /*
+ * Cut off vendor-prefix from CPUID function as we're using it as an
+ * index into ->funcs.
+ */
+ func = &range->funcs[f & 0xffff];
+
+	if (!func->leafs) {
+		func->leafs = malloc(sizeof(struct subleaf));
+		if (!func->leafs) {
+			perror("malloc func leaf");
+			exit(1);
+		}
+
+		func->nr = 1;
+	} else {
+		s = func->nr;
+		func->leafs = realloc(func->leafs, (s + 1) * sizeof(*leaf));
+		if (!func->leafs) {
+			perror("realloc f->leafs");
+			exit(1);
+		}
+
+		func->nr++;
+	}
+
+ leaf = &func->leafs[s];
+
+ leaf->index = f;
+ leaf->sub = subleaf;
+ leaf->eax = a;
+ leaf->ebx = b;
+ leaf->ecx = c;
+ leaf->edx = d;
+
+ return false;
+}
+
+static void raw_dump_range(struct cpuid_range *range)
+{
+ u32 f;
+ int i;
+
+ printf("%s Leafs :\n", range->is_ext ? "Extended" : "Basic");
+ printf("================\n");
+
+ for (f = 0; (int)f < range->nr; f++) {
+ struct cpuid_func *func = &range->funcs[f];
+ u32 index = f;
+
+ if (range->is_ext)
+ index += 0x80000000;
+
+ /* Skip leaf without valid items */
+ if (!func->nr)
+ continue;
+
+ /* First item is the main leaf, followed by all subleafs */
+ for (i = 0; i < func->nr; i++)
+ leaf_print_raw(&func->leafs[i]);
+ }
+}
+
+#define MAX_SUBLEAF_NUM 32
+static struct cpuid_range *setup_cpuid_range(u32 input_eax)
+{
+ u32 max_func, idx_func;
+ int subleaf;
+ struct cpuid_range *range;
+ u32 eax, ebx, ecx, edx;
+ u32 f = input_eax;
+ int max_subleaf;
+ bool allzero;
+
+ eax = input_eax;
+ ebx = ecx = edx = 0;
+
+ cpuid(&eax, &ebx, &ecx, &edx);
+ max_func = eax;
+ idx_func = (max_func & 0xffff) + 1;
+
+	range = malloc(sizeof(struct cpuid_range));
+	if (!range) {
+		perror("malloc range");
+		exit(1);
+	}
+
+ if (input_eax & 0x80000000)
+ range->is_ext = true;
+ else
+ range->is_ext = false;
+
+	range->funcs = malloc(sizeof(struct cpuid_func) * idx_func);
+	if (!range->funcs) {
+		perror("malloc range->funcs");
+		exit(1);
+	}
+
+ range->nr = idx_func;
+ memset(range->funcs, 0, sizeof(struct cpuid_func) * idx_func);
+
+ for (; f <= max_func; f++) {
+ eax = f;
+ subleaf = ecx = 0;
+
+ cpuid(&eax, &ebx, &ecx, &edx);
+ allzero = cpuid_store(range, f, subleaf, eax, ebx, ecx, edx);
+ if (allzero)
+ continue;
+ num_leafs++;
+
+ if (!has_subleafs(f))
+ continue;
+
+ max_subleaf = MAX_SUBLEAF_NUM;
+
+ /*
+ * Some can provide the exact number of subleafs,
+ * others have to be tried (0xf)
+ */
+ if (f == 0x7 || f == 0x14 || f == 0x17 || f == 0x18)
+ max_subleaf = (eax & 0xff) + 1;
+
+ if (f == 0xb)
+ max_subleaf = 2;
+
+ for (subleaf = 1; subleaf < max_subleaf; subleaf++) {
+ eax = f;
+ ecx = subleaf;
+
+ cpuid(&eax, &ebx, &ecx, &edx);
+ allzero = cpuid_store(range, f, subleaf,
+ eax, ebx, ecx, edx);
+ if (allzero)
+ continue;
+ num_leafs++;
+ }
+
+ }
+
+ return range;
+}
+
+/*
+ * The basic row format for cpuid.csv is
+ * LEAF,SUBLEAF,register_name,bits,short name,long description
+ *
+ * like:
+ * 0, 0, EAX, 31:0, max_basic_leafs, Max input value for supported subleafs
+ * 1, 0, ECX, 0, sse3, Streaming SIMD Extensions 3 (SSE3)
+ */
+static int parse_line(char *line)
+{
+ char *str;
+ int i;
+ struct cpuid_range *range;
+ struct cpuid_func *func;
+ struct subleaf *leaf;
+ u32 index;
+ u32 sub;
+ char buffer[512];
+ char *buf;
+ /*
+ * Tokens:
+ * 1. leaf
+ * 2. subleaf
+ * 3. register
+ * 4. bits
+ * 5. short name
+ * 6. long detail
+ */
+ char *tokens[6];
+ struct reg_desc *reg;
+ struct bits_desc *bdesc;
+ int reg_index;
+ char *start, *end;
+
+ /* Skip comments and NULL line */
+	/* Skip comments and blank lines */
+ return 0;
+
+ strncpy(buffer, line, 511);
+ buffer[511] = 0;
+ str = buffer;
+ for (i = 0; i < 5; i++) {
+ tokens[i] = strtok(str, ",");
+ if (!tokens[i])
+ goto err_exit;
+ str = NULL;
+ }
+ tokens[5] = strtok(str, "\n");
+ if (!tokens[5])
+ goto err_exit;
+
+ /* index/main-leaf */
+ index = strtoull(tokens[0], NULL, 0);
+
+ if (index & 0x80000000)
+ range = leafs_ext;
+ else
+ range = leafs_basic;
+
+ index &= 0x7FFFFFFF;
+ /* Skip line parsing for non-existing indexes */
+ if ((int)index >= range->nr)
+ return -1;
+
+ func = &range->funcs[index];
+
+ /* Return if the index has no valid item on this platform */
+ if (!func->nr)
+ return 0;
+
+ /* subleaf */
+ sub = strtoul(tokens[1], NULL, 0);
+	if ((int)sub >= func->nr)
+ return -1;
+
+ leaf = &func->leafs[sub];
+ buf = tokens[2];
+
+ if (strcasestr(buf, "EAX"))
+ reg_index = R_EAX;
+ else if (strcasestr(buf, "EBX"))
+ reg_index = R_EBX;
+ else if (strcasestr(buf, "ECX"))
+ reg_index = R_ECX;
+ else if (strcasestr(buf, "EDX"))
+ reg_index = R_EDX;
+ else
+ goto err_exit;
+
+ reg = &leaf->info[reg_index];
+ bdesc = &reg->descs[reg->nr++];
+
+ /* bit flag or bits field */
+ buf = tokens[3];
+
+ end = strtok(buf, ":");
+ bdesc->end = strtoul(end, NULL, 0);
+ bdesc->start = bdesc->end;
+
+ /* start != NULL means it is bit fields */
+ start = strtok(NULL, ":");
+ if (start)
+ bdesc->start = strtoul(start, NULL, 0);
+
+	strncpy(bdesc->simp, tokens[4], sizeof(bdesc->simp) - 1);
+	bdesc->simp[sizeof(bdesc->simp) - 1] = '\0';
+	strncpy(bdesc->detail, tokens[5], sizeof(bdesc->detail) - 1);
+	bdesc->detail[sizeof(bdesc->detail) - 1] = '\0';
+ return 0;
+
+err_exit:
+ printf("Warning: wrong line format:\n");
+ printf("\tline[%d]: %s\n", flines, line);
+ return -1;
+}
+
+/* Parse csv file, and construct the array of all leafs and subleafs */
+static void parse_text(void)
+{
+ FILE *file;
+ char *filename, *line = NULL;
+ size_t len = 0;
+ int ret;
+
+ if (show_raw)
+ return;
+
+ filename = user_csv ? user_csv : def_csv;
+ file = fopen(filename, "r");
+ if (!file) {
+ /* Fallback to a csv in the same dir */
+ file = fopen("./cpuid.csv", "r");
+ }
+
+ if (!file) {
+		printf("Failed to open '%s'\n", filename);
+ return;
+ }
+
+ while (1) {
+ ret = getline(&line, &len, file);
+ flines++;
+ if (ret > 0)
+ parse_line(line);
+
+ if (feof(file))
+ break;
+ }
+
+ fclose(file);
+}
+
+
+/* Decode every eax/ebx/ecx/edx */
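+/*
+ * Example (illustrative): a csv row declaring bits "15:8" is parsed as
+ * end = 15, start = 8, so mask = ((u64)1 << (15 - 8 + 1)) - 1 = 0xff and
+ * the printed field value is (value >> 8) & 0xff.
+ */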
+static void decode_bits(u32 value, struct reg_desc *rdesc)
+{
+ struct bits_desc *bdesc;
+ int start, end, i;
+ u32 mask;
+
+ for (i = 0; i < rdesc->nr; i++) {
+ bdesc = &rdesc->descs[i];
+
+ start = bdesc->start;
+ end = bdesc->end;
+ if (start == end) {
+ /* single bit flag */
+			if (value & (1U << start))
+ printf("\t%-20s %s%s\n",
+ bdesc->simp,
+ show_details ? "-" : "",
+ show_details ? bdesc->detail : ""
+ );
+ } else {
+ /* bit fields */
+ if (show_flags_only)
+ continue;
+
+ mask = ((u64)1 << (end - start + 1)) - 1;
+ printf("\t%-20s\t: 0x%-8x\t%s%s\n",
+ bdesc->simp,
+ (value >> start) & mask,
+ show_details ? "-" : "",
+ show_details ? bdesc->detail : ""
+ );
+ }
+ }
+}
+
+static void show_leaf(struct subleaf *leaf)
+{
+ if (!leaf)
+ return;
+
+ if (show_raw)
+ leaf_print_raw(leaf);
+
+ decode_bits(leaf->eax, &leaf->info[R_EAX]);
+ decode_bits(leaf->ebx, &leaf->info[R_EBX]);
+ decode_bits(leaf->ecx, &leaf->info[R_ECX]);
+ decode_bits(leaf->edx, &leaf->info[R_EDX]);
+}
+
+static void show_func(struct cpuid_func *func)
+{
+ int i;
+
+ if (!func)
+ return;
+
+ for (i = 0; i < func->nr; i++)
+ show_leaf(&func->leafs[i]);
+}
+
+static void show_range(struct cpuid_range *range)
+{
+ int i;
+
+ for (i = 0; i < range->nr; i++)
+ show_func(&range->funcs[i]);
+}
+
+static inline struct cpuid_func *index_to_func(u32 index)
+{
+ struct cpuid_range *range;
+
+ range = (index & 0x80000000) ? leafs_ext : leafs_basic;
+ index &= 0x7FFFFFFF;
+
+ if (((index & 0xFFFF) + 1) > (u32)range->nr) {
+ printf("ERR: invalid input index (0x%x)\n", index);
+ return NULL;
+ }
+ return &range->funcs[index];
+}
+
+static void show_info(void)
+{
+ struct cpuid_func *func;
+
+ if (show_raw) {
+ /* Show all of the raw output of 'cpuid' instr */
+ raw_dump_range(leafs_basic);
+ raw_dump_range(leafs_ext);
+ return;
+ }
+
+ if (user_index != 0xFFFFFFFF) {
+ /* Only show specific leaf/subleaf info */
+ func = index_to_func(user_index);
+ if (!func)
+ return;
+
+ /* Dump the raw data also */
+ show_raw = true;
+
+ if (user_sub != 0xFFFFFFFF) {
+ if (user_sub + 1 <= (u32)func->nr) {
+ show_leaf(&func->leafs[user_sub]);
+ return;
+ }
+
+ printf("ERR: invalid input subleaf (0x%x)\n", user_sub);
+ }
+
+ show_func(func);
+ return;
+ }
+
+ printf("CPU features:\n=============\n\n");
+ show_range(leafs_basic);
+ show_range(leafs_ext);
+}
+
+static void setup_platform_cpuid(void)
+{
+ u32 eax, ebx, ecx, edx;
+
+ /* Check vendor */
+ eax = ebx = ecx = edx = 0;
+ cpuid(&eax, &ebx, &ecx, &edx);
+
+	/* "htuA", first 4 bytes of "AuthenticAMD" */
+ if (ebx == 0x68747541)
+ is_amd = true;
+
+ /* Setup leafs for the basic and extended range */
+ leafs_basic = setup_cpuid_range(0x0);
+ leafs_ext = setup_cpuid_range(0x80000000);
+}
+
+static void usage(void)
+{
+ printf("kcpuid [-abdfhr] [-l leaf] [-s subleaf]\n"
+ "\t-a|--all Show both bit flags and complex bit fields info\n"
+ "\t-b|--bitflags Show boolean flags only\n"
+		"\t-d|--detail      Show details of the flag/fields\n"
+		"\t-f|--file        Specify the cpuid csv file\n"
+ "\t-h|--help Show usage info\n"
+ "\t-l|--leaf=index Specify the leaf you want to check\n"
+ "\t-r|--raw Show raw cpuid data\n"
+ "\t-s|--subleaf=sub Specify the subleaf you want to check\n"
+ );
+}
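+
+/*
+ * Illustrative invocations (options can be combined):
+ *   kcpuid                  # show boolean feature flags for all leafs
+ *   kcpuid -a -d            # also decode multi-bit fields, with details
+ *   kcpuid -l 0x15          # only decode leaf 0x15
+ *   kcpuid -l 7 -s 0        # only decode leaf 0x7, subleaf 0
+ */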
+
+static struct option opts[] = {
+ { "all", no_argument, NULL, 'a' }, /* show both bit flags and fields */
+ { "bitflags", no_argument, NULL, 'b' }, /* only show bit flags, default on */
+ { "detail", no_argument, NULL, 'd' }, /* show detail descriptions */
+ { "file", required_argument, NULL, 'f' }, /* use user's cpuid file */
+ { "help", no_argument, NULL, 'h'}, /* show usage */
+ { "leaf", required_argument, NULL, 'l'}, /* only check a specific leaf */
+ { "raw", no_argument, NULL, 'r'}, /* show raw CPUID leaf data */
+ { "subleaf", required_argument, NULL, 's'}, /* check a specific subleaf */
+ { NULL, 0, NULL, 0 }
+};
+
+static int parse_options(int argc, char *argv[])
+{
+ int c;
+
+ while ((c = getopt_long(argc, argv, "abdf:hl:rs:",
+ opts, NULL)) != -1)
+ switch (c) {
+ case 'a':
+ show_flags_only = false;
+ break;
+ case 'b':
+ show_flags_only = true;
+ break;
+ case 'd':
+ show_details = true;
+ break;
+ case 'f':
+ user_csv = optarg;
+ break;
+ case 'h':
+ usage();
+ exit(1);
+ break;
+ case 'l':
+ /* main leaf */
+ user_index = strtoul(optarg, NULL, 0);
+ break;
+ case 'r':
+ show_raw = true;
+ break;
+ case 's':
+ /* subleaf */
+ user_sub = strtoul(optarg, NULL, 0);
+ break;
+ default:
+ printf("%s: Invalid option '%c'\n", argv[0], optopt);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Do 4 things in turn:
+ * 1. Parse user options
+ * 2. Parse and store all the CPUID leaf data supported on this platform
+ * 3. Parse the csv file, while skipping leafs which are not available
+ *    on this platform
+ * 4. Print leafs info based on user options
+ */
+int main(int argc, char *argv[])
+{
+ if (parse_options(argc, argv))
+ return -1;
+
+ /* Setup the cpuid leafs of current platform */
+ setup_platform_cpuid();
+
+ /* Read and parse the 'cpuid.csv' */
+ parse_text();
+
+ show_info();
+ return 0;
+}
diff --git a/tools/arch/x86/lib/inat.c b/tools/arch/x86/lib/inat.c
index 4f5ed49e1b4e..dfbcc6405941 100644
--- a/tools/arch/x86/lib/inat.c
+++ b/tools/arch/x86/lib/inat.c
@@ -4,7 +4,7 @@
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
*/
-#include "../include/asm/insn.h"
+#include "../include/asm/insn.h" /* __ignore_sync_check__ */
/* Attribute tables are generated from opcode map */
#include "inat-tables.c"
diff --git a/tools/arch/x86/lib/insn.c b/tools/arch/x86/lib/insn.c
index 3d9355ed1246..c41f95815480 100644
--- a/tools/arch/x86/lib/insn.c
+++ b/tools/arch/x86/lib/insn.c
@@ -11,10 +11,13 @@
#else
#include <string.h>
#endif
-#include "../include/asm/inat.h"
-#include "../include/asm/insn.h"
+#include "../include/asm/inat.h" /* __ignore_sync_check__ */
+#include "../include/asm/insn.h" /* __ignore_sync_check__ */
-#include "../include/asm/emulate_prefix.h"
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+
+#include "../include/asm/emulate_prefix.h" /* __ignore_sync_check__ */
#define leXX_to_cpu(t, r) \
({ \
@@ -51,6 +54,7 @@
* insn_init() - initialize struct insn
* @insn: &struct insn to be initialized
* @kaddr: address (in kernel memory) of instruction (or copy thereof)
+ * @buf_len: length of the insn buffer at @kaddr
* @x86_64: !0 for 64-bit kernel or 64-bit app
*/
void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
@@ -111,8 +115,12 @@ static void insn_get_emulate_prefix(struct insn *insn)
* Populates the @insn->prefixes bitmap, and updates @insn->next_byte
* to point to the (first) opcode. No effect if @insn->prefixes.got
* is already set.
+ *
+ * Returns:
+ * 0: on success
+ * < 0: on error
*/
-void insn_get_prefixes(struct insn *insn)
+int insn_get_prefixes(struct insn *insn)
{
struct insn_field *prefixes = &insn->prefixes;
insn_attr_t attr;
@@ -120,7 +128,7 @@ void insn_get_prefixes(struct insn *insn)
int i, nb;
if (prefixes->got)
- return;
+ return 0;
insn_get_emulate_prefix(insn);
@@ -230,8 +238,10 @@ vex_end:
prefixes->got = 1;
+ return 0;
+
err_out:
- return;
+ return -ENODATA;
}
/**
@@ -243,16 +253,25 @@ err_out:
* If necessary, first collects any preceding (prefix) bytes.
* Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got
* is already 1.
+ *
+ * Returns:
+ * 0: on success
+ * < 0: on error
*/
-void insn_get_opcode(struct insn *insn)
+int insn_get_opcode(struct insn *insn)
{
struct insn_field *opcode = &insn->opcode;
+ int pfx_id, ret;
insn_byte_t op;
- int pfx_id;
+
if (opcode->got)
- return;
- if (!insn->prefixes.got)
- insn_get_prefixes(insn);
+ return 0;
+
+ if (!insn->prefixes.got) {
+ ret = insn_get_prefixes(insn);
+ if (ret)
+ return ret;
+ }
/* Get first opcode */
op = get_next(insn_byte_t, insn);
@@ -267,9 +286,13 @@ void insn_get_opcode(struct insn *insn)
insn->attr = inat_get_avx_attribute(op, m, p);
if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
(!inat_accept_vex(insn->attr) &&
- !inat_is_group(insn->attr)))
- insn->attr = 0; /* This instruction is bad */
- goto end; /* VEX has only 1 byte for opcode */
+ !inat_is_group(insn->attr))) {
+ /* This instruction is bad */
+ insn->attr = 0;
+ return -EINVAL;
+ }
+ /* VEX has only 1 byte for opcode */
+ goto end;
}
insn->attr = inat_get_opcode_attribute(op);
@@ -280,13 +303,18 @@ void insn_get_opcode(struct insn *insn)
pfx_id = insn_last_prefix_id(insn);
insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
}
- if (inat_must_vex(insn->attr))
- insn->attr = 0; /* This instruction is bad */
+
+ if (inat_must_vex(insn->attr)) {
+ /* This instruction is bad */
+ insn->attr = 0;
+ return -EINVAL;
+ }
end:
opcode->got = 1;
+ return 0;
err_out:
- return;
+ return -ENODATA;
}
/**
@@ -296,15 +324,25 @@ err_out:
* Populates @insn->modrm and updates @insn->next_byte to point past the
* ModRM byte, if any. If necessary, first collects the preceding bytes
* (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1.
+ *
+ * Returns:
+ * 0: on success
+ * < 0: on error
*/
-void insn_get_modrm(struct insn *insn)
+int insn_get_modrm(struct insn *insn)
{
struct insn_field *modrm = &insn->modrm;
insn_byte_t pfx_id, mod;
+ int ret;
+
if (modrm->got)
- return;
- if (!insn->opcode.got)
- insn_get_opcode(insn);
+ return 0;
+
+ if (!insn->opcode.got) {
+ ret = insn_get_opcode(insn);
+ if (ret)
+ return ret;
+ }
if (inat_has_modrm(insn->attr)) {
mod = get_next(insn_byte_t, insn);
@@ -313,17 +351,22 @@ void insn_get_modrm(struct insn *insn)
pfx_id = insn_last_prefix_id(insn);
insn->attr = inat_get_group_attribute(mod, pfx_id,
insn->attr);
- if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
- insn->attr = 0; /* This is bad */
+ if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) {
+ /* Bad insn */
+ insn->attr = 0;
+ return -EINVAL;
+ }
}
}
if (insn->x86_64 && inat_is_force64(insn->attr))
insn->opnd_bytes = 8;
+
modrm->got = 1;
+ return 0;
err_out:
- return;
+ return -ENODATA;
}
@@ -337,11 +380,16 @@ err_out:
int insn_rip_relative(struct insn *insn)
{
struct insn_field *modrm = &insn->modrm;
+ int ret;
if (!insn->x86_64)
return 0;
- if (!modrm->got)
- insn_get_modrm(insn);
+
+ if (!modrm->got) {
+ ret = insn_get_modrm(insn);
+ if (ret)
+ return 0;
+ }
/*
* For rip-relative instructions, the mod field (top 2 bits)
* is zero and the r/m field (bottom 3 bits) is 0x5.
@@ -355,15 +403,25 @@ int insn_rip_relative(struct insn *insn)
*
* If necessary, first collects the instruction up to and including the
* ModRM byte.
+ *
+ * Returns:
+ * 0: if decoding succeeded
+ * < 0: otherwise.
*/
-void insn_get_sib(struct insn *insn)
+int insn_get_sib(struct insn *insn)
{
insn_byte_t modrm;
+ int ret;
if (insn->sib.got)
- return;
- if (!insn->modrm.got)
- insn_get_modrm(insn);
+ return 0;
+
+ if (!insn->modrm.got) {
+ ret = insn_get_modrm(insn);
+ if (ret)
+ return ret;
+ }
+
if (insn->modrm.nbytes) {
modrm = insn->modrm.bytes[0];
if (insn->addr_bytes != 2 &&
@@ -374,8 +432,10 @@ void insn_get_sib(struct insn *insn)
}
insn->sib.got = 1;
+ return 0;
+
err_out:
- return;
+ return -ENODATA;
}
@@ -386,15 +446,25 @@ err_out:
* If necessary, first collects the instruction up to and including the
* SIB byte.
* Displacement value is sign-expanded.
+ *
+ * Returns:
+ * 0: if decoding succeeded
+ * < 0: otherwise.
*/
-void insn_get_displacement(struct insn *insn)
+int insn_get_displacement(struct insn *insn)
{
insn_byte_t mod, rm, base;
+ int ret;
if (insn->displacement.got)
- return;
- if (!insn->sib.got)
- insn_get_sib(insn);
+ return 0;
+
+ if (!insn->sib.got) {
+ ret = insn_get_sib(insn);
+ if (ret)
+ return ret;
+ }
+
if (insn->modrm.nbytes) {
/*
* Interpreting the modrm byte:
@@ -436,9 +506,10 @@ void insn_get_displacement(struct insn *insn)
}
out:
insn->displacement.got = 1;
+ return 0;
err_out:
- return;
+ return -ENODATA;
}
/* Decode moffset16/32/64. Return 0 if failed */
@@ -537,20 +608,30 @@ err_out:
}
/**
- * insn_get_immediate() - Get the immediates of instruction
+ * insn_get_immediate() - Get the immediate in an instruction
* @insn: &struct insn containing instruction
*
* If necessary, first collects the instruction up to and including the
* displacement bytes.
* Basically, most of immediates are sign-expanded. Unsigned-value can be
- * get by bit masking with ((1 << (nbytes * 8)) - 1)
+ * computed by bit masking with ((1 << (nbytes * 8)) - 1)
+ *
+ * Returns:
+ * 0: on success
+ * < 0: on error
*/
-void insn_get_immediate(struct insn *insn)
+int insn_get_immediate(struct insn *insn)
{
+ int ret;
+
if (insn->immediate.got)
- return;
- if (!insn->displacement.got)
- insn_get_displacement(insn);
+ return 0;
+
+ if (!insn->displacement.got) {
+ ret = insn_get_displacement(insn);
+ if (ret)
+ return ret;
+ }
if (inat_has_moffset(insn->attr)) {
if (!__get_moffset(insn))
@@ -597,9 +678,10 @@ void insn_get_immediate(struct insn *insn)
}
done:
insn->immediate.got = 1;
+ return 0;
err_out:
- return;
+ return -ENODATA;
}
/**
@@ -608,13 +690,65 @@ err_out:
*
* If necessary, first collects the instruction up to and including the
* immediates bytes.
- */
-void insn_get_length(struct insn *insn)
+ *
+ * Returns:
+ * 0: on success
+ * < 0: on error
+ */
+int insn_get_length(struct insn *insn)
{
+ int ret;
+
if (insn->length)
- return;
- if (!insn->immediate.got)
- insn_get_immediate(insn);
+ return 0;
+
+ if (!insn->immediate.got) {
+ ret = insn_get_immediate(insn);
+ if (ret)
+ return ret;
+ }
+
insn->length = (unsigned char)((unsigned long)insn->next_byte
- (unsigned long)insn->kaddr);
+
+ return 0;
+}
+
+/* Ensure this instruction is decoded completely */
+static inline int insn_complete(struct insn *insn)
+{
+ return insn->opcode.got && insn->modrm.got && insn->sib.got &&
+ insn->displacement.got && insn->immediate.got;
+}
+
+/**
+ * insn_decode() - Decode an x86 instruction
+ * @insn: &struct insn to be initialized
+ * @kaddr: address (in kernel memory) of instruction (or copy thereof)
+ * @buf_len: length of the insn buffer at @kaddr
+ * @m: insn mode, see enum insn_mode
+ *
+ * Returns:
+ * 0: if decoding succeeded
+ * < 0: otherwise.
+ */
+int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m)
+{
+ int ret;
+
+#define INSN_MODE_KERN (enum insn_mode)-1 /* __ignore_sync_check__ mode is only valid in the kernel */
+
+ if (m == INSN_MODE_KERN)
+ insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64));
+ else
+ insn_init(insn, kaddr, buf_len, m == INSN_MODE_64);
+
+ ret = insn_get_length(insn);
+ if (ret)
+ return ret;
+
+ if (insn_complete(insn))
+ return 0;
+
+ return -EINVAL;
}
diff --git a/tools/bpf/Makefile.helpers b/tools/bpf/Makefile.helpers
deleted file mode 100644
index 854d084026dd..000000000000
--- a/tools/bpf/Makefile.helpers
+++ /dev/null
@@ -1,60 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ifndef allow-override
- include ../scripts/Makefile.include
- include ../scripts/utilities.mak
-else
- # Assume Makefile.helpers is being run from bpftool/Documentation
- # subdirectory. Go up two more directories to fetch bpf.h header and
- # associated script.
- UP2DIR := ../../
-endif
-
-INSTALL ?= install
-RM ?= rm -f
-RMDIR ?= rmdir --ignore-fail-on-non-empty
-
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
-prefix ?= /usr/local
-mandir ?= $(prefix)/man
-man7dir = $(mandir)/man7
-
-HELPERS_RST = bpf-helpers.rst
-MAN7_RST = $(HELPERS_RST)
-
-_DOC_MAN7 = $(patsubst %.rst,%.7,$(MAN7_RST))
-DOC_MAN7 = $(addprefix $(OUTPUT),$(_DOC_MAN7))
-
-helpers: man7
-man7: $(DOC_MAN7)
-
-RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
-
-$(OUTPUT)$(HELPERS_RST): $(UP2DIR)../../include/uapi/linux/bpf.h
- $(QUIET_GEN)$(UP2DIR)../../scripts/bpf_helpers_doc.py --filename $< > $@
-
-$(OUTPUT)%.7: $(OUTPUT)%.rst
-ifndef RST2MAN_DEP
- $(error "rst2man not found, but required to generate man pages")
-endif
- $(QUIET_GEN)rst2man $< > $@
-
-helpers-clean:
- $(call QUIET_CLEAN, eBPF_helpers-manpage)
- $(Q)$(RM) $(DOC_MAN7) $(OUTPUT)$(HELPERS_RST)
-
-helpers-install: helpers
- $(call QUIET_INSTALL, eBPF_helpers-manpage)
- $(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man7dir)
- $(Q)$(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir)
-
-helpers-uninstall:
- $(call QUIET_UNINST, eBPF_helpers-manpage)
- $(Q)$(RM) $(addprefix $(DESTDIR)$(man7dir)/,$(_DOC_MAN7))
- $(Q)$(RMDIR) $(DESTDIR)$(man7dir)
-
-.PHONY: helpers helpers-clean helpers-install helpers-uninstall
diff --git a/tools/bpf/bpf_dbg.c b/tools/bpf/bpf_dbg.c
index a07dfc479270..00e560a17baf 100644
--- a/tools/bpf/bpf_dbg.c
+++ b/tools/bpf/bpf_dbg.c
@@ -1198,7 +1198,7 @@ static int cmd_run(char *num)
else
return CMD_OK;
bpf_reset();
- } while (pcap_next_pkt() && (!has_limit || (has_limit && ++i < pkts)));
+ } while (pcap_next_pkt() && (!has_limit || (++i < pkts)));
rl_printf("bpf passes:%u fails:%u\n", pass, fail);
diff --git a/tools/bpf/bpf_exp.y b/tools/bpf/bpf_exp.y
index 8d48e896be50..dfb7254a24e8 100644
--- a/tools/bpf/bpf_exp.y
+++ b/tools/bpf/bpf_exp.y
@@ -185,13 +185,13 @@ ldx
| OP_LDXB number '*' '(' '[' number ']' '&' number ')' {
if ($2 != 4 || $9 != 0xf) {
fprintf(stderr, "ldxb offset not supported!\n");
- exit(0);
+ exit(1);
} else {
bpf_set_curr_instr(BPF_LDX | BPF_MSH | BPF_B, 0, 0, $6); } }
| OP_LDX number '*' '(' '[' number ']' '&' number ')' {
if ($2 != 4 || $9 != 0xf) {
fprintf(stderr, "ldxb offset not supported!\n");
- exit(0);
+ exit(1);
} else {
bpf_set_curr_instr(BPF_LDX | BPF_MSH | BPF_B, 0, 0, $6); } }
;
@@ -472,7 +472,7 @@ static void bpf_assert_max(void)
{
if (curr_instr >= BPF_MAXINSNS) {
fprintf(stderr, "only max %u insns allowed!\n", BPF_MAXINSNS);
- exit(0);
+ exit(1);
}
}
@@ -522,7 +522,7 @@ static int bpf_find_insns_offset(const char *label)
if (ret == -ENOENT) {
fprintf(stderr, "no such label \'%s\'!\n", label);
- exit(0);
+ exit(1);
}
return ret;
@@ -549,9 +549,11 @@ static uint8_t bpf_encode_jt_jf_offset(int off, int i)
{
int delta = off - i - 1;
- if (delta < 0 || delta > 255)
- fprintf(stderr, "warning: insn #%d jumps to insn #%d, "
+ if (delta < 0 || delta > 255) {
+ fprintf(stderr, "error: insn #%d jumps to insn #%d, "
"which is out of range\n", i, off);
+ exit(1);
+ }
return (uint8_t) delta;
}
diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore
index 944cb4b7c95d..05ce4446b780 100644
--- a/tools/bpf/bpftool/.gitignore
+++ b/tools/bpf/bpftool/.gitignore
@@ -3,7 +3,6 @@
/bootstrap/
/bpftool
bpftool*.8
-bpf-helpers.*
FEATURE-DUMP.bpftool
feature
libbpf
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index f33cb02de95c..c49487905ceb 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -16,15 +16,12 @@ prefix ?= /usr/local
mandir ?= $(prefix)/man
man8dir = $(mandir)/man8
-# Load targets for building eBPF helpers man page.
-include ../../Makefile.helpers
-
MAN8_RST = $(wildcard bpftool*.rst)
_DOC_MAN8 = $(patsubst %.rst,%.8,$(MAN8_RST))
DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8))
-man: man8 helpers
+man: man8
man8: $(DOC_MAN8)
RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
@@ -46,16 +43,16 @@ ifndef RST2MAN_DEP
endif
$(QUIET_GEN)( cat $< ; printf "%b" $(call see_also,$<) ) | rst2man $(RST2MAN_OPTS) > $@
-clean: helpers-clean
+clean:
$(call QUIET_CLEAN, Documentation)
$(Q)$(RM) $(DOC_MAN8)
-install: man helpers-install
+install: man
$(call QUIET_INSTALL, Documentation-man)
$(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man8dir)
$(Q)$(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir)
-uninstall: helpers-uninstall
+uninstall:
$(call QUIET_UNINST, Documentation-man)
$(Q)$(RM) $(addprefix $(DESTDIR)$(man8dir)/,$(_DOC_MAN8))
$(Q)$(RMDIR) $(DESTDIR)$(man8dir)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
index 84cf0639696f..7cd6681137f3 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-gen.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
@@ -14,16 +14,37 @@ SYNOPSIS
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] }
- *COMMAND* := { **skeleton** | **help** }
+ *COMMAND* := { **object** | **skeleton** | **help** }
GEN COMMANDS
=============
-| **bpftool** **gen skeleton** *FILE*
+| **bpftool** **gen object** *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...]
+| **bpftool** **gen skeleton** *FILE* [**name** *OBJECT_NAME*]
| **bpftool** **gen help**
DESCRIPTION
===========
+ **bpftool gen object** *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...]
+ Statically link (combine) together one or more *INPUT_FILE*'s
+ into a single resulting *OUTPUT_FILE*. All the files involved
+ are BPF ELF object files.
+
+ The rules of BPF static linking are mostly the same as for
+ user-space object files, but in addition to combining data
+ and instruction sections, .BTF and .BTF.ext (if present in
+ any of the input files) data are combined together. .BTF
+ data is deduplicated, so all the common types across
+ *INPUT_FILE*'s will only be represented once in the resulting
+ BTF information.
+
+		BPF static linking allows one to partition BPF source code
+		into individually compiled files that are then linked into
+		a single resulting BPF object file, which can be used to
+		generate a BPF skeleton (with the **gen skeleton** command)
+		or passed directly into **libbpf** (using the
+		**bpf_object__open()** family of APIs).
+
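+		For example (an illustrative invocation; the EXAMPLES
+		section below shows a complete workflow):
+
+		**# bpftool gen object app.bpf.o foo.bpf.o bar.bpf.o**
+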
**bpftool gen skeleton** *FILE*
Generate BPF skeleton C header file for a given *FILE*.
@@ -75,10 +96,13 @@ DESCRIPTION
specific maps, programs, etc.
As part of skeleton, few custom functions are generated.
- Each of them is prefixed with object name, derived from
- object file name. I.e., if BPF object file name is
- **example.o**, BPF object name will be **example**. The
- following custom functions are provided in such case:
+		Each of them is prefixed with the object name. The object
+		name can either be derived from the object file name (e.g.,
+		if the BPF object file name is **example.o**, the BPF object
+		name will be **example**), or specified explicitly through
+		the **name** *OBJECT_NAME* parameter. The following custom
+		functions are provided (assuming **example** as the object
+		name):
- **example__open** and **example__open_opts**.
These functions are used to instantiate skeleton. It
@@ -130,26 +154,19 @@ OPTIONS
EXAMPLES
========
-**$ cat example.c**
+**$ cat example1.bpf.c**
::
#include <stdbool.h>
#include <linux/ptrace.h>
#include <linux/bpf.h>
- #include "bpf_helpers.h"
+ #include <bpf/bpf_helpers.h>
const volatile int param1 = 42;
bool global_flag = true;
struct { int x; } data = {};
- struct {
- __uint(type, BPF_MAP_TYPE_HASH);
- __uint(max_entries, 128);
- __type(key, int);
- __type(value, long);
- } my_map SEC(".maps");
-
SEC("raw_tp/sys_enter")
int handle_sys_enter(struct pt_regs *ctx)
{
@@ -161,6 +178,21 @@ EXAMPLES
return 0;
}
+**$ cat example2.bpf.c**
+
+::
+
+ #include <linux/ptrace.h>
+ #include <linux/bpf.h>
+ #include <bpf/bpf_helpers.h>
+
+ struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 128);
+ __type(key, int);
+ __type(value, long);
+ } my_map SEC(".maps");
+
SEC("raw_tp/sys_exit")
int handle_sys_exit(struct pt_regs *ctx)
{
@@ -170,9 +202,17 @@ EXAMPLES
}
This is example BPF application with two BPF programs and a mix of BPF maps
-and global variables.
+and global variables. The source code is split across two source files.
-**$ bpftool gen skeleton example.o**
+**$ clang -target bpf -g example1.bpf.c -o example1.bpf.o**
+**$ clang -target bpf -g example2.bpf.c -o example2.bpf.o**
+**$ bpftool gen object example.bpf.o example1.bpf.o example2.bpf.o**
+
+This set of commands compiles *example1.bpf.c* and *example2.bpf.c*
+individually and then statically links the respective object files into the final
+BPF ELF object file *example.bpf.o*.
+
+**$ bpftool gen skeleton example.bpf.o name example | tee example.skel.h**
::
@@ -227,7 +267,7 @@ and global variables.
#endif /* __EXAMPLE_SKEL_H__ */
-**$ cat example_user.c**
+**$ cat example.c**
::
@@ -270,7 +310,7 @@ and global variables.
return err;
}
-**# ./example_user**
+**# ./example**
::
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index fdffbc64c65c..d67518bcbd44 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -981,12 +981,25 @@ _bpftool()
;;
gen)
case $command in
- skeleton)
+ object)
_filedir
+ return 0
+ ;;
+ skeleton)
+ case $prev in
+ $command)
+ _filedir
+ return 0
+ ;;
+ *)
+ _bpftool_once_attr 'name'
+ return 0
+ ;;
+ esac
;;
*)
[[ $prev == $object ]] && \
- COMPREPLY=( $( compgen -W 'skeleton help' -- "$cur" ) )
+ COMPREPLY=( $( compgen -W 'object skeleton help' -- "$cur" ) )
;;
esac
;;
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index fe9e7b3a4b50..385d5c955cf3 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -36,6 +36,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
[BTF_KIND_VAR] = "VAR",
[BTF_KIND_DATASEC] = "DATASEC",
+ [BTF_KIND_FLOAT] = "FLOAT",
};
struct btf_attach_table {
@@ -70,7 +71,9 @@ static const char *btf_var_linkage_str(__u32 linkage)
case BTF_VAR_STATIC:
return "static";
case BTF_VAR_GLOBAL_ALLOCATED:
- return "global-alloc";
+ return "global";
+ case BTF_VAR_GLOBAL_EXTERN:
+ return "extern";
default:
return "(unknown)";
}
@@ -97,26 +100,28 @@ static const char *btf_str(const struct btf *btf, __u32 off)
return btf__name_by_offset(btf, off) ? : "(invalid)";
}
+static int btf_kind_safe(int kind)
+{
+ return kind <= BTF_KIND_MAX ? kind : BTF_KIND_UNKN;
+}
+
static int dump_btf_type(const struct btf *btf, __u32 id,
const struct btf_type *t)
{
json_writer_t *w = json_wtr;
- int kind, safe_kind;
-
- kind = BTF_INFO_KIND(t->info);
- safe_kind = kind <= BTF_KIND_MAX ? kind : BTF_KIND_UNKN;
+ int kind = btf_kind(t);
if (json_output) {
jsonw_start_object(w);
jsonw_uint_field(w, "id", id);
- jsonw_string_field(w, "kind", btf_kind_str[safe_kind]);
+ jsonw_string_field(w, "kind", btf_kind_str[btf_kind_safe(kind)]);
jsonw_string_field(w, "name", btf_str(btf, t->name_off));
} else {
- printf("[%u] %s '%s'", id, btf_kind_str[safe_kind],
+ printf("[%u] %s '%s'", id, btf_kind_str[btf_kind_safe(kind)],
btf_str(btf, t->name_off));
}
- switch (BTF_INFO_KIND(t->info)) {
+ switch (kind) {
case BTF_KIND_INT: {
__u32 v = *(__u32 *)(t + 1);
const char *enc;
@@ -299,7 +304,8 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
break;
}
case BTF_KIND_DATASEC: {
- const struct btf_var_secinfo *v = (const void *)(t+1);
+ const struct btf_var_secinfo *v = (const void *)(t + 1);
+ const struct btf_type *vt;
__u16 vlen = BTF_INFO_VLEN(t->info);
int i;
@@ -321,12 +327,26 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
} else {
printf("\n\ttype_id=%u offset=%u size=%u",
v->type, v->offset, v->size);
+
+ if (v->type <= btf__get_nr_types(btf)) {
+ vt = btf__type_by_id(btf, v->type);
+ printf(" (%s '%s')",
+ btf_kind_str[btf_kind_safe(btf_kind(vt))],
+ btf_str(btf, vt->name_off));
+ }
}
}
if (json_output)
jsonw_end_array(w);
break;
}
+ case BTF_KIND_FLOAT: {
+ if (json_output)
+ jsonw_uint_field(w, "size", t->size);
+ else
+ printf(" size=%u", t->size);
+ break;
+ }
default:
break;
}
@@ -538,6 +558,7 @@ static int do_dump(int argc, char **argv)
NEXT_ARG();
if (argc < 1) {
p_err("expecting value for 'format' option\n");
+ err = -EINVAL;
goto done;
}
if (strcmp(*argv, "c") == 0) {
@@ -547,11 +568,13 @@ static int do_dump(int argc, char **argv)
} else {
p_err("unrecognized format specifier: '%s', possible values: raw, c",
*argv);
+ err = -EINVAL;
goto done;
}
NEXT_ARG();
} else {
p_err("unrecognized option: '%s'", *argv);
+ err = -EINVAL;
goto done;
}
}
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 0e9310727281..7ca54d046362 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -596,6 +596,7 @@ static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
switch (BTF_INFO_KIND(t->info)) {
case BTF_KIND_INT:
case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FLOAT:
BTF_PRINT_ARG("%s ", btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_STRUCT:
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 65303664417e..1828bba19020 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -57,6 +57,7 @@ const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = {
[BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser",
[BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict",
+ [BPF_SK_SKB_VERDICT] = "sk_skb_verdict",
[BPF_SK_MSG_VERDICT] = "sk_msg_verdict",
[BPF_LIRC_MODE2] = "lirc_mode2",
[BPF_FLOW_DISSECTOR] = "flow_dissector",
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 359960a8f1de..40a88df275f9 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -336,6 +336,10 @@ static void probe_kernel_image_config(const char *define_prefix)
{ "CONFIG_BPF_JIT", },
/* Avoid compiling eBPF interpreter (use JIT only) */
{ "CONFIG_BPF_JIT_ALWAYS_ON", },
+ /* Kernel BTF debug information available */
+ { "CONFIG_DEBUG_INFO_BTF", },
+ /* Kernel module BTF debug information available */
+ { "CONFIG_DEBUG_INFO_BTF_MODULES", },
/* cgroups */
{ "CONFIG_CGROUPS", },
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 4033c46d83e7..31ade77f5ef8 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -273,7 +273,7 @@ static int do_skeleton(int argc, char **argv)
char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
size_t i, map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
- char obj_name[MAX_OBJ_NAME_LEN], *obj_data;
+ char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
struct bpf_object *obj = NULL;
const char *file, *ident;
struct bpf_program *prog;
@@ -288,6 +288,28 @@ static int do_skeleton(int argc, char **argv)
}
file = GET_ARG();
+ while (argc) {
+ if (!REQ_ARGS(2))
+ return -1;
+
+ if (is_prefix(*argv, "name")) {
+ NEXT_ARG();
+
+ if (obj_name[0] != '\0') {
+ p_err("object name already specified");
+ return -1;
+ }
+
+ strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
+ obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
+ } else {
+ p_err("unknown arg %s", *argv);
+ return -1;
+ }
+
+ NEXT_ARG();
+ }
+
if (argc) {
p_err("extra unknown arguments");
return -1;
@@ -310,7 +332,8 @@ static int do_skeleton(int argc, char **argv)
p_err("failed to mmap() %s: %s", file, strerror(errno));
goto out;
}
- get_obj_name(obj_name, file);
+ if (obj_name[0] == '\0')
+ get_obj_name(obj_name, file);
opts.object_name = obj_name;
obj = bpf_object__open_mem(obj_data, file_sz, &opts);
if (IS_ERR(obj)) {
@@ -591,6 +614,47 @@ out:
return err;
}
+static int do_object(int argc, char **argv)
+{
+ struct bpf_linker *linker;
+ const char *output_file, *file;
+ int err = 0;
+
+ if (!REQ_ARGS(2)) {
+ usage();
+ return -1;
+ }
+
+ output_file = GET_ARG();
+
+ linker = bpf_linker__new(output_file, NULL);
+ if (!linker) {
+ p_err("failed to create BPF linker instance");
+ return -1;
+ }
+
+ while (argc) {
+ file = GET_ARG();
+
+ err = bpf_linker__add_file(linker, file);
+ if (err) {
+ p_err("failed to link '%s': %s (%d)", file, strerror(err), err);
+ goto out;
+ }
+ }
+
+ err = bpf_linker__finalize(linker);
+ if (err) {
+ p_err("failed to finalize ELF file: %s (%d)", strerror(err), err);
+ goto out;
+ }
+
+ err = 0;
+out:
+ bpf_linker__free(linker);
+ return err;
+}
+
static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -599,7 +663,8 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %1$s %2$s skeleton FILE\n"
+ "Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
+ " %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
" %1$s %2$s help\n"
"\n"
" " HELP_SPEC_OPTIONS "\n"
@@ -610,6 +675,7 @@ static int do_help(int argc, char **argv)
}
static const struct cmd cmds[] = {
+ { "object", do_object },
{ "skeleton", do_skeleton },
{ "help", do_help },
{ 0 }
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index b86f450e6fce..d9afb730136a 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -276,7 +276,7 @@ static int do_batch(int argc, char **argv)
int n_argc;
FILE *fp;
char *cp;
- int err;
+ int err = 0;
int i;
if (argc < 2) {
@@ -370,7 +370,6 @@ static int do_batch(int argc, char **argv)
} else {
if (!json_output)
printf("processed %d commands\n", lines);
- err = 0;
}
err_close:
if (fp != stdin)
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index b400364ee054..09ae0381205b 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -100,7 +100,7 @@ static int do_dump_btf(const struct btf_dumper *d,
void *value)
{
__u32 value_id;
- int ret;
+ int ret = 0;
/* start of key-value pair */
jsonw_start_object(d->jw);
diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
index ff3aa0cf3997..f836d115d7d6 100644
--- a/tools/bpf/bpftool/net.c
+++ b/tools/bpf/bpftool/net.c
@@ -157,7 +157,7 @@ static int netlink_recv(int sock, __u32 nl_pid, __u32 seq,
if (len == 0)
break;
- for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
+ for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, (unsigned int)len);
nh = NLMSG_NEXT(nh, len)) {
if (nh->nlmsg_pid != nl_pid) {
ret = -LIBBPF_ERRNO__WRNGPID;
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index f2b915b20546..3f067d2d7584 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -76,6 +76,7 @@ enum dump_mode {
static const char * const attach_type_strings[] = {
[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
+ [BPF_SK_SKB_VERDICT] = "skb_verdict",
[BPF_SK_MSG_VERDICT] = "msg_verdict",
[BPF_FLOW_DISSECTOR] = "flow_dissector",
[__MAX_BPF_ATTACH_TYPE] = NULL,
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
index 8608cd68cdd0..6fc3e6f7f40c 100644
--- a/tools/bpf/bpftool/xlated_dumper.c
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -196,6 +196,9 @@ static const char *print_imm(void *private_data,
else if (insn->src_reg == BPF_PSEUDO_MAP_VALUE)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"map[id:%u][0]+%u", insn->imm, (insn + 1)->imm);
+ else if (insn->src_reg == BPF_PSEUDO_FUNC)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "subprog[%+d]", insn->imm);
else
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"0x%llx", (unsigned long long)full_imm);
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index 80d966cfcaa1..7550fd9c3188 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -115,10 +115,10 @@ struct object {
static int verbose;
-int eprintf(int level, int var, const char *fmt, ...)
+static int eprintf(int level, int var, const char *fmt, ...)
{
va_list args;
- int ret;
+ int ret = 0;
if (var >= level) {
va_start(args, fmt);
@@ -385,7 +385,7 @@ static int elf_collect(struct object *obj)
static int symbols_collect(struct object *obj)
{
Elf_Scn *scn = NULL;
- int n, i, err = 0;
+ int n, i;
GElf_Shdr sh;
char *name;
@@ -402,11 +402,10 @@ static int symbols_collect(struct object *obj)
* Scan symbols and look for the ones starting with
* __BTF_ID__* over .BTF_ids section.
*/
- for (i = 0; !err && i < n; i++) {
- char *tmp, *prefix;
+ for (i = 0; i < n; i++) {
+ char *prefix;
struct btf_id *id;
GElf_Sym sym;
- int err = -1;
if (!gelf_getsym(obj->efile.symbols, i, &sym))
return -1;
diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile
index 9d9fb6209be1..3818ec511fd2 100644
--- a/tools/bpf/runqslower/Makefile
+++ b/tools/bpf/runqslower/Makefile
@@ -16,7 +16,10 @@ CFLAGS := -g -Wall
# Try to detect best kernel BTF source
KERNEL_REL := $(shell uname -r)
-VMLINUX_BTF_PATHS := /sys/kernel/btf/vmlinux /boot/vmlinux-$(KERNEL_REL)
+VMLINUX_BTF_PATHS := $(if $(O),$(O)/vmlinux) \
+ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+ ../../../vmlinux /sys/kernel/btf/vmlinux \
+ /boot/vmlinux-$(KERNEL_REL)
VMLINUX_BTF_PATH := $(or $(VMLINUX_BTF),$(firstword \
$(wildcard $(VMLINUX_BTF_PATHS))))
@@ -66,12 +69,16 @@ $(OUTPUT) $(BPFOBJ_OUTPUT) $(BPFTOOL_OUTPUT):
$(QUIET_MKDIR)mkdir -p $@
$(OUTPUT)/vmlinux.h: $(VMLINUX_BTF_PATH) | $(OUTPUT) $(BPFTOOL)
+ifeq ($(VMLINUX_H),)
$(Q)if [ ! -e "$(VMLINUX_BTF_PATH)" ] ; then \
echo "Couldn't find kernel BTF; set VMLINUX_BTF to" \
"specify its location." >&2; \
exit 1;\
fi
$(QUIET_GEN)$(BPFTOOL) btf dump file $(VMLINUX_BTF_PATH) format c > $@
+else
+ $(Q)cp "$(VMLINUX_H)" $@
+endif
$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OUTPUT)
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(BPFOBJ_OUTPUT) $@
diff --git a/tools/bpf/runqslower/runqslower.bpf.c b/tools/bpf/runqslower/runqslower.bpf.c
index 1f18a409f044..645530ca7e98 100644
--- a/tools/bpf/runqslower/runqslower.bpf.c
+++ b/tools/bpf/runqslower/runqslower.bpf.c
@@ -11,9 +11,9 @@ const volatile __u64 min_us = 0;
const volatile pid_t targ_pid = 0;
struct {
- __uint(type, BPF_MAP_TYPE_HASH);
- __uint(max_entries, 10240);
- __type(key, u32);
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
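+	/* one u64 timestamp slot per task, created on demand */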
+ __type(key, int);
__type(value, u64);
} start SEC(".maps");
@@ -25,15 +25,20 @@ struct {
/* record enqueue timestamp */
__always_inline
-static int trace_enqueue(u32 tgid, u32 pid)
+static int trace_enqueue(struct task_struct *t)
{
- u64 ts;
+ u32 pid = t->pid;
+ u64 *ptr;
if (!pid || (targ_pid && targ_pid != pid))
return 0;
- ts = bpf_ktime_get_ns();
- bpf_map_update_elem(&start, &pid, &ts, 0);
+ ptr = bpf_task_storage_get(&start, t, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return 0;
+
+ *ptr = bpf_ktime_get_ns();
return 0;
}
@@ -43,7 +48,7 @@ int handle__sched_wakeup(u64 *ctx)
/* TP_PROTO(struct task_struct *p) */
struct task_struct *p = (void *)ctx[0];
- return trace_enqueue(p->tgid, p->pid);
+ return trace_enqueue(p);
}
SEC("tp_btf/sched_wakeup_new")
@@ -52,7 +57,7 @@ int handle__sched_wakeup_new(u64 *ctx)
/* TP_PROTO(struct task_struct *p) */
struct task_struct *p = (void *)ctx[0];
- return trace_enqueue(p->tgid, p->pid);
+ return trace_enqueue(p);
}
SEC("tp_btf/sched_switch")
@@ -70,12 +75,16 @@ int handle__sched_switch(u64 *ctx)
/* ivcsw: treat like an enqueue event and store timestamp */
if (prev->state == TASK_RUNNING)
- trace_enqueue(prev->tgid, prev->pid);
+ trace_enqueue(prev);
pid = next->pid;
+ /* For pid mismatch, save a bpf_task_storage_get */
+ if (!pid || (targ_pid && targ_pid != pid))
+ return 0;
+
/* fetch timestamp and calculate delta */
- tsp = bpf_map_lookup_elem(&start, &pid);
+ tsp = bpf_task_storage_get(&start, next, 0, 0);
if (!tsp)
return 0; /* missed enqueue */
@@ -91,7 +100,7 @@ int handle__sched_switch(u64 *ctx)
bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
&event, sizeof(event));
- bpf_map_delete_elem(&start, &pid);
+ bpf_task_storage_delete(&start, next);
return 0;
}
diff --git a/tools/build/Build.include b/tools/build/Build.include
index 585486e40995..2cf3b1bde86e 100644
--- a/tools/build/Build.include
+++ b/tools/build/Build.include
@@ -100,3 +100,27 @@ cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXX
## HOSTCC C flags
host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(KBUILD_HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj))
+
+# output directory for tests below
+TMPOUT = .tmp_$$$$
+
+# try-run
+# Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
+# Exit code chooses option. "$$TMP" serves as a temporary file and is
+# automatically cleaned up.
+try-run = $(shell set -e; \
+ TMP=$(TMPOUT)/tmp; \
+ mkdir -p $(TMPOUT); \
+ trap "rm -rf $(TMPOUT)" EXIT; \
+ if ($(1)) >/dev/null 2>&1; \
+ then echo "$(2)"; \
+ else echo "$(3)"; \
+ fi)
+
+# cc-option
+# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
+cc-option = $(call try-run, \
+ $(CC) -Werror $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+
+# delete partially updated (i.e. corrupted) files on error
+.DELETE_ON_ERROR:
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 74e255d58d8d..04a8e3db8a54 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -52,6 +52,7 @@ FEATURE_TESTS_BASIC := \
libpython-version \
libslang \
libslang-include-subdir \
+ libtraceevent \
libcrypto \
libunwind \
pthread-attr-setaffinity-np \
@@ -239,17 +240,24 @@ ifeq ($(VF),1)
feature_verbose := 1
endif
-ifeq ($(feature_display),1)
- $(info )
- $(info Auto-detecting system features:)
- $(foreach feat,$(FEATURE_DISPLAY),$(call feature_print_status,$(feat),))
- ifneq ($(feature_verbose),1)
+feature_display_entries = $(eval $(feature_display_entries_code))
+define feature_display_entries_code
+ ifeq ($(feature_display),1)
$(info )
+ $(info Auto-detecting system features:)
+ $(foreach feat,$(FEATURE_DISPLAY),$(call feature_print_status,$(feat),))
+ ifneq ($(feature_verbose),1)
+ $(info )
+ endif
endif
-endif
-ifeq ($(feature_verbose),1)
- TMP := $(filter-out $(FEATURE_DISPLAY),$(FEATURE_TESTS))
- $(foreach feat,$(TMP),$(call feature_print_status,$(feat),))
- $(info )
+ ifeq ($(feature_verbose),1)
+ TMP := $(filter-out $(FEATURE_DISPLAY),$(FEATURE_TESTS))
+ $(foreach feat,$(TMP),$(call feature_print_status,$(feat),))
+ $(info )
+ endif
+endef
+
+ifeq ($(FEATURE_DISPLAY_DEFERRED),)
+ $(call feature_display_entries)
endif
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 3e55edb3ea54..ec203e28407f 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -36,6 +36,7 @@ FILES= \
test-libpython-version.bin \
test-libslang.bin \
test-libslang-include-subdir.bin \
+ test-libtraceevent.bin \
test-libcrypto.bin \
test-libunwind.bin \
test-libunwind-debug-frame.bin \
@@ -196,6 +197,9 @@ $(OUTPUT)test-libslang.bin:
$(OUTPUT)test-libslang-include-subdir.bin:
$(BUILD) -lslang
+$(OUTPUT)test-libtraceevent.bin:
+ $(BUILD) -ltraceevent
+
$(OUTPUT)test-libcrypto.bin:
$(BUILD) -lcrypto
diff --git a/tools/build/feature/test-libtraceevent.c b/tools/build/feature/test-libtraceevent.c
new file mode 100644
index 000000000000..416b11ffd4b4
--- /dev/null
+++ b/tools/build/feature/test-libtraceevent.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <traceevent/trace-seq.h>
+
+int main(void)
+{
+ int rv = 0;
+ struct trace_seq s;
+ trace_seq_init(&s);
+ rv += !(s.state == TRACE_SEQ__GOOD);
+ trace_seq_destroy(&s);
+ return rv;
+}
diff --git a/tools/debugging/kernel-chktaint b/tools/debugging/kernel-chktaint
index 607b2b280945..719f18b1edf0 100755
--- a/tools/debugging/kernel-chktaint
+++ b/tools/debugging/kernel-chktaint
@@ -25,7 +25,7 @@ if [ "$1"x != "x" ]; then
elif [ $1 -ge 0 ] 2>/dev/null ; then
taint=$1
else
- echo "Error: Parameter '$1' not a positive interger. Aborting." >&2
+ echo "Error: Parameter '$1' not a positive integer. Aborting." >&2
exit 1
fi
else
diff --git a/tools/iio/Makefile b/tools/iio/Makefile
index 3de763d9ab70..5d12ac4e7f8f 100644
--- a/tools/iio/Makefile
+++ b/tools/iio/Makefile
@@ -27,6 +27,7 @@ include $(srctree)/tools/build/Makefile.include
#
$(OUTPUT)include/linux/iio: ../../include/uapi/linux/iio
mkdir -p $(OUTPUT)include/linux/iio 2>&1 || true
+ ln -sf $(CURDIR)/../../include/uapi/linux/iio/buffer.h $@
ln -sf $(CURDIR)/../../include/uapi/linux/iio/events.h $@
ln -sf $(CURDIR)/../../include/uapi/linux/iio/types.h $@
diff --git a/tools/iio/iio_event_monitor.c b/tools/iio/iio_event_monitor.c
index bb03859db89d..0076437f6e3f 100644
--- a/tools/iio/iio_event_monitor.c
+++ b/tools/iio/iio_event_monitor.c
@@ -14,6 +14,7 @@
#include <unistd.h>
#include <stdlib.h>
+#include <dirent.h>
#include <stdbool.h>
#include <stdio.h>
#include <errno.h>
@@ -280,22 +281,69 @@ static void print_event(struct iio_event_data *event)
printf("\n");
}
+/* Enable or disable events in sysfs if the knob is available */
+static void enable_events(char *dev_dir, int enable)
+{
+ const struct dirent *ent;
+ char evdir[256];
+ int ret;
+ DIR *dp;
+
+ snprintf(evdir, sizeof(evdir), FORMAT_EVENTS_DIR, dev_dir);
+ evdir[sizeof(evdir)-1] = '\0';
+
+ dp = opendir(evdir);
+ if (!dp) {
+ fprintf(stderr, "Enabling/disabling events: can't open %s\n",
+ evdir);
+ return;
+ }
+
+ while (ent = readdir(dp), ent) {
+ if (iioutils_check_suffix(ent->d_name, "_en")) {
+ printf("%sabling: %s\n",
+ enable ? "En" : "Dis",
+ ent->d_name);
+ ret = write_sysfs_int(ent->d_name, evdir,
+ enable);
+ if (ret < 0)
+ fprintf(stderr, "Failed to enable/disable %s\n",
+ ent->d_name);
+ }
+ }
+
+ if (closedir(dp) == -1) {
+		perror("Enabling/disabling events: "
+		       "Failed to close directory");
+ return;
+ }
+}
+
int main(int argc, char **argv)
{
struct iio_event_data event;
const char *device_name;
+ char *dev_dir_name = NULL;
char *chrdev_name;
int ret;
int dev_num;
int fd, event_fd;
-
- if (argc <= 1) {
- fprintf(stderr, "Usage: %s <device_name>\n", argv[0]);
+ bool all_events = false;
+
+ if (argc == 2) {
+ device_name = argv[1];
+ } else if (argc == 3) {
+ device_name = argv[2];
+ if (!strcmp(argv[1], "-a"))
+ all_events = true;
+ } else {
+ fprintf(stderr,
+ "Usage: iio_event_monitor [options] <device_name>\n"
+ "Listen and display events from IIO devices\n"
+ " -a Auto-activate all available events\n");
return -1;
}
- device_name = argv[1];
-
dev_num = find_type_by_name(device_name, "iio:device");
if (dev_num >= 0) {
printf("Found IIO device with name %s with device number %d\n",
@@ -303,6 +351,10 @@ int main(int argc, char **argv)
ret = asprintf(&chrdev_name, "/dev/iio:device%d", dev_num);
if (ret < 0)
return -ENOMEM;
+ /* Look up sysfs dir as well if we can */
+ ret = asprintf(&dev_dir_name, "%siio:device%d", iio_dir, dev_num);
+ if (ret < 0)
+ return -ENOMEM;
} else {
/*
* If we can't find an IIO device by name assume device_name is
@@ -313,6 +365,9 @@ int main(int argc, char **argv)
return -ENOMEM;
}
+ if (all_events && dev_dir_name)
+ enable_events(dev_dir_name, 1);
+
fd = open(chrdev_name, 0);
if (fd == -1) {
ret = -errno;
@@ -365,6 +420,10 @@ int main(int argc, char **argv)
perror("Failed to close event file");
error_free_chrdev_name:
+ /* Disable events after use */
+ if (all_events && dev_dir_name)
+ enable_events(dev_dir_name, 0);
+
free(chrdev_name);
return ret;
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 34d63bcebcd2..2491c54a5e4f 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -30,6 +30,8 @@
#include <inttypes.h>
#include <stdbool.h>
#include <signal.h>
+#include <sys/ioctl.h>
+#include <linux/iio/buffer.h>
#include "iio_utils.h"
/**
@@ -49,7 +51,7 @@ enum autochan {
* Has the side effect of filling the channels[i].location values used
* in processing the buffer output.
**/
-int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
+static int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
{
int bytes = 0;
int i = 0;
@@ -68,7 +70,7 @@ int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
return bytes;
}
-void print1byte(uint8_t input, struct iio_channel_info *info)
+static void print1byte(uint8_t input, struct iio_channel_info *info)
{
/*
* Shift before conversion to avoid sign extension
@@ -85,7 +87,7 @@ void print1byte(uint8_t input, struct iio_channel_info *info)
}
}
-void print2byte(uint16_t input, struct iio_channel_info *info)
+static void print2byte(uint16_t input, struct iio_channel_info *info)
{
/* First swap if incorrect endian */
if (info->be)
@@ -108,7 +110,7 @@ void print2byte(uint16_t input, struct iio_channel_info *info)
}
}
-void print4byte(uint32_t input, struct iio_channel_info *info)
+static void print4byte(uint32_t input, struct iio_channel_info *info)
{
/* First swap if incorrect endian */
if (info->be)
@@ -131,7 +133,7 @@ void print4byte(uint32_t input, struct iio_channel_info *info)
}
}
-void print8byte(uint64_t input, struct iio_channel_info *info)
+static void print8byte(uint64_t input, struct iio_channel_info *info)
{
/* First swap if incorrect endian */
if (info->be)
@@ -167,9 +169,8 @@ void print8byte(uint64_t input, struct iio_channel_info *info)
* to fill the location offsets.
* @num_channels: number of channels
**/
-void process_scan(char *data,
- struct iio_channel_info *channels,
- int num_channels)
+static void process_scan(char *data, struct iio_channel_info *channels,
+ int num_channels)
{
int k;
@@ -198,7 +199,7 @@ void process_scan(char *data,
printf("\n");
}
-static int enable_disable_all_channels(char *dev_dir_name, int enable)
+static int enable_disable_all_channels(char *dev_dir_name, int buffer_idx, int enable)
{
const struct dirent *ent;
char scanelemdir[256];
@@ -206,7 +207,7 @@ static int enable_disable_all_channels(char *dev_dir_name, int enable)
int ret;
snprintf(scanelemdir, sizeof(scanelemdir),
- FORMAT_SCAN_ELEMENTS_DIR, dev_dir_name);
+ FORMAT_SCAN_ELEMENTS_DIR, dev_dir_name, buffer_idx);
scanelemdir[sizeof(scanelemdir)-1] = '\0';
dp = opendir(scanelemdir);
@@ -238,12 +239,13 @@ static int enable_disable_all_channels(char *dev_dir_name, int enable)
return 0;
}
-void print_usage(void)
+static void print_usage(void)
{
fprintf(stderr, "Usage: generic_buffer [options]...\n"
"Capture, convert and output data from IIO device buffer\n"
" -a Auto-activate all available channels\n"
" -A Force-activate ALL channels\n"
+ " -b <n> The buffer which to open (by index), default 0\n"
" -c <n> Do n conversions, or loop forever if n < 0\n"
" -e Disable wait for event (new data)\n"
" -g Use trigger-less mode\n"
@@ -257,12 +259,13 @@ void print_usage(void)
" -w <n> Set delay between reads in us (event-less mode)\n");
}
-enum autochan autochannels = AUTOCHANNELS_DISABLED;
-char *dev_dir_name = NULL;
-char *buf_dir_name = NULL;
-bool current_trigger_set = false;
+static enum autochan autochannels = AUTOCHANNELS_DISABLED;
+static char *dev_dir_name = NULL;
+static char *buf_dir_name = NULL;
+static int buffer_idx = 0;
+static bool current_trigger_set = false;
-void cleanup(void)
+static void cleanup(void)
{
int ret;
@@ -287,21 +290,21 @@ void cleanup(void)
/* Disable channels if auto-enabled */
if (dev_dir_name && autochannels == AUTOCHANNELS_ACTIVE) {
- ret = enable_disable_all_channels(dev_dir_name, 0);
+ ret = enable_disable_all_channels(dev_dir_name, buffer_idx, 0);
if (ret)
fprintf(stderr, "Failed to disable all channels\n");
autochannels = AUTOCHANNELS_DISABLED;
}
}
-void sig_handler(int signum)
+static void sig_handler(int signum)
{
fprintf(stderr, "Caught signal %d\n", signum);
cleanup();
exit(-signum);
}
-void register_cleanup(void)
+static void register_cleanup(void)
{
struct sigaction sa = { .sa_handler = sig_handler };
const int signums[] = { SIGINT, SIGTERM, SIGABRT };
@@ -334,7 +337,9 @@ int main(int argc, char **argv)
unsigned long long j;
unsigned long toread;
int ret, c;
- int fp = -1;
+ struct stat st;
+ int fd = -1;
+ int buf_fd = -1;
int num_channels = 0;
char *trigger_name = NULL, *device_name = NULL;
@@ -353,7 +358,7 @@ int main(int argc, char **argv)
register_cleanup();
- while ((c = getopt_long(argc, argv, "aAc:egl:n:N:t:T:w:?", longopts,
+ while ((c = getopt_long(argc, argv, "aAb:c:egl:n:N:t:T:w:?", longopts,
NULL)) != -1) {
switch (c) {
case 'a':
@@ -362,7 +367,20 @@ int main(int argc, char **argv)
case 'A':
autochannels = AUTOCHANNELS_ENABLED;
force_autochannels = true;
- break;
+ break;
+ case 'b':
+ errno = 0;
+ buffer_idx = strtoll(optarg, &dummy, 10);
+ if (errno) {
+ ret = -errno;
+ goto error;
+ }
+ if (buffer_idx < 0) {
+ ret = -ERANGE;
+ goto error;
+ }
+
+ break;
case 'c':
errno = 0;
num_loops = strtoll(optarg, &dummy, 10);
@@ -519,7 +537,7 @@ int main(int argc, char **argv)
* Parse the files in scan_elements to identify what channels are
* present
*/
- ret = build_channel_array(dev_dir_name, &channels, &num_channels);
+ ret = build_channel_array(dev_dir_name, buffer_idx, &channels, &num_channels);
if (ret) {
fprintf(stderr, "Problem reading scan element information\n"
"diag %s\n", dev_dir_name);
@@ -536,7 +554,7 @@ int main(int argc, char **argv)
(autochannels == AUTOCHANNELS_ENABLED && force_autochannels)) {
fprintf(stderr, "Enabling all channels\n");
- ret = enable_disable_all_channels(dev_dir_name, 1);
+ ret = enable_disable_all_channels(dev_dir_name, buffer_idx, 1);
if (ret) {
fprintf(stderr, "Failed to enable all channels\n");
goto error;
@@ -545,7 +563,7 @@ int main(int argc, char **argv)
/* This flags that we need to disable the channels again */
autochannels = AUTOCHANNELS_ACTIVE;
- ret = build_channel_array(dev_dir_name, &channels,
+ ret = build_channel_array(dev_dir_name, buffer_idx, &channels,
&num_channels);
if (ret) {
fprintf(stderr, "Problem reading scan element "
@@ -566,7 +584,7 @@ int main(int argc, char **argv)
fprintf(stderr, "Enable channels manually in "
FORMAT_SCAN_ELEMENTS_DIR
"/*_en or pass -a to autoenable channels and "
- "try again.\n", dev_dir_name);
+ "try again.\n", dev_dir_name, buffer_idx);
ret = -ENOENT;
goto error;
}
@@ -577,12 +595,25 @@ int main(int argc, char **argv)
* be built rather than found.
*/
ret = asprintf(&buf_dir_name,
- "%siio:device%d/buffer", iio_dir, dev_num);
+ "%siio:device%d/buffer%d", iio_dir, dev_num, buffer_idx);
if (ret < 0) {
ret = -ENOMEM;
goto error;
}
+ if (stat(buf_dir_name, &st)) {
+ ret = -errno;
+ fprintf(stderr, "Could not stat() '%s', got error %d: %s\n",
+ buf_dir_name, errno, strerror(errno));
+ goto error;
+ }
+
+ if (!S_ISDIR(st.st_mode)) {
+ fprintf(stderr, "File '%s' is not a directory\n", buf_dir_name);
+ ret = -EFAULT;
+ goto error;
+ }
+
if (!notrigger) {
printf("%s %s\n", dev_dir_name, trigger_name);
/*
@@ -599,6 +630,35 @@ int main(int argc, char **argv)
}
}
+ ret = asprintf(&buffer_access, "/dev/iio:device%d", dev_num);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Attempt to open the access device non-blocking */
+ fd = open(buffer_access, O_RDONLY | O_NONBLOCK);
+ if (fd == -1) { /* TODO: If it isn't there make the node */
+ ret = -errno;
+ fprintf(stderr, "Failed to open %s\n", buffer_access);
+ goto error;
+ }
+
+ /* specify the buffer index for which we want an FD */
+ buf_fd = buffer_idx;
+
+ ret = ioctl(fd, IIO_BUFFER_GET_FD_IOCTL, &buf_fd);
+ if (ret == -1 || buf_fd == -1) {
+ ret = -errno;
+ if (ret == -ENODEV || ret == -EINVAL)
+ fprintf(stderr,
+ "Device does not have this many buffers\n");
+ else
+ fprintf(stderr, "Failed to retrieve buffer fd\n");
+
+ goto error;
+ }
+
/* Setup ring buffer parameters */
ret = write_sysfs_int("length", buf_dir_name, buf_len);
if (ret < 0)
@@ -608,7 +668,8 @@ int main(int argc, char **argv)
ret = write_sysfs_int("enable", buf_dir_name, 1);
if (ret < 0) {
fprintf(stderr,
- "Failed to enable buffer: %s\n", strerror(-ret));
+ "Failed to enable buffer '%s': %s\n",
+ buf_dir_name, strerror(-ret));
goto error;
}
@@ -619,24 +680,30 @@ int main(int argc, char **argv)
goto error;
}
- ret = asprintf(&buffer_access, "/dev/iio:device%d", dev_num);
- if (ret < 0) {
- ret = -ENOMEM;
- goto error;
+ /*
+ * This check is done purely as a sanity test and should be omitted
+ * in normal applications.
+ * If this is buffer0, we check that we get EBUSY after this point.
+ */
+ if (buffer_idx == 0) {
+ errno = 0;
+ read_size = read(fd, data, 1);
+ if (read_size > -1 || errno != EBUSY) {
+ ret = -EFAULT;
+ perror("Reading from '%s' should not be possible after ioctl()");
+ goto error;
+ }
}
- /* Attempt to open non blocking the access dev */
- fp = open(buffer_access, O_RDONLY | O_NONBLOCK);
- if (fp == -1) { /* TODO: If it isn't there make the node */
- ret = -errno;
- fprintf(stderr, "Failed to open %s\n", buffer_access);
- goto error;
- }
+ /* now close the main chardev FD; the buffer FD keeps working */
+ if (close(fd) == -1)
+ perror("Failed to close character device file");
+ fd = -1;
for (j = 0; j < num_loops || num_loops < 0; j++) {
if (!noevents) {
struct pollfd pfd = {
- .fd = fp,
+ .fd = buf_fd,
.events = POLLIN,
};
@@ -654,7 +721,7 @@ int main(int argc, char **argv)
toread = 64;
}
- read_size = read(fp, data, toread * scan_size);
+ read_size = read(buf_fd, data, toread * scan_size);
if (read_size < 0) {
if (errno == EAGAIN) {
fprintf(stderr, "nothing available\n");
@@ -671,7 +738,9 @@ int main(int argc, char **argv)
error:
cleanup();
- if (fp >= 0 && close(fp) == -1)
+ if (fd >= 0 && close(fd) == -1)
+ perror("Failed to close character device");
+ if (buf_fd >= 0 && close(buf_fd) == -1)
perror("Failed to close buffer");
free(buffer_access);
free(data);
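
Pulling the new pieces together, here is a hedged sketch of the buffer-FD flow introduced above; the device path and buffer index are illustrative, and IIO_BUFFER_GET_FD_IOCTL comes from the linux/iio/buffer.h header this patch starts including:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/iio/buffer.h>

/* return an FD dedicated to buffer 'idx' of iio:device<dev_num>, or -1 */
static int open_iio_buffer_fd(int dev_num, int idx)
{
	char path[64];
	int fd, buf_fd = idx;

	snprintf(path, sizeof(path), "/dev/iio:device%d", dev_num);
	fd = open(path, O_RDONLY | O_NONBLOCK);
	if (fd == -1)
		return -1;
	/* on success, the kernel replaces buf_fd with a new descriptor */
	if (ioctl(fd, IIO_BUFFER_GET_FD_IOCTL, &buf_fd) == -1)
		buf_fd = -1;
	close(fd);	/* the buffer FD remains usable on its own */
	return buf_fd;
}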
diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
index 7399eb7f1378..aadee6d34c74 100644
--- a/tools/iio/iio_utils.c
+++ b/tools/iio/iio_utils.c
@@ -77,15 +77,17 @@ int iioutils_break_up_name(const char *full_name, char **generic_name)
* @mask: output a bit mask for the raw data
* @be: output if data in big endian
* @device_dir: the IIO device directory
+ * @buffer_idx: the IIO buffer index
* @name: the channel name
* @generic_name: the channel type name
*
* Returns a value >= 0 on success, otherwise a negative error code.
**/
-int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
- unsigned *shift, uint64_t *mask, unsigned *be,
- const char *device_dir, const char *name,
- const char *generic_name)
+static int iioutils_get_type(unsigned int *is_signed, unsigned int *bytes,
+ unsigned int *bits_used, unsigned int *shift,
+ uint64_t *mask, unsigned int *be,
+ const char *device_dir, int buffer_idx,
+ const char *name, const char *generic_name)
{
FILE *sysfsfp;
int ret;
@@ -95,7 +97,7 @@ int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
unsigned padint;
const struct dirent *ent;
- ret = asprintf(&scan_el_dir, FORMAT_SCAN_ELEMENTS_DIR, device_dir);
+ ret = asprintf(&scan_el_dir, FORMAT_SCAN_ELEMENTS_DIR, device_dir, buffer_idx);
if (ret < 0)
return -ENOMEM;
@@ -303,12 +305,13 @@ void bsort_channel_array_by_index(struct iio_channel_info *ci_array, int cnt)
/**
* build_channel_array() - function to figure out what channels are present
* @device_dir: the IIO device directory in sysfs
+ * @buffer_idx: the IIO buffer for this channel array
* @ci_array: output the resulting array of iio_channel_info
* @counter: output the amount of array elements
*
* Returns 0 on success, otherwise a negative error code.
**/
-int build_channel_array(const char *device_dir,
+int build_channel_array(const char *device_dir, int buffer_idx,
struct iio_channel_info **ci_array, int *counter)
{
DIR *dp;
@@ -321,7 +324,7 @@ int build_channel_array(const char *device_dir,
char *filename;
*counter = 0;
- ret = asprintf(&scan_el_dir, FORMAT_SCAN_ELEMENTS_DIR, device_dir);
+ ret = asprintf(&scan_el_dir, FORMAT_SCAN_ELEMENTS_DIR, device_dir, buffer_idx);
if (ret < 0)
return -ENOMEM;
@@ -502,6 +505,7 @@ int build_channel_array(const char *device_dir,
&current->mask,
&current->be,
device_dir,
+ buffer_idx,
current->name,
current->generic_name);
if (ret < 0)
diff --git a/tools/iio/iio_utils.h b/tools/iio/iio_utils.h
index 74bde4fde2c8..663c94a6c705 100644
--- a/tools/iio/iio_utils.h
+++ b/tools/iio/iio_utils.h
@@ -12,7 +12,8 @@
/* Made up value to limit allocation sizes */
#define IIO_MAX_NAME_LENGTH 64
-#define FORMAT_SCAN_ELEMENTS_DIR "%s/scan_elements"
+#define FORMAT_SCAN_ELEMENTS_DIR "%s/buffer%d"
+#define FORMAT_EVENTS_DIR "%s/events"
#define FORMAT_TYPE_FILE "%s_type"
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
@@ -57,15 +58,11 @@ static inline int iioutils_check_suffix(const char *str, const char *suffix)
}
int iioutils_break_up_name(const char *full_name, char **generic_name);
-int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
- unsigned *shift, uint64_t *mask, unsigned *be,
- const char *device_dir, const char *name,
- const char *generic_name);
int iioutils_get_param_float(float *output, const char *param_name,
const char *device_dir, const char *name,
const char *generic_name);
void bsort_channel_array_by_index(struct iio_channel_info *ci_array, int cnt);
-int build_channel_array(const char *device_dir,
+int build_channel_array(const char *device_dir, int buffer_idx,
struct iio_channel_info **ci_array, int *counter);
int find_type_by_name(const char *name, const char *type);
int write_sysfs_int(const char *filename, const char *basedir, int val);
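
With the reworked format string, the scan-elements directory is now derived from both the device directory and a buffer index. A quick illustration (paths are examples only):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

#define FORMAT_SCAN_ELEMENTS_DIR "%s/buffer%d"

int main(void)
{
	char *dir;

	if (asprintf(&dir, FORMAT_SCAN_ELEMENTS_DIR,
		     "/sys/bus/iio/devices/iio:device0", 1) < 0)
		return 1;
	puts(dir);	/* /sys/bus/iio/devices/iio:device0/buffer1 */
	free(dir);
	return 0;
}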
diff --git a/tools/include/asm-generic/hugetlb_encode.h b/tools/include/asm-generic/hugetlb_encode.h
index e4732d3c2998..4f3d5aaa11f5 100644
--- a/tools/include/asm-generic/hugetlb_encode.h
+++ b/tools/include/asm-generic/hugetlb_encode.h
@@ -20,13 +20,16 @@
#define HUGETLB_FLAG_ENCODE_SHIFT 26
#define HUGETLB_FLAG_ENCODE_MASK 0x3f
+#define HUGETLB_FLAG_ENCODE_16KB (14 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_64KB (16 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_512KB (19 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_1MB (20 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_32MB (25 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512MB (29 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT)
diff --git a/tools/include/linux/kconfig.h b/tools/include/linux/kconfig.h
new file mode 100644
index 000000000000..1555a0c4f345
--- /dev/null
+++ b/tools/include/linux/kconfig.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_LINUX_KCONFIG_H
+#define _TOOLS_LINUX_KCONFIG_H
+
+/* CONFIG_CC_VERSION_TEXT (Do not delete this comment. See help in Kconfig) */
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define __BIG_ENDIAN 4321
+#else
+#define __LITTLE_ENDIAN 1234
+#endif
+
+#define __ARG_PLACEHOLDER_1 0,
+#define __take_second_arg(__ignored, val, ...) val
+
+/*
+ * The use of "&&" / "||" is limited in certain expressions.
+ * The following enable to calculate "and" / "or" with macro expansion only.
+ */
+#define __and(x, y) ___and(x, y)
+#define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y)
+#define ____and(arg1_or_junk, y) __take_second_arg(arg1_or_junk y, 0)
+
+#define __or(x, y) ___or(x, y)
+#define ___or(x, y) ____or(__ARG_PLACEHOLDER_##x, y)
+#define ____or(arg1_or_junk, y) __take_second_arg(arg1_or_junk 1, y)
+
+/*
+ * Helper macros to use CONFIG_ options in C/CPP expressions. Note that
+ * these only work with boolean and tristate options.
+ */
+
+/*
+ * Getting something that works in C and CPP for an arg that may or may
+ * not be defined is tricky. Here, if we have "#define CONFIG_BOOGER 1"
+ * we match on the placeholder define, insert the "0," for arg1 and generate
+ * the triplet (0, 1, 0). Then the last step cherry picks the 2nd arg (a one).
+ * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
+ * the last step cherry picks the 2nd arg, we get a zero.
+ */
+#define __is_defined(x) ___is_defined(x)
+#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
+#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
+
+/*
+ * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
+ * otherwise. For boolean options, this is equivalent to
+ * IS_ENABLED(CONFIG_FOO).
+ */
+#define IS_BUILTIN(option) __is_defined(option)
+
+/*
+ * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
+ * otherwise.
+ */
+#define IS_MODULE(option) __is_defined(option##_MODULE)
+
+/*
+ * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
+ * code can call a function defined in code compiled based on CONFIG_FOO.
+ * This is similar to IS_ENABLED(), but returns false when invoked from
+ * built-in code when CONFIG_FOO is set to 'm'.
+ */
+#define IS_REACHABLE(option) __or(IS_BUILTIN(option), \
+ __and(IS_MODULE(option), __is_defined(MODULE)))
+
+/*
+ * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
+ * 0 otherwise.
+ */
+#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
+
+#endif /* _TOOLS_LINUX_KCONFIG_H */
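
A small sanity check of the placeholder trick; the include path assumes a build where this header is visible as linux/kconfig.h, and the CONFIG_ symbols are made up for the demonstration:

#include <stdio.h>
#include <linux/kconfig.h>

#define CONFIG_FOO 1		/* a 'y' option */
#define CONFIG_BAR_MODULE 1	/* a tristate option set to 'm' */

int main(void)
{
	printf("%d %d %d\n",
	       IS_ENABLED(CONFIG_FOO),	/* 1: defined to 1 */
	       IS_ENABLED(CONFIG_BAR),	/* 1: via CONFIG_BAR_MODULE */
	       IS_BUILTIN(CONFIG_BAR));	/* 0: only the module flavor is set */
	return 0;
}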
diff --git a/tools/include/linux/math64.h b/tools/include/linux/math64.h
new file mode 100644
index 000000000000..4ad45d5943dc
--- /dev/null
+++ b/tools/include/linux/math64.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MATH64_H
+#define _LINUX_MATH64_H
+
+#include <linux/types.h>
+
+#ifdef __x86_64__
+static inline u64 mul_u64_u64_div64(u64 a, u64 b, u64 c)
+{
+ u64 q;
+
+ asm ("mulq %2; divq %3" : "=a" (q)
+ : "a" (a), "rm" (b), "rm" (c)
+ : "rdx");
+
+ return q;
+}
+#define mul_u64_u64_div64 mul_u64_u64_div64
+#endif
+
+#ifdef __SIZEOF_INT128__
+static inline u64 mul_u64_u32_shr(u64 a, u32 b, unsigned int shift)
+{
+ return (u64)(((unsigned __int128)a * b) >> shift);
+}
+
+#else
+
+#ifdef __i386__
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+ u32 high, low;
+
+ asm ("mull %[b]" : "=a" (low), "=d" (high)
+ : [a] "a" (a), [b] "rm" (b) );
+
+ return low | ((u64)high) << 32;
+}
+#else
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+ return (u64)a * b;
+}
+#endif
+
+static inline u64 mul_u64_u32_shr(u64 a, u32 b, unsigned int shift)
+{
+ u32 ah, al;
+ u64 ret;
+
+ al = a;
+ ah = a >> 32;
+
+ ret = mul_u32_u32(al, b) >> shift;
+ if (ah)
+ ret += mul_u32_u32(ah, b) << (32 - shift);
+
+ return ret;
+}
+
+#endif /* __SIZEOF_INT128__ */
+
+#ifndef mul_u64_u64_div64
+static inline u64 mul_u64_u64_div64(u64 a, u64 b, u64 c)
+{
+ u64 quot, rem;
+
+ quot = a / c;
+ rem = a % c;
+
+ return quot * b + (rem * b) / c;
+}
+#endif
+
+#endif /* _LINUX_MATH64_H */
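
The generic fallback rests on the identity a*b/c = (a/c)*b + ((a%c)*b)/c, which keeps the intermediates within 64 bits as long as (a%c)*b does not overflow. A hedged usage sketch, assuming the header is reachable as linux/math64.h in a tools build:

#include <stdio.h>
#include <linux/math64.h>

int main(void)
{
	/* scale 19.2 MHz timer ticks to nanoseconds without needing
	 * a 128-bit intermediate product */
	u64 ticks = 123456789ULL;
	u64 ns = mul_u64_u64_div64(ticks, 1000000000ULL, 19200000ULL);

	printf("%llu ns\n", (unsigned long long)ns);
	return 0;
}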
diff --git a/tools/include/linux/static_call_types.h b/tools/include/linux/static_call_types.h
index ae5662d368b9..5a00b8b2cf9f 100644
--- a/tools/include/linux/static_call_types.h
+++ b/tools/include/linux/static_call_types.h
@@ -58,11 +58,25 @@ struct static_call_site {
__raw_static_call(name); \
})
+struct static_call_key {
+ void *func;
+ union {
+ /* bit 0: 0 = mods, 1 = sites */
+ unsigned long type;
+ struct static_call_mod *mods;
+ struct static_call_site *sites;
+ };
+};
+
#else /* !CONFIG_HAVE_STATIC_CALL_INLINE */
#define __STATIC_CALL_ADDRESSABLE(name)
#define __static_call(name) __raw_static_call(name)
+struct static_call_key {
+ void *func;
+};
+
#endif /* CONFIG_HAVE_STATIC_CALL_INLINE */
#ifdef MODULE
@@ -77,6 +91,10 @@ struct static_call_site {
#else
+struct static_call_key {
+ void *func;
+};
+
#define static_call(name) \
((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
diff --git a/tools/include/linux/types.h b/tools/include/linux/types.h
index e9c5a215837d..6e14a533ab4e 100644
--- a/tools/include/linux/types.h
+++ b/tools/include/linux/types.h
@@ -61,6 +61,9 @@ typedef __u32 __bitwise __be32;
typedef __u64 __bitwise __le64;
typedef __u64 __bitwise __be64;
+typedef __u16 __bitwise __sum16;
+typedef __u32 __bitwise __wsum;
+
typedef struct {
int counter;
} atomic_t;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 79c893310492..ec6d85a81744 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -93,7 +93,738 @@ union bpf_iter_link_info {
} map;
};
-/* BPF syscall commands, see bpf(2) man-page for details. */
+/* BPF syscall commands, see bpf(2) man-page for more details. */
+/**
+ * DOC: eBPF Syscall Preamble
+ *
+ * The operation to be performed by the **bpf**\ () system call is determined
+ * by the *cmd* argument. Each operation takes an accompanying argument,
+ * provided via *attr*, which is a pointer to a union of type *bpf_attr* (see
+ * below). The size argument is the size of the union pointed to by *attr*.
+ */
+/**
+ * DOC: eBPF Syscall Commands
+ *
+ * BPF_MAP_CREATE
+ * Description
+ * Create a map and return a file descriptor that refers to the
+ * map. The close-on-exec file descriptor flag (see **fcntl**\ (2))
+ * is automatically enabled for the new file descriptor.
+ *
+ * Applying **close**\ (2) to the file descriptor returned by
+ * **BPF_MAP_CREATE** will delete the map (but see NOTES).
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_MAP_LOOKUP_ELEM
+ * Description
+ * Look up an element with a given *key* in the map referred to
+ * by the file descriptor *map_fd*.
+ *
+ * The *flags* argument may be specified as one of the
+ * following:
+ *
+ * **BPF_F_LOCK**
+ * Look up the value of a spin-locked map without
+ * returning the lock. This must be specified if the
+ * elements contain a spinlock.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_UPDATE_ELEM
+ * Description
+ * Create or update an element (key/value pair) in a specified map.
+ *
+ * The *flags* argument should be specified as one of the
+ * following:
+ *
+ * **BPF_ANY**
+ * Create a new element or update an existing element.
+ * **BPF_NOEXIST**
+ * Create a new element only if it did not exist.
+ * **BPF_EXIST**
+ * Update an existing element.
+ * **BPF_F_LOCK**
+ * Update a spin_lock-ed map element.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**,
+ * **E2BIG**, **EEXIST**, or **ENOENT**.
+ *
+ * **E2BIG**
+ * The number of elements in the map reached the
+ * *max_entries* limit specified at map creation time.
+ * **EEXIST**
+ * If *flags* specifies **BPF_NOEXIST** and the element
+ * with *key* already exists in the map.
+ * **ENOENT**
+ * If *flags* specifies **BPF_EXIST** and the element with
+ * *key* does not exist in the map.
+ *
+ * BPF_MAP_DELETE_ELEM
+ * Description
+ * Look up and delete an element by key in a specified map.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_GET_NEXT_KEY
+ * Description
+ * Look up an element by key in a specified map and return the key
+ * of the next element. Can be used to iterate over all elements
+ * in the map.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * The following cases can be used to iterate over all elements of
+ * the map:
+ *
+ * * If *key* is not found, the operation returns zero and sets
+ * the *next_key* pointer to the key of the first element.
+ * * If *key* is found, the operation returns zero and sets the
+ * *next_key* pointer to the key of the next element.
+ * * If *key* is the last element, returns -1 and *errno* is set
+ * to **ENOENT**.
+ *
+ * May set *errno* to **ENOMEM**, **EFAULT**, **EPERM**, or
+ * **EINVAL** on error.
+ *
+ * BPF_PROG_LOAD
+ * Description
+ * Verify and load an eBPF program, returning a new file
+ * descriptor associated with the program.
+ *
+ * Applying **close**\ (2) to the file descriptor returned by
+ * **BPF_PROG_LOAD** will unload the eBPF program (but see NOTES).
+ *
+ * The close-on-exec file descriptor flag (see **fcntl**\ (2)) is
+ * automatically enabled for the new file descriptor.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_OBJ_PIN
+ * Description
+ * Pin an eBPF program or map referred by the specified *bpf_fd*
+ * to the provided *pathname* on the filesystem.
+ *
+ * The *pathname* argument must not contain a dot (".").
+ *
+ * On success, *pathname* retains a reference to the eBPF object,
+ * preventing deallocation of the object when the original
+ * *bpf_fd* is closed. This allows the eBPF object to live beyond
+ * **close**\ (\ *bpf_fd*\ ), and hence the lifetime of the parent
+ * process.
+ *
+ * Applying **unlink**\ (2) or similar calls to the *pathname*
+ * unpins the object from the filesystem, removing the reference.
+ * If no other file descriptors or filesystem nodes refer to the
+ * same object, it will be deallocated (see NOTES).
+ *
+ * The filesystem type for the parent directory of *pathname* must
+ * be **BPF_FS_MAGIC**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_OBJ_GET
+ * Description
+ * Open a file descriptor for the eBPF object pinned to the
+ * specified *pathname*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_PROG_ATTACH
+ * Description
+ * Attach an eBPF program to a *target_fd* at the specified
+ * *attach_type* hook.
+ *
+ * The *attach_type* specifies the eBPF attachment point to
+ * attach the program to, and must be one of *bpf_attach_type*
+ * (see below).
+ *
+ * The *attach_bpf_fd* must be a valid file descriptor for a
+ * loaded eBPF program of a cgroup, flow dissector, LIRC, sockmap
+ * or sock_ops type corresponding to the specified *attach_type*.
+ *
+ * The *target_fd* must be a valid file descriptor for a kernel
+ * object which depends on the attach type of *attach_bpf_fd*:
+ *
+ * **BPF_PROG_TYPE_CGROUP_DEVICE**,
+ * **BPF_PROG_TYPE_CGROUP_SKB**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
+ * **BPF_PROG_TYPE_CGROUP_SOCKOPT**,
+ * **BPF_PROG_TYPE_CGROUP_SYSCTL**,
+ * **BPF_PROG_TYPE_SOCK_OPS**
+ *
+ * Control Group v2 hierarchy with the eBPF controller
+ * enabled. Requires the kernel to be compiled with
+ * **CONFIG_CGROUP_BPF**.
+ *
+ * **BPF_PROG_TYPE_FLOW_DISSECTOR**
+ *
+ * Network namespace (eg /proc/self/ns/net).
+ *
+ * **BPF_PROG_TYPE_LIRC_MODE2**
+ *
+ * LIRC device path (eg /dev/lircN). Requires the kernel
+ * to be compiled with **CONFIG_BPF_LIRC_MODE2**.
+ *
+ * **BPF_PROG_TYPE_SK_SKB**,
+ * **BPF_PROG_TYPE_SK_MSG**
+ *
+ * eBPF map of socket type (eg **BPF_MAP_TYPE_SOCKHASH**).
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_DETACH
+ * Description
+ * Detach the eBPF program associated with the *target_fd* at the
+ * hook specified by *attach_type*. The program must have been
+ * previously attached using **BPF_PROG_ATTACH**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_TEST_RUN
+ * Description
+ * Run the eBPF program associated with the *prog_fd* a *repeat*
+ * number of times against a provided program context *ctx_in* and
+ * data *data_in*, and return the modified program context
+ * *ctx_out*, *data_out* (for example, packet data), result of the
+ * execution *retval*, and *duration* of the test run.
+ *
+ * The sizes of the buffers provided as input and output
+ * parameters *ctx_in*, *ctx_out*, *data_in*, and *data_out* must
+ * be provided in the corresponding variables *ctx_size_in*,
+ * *ctx_size_out*, *data_size_in*, and/or *data_size_out*. If any
+ * of these parameters are not provided (ie set to NULL), the
+ * corresponding size field must be zero.
+ *
+ * Some program types have particular requirements:
+ *
+ * **BPF_PROG_TYPE_SK_LOOKUP**
+ * *data_in* and *data_out* must be NULL.
+ *
+ * **BPF_PROG_TYPE_XDP**
+ * *ctx_in* and *ctx_out* must be NULL.
+ *
+ * **BPF_PROG_TYPE_RAW_TRACEPOINT**,
+ * **BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE**
+ *
+ * *ctx_out*, *data_in* and *data_out* must be NULL.
+ * *repeat* must be zero.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * **ENOSPC**
+ * Either *data_size_out* or *ctx_size_out* is too small.
+ * **ENOTSUPP**
+ * This command is not supported by the program type of
+ * the program referred to by *prog_fd*.
+ *
+ * BPF_PROG_GET_NEXT_ID
+ * Description
+ * Fetch the next eBPF program currently loaded into the kernel.
+ *
+ * Looks for the eBPF program with an id greater than *start_id*
+ * and updates *next_id* on success. If no other eBPF programs
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_MAP_GET_NEXT_ID
+ * Description
+ * Fetch the next eBPF map currently loaded into the kernel.
+ *
+ * Looks for the eBPF map with an id greater than *start_id*
+ * and updates *next_id* on success. If no other eBPF maps
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_PROG_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the eBPF program corresponding to
+ * *prog_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_MAP_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the eBPF map corresponding to
+ * *map_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_OBJ_GET_INFO_BY_FD
+ * Description
+ * Obtain information about the eBPF object corresponding to
+ * *bpf_fd*.
+ *
+ * Populates up to *info_len* bytes of *info*, which will be in
+ * one of the following formats depending on the eBPF object type
+ * of *bpf_fd*:
+ *
+ * * **struct bpf_prog_info**
+ * * **struct bpf_map_info**
+ * * **struct bpf_btf_info**
+ * * **struct bpf_link_info**
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_QUERY
+ * Description
+ * Obtain information about eBPF programs associated with the
+ * specified *attach_type* hook.
+ *
+ * The *target_fd* must be a valid file descriptor for a kernel
+ * object which depends on the attach type of *attach_bpf_fd*:
+ *
+ * **BPF_PROG_TYPE_CGROUP_DEVICE**,
+ * **BPF_PROG_TYPE_CGROUP_SKB**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
+ * **BPF_PROG_TYPE_CGROUP_SOCKOPT**,
+ * **BPF_PROG_TYPE_CGROUP_SYSCTL**,
+ * **BPF_PROG_TYPE_SOCK_OPS**
+ *
+ * Control Group v2 hierarchy with the eBPF controller
+ * enabled. Requires the kernel to be compiled with
+ * **CONFIG_CGROUP_BPF**.
+ *
+ * **BPF_PROG_TYPE_FLOW_DISSECTOR**
+ *
+ * Network namespace (eg /proc/self/ns/net).
+ *
+ * **BPF_PROG_TYPE_LIRC_MODE2**
+ *
+ * LIRC device path (eg /dev/lircN). Requires the kernel
+ * to be compiled with **CONFIG_BPF_LIRC_MODE2**.
+ *
+ * **BPF_PROG_QUERY** always fetches the number of programs
+ * attached and the *attach_flags* which were used to attach those
+ * programs. Additionally, if *prog_ids* is nonzero and the number
+ * of attached programs is less than *prog_cnt*, populates
+ * *prog_ids* with the eBPF program ids of the programs attached
+ * at *target_fd*.
+ *
+ * The following flags may alter the result:
+ *
+ * **BPF_F_QUERY_EFFECTIVE**
+ * Only return information regarding programs which are
+ * currently effective at the specified *target_fd*.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_RAW_TRACEPOINT_OPEN
+ * Description
+ * Attach an eBPF program to a tracepoint *name* to access kernel
+ * internal arguments of the tracepoint in their raw form.
+ *
+ * The *prog_fd* must be a valid file descriptor associated with
+ * a loaded eBPF program of type **BPF_PROG_TYPE_RAW_TRACEPOINT**.
+ *
+ * No ABI guarantees are made about the content of tracepoint
+ * arguments exposed to the corresponding eBPF program.
+ *
+ * Applying **close**\ (2) to the file descriptor returned by
+ * **BPF_RAW_TRACEPOINT_OPEN** will detach the eBPF program from
+ * the tracepoint (but see NOTES).
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_BTF_LOAD
+ * Description
+ * Verify and load BPF Type Format (BTF) metadata into the kernel,
+ * returning a new file descriptor associated with the metadata.
+ * BTF is described in more detail at
+ * https://www.kernel.org/doc/html/latest/bpf/btf.html.
+ *
+ * The *btf* parameter must point to valid memory providing
+ * *btf_size* bytes of BTF binary metadata.
+ *
+ * The returned file descriptor can be passed to other **bpf**\ ()
+ * subcommands such as **BPF_PROG_LOAD** or **BPF_MAP_CREATE** to
+ * associate the BTF with those objects.
+ *
+ * Similar to **BPF_PROG_LOAD**, **BPF_BTF_LOAD** has optional
+ * parameters to specify a *btf_log_buf*, *btf_log_size* and
+ * *btf_log_level* which allow the kernel to return freeform log
+ * output regarding the BTF verification process.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_BTF_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the BPF Type Format (BTF)
+ * corresponding to *btf_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_TASK_FD_QUERY
+ * Description
+ * Obtain information about eBPF programs associated with the
+ * target process identified by *pid* and *fd*.
+ *
+ * If the *pid* and *fd* are associated with a tracepoint, kprobe
+ * or uprobe perf event, then the *prog_id* and *fd_type* will
+ * be populated with the eBPF program id and file descriptor type
+ * of type **bpf_task_fd_type**. If associated with a kprobe or
+ * uprobe, the *probe_offset* and *probe_addr* will also be
+ * populated. Optionally, if *buf* is provided, then up to
+ * *buf_len* bytes of *buf* will be populated with the name of
+ * the tracepoint, kprobe or uprobe.
+ *
+ * The resulting *prog_id* may be introspected in deeper detail
+ * using **BPF_PROG_GET_FD_BY_ID** and **BPF_OBJ_GET_INFO_BY_FD**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_LOOKUP_AND_DELETE_ELEM
+ * Description
+ * Look up an element with the given *key* in the map referred to
+ * by the file descriptor *fd*, and if found, delete the element.
+ *
+ * The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types
+ * implement this command as a "pop" operation, deleting the top
+ * element rather than one corresponding to *key*.
+ * The *key* and *key_len* parameters should be zeroed when
+ * issuing this operation for these map types.
+ *
+ * This command is only valid for the following map types:
+ * * **BPF_MAP_TYPE_QUEUE**
+ * * **BPF_MAP_TYPE_STACK**
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_FREEZE
+ * Description
+ * Freeze the permissions of the specified map.
+ *
+ * Write permissions may be frozen by passing zero *flags*.
+ * Upon success, no future syscall invocations may alter the
+ * map state of *map_fd*. Write operations from eBPF programs
+ * are still possible for a frozen map.
+ *
+ * Not supported for maps of type **BPF_MAP_TYPE_STRUCT_OPS**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_BTF_GET_NEXT_ID
+ * Description
+ * Fetch the next BPF Type Format (BTF) object currently loaded
+ * into the kernel.
+ *
+ * Looks for the BTF object with an id greater than *start_id*
+ * and updates *next_id* on success. If no other BTF objects
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_MAP_LOOKUP_BATCH
+ * Description
+ * Iterate and fetch multiple elements in a map.
+ *
+ * Two opaque values are used to manage batch operations,
+ * *in_batch* and *out_batch*. Initially, *in_batch* must be set
+ * to NULL to begin the batched operation. After each subsequent
+ * **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
+ * *out_batch* as the *in_batch* for the next operation to
+ * continue iteration from the current point.
+ *
+ * The *keys* and *values* are output parameters which must point
+ * to memory large enough to hold *count* items based on the key
+ * and value size of the map *map_fd*. The *keys* buffer must be
+ * of *key_size* * *count*. The *values* buffer must be of
+ * *value_size* * *count*.
+ *
+ * The *elem_flags* argument may be specified as one of the
+ * following:
+ *
+ * **BPF_F_LOCK**
+ * Look up the value of a spin-locked map without
+ * returning the lock. This must be specified if the
+ * elements contain a spinlock.
+ *
+ * On success, *count* elements from the map are copied into the
+ * user buffer, with the keys copied into *keys* and the values
+ * copied into the corresponding indices in *values*.
+ *
+ * If an error is returned and *errno* is not **EFAULT**, *count*
+ * is set to the number of successfully processed elements.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * May set *errno* to **ENOSPC** to indicate that *keys* or
+ * *values* is too small to dump an entire bucket during
+ * iteration of a hash-based map type.
+ *
+ * BPF_MAP_LOOKUP_AND_DELETE_BATCH
+ * Description
+ * Iterate and delete all elements in a map.
+ *
+ * This operation has the same behavior as
+ * **BPF_MAP_LOOKUP_BATCH** with two exceptions:
+ *
+ * * Every element that is successfully returned is also deleted
+ * from the map. This is at most *count* elements. Note that
+ * *count* is both an input and an output parameter.
+ * * Upon returning with *errno* set to **EFAULT**, up to
+ * *count* elements may be deleted without returning the keys
+ * and values of the deleted elements.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_UPDATE_BATCH
+ * Description
+ * Update multiple elements in a map by *key*.
+ *
+ * The *keys* and *values* are input parameters which must point
+ * to memory large enough to hold *count* items based on the key
+ * and value size of the map *map_fd*. The *keys* buffer must be
+ * of *key_size* * *count*. The *values* buffer must be of
+ * *value_size* * *count*.
+ *
+ * Each element specified in *keys* is sequentially updated to the
+ * value in the corresponding index in *values*. The *in_batch*
+ * and *out_batch* parameters are ignored and should be zeroed.
+ *
+ * The *elem_flags* argument should be specified as one of the
+ * following:
+ *
+ * **BPF_ANY**
+ * Create new elements or update existing elements.
+ * **BPF_NOEXIST**
+ * Create new elements only if they do not exist.
+ * **BPF_EXIST**
+ * Update existing elements.
+ * **BPF_F_LOCK**
+ * Update spin_lock-ed map elements. This must be
+ * specified if the map value contains a spinlock.
+ *
+ * On success, *count* elements from the map are updated.
+ *
+ * If an error is returned and *errno* is not **EFAULT**, *count*
+ * is set to the number of successfully processed elements.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, or
+ * **E2BIG**. **E2BIG** indicates that the number of elements in
+ * the map reached the *max_entries* limit specified at map
+ * creation time.
+ *
+ * May set *errno* to one of the following error codes under
+ * specific circumstances:
+ *
+ * **EEXIST**
+ * If *flags* specifies **BPF_NOEXIST** and the element
+ * with *key* already exists in the map.
+ * **ENOENT**
+ * If *flags* specifies **BPF_EXIST** and the element with
+ * *key* does not exist in the map.
+ *
+ * BPF_MAP_DELETE_BATCH
+ * Description
+ * Delete multiple elements in a map by *key*.
+ *
+ * The *keys* parameter is an input parameter which must point
+ * to memory large enough to hold *count* items based on the key
+ * size of the map *map_fd*, that is, *key_size* * *count*.
+ *
+ * Each element specified in *keys* is sequentially deleted. The
+ * *in_batch*, *out_batch*, and *values* parameters are ignored
+ * and should be zeroed.
+ *
+ * The *elem_flags* argument may be specified as one of the
+ * following:
+ *
+ * **BPF_F_LOCK**
+ * Look up the value of a spin-locked map without
+ * returning the lock. This must be specified if the
+ * elements contain a spinlock.
+ *
+ * On success, *count* elements from the map are deleted.
+ *
+ * If an error is returned and *errno* is not **EFAULT**, *count*
+ * is set to the number of successfully processed elements. If
+ * *errno* is **EFAULT**, up to *count* elements may be been
+ * deleted.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_LINK_CREATE
+ * Description
+ * Attach an eBPF program to a *target_fd* at the specified
+ * *attach_type* hook and return a file descriptor handle for
+ * managing the link.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_LINK_UPDATE
+ * Description
+ * Update the eBPF program in the specified *link_fd* to
+ * *new_prog_fd*.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_LINK_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the eBPF Link corresponding to
+ * *link_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_LINK_GET_NEXT_ID
+ * Description
+ * Fetch the next eBPF link currently loaded into the kernel.
+ *
+ * Looks for the eBPF link with an id greater than *start_id*
+ * and updates *next_id* on success. If no other eBPF links
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_ENABLE_STATS
+ * Description
+ * Enable eBPF runtime statistics gathering.
+ *
+ * Runtime statistics gathering for the eBPF runtime is disabled
+ * by default to minimize the corresponding performance overhead.
+ * This command enables statistics globally.
+ *
+ * Multiple programs may independently enable statistics.
+ * After gathering the desired statistics, eBPF runtime statistics
+ * may be disabled again by calling **close**\ (2) for the file
+ * descriptor returned by this function. Statistics will only be
+ * disabled system-wide when all outstanding file descriptors
+ * returned by prior calls for this subcommand are closed.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_ITER_CREATE
+ * Description
+ * Create an iterator on top of the specified *link_fd* (as
+ * previously created using **BPF_LINK_CREATE**) and return a
+ * file descriptor that can be used to trigger the iteration.
+ *
+ * If the resulting file descriptor is pinned to the filesystem
+ * using **BPF_OBJ_PIN**, then subsequent **read**\ (2) syscalls
+ * for that path will trigger the iterator to read kernel state
+ * using the eBPF program attached to *link_fd*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_LINK_DETACH
+ * Description
+ * Forcefully detach the specified *link_fd* from its
+ * corresponding attachment point.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_BIND_MAP
+ * Description
+ * Bind a map to the lifetime of an eBPF program.
+ *
+ * The map identified by *map_fd* is bound to the program
+ * identified by *prog_fd* and only released when *prog_fd* is
+ * released. This may be used in cases where metadata should be
+ * associated with a program which otherwise does not contain any
+ * references to the map (for example, embedded in the eBPF
+ * program instructions).
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * NOTES
+ * eBPF objects (maps and programs) can be shared between processes.
+ *
+ * * After **fork**\ (2), the child inherits file descriptors
+ * referring to the same eBPF objects.
+ * * File descriptors referring to eBPF objects can be transferred over
+ * **unix**\ (7) domain sockets.
+ * * File descriptors referring to eBPF objects can be duplicated in the
+ * usual way, using **dup**\ (2) and similar calls.
+ * * File descriptors referring to eBPF objects can be pinned to the
+ * filesystem using the **BPF_OBJ_PIN** command of **bpf**\ (2).
+ *
+ * An eBPF object is deallocated only after all file descriptors referring
+ * to the object have been closed and no references remain pinned to the
+ * filesystem or attached (for example, bound to a program or device).
+ */
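
As a concrete illustration of the cmd/attr/size contract documented above, a raw **BPF_MAP_CREATE** call could look like the sketch below; real code would normally go through libbpf rather than the bare syscall:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int create_array_map(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = sizeof(int);
	attr.value_size = sizeof(long long);
	attr.max_entries = 64;

	/* returns a new map FD, or -1 with errno set */
	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}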
enum bpf_cmd {
BPF_MAP_CREATE,
BPF_MAP_LOOKUP_ELEM,
@@ -247,6 +978,7 @@ enum bpf_attach_type {
BPF_XDP_CPUMAP,
BPF_SK_LOOKUP,
BPF_XDP,
+ BPF_SK_SKB_VERDICT,
__MAX_BPF_ATTACH_TYPE
};
@@ -393,11 +1125,24 @@ enum bpf_link_type {
* is struct/union.
*/
#define BPF_PSEUDO_BTF_ID 3
+/* insn[0].src_reg: BPF_PSEUDO_FUNC
+ * insn[0].imm: insn offset to the func
+ * insn[1].imm: 0
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of the function
+ * verifier type: PTR_TO_FUNC.
+ */
+#define BPF_PSEUDO_FUNC 4
/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
* offset to another bpf function
*/
#define BPF_PSEUDO_CALL 1
+/* when bpf_call->src_reg == BPF_PSEUDO_KFUNC_CALL,
+ * bpf_call->imm == btf_id of a BTF_KIND_FUNC in the running kernel
+ */
+#define BPF_PSEUDO_KFUNC_CALL 2
/* flags for BPF_MAP_UPDATE_ELEM command */
enum {
@@ -720,7 +1465,7 @@ union bpf_attr {
* parsed and used to produce a manual page. The workflow is the following,
* and requires the rst2man utility:
*
- * $ ./scripts/bpf_helpers_doc.py \
+ * $ ./scripts/bpf_doc.py \
* --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
* $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
* $ man /tmp/bpf-helpers.7
@@ -1765,6 +2510,10 @@ union bpf_attr {
* Use with ENCAP_L3/L4 flags to further specify the tunnel
* type; *len* is the length of the inner MAC header.
*
+ * * **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**:
+ * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
+ * L2 type as Ethernet.
+ *
* A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
@@ -3333,12 +4082,20 @@ union bpf_attr {
* of new data availability is sent.
* If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
* of new data availability is sent unconditionally.
+ * If **0** is specified in *flags*, an adaptive notification
+ * of new data availability is sent.
+ *
+ * An adaptive notification is a notification sent whenever the
+ * user-space process has caught up and consumed all available
+ * payloads. If the user-space process is still processing a previous
+ * payload, no notification is needed, as it will process the newly
+ * added payload automatically.
* Return
* 0 on success, or a negative error in case of failure.
*
* void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
* Description
* Reserve *size* bytes of payload in a ring buffer *ringbuf*.
+ * *flags* must be 0.
* Return
* Valid pointer with *size* bytes of memory available; NULL,
* otherwise.
@@ -3350,6 +4107,10 @@ union bpf_attr {
* of new data availability is sent.
* If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
* of new data availability is sent unconditionally.
+ * If **0** is specified in *flags*, an adaptive notification
+ * of new data availability is sent.
+ *
+ * See 'bpf_ringbuf_output()' for the definition of adaptive notification.
* Return
* Nothing. Always succeeds.
*
@@ -3360,6 +4121,10 @@ union bpf_attr {
* of new data availability is sent.
* If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
* of new data availability is sent unconditionally.
+ * If **0** is specified in *flags*, an adaptive notification
+ * of new data availability is sent.
+ *
+ * See 'bpf_ringbuf_output()' for the definition of adaptive notification.
* Return
* Nothing. Always succeeds.
*
@@ -3850,7 +4615,7 @@ union bpf_attr {
*
* long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
* Description
- * Check ctx packet size against exceeding MTU of net device (based
+ * Check packet size against exceeding MTU of net device (based
* on *ifindex*). This helper will likely be used in combination
* with helpers that adjust/change the packet size.
*
@@ -3867,6 +4632,14 @@ union bpf_attr {
* against the current net device. This is practical if this isn't
* used prior to redirect.
*
+ * On input *mtu_len* must be a valid pointer, else the verifier
+ * will reject the BPF program. If the value *mtu_len* is
+ * initialized to zero then the ctx packet size is used. When the
+ * value *mtu_len* is provided as input, it specifies the L3 length
+ * that the MTU check is done against. Remember XDP and TC lengths
+ * operate at L2, but this value is L3, as it correlates to the MTU
+ * and IP-header tot_len values, which are L3 (similar behavior to
+ * bpf_fib_lookup).
+ *
* The Linux kernel route table can configure MTUs on a more
* specific per route level, which is not provided by this helper.
* For route level MTU checks use the **bpf_fib_lookup**\ ()
@@ -3891,11 +4664,9 @@ union bpf_attr {
*
* On return *mtu_len* pointer contains the MTU value of the net
* device. Remember the net device configured MTU is the L3 size,
- * which is returned here and XDP and TX length operate at L2.
+ * which is returned here and XDP and TC length operate at L2.
* The helper takes this into account for you, but remember when using
- * MTU value in your BPF-code. On input *mtu_len* must be a valid
- * pointer and be initialized (to zero), else verifier will reject
- * BPF program.
+ * the MTU value in your BPF code.
*
* Return
* * 0 on success, and populate MTU value in *mtu_len* pointer.
@@ -3909,6 +4680,61 @@ union bpf_attr {
* * **BPF_MTU_CHK_RET_FRAG_NEEDED**
* * **BPF_MTU_CHK_RET_SEGS_TOOBIG**
*
+ * long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags)
+ * Description
+ * For each element in **map**, call **callback_fn** function with
+ * **map**, **callback_ctx** and other map-specific parameters.
+ * The **callback_fn** should be a static function and
+ * the **callback_ctx** should be a pointer to the stack.
+ * The **flags** argument is used to control certain aspects of the
+ * helper. Currently, **flags** must be 0.
+ *
+ * The following is a list of supported map types and their
+ * respective expected callback signatures:
+ *
+ * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH,
+ * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
+ * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY
+ *
+ * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx);
+ *
+ * For per_cpu maps, the map_value is the value on the cpu where the
+ * bpf_prog is running.
+ *
+ * If **callback_fn** returns 0, the helper will continue to the next
+ * element. If the return value is 1, the helper will skip the rest of
+ * the elements and return. Other return values are not used now.
+ *
+ * Return
+ * The number of traversed map elements for success, **-EINVAL** for
+ * invalid **flags**.
+ *
+ * long bpf_snprintf(char *str, u32 str_size, const char *fmt, u64 *data, u32 data_len)
+ * Description
+ * Outputs a string into the **str** buffer of size **str_size**
+ * based on a format string stored in a read-only map pointed to by
+ * **fmt**.
+ *
+ * Each format specifier in **fmt** corresponds to one u64 element
+ * in the **data** array. For strings and pointers where pointees
+ * are accessed, only the pointer values are stored in the *data*
+ * array. The *data_len* is the size of *data* in bytes.
+ *
+ * Formats **%s** and **%p{i,I}{4,6}** require reading kernel
+ * memory. Reading kernel memory may fail due to either invalid
+ * address or valid address but requiring a major memory fault. If
+ * reading kernel memory fails, the string for **%s** will be an
+ * empty string, and the ip address for **%p{i,I}{4,6}** will be 0.
+ * Not returning an error to the bpf program is consistent with what
+ * **bpf_trace_printk**\ () does for now.
+ *
+ * Return
+ * The strictly positive length of the formatted string, including
+ * the trailing zero character. If the return value is greater than
+ * **str_size**, **str** contains a truncated string, guaranteed to
+ * be zero-terminated except when **str_size** is 0.
+ *
+ * Or **-EBUSY** if the per-CPU memory copy buffer is busy.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -4075,6 +4901,8 @@ union bpf_attr {
FN(ima_inode_hash), \
FN(sock_from_file), \
FN(check_mtu), \
+ FN(for_each_map_elem), \
+ FN(snprintf), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -4168,6 +4996,7 @@ enum {
BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3),
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5),
+ BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6),
};
enum {
@@ -4615,6 +5444,8 @@ struct bpf_link_info {
} raw_tracepoint;
struct {
__u32 attach_type;
+ __u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */
+ __u32 target_btf_id; /* BTF type id inside the object */
} tracing;
struct {
__u64 cgroup_id;
@@ -5205,7 +6036,10 @@ struct bpf_pidns_info {
/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
struct bpf_sk_lookup {
- __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+ union {
+ __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+ __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
+ };
__u32 family; /* Protocol family (AF_INET, AF_INET6) */
__u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h
index 5a667107ad2c..d27b1708efe9 100644
--- a/tools/include/uapi/linux/btf.h
+++ b/tools/include/uapi/linux/btf.h
@@ -52,7 +52,7 @@ struct btf_type {
};
};
-#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f)
+#define BTF_INFO_KIND(info) (((info) >> 24) & 0x1f)
#define BTF_INFO_VLEN(info) ((info) & 0xffff)
#define BTF_INFO_KFLAG(info) ((info) >> 31)
@@ -72,7 +72,8 @@ struct btf_type {
#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
#define BTF_KIND_VAR 14 /* Variable */
#define BTF_KIND_DATASEC 15 /* Section */
-#define BTF_KIND_MAX BTF_KIND_DATASEC
+#define BTF_KIND_FLOAT 16 /* Floating point */
+#define BTF_KIND_MAX BTF_KIND_FLOAT
#define NR_BTF_KINDS (BTF_KIND_MAX + 1)
/* For some specific BTF_KIND, "struct btf_type" is immediately
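
The widened mask is what makes the new kind representable: 16 is the first kind value that needs a fifth info bit, so the old 0x0f mask would have decoded a float type as kind 0. A quick demonstration:

#include <stdio.h>

#define BTF_INFO_KIND_OLD(info)	(((info) >> 24) & 0x0f)
#define BTF_INFO_KIND_NEW(info)	(((info) >> 24) & 0x1f)

int main(void)
{
	unsigned int info = 16u << 24;	/* a BTF_KIND_FLOAT type */

	printf("old=%u new=%u\n",
	       BTF_INFO_KIND_OLD(info),		/* 0: misdecoded */
	       BTF_INFO_KIND_NEW(info));	/* 16: BTF_KIND_FLOAT */
	return 0;
}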
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index ad15e40d7f5d..14332f4cf816 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -38,6 +38,21 @@ enum perf_type_id {
};
/*
+ * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
+ * PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA
+ * AA: hardware event ID
+ * EEEEEEEE: PMU type ID
+ * PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB
+ * BB: hardware cache ID
+ * CC: hardware cache op ID
+ * DD: hardware cache op result ID
+ * EEEEEEEE: PMU type ID
+ * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied.
+ */
+#define PERF_PMU_TYPE_SHIFT 32
+#define PERF_HW_EVENT_MASK 0xffffffff
+
+/*
* Generalized performance event event_id types, used by the
* attr.event_id parameter of the sys_perf_event_open()
* syscall:
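
Composing an extended attr.config with the constants added above might look like this sketch; pmu_type is an assumed value read from /sys/bus/event_source/devices/<pmu>/type:

#include <linux/perf_event.h>

/* PMU type ID in the upper 32 bits, the event encoding below */
static __u64 make_pmu_config(__u32 pmu_type, __u64 event)
{
	return ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT) |
	       (event & PERF_HW_EVENT_MASK);
}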
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 190366d05588..9b057cc7650a 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1,3 +1,3 @@
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o \
- btf_dump.o ringbuf.o
+ btf_dump.o ringbuf.o strset.o linker.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index e9eb6a6e80d2..e43e1896cb4b 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -158,7 +158,7 @@ $(BPF_IN_STATIC): force $(BPF_HELPER_DEFS)
$(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
$(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h
- $(QUIET_GEN)$(srctree)/scripts/bpf_helpers_doc.py --header \
+ $(QUIET_GEN)$(srctree)/scripts/bpf_doc.py --header \
--file $(srctree)/tools/include/uapi/linux/bpf.h > $(BPF_HELPER_DEFS)
$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
@@ -228,7 +228,6 @@ install_headers: $(BPF_HELPER_DEFS)
$(call do_install,bpf.h,$(prefix)/include/bpf,644); \
$(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
$(call do_install,btf.h,$(prefix)/include/bpf,644); \
- $(call do_install,libbpf_util.h,$(prefix)/include/bpf,644); \
$(call do_install,libbpf_common.h,$(prefix)/include/bpf,644); \
$(call do_install,xsk.h,$(prefix)/include/bpf,644); \
$(call do_install,bpf_helpers.h,$(prefix)/include/bpf,644); \
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index 53b3e199fb25..09ebe3db5f2f 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -88,11 +88,19 @@ enum bpf_enum_value_kind {
const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
unsigned long long val; \
\
+ /* This is a so-called barrier_var() operation that makes the \
+ * specified variable a "black box" for the optimizing compiler. \
+ * It forces the compiler to perform the BYTE_OFFSET relocation on p \
+ * once and use the calculated value in the switch below, instead of \
+ * applying the same relocation four times, once per memory load. \
+ */ \
+ asm volatile("" : "=r"(p) : "0"(p)); \
+ \
switch (__CORE_RELO(s, field, BYTE_SIZE)) { \
- case 1: val = *(const unsigned char *)p; \
- case 2: val = *(const unsigned short *)p; \
- case 4: val = *(const unsigned int *)p; \
- case 8: val = *(const unsigned long long *)p; \
+ case 1: val = *(const unsigned char *)p; break; \
+ case 2: val = *(const unsigned short *)p; break; \
+ case 4: val = *(const unsigned int *)p; break; \
+ case 8: val = *(const unsigned long long *)p; break; \
} \
val <<= __CORE_RELO(s, field, LSHIFT_U64); \
if (__CORE_RELO(s, field, SIGNED)) \
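The inline asm is the only functional change besides the added break statements; the same compiler barrier can be sketched in isolation as:

    /* the empty asm ties the output to the input register, so the
     * compiler must materialize var once and cannot re-derive it at
     * each use site (which would re-apply the CO-RE relocation)
     */
    #define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))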
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index ae6c975e0b87..9720dc0b4605 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -25,13 +25,21 @@
/*
* Helper macro to place programs, maps, license in
* different sections in elf_bpf file. Section names
- * are interpreted by elf_bpf loader
+ * are interpreted by libbpf depending on the context (BPF programs, BPF maps,
+ * extern variables, etc).
+ * To allow use of SEC() with externs (e.g., for extern .maps declarations),
+ * make sure __attribute__((unused)) doesn't trigger a compilation warning.
*/
-#define SEC(NAME) __attribute__((section(NAME), used))
+#define SEC(name) \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \
+ __attribute__((section(name), used)) \
+ _Pragma("GCC diagnostic pop") \
-#ifndef __always_inline
+/* Avoid 'linux/stddef.h' definition of '__always_inline'. */
+#undef __always_inline
#define __always_inline inline __attribute__((always_inline))
-#endif
+
#ifndef __noinline
#define __noinline __attribute__((noinline))
#endif
@@ -40,7 +48,29 @@
#endif
/*
- * Helper macro to manipulate data structures
+ * Use the __hidden attribute to mark a non-static BPF subprogram as
+ * effectively static for the purposes of the BPF verifier's algorithm:
+ * verification can then take the subprogram's caller context into account,
+ * enabling a more extensive and permissive verification process.
+ */
+#define __hidden __attribute__((visibility("hidden")))
+
+/* When utilizing vmlinux.h with BPF CO-RE, user BPF programs can't include
+ * any system-level headers (such as stddef.h, linux/version.h, etc), and
+ * commonly-used macros like NULL and KERNEL_VERSION aren't available through
+ * vmlinux.h. This just adds unnecessary hurdles and forces users to re-define
+ * them on their own. So as a convenience, provide such definitions here.
+ */
+#ifndef NULL
+#define NULL ((void *)0)
+#endif
+
+#ifndef KERNEL_VERSION
+#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
+#endif
+
+/*
+ * Helper macros to manipulate data structures
*/
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
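A sketch of the updated macros in use; the section name and subprogram are illustrative only:

    __hidden int bump(int v)              /* verified as if static */
    {
            return v + 1;
    }

    SEC("tracepoint/syscalls/sys_enter_execve")
    int handle_execve(void *ctx)
    {
            return bump(0);
    }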
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index f9ef37707888..8c954ebc0c7c 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -413,20 +413,56 @@ typeof(name(0)) name(struct pt_regs *ctx) \
} \
static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
+#define ___bpf_fill0(arr, p, x) do {} while (0)
+#define ___bpf_fill1(arr, p, x) arr[p] = x
+#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
+#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
+#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
+#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
+#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
+#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
+#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
+#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
+#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
+#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
+#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
+#define ___bpf_fill(arr, args...) \
+ ___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
+
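For a three-argument call, the dispatch through ___bpf_narg() expands as sketched below:

    unsigned long long param[___bpf_narg(a, b, c)];   /* 3 slots */
    ___bpf_fill(param, a, b, c);
    /* => param[0] = a; param[1] = b; param[2] = c; */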
/*
* BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
* in a structure.
*/
-#define BPF_SEQ_PRINTF(seq, fmt, args...) \
- ({ \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
- static const char ___fmt[] = fmt; \
- unsigned long long ___param[] = { args }; \
- _Pragma("GCC diagnostic pop") \
- int ___ret = bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \
- ___param, sizeof(___param)); \
- ___ret; \
- })
+#define BPF_SEQ_PRINTF(seq, fmt, args...) \
+({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \
+ ___param, sizeof(___param)); \
+})
+
+/*
+ * BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of
+ * an array of u64.
+ */
+#define BPF_SNPRINTF(out, out_size, fmt, args...) \
+({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_snprintf(out, out_size, ___fmt, \
+ ___param, sizeof(___param)); \
+})
#endif
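A sketch of BPF_SNPRINTF in a BPF program; the output size and the pid/comm arguments are illustrative:

    char out[32];

    /* expands to a bpf_snprintf() call over a stack array of u64s */
    BPF_SNPRINTF(out, sizeof(out), "%d: %s", pid, comm);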
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index d9c10830d749..d57e13a13798 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -21,6 +21,7 @@
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
+#include "strset.h"
#define BTF_MAX_NR_TYPES 0x7fffffffU
#define BTF_MAX_STR_OFFSET 0x7fffffffU
@@ -67,7 +68,7 @@ struct btf {
* | | |
* hdr | |
* types_data----+ |
- * strs_data------------------+
+ * strset__data(strs_set)-----+
*
* +----------+---------+-----------+
* | Header | Types | Strings |
@@ -105,20 +106,15 @@ struct btf {
*/
int start_str_off;
+ /* only one of strs_data or strs_set can be non-NULL, depending on
+ * whether BTF is in a modifiable state (strs_set is used) or not
+ * (strs_data points inside raw_data)
+ */
void *strs_data;
- size_t strs_data_cap; /* used size stored in hdr->str_len */
-
- /* lookup index for each unique string in strings section */
- struct hashmap *strs_hash;
+ /* a set of unique strings */
+ struct strset *strs_set;
/* whether strings are already deduplicated */
bool strs_deduped;
- /* extra indirection layer to make strings hashmap work with stable
- * string offsets and ability to transparently choose between
- * btf->strs_data or btf_dedup->strs_data as a source of strings.
- * This is used for BTF strings dedup to transfer deduplicated strings
- * data back to struct btf without re-building strings index.
- */
- void **strs_data_ptr;
/* BTF object FD, if loaded into kernel */
int fd;
@@ -142,8 +138,8 @@ static inline __u64 ptr_to_u64(const void *ptr)
* On success, memory pointer to the beginning of unused memory is returned.
* On error, NULL is returned.
*/
-void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
- size_t cur_cnt, size_t max_cnt, size_t add_cnt)
+void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
+ size_t cur_cnt, size_t max_cnt, size_t add_cnt)
{
size_t new_cnt;
void *new_data;
@@ -179,14 +175,14 @@ void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
/* Ensure given dynamically allocated memory region has enough allocated space
* to accommodate *need_cnt* elements of size *elem_sz* bytes each
*/
-int btf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
+int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
{
void *p;
if (need_cnt <= *cap_cnt)
return 0;
- p = btf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
+ p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
if (!p)
return -ENOMEM;
@@ -197,8 +193,8 @@ static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
{
__u32 *p;
- p = btf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
- btf->nr_types, BTF_MAX_NR_TYPES, 1);
+ p = libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
+ btf->nr_types, BTF_MAX_NR_TYPES, 1);
if (!p)
return -ENOMEM;
@@ -291,6 +287,7 @@ static int btf_type_size(const struct btf_type *t)
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
+ case BTF_KIND_FLOAT:
return base_size;
case BTF_KIND_INT:
return base_size + sizeof(__u32);
@@ -338,6 +335,7 @@ static int btf_bswap_type_rest(struct btf_type *t)
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
+ case BTF_KIND_FLOAT:
return 0;
case BTF_KIND_INT:
*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
@@ -433,7 +431,7 @@ const struct btf *btf__base_btf(const struct btf *btf)
}
/* internal helper returning non-const pointer to a type */
-static struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id)
+struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id)
{
if (type_id == 0)
return &btf_void;
@@ -578,6 +576,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
case BTF_KIND_UNION:
case BTF_KIND_ENUM:
case BTF_KIND_DATASEC:
+ case BTF_KIND_FLOAT:
size = t->size;
goto done;
case BTF_KIND_PTR:
@@ -621,6 +620,7 @@ int btf__align_of(const struct btf *btf, __u32 id)
switch (kind) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
+ case BTF_KIND_FLOAT:
return min(btf_ptr_sz(btf), (size_t)t->size);
case BTF_KIND_PTR:
return btf_ptr_sz(btf);
@@ -734,7 +734,7 @@ void btf__free(struct btf *btf)
*/
free(btf->hdr);
free(btf->types_data);
- free(btf->strs_data);
+ strset__free(btf->strs_set);
}
free(btf->raw_data);
free(btf->raw_data_swapped);
@@ -1242,6 +1242,11 @@ void btf__set_fd(struct btf *btf, int fd)
btf->fd = fd;
}
+static const void *btf_strs_data(const struct btf *btf)
+{
+ return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
+}
+
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
{
struct btf_header *hdr = btf->hdr;
@@ -1282,7 +1287,7 @@ static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endi
}
p += hdr->type_len;
- memcpy(p, btf->strs_data, hdr->str_len);
+ memcpy(p, btf_strs_data(btf), hdr->str_len);
p += hdr->str_len;
*size = data_sz;
@@ -1316,7 +1321,7 @@ const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
if (offset < btf->start_str_off)
return btf__str_by_offset(btf->base_btf, offset);
else if (offset - btf->start_str_off < btf->hdr->str_len)
- return btf->strs_data + (offset - btf->start_str_off);
+ return btf_strs_data(btf) + (offset - btf->start_str_off);
else
return NULL;
}
@@ -1470,25 +1475,6 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
return 0;
}
-static size_t strs_hash_fn(const void *key, void *ctx)
-{
- const struct btf *btf = ctx;
- const char *strs = *btf->strs_data_ptr;
- const char *str = strs + (long)key;
-
- return str_hash(str);
-}
-
-static bool strs_hash_equal_fn(const void *key1, const void *key2, void *ctx)
-{
- const struct btf *btf = ctx;
- const char *strs = *btf->strs_data_ptr;
- const char *str1 = strs + (long)key1;
- const char *str2 = strs + (long)key2;
-
- return strcmp(str1, str2) == 0;
-}
-
static void btf_invalidate_raw_data(struct btf *btf)
{
if (btf->raw_data) {
@@ -1507,10 +1493,9 @@ static void btf_invalidate_raw_data(struct btf *btf)
*/
static int btf_ensure_modifiable(struct btf *btf)
{
- void *hdr, *types, *strs, *strs_end, *s;
- struct hashmap *hash = NULL;
- long off;
- int err;
+ void *hdr, *types;
+ struct strset *set = NULL;
+ int err = -ENOMEM;
if (btf_is_modifiable(btf)) {
/* any BTF modification invalidates raw_data */
@@ -1521,44 +1506,25 @@ static int btf_ensure_modifiable(struct btf *btf)
/* split raw data into three memory regions */
hdr = malloc(btf->hdr->hdr_len);
types = malloc(btf->hdr->type_len);
- strs = malloc(btf->hdr->str_len);
- if (!hdr || !types || !strs)
+ if (!hdr || !types)
goto err_out;
memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
memcpy(types, btf->types_data, btf->hdr->type_len);
- memcpy(strs, btf->strs_data, btf->hdr->str_len);
-
- /* make hashmap below use btf->strs_data as a source of strings */
- btf->strs_data_ptr = &btf->strs_data;
/* build lookup index for all strings */
- hash = hashmap__new(strs_hash_fn, strs_hash_equal_fn, btf);
- if (IS_ERR(hash)) {
- err = PTR_ERR(hash);
- hash = NULL;
+ set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
+ if (IS_ERR(set)) {
+ err = PTR_ERR(set);
goto err_out;
}
- strs_end = strs + btf->hdr->str_len;
- for (off = 0, s = strs; s < strs_end; off += strlen(s) + 1, s = strs + off) {
- /* hashmap__add() returns EEXIST if string with the same
- * content already is in the hash map
- */
- err = hashmap__add(hash, (void *)off, (void *)off);
- if (err == -EEXIST)
- continue; /* duplicate */
- if (err)
- goto err_out;
- }
-
/* only when everything was successful, update internal state */
btf->hdr = hdr;
btf->types_data = types;
btf->types_data_cap = btf->hdr->type_len;
- btf->strs_data = strs;
- btf->strs_data_cap = btf->hdr->str_len;
- btf->strs_hash = hash;
+ btf->strs_data = NULL;
+ btf->strs_set = set;
/* if BTF was created from scratch, all strings are guaranteed to be
* unique and deduplicated
*/
@@ -1573,17 +1539,10 @@ static int btf_ensure_modifiable(struct btf *btf)
return 0;
err_out:
- hashmap__free(hash);
+ strset__free(set);
free(hdr);
free(types);
- free(strs);
- return -ENOMEM;
-}
-
-static void *btf_add_str_mem(struct btf *btf, size_t add_sz)
-{
- return btf_add_mem(&btf->strs_data, &btf->strs_data_cap, 1,
- btf->hdr->str_len, BTF_MAX_STR_OFFSET, add_sz);
+ return err;
}
/* Find an offset in BTF string section that corresponds to a given string *s*.
@@ -1594,34 +1553,23 @@ static void *btf_add_str_mem(struct btf *btf, size_t add_sz)
*/
int btf__find_str(struct btf *btf, const char *s)
{
- long old_off, new_off, len;
- void *p;
+ int off;
if (btf->base_btf) {
- int ret;
-
- ret = btf__find_str(btf->base_btf, s);
- if (ret != -ENOENT)
- return ret;
+ off = btf__find_str(btf->base_btf, s);
+ if (off != -ENOENT)
+ return off;
}
/* BTF needs to be in a modifiable state to build string lookup index */
if (btf_ensure_modifiable(btf))
return -ENOMEM;
- /* see btf__add_str() for why we do this */
- len = strlen(s) + 1;
- p = btf_add_str_mem(btf, len);
- if (!p)
- return -ENOMEM;
-
- new_off = btf->hdr->str_len;
- memcpy(p, s, len);
-
- if (hashmap__find(btf->strs_hash, (void *)new_off, (void **)&old_off))
- return btf->start_str_off + old_off;
+ off = strset__find_str(btf->strs_set, s);
+ if (off < 0)
+ return off;
- return -ENOENT;
+ return btf->start_str_off + off;
}
/* Add a string s to the BTF string section.
@@ -1631,61 +1579,30 @@ int btf__find_str(struct btf *btf, const char *s)
*/
int btf__add_str(struct btf *btf, const char *s)
{
- long old_off, new_off, len;
- void *p;
- int err;
+ int off;
if (btf->base_btf) {
- int ret;
-
- ret = btf__find_str(btf->base_btf, s);
- if (ret != -ENOENT)
- return ret;
+ off = btf__find_str(btf->base_btf, s);
+ if (off != -ENOENT)
+ return off;
}
if (btf_ensure_modifiable(btf))
return -ENOMEM;
- /* Hashmap keys are always offsets within btf->strs_data, so to even
- * look up some string from the "outside", we need to first append it
- * at the end, so that it can be addressed with an offset. Luckily,
- * until btf->hdr->str_len is incremented, that string is just a piece
- * of garbage for the rest of BTF code, so no harm, no foul. On the
- * other hand, if the string is unique, it's already appended and
- * ready to be used, only a simple btf->hdr->str_len increment away.
- */
- len = strlen(s) + 1;
- p = btf_add_str_mem(btf, len);
- if (!p)
- return -ENOMEM;
-
- new_off = btf->hdr->str_len;
- memcpy(p, s, len);
+ off = strset__add_str(btf->strs_set, s);
+ if (off < 0)
+ return off;
- /* Now attempt to add the string, but only if the string with the same
- * contents doesn't exist already (HASHMAP_ADD strategy). If such
- * string exists, we'll get its offset in old_off (that's old_key).
- */
- err = hashmap__insert(btf->strs_hash, (void *)new_off, (void *)new_off,
- HASHMAP_ADD, (const void **)&old_off, NULL);
- if (err == -EEXIST)
- return btf->start_str_off + old_off; /* duplicated string, return existing offset */
- if (err)
- return err;
+ btf->hdr->str_len = strset__data_size(btf->strs_set);
- btf->hdr->str_len += len; /* new unique string, adjust data length */
- return btf->start_str_off + new_off;
+ return btf->start_str_off + off;
}
static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
{
- return btf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
- btf->hdr->type_len, UINT_MAX, add_sz);
-}
-
-static __u32 btf_type_info(int kind, int vlen, int kflag)
-{
- return (kflag << 31) | (kind << 24) | vlen;
+ return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
+ btf->hdr->type_len, UINT_MAX, add_sz);
}
static void btf_type_inc_vlen(struct btf_type *t)
@@ -1707,6 +1624,54 @@ static int btf_commit_type(struct btf *btf, int data_sz)
return btf->start_id + btf->nr_types - 1;
}
+struct btf_pipe {
+ const struct btf *src;
+ struct btf *dst;
+};
+
+static int btf_rewrite_str(__u32 *str_off, void *ctx)
+{
+ struct btf_pipe *p = ctx;
+ int off;
+
+ if (!*str_off) /* nothing to do for empty strings */
+ return 0;
+
+ off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
+ if (off < 0)
+ return off;
+
+ *str_off = off;
+ return 0;
+}
+
+int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
+{
+ struct btf_pipe p = { .src = src_btf, .dst = btf };
+ struct btf_type *t;
+ int sz, err;
+
+ sz = btf_type_size(src_type);
+ if (sz < 0)
+ return sz;
+
+ /* deconstruct BTF, if necessary, and invalidate raw_data */
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return -ENOMEM;
+
+ memcpy(t, src_type, sz);
+
+ err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
+ if (err)
+ return err;
+
+ return btf_commit_type(btf, sz);
+}
+
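A sketch of copying one type between BTF objects with the new API; note that string offsets are rewritten by btf_rewrite_str() above, while referenced type IDs are copied verbatim and remapping them stays the caller's responsibility:

    const struct btf_type *t = btf__type_by_id(src_btf, id);
    int new_id = btf__add_type(dst_btf, src_btf, t);

    if (new_id < 0)
            /* -EINVAL / -ENOMEM propagated from the steps above */;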
/*
* Append new BTF_KIND_INT type with:
* - *name* - non-empty, non-NULL type name;
@@ -1756,6 +1721,47 @@ int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding
return btf_commit_type(btf, sz);
}
+/*
+ * Append new BTF_KIND_FLOAT type with:
+ * - *name* - non-empty, non-NULL type name;
+ * - *sz* - size of the type, in bytes;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
+{
+ struct btf_type *t;
+ int sz, name_off;
+
+ /* non-empty name */
+ if (!name || !name[0])
+ return -EINVAL;
+
+ /* byte_sz must be one of the explicitly allowed values */
+ if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
+ byte_sz != 16)
+ return -EINVAL;
+
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ sz = sizeof(struct btf_type);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return -ENOMEM;
+
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+
+ t->name_off = name_off;
+ t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
+ t->size = byte_sz;
+
+ return btf_commit_type(btf, sz);
+}
+
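Usage is symmetrical with btf__add_int(); a sketch:

    int id = btf__add_float(btf, "float", 4);   /* IEEE 754 single */

    if (id < 0)
            /* -EINVAL for a bad name or size, -ENOMEM otherwise */;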
/* it's completely legal to append BTF types with type IDs pointing forward to
* types that haven't been appended yet, so we only make sure that id looks
* sane, we can't guarantee that ID will always be valid
@@ -1883,7 +1889,7 @@ static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32
* - *byte_sz* - size of the struct, in bytes;
*
* Struct initially has no fields in it. Fields can be added by
- * btf__add_field() right after btf__add_struct() succeeds.
+ * btf__add_field() right after btf__add_struct() succeeds.
*
* Returns:
* - >0, type ID of newly added BTF type;
@@ -2971,10 +2977,7 @@ struct btf_dedup {
/* Various option modifying behavior of algorithm */
struct btf_dedup_opts opts;
/* temporary strings deduplication state */
- void *strs_data;
- size_t strs_cap;
- size_t strs_len;
- struct hashmap* strs_hash;
+ struct strset *strs_set;
};
static long hash_combine(long h, long value)
@@ -3110,95 +3113,28 @@ done:
return d;
}
-typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);
-
/*
* Iterate over all possible places in .BTF and .BTF.ext that can reference
* string and pass pointer to it to a provided callback `fn`.
*/
-static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
+static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
{
- void *line_data_cur, *line_data_end;
- int i, j, r, rec_size;
- struct btf_type *t;
+ int i, r;
for (i = 0; i < d->btf->nr_types; i++) {
- t = btf_type_by_id(d->btf, d->btf->start_id + i);
- r = fn(&t->name_off, ctx);
+ struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
+
+ r = btf_type_visit_str_offs(t, fn, ctx);
if (r)
return r;
-
- switch (btf_kind(t)) {
- case BTF_KIND_STRUCT:
- case BTF_KIND_UNION: {
- struct btf_member *m = btf_members(t);
- __u16 vlen = btf_vlen(t);
-
- for (j = 0; j < vlen; j++) {
- r = fn(&m->name_off, ctx);
- if (r)
- return r;
- m++;
- }
- break;
- }
- case BTF_KIND_ENUM: {
- struct btf_enum *m = btf_enum(t);
- __u16 vlen = btf_vlen(t);
-
- for (j = 0; j < vlen; j++) {
- r = fn(&m->name_off, ctx);
- if (r)
- return r;
- m++;
- }
- break;
- }
- case BTF_KIND_FUNC_PROTO: {
- struct btf_param *m = btf_params(t);
- __u16 vlen = btf_vlen(t);
-
- for (j = 0; j < vlen; j++) {
- r = fn(&m->name_off, ctx);
- if (r)
- return r;
- m++;
- }
- break;
- }
- default:
- break;
- }
}
if (!d->btf_ext)
return 0;
- line_data_cur = d->btf_ext->line_info.info;
- line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
- rec_size = d->btf_ext->line_info.rec_size;
-
- while (line_data_cur < line_data_end) {
- struct btf_ext_info_sec *sec = line_data_cur;
- struct bpf_line_info_min *line_info;
- __u32 num_info = sec->num_info;
-
- r = fn(&sec->sec_name_off, ctx);
- if (r)
- return r;
-
- line_data_cur += sizeof(struct btf_ext_info_sec);
- for (i = 0; i < num_info; i++) {
- line_info = line_data_cur;
- r = fn(&line_info->file_name_off, ctx);
- if (r)
- return r;
- r = fn(&line_info->line_off, ctx);
- if (r)
- return r;
- line_data_cur += rec_size;
- }
- }
+ r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
+ if (r)
+ return r;
return 0;
}
@@ -3207,10 +3143,8 @@ static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
{
struct btf_dedup *d = ctx;
__u32 str_off = *str_off_ptr;
- long old_off, new_off, len;
const char *s;
- void *p;
- int err;
+ int off, err;
/* don't touch empty string or string in main BTF */
if (str_off == 0 || str_off < d->btf->start_str_off)
@@ -3227,29 +3161,11 @@ static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
return err;
}
- len = strlen(s) + 1;
-
- new_off = d->strs_len;
- p = btf_add_mem(&d->strs_data, &d->strs_cap, 1, new_off, BTF_MAX_STR_OFFSET, len);
- if (!p)
- return -ENOMEM;
+ off = strset__add_str(d->strs_set, s);
+ if (off < 0)
+ return off;
- memcpy(p, s, len);
-
- /* Now attempt to add the string, but only if the string with the same
- * contents doesn't exist already (HASHMAP_ADD strategy). If such
- * string exists, we'll get its offset in old_off (that's old_key).
- */
- err = hashmap__insert(d->strs_hash, (void *)new_off, (void *)new_off,
- HASHMAP_ADD, (const void **)&old_off, NULL);
- if (err == -EEXIST) {
- *str_off_ptr = d->btf->start_str_off + old_off;
- } else if (err) {
- return err;
- } else {
- *str_off_ptr = d->btf->start_str_off + new_off;
- d->strs_len += len;
- }
+ *str_off_ptr = d->btf->start_str_off + off;
return 0;
}
@@ -3266,39 +3182,23 @@ static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
*/
static int btf_dedup_strings(struct btf_dedup *d)
{
- char *s;
int err;
if (d->btf->strs_deduped)
return 0;
- /* temporarily switch to use btf_dedup's strs_data for strings for hash
- * functions; later we'll just transfer hashmap to struct btf as is,
- * along the strs_data
- */
- d->btf->strs_data_ptr = &d->strs_data;
-
- d->strs_hash = hashmap__new(strs_hash_fn, strs_hash_equal_fn, d->btf);
- if (IS_ERR(d->strs_hash)) {
- err = PTR_ERR(d->strs_hash);
- d->strs_hash = NULL;
+ d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
+ if (IS_ERR(d->strs_set)) {
+ err = PTR_ERR(d->strs_set);
goto err_out;
}
if (!d->btf->base_btf) {
- s = btf_add_mem(&d->strs_data, &d->strs_cap, 1, d->strs_len, BTF_MAX_STR_OFFSET, 1);
- if (!s)
- return -ENOMEM;
- /* initial empty string */
- s[0] = 0;
- d->strs_len = 1;
-
/* insert empty string; we won't be looking it up during strings
* dedup, but it's good to have it for generic BTF string lookups
*/
- err = hashmap__insert(d->strs_hash, (void *)0, (void *)0,
- HASHMAP_ADD, NULL, NULL);
- if (err)
+ err = strset__add_str(d->strs_set, "");
+ if (err < 0)
goto err_out;
}
@@ -3308,28 +3208,16 @@ static int btf_dedup_strings(struct btf_dedup *d)
goto err_out;
/* replace BTF string data and hash with deduped ones */
- free(d->btf->strs_data);
- hashmap__free(d->btf->strs_hash);
- d->btf->strs_data = d->strs_data;
- d->btf->strs_data_cap = d->strs_cap;
- d->btf->hdr->str_len = d->strs_len;
- d->btf->strs_hash = d->strs_hash;
- /* now point strs_data_ptr back to btf->strs_data */
- d->btf->strs_data_ptr = &d->btf->strs_data;
-
- d->strs_data = d->strs_hash = NULL;
- d->strs_len = d->strs_cap = 0;
+ strset__free(d->btf->strs_set);
+ d->btf->hdr->str_len = strset__data_size(d->strs_set);
+ d->btf->strs_set = d->strs_set;
+ d->strs_set = NULL;
d->btf->strs_deduped = true;
return 0;
err_out:
- free(d->strs_data);
- hashmap__free(d->strs_hash);
- d->strs_data = d->strs_hash = NULL;
- d->strs_len = d->strs_cap = 0;
-
- /* restore strings pointer for existing d->btf->strs_hash back */
- d->btf->strs_data_ptr = &d->strs_data;
+ strset__free(d->strs_set);
+ d->strs_set = NULL;
return err;
}
@@ -3626,6 +3514,7 @@ static int btf_dedup_prep(struct btf_dedup *d)
case BTF_KIND_FWD:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
+ case BTF_KIND_FLOAT:
h = btf_hash_common(t);
break;
case BTF_KIND_INT:
@@ -3722,6 +3611,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
break;
case BTF_KIND_FWD:
+ case BTF_KIND_FLOAT:
h = btf_hash_common(t);
for_each_dedup_cand(d, hash_entry, h) {
cand_id = (__u32)(long)hash_entry->value;
@@ -3983,6 +3873,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
return btf_compat_enum(cand_type, canon_type);
case BTF_KIND_FWD:
+ case BTF_KIND_FLOAT:
return btf_equal_common(cand_type, canon_type);
case BTF_KIND_CONST:
@@ -4450,15 +4341,18 @@ static int btf_dedup_compact_types(struct btf_dedup *d)
* then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
* which is populated during compaction phase.
*/
-static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
+static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
{
+ struct btf_dedup *d = ctx;
__u32 resolved_type_id, new_type_id;
- resolved_type_id = resolve_type_id(d, type_id);
+ resolved_type_id = resolve_type_id(d, *type_id);
new_type_id = d->hypot_map[resolved_type_id];
if (new_type_id > BTF_MAX_NR_TYPES)
return -EINVAL;
- return new_type_id;
+
+ *type_id = new_type_id;
+ return 0;
}
/*
@@ -4471,111 +4365,28 @@ static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
* referenced from any BTF type (e.g., struct fields, func proto args, etc) to
* their final deduped type IDs.
*/
-static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
+static int btf_dedup_remap_types(struct btf_dedup *d)
{
- struct btf_type *t = btf_type_by_id(d->btf, type_id);
int i, r;
- switch (btf_kind(t)) {
- case BTF_KIND_INT:
- case BTF_KIND_ENUM:
- break;
-
- case BTF_KIND_FWD:
- case BTF_KIND_CONST:
- case BTF_KIND_VOLATILE:
- case BTF_KIND_RESTRICT:
- case BTF_KIND_PTR:
- case BTF_KIND_TYPEDEF:
- case BTF_KIND_FUNC:
- case BTF_KIND_VAR:
- r = btf_dedup_remap_type_id(d, t->type);
- if (r < 0)
- return r;
- t->type = r;
- break;
-
- case BTF_KIND_ARRAY: {
- struct btf_array *arr_info = btf_array(t);
+ for (i = 0; i < d->btf->nr_types; i++) {
+ struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
- r = btf_dedup_remap_type_id(d, arr_info->type);
- if (r < 0)
- return r;
- arr_info->type = r;
- r = btf_dedup_remap_type_id(d, arr_info->index_type);
- if (r < 0)
+ r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
+ if (r)
return r;
- arr_info->index_type = r;
- break;
- }
-
- case BTF_KIND_STRUCT:
- case BTF_KIND_UNION: {
- struct btf_member *member = btf_members(t);
- __u16 vlen = btf_vlen(t);
-
- for (i = 0; i < vlen; i++) {
- r = btf_dedup_remap_type_id(d, member->type);
- if (r < 0)
- return r;
- member->type = r;
- member++;
- }
- break;
}
- case BTF_KIND_FUNC_PROTO: {
- struct btf_param *param = btf_params(t);
- __u16 vlen = btf_vlen(t);
-
- r = btf_dedup_remap_type_id(d, t->type);
- if (r < 0)
- return r;
- t->type = r;
-
- for (i = 0; i < vlen; i++) {
- r = btf_dedup_remap_type_id(d, param->type);
- if (r < 0)
- return r;
- param->type = r;
- param++;
- }
- break;
- }
-
- case BTF_KIND_DATASEC: {
- struct btf_var_secinfo *var = btf_var_secinfos(t);
- __u16 vlen = btf_vlen(t);
-
- for (i = 0; i < vlen; i++) {
- r = btf_dedup_remap_type_id(d, var->type);
- if (r < 0)
- return r;
- var->type = r;
- var++;
- }
- break;
- }
+ if (!d->btf_ext)
+ return 0;
- default:
- return -EINVAL;
- }
+ r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
+ if (r)
+ return r;
return 0;
}
-static int btf_dedup_remap_types(struct btf_dedup *d)
-{
- int i, r;
-
- for (i = 0; i < d->btf->nr_types; i++) {
- r = btf_dedup_remap_type(d, d->btf->start_id + i);
- if (r < 0)
- return r;
- }
- return 0;
-}
-
/*
* Probe few well-known locations for vmlinux kernel image and try to load BTF
* data out of it to use for target BTF.
@@ -4626,3 +4437,200 @@ struct btf *libbpf_find_kernel_btf(void)
pr_warn("failed to find valid kernel BTF\n");
return ERR_PTR(-ESRCH);
}
+
+int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
+{
+ int i, n, err;
+
+ switch (btf_kind(t)) {
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ return 0;
+
+ case BTF_KIND_FWD:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ case BTF_KIND_VAR:
+ return visit(&t->type, ctx);
+
+ case BTF_KIND_ARRAY: {
+ struct btf_array *a = btf_array(t);
+
+ err = visit(&a->type, ctx);
+ err = err ?: visit(&a->index_type, ctx);
+ return err;
+ }
+
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ struct btf_member *m = btf_members(t);
+
+ for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
+ err = visit(&m->type, ctx);
+ if (err)
+ return err;
+ }
+ return 0;
+ }
+
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *m = btf_params(t);
+
+ err = visit(&t->type, ctx);
+ if (err)
+ return err;
+ for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
+ err = visit(&m->type, ctx);
+ if (err)
+ return err;
+ }
+ return 0;
+ }
+
+ case BTF_KIND_DATASEC: {
+ struct btf_var_secinfo *m = btf_var_secinfos(t);
+
+ for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
+ err = visit(&m->type, ctx);
+ if (err)
+ return err;
+ }
+ return 0;
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
+
+int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
+{
+ int i, n, err;
+
+ err = visit(&t->name_off, ctx);
+ if (err)
+ return err;
+
+ switch (btf_kind(t)) {
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ struct btf_member *m = btf_members(t);
+
+ for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
+ err = visit(&m->name_off, ctx);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ case BTF_KIND_ENUM: {
+ struct btf_enum *m = btf_enum(t);
+
+ for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
+ err = visit(&m->name_off, ctx);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *m = btf_params(t);
+
+ for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
+ err = visit(&m->name_off, ctx);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
+{
+ const struct btf_ext_info *seg;
+ struct btf_ext_info_sec *sec;
+ int i, err;
+
+ seg = &btf_ext->func_info;
+ for_each_btf_ext_sec(seg, sec) {
+ struct bpf_func_info_min *rec;
+
+ for_each_btf_ext_rec(seg, sec, i, rec) {
+ err = visit(&rec->type_id, ctx);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ seg = &btf_ext->core_relo_info;
+ for_each_btf_ext_sec(seg, sec) {
+ struct bpf_core_relo *rec;
+
+ for_each_btf_ext_rec(seg, sec, i, rec) {
+ err = visit(&rec->type_id, ctx);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
+{
+ const struct btf_ext_info *seg;
+ struct btf_ext_info_sec *sec;
+ int i, err;
+
+ seg = &btf_ext->func_info;
+ for_each_btf_ext_sec(seg, sec) {
+ err = visit(&sec->sec_name_off, ctx);
+ if (err)
+ return err;
+ }
+
+ seg = &btf_ext->line_info;
+ for_each_btf_ext_sec(seg, sec) {
+ struct bpf_line_info_min *rec;
+
+ err = visit(&sec->sec_name_off, ctx);
+ if (err)
+ return err;
+
+ for_each_btf_ext_rec(seg, sec, i, rec) {
+ err = visit(&rec->file_name_off, ctx);
+ if (err)
+ return err;
+ err = visit(&rec->line_off, ctx);
+ if (err)
+ return err;
+ }
+ }
+
+ seg = &btf_ext->core_relo_info;
+ for_each_btf_ext_sec(seg, sec) {
+ struct bpf_core_relo *rec;
+
+ err = visit(&sec->sec_name_off, ctx);
+ if (err)
+ return err;
+
+ for_each_btf_ext_rec(seg, sec, i, rec) {
+ err = visit(&rec->access_str_off, ctx);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
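A sketch of a str_off_visit_fn callback built on these internal visitors, counting non-empty string references of a single type; the walk aborts on the first non-zero return:

    static int count_str(__u32 *str_off, void *ctx)
    {
            if (*str_off)
                    (*(int *)ctx)++;
            return 0;
    }

    int n = 0;
    int err = btf_type_visit_str_offs(t, count_str, &n);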
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 1237bcd1dd17..b54f1c3ebd57 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -93,8 +93,11 @@ LIBBPF_API struct btf *libbpf_find_kernel_btf(void);
LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
+LIBBPF_API int btf__add_type(struct btf *btf, const struct btf *src_btf,
+ const struct btf_type *src_type);
LIBBPF_API int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding);
+LIBBPF_API int btf__add_float(struct btf *btf, const char *name, size_t byte_sz);
LIBBPF_API int btf__add_ptr(struct btf *btf, int ref_type_id);
LIBBPF_API int btf__add_array(struct btf *btf,
int index_type_id, int elem_type_id, __u32 nr_elems);
@@ -173,6 +176,7 @@ struct btf_dump_emit_type_decl_opts {
int indent_level;
/* strip all the const/volatile/restrict mods */
bool strip_mods;
+ size_t :0;
};
#define btf_dump_emit_type_decl_opts__last_field strip_mods
@@ -294,6 +298,11 @@ static inline bool btf_is_datasec(const struct btf_type *t)
return btf_kind(t) == BTF_KIND_DATASEC;
}
+static inline bool btf_is_float(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_FLOAT;
+}
+
static inline __u8 btf_int_encoding(const struct btf_type *t)
{
return BTF_INT_ENCODING(*(__u32 *)(t + 1));
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 0911aea4cdbe..5e2809d685bf 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -166,11 +166,11 @@ static int btf_dump_resize(struct btf_dump *d)
if (last_id <= d->last_id)
return 0;
- if (btf_ensure_mem((void **)&d->type_states, &d->type_states_cap,
- sizeof(*d->type_states), last_id + 1))
+ if (libbpf_ensure_mem((void **)&d->type_states, &d->type_states_cap,
+ sizeof(*d->type_states), last_id + 1))
return -ENOMEM;
- if (btf_ensure_mem((void **)&d->cached_names, &d->cached_names_cap,
- sizeof(*d->cached_names), last_id + 1))
+ if (libbpf_ensure_mem((void **)&d->cached_names, &d->cached_names_cap,
+ sizeof(*d->cached_names), last_id + 1))
return -ENOMEM;
if (d->last_id == 0) {
@@ -279,6 +279,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
case BTF_KIND_INT:
case BTF_KIND_ENUM:
case BTF_KIND_FWD:
+ case BTF_KIND_FLOAT:
break;
case BTF_KIND_VOLATILE:
@@ -453,6 +454,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
switch (btf_kind(t)) {
case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
tstate->order_state = ORDERED;
return 0;
@@ -1133,6 +1135,7 @@ skip_mod:
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FLOAT:
goto done;
default:
pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n",
@@ -1247,6 +1250,7 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
switch (kind) {
case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
btf_dump_emit_mods(d, decls);
name = btf_name_of(d, t->name_off);
btf_dump_printf(d, "%s", name);
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 4181d178ee7b..e2a3cf437814 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -55,10 +55,6 @@
#include "libbpf_internal.h"
#include "hashmap.h"
-#ifndef EM_BPF
-#define EM_BPF 247
-#endif
-
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif
@@ -73,8 +69,7 @@
#define __printf(a, b) __attribute__((format(printf, a, b)))
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
-static const struct btf_type *
-skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
+static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
static int __base_pr(enum libbpf_print_level level, const char *format,
va_list args)
@@ -178,6 +173,8 @@ enum kern_feature_id {
FEAT_PROG_BIND_MAP,
/* Kernel support for module BTFs */
FEAT_MODULE_BTF,
+ /* BTF_KIND_FLOAT support */
+ FEAT_BTF_FLOAT,
__FEAT_CNT,
};
@@ -187,7 +184,9 @@ enum reloc_type {
RELO_LD64,
RELO_CALL,
RELO_DATA,
- RELO_EXTERN,
+ RELO_EXTERN_VAR,
+ RELO_EXTERN_FUNC,
+ RELO_SUBPROG_ADDR,
};
struct reloc_desc {
@@ -195,7 +194,6 @@ struct reloc_desc {
int insn_idx;
int map_idx;
int sym_off;
- bool processed;
};
struct bpf_sec_def;
@@ -275,6 +273,7 @@ struct bpf_program {
bpf_program_clear_priv_t clear_priv;
bool load;
+ bool mark_btf_static;
enum bpf_prog_type type;
enum bpf_attach_type expected_attach_type;
int prog_ifindex;
@@ -501,8 +500,6 @@ static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
-static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
- size_t off, __u32 sym_type, GElf_Sym *sym);
void bpf_program__unload(struct bpf_program *prog)
{
@@ -574,6 +571,21 @@ static bool insn_is_subprog_call(const struct bpf_insn *insn)
insn->off == 0;
}
+static bool is_ldimm64_insn(struct bpf_insn *insn)
+{
+ return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
+}
+
+static bool is_call_insn(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_JMP | BPF_CALL);
+}
+
+static bool insn_is_pseudo_func(struct bpf_insn *insn)
+{
+ return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
+}
+
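For reference, the instruction shape these predicates match; insn_off is a hypothetical subprogram instruction offset:

    /* ldimm64 carrying a subprogram address, as emitted for
     * callback-taking helpers such as bpf_for_each_map_elem()
     */
    struct bpf_insn insn = {
            .code    = BPF_LD | BPF_IMM | BPF_DW,
            .src_reg = BPF_PSEUDO_FUNC,
            .imm     = insn_off,
    };
    /* insn_is_pseudo_func(&insn) is true */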
static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
const char *name, size_t sec_idx, const char *sec_name,
@@ -628,25 +640,29 @@ static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
const char *sec_name, int sec_idx)
{
+ Elf_Data *symbols = obj->efile.symbols;
struct bpf_program *prog, *progs;
void *data = sec_data->d_buf;
- size_t sec_sz = sec_data->d_size, sec_off, prog_sz;
- int nr_progs, err;
+ size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
+ int nr_progs, err, i;
const char *name;
GElf_Sym sym;
progs = obj->programs;
nr_progs = obj->nr_programs;
+ nr_syms = symbols->d_size / sizeof(GElf_Sym);
sec_off = 0;
- while (sec_off < sec_sz) {
- if (elf_sym_by_sec_off(obj, sec_idx, sec_off, STT_FUNC, &sym)) {
- pr_warn("sec '%s': failed to find program symbol at offset %zu\n",
- sec_name, sec_off);
- return -LIBBPF_ERRNO__FORMAT;
- }
+ for (i = 0; i < nr_syms; i++) {
+ if (!gelf_getsym(symbols, i, &sym))
+ continue;
+ if (sym.st_shndx != sec_idx)
+ continue;
+ if (GELF_ST_TYPE(sym.st_info) != STT_FUNC)
+ continue;
prog_sz = sym.st_size;
+ sec_off = sym.st_value;
name = elf_sym_str(obj, sym.st_name);
if (!name) {
@@ -684,10 +700,17 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
if (err)
return err;
+ /* if the function is a global/weak symbol but has hidden
+ * visibility (STV_HIDDEN), mark its BTF FUNC as static to
+ * enable the more permissive BPF verification mode that has
+ * more outside context available to the verifier
+ */
+ if (GELF_ST_BIND(sym.st_info) != STB_LOCAL
+ && GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN)
+ prog->mark_btf_static = true;
+
nr_progs++;
obj->nr_programs = nr_progs;
-
- sec_off += prog_sz;
}
return 0;
@@ -1121,11 +1144,6 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
obj->efile.obj_buf_sz = 0;
}
-/* if libelf is old and doesn't support mmap(), fall back to read() */
-#ifndef ELF_C_READ_MMAP
-#define ELF_C_READ_MMAP ELF_C_READ
-#endif
-
static int bpf_object__elf_init(struct bpf_object *obj)
{
int err = 0;
@@ -1886,7 +1904,7 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
return 0;
}
-static const struct btf_type *
+const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
const struct btf_type *t = btf__type_by_id(btf, id);
@@ -1917,9 +1935,9 @@ resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
return btf_is_func_proto(t) ? t : NULL;
}
-static const char *btf_kind_str(const struct btf_type *t)
+static const char *__btf_kind_str(__u16 kind)
{
- switch (btf_kind(t)) {
+ switch (kind) {
case BTF_KIND_UNKN: return "void";
case BTF_KIND_INT: return "int";
case BTF_KIND_PTR: return "ptr";
@@ -1936,10 +1954,16 @@ static const char *btf_kind_str(const struct btf_type *t)
case BTF_KIND_FUNC_PROTO: return "func_proto";
case BTF_KIND_VAR: return "var";
case BTF_KIND_DATASEC: return "datasec";
+ case BTF_KIND_FLOAT: return "float";
default: return "unknown";
}
}
+const char *btf_kind_str(const struct btf_type *t)
+{
+ return __btf_kind_str(btf_kind(t));
+}
+
/*
* Fetch integer attribute of BTF map definition. Such attributes are
* represented using a pointer to an array, in which dimensionality of array
@@ -1994,254 +2018,262 @@ static int build_map_pin_path(struct bpf_map *map, const char *path)
return bpf_map__set_pin_path(map, buf);
}
-
-static int parse_btf_map_def(struct bpf_object *obj,
- struct bpf_map *map,
- const struct btf_type *def,
- bool strict, bool is_inner,
- const char *pin_root_path)
+int parse_btf_map_def(const char *map_name, struct btf *btf,
+ const struct btf_type *def_t, bool strict,
+ struct btf_map_def *map_def, struct btf_map_def *inner_def)
{
const struct btf_type *t;
const struct btf_member *m;
+ bool is_inner = inner_def == NULL;
int vlen, i;
- vlen = btf_vlen(def);
- m = btf_members(def);
+ vlen = btf_vlen(def_t);
+ m = btf_members(def_t);
for (i = 0; i < vlen; i++, m++) {
- const char *name = btf__name_by_offset(obj->btf, m->name_off);
+ const char *name = btf__name_by_offset(btf, m->name_off);
if (!name) {
- pr_warn("map '%s': invalid field #%d.\n", map->name, i);
+ pr_warn("map '%s': invalid field #%d.\n", map_name, i);
return -EINVAL;
}
if (strcmp(name, "type") == 0) {
- if (!get_map_field_int(map->name, obj->btf, m,
- &map->def.type))
+ if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
return -EINVAL;
- pr_debug("map '%s': found type = %u.\n",
- map->name, map->def.type);
+ map_def->parts |= MAP_DEF_MAP_TYPE;
} else if (strcmp(name, "max_entries") == 0) {
- if (!get_map_field_int(map->name, obj->btf, m,
- &map->def.max_entries))
+ if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
return -EINVAL;
- pr_debug("map '%s': found max_entries = %u.\n",
- map->name, map->def.max_entries);
+ map_def->parts |= MAP_DEF_MAX_ENTRIES;
} else if (strcmp(name, "map_flags") == 0) {
- if (!get_map_field_int(map->name, obj->btf, m,
- &map->def.map_flags))
+ if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
return -EINVAL;
- pr_debug("map '%s': found map_flags = %u.\n",
- map->name, map->def.map_flags);
+ map_def->parts |= MAP_DEF_MAP_FLAGS;
} else if (strcmp(name, "numa_node") == 0) {
- if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node))
+ if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
return -EINVAL;
- pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node);
+ map_def->parts |= MAP_DEF_NUMA_NODE;
} else if (strcmp(name, "key_size") == 0) {
__u32 sz;
- if (!get_map_field_int(map->name, obj->btf, m, &sz))
+ if (!get_map_field_int(map_name, btf, m, &sz))
return -EINVAL;
- pr_debug("map '%s': found key_size = %u.\n",
- map->name, sz);
- if (map->def.key_size && map->def.key_size != sz) {
+ if (map_def->key_size && map_def->key_size != sz) {
pr_warn("map '%s': conflicting key size %u != %u.\n",
- map->name, map->def.key_size, sz);
+ map_name, map_def->key_size, sz);
return -EINVAL;
}
- map->def.key_size = sz;
+ map_def->key_size = sz;
+ map_def->parts |= MAP_DEF_KEY_SIZE;
} else if (strcmp(name, "key") == 0) {
__s64 sz;
- t = btf__type_by_id(obj->btf, m->type);
+ t = btf__type_by_id(btf, m->type);
if (!t) {
pr_warn("map '%s': key type [%d] not found.\n",
- map->name, m->type);
+ map_name, m->type);
return -EINVAL;
}
if (!btf_is_ptr(t)) {
pr_warn("map '%s': key spec is not PTR: %s.\n",
- map->name, btf_kind_str(t));
+ map_name, btf_kind_str(t));
return -EINVAL;
}
- sz = btf__resolve_size(obj->btf, t->type);
+ sz = btf__resolve_size(btf, t->type);
if (sz < 0) {
pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
- map->name, t->type, (ssize_t)sz);
+ map_name, t->type, (ssize_t)sz);
return sz;
}
- pr_debug("map '%s': found key [%u], sz = %zd.\n",
- map->name, t->type, (ssize_t)sz);
- if (map->def.key_size && map->def.key_size != sz) {
+ if (map_def->key_size && map_def->key_size != sz) {
pr_warn("map '%s': conflicting key size %u != %zd.\n",
- map->name, map->def.key_size, (ssize_t)sz);
+ map_name, map_def->key_size, (ssize_t)sz);
return -EINVAL;
}
- map->def.key_size = sz;
- map->btf_key_type_id = t->type;
+ map_def->key_size = sz;
+ map_def->key_type_id = t->type;
+ map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
} else if (strcmp(name, "value_size") == 0) {
__u32 sz;
- if (!get_map_field_int(map->name, obj->btf, m, &sz))
+ if (!get_map_field_int(map_name, btf, m, &sz))
return -EINVAL;
- pr_debug("map '%s': found value_size = %u.\n",
- map->name, sz);
- if (map->def.value_size && map->def.value_size != sz) {
+ if (map_def->value_size && map_def->value_size != sz) {
pr_warn("map '%s': conflicting value size %u != %u.\n",
- map->name, map->def.value_size, sz);
+ map_name, map_def->value_size, sz);
return -EINVAL;
}
- map->def.value_size = sz;
+ map_def->value_size = sz;
+ map_def->parts |= MAP_DEF_VALUE_SIZE;
} else if (strcmp(name, "value") == 0) {
__s64 sz;
- t = btf__type_by_id(obj->btf, m->type);
+ t = btf__type_by_id(btf, m->type);
if (!t) {
pr_warn("map '%s': value type [%d] not found.\n",
- map->name, m->type);
+ map_name, m->type);
return -EINVAL;
}
if (!btf_is_ptr(t)) {
pr_warn("map '%s': value spec is not PTR: %s.\n",
- map->name, btf_kind_str(t));
+ map_name, btf_kind_str(t));
return -EINVAL;
}
- sz = btf__resolve_size(obj->btf, t->type);
+ sz = btf__resolve_size(btf, t->type);
if (sz < 0) {
pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
- map->name, t->type, (ssize_t)sz);
+ map_name, t->type, (ssize_t)sz);
return sz;
}
- pr_debug("map '%s': found value [%u], sz = %zd.\n",
- map->name, t->type, (ssize_t)sz);
- if (map->def.value_size && map->def.value_size != sz) {
+ if (map_def->value_size && map_def->value_size != sz) {
pr_warn("map '%s': conflicting value size %u != %zd.\n",
- map->name, map->def.value_size, (ssize_t)sz);
+ map_name, map_def->value_size, (ssize_t)sz);
return -EINVAL;
}
- map->def.value_size = sz;
- map->btf_value_type_id = t->type;
+ map_def->value_size = sz;
+ map_def->value_type_id = t->type;
+ map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
}
else if (strcmp(name, "values") == 0) {
+ char inner_map_name[128];
int err;
if (is_inner) {
pr_warn("map '%s': multi-level inner maps not supported.\n",
- map->name);
+ map_name);
return -ENOTSUP;
}
if (i != vlen - 1) {
pr_warn("map '%s': '%s' member should be last.\n",
- map->name, name);
+ map_name, name);
return -EINVAL;
}
- if (!bpf_map_type__is_map_in_map(map->def.type)) {
+ if (!bpf_map_type__is_map_in_map(map_def->map_type)) {
pr_warn("map '%s': should be map-in-map.\n",
- map->name);
+ map_name);
return -ENOTSUP;
}
- if (map->def.value_size && map->def.value_size != 4) {
+ if (map_def->value_size && map_def->value_size != 4) {
pr_warn("map '%s': conflicting value size %u != 4.\n",
- map->name, map->def.value_size);
+ map_name, map_def->value_size);
return -EINVAL;
}
- map->def.value_size = 4;
- t = btf__type_by_id(obj->btf, m->type);
+ map_def->value_size = 4;
+ t = btf__type_by_id(btf, m->type);
if (!t) {
pr_warn("map '%s': map-in-map inner type [%d] not found.\n",
- map->name, m->type);
+ map_name, m->type);
return -EINVAL;
}
if (!btf_is_array(t) || btf_array(t)->nelems) {
pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n",
- map->name);
+ map_name);
return -EINVAL;
}
- t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type,
- NULL);
+ t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
if (!btf_is_ptr(t)) {
pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
- map->name, btf_kind_str(t));
+ map_name, btf_kind_str(t));
return -EINVAL;
}
- t = skip_mods_and_typedefs(obj->btf, t->type, NULL);
+ t = skip_mods_and_typedefs(btf, t->type, NULL);
if (!btf_is_struct(t)) {
pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
- map->name, btf_kind_str(t));
+ map_name, btf_kind_str(t));
return -EINVAL;
}
- map->inner_map = calloc(1, sizeof(*map->inner_map));
- if (!map->inner_map)
- return -ENOMEM;
- map->inner_map->sec_idx = obj->efile.btf_maps_shndx;
- map->inner_map->name = malloc(strlen(map->name) +
- sizeof(".inner") + 1);
- if (!map->inner_map->name)
- return -ENOMEM;
- sprintf(map->inner_map->name, "%s.inner", map->name);
-
- err = parse_btf_map_def(obj, map->inner_map, t, strict,
- true /* is_inner */, NULL);
+ snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
+ err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
if (err)
return err;
+
+ map_def->parts |= MAP_DEF_INNER_MAP;
} else if (strcmp(name, "pinning") == 0) {
__u32 val;
- int err;
if (is_inner) {
- pr_debug("map '%s': inner def can't be pinned.\n",
- map->name);
+ pr_warn("map '%s': inner def can't be pinned.\n", map_name);
return -EINVAL;
}
- if (!get_map_field_int(map->name, obj->btf, m, &val))
+ if (!get_map_field_int(map_name, btf, m, &val))
return -EINVAL;
- pr_debug("map '%s': found pinning = %u.\n",
- map->name, val);
-
- if (val != LIBBPF_PIN_NONE &&
- val != LIBBPF_PIN_BY_NAME) {
+ if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
pr_warn("map '%s': invalid pinning value %u.\n",
- map->name, val);
+ map_name, val);
return -EINVAL;
}
- if (val == LIBBPF_PIN_BY_NAME) {
- err = build_map_pin_path(map, pin_root_path);
- if (err) {
- pr_warn("map '%s': couldn't build pin path.\n",
- map->name);
- return err;
- }
- }
+ map_def->pinning = val;
+ map_def->parts |= MAP_DEF_PINNING;
} else {
if (strict) {
- pr_warn("map '%s': unknown field '%s'.\n",
- map->name, name);
+ pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
return -ENOTSUP;
}
- pr_debug("map '%s': ignoring unknown field '%s'.\n",
- map->name, name);
+ pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
}
}
- if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
- pr_warn("map '%s': map type isn't specified.\n", map->name);
+ if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
+ pr_warn("map '%s': map type isn't specified.\n", map_name);
return -EINVAL;
}
return 0;
}
+static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
+{
+ map->def.type = def->map_type;
+ map->def.key_size = def->key_size;
+ map->def.value_size = def->value_size;
+ map->def.max_entries = def->max_entries;
+ map->def.map_flags = def->map_flags;
+
+ map->numa_node = def->numa_node;
+ map->btf_key_type_id = def->key_type_id;
+ map->btf_value_type_id = def->value_type_id;
+
+ if (def->parts & MAP_DEF_MAP_TYPE)
+ pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
+
+ if (def->parts & MAP_DEF_KEY_TYPE)
+ pr_debug("map '%s': found key [%u], sz = %u.\n",
+ map->name, def->key_type_id, def->key_size);
+ else if (def->parts & MAP_DEF_KEY_SIZE)
+ pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
+
+ if (def->parts & MAP_DEF_VALUE_TYPE)
+ pr_debug("map '%s': found value [%u], sz = %u.\n",
+ map->name, def->value_type_id, def->value_size);
+ else if (def->parts & MAP_DEF_VALUE_SIZE)
+ pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
+
+ if (def->parts & MAP_DEF_MAX_ENTRIES)
+ pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
+ if (def->parts & MAP_DEF_MAP_FLAGS)
+ pr_debug("map '%s': found map_flags = %u.\n", map->name, def->map_flags);
+ if (def->parts & MAP_DEF_PINNING)
+ pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
+ if (def->parts & MAP_DEF_NUMA_NODE)
+ pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
+
+ if (def->parts & MAP_DEF_INNER_MAP)
+ pr_debug("map '%s': found inner map definition.\n", map->name);
+}
+
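The parsed fields correspond one-to-one to a BTF-defined map declaration; a sketch of such a declaration on the BPF side (map name and sizes are illustrative):

    struct {
            __uint(type, BPF_MAP_TYPE_HASH);
            __uint(max_entries, 1024);
            __type(key, __u32);
            __type(value, __u64);
    } counts SEC(".maps");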
static int bpf_object__init_user_btf_map(struct bpf_object *obj,
const struct btf_type *sec,
int var_idx, int sec_idx,
const Elf_Data *data, bool strict,
const char *pin_root_path)
{
+ struct btf_map_def map_def = {}, inner_def = {};
const struct btf_type *var, *def;
const struct btf_var_secinfo *vi;
const struct btf_var *var_extra;
const char *map_name;
struct bpf_map *map;
+ int err;
vi = btf_var_secinfos(sec) + var_idx;
var = btf__type_by_id(obj->btf, vi->type);
@@ -2295,7 +2327,35 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
map_name, map->sec_idx, map->sec_offset);
- return parse_btf_map_def(obj, map, def, strict, false, pin_root_path);
+ err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
+ if (err)
+ return err;
+
+ fill_map_from_def(map, &map_def);
+
+ if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
+ err = build_map_pin_path(map, pin_root_path);
+ if (err) {
+ pr_warn("map '%s': couldn't build pin path.\n", map->name);
+ return err;
+ }
+ }
+
+ if (map_def.parts & MAP_DEF_INNER_MAP) {
+ map->inner_map = calloc(1, sizeof(*map->inner_map));
+ if (!map->inner_map)
+ return -ENOMEM;
+ map->inner_map->fd = -1;
+ map->inner_map->sec_idx = sec_idx;
+ map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
+ if (!map->inner_map->name)
+ return -ENOMEM;
+ sprintf(map->inner_map->name, "%s.inner", map_name);
+
+ fill_map_from_def(map->inner_map, &inner_def);
+ }
+
+ return 0;
}
static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
@@ -2385,15 +2445,17 @@ static bool btf_needs_sanitization(struct bpf_object *obj)
{
bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
+ bool has_float = kernel_supports(FEAT_BTF_FLOAT);
bool has_func = kernel_supports(FEAT_BTF_FUNC);
- return !has_func || !has_datasec || !has_func_global;
+ return !has_func || !has_datasec || !has_func_global || !has_float;
}
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
{
bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
+ bool has_float = kernel_supports(FEAT_BTF_FLOAT);
bool has_func = kernel_supports(FEAT_BTF_FUNC);
struct btf_type *t;
int i, j, vlen;
@@ -2446,6 +2508,13 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
} else if (!has_func_global && btf_is_func(t)) {
/* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
+ } else if (!has_float && btf_is_float(t)) {
+ /* replace FLOAT with an equally-sized empty STRUCT;
+ * since C compilers do not accept e.g. "float" as a
+ * valid struct name, make it anonymous
+ */
+ t->name_off = 0;
+ t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
}
}
}
@@ -2588,7 +2657,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
{
struct btf *kern_btf = obj->btf;
bool btf_mandatory, sanitize;
- int err = 0;
+ int i, err = 0;
if (!obj->btf)
return 0;
@@ -2602,6 +2671,38 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
return 0;
}
+ /* Even though some subprogs are global/weak, users might prefer the
+ * more permissive BPF verification process that the BPF verifier
+ * performs for static functions, taking into account more context
+ * from the caller functions. In that case, they need to mark such
+ * subprogs with __attribute__((visibility("hidden"))) and libbpf
+ * will adjust the corresponding FUNC BTF type to be marked as
+ * static, triggering the more involved BPF verification process.
+ */
+ for (i = 0; i < obj->nr_programs; i++) {
+ struct bpf_program *prog = &obj->programs[i];
+ struct btf_type *t;
+ const char *name;
+ int j, n;
+
+ if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
+ continue;
+
+ n = btf__get_nr_types(obj->btf);
+ for (j = 1; j <= n; j++) {
+ t = btf_type_by_id(obj->btf, j);
+ if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
+ continue;
+
+ name = btf__str_by_offset(obj->btf, t->name_off);
+ if (strcmp(name, prog->name) != 0)
+ continue;
+
+ t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
+ break;
+ }
+ }
+
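On the BPF C side this corresponds to a declaration like the following; a hedged sketch, the subprog name is illustrative:

	/* global subprog, but verified as if static thanks to
	 * hidden visibility
	 */
	__attribute__((visibility("hidden")))
	int my_subprog(void *ctx)
	{
		return 0;
	}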
sanitize = btf_needs_sanitization(obj);
if (sanitize) {
const void *raw_data;
@@ -2752,26 +2853,6 @@ static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
return data;
}
-static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
- size_t off, __u32 sym_type, GElf_Sym *sym)
-{
- Elf_Data *symbols = obj->efile.symbols;
- size_t n = symbols->d_size / sizeof(GElf_Sym);
- int i;
-
- for (i = 0; i < n; i++) {
- if (!gelf_getsym(symbols, i, sym))
- continue;
- if (sym->st_shndx != sec_idx || sym->st_value != off)
- continue;
- if (GELF_ST_TYPE(sym->st_info) != sym_type)
- continue;
- return 0;
- }
-
- return -ENOENT;
-}
-
static bool is_sec_name_dwarf(const char *name)
{
/* approximation, but the actual list is too long */
@@ -2785,7 +2866,7 @@ static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
return true;
/* ignore .llvm_addrsig section as well */
- if (hdr->sh_type == 0x6FFF4C03 /* SHT_LLVM_ADDRSIG */)
+ if (hdr->sh_type == SHT_LLVM_ADDRSIG)
return true;
/* no subprograms will lead to an empty .text section, ignore it */
@@ -2975,10 +3056,27 @@ static bool sym_is_extern(const GElf_Sym *sym)
GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
}
+static bool sym_is_subprog(const GElf_Sym *sym, int text_shndx)
+{
+ int bind = GELF_ST_BIND(sym->st_info);
+ int type = GELF_ST_TYPE(sym->st_info);
+
+ /* in .text section */
+ if (sym->st_shndx != text_shndx)
+ return false;
+
+ /* local function */
+ if (bind == STB_LOCAL && type == STT_SECTION)
+ return true;
+
+ /* global function */
+ return bind == STB_GLOBAL && type == STT_FUNC;
+}
+
static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
{
const struct btf_type *t;
- const char *var_name;
+ const char *tname;
int i, n;
if (!btf)
@@ -2988,14 +3086,18 @@ static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
for (i = 1; i <= n; i++) {
t = btf__type_by_id(btf, i);
- if (!btf_is_var(t))
+ if (!btf_is_var(t) && !btf_is_func(t))
continue;
- var_name = btf__name_by_offset(btf, t->name_off);
- if (strcmp(var_name, ext_name))
+ tname = btf__name_by_offset(btf, t->name_off);
+ if (strcmp(tname, ext_name))
continue;
- if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
+ if (btf_is_var(t) &&
+ btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
+ return -EINVAL;
+
+ if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
return -EINVAL;
return i;
@@ -3108,12 +3210,48 @@ static int find_int_btf_id(const struct btf *btf)
return 0;
}
+static int add_dummy_ksym_var(struct btf *btf)
+{
+ int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
+ const struct btf_var_secinfo *vs;
+ const struct btf_type *sec;
+
+ sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
+ BTF_KIND_DATASEC);
+ if (sec_btf_id < 0)
+ return 0;
+
+ sec = btf__type_by_id(btf, sec_btf_id);
+ vs = btf_var_secinfos(sec);
+ for (i = 0; i < btf_vlen(sec); i++, vs++) {
+ const struct btf_type *vt;
+
+ vt = btf__type_by_id(btf, vs->type);
+ if (btf_is_func(vt))
+ break;
+ }
+
+ /* No func in ksyms sec. No need to add dummy var. */
+ if (i == btf_vlen(sec))
+ return 0;
+
+ int_btf_id = find_int_btf_id(btf);
+ dummy_var_btf_id = btf__add_var(btf,
+ "dummy_ksym",
+ BTF_VAR_GLOBAL_ALLOCATED,
+ int_btf_id);
+ if (dummy_var_btf_id < 0)
+ pr_warn("cannot create a dummy_ksym var\n");
+
+ return dummy_var_btf_id;
+}
+
static int bpf_object__collect_externs(struct bpf_object *obj)
{
struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
const struct btf_type *t;
struct extern_desc *ext;
- int i, n, off;
+ int i, n, off, dummy_var_btf_id;
const char *ext_name, *sec_name;
Elf_Scn *scn;
GElf_Shdr sh;
@@ -3125,6 +3263,10 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
if (elf_sec_hdr(obj, scn, &sh))
return -LIBBPF_ERRNO__FORMAT;
+ dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
+ if (dummy_var_btf_id < 0)
+ return dummy_var_btf_id;
+
n = sh.sh_size / sh.sh_entsize;
pr_debug("looking for externs among %d symbols...\n", n);
@@ -3169,6 +3311,11 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
sec_name = btf__name_by_offset(obj->btf, sec->name_off);
if (strcmp(sec_name, KCONFIG_SEC) == 0) {
+ if (btf_is_func(t)) {
+ pr_warn("extern function %s is unsupported in %s section\n",
+ ext->name, KCONFIG_SEC);
+ return -ENOTSUP;
+ }
kcfg_sec = sec;
ext->type = EXT_KCFG;
ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
@@ -3190,6 +3337,11 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
return -ENOTSUP;
}
} else if (strcmp(sec_name, KSYMS_SEC) == 0) {
+ if (btf_is_func(t) && ext->is_weak) {
+ pr_warn("extern weak function %s is unsupported\n",
+ ext->name);
+ return -ENOTSUP;
+ }
ksym_sec = sec;
ext->type = EXT_KSYM;
skip_mods_and_typedefs(obj->btf, t->type,
@@ -3216,7 +3368,14 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
* extern variables in DATASEC
*/
int int_btf_id = find_int_btf_id(obj->btf);
+ /* For extern functions, the dummy_var added earlier
+ * replaces vs->type, and its name string is reused to
+ * fill in missing param names.
+ */
+ const struct btf_type *dummy_var;
+ dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
for (i = 0; i < obj->nr_extern; i++) {
ext = &obj->externs[i];
if (ext->type != EXT_KSYM)
@@ -3235,12 +3394,32 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
ext_name = btf__name_by_offset(obj->btf, vt->name_off);
ext = find_extern_by_name(obj, ext_name);
if (!ext) {
- pr_warn("failed to find extern definition for BTF var '%s'\n",
- ext_name);
+ pr_warn("failed to find extern definition for BTF %s '%s'\n",
+ btf_kind_str(vt), ext_name);
return -ESRCH;
}
- btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
- vt->type = int_btf_id;
+ if (btf_is_func(vt)) {
+ const struct btf_type *func_proto;
+ struct btf_param *param;
+ int j;
+
+ func_proto = btf__type_by_id(obj->btf,
+ vt->type);
+ param = btf_params(func_proto);
+ /* Reuse the dummy_var name string if the
+ * func proto does not have a param name.
+ */
+ for (j = 0; j < btf_vlen(func_proto); j++)
+ if (param[j].type && !param[j].name_off)
+ param[j].name_off =
+ dummy_var->name_off;
+ vs->type = dummy_var_btf_id;
+ vt->info &= ~0xffff;
+ vt->info |= BTF_FUNC_GLOBAL;
+ } else {
+ btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
+ vt->type = int_btf_id;
+ }
vs->offset = off;
vs->size = sizeof(int);
}
@@ -3370,33 +3549,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
const char *sym_sec_name;
struct bpf_map *map;
- reloc_desc->processed = false;
-
- /* sub-program call relocation */
- if (insn->code == (BPF_JMP | BPF_CALL)) {
- if (insn->src_reg != BPF_PSEUDO_CALL) {
- pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
- return -LIBBPF_ERRNO__RELOC;
- }
- /* text_shndx can be 0, if no default "main" program exists */
- if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
- sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
- pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
- prog->name, sym_name, sym_sec_name);
- return -LIBBPF_ERRNO__RELOC;
- }
- if (sym->st_value % BPF_INSN_SZ) {
- pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
- prog->name, sym_name, (size_t)sym->st_value);
- return -LIBBPF_ERRNO__RELOC;
- }
- reloc_desc->type = RELO_CALL;
- reloc_desc->insn_idx = insn_idx;
- reloc_desc->sym_off = sym->st_value;
- return 0;
- }
-
- if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
+ if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
prog->name, sym_name, insn_idx, insn->code);
return -LIBBPF_ERRNO__RELOC;
@@ -3419,18 +3572,62 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
}
pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
prog->name, i, ext->name, ext->sym_idx, insn_idx);
- reloc_desc->type = RELO_EXTERN;
+ if (insn->code == (BPF_JMP | BPF_CALL))
+ reloc_desc->type = RELO_EXTERN_FUNC;
+ else
+ reloc_desc->type = RELO_EXTERN_VAR;
reloc_desc->insn_idx = insn_idx;
reloc_desc->sym_off = i; /* sym_off stores extern index */
return 0;
}
+ /* sub-program call relocation */
+ if (is_call_insn(insn)) {
+ if (insn->src_reg != BPF_PSEUDO_CALL) {
+ pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ /* text_shndx can be 0 if no default "main" program exists */
+ if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
+ sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
+ pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
+ prog->name, sym_name, sym_sec_name);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ if (sym->st_value % BPF_INSN_SZ) {
+ pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
+ prog->name, sym_name, (size_t)sym->st_value);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ reloc_desc->type = RELO_CALL;
+ reloc_desc->insn_idx = insn_idx;
+ reloc_desc->sym_off = sym->st_value;
+ return 0;
+ }
+
if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
prog->name, sym_name, shdr_idx);
return -LIBBPF_ERRNO__RELOC;
}
+ /* loading subprog addresses */
+ if (sym_is_subprog(sym, obj->efile.text_shndx)) {
+ /* global_func: sym->st_value = offset in the section, insn->imm = 0.
+ * local_func: sym->st_value = 0, insn->imm = offset in the section.
+ */
+ if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
+ pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
+ prog->name, sym_name, (size_t)sym->st_value, insn->imm);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+
+ reloc_desc->type = RELO_SUBPROG_ADDR;
+ reloc_desc->insn_idx = insn_idx;
+ reloc_desc->sym_off = sym->st_value;
+ return 0;
+ }
+
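Subprog address relocations are produced when BPF C code takes a function's address, e.g. to pass a callback; a hedged sketch, with callback and map names being illustrative:

	static long check_elem(struct bpf_map *map, __u32 *key,
			       __u64 *val, void *ctx)
	{
		return 0;	/* 0 means continue iteration */
	}

	/* the ld_imm64 that loads &check_elem gets a RELO_SUBPROG_ADDR */
	bpf_for_each_map_elem(&example_map, check_elem, NULL, 0);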
type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
@@ -3534,11 +3731,16 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data
int err, i, nrels;
const char *sym_name;
__u32 insn_idx;
+ Elf_Scn *scn;
+ Elf_Data *scn_data;
GElf_Sym sym;
GElf_Rel rel;
+ scn = elf_sec_by_idx(obj, sec_idx);
+ scn_data = elf_sec_data(obj, scn);
+
relo_sec_name = elf_sec_str(obj, shdr->sh_name);
- sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+ sec_name = elf_sec_name(obj, scn);
if (!relo_sec_name || !sec_name)
return -EINVAL;
@@ -3556,7 +3758,8 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data
relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
return -LIBBPF_ERRNO__FORMAT;
}
- if (rel.r_offset % BPF_INSN_SZ) {
+
+ if (rel.r_offset % BPF_INSN_SZ || rel.r_offset >= scn_data->d_size) {
pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
return -LIBBPF_ERRNO__FORMAT;
@@ -3580,9 +3783,9 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data
prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
if (!prog) {
- pr_warn("sec '%s': relo #%d: program not found in section '%s' for insn #%u\n",
+ pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
relo_sec_name, i, sec_name, insn_idx);
- return -LIBBPF_ERRNO__RELOC;
+ continue;
}
relos = libbpf_reallocarray(prog->reloc_desc,
@@ -3697,6 +3900,14 @@ __u32 bpf_map__max_entries(const struct bpf_map *map)
return map->def.max_entries;
}
+struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
+{
+ if (!bpf_map_type__is_map_in_map(map->def.type))
+ return NULL;
+
+ return map->inner_map;
+}
+
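A brief usage sketch for the new getter; object and map names are illustrative:

	struct bpf_map *outer, *inner;

	outer = bpf_object__find_map_by_name(obj, "outer_hash");
	inner = bpf_map__inner_map(outer);	/* NULL unless map-in-map */
	if (inner)
		bpf_map__set_max_entries(inner, 1024);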
int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
{
if (map->fd >= 0)
@@ -3883,6 +4094,18 @@ static int probe_kern_btf_datasec(void)
strs, sizeof(strs)));
}
+static int probe_kern_btf_float(void)
+{
+ static const char strs[] = "\0float";
+ __u32 types[] = {
+ /* float */
+ BTF_TYPE_FLOAT_ENC(1, 4),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs)));
+}
+
static int probe_kern_array_mmap(void)
{
struct bpf_create_map_attr attr = {
@@ -4062,6 +4285,9 @@ static struct kern_feature_desc {
[FEAT_MODULE_BTF] = {
"module BTF support", probe_module_btf,
},
+ [FEAT_BTF_FLOAT] = {
+ "BTF_KIND_FLOAT support", probe_kern_btf_float,
+ },
};
static bool kernel_supports(enum kern_feature_id feat_id)
@@ -4796,8 +5022,8 @@ static int load_module_btfs(struct bpf_object *obj)
goto err_out;
}
- err = btf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
- sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
+ err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
+ sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
if (err)
goto err_out;
@@ -4889,6 +5115,7 @@ err_out:
* least one of enums should be anonymous;
* - for ENUMs, check sizes, names are ignored;
* - for INT, size and signedness are ignored;
+ * - any two FLOATs are always compatible;
* - for ARRAY, dimensionality is ignored, element types are checked for
* compatibility recursively;
* - everything else shouldn't be ever a target of relocation.
@@ -4915,6 +5142,7 @@ recur:
switch (btf_kind(local_type)) {
case BTF_KIND_PTR:
+ case BTF_KIND_FLOAT:
return 1;
case BTF_KIND_FWD:
case BTF_KIND_ENUM: {
@@ -5567,11 +5795,6 @@ static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx,
insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
}
-static bool is_ldimm64(struct bpf_insn *insn)
-{
- return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
-}
-
static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
{
switch (BPF_SIZE(insn->code)) {
@@ -5637,7 +5860,7 @@ poison:
/* poison second part of ldimm64 to avoid confusing error from
* verifier about "unknown opcode 00"
*/
- if (is_ldimm64(insn))
+ if (is_ldimm64_insn(insn))
bpf_core_poison_insn(prog, relo_idx, insn_idx + 1, insn + 1);
bpf_core_poison_insn(prog, relo_idx, insn_idx, insn);
return 0;
@@ -5713,7 +5936,7 @@ poison:
case BPF_LD: {
__u64 imm;
- if (!is_ldimm64(insn) ||
+ if (!is_ldimm64_insn(insn) ||
insn[0].src_reg != 0 || insn[0].off != 0 ||
insn_idx + 1 >= prog->insns_cnt ||
insn[1].code != 0 || insn[1].dst_reg != 0 ||
@@ -6024,8 +6247,8 @@ patch_insn:
/* bpf_core_patch_insn() should know how to handle missing targ_spec */
err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
if (err) {
- pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
- prog->name, relo_idx, relo->insn_off, err);
+ pr_warn("prog '%s': relo #%d: failed to patch insn #%zu: %d\n",
+ prog->name, relo_idx, relo->insn_off / BPF_INSN_SZ, err);
return -EINVAL;
}
@@ -6147,15 +6370,13 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
case RELO_LD64:
insn[0].src_reg = BPF_PSEUDO_MAP_FD;
insn[0].imm = obj->maps[relo->map_idx].fd;
- relo->processed = true;
break;
case RELO_DATA:
insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
insn[1].imm = insn[0].imm + relo->sym_off;
insn[0].imm = obj->maps[relo->map_idx].fd;
- relo->processed = true;
break;
- case RELO_EXTERN:
+ case RELO_EXTERN_VAR:
ext = &obj->externs[relo->sym_off];
if (ext->type == EXT_KCFG) {
insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
@@ -6171,7 +6392,15 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
insn[1].imm = ext->ksym.addr >> 32;
}
}
- relo->processed = true;
+ break;
+ case RELO_EXTERN_FUNC:
+ ext = &obj->externs[relo->sym_off];
+ insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
+ insn[0].imm = ext->ksym.kernel_btf_id;
+ break;
+ case RELO_SUBPROG_ADDR:
+ insn[0].src_reg = BPF_PSEUDO_FUNC;
+ /* will be handled in a follow-up pass */
+ break;
case RELO_CALL:
/* will be handled in a follow-up pass */
@@ -6359,11 +6588,11 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
- if (!insn_is_subprog_call(insn))
+ if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
continue;
relo = find_prog_insn_relo(prog, insn_idx);
- if (relo && relo->type != RELO_CALL) {
+ if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
prog->name, insn_idx, relo->type);
return -LIBBPF_ERRNO__RELOC;
@@ -6375,8 +6604,22 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
* call always has imm = -1, but for static functions
* relocation is against STT_SECTION and insn->imm
* points to a start of a static function
+ *
+ * for a subprog addr relocation, relo->sym_off + insn->imm is
+ * the byte offset within the corresponding section.
*/
- sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
+ if (relo->type == RELO_CALL)
+ sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
+ else
+ sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
+ } else if (insn_is_pseudo_func(insn)) {
+ /*
+ * A RELO_SUBPROG_ADDR relo is always emitted, even when both
+ * functions are in the same section, so we should never get here.
+ */
+ pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
+ prog->name, insn_idx);
+ return -LIBBPF_ERRNO__RELOC;
} else {
/* if subprogram call is to a static function within
* the same ELF section, there won't be any relocation
@@ -6439,9 +6682,6 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
* different main programs */
insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
- if (relo)
- relo->processed = true;
-
pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
}
@@ -6534,7 +6774,7 @@ static int
bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
{
struct bpf_program *subprog;
- int i, j, err;
+ int i, err;
/* mark all subprogs as not relocated (yet) within the context of
* current main program
@@ -6545,9 +6785,6 @@ bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
continue;
subprog->sub_insn_off = 0;
- for (j = 0; j < subprog->nr_reloc; j++)
- if (subprog->reloc_desc[j].type == RELO_CALL)
- subprog->reloc_desc[j].processed = false;
}
err = bpf_object__reloc_code(obj, prog, prog);
@@ -6794,7 +7031,7 @@ static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id
return false;
}
-static int bpf_object__sanitize_prog(struct bpf_object* obj, struct bpf_program *prog)
+static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
{
struct bpf_insn *insn = prog->insns;
enum bpf_func_id func_id;
@@ -7275,6 +7512,7 @@ static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
{
char sym_type, sym_name[500];
unsigned long long sym_addr;
+ const struct btf_type *t;
struct extern_desc *ext;
int ret, err = 0;
FILE *f;
@@ -7301,6 +7539,10 @@ static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
if (!ext || ext->type != EXT_KSYM)
continue;
+ t = btf__type_by_id(obj->btf, ext->btf_id);
+ if (!btf_is_var(t))
+ continue;
+
if (ext->is_set && ext->ksym.addr != sym_addr) {
pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
sym_name, ext->ksym.addr, sym_addr);
@@ -7319,75 +7561,151 @@ out:
return err;
}
-static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
+static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
+ __u16 kind, struct btf **res_btf,
+ int *res_btf_fd)
{
- struct extern_desc *ext;
+ int i, id, btf_fd, err;
struct btf *btf;
- int i, j, id, btf_fd, err;
-
- for (i = 0; i < obj->nr_extern; i++) {
- const struct btf_type *targ_var, *targ_type;
- __u32 targ_type_id, local_type_id;
- const char *targ_var_name;
- int ret;
- ext = &obj->externs[i];
- if (ext->type != EXT_KSYM || !ext->ksym.type_id)
- continue;
+ btf = obj->btf_vmlinux;
+ btf_fd = 0;
+ id = btf__find_by_name_kind(btf, ksym_name, kind);
- btf = obj->btf_vmlinux;
- btf_fd = 0;
- id = btf__find_by_name_kind(btf, ext->name, BTF_KIND_VAR);
- if (id == -ENOENT) {
- err = load_module_btfs(obj);
- if (err)
- return err;
+ if (id == -ENOENT) {
+ err = load_module_btfs(obj);
+ if (err)
+ return err;
- for (j = 0; j < obj->btf_module_cnt; j++) {
- btf = obj->btf_modules[j].btf;
- /* we assume module BTF FD is always >0 */
- btf_fd = obj->btf_modules[j].fd;
- id = btf__find_by_name_kind(btf, ext->name, BTF_KIND_VAR);
- if (id != -ENOENT)
- break;
- }
- }
- if (id <= 0) {
- pr_warn("extern (ksym) '%s': failed to find BTF ID in kernel BTF(s).\n",
- ext->name);
- return -ESRCH;
+ for (i = 0; i < obj->btf_module_cnt; i++) {
+ btf = obj->btf_modules[i].btf;
+ /* we assume module BTF FD is always >0 */
+ btf_fd = obj->btf_modules[i].fd;
+ id = btf__find_by_name_kind(btf, ksym_name, kind);
+ if (id != -ENOENT)
+ break;
}
+ }
+ if (id <= 0) {
+ pr_warn("extern (%s ksym) '%s': failed to find BTF ID in kernel BTF(s).\n",
+ __btf_kind_str(kind), ksym_name);
+ return -ESRCH;
+ }
+
+ *res_btf = btf;
+ *res_btf_fd = btf_fd;
+ return id;
+}
- /* find local type_id */
- local_type_id = ext->ksym.type_id;
+static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
+ struct extern_desc *ext)
+{
+ const struct btf_type *targ_var, *targ_type;
+ __u32 targ_type_id, local_type_id;
+ const char *targ_var_name;
+ int id, btf_fd = 0, err;
+ struct btf *btf = NULL;
- /* find target type_id */
- targ_var = btf__type_by_id(btf, id);
- targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
- targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
+ id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &btf_fd);
+ if (id < 0)
+ return id;
- ret = bpf_core_types_are_compat(obj->btf, local_type_id,
- btf, targ_type_id);
- if (ret <= 0) {
- const struct btf_type *local_type;
- const char *targ_name, *local_name;
+ /* find local type_id */
+ local_type_id = ext->ksym.type_id;
- local_type = btf__type_by_id(obj->btf, local_type_id);
- local_name = btf__name_by_offset(obj->btf, local_type->name_off);
- targ_name = btf__name_by_offset(btf, targ_type->name_off);
+ /* find target type_id */
+ targ_var = btf__type_by_id(btf, id);
+ targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
+ targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
- pr_warn("extern (ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
- ext->name, local_type_id,
- btf_kind_str(local_type), local_name, targ_type_id,
- btf_kind_str(targ_type), targ_name);
- return -EINVAL;
- }
+ err = bpf_core_types_are_compat(obj->btf, local_type_id,
+ btf, targ_type_id);
+ if (err <= 0) {
+ const struct btf_type *local_type;
+ const char *targ_name, *local_name;
- ext->is_set = true;
- ext->ksym.kernel_btf_obj_fd = btf_fd;
- ext->ksym.kernel_btf_id = id;
- pr_debug("extern (ksym) '%s': resolved to [%d] %s %s\n",
- ext->name, id, btf_kind_str(targ_var), targ_var_name);
+ local_type = btf__type_by_id(obj->btf, local_type_id);
+ local_name = btf__name_by_offset(obj->btf, local_type->name_off);
+ targ_name = btf__name_by_offset(btf, targ_type->name_off);
+
+ pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
+ ext->name, local_type_id,
+ btf_kind_str(local_type), local_name, targ_type_id,
+ btf_kind_str(targ_type), targ_name);
+ return -EINVAL;
+ }
+
+ ext->is_set = true;
+ ext->ksym.kernel_btf_obj_fd = btf_fd;
+ ext->ksym.kernel_btf_id = id;
+ pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
+ ext->name, id, btf_kind_str(targ_var), targ_var_name);
+
+ return 0;
+}
+
+static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
+ struct extern_desc *ext)
+{
+ int local_func_proto_id, kfunc_proto_id, kfunc_id;
+ const struct btf_type *kern_func;
+ struct btf *kern_btf = NULL;
+ int ret, kern_btf_fd = 0;
+
+ local_func_proto_id = ext->ksym.type_id;
+
+ kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC,
+ &kern_btf, &kern_btf_fd);
+ if (kfunc_id < 0) {
+ pr_warn("extern (func ksym) '%s': not found in kernel BTF\n",
+ ext->name);
+ return kfunc_id;
+ }
+
+ if (kern_btf != obj->btf_vmlinux) {
+ pr_warn("extern (func ksym) '%s': function in kernel module is not supported\n",
+ ext->name);
+ return -ENOTSUP;
+ }
+
+ kern_func = btf__type_by_id(kern_btf, kfunc_id);
+ kfunc_proto_id = kern_func->type;
+
+ ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
+ kern_btf, kfunc_proto_id);
+ if (ret <= 0) {
+ pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with kernel [%d]\n",
+ ext->name, local_func_proto_id, kfunc_proto_id);
+ return -EINVAL;
+ }
+
+ ext->is_set = true;
+ ext->ksym.kernel_btf_obj_fd = kern_btf_fd;
+ ext->ksym.kernel_btf_id = kfunc_id;
+ pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
+ ext->name, kfunc_id);
+
+ return 0;
+}
+
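The extern being resolved here looks like the following on the BPF C side; a hedged sketch, the kfunc name is hypothetical:

	/* __ksym places the extern into the ".ksyms" section; libbpf
	 * rewrites calls to it as BPF_PSEUDO_KFUNC_CALL (see the
	 * RELO_EXTERN_FUNC case above)
	 */
	extern long bpf_some_kfunc(struct task_struct *task) __ksym;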
+static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
+{
+ const struct btf_type *t;
+ struct extern_desc *ext;
+ int i, err;
+
+ for (i = 0; i < obj->nr_extern; i++) {
+ ext = &obj->externs[i];
+ if (ext->type != EXT_KSYM || !ext->ksym.type_id)
+ continue;
+
+ t = btf__type_by_id(obj->btf, ext->btf_id);
+ if (btf_is_var(t))
+ err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
+ else
+ err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
+ if (err)
+ return err;
}
return 0;
}
@@ -8194,6 +8512,16 @@ int bpf_object__btf_fd(const struct bpf_object *obj)
return obj->btf ? btf__fd(obj->btf) : -1;
}
+int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
+{
+ if (obj->loaded)
+ return -EINVAL;
+
+ obj->kern_version = kern_version;
+
+ return 0;
+}
+
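A usage sketch for the new setter; it must be called before the object is loaded, and the version value is illustrative:

	/* KERNEL_VERSION() comes from <linux/version.h> */
	err = bpf_object__set_kversion(obj, KERNEL_VERSION(5, 12, 0));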
int bpf_object__set_priv(struct bpf_object *obj, void *priv,
bpf_object_clear_priv_t clear_priv)
{
@@ -8382,7 +8710,7 @@ int bpf_program__nth_fd(const struct bpf_program *prog, int n)
return fd;
}
-enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
+enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog)
{
return prog->type;
}
@@ -8427,7 +8755,7 @@ BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
enum bpf_attach_type
-bpf_program__get_expected_attach_type(struct bpf_program *prog)
+bpf_program__get_expected_attach_type(const struct bpf_program *prog)
{
return prog->expected_attach_type;
}
@@ -9203,6 +9531,7 @@ int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
pr_warn("error: inner_map_fd already specified\n");
return -EINVAL;
}
+ zfree(&map->inner_map);
map->inner_map_fd = fd;
return 0;
}
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 3c35eb401931..bec4e6a6e31d 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -143,6 +143,7 @@ LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
+LIBBPF_API int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version);
struct btf;
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
@@ -361,12 +362,12 @@ LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog);
-LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog);
+LIBBPF_API enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
enum bpf_prog_type type);
LIBBPF_API enum bpf_attach_type
-bpf_program__get_expected_attach_type(struct bpf_program *prog);
+bpf_program__get_expected_attach_type(const struct bpf_program *prog);
LIBBPF_API void
bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type);
@@ -479,6 +480,7 @@ LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
+LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);
LIBBPF_API long libbpf_get_error(const void *ptr);
@@ -507,6 +509,7 @@ struct xdp_link_info {
struct bpf_xdp_set_link_opts {
size_t sz;
int old_fd;
+ size_t :0;
};
#define bpf_xdp_set_link_opts__last_field old_fd
@@ -759,6 +762,19 @@ enum libbpf_tristate {
TRI_MODULE = 2,
};
+struct bpf_linker_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+};
+#define bpf_linker_opts__last_field sz
+
+struct bpf_linker;
+
+LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts);
+LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker, const char *filename);
+LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
+LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);
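A minimal usage sketch of the linker API; file names are illustrative:

	struct bpf_linker *linker;

	linker = bpf_linker__new("combined.bpf.o", NULL);
	if (!linker)
		return -1;
	if (bpf_linker__add_file(linker, "a.bpf.o") ||
	    bpf_linker__add_file(linker, "b.bpf.o") ||
	    bpf_linker__finalize(linker)) {
		bpf_linker__free(linker);
		return -1;
	}
	bpf_linker__free(linker);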
+
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 1c0fd2dd233a..b9b29baf1df8 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -350,3 +350,15 @@ LIBBPF_0.3.0 {
xsk_setup_xdp_prog;
xsk_socket__update_xskmap;
} LIBBPF_0.2.0;
+
+LIBBPF_0.4.0 {
+ global:
+ btf__add_float;
+ btf__add_type;
+ bpf_linker__add_file;
+ bpf_linker__finalize;
+ bpf_linker__free;
+ bpf_linker__new;
+ bpf_map__inner_map;
+ bpf_object__set_kversion;
+} LIBBPF_0.3.0;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 969d0ac592ba..ee426226928f 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -19,6 +19,27 @@
#pragma GCC poison reallocarray
#include "libbpf.h"
+#include "btf.h"
+
+#ifndef EM_BPF
+#define EM_BPF 247
+#endif
+
+#ifndef R_BPF_64_64
+#define R_BPF_64_64 1
+#endif
+#ifndef R_BPF_64_32
+#define R_BPF_64_32 10
+#endif
+
+#ifndef SHT_LLVM_ADDRSIG
+#define SHT_LLVM_ADDRSIG 0x6FFF4C03
+#endif
+
+/* if libelf is old and doesn't support mmap(), fall back to read() */
+#ifndef ELF_C_READ_MMAP
+#define ELF_C_READ_MMAP ELF_C_READ
+#endif
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
@@ -31,6 +52,8 @@
#define BTF_MEMBER_ENC(name, type, bits_offset) (name), (type), (bits_offset)
#define BTF_PARAM_ENC(name, type) (name), (type)
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
+#define BTF_TYPE_FLOAT_ENC(name, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz)
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
@@ -105,9 +128,58 @@ static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
return realloc(ptr, total);
}
-void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
- size_t cur_cnt, size_t max_cnt, size_t add_cnt);
-int btf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt);
+struct btf;
+struct btf_type;
+
+struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id);
+const char *btf_kind_str(const struct btf_type *t);
+const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
+
+static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)
+{
+ return (enum btf_func_linkage)(int)btf_vlen(t);
+}
+
+static inline __u32 btf_type_info(int kind, int vlen, int kflag)
+{
+ return (kflag << 31) | (kind << 24) | vlen;
+}
+
+enum map_def_parts {
+ MAP_DEF_MAP_TYPE = 0x001,
+ MAP_DEF_KEY_TYPE = 0x002,
+ MAP_DEF_KEY_SIZE = 0x004,
+ MAP_DEF_VALUE_TYPE = 0x008,
+ MAP_DEF_VALUE_SIZE = 0x010,
+ MAP_DEF_MAX_ENTRIES = 0x020,
+ MAP_DEF_MAP_FLAGS = 0x040,
+ MAP_DEF_NUMA_NODE = 0x080,
+ MAP_DEF_PINNING = 0x100,
+ MAP_DEF_INNER_MAP = 0x200,
+
+ MAP_DEF_ALL = 0x3ff, /* combination of all above */
+};
+
+struct btf_map_def {
+ enum map_def_parts parts;
+ __u32 map_type;
+ __u32 key_type_id;
+ __u32 key_size;
+ __u32 value_type_id;
+ __u32 value_size;
+ __u32 max_entries;
+ __u32 map_flags;
+ __u32 numa_node;
+ __u32 pinning;
+};
+
+int parse_btf_map_def(const char *map_name, struct btf *btf,
+ const struct btf_type *def_t, bool strict,
+ struct btf_map_def *map_def, struct btf_map_def *inner_def);
+
+void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
+ size_t cur_cnt, size_t max_cnt, size_t add_cnt);
+int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt);
static inline bool libbpf_validate_opts(const char *opts,
size_t opts_sz, size_t user_sz,
@@ -349,4 +421,11 @@ struct bpf_core_relo {
enum bpf_core_relo_kind kind;
};
+typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
+typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
+int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx);
+int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
+int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
+int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
+
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/tools/lib/bpf/libbpf_util.h b/tools/lib/bpf/libbpf_util.h
deleted file mode 100644
index 59c779c5790c..000000000000
--- a/tools/lib/bpf/libbpf_util.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
-/* Copyright (c) 2019 Facebook */
-
-#ifndef __LIBBPF_LIBBPF_UTIL_H
-#define __LIBBPF_LIBBPF_UTIL_H
-
-#include <stdbool.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Use these barrier functions instead of smp_[rw]mb() when they are
- * used in a libbpf header file. That way they can be built into the
- * application that uses libbpf.
- */
-#if defined(__i386__) || defined(__x86_64__)
-# define libbpf_smp_rmb() asm volatile("" : : : "memory")
-# define libbpf_smp_wmb() asm volatile("" : : : "memory")
-# define libbpf_smp_mb() \
- asm volatile("lock; addl $0,-4(%%rsp)" : : : "memory", "cc")
-/* Hinders stores to be observed before older loads. */
-# define libbpf_smp_rwmb() asm volatile("" : : : "memory")
-#elif defined(__aarch64__)
-# define libbpf_smp_rmb() asm volatile("dmb ishld" : : : "memory")
-# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
-# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
-# define libbpf_smp_rwmb() libbpf_smp_mb()
-#elif defined(__arm__)
-/* These are only valid for armv7 and above */
-# define libbpf_smp_rmb() asm volatile("dmb ish" : : : "memory")
-# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
-# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
-# define libbpf_smp_rwmb() libbpf_smp_mb()
-#else
-/* Architecture missing native barrier functions. */
-# define libbpf_smp_rmb() __sync_synchronize()
-# define libbpf_smp_wmb() __sync_synchronize()
-# define libbpf_smp_mb() __sync_synchronize()
-# define libbpf_smp_rwmb() __sync_synchronize()
-#endif
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
new file mode 100644
index 000000000000..9de084b1c699
--- /dev/null
+++ b/tools/lib/bpf/linker.c
@@ -0,0 +1,2883 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/*
+ * BPF static linker
+ *
+ * Copyright (c) 2021 Facebook
+ */
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <linux/err.h>
+#include <linux/btf.h>
+#include <elf.h>
+#include <libelf.h>
+#include <gelf.h>
+#include <fcntl.h>
+#include "libbpf.h"
+#include "btf.h"
+#include "libbpf_internal.h"
+#include "strset.h"
+
+#define BTF_EXTERN_SEC ".extern"
+
+struct src_sec {
+ const char *sec_name;
+ /* positional (not necessarily ELF) index in an array of sections */
+ int id;
+ /* positional (not necessarily ELF) index of a matching section in a final object file */
+ int dst_id;
+ /* section data offset in a matching output section */
+ int dst_off;
+ /* whether section is omitted from the final ELF file */
+ bool skipped;
+ /* whether section is an ephemeral section, not mapped to an ELF section */
+ bool ephemeral;
+
+ /* ELF info */
+ size_t sec_idx;
+ Elf_Scn *scn;
+ Elf64_Shdr *shdr;
+ Elf_Data *data;
+
+ /* corresponding BTF DATASEC type ID */
+ int sec_type_id;
+};
+
+struct src_obj {
+ const char *filename;
+ int fd;
+ Elf *elf;
+ /* Section header strings section index */
+ size_t shstrs_sec_idx;
+ /* SYMTAB section index */
+ size_t symtab_sec_idx;
+
+ struct btf *btf;
+ struct btf_ext *btf_ext;
+
+ /* List of sections (including ephemeral). Slot zero is unused. */
+ struct src_sec *secs;
+ int sec_cnt;
+
+ /* mapping of symbol indices from src to dst ELF */
+ int *sym_map;
+ /* mapping from the src BTF type IDs to dst ones */
+ int *btf_type_map;
+};
+
+/* single .BTF.ext data section */
+struct btf_ext_sec_data {
+ size_t rec_cnt;
+ __u32 rec_sz;
+ void *recs;
+};
+
+struct glob_sym {
+ /* ELF symbol index */
+ int sym_idx;
+ /* associated section id for .ksyms, .kconfig, etc., but not .extern */
+ int sec_id;
+ /* extern name offset in STRTAB */
+ int name_off;
+ /* optional associated BTF type ID */
+ int btf_id;
+ /* BTF type ID that the VAR/FUNC type points to; used for
+ * rewriting types when an extern VAR/FUNC is resolved to a
+ * concrete definition
+ */
+ int underlying_btf_id;
+ /* sec_var index in the corresponding dst_sec, if exists */
+ int var_idx;
+
+ /* extern or resolved/global symbol */
+ bool is_extern;
+ /* weak or strong symbol, never goes back from strong to weak */
+ bool is_weak;
+};
+
+struct dst_sec {
+ char *sec_name;
+ /* positional (not necessarily ELF) index in an array of sections */
+ int id;
+
+ bool ephemeral;
+
+ /* ELF info */
+ size_t sec_idx;
+ Elf_Scn *scn;
+ Elf64_Shdr *shdr;
+ Elf_Data *data;
+
+ /* final output section size */
+ int sec_sz;
+ /* final output contents of the section */
+ void *raw_data;
+
+ /* corresponding STT_SECTION symbol index in SYMTAB */
+ int sec_sym_idx;
+
+ /* section's DATASEC variable info, emitted on BTF finalization */
+ bool has_btf;
+ int sec_var_cnt;
+ struct btf_var_secinfo *sec_vars;
+
+ /* section's .BTF.ext data */
+ struct btf_ext_sec_data func_info;
+ struct btf_ext_sec_data line_info;
+ struct btf_ext_sec_data core_relo_info;
+};
+
+struct bpf_linker {
+ char *filename;
+ int fd;
+ Elf *elf;
+ Elf64_Ehdr *elf_hdr;
+
+ /* Output sections metadata */
+ struct dst_sec *secs;
+ int sec_cnt;
+
+ struct strset *strtab_strs; /* STRTAB unique strings */
+ size_t strtab_sec_idx; /* STRTAB section index */
+ size_t symtab_sec_idx; /* SYMTAB section index */
+
+ struct btf *btf;
+ struct btf_ext *btf_ext;
+
+ /* global (including extern) ELF symbols */
+ int glob_sym_cnt;
+ struct glob_sym *glob_syms;
+};
+
+#define pr_warn_elf(fmt, ...) \
+ libbpf_print(LIBBPF_WARN, "libbpf: " fmt ": %s\n", ##__VA_ARGS__, elf_errmsg(-1))
+
+static int init_output_elf(struct bpf_linker *linker, const char *file);
+
+static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, struct src_obj *obj);
+static int linker_sanity_check_elf(struct src_obj *obj);
+static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec);
+static int linker_sanity_check_elf_relos(struct src_obj *obj, struct src_sec *sec);
+static int linker_sanity_check_btf(struct src_obj *obj);
+static int linker_sanity_check_btf_ext(struct src_obj *obj);
+static int linker_fixup_btf(struct src_obj *obj);
+static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj);
+static int linker_append_elf_syms(struct bpf_linker *linker, struct src_obj *obj);
+static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,
+ Elf64_Sym *sym, const char *sym_name, int src_sym_idx);
+static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *obj);
+static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj);
+static int linker_append_btf_ext(struct bpf_linker *linker, struct src_obj *obj);
+
+static int finalize_btf(struct bpf_linker *linker);
+static int finalize_btf_ext(struct bpf_linker *linker);
+
+void bpf_linker__free(struct bpf_linker *linker)
+{
+ int i;
+
+ if (!linker)
+ return;
+
+ free(linker->filename);
+
+ if (linker->elf)
+ elf_end(linker->elf);
+
+ if (linker->fd >= 0)
+ close(linker->fd);
+
+ strset__free(linker->strtab_strs);
+
+ btf__free(linker->btf);
+ btf_ext__free(linker->btf_ext);
+
+ for (i = 1; i < linker->sec_cnt; i++) {
+ struct dst_sec *sec = &linker->secs[i];
+
+ free(sec->sec_name);
+ free(sec->raw_data);
+ free(sec->sec_vars);
+
+ free(sec->func_info.recs);
+ free(sec->line_info.recs);
+ free(sec->core_relo_info.recs);
+ }
+ free(linker->secs);
+
+ free(linker);
+}
+
+struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts)
+{
+ struct bpf_linker *linker;
+ int err;
+
+ if (!OPTS_VALID(opts, bpf_linker_opts))
+ return NULL;
+
+ if (elf_version(EV_CURRENT) == EV_NONE) {
+ pr_warn_elf("libelf initialization failed");
+ return NULL;
+ }
+
+ linker = calloc(1, sizeof(*linker));
+ if (!linker)
+ return NULL;
+
+ linker->fd = -1;
+
+ err = init_output_elf(linker, filename);
+ if (err)
+ goto err_out;
+
+ return linker;
+
+err_out:
+ bpf_linker__free(linker);
+ return NULL;
+}
+
+static struct dst_sec *add_dst_sec(struct bpf_linker *linker, const char *sec_name)
+{
+ struct dst_sec *secs = linker->secs, *sec;
+ size_t new_cnt = linker->sec_cnt ? linker->sec_cnt + 1 : 2;
+
+ secs = libbpf_reallocarray(secs, new_cnt, sizeof(*secs));
+ if (!secs)
+ return NULL;
+
+ /* zero out newly allocated memory */
+ memset(secs + linker->sec_cnt, 0, (new_cnt - linker->sec_cnt) * sizeof(*secs));
+
+ linker->secs = secs;
+ linker->sec_cnt = new_cnt;
+
+ sec = &linker->secs[new_cnt - 1];
+ sec->id = new_cnt - 1;
+ sec->sec_name = strdup(sec_name);
+ if (!sec->sec_name)
+ return NULL;
+
+ return sec;
+}
+
+static Elf64_Sym *add_new_sym(struct bpf_linker *linker, size_t *sym_idx)
+{
+ struct dst_sec *symtab = &linker->secs[linker->symtab_sec_idx];
+ Elf64_Sym *syms, *sym;
+ size_t sym_cnt = symtab->sec_sz / sizeof(*sym);
+
+ syms = libbpf_reallocarray(symtab->raw_data, sym_cnt + 1, sizeof(*sym));
+ if (!syms)
+ return NULL;
+
+ sym = &syms[sym_cnt];
+ memset(sym, 0, sizeof(*sym));
+
+ symtab->raw_data = syms;
+ symtab->sec_sz += sizeof(*sym);
+ symtab->shdr->sh_size += sizeof(*sym);
+ symtab->data->d_size += sizeof(*sym);
+
+ if (sym_idx)
+ *sym_idx = sym_cnt;
+
+ return sym;
+}
+
+static int init_output_elf(struct bpf_linker *linker, const char *file)
+{
+ int err, str_off;
+ Elf64_Sym *init_sym;
+ struct dst_sec *sec;
+
+ linker->filename = strdup(file);
+ if (!linker->filename)
+ return -ENOMEM;
+
+ linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (linker->fd < 0) {
+ err = -errno;
+ pr_warn("failed to create '%s': %d\n", file, err);
+ return err;
+ }
+
+ linker->elf = elf_begin(linker->fd, ELF_C_WRITE, NULL);
+ if (!linker->elf) {
+ pr_warn_elf("failed to create ELF object");
+ return -EINVAL;
+ }
+
+ /* ELF header */
+ linker->elf_hdr = elf64_newehdr(linker->elf);
+ if (!linker->elf_hdr) {
+ pr_warn_elf("failed to create ELF header");
+ return -EINVAL;
+ }
+
+ linker->elf_hdr->e_machine = EM_BPF;
+ linker->elf_hdr->e_type = ET_REL;
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2LSB;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+ linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2MSB;
+#else
+#error "Unknown __BYTE_ORDER"
+#endif
+
+ /* STRTAB */
+ /* initialize strset with an empty string to conform to ELF */
+ linker->strtab_strs = strset__new(INT_MAX, "", sizeof(""));
+ if (libbpf_get_error(linker->strtab_strs))
+ return libbpf_get_error(linker->strtab_strs);
+
+ sec = add_dst_sec(linker, ".strtab");
+ if (!sec)
+ return -ENOMEM;
+
+ sec->scn = elf_newscn(linker->elf);
+ if (!sec->scn) {
+ pr_warn_elf("failed to create STRTAB section");
+ return -EINVAL;
+ }
+
+ sec->shdr = elf64_getshdr(sec->scn);
+ if (!sec->shdr)
+ return -EINVAL;
+
+ sec->data = elf_newdata(sec->scn);
+ if (!sec->data) {
+ pr_warn_elf("failed to create STRTAB data");
+ return -EINVAL;
+ }
+
+ str_off = strset__add_str(linker->strtab_strs, sec->sec_name);
+ if (str_off < 0)
+ return str_off;
+
+ sec->sec_idx = elf_ndxscn(sec->scn);
+ linker->elf_hdr->e_shstrndx = sec->sec_idx;
+ linker->strtab_sec_idx = sec->sec_idx;
+
+ sec->shdr->sh_name = str_off;
+ sec->shdr->sh_type = SHT_STRTAB;
+ sec->shdr->sh_flags = SHF_STRINGS;
+ sec->shdr->sh_offset = 0;
+ sec->shdr->sh_link = 0;
+ sec->shdr->sh_info = 0;
+ sec->shdr->sh_addralign = 1;
+ sec->shdr->sh_size = sec->sec_sz = 0;
+ sec->shdr->sh_entsize = 0;
+
+ /* SYMTAB */
+ sec = add_dst_sec(linker, ".symtab");
+ if (!sec)
+ return -ENOMEM;
+
+ sec->scn = elf_newscn(linker->elf);
+ if (!sec->scn) {
+ pr_warn_elf("failed to create SYMTAB section");
+ return -EINVAL;
+ }
+
+ sec->shdr = elf64_getshdr(sec->scn);
+ if (!sec->shdr)
+ return -EINVAL;
+
+ sec->data = elf_newdata(sec->scn);
+ if (!sec->data) {
+ pr_warn_elf("failed to create SYMTAB data");
+ return -EINVAL;
+ }
+
+ str_off = strset__add_str(linker->strtab_strs, sec->sec_name);
+ if (str_off < 0)
+ return str_off;
+
+ sec->sec_idx = elf_ndxscn(sec->scn);
+ linker->symtab_sec_idx = sec->sec_idx;
+
+ sec->shdr->sh_name = str_off;
+ sec->shdr->sh_type = SHT_SYMTAB;
+ sec->shdr->sh_flags = 0;
+ sec->shdr->sh_offset = 0;
+ sec->shdr->sh_link = linker->strtab_sec_idx;
+ /* sh_info should be one greater than the index of the last local
+ * symbol (i.e., one with STB_LOCAL binding), but we leave it at
+ * zero, as nothing we know of enforces this.
+ */
+ sec->shdr->sh_info = 0;
+ sec->shdr->sh_addralign = 8;
+ sec->shdr->sh_entsize = sizeof(Elf64_Sym);
+
+ /* .BTF */
+ linker->btf = btf__new_empty();
+ err = libbpf_get_error(linker->btf);
+ if (err)
+ return err;
+
+ /* add the special all-zero symbol */
+ init_sym = add_new_sym(linker, NULL);
+ if (!init_sym)
+ return -EINVAL;
+
+ init_sym->st_name = 0;
+ init_sym->st_info = 0;
+ init_sym->st_other = 0;
+ init_sym->st_shndx = SHN_UNDEF;
+ init_sym->st_value = 0;
+ init_sym->st_size = 0;
+
+ return 0;
+}
+
+int bpf_linker__add_file(struct bpf_linker *linker, const char *filename)
+{
+ struct src_obj obj = {};
+ int err = 0;
+
+ if (!linker->elf)
+ return -EINVAL;
+
+ err = err ?: linker_load_obj_file(linker, filename, &obj);
+ err = err ?: linker_append_sec_data(linker, &obj);
+ err = err ?: linker_append_elf_syms(linker, &obj);
+ err = err ?: linker_append_elf_relos(linker, &obj);
+ err = err ?: linker_append_btf(linker, &obj);
+ err = err ?: linker_append_btf_ext(linker, &obj);
+
+ /* free up src_obj resources */
+ free(obj.btf_type_map);
+ btf__free(obj.btf);
+ btf_ext__free(obj.btf_ext);
+ free(obj.secs);
+ free(obj.sym_map);
+ if (obj.elf)
+ elf_end(obj.elf);
+ if (obj.fd >= 0)
+ close(obj.fd);
+
+ return err;
+}
+
+static bool is_dwarf_sec_name(const char *name)
+{
+ /* approximation, but the actual list is too long */
+ return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
+}
+
+static bool is_ignored_sec(struct src_sec *sec)
+{
+ Elf64_Shdr *shdr = sec->shdr;
+ const char *name = sec->sec_name;
+
+ /* no special handling of .strtab */
+ if (shdr->sh_type == SHT_STRTAB)
+ return true;
+
+ /* ignore .llvm_addrsig section as well */
+ if (shdr->sh_type == SHT_LLVM_ADDRSIG)
+ return true;
+
+ /* no subprograms will lead to an empty .text section, ignore it */
+ if (shdr->sh_type == SHT_PROGBITS && shdr->sh_size == 0 &&
+ strcmp(sec->sec_name, ".text") == 0)
+ return true;
+
+ /* DWARF sections */
+ if (is_dwarf_sec_name(sec->sec_name))
+ return true;
+
+ if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
+ name += sizeof(".rel") - 1;
+ /* DWARF section relocations */
+ if (is_dwarf_sec_name(name))
+ return true;
+
+ /* .BTF and .BTF.ext don't need relocations */
+ if (strcmp(name, BTF_ELF_SEC) == 0 ||
+ strcmp(name, BTF_EXT_ELF_SEC) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static struct src_sec *add_src_sec(struct src_obj *obj, const char *sec_name)
+{
+ struct src_sec *secs = obj->secs, *sec;
+ size_t new_cnt = obj->sec_cnt ? obj->sec_cnt + 1 : 2;
+
+ secs = libbpf_reallocarray(secs, new_cnt, sizeof(*secs));
+ if (!secs)
+ return NULL;
+
+ /* zero out newly allocated memory */
+ memset(secs + obj->sec_cnt, 0, (new_cnt - obj->sec_cnt) * sizeof(*secs));
+
+ obj->secs = secs;
+ obj->sec_cnt = new_cnt;
+
+ sec = &obj->secs[new_cnt - 1];
+ sec->id = new_cnt - 1;
+ sec->sec_name = sec_name;
+
+ return sec;
+}
+
+static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, struct src_obj *obj)
+{
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ const int host_endianness = ELFDATA2LSB;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+ const int host_endianness = ELFDATA2MSB;
+#else
+#error "Unknown __BYTE_ORDER"
+#endif
+ int err = 0;
+ Elf_Scn *scn;
+ Elf_Data *data;
+ Elf64_Ehdr *ehdr;
+ Elf64_Shdr *shdr;
+ struct src_sec *sec;
+
+ pr_debug("linker: adding object file '%s'...\n", filename);
+
+ obj->filename = filename;
+
+ obj->fd = open(filename, O_RDONLY);
+ if (obj->fd < 0) {
+ err = -errno;
+ pr_warn("failed to open file '%s': %d\n", filename, err);
+ return err;
+ }
+ obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL);
+ if (!obj->elf) {
+ err = -errno;
+ pr_warn_elf("failed to parse ELF file '%s'", filename);
+ return err;
+ }
+
+ /* Sanity check ELF file high-level properties */
+ ehdr = elf64_getehdr(obj->elf);
+ if (!ehdr) {
+ err = -errno;
+ pr_warn_elf("failed to get ELF header for %s", filename);
+ return err;
+ }
+ if (ehdr->e_ident[EI_DATA] != host_endianness) {
+ err = -EOPNOTSUPP;
+ pr_warn_elf("unsupported byte order of ELF file %s", filename);
+ return err;
+ }
+ if (ehdr->e_type != ET_REL
+ || ehdr->e_machine != EM_BPF
+ || ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
+ err = -EOPNOTSUPP;
+ pr_warn_elf("unsupported kind of ELF file %s", filename);
+ return err;
+ }
+
+ if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) {
+ err = -errno;
+ pr_warn_elf("failed to get SHSTRTAB section index for %s", filename);
+ return err;
+ }
+
+ scn = NULL;
+ while ((scn = elf_nextscn(obj->elf, scn)) != NULL) {
+ size_t sec_idx = elf_ndxscn(scn);
+ const char *sec_name;
+
+ shdr = elf64_getshdr(scn);
+ if (!shdr) {
+ err = -errno;
+ pr_warn_elf("failed to get section #%zu header for %s",
+ sec_idx, filename);
+ return err;
+ }
+
+ sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name);
+ if (!sec_name) {
+ err = -errno;
+ pr_warn_elf("failed to get section #%zu name for %s",
+ sec_idx, filename);
+ return err;
+ }
+
+ data = elf_getdata(scn, 0);
+ if (!data) {
+ err = -errno;
+ pr_warn_elf("failed to get section #%zu (%s) data from %s",
+ sec_idx, sec_name, filename);
+ return err;
+ }
+
+ sec = add_src_sec(obj, sec_name);
+ if (!sec)
+ return -ENOMEM;
+
+ sec->scn = scn;
+ sec->shdr = shdr;
+ sec->data = data;
+ sec->sec_idx = elf_ndxscn(scn);
+
+ if (is_ignored_sec(sec)) {
+ sec->skipped = true;
+ continue;
+ }
+
+ switch (shdr->sh_type) {
+ case SHT_SYMTAB:
+ if (obj->symtab_sec_idx) {
+ err = -EOPNOTSUPP;
+ pr_warn("multiple SYMTAB sections found, not supported\n");
+ return err;
+ }
+ obj->symtab_sec_idx = sec_idx;
+ break;
+ case SHT_STRTAB:
+ /* we'll construct our own string table */
+ break;
+ case SHT_PROGBITS:
+ if (strcmp(sec_name, BTF_ELF_SEC) == 0) {
+ obj->btf = btf__new(data->d_buf, shdr->sh_size);
+ err = libbpf_get_error(obj->btf);
+ if (err) {
+ pr_warn("failed to parse .BTF from %s: %d\n", filename, err);
+ return err;
+ }
+ sec->skipped = true;
+ continue;
+ }
+ if (strcmp(sec_name, BTF_EXT_ELF_SEC) == 0) {
+ obj->btf_ext = btf_ext__new(data->d_buf, shdr->sh_size);
+ err = libbpf_get_error(obj->btf_ext);
+ if (err) {
+ pr_warn("failed to parse .BTF.ext from '%s': %d\n", filename, err);
+ return err;
+ }
+ sec->skipped = true;
+ continue;
+ }
+
+ /* data & code */
+ break;
+ case SHT_NOBITS:
+ /* BSS */
+ break;
+ case SHT_REL:
+ /* relocations */
+ break;
+ default:
+ pr_warn("unrecognized section #%zu (%s) in %s\n",
+ sec_idx, sec_name, filename);
+ err = -EINVAL;
+ return err;
+ }
+ }
+
+ err = err ?: linker_sanity_check_elf(obj);
+ err = err ?: linker_sanity_check_btf(obj);
+ err = err ?: linker_sanity_check_btf_ext(obj);
+ err = err ?: linker_fixup_btf(obj);
+
+ return err;
+}
+
+static bool is_pow_of_2(size_t x)
+{
+ return x && (x & (x - 1)) == 0;
+}
+
+static int linker_sanity_check_elf(struct src_obj *obj)
+{
+ struct src_sec *sec;
+ int i, err;
+
+ if (!obj->symtab_sec_idx) {
+ pr_warn("ELF is missing SYMTAB section in %s\n", obj->filename);
+ return -EINVAL;
+ }
+ if (!obj->shstrs_sec_idx) {
+ pr_warn("ELF is missing section headers STRTAB section in %s\n", obj->filename);
+ return -EINVAL;
+ }
+
+ for (i = 1; i < obj->sec_cnt; i++) {
+ sec = &obj->secs[i];
+
+ if (sec->sec_name[0] == '\0') {
+ pr_warn("ELF section #%zu has empty name in %s\n", sec->sec_idx, obj->filename);
+ return -EINVAL;
+ }
+
+ if (sec->shdr->sh_addralign && !is_pow_of_2(sec->shdr->sh_addralign))
+ return -EINVAL;
+ if (sec->shdr->sh_addralign != sec->data->d_align)
+ return -EINVAL;
+
+ if (sec->shdr->sh_size != sec->data->d_size)
+ return -EINVAL;
+
+ switch (sec->shdr->sh_type) {
+ case SHT_SYMTAB:
+ err = linker_sanity_check_elf_symtab(obj, sec);
+ if (err)
+ return err;
+ break;
+ case SHT_STRTAB:
+ break;
+ case SHT_PROGBITS:
+ if (sec->shdr->sh_flags & SHF_EXECINSTR) {
+ if (sec->shdr->sh_size % sizeof(struct bpf_insn) != 0)
+ return -EINVAL;
+ }
+ break;
+ case SHT_NOBITS:
+ break;
+ case SHT_REL:
+ err = linker_sanity_check_elf_relos(obj, sec);
+ if (err)
+ return err;
+ break;
+ case SHT_LLVM_ADDRSIG:
+ break;
+ default:
+ pr_warn("ELF section #%zu (%s) has unrecognized type %zu in %s\n",
+ sec->sec_idx, sec->sec_name, (size_t)sec->shdr->sh_type, obj->filename);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec)
+{
+ struct src_sec *link_sec;
+ Elf64_Sym *sym;
+ int i, n;
+
+ if (sec->shdr->sh_entsize != sizeof(Elf64_Sym))
+ return -EINVAL;
+ if (sec->shdr->sh_size % sec->shdr->sh_entsize != 0)
+ return -EINVAL;
+
+ if (!sec->shdr->sh_link || sec->shdr->sh_link >= obj->sec_cnt) {
+ pr_warn("ELF SYMTAB section #%zu points to missing STRTAB section #%zu in %s\n",
+ sec->sec_idx, (size_t)sec->shdr->sh_link, obj->filename);
+ return -EINVAL;
+ }
+ link_sec = &obj->secs[sec->shdr->sh_link];
+ if (link_sec->shdr->sh_type != SHT_STRTAB) {
+ pr_warn("ELF SYMTAB section #%zu points to invalid STRTAB section #%zu in %s\n",
+ sec->sec_idx, (size_t)sec->shdr->sh_link, obj->filename);
+ return -EINVAL;
+ }
+
+ n = sec->shdr->sh_size / sec->shdr->sh_entsize;
+ sym = sec->data->d_buf;
+ for (i = 0; i < n; i++, sym++) {
+ int sym_type = ELF64_ST_TYPE(sym->st_info);
+ int sym_bind = ELF64_ST_BIND(sym->st_info);
+ int sym_vis = ELF64_ST_VISIBILITY(sym->st_other);
+
+ if (i == 0) {
+ if (sym->st_name != 0 || sym->st_info != 0
+ || sym->st_other != 0 || sym->st_shndx != 0
+ || sym->st_value != 0 || sym->st_size != 0) {
+ pr_warn("ELF sym #0 is invalid in %s\n", obj->filename);
+ return -EINVAL;
+ }
+ continue;
+ }
+ if (sym_bind != STB_LOCAL && sym_bind != STB_GLOBAL && sym_bind != STB_WEAK) {
+ pr_warn("ELF sym #%d in section #%zu has unsupported symbol binding %d\n",
+ i, sec->sec_idx, sym_bind);
+ return -EINVAL;
+ }
+ if (sym_vis != STV_DEFAULT && sym_vis != STV_HIDDEN) {
+ pr_warn("ELF sym #%d in section #%zu has unsupported symbol visibility %d\n",
+ i, sec->sec_idx, sym_vis);
+ return -EINVAL;
+ }
+ if (sym->st_shndx == 0) {
+ if (sym_type != STT_NOTYPE || sym_bind == STB_LOCAL
+ || sym->st_value != 0 || sym->st_size != 0) {
+ pr_warn("ELF sym #%d is invalid extern symbol in %s\n",
+ i, obj->filename);
+
+ return -EINVAL;
+ }
+ continue;
+ }
+ if (sym->st_shndx < SHN_LORESERVE && sym->st_shndx >= obj->sec_cnt) {
+ pr_warn("ELF sym #%d in section #%zu points to missing section #%zu in %s\n",
+ i, sec->sec_idx, (size_t)sym->st_shndx, obj->filename);
+ return -EINVAL;
+ }
+ if (sym_type == STT_SECTION) {
+ if (sym->st_value != 0)
+ return -EINVAL;
+ continue;
+ }
+ }
+
+ return 0;
+}
+
+static int linker_sanity_check_elf_relos(struct src_obj *obj, struct src_sec *sec)
+{
+ struct src_sec *link_sec, *sym_sec;
+ Elf64_Rel *relo;
+ int i, n;
+
+ if (sec->shdr->sh_entsize != sizeof(Elf64_Rel))
+ return -EINVAL;
+ if (sec->shdr->sh_size % sec->shdr->sh_entsize != 0)
+ return -EINVAL;
+
+ /* SHT_REL's sh_link should point to SYMTAB */
+ if (sec->shdr->sh_link != obj->symtab_sec_idx) {
+ pr_warn("ELF relo section #%zu points to invalid SYMTAB section #%zu in %s\n",
+ sec->sec_idx, (size_t)sec->shdr->sh_link, obj->filename);
+ return -EINVAL;
+ }
+
+ /* SHT_REL's sh_info points to relocated section */
+ if (!sec->shdr->sh_info || sec->shdr->sh_info >= obj->sec_cnt) {
+ pr_warn("ELF relo section #%zu points to missing section #%zu in %s\n",
+ sec->sec_idx, (size_t)sec->shdr->sh_info, obj->filename);
+ return -EINVAL;
+ }
+ link_sec = &obj->secs[sec->shdr->sh_info];
+
+ /* the ".rel<secname>" -> "<secname>" pattern must be followed, e.g., ".rel.text" has to relocate ".text" */
+ if (strncmp(sec->sec_name, ".rel", sizeof(".rel") - 1) != 0
+ || strcmp(sec->sec_name + sizeof(".rel") - 1, link_sec->sec_name) != 0) {
+ pr_warn("ELF relo section #%zu name has invalid name in %s\n",
+ sec->sec_idx, obj->filename);
+ return -EINVAL;
+ }
+
+ /* don't further validate relocations for ignored sections */
+ if (link_sec->skipped)
+ return 0;
+
+ /* relocatable section is data or instructions */
+ if (link_sec->shdr->sh_type != SHT_PROGBITS && link_sec->shdr->sh_type != SHT_NOBITS) {
+ pr_warn("ELF relo section #%zu points to invalid section #%zu in %s\n",
+ sec->sec_idx, (size_t)sec->shdr->sh_info, obj->filename);
+ return -EINVAL;
+ }
+
+ /* check sanity of each relocation */
+ n = sec->shdr->sh_size / sec->shdr->sh_entsize;
+ relo = sec->data->d_buf;
+ sym_sec = &obj->secs[obj->symtab_sec_idx];
+ for (i = 0; i < n; i++, relo++) {
+ size_t sym_idx = ELF64_R_SYM(relo->r_info);
+ size_t sym_type = ELF64_R_TYPE(relo->r_info);
+
+ if (sym_type != R_BPF_64_64 && sym_type != R_BPF_64_32) {
+ pr_warn("ELF relo #%d in section #%zu has unexpected type %zu in %s\n",
+ i, sec->sec_idx, sym_type, obj->filename);
+ return -EINVAL;
+ }
+
+ if (!sym_idx || sym_idx * sizeof(Elf64_Sym) >= sym_sec->shdr->sh_size) {
+ pr_warn("ELF relo #%d in section #%zu points to invalid symbol #%zu in %s\n",
+ i, sec->sec_idx, sym_idx, obj->filename);
+ return -EINVAL;
+ }
+
+ if (link_sec->shdr->sh_flags & SHF_EXECINSTR) {
+ if (relo->r_offset % sizeof(struct bpf_insn) != 0) {
+ pr_warn("ELF relo #%d in section #%zu points to missing symbol #%zu in %s\n",
+ i, sec->sec_idx, sym_idx, obj->filename);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int check_btf_type_id(__u32 *type_id, void *ctx)
+{
+ struct btf *btf = ctx;
+
+ if (*type_id > btf__get_nr_types(btf))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int check_btf_str_off(__u32 *str_off, void *ctx)
+{
+ struct btf *btf = ctx;
+ const char *s;
+
+ s = btf__str_by_offset(btf, *str_off);
+
+ if (!s)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int linker_sanity_check_btf(struct src_obj *obj)
+{
+ struct btf_type *t;
+ int i, n, err = 0;
+
+ if (!obj->btf)
+ return 0;
+
+ n = btf__get_nr_types(obj->btf);
+ for (i = 1; i <= n; i++) {
+ t = btf_type_by_id(obj->btf, i);
+
+ err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf);
+ err = err ?: btf_type_visit_str_offs(t, check_btf_str_off, obj->btf);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int linker_sanity_check_btf_ext(struct src_obj *obj)
+{
+ int err = 0;
+
+ if (!obj->btf_ext)
+ return 0;
+
+ /* can't use .BTF.ext without .BTF */
+ if (!obj->btf)
+ return -EINVAL;
+
+ err = err ?: btf_ext_visit_type_ids(obj->btf_ext, check_btf_type_id, obj->btf);
+ err = err ?: btf_ext_visit_str_offs(obj->btf_ext, check_btf_str_off, obj->btf);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int init_sec(struct bpf_linker *linker, struct dst_sec *dst_sec, struct src_sec *src_sec)
+{
+ Elf_Scn *scn;
+ Elf_Data *data;
+ Elf64_Shdr *shdr;
+ int name_off;
+
+ dst_sec->sec_sz = 0;
+ dst_sec->sec_idx = 0;
+ dst_sec->ephemeral = src_sec->ephemeral;
+
+ /* ephemeral sections are just thin section shells lacking most parts */
+ if (src_sec->ephemeral)
+ return 0;
+
+ scn = elf_newscn(linker->elf);
+ if (!scn)
+ return -ENOMEM;
+ data = elf_newdata(scn);
+ if (!data)
+ return -ENOMEM;
+ shdr = elf64_getshdr(scn);
+ if (!shdr)
+ return -ENOMEM;
+
+ dst_sec->scn = scn;
+ dst_sec->shdr = shdr;
+ dst_sec->data = data;
+ dst_sec->sec_idx = elf_ndxscn(scn);
+
+ name_off = strset__add_str(linker->strtab_strs, src_sec->sec_name);
+ if (name_off < 0)
+ return name_off;
+
+ shdr->sh_name = name_off;
+ shdr->sh_type = src_sec->shdr->sh_type;
+ shdr->sh_flags = src_sec->shdr->sh_flags;
+ shdr->sh_size = 0;
+ /* sh_link and sh_info have different meaning for different types of
+ * sections, so we leave it up to the caller code to fill them in, if
+ * necessary
+ */
+ shdr->sh_link = 0;
+ shdr->sh_info = 0;
+ shdr->sh_addralign = src_sec->shdr->sh_addralign;
+ shdr->sh_entsize = src_sec->shdr->sh_entsize;
+
+ data->d_type = src_sec->data->d_type;
+ data->d_size = 0;
+ data->d_buf = NULL;
+ data->d_align = src_sec->data->d_align;
+ data->d_off = 0;
+
+ return 0;
+}
+
+static struct dst_sec *find_dst_sec_by_name(struct bpf_linker *linker, const char *sec_name)
+{
+ struct dst_sec *sec;
+ int i;
+
+ for (i = 1; i < linker->sec_cnt; i++) {
+ sec = &linker->secs[i];
+
+ if (strcmp(sec->sec_name, sec_name) == 0)
+ return sec;
+ }
+
+ return NULL;
+}
+
+static bool secs_match(struct dst_sec *dst, struct src_sec *src)
+{
+ if (dst->ephemeral || src->ephemeral)
+ return true;
+
+ if (dst->shdr->sh_type != src->shdr->sh_type) {
+ pr_warn("sec %s types mismatch\n", dst->sec_name);
+ return false;
+ }
+ if (dst->shdr->sh_flags != src->shdr->sh_flags) {
+ pr_warn("sec %s flags mismatch\n", dst->sec_name);
+ return false;
+ }
+ if (dst->shdr->sh_entsize != src->shdr->sh_entsize) {
+ pr_warn("sec %s entsize mismatch\n", dst->sec_name);
+ return false;
+ }
+
+ return true;
+}
+
+static bool sec_content_is_same(struct dst_sec *dst_sec, struct src_sec *src_sec)
+{
+ if (dst_sec->sec_sz != src_sec->shdr->sh_size)
+ return false;
+ if (memcmp(dst_sec->raw_data, src_sec->data->d_buf, dst_sec->sec_sz) != 0)
+ return false;
+ return true;
+}
+
+static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src_sec *src)
+{
+ void *tmp;
+ size_t dst_align, src_align;
+ size_t dst_align_sz, dst_final_sz;
+ int err;
+
+ /* Ephemeral source section doesn't contribute anything to ELF
+ * section data.
+ */
+ if (src->ephemeral)
+ return 0;
+
+ /* Some sections (like .maps) can contain both externs (and thus be
+ * ephemeral) and non-externs (map definitions). So it's possible that
+ * it has to be "upgraded" from ephemeral to non-ephemeral when the
+ * first non-ephemeral entity appears. In such a case, we add the ELF
+ * section, data, etc.
+ */
+ if (dst->ephemeral) {
+ err = init_sec(linker, dst, src);
+ if (err)
+ return err;
+ }
+
+ dst_align = dst->shdr->sh_addralign;
+ src_align = src->shdr->sh_addralign;
+ if (dst_align == 0)
+ dst_align = 1;
+ if (dst_align < src_align)
+ dst_align = src_align;
+
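+ /* round dst size up to the chosen alignment, e.g., sec_sz == 10 with
+ * dst_align == 8 gives dst_align_sz == 16
+ */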
+ dst_align_sz = (dst->sec_sz + dst_align - 1) / dst_align * dst_align;
+
+ /* no need to re-align final size */
+ dst_final_sz = dst_align_sz + src->shdr->sh_size;
+
+ if (src->shdr->sh_type != SHT_NOBITS) {
+ tmp = realloc(dst->raw_data, dst_final_sz);
+ if (!tmp)
+ return -ENOMEM;
+ dst->raw_data = tmp;
+
+ /* pad dst section if its alignment forced a size increase */
+ memset(dst->raw_data + dst->sec_sz, 0, dst_align_sz - dst->sec_sz);
+ /* now copy src data at a properly aligned offset */
+ memcpy(dst->raw_data + dst_align_sz, src->data->d_buf, src->shdr->sh_size);
+ }
+
+ dst->sec_sz = dst_final_sz;
+ dst->shdr->sh_size = dst_final_sz;
+ dst->data->d_size = dst_final_sz;
+
+ dst->shdr->sh_addralign = dst_align;
+ dst->data->d_align = dst_align;
+
+ src->dst_off = dst_align_sz;
+
+ return 0;
+}
+
+static bool is_data_sec(struct src_sec *sec)
+{
+ if (!sec || sec->skipped)
+ return false;
+ /* ephemeral sections are data sections, e.g., .kconfig, .ksyms */
+ if (sec->ephemeral)
+ return true;
+ return sec->shdr->sh_type == SHT_PROGBITS || sec->shdr->sh_type == SHT_NOBITS;
+}
+
+static bool is_relo_sec(struct src_sec *sec)
+{
+ if (!sec || sec->skipped || sec->ephemeral)
+ return false;
+ return sec->shdr->sh_type == SHT_REL;
+}
+
+static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj)
+{
+ int i, err;
+
+ for (i = 1; i < obj->sec_cnt; i++) {
+ struct src_sec *src_sec;
+ struct dst_sec *dst_sec;
+
+ src_sec = &obj->secs[i];
+ if (!is_data_sec(src_sec))
+ continue;
+
+ dst_sec = find_dst_sec_by_name(linker, src_sec->sec_name);
+ if (!dst_sec) {
+ dst_sec = add_dst_sec(linker, src_sec->sec_name);
+ if (!dst_sec)
+ return -ENOMEM;
+ err = init_sec(linker, dst_sec, src_sec);
+ if (err) {
+ pr_warn("failed to init section '%s'\n", src_sec->sec_name);
+ return err;
+ }
+ } else {
+ if (!secs_match(dst_sec, src_sec)) {
+ pr_warn("ELF sections %s are incompatible\n", src_sec->sec_name);
+ return -1;
+ }
+
+ /* "license" and "version" sections are deduped */
+ if (strcmp(src_sec->sec_name, "license") == 0
+ || strcmp(src_sec->sec_name, "version") == 0) {
+ if (!sec_content_is_same(dst_sec, src_sec)) {
+ pr_warn("non-identical contents of section '%s' are not supported\n", src_sec->sec_name);
+ return -EINVAL;
+ }
+ src_sec->skipped = true;
+ src_sec->dst_id = dst_sec->id;
+ continue;
+ }
+ }
+
+ /* record mapped section index */
+ src_sec->dst_id = dst_sec->id;
+
+ err = extend_sec(linker, dst_sec, src_sec);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int linker_append_elf_syms(struct bpf_linker *linker, struct src_obj *obj)
+{
+ struct src_sec *symtab = &obj->secs[obj->symtab_sec_idx];
+ Elf64_Sym *sym = symtab->data->d_buf;
+ int i, n = symtab->shdr->sh_size / symtab->shdr->sh_entsize, err;
+ int str_sec_idx = symtab->shdr->sh_link;
+ const char *sym_name;
+
+ obj->sym_map = calloc(n + 1, sizeof(*obj->sym_map));
+ if (!obj->sym_map)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++, sym++) {
+ /* We already validated all-zero symbol #0 and we already
+ * appended it preventively to the final SYMTAB, so skip it.
+ */
+ if (i == 0)
+ continue;
+
+ sym_name = elf_strptr(obj->elf, str_sec_idx, sym->st_name);
+ if (!sym_name) {
+ pr_warn("can't fetch symbol name for symbol #%d in '%s'\n", i, obj->filename);
+ return -EINVAL;
+ }
+
+ err = linker_append_elf_sym(linker, obj, sym, sym_name, i);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static Elf64_Sym *get_sym_by_idx(struct bpf_linker *linker, size_t sym_idx)
+{
+ struct dst_sec *symtab = &linker->secs[linker->symtab_sec_idx];
+ Elf64_Sym *syms = symtab->raw_data;
+
+ return &syms[sym_idx];
+}
+
+static struct glob_sym *find_glob_sym(struct bpf_linker *linker, const char *sym_name)
+{
+ struct glob_sym *glob_sym;
+ const char *name;
+ int i;
+
+ for (i = 0; i < linker->glob_sym_cnt; i++) {
+ glob_sym = &linker->glob_syms[i];
+ name = strset__data(linker->strtab_strs) + glob_sym->name_off;
+
+ if (strcmp(name, sym_name) == 0)
+ return glob_sym;
+ }
+
+ return NULL;
+}
+
+static struct glob_sym *add_glob_sym(struct bpf_linker *linker)
+{
+ struct glob_sym *syms, *sym;
+
+ syms = libbpf_reallocarray(linker->glob_syms, linker->glob_sym_cnt + 1,
+ sizeof(*linker->glob_syms));
+ if (!syms)
+ return NULL;
+
+ sym = &syms[linker->glob_sym_cnt];
+ memset(sym, 0, sizeof(*sym));
+ sym->var_idx = -1;
+
+ linker->glob_syms = syms;
+ linker->glob_sym_cnt++;
+
+ return sym;
+}
+
+static bool glob_sym_btf_matches(const char *sym_name, bool exact,
+ const struct btf *btf1, __u32 id1,
+ const struct btf *btf2, __u32 id2)
+{
+ const struct btf_type *t1, *t2;
+ bool is_static1, is_static2;
+ const char *n1, *n2;
+ int i, n;
+
+recur:
+ n1 = n2 = NULL;
+ t1 = skip_mods_and_typedefs(btf1, id1, &id1);
+ t2 = skip_mods_and_typedefs(btf2, id2, &id2);
+
+ /* check if only one side is FWD, otherwise handle with common logic */
+ if (!exact && btf_is_fwd(t1) != btf_is_fwd(t2)) {
+ n1 = btf__str_by_offset(btf1, t1->name_off);
+ n2 = btf__str_by_offset(btf2, t2->name_off);
+ if (strcmp(n1, n2) != 0) {
+ pr_warn("global '%s': incompatible forward declaration names '%s' and '%s'\n",
+ sym_name, n1, n2);
+ return false;
+ }
+ /* validate if FWD kind matches concrete kind */
+ if (btf_is_fwd(t1)) {
+ if (btf_kflag(t1) && btf_is_union(t2))
+ return true;
+ if (!btf_kflag(t1) && btf_is_struct(t2))
+ return true;
+ pr_warn("global '%s': incompatible %s forward declaration and concrete kind %s\n",
+ sym_name, btf_kflag(t1) ? "union" : "struct", btf_kind_str(t2));
+ } else {
+ if (btf_kflag(t2) && btf_is_union(t1))
+ return true;
+ if (!btf_kflag(t2) && btf_is_struct(t1))
+ return true;
+ pr_warn("global '%s': incompatible %s forward declaration and concrete kind %s\n",
+ sym_name, btf_kflag(t2) ? "union" : "struct", btf_kind_str(t1));
+ }
+ return false;
+ }
+
+ if (btf_kind(t1) != btf_kind(t2)) {
+ pr_warn("global '%s': incompatible BTF kinds %s and %s\n",
+ sym_name, btf_kind_str(t1), btf_kind_str(t2));
+ return false;
+ }
+
+ switch (btf_kind(t1)) {
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_FWD:
+ case BTF_KIND_FUNC:
+ case BTF_KIND_VAR:
+ n1 = btf__str_by_offset(btf1, t1->name_off);
+ n2 = btf__str_by_offset(btf2, t2->name_off);
+ if (strcmp(n1, n2) != 0) {
+ pr_warn("global '%s': incompatible %s names '%s' and '%s'\n",
+ sym_name, btf_kind_str(t1), n1, n2);
+ return false;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (btf_kind(t1)) {
+ case BTF_KIND_UNKN: /* void */
+ case BTF_KIND_FWD:
+ return true;
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ /* only size matters: ignore encoding for ints and the value list for enums */
+ if (t1->size != t2->size) {
+ pr_warn("global '%s': incompatible %s '%s' size %u and %u\n",
+ sym_name, btf_kind_str(t1), n1, t1->size, t2->size);
+ return false;
+ }
+ return true;
+ case BTF_KIND_PTR:
+ /* just validate the overall shape of the referenced type: no
+ * contents comparison for struct/union, and fwd vs concrete
+ * struct/union is allowed
+ */
+ exact = false;
+ id1 = t1->type;
+ id2 = t2->type;
+ goto recur;
+ case BTF_KIND_ARRAY:
+ /* ignore index type and array size */
+ id1 = btf_array(t1)->type;
+ id2 = btf_array(t2)->type;
+ goto recur;
+ case BTF_KIND_FUNC:
+ /* extern and global linkages are compatible */
+ is_static1 = btf_func_linkage(t1) == BTF_FUNC_STATIC;
+ is_static2 = btf_func_linkage(t2) == BTF_FUNC_STATIC;
+ if (is_static1 != is_static2) {
+ pr_warn("global '%s': incompatible func '%s' linkage\n", sym_name, n1);
+ return false;
+ }
+
+ id1 = t1->type;
+ id2 = t2->type;
+ goto recur;
+ case BTF_KIND_VAR:
+ /* extern and global linkages are compatible */
+ is_static1 = btf_var(t1)->linkage == BTF_VAR_STATIC;
+ is_static2 = btf_var(t2)->linkage == BTF_VAR_STATIC;
+ if (is_static1 != is_static2) {
+ pr_warn("global '%s': incompatible var '%s' linkage\n", sym_name, n1);
+ return false;
+ }
+
+ id1 = t1->type;
+ id2 = t2->type;
+ goto recur;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ const struct btf_member *m1, *m2;
+
+ if (!exact)
+ return true;
+
+ if (btf_vlen(t1) != btf_vlen(t2)) {
+ pr_warn("global '%s': incompatible number of %s fields %u and %u\n",
+ sym_name, btf_kind_str(t1), btf_vlen(t1), btf_vlen(t2));
+ return false;
+ }
+
+ n = btf_vlen(t1);
+ m1 = btf_members(t1);
+ m2 = btf_members(t2);
+ for (i = 0; i < n; i++, m1++, m2++) {
+ n1 = btf__str_by_offset(btf1, m1->name_off);
+ n2 = btf__str_by_offset(btf2, m2->name_off);
+ if (strcmp(n1, n2) != 0) {
+ pr_warn("global '%s': incompatible field #%d names '%s' and '%s'\n",
+ sym_name, i, n1, n2);
+ return false;
+ }
+ if (m1->offset != m2->offset) {
+ pr_warn("global '%s': incompatible field #%d ('%s') offsets\n",
+ sym_name, i, n1);
+ return false;
+ }
+ if (!glob_sym_btf_matches(sym_name, exact, btf1, m1->type, btf2, m2->type))
+ return false;
+ }
+
+ return true;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ const struct btf_param *m1, *m2;
+
+ if (btf_vlen(t1) != btf_vlen(t2)) {
+ pr_warn("global '%s': incompatible number of %s params %u and %u\n",
+ sym_name, btf_kind_str(t1), btf_vlen(t1), btf_vlen(t2));
+ return false;
+ }
+
+ n = btf_vlen(t1);
+ m1 = btf_params(t1);
+ m2 = btf_params(t2);
+ for (i = 0; i < n; i++, m1++, m2++) {
+ /* ignore func arg names */
+ if (!glob_sym_btf_matches(sym_name, exact, btf1, m1->type, btf2, m2->type))
+ return false;
+ }
+
+ /* now check return type as well */
+ id1 = t1->type;
+ id2 = t2->type;
+ goto recur;
+ }
+
+ /* skip_mods_and_typedefs() makes this impossible */
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ /* DATASECs are never compared with each other */
+ case BTF_KIND_DATASEC:
+ default:
+ pr_warn("global '%s': unsupported BTF kind %s\n",
+ sym_name, btf_kind_str(t1));
+ return false;
+ }
+}
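+
+/* An illustration of the rules above (a worked example, not extra checks):
+ * "extern struct foo *p;" in one object matches "struct foo { int x; } *p;"
+ * in another, because the PTR case drops to non-exact mode, where a FWD
+ * is compatible with a concrete struct/union of the same name.
+ */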
+
+static bool map_defs_match(const char *sym_name,
+ const struct btf *main_btf,
+ const struct btf_map_def *main_def,
+ const struct btf_map_def *main_inner_def,
+ const struct btf *extra_btf,
+ const struct btf_map_def *extra_def,
+ const struct btf_map_def *extra_inner_def)
+{
+ const char *reason;
+
+ if (main_def->map_type != extra_def->map_type) {
+ reason = "type";
+ goto mismatch;
+ }
+
+ /* check key type/size match */
+ if (main_def->key_size != extra_def->key_size) {
+ reason = "key_size";
+ goto mismatch;
+ }
+ if (!!main_def->key_type_id != !!extra_def->key_type_id) {
+ reason = "key type";
+ goto mismatch;
+ }
+ if ((main_def->parts & MAP_DEF_KEY_TYPE)
+ && !glob_sym_btf_matches(sym_name, true /*exact*/,
+ main_btf, main_def->key_type_id,
+ extra_btf, extra_def->key_type_id)) {
+ reason = "key type";
+ goto mismatch;
+ }
+
+ /* validate value type/size match */
+ if (main_def->value_size != extra_def->value_size) {
+ reason = "value_size";
+ goto mismatch;
+ }
+ if (!!main_def->value_type_id != !!extra_def->value_type_id) {
+ reason = "value type";
+ goto mismatch;
+ }
+ if ((main_def->parts & MAP_DEF_VALUE_TYPE)
+ && !glob_sym_btf_matches(sym_name, true /*exact*/,
+ main_btf, main_def->value_type_id,
+ extra_btf, extra_def->value_type_id)) {
+ reason = "key type";
+ goto mismatch;
+ }
+
+ if (main_def->max_entries != extra_def->max_entries) {
+ reason = "max_entries";
+ goto mismatch;
+ }
+ if (main_def->map_flags != extra_def->map_flags) {
+ reason = "map_flags";
+ goto mismatch;
+ }
+ if (main_def->numa_node != extra_def->numa_node) {
+ reason = "numa_node";
+ goto mismatch;
+ }
+ if (main_def->pinning != extra_def->pinning) {
+ reason = "pinning";
+ goto mismatch;
+ }
+
+ if ((main_def->parts & MAP_DEF_INNER_MAP) != (extra_def->parts & MAP_DEF_INNER_MAP)) {
+ reason = "inner map";
+ goto mismatch;
+ }
+
+ if (main_def->parts & MAP_DEF_INNER_MAP) {
+ char inner_map_name[128];
+
+ snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", sym_name);
+
+ return map_defs_match(inner_map_name,
+ main_btf, main_inner_def, NULL,
+ extra_btf, extra_inner_def, NULL);
+ }
+
+ return true;
+
+mismatch:
+ pr_warn("global '%s': map %s mismatch\n", sym_name, reason);
+ return false;
+}
+
+static bool glob_map_defs_match(const char *sym_name,
+ struct bpf_linker *linker, struct glob_sym *glob_sym,
+ struct src_obj *obj, Elf64_Sym *sym, int btf_id)
+{
+ struct btf_map_def dst_def = {}, dst_inner_def = {};
+ struct btf_map_def src_def = {}, src_inner_def = {};
+ const struct btf_type *t;
+ int err;
+
+ t = btf__type_by_id(obj->btf, btf_id);
+ if (!btf_is_var(t)) {
+ pr_warn("global '%s': invalid map definition type [%d]\n", sym_name, btf_id);
+ return false;
+ }
+ t = skip_mods_and_typedefs(obj->btf, t->type, NULL);
+
+ err = parse_btf_map_def(sym_name, obj->btf, t, true /*strict*/, &src_def, &src_inner_def);
+ if (err) {
+ pr_warn("global '%s': invalid map definition\n", sym_name);
+ return false;
+ }
+
+ /* re-parse existing map definition */
+ t = btf__type_by_id(linker->btf, glob_sym->btf_id);
+ t = skip_mods_and_typedefs(linker->btf, t->type, NULL);
+ err = parse_btf_map_def(sym_name, linker->btf, t, true /*strict*/, &dst_def, &dst_inner_def);
+ if (err) {
+ /* this should not happen, because we already validated it */
+ pr_warn("global '%s': invalid dst map definition\n", sym_name);
+ return false;
+ }
+
+ /* Currently extern map definition has to be complete and match
+ * concrete map definition exactly. This restriction might be lifted
+ * in the future.
+ */
+ return map_defs_match(sym_name, linker->btf, &dst_def, &dst_inner_def,
+ obj->btf, &src_def, &src_inner_def);
+}
+
+static bool glob_syms_match(const char *sym_name,
+ struct bpf_linker *linker, struct glob_sym *glob_sym,
+ struct src_obj *obj, Elf64_Sym *sym, size_t sym_idx, int btf_id)
+{
+ const struct btf_type *src_t;
+
+ /* if we are dealing with externs, BTF types describing both global
+ * and extern VARs/FUNCs should be completely present in all files
+ */
+ if (!glob_sym->btf_id || !btf_id) {
+ pr_warn("BTF info is missing for global symbol '%s'\n", sym_name);
+ return false;
+ }
+
+ src_t = btf__type_by_id(obj->btf, btf_id);
+ if (!btf_is_var(src_t) && !btf_is_func(src_t)) {
+ pr_warn("only extern variables and functions are supported, but got '%s' for '%s'\n",
+ btf_kind_str(src_t), sym_name);
+ return false;
+ }
+
+ /* deal with .maps definitions specially */
+ if (glob_sym->sec_id && strcmp(linker->secs[glob_sym->sec_id].sec_name, MAPS_ELF_SEC) == 0)
+ return glob_map_defs_match(sym_name, linker, glob_sym, obj, sym, btf_id);
+
+ if (!glob_sym_btf_matches(sym_name, true /*exact*/,
+ linker->btf, glob_sym->btf_id, obj->btf, btf_id))
+ return false;
+
+ return true;
+}
+
+static bool btf_is_non_static(const struct btf_type *t)
+{
+ return (btf_is_var(t) && btf_var(t)->linkage != BTF_VAR_STATIC)
+ || (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_STATIC);
+}
+
+static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sym_name,
+ int *out_btf_sec_id, int *out_btf_id)
+{
+ int i, j, n = btf__get_nr_types(obj->btf), m, btf_id = 0;
+ const struct btf_type *t;
+ const struct btf_var_secinfo *vi;
+ const char *name;
+
+ for (i = 1; i <= n; i++) {
+ t = btf__type_by_id(obj->btf, i);
+
+ /* some global and extern FUNCs and VARs might not be associated with any
+ * DATASEC, so try to detect them in the same pass
+ */
+ if (btf_is_non_static(t)) {
+ name = btf__str_by_offset(obj->btf, t->name_off);
+ if (strcmp(name, sym_name) != 0)
+ continue;
+
+ /* remember and still try to find DATASEC */
+ btf_id = i;
+ continue;
+ }
+
+ if (!btf_is_datasec(t))
+ continue;
+
+ vi = btf_var_secinfos(t);
+ for (j = 0, m = btf_vlen(t); j < m; j++, vi++) {
+ t = btf__type_by_id(obj->btf, vi->type);
+ name = btf__str_by_offset(obj->btf, t->name_off);
+
+ if (strcmp(name, sym_name) != 0)
+ continue;
+ if (btf_is_var(t) && btf_var(t)->linkage == BTF_VAR_STATIC)
+ continue;
+ if (btf_is_func(t) && btf_func_linkage(t) == BTF_FUNC_STATIC)
+ continue;
+
+ if (btf_id && btf_id != vi->type) {
+ pr_warn("global/extern '%s' BTF is ambiguous: both types #%d and #%u match\n",
+ sym_name, btf_id, vi->type);
+ return -EINVAL;
+ }
+
+ *out_btf_sec_id = i;
+ *out_btf_id = vi->type;
+
+ return 0;
+ }
+ }
+
+ /* free-floating extern or global FUNC */
+ if (btf_id) {
+ *out_btf_sec_id = 0;
+ *out_btf_id = btf_id;
+ return 0;
+ }
+
+ pr_warn("failed to find BTF info for global/extern symbol '%s'\n", sym_name);
+ return -ENOENT;
+}
+
+static struct src_sec *find_src_sec_by_name(struct src_obj *obj, const char *sec_name)
+{
+ struct src_sec *sec;
+ int i;
+
+ for (i = 1; i < obj->sec_cnt; i++) {
+ sec = &obj->secs[i];
+
+ if (strcmp(sec->sec_name, sec_name) == 0)
+ return sec;
+ }
+
+ return NULL;
+}
+
+static int complete_extern_btf_info(struct btf *dst_btf, int dst_id,
+ struct btf *src_btf, int src_id)
+{
+ struct btf_type *dst_t = btf_type_by_id(dst_btf, dst_id);
+ struct btf_type *src_t = btf_type_by_id(src_btf, src_id);
+ struct btf_param *src_p, *dst_p;
+ const char *s;
+ int i, n, off;
+
+ /* We already made sure that source and destination types (FUNC or
+ * VAR) match in terms of types and argument names.
+ */
+ if (btf_is_var(dst_t)) {
+ btf_var(dst_t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
+ return 0;
+ }
+
+ dst_t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_GLOBAL, 0);
+
+ /* now onto FUNC_PROTO types */
+ src_t = btf_type_by_id(src_btf, src_t->type);
+ dst_t = btf_type_by_id(dst_btf, dst_t->type);
+
+ /* Fill in all the argument names, which for extern FUNCs are missing.
+ * We'll end up with two copies of FUNCs/VARs for externs, but that
+ * will be taken care of by BTF dedup at the very end.
+ * It might be that BTF types for an extern in one file have less/more BTF
+ * information (e.g., FWD instead of full STRUCT/UNION information),
+ * but that should be (in most cases, subject to BTF dedup rules)
+ * handled and resolved by BTF dedup algorithm as well, so we won't
+ * worry about it. Our only job is to make sure that argument names
+ * are populated on both sides, otherwise BTF dedup will pedantically
+ * consider them different.
+ */
+ src_p = btf_params(src_t);
+ dst_p = btf_params(dst_t);
+ for (i = 0, n = btf_vlen(dst_t); i < n; i++, src_p++, dst_p++) {
+ if (!src_p->name_off)
+ continue;
+
+ /* src_btf has more complete info, so add name to dst_btf */
+ s = btf__str_by_offset(src_btf, src_p->name_off);
+ off = btf__add_str(dst_btf, s);
+ if (off < 0)
+ return off;
+ dst_p->name_off = off;
+ }
+ return 0;
+}
+
+static void sym_update_bind(Elf64_Sym *sym, int sym_bind)
+{
+ sym->st_info = ELF64_ST_INFO(sym_bind, ELF64_ST_TYPE(sym->st_info));
+}
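+
+/* st_info packs binding in the high nibble and type in the low nibble,
+ * so, e.g., ELF64_ST_INFO(STB_GLOBAL, STT_FUNC) == (1 << 4) | 2 == 0x12
+ */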
+
+static void sym_update_type(Elf64_Sym *sym, int sym_type)
+{
+ sym->st_info = ELF64_ST_INFO(ELF64_ST_BIND(sym->st_info), sym_type);
+}
+
+static void sym_update_visibility(Elf64_Sym *sym, int sym_vis)
+{
+ /* libelf doesn't provide setters for ST_VISIBILITY,
+ * but it is stored in the lower 2 bits of st_other
+ */
+ sym->st_other &= 0x03;
+ sym->st_other |= sym_vis;
+}
+
+static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,
+ Elf64_Sym *sym, const char *sym_name, int src_sym_idx)
+{
+ struct src_sec *src_sec = NULL;
+ struct dst_sec *dst_sec = NULL;
+ struct glob_sym *glob_sym = NULL;
+ int name_off, sym_type, sym_bind, sym_vis, err;
+ int btf_sec_id = 0, btf_id = 0;
+ size_t dst_sym_idx;
+ Elf64_Sym *dst_sym;
+ bool sym_is_extern;
+
+ sym_type = ELF64_ST_TYPE(sym->st_info);
+ sym_bind = ELF64_ST_BIND(sym->st_info);
+ sym_vis = ELF64_ST_VISIBILITY(sym->st_other);
+ sym_is_extern = sym->st_shndx == SHN_UNDEF;
+
+ if (sym_is_extern) {
+ if (!obj->btf) {
+ pr_warn("externs without BTF info are not supported\n");
+ return -ENOTSUP;
+ }
+ } else if (sym->st_shndx < SHN_LORESERVE) {
+ src_sec = &obj->secs[sym->st_shndx];
+ if (src_sec->skipped)
+ return 0;
+ dst_sec = &linker->secs[src_sec->dst_id];
+
+ /* allow only one STT_SECTION symbol per section */
+ if (sym_type == STT_SECTION && dst_sec->sec_sym_idx) {
+ obj->sym_map[src_sym_idx] = dst_sec->sec_sym_idx;
+ return 0;
+ }
+ }
+
+ if (sym_bind == STB_LOCAL)
+ goto add_sym;
+
+ /* find matching BTF info */
+ err = find_glob_sym_btf(obj, sym, sym_name, &btf_sec_id, &btf_id);
+ if (err)
+ return err;
+
+ if (sym_is_extern && btf_sec_id) {
+ const char *sec_name = NULL;
+ const struct btf_type *t;
+
+ t = btf__type_by_id(obj->btf, btf_sec_id);
+ sec_name = btf__str_by_offset(obj->btf, t->name_off);
+
+ /* Clang puts unannotated extern vars into
+ * '.extern' BTF DATASEC. Treat them the same
+ * as unannotated extern funcs (which are
+ * currently not put into any DATASECs).
+ * Those don't have associated src_sec/dst_sec.
+ */
+ if (strcmp(sec_name, BTF_EXTERN_SEC) != 0) {
+ src_sec = find_src_sec_by_name(obj, sec_name);
+ if (!src_sec) {
+ pr_warn("failed to find matching ELF sec '%s'\n", sec_name);
+ return -ENOENT;
+ }
+ dst_sec = &linker->secs[src_sec->dst_id];
+ }
+ }
+
+ glob_sym = find_glob_sym(linker, sym_name);
+ if (glob_sym) {
+ /* Preventively resolve to existing symbol. This is
+ * needed for further relocation symbol remapping in
+ * the next step of linking.
+ */
+ obj->sym_map[src_sym_idx] = glob_sym->sym_idx;
+
+ /* If both symbols are non-externs, at least one of
+ * them has to be STB_WEAK, otherwise they are in
+ * a conflict with each other.
+ */
+ if (!sym_is_extern && !glob_sym->is_extern
+ && !glob_sym->is_weak && sym_bind != STB_WEAK) {
+ pr_warn("conflicting non-weak symbol #%d (%s) definition in '%s'\n",
+ src_sym_idx, sym_name, obj->filename);
+ return -EINVAL;
+ }
+
+ if (!glob_syms_match(sym_name, linker, glob_sym, obj, sym, src_sym_idx, btf_id))
+ return -EINVAL;
+
+ dst_sym = get_sym_by_idx(linker, glob_sym->sym_idx);
+
+ /* If new symbol is strong, then force dst_sym to be strong as
+ * well; this way a mix of weak and non-weak extern
+ * definitions will end up being strong.
+ */
+ if (sym_bind == STB_GLOBAL) {
+ /* We still need to preserve type (NOTYPE or
+ * OBJECT/FUNC, depending on whether the symbol is
+ * extern or not)
+ */
+ sym_update_bind(dst_sym, STB_GLOBAL);
+ glob_sym->is_weak = false;
+ }
+
+ /* Non-default visibility is "contaminating", with stricter
+ * visibility overwriting more permissive ones, even if more
+ * permissive visibility comes from just an extern definition.
+ * Currently only STV_DEFAULT and STV_HIDDEN are allowed and
+ * ensured by ELF symbol sanity checks above.
+ */
+ if (sym_vis > ELF64_ST_VISIBILITY(dst_sym->st_other))
+ sym_update_visibility(dst_sym, sym_vis);
+
+ /* If the new symbol is extern, then regardless if
+ * existing symbol is extern or resolved global, just
+ * keep the existing one untouched.
+ */
+ if (sym_is_extern)
+ return 0;
+
+ /* If the existing symbol is a strong resolved symbol, bail out,
+ * because we lost the resolution battle and have nothing to
+ * contribute. We already checked above that there is no
+ * strong-strong conflict. We also already tightened binding
+ * and visibility, so there is nothing else to contribute here.
+ */
+ if (!glob_sym->is_extern && sym_bind == STB_WEAK)
+ return 0;
+
+ /* At this point, new symbol is strong non-extern,
+ * so overwrite glob_sym with new symbol information.
+ * Preserve binding and visibility.
+ */
+ sym_update_type(dst_sym, sym_type);
+ dst_sym->st_shndx = dst_sec->sec_idx;
+ dst_sym->st_value = src_sec->dst_off + sym->st_value;
+ dst_sym->st_size = sym->st_size;
+
+ /* see comment below about dst_sec->id vs dst_sec->sec_idx */
+ glob_sym->sec_id = dst_sec->id;
+ glob_sym->is_extern = false;
+
+ if (complete_extern_btf_info(linker->btf, glob_sym->btf_id,
+ obj->btf, btf_id))
+ return -EINVAL;
+
+ /* request updating VAR's/FUNC's underlying BTF type when appending BTF type */
+ glob_sym->underlying_btf_id = 0;
+
+ obj->sym_map[src_sym_idx] = glob_sym->sym_idx;
+ return 0;
+ }
+
+add_sym:
+ name_off = strset__add_str(linker->strtab_strs, sym_name);
+ if (name_off < 0)
+ return name_off;
+
+ dst_sym = add_new_sym(linker, &dst_sym_idx);
+ if (!dst_sym)
+ return -ENOMEM;
+
+ dst_sym->st_name = name_off;
+ dst_sym->st_info = sym->st_info;
+ dst_sym->st_other = sym->st_other;
+ dst_sym->st_shndx = dst_sec ? dst_sec->sec_idx : sym->st_shndx;
+ dst_sym->st_value = (src_sec ? src_sec->dst_off : 0) + sym->st_value;
+ dst_sym->st_size = sym->st_size;
+
+ obj->sym_map[src_sym_idx] = dst_sym_idx;
+
+ if (sym_type == STT_SECTION && dst_sym) {
+ dst_sec->sec_sym_idx = dst_sym_idx;
+ dst_sym->st_value = 0;
+ }
+
+ if (sym_bind != STB_LOCAL) {
+ glob_sym = add_glob_sym(linker);
+ if (!glob_sym)
+ return -ENOMEM;
+
+ glob_sym->sym_idx = dst_sym_idx;
+ /* we use dst_sec->id (and not dst_sec->sec_idx), because
+ * ephemeral sections (.kconfig, .ksyms, etc) don't have
+ * sec_idx (as they don't have corresponding ELF section), but
+ * still have an id. .extern doesn't even have an ephemeral section
+ * associated with it, so dst_sec->id == dst_sec->sec_idx == 0.
+ */
+ glob_sym->sec_id = dst_sec ? dst_sec->id : 0;
+ glob_sym->name_off = name_off;
+ /* we will fill btf_id in during BTF merging step */
+ glob_sym->btf_id = 0;
+ glob_sym->is_extern = sym_is_extern;
+ glob_sym->is_weak = sym_bind == STB_WEAK;
+ }
+
+ return 0;
+}
+
+static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *obj)
+{
+ struct src_sec *src_symtab = &obj->secs[obj->symtab_sec_idx];
+ struct dst_sec *dst_symtab = &linker->secs[linker->symtab_sec_idx];
+ int i, err;
+
+ for (i = 1; i < obj->sec_cnt; i++) {
+ struct src_sec *src_sec, *src_linked_sec;
+ struct dst_sec *dst_sec, *dst_linked_sec;
+ Elf64_Rel *src_rel, *dst_rel;
+ int j, n;
+
+ src_sec = &obj->secs[i];
+ if (!is_relo_sec(src_sec))
+ continue;
+
+ /* shdr->sh_info points to relocatable section */
+ src_linked_sec = &obj->secs[src_sec->shdr->sh_info];
+ if (src_linked_sec->skipped)
+ continue;
+
+ dst_sec = find_dst_sec_by_name(linker, src_sec->sec_name);
+ if (!dst_sec) {
+ dst_sec = add_dst_sec(linker, src_sec->sec_name);
+ if (!dst_sec)
+ return -ENOMEM;
+ err = init_sec(linker, dst_sec, src_sec);
+ if (err) {
+ pr_warn("failed to init section '%s'\n", src_sec->sec_name);
+ return err;
+ }
+ } else if (!secs_match(dst_sec, src_sec)) {
+ pr_warn("sections %s are not compatible\n", src_sec->sec_name);
+ return -1;
+ }
+
+ /* shdr->sh_link points to SYMTAB */
+ dst_sec->shdr->sh_link = linker->symtab_sec_idx;
+
+ /* shdr->sh_info points to relocated section */
+ dst_linked_sec = &linker->secs[src_linked_sec->dst_id];
+ dst_sec->shdr->sh_info = dst_linked_sec->sec_idx;
+
+ src_sec->dst_id = dst_sec->id;
+ err = extend_sec(linker, dst_sec, src_sec);
+ if (err)
+ return err;
+
+ src_rel = src_sec->data->d_buf;
+ dst_rel = dst_sec->raw_data + src_sec->dst_off;
+ n = src_sec->shdr->sh_size / src_sec->shdr->sh_entsize;
+ for (j = 0; j < n; j++, src_rel++, dst_rel++) {
+ size_t src_sym_idx = ELF64_R_SYM(src_rel->r_info);
+ size_t sym_type = ELF64_R_TYPE(src_rel->r_info);
+ Elf64_Sym *src_sym, *dst_sym;
+ size_t dst_sym_idx;
+
+ src_sym = src_symtab->data->d_buf + sizeof(*src_sym) * src_sym_idx;
+
+ dst_sym_idx = obj->sym_map[src_sym_idx];
+ dst_sym = dst_symtab->raw_data + sizeof(*dst_sym) * dst_sym_idx;
+ dst_rel->r_offset += src_linked_sec->dst_off;
+ dst_rel->r_info = ELF64_R_INFO(dst_sym_idx, sym_type);
+
+ if (ELF64_ST_TYPE(src_sym->st_info) == STT_SECTION) {
+ struct src_sec *sec = &obj->secs[src_sym->st_shndx];
+ struct bpf_insn *insn;
+
+ if (src_linked_sec->shdr->sh_flags & SHF_EXECINSTR) {
+ /* Calls to the very first static function inside a
+ * .text section at offset 0 reference the section
+ * symbol, not the function symbol. Fix that up,
+ * otherwise it won't be possible to relocate calls
+ * to two different static functions with the same
+ * name (from two different object files).
+ */
+ insn = dst_linked_sec->raw_data + dst_rel->r_offset;
+ if (insn->code == (BPF_JMP | BPF_CALL))
+ insn->imm += sec->dst_off / sizeof(struct bpf_insn);
+ else
+ insn->imm += sec->dst_off;
+ } else {
+ pr_warn("relocation against STT_SECTION in non-exec section is not supported!\n");
+ return -EINVAL;
+ }
+ }
+
+ }
+ }
+
+ return 0;
+}
+
+static Elf64_Sym *find_sym_by_name(struct src_obj *obj, size_t sec_idx,
+ int sym_type, const char *sym_name)
+{
+ struct src_sec *symtab = &obj->secs[obj->symtab_sec_idx];
+ Elf64_Sym *sym = symtab->data->d_buf;
+ int i, n = symtab->shdr->sh_size / symtab->shdr->sh_entsize;
+ int str_sec_idx = symtab->shdr->sh_link;
+ const char *name;
+
+ for (i = 0; i < n; i++, sym++) {
+ if (sym->st_shndx != sec_idx)
+ continue;
+ if (ELF64_ST_TYPE(sym->st_info) != sym_type)
+ continue;
+
+ name = elf_strptr(obj->elf, str_sec_idx, sym->st_name);
+ if (!name)
+ return NULL;
+
+ if (strcmp(sym_name, name) != 0)
+ continue;
+
+ return sym;
+ }
+
+ return NULL;
+}
+
+static int linker_fixup_btf(struct src_obj *obj)
+{
+ const char *sec_name;
+ struct src_sec *sec;
+ int i, j, n, m;
+
+ if (!obj->btf)
+ return 0;
+
+ n = btf__get_nr_types(obj->btf);
+ for (i = 1; i <= n; i++) {
+ struct btf_var_secinfo *vi;
+ struct btf_type *t;
+
+ t = btf_type_by_id(obj->btf, i);
+ if (btf_kind(t) != BTF_KIND_DATASEC)
+ continue;
+
+ sec_name = btf__str_by_offset(obj->btf, t->name_off);
+ sec = find_src_sec_by_name(obj, sec_name);
+ if (sec) {
+ /* record actual section size, unless ephemeral */
+ if (sec->shdr)
+ t->size = sec->shdr->sh_size;
+ } else {
+ /* BTF can have some sections that are not represented
+ * in ELF, e.g., .kconfig, .ksyms, .extern, which are used
+ * for special extern variables.
+ *
+ * For all but one of these special (ephemeral)
+ * sections, we pre-create "section shells" to be able
+ * to keep track of extra per-section metadata later
+ * (e.g., those BTF extern variables).
+ *
+ * .extern is even more special, though, because it
+ * contains extern variables that need to be resolved
+ * by the static linker, not by libbpf or the kernel. When such
+ * externs are resolved, we are going to remove them
+ * from .extern BTF section and might end up not
+ * needing it at all. Each resolved extern should have
+ * matching non-extern VAR/FUNC in other sections.
+ *
+ * We do support leaving some of the externs
+ * unresolved, though, to support cases of building
+ * libraries, which will later be linked against final
+ * BPF applications. So if at finalization we still
+ * see unresolved externs, we'll create .extern
+ * section on our own.
+ */
+ if (strcmp(sec_name, BTF_EXTERN_SEC) == 0)
+ continue;
+
+ sec = add_src_sec(obj, sec_name);
+ if (!sec)
+ return -ENOMEM;
+
+ sec->ephemeral = true;
+ sec->sec_idx = 0; /* will match UNDEF shndx in ELF */
+ }
+
+ /* remember ELF section and its BTF type ID match */
+ sec->sec_type_id = i;
+
+ /* fix up variable offsets */
+ vi = btf_var_secinfos(t);
+ for (j = 0, m = btf_vlen(t); j < m; j++, vi++) {
+ const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type);
+ const char *var_name = btf__str_by_offset(obj->btf, vt->name_off);
+ int var_linkage = btf_var(vt)->linkage;
+ Elf64_Sym *sym;
+
+ /* no need to patch up static or extern vars */
+ if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED)
+ continue;
+
+ sym = find_sym_by_name(obj, sec->sec_idx, STT_OBJECT, var_name);
+ if (!sym) {
+ pr_warn("failed to find symbol for variable '%s' in section '%s'\n", var_name, sec_name);
+ return -ENOENT;
+ }
+
+ vi->offset = sym->st_value;
+ }
+ }
+
+ return 0;
+}
+
+static int remap_type_id(__u32 *type_id, void *ctx)
+{
+ int *id_map = ctx;
+ int new_id = id_map[*type_id];
+
+ /* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
+ if (new_id == 0 && *type_id != 0) {
+ pr_warn("failed to find new ID mapping for original BTF type ID %u\n", *type_id);
+ return -EINVAL;
+ }
+
+ *type_id = id_map[*type_id];
+
+ return 0;
+}
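+
+/* id_map is indexed by the source BTF type ID: id_map[i] == j means that
+ * type #i in the source object became type #j in the linker's BTF. Entries
+ * left at 0 were never mapped, which is only legal for VOID (ID 0).
+ */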
+
+static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
+{
+ const struct btf_type *t;
+ int i, j, n, start_id, id;
+ const char *name;
+
+ if (!obj->btf)
+ return 0;
+
+ start_id = btf__get_nr_types(linker->btf) + 1;
+ n = btf__get_nr_types(obj->btf);
+
+ obj->btf_type_map = calloc(n + 1, sizeof(int));
+ if (!obj->btf_type_map)
+ return -ENOMEM;
+
+ for (i = 1; i <= n; i++) {
+ struct glob_sym *glob_sym = NULL;
+
+ t = btf__type_by_id(obj->btf, i);
+
+ /* DATASECs are handled specially below */
+ if (btf_kind(t) == BTF_KIND_DATASEC)
+ continue;
+
+ if (btf_is_non_static(t)) {
+ /* there should be glob_sym already */
+ name = btf__str_by_offset(obj->btf, t->name_off);
+ glob_sym = find_glob_sym(linker, name);
+
+ /* VARs without corresponding glob_sym are those that
+ * belong to skipped/deduplicated sections (i.e.,
+ * license and version), so just skip them
+ */
+ if (!glob_sym)
+ continue;
+
+ /* linker_append_elf_sym() might have requested
+ * updating the underlying type ID, if an extern was resolved
+ * to a strong symbol or a weak one got upgraded to non-weak
+ */
+ if (glob_sym->underlying_btf_id == 0)
+ glob_sym->underlying_btf_id = -t->type;
+
+ /* globals from previous object files that match our
+ * VAR/FUNC already have a corresponding associated
+ * BTF type, so just make sure to use it
+ */
+ if (glob_sym->btf_id) {
+ /* reuse existing BTF type for global var/func */
+ obj->btf_type_map[i] = glob_sym->btf_id;
+ continue;
+ }
+ }
+
+ id = btf__add_type(linker->btf, obj->btf, t);
+ if (id < 0) {
+ pr_warn("failed to append BTF type #%d from file '%s'\n", i, obj->filename);
+ return id;
+ }
+
+ obj->btf_type_map[i] = id;
+
+ /* record just appended BTF type for var/func */
+ if (glob_sym) {
+ glob_sym->btf_id = id;
+ glob_sym->underlying_btf_id = -t->type;
+ }
+ }
+
+ /* remap all the types except DATASECs */
+ n = btf__get_nr_types(linker->btf);
+ for (i = start_id; i <= n; i++) {
+ struct btf_type *dst_t = btf_type_by_id(linker->btf, i);
+
+ if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map))
+ return -EINVAL;
+ }
+
+ /* Rewrite VAR/FUNC underlying types (i.e., FUNC's FUNC_PROTO and VAR's
+ * actual type), if necessary
+ */
+ for (i = 0; i < linker->glob_sym_cnt; i++) {
+ struct glob_sym *glob_sym = &linker->glob_syms[i];
+ struct btf_type *glob_t;
+
+ if (glob_sym->underlying_btf_id >= 0)
+ continue;
+
+ glob_sym->underlying_btf_id = obj->btf_type_map[-glob_sym->underlying_btf_id];
+
+ glob_t = btf_type_by_id(linker->btf, glob_sym->btf_id);
+ glob_t->type = glob_sym->underlying_btf_id;
+ }
+
+ /* append DATASEC info */
+ for (i = 1; i < obj->sec_cnt; i++) {
+ struct src_sec *src_sec;
+ struct dst_sec *dst_sec;
+ const struct btf_var_secinfo *src_var;
+ struct btf_var_secinfo *dst_var;
+
+ src_sec = &obj->secs[i];
+ if (!src_sec->sec_type_id || src_sec->skipped)
+ continue;
+ dst_sec = &linker->secs[src_sec->dst_id];
+
+ /* Mark section as having BTF regardless of the presence of
+ * variables. In some cases the compiler might generate empty BTF
+ * with no variables information. E.g., when promoting local
+ * array/structure variable initial values and BPF object
+ * file otherwise has no read-only static variables in
+ * .rodata. We need to preserve such empty BTF and just set
+ * correct section size.
+ */
+ dst_sec->has_btf = true;
+
+ t = btf__type_by_id(obj->btf, src_sec->sec_type_id);
+ src_var = btf_var_secinfos(t);
+ n = btf_vlen(t);
+ for (j = 0; j < n; j++, src_var++) {
+ void *sec_vars = dst_sec->sec_vars;
+ int new_id = obj->btf_type_map[src_var->type];
+ struct glob_sym *glob_sym = NULL;
+
+ t = btf_type_by_id(linker->btf, new_id);
+ if (btf_is_non_static(t)) {
+ name = btf__str_by_offset(linker->btf, t->name_off);
+ glob_sym = find_glob_sym(linker, name);
+ if (glob_sym->sec_id != dst_sec->id) {
+ pr_warn("global '%s': section mismatch %d vs %d\n",
+ name, glob_sym->sec_id, dst_sec->id);
+ return -EINVAL;
+ }
+ }
+
+ /* If there is already a member (VAR or FUNC) mapped
+ * to the same type, don't add a duplicate entry.
+ * This will happen when multiple object files define
+ * the same extern VARs/FUNCs.
+ */
+ if (glob_sym && glob_sym->var_idx >= 0) {
+ __s64 sz;
+
+ dst_var = &dst_sec->sec_vars[glob_sym->var_idx];
+ /* The underlying BTF type might have changed,
+ * and with it its size, so re-calculate the size
+ * and update it in sec_var.
+ */
+ sz = btf__resolve_size(linker->btf, glob_sym->underlying_btf_id);
+ if (sz < 0) {
+ pr_warn("global '%s': failed to resolve size of underlying type: %d\n",
+ name, (int)sz);
+ return -EINVAL;
+ }
+ dst_var->size = sz;
+ continue;
+ }
+
+ sec_vars = libbpf_reallocarray(sec_vars,
+ dst_sec->sec_var_cnt + 1,
+ sizeof(*dst_sec->sec_vars));
+ if (!sec_vars)
+ return -ENOMEM;
+
+ dst_sec->sec_vars = sec_vars;
+ dst_sec->sec_var_cnt++;
+
+ dst_var = &dst_sec->sec_vars[dst_sec->sec_var_cnt - 1];
+ dst_var->type = obj->btf_type_map[src_var->type];
+ dst_var->size = src_var->size;
+ dst_var->offset = src_sec->dst_off + src_var->offset;
+
+ if (glob_sym)
+ glob_sym->var_idx = dst_sec->sec_var_cnt - 1;
+ }
+ }
+
+ return 0;
+}
+
+static void *add_btf_ext_rec(struct btf_ext_sec_data *ext_data, const void *src_rec)
+{
+ void *tmp;
+
+ tmp = libbpf_reallocarray(ext_data->recs, ext_data->rec_cnt + 1, ext_data->rec_sz);
+ if (!tmp)
+ return NULL;
+ ext_data->recs = tmp;
+
+ tmp += ext_data->rec_cnt * ext_data->rec_sz;
+ memcpy(tmp, src_rec, ext_data->rec_sz);
+
+ ext_data->rec_cnt++;
+
+ return tmp;
+}
+
+static int linker_append_btf_ext(struct bpf_linker *linker, struct src_obj *obj)
+{
+ const struct btf_ext_info_sec *ext_sec;
+ const char *sec_name, *s;
+ struct src_sec *src_sec;
+ struct dst_sec *dst_sec;
+ int rec_sz, str_off, i;
+
+ if (!obj->btf_ext)
+ return 0;
+
+ rec_sz = obj->btf_ext->func_info.rec_size;
+ for_each_btf_ext_sec(&obj->btf_ext->func_info, ext_sec) {
+ struct bpf_func_info_min *src_rec, *dst_rec;
+
+ sec_name = btf__name_by_offset(obj->btf, ext_sec->sec_name_off);
+ src_sec = find_src_sec_by_name(obj, sec_name);
+ if (!src_sec) {
+ pr_warn("can't find section '%s' referenced from .BTF.ext\n", sec_name);
+ return -EINVAL;
+ }
+ dst_sec = &linker->secs[src_sec->dst_id];
+
+ if (dst_sec->func_info.rec_sz == 0)
+ dst_sec->func_info.rec_sz = rec_sz;
+ if (dst_sec->func_info.rec_sz != rec_sz) {
+ pr_warn("incompatible .BTF.ext record sizes for section '%s'\n", sec_name);
+ return -EINVAL;
+ }
+
+ for_each_btf_ext_rec(&obj->btf_ext->func_info, ext_sec, i, src_rec) {
+ dst_rec = add_btf_ext_rec(&dst_sec->func_info, src_rec);
+ if (!dst_rec)
+ return -ENOMEM;
+
+ dst_rec->insn_off += src_sec->dst_off;
+ dst_rec->type_id = obj->btf_type_map[dst_rec->type_id];
+ }
+ }
+
+ rec_sz = obj->btf_ext->line_info.rec_size;
+ for_each_btf_ext_sec(&obj->btf_ext->line_info, ext_sec) {
+ struct bpf_line_info_min *src_rec, *dst_rec;
+
+ sec_name = btf__name_by_offset(obj->btf, ext_sec->sec_name_off);
+ src_sec = find_src_sec_by_name(obj, sec_name);
+ if (!src_sec) {
+ pr_warn("can't find section '%s' referenced from .BTF.ext\n", sec_name);
+ return -EINVAL;
+ }
+ dst_sec = &linker->secs[src_sec->dst_id];
+
+ if (dst_sec->line_info.rec_sz == 0)
+ dst_sec->line_info.rec_sz = rec_sz;
+ if (dst_sec->line_info.rec_sz != rec_sz) {
+ pr_warn("incompatible .BTF.ext record sizes for section '%s'\n", sec_name);
+ return -EINVAL;
+ }
+
+ for_each_btf_ext_rec(&obj->btf_ext->line_info, ext_sec, i, src_rec) {
+ dst_rec = add_btf_ext_rec(&dst_sec->line_info, src_rec);
+ if (!dst_rec)
+ return -ENOMEM;
+
+ dst_rec->insn_off += src_sec->dst_off;
+
+ s = btf__str_by_offset(obj->btf, src_rec->file_name_off);
+ str_off = btf__add_str(linker->btf, s);
+ if (str_off < 0)
+ return -ENOMEM;
+ dst_rec->file_name_off = str_off;
+
+ s = btf__str_by_offset(obj->btf, src_rec->line_off);
+ str_off = btf__add_str(linker->btf, s);
+ if (str_off < 0)
+ return -ENOMEM;
+ dst_rec->line_off = str_off;
+
+ /* dst_rec->line_col is fine */
+ }
+ }
+
+ rec_sz = obj->btf_ext->core_relo_info.rec_size;
+ for_each_btf_ext_sec(&obj->btf_ext->core_relo_info, ext_sec) {
+ struct bpf_core_relo *src_rec, *dst_rec;
+
+ sec_name = btf__name_by_offset(obj->btf, ext_sec->sec_name_off);
+ src_sec = find_src_sec_by_name(obj, sec_name);
+ if (!src_sec) {
+ pr_warn("can't find section '%s' referenced from .BTF.ext\n", sec_name);
+ return -EINVAL;
+ }
+ dst_sec = &linker->secs[src_sec->dst_id];
+
+ if (dst_sec->core_relo_info.rec_sz == 0)
+ dst_sec->core_relo_info.rec_sz = rec_sz;
+ if (dst_sec->core_relo_info.rec_sz != rec_sz) {
+ pr_warn("incompatible .BTF.ext record sizes for section '%s'\n", sec_name);
+ return -EINVAL;
+ }
+
+ for_each_btf_ext_rec(&obj->btf_ext->core_relo_info, ext_sec, i, src_rec) {
+ dst_rec = add_btf_ext_rec(&dst_sec->core_relo_info, src_rec);
+ if (!dst_rec)
+ return -ENOMEM;
+
+ dst_rec->insn_off += src_sec->dst_off;
+ dst_rec->type_id = obj->btf_type_map[dst_rec->type_id];
+
+ s = btf__str_by_offset(obj->btf, src_rec->access_str_off);
+ str_off = btf__add_str(linker->btf, s);
+ if (str_off < 0)
+ return -ENOMEM;
+ dst_rec->access_str_off = str_off;
+
+ /* dst_rec->kind is fine */
+ }
+ }
+
+ return 0;
+}
+
+int bpf_linker__finalize(struct bpf_linker *linker)
+{
+ struct dst_sec *sec;
+ size_t strs_sz;
+ const void *strs;
+ int err, i;
+
+ if (!linker->elf)
+ return -EINVAL;
+
+ err = finalize_btf(linker);
+ if (err)
+ return err;
+
+ /* Finalize strings */
+ strs_sz = strset__data_size(linker->strtab_strs);
+ strs = strset__data(linker->strtab_strs);
+
+ sec = &linker->secs[linker->strtab_sec_idx];
+ sec->data->d_align = 1;
+ sec->data->d_off = 0LL;
+ sec->data->d_buf = (void *)strs;
+ sec->data->d_type = ELF_T_BYTE;
+ sec->data->d_size = strs_sz;
+ sec->shdr->sh_size = strs_sz;
+
+ for (i = 1; i < linker->sec_cnt; i++) {
+ sec = &linker->secs[i];
+
+ /* STRTAB is handled specially above */
+ if (sec->sec_idx == linker->strtab_sec_idx)
+ continue;
+
+ /* special ephemeral sections (.ksyms, .kconfig, etc) */
+ if (!sec->scn)
+ continue;
+
+ sec->data->d_buf = sec->raw_data;
+ }
+
+ /* Finalize ELF layout */
+ if (elf_update(linker->elf, ELF_C_NULL) < 0) {
+ err = -errno;
+ pr_warn_elf("failed to finalize ELF layout");
+ return err;
+ }
+
+ /* Write out final ELF contents */
+ if (elf_update(linker->elf, ELF_C_WRITE) < 0) {
+ err = -errno;
+ pr_warn_elf("failed to write ELF contents");
+ return err;
+ }
+
+ elf_end(linker->elf);
+ close(linker->fd);
+
+ linker->elf = NULL;
+ linker->fd = -1;
+
+ return 0;
+}
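+
+/* A minimal end-to-end usage sketch, assuming the bpf_linker__new(),
+ * bpf_linker__add_file() and bpf_linker__free() entry points declared in
+ * libbpf.h alongside bpf_linker__finalize():
+ *
+ *   struct bpf_linker *linker = bpf_linker__new("combined.bpf.o", NULL);
+ *
+ *   if (!libbpf_get_error(linker)) {
+ *           bpf_linker__add_file(linker, "a.bpf.o");
+ *           bpf_linker__add_file(linker, "b.bpf.o");
+ *           bpf_linker__finalize(linker);
+ *           bpf_linker__free(linker);
+ *   }
+ */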
+
+static int emit_elf_data_sec(struct bpf_linker *linker, const char *sec_name,
+ size_t align, const void *raw_data, size_t raw_sz)
+{
+ Elf_Scn *scn;
+ Elf_Data *data;
+ Elf64_Shdr *shdr;
+ int name_off;
+
+ name_off = strset__add_str(linker->strtab_strs, sec_name);
+ if (name_off < 0)
+ return name_off;
+
+ scn = elf_newscn(linker->elf);
+ if (!scn)
+ return -ENOMEM;
+ data = elf_newdata(scn);
+ if (!data)
+ return -ENOMEM;
+ shdr = elf64_getshdr(scn);
+ if (!shdr)
+ return -EINVAL;
+
+ shdr->sh_name = name_off;
+ shdr->sh_type = SHT_PROGBITS;
+ shdr->sh_flags = 0;
+ shdr->sh_size = raw_sz;
+ shdr->sh_link = 0;
+ shdr->sh_info = 0;
+ shdr->sh_addralign = align;
+ shdr->sh_entsize = 0;
+
+ data->d_type = ELF_T_BYTE;
+ data->d_size = raw_sz;
+ data->d_buf = (void *)raw_data;
+ data->d_align = align;
+ data->d_off = 0;
+
+ return 0;
+}
+
+static int finalize_btf(struct bpf_linker *linker)
+{
+ struct btf *btf = linker->btf;
+ const void *raw_data;
+ int i, j, id, err;
+ __u32 raw_sz;
+
+ /* bail out if no BTF data was produced */
+ if (btf__get_nr_types(linker->btf) == 0)
+ return 0;
+
+ for (i = 1; i < linker->sec_cnt; i++) {
+ struct dst_sec *sec = &linker->secs[i];
+
+ if (!sec->has_btf)
+ continue;
+
+ id = btf__add_datasec(btf, sec->sec_name, sec->sec_sz);
+ if (id < 0) {
+ pr_warn("failed to add consolidated BTF type for datasec '%s': %d\n",
+ sec->sec_name, id);
+ return id;
+ }
+
+ for (j = 0; j < sec->sec_var_cnt; j++) {
+ struct btf_var_secinfo *vi = &sec->sec_vars[j];
+
+ if (btf__add_datasec_var_info(btf, vi->type, vi->offset, vi->size))
+ return -EINVAL;
+ }
+ }
+
+ err = finalize_btf_ext(linker);
+ if (err) {
+ pr_warn(".BTF.ext generation failed: %d\n", err);
+ return err;
+ }
+
+ err = btf__dedup(linker->btf, linker->btf_ext, NULL);
+ if (err) {
+ pr_warn("BTF dedup failed: %d\n", err);
+ return err;
+ }
+
+ /* Emit .BTF section */
+ raw_data = btf__get_raw_data(linker->btf, &raw_sz);
+ if (!raw_data)
+ return -ENOMEM;
+
+ err = emit_elf_data_sec(linker, BTF_ELF_SEC, 8, raw_data, raw_sz);
+ if (err) {
+ pr_warn("failed to write out .BTF ELF section: %d\n", err);
+ return err;
+ }
+
+ /* Emit .BTF.ext section */
+ if (linker->btf_ext) {
+ raw_data = btf_ext__get_raw_data(linker->btf_ext, &raw_sz);
+ if (!raw_data)
+ return -ENOMEM;
+
+ err = emit_elf_data_sec(linker, BTF_EXT_ELF_SEC, 8, raw_data, raw_sz);
+ if (err) {
+ pr_warn("failed to write out .BTF.ext ELF section: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int emit_btf_ext_data(struct bpf_linker *linker, void *output,
+ const char *sec_name, struct btf_ext_sec_data *sec_data)
+{
+ struct btf_ext_info_sec *sec_info;
+ void *cur = output;
+ int str_off;
+ size_t sz;
+
+ if (!sec_data->rec_cnt)
+ return 0;
+
+ str_off = btf__add_str(linker->btf, sec_name);
+ if (str_off < 0)
+ return -ENOMEM;
+
+ sec_info = cur;
+ sec_info->sec_name_off = str_off;
+ sec_info->num_info = sec_data->rec_cnt;
+ cur += sizeof(struct btf_ext_info_sec);
+
+ sz = sec_data->rec_cnt * sec_data->rec_sz;
+ memcpy(cur, sec_data->recs, sz);
+ cur += sz;
+
+ return cur - output;
+}
+
+static int finalize_btf_ext(struct bpf_linker *linker)
+{
+ size_t funcs_sz = 0, lines_sz = 0, core_relos_sz = 0, total_sz = 0;
+ size_t func_rec_sz = 0, line_rec_sz = 0, core_relo_rec_sz = 0;
+ struct btf_ext_header *hdr;
+ void *data, *cur;
+ int i, err, sz;
+
+ /* validate that all sections have the same .BTF.ext record sizes
+ * and calculate total data size for each type of data (func info,
+ * line info, core relos)
+ */
+ for (i = 1; i < linker->sec_cnt; i++) {
+ struct dst_sec *sec = &linker->secs[i];
+
+ if (sec->func_info.rec_cnt) {
+ if (func_rec_sz == 0)
+ func_rec_sz = sec->func_info.rec_sz;
+ if (func_rec_sz != sec->func_info.rec_sz) {
+ pr_warn("mismatch in func_info record size %zu != %u\n",
+ func_rec_sz, sec->func_info.rec_sz);
+ return -EINVAL;
+ }
+
+ funcs_sz += sizeof(struct btf_ext_info_sec) + func_rec_sz * sec->func_info.rec_cnt;
+ }
+ if (sec->line_info.rec_cnt) {
+ if (line_rec_sz == 0)
+ line_rec_sz = sec->line_info.rec_sz;
+ if (line_rec_sz != sec->line_info.rec_sz) {
+ pr_warn("mismatch in line_info record size %zu != %u\n",
+ line_rec_sz, sec->line_info.rec_sz);
+ return -EINVAL;
+ }
+
+ lines_sz += sizeof(struct btf_ext_info_sec) + line_rec_sz * sec->line_info.rec_cnt;
+ }
+ if (sec->core_relo_info.rec_cnt) {
+ if (core_relo_rec_sz == 0)
+ core_relo_rec_sz = sec->core_relo_info.rec_sz;
+ if (core_relo_rec_sz != sec->core_relo_info.rec_sz) {
+ pr_warn("mismatch in core_relo_info record size %zu != %u\n",
+ core_relo_rec_sz, sec->core_relo_info.rec_sz);
+ return -EINVAL;
+ }
+
+ core_relos_sz += sizeof(struct btf_ext_info_sec) + core_relo_rec_sz * sec->core_relo_info.rec_cnt;
+ }
+ }
+
+ if (!funcs_sz && !lines_sz && !core_relos_sz)
+ return 0;
+
+ total_sz += sizeof(struct btf_ext_header);
+ if (funcs_sz) {
+ funcs_sz += sizeof(__u32); /* record size prefix */
+ total_sz += funcs_sz;
+ }
+ if (lines_sz) {
+ lines_sz += sizeof(__u32); /* record size prefix */
+ total_sz += lines_sz;
+ }
+ if (core_relos_sz) {
+ core_relos_sz += sizeof(__u32); /* record size prefix */
+ total_sz += core_relos_sz;
+ }
+
+ cur = data = calloc(1, total_sz);
+ if (!data)
+ return -ENOMEM;
+
+ hdr = cur;
+ hdr->magic = BTF_MAGIC;
+ hdr->version = BTF_VERSION;
+ hdr->flags = 0;
+ hdr->hdr_len = sizeof(struct btf_ext_header);
+ cur += sizeof(struct btf_ext_header);
+
+ /* All offsets are in bytes relative to the end of this header */
+ hdr->func_info_off = 0;
+ hdr->func_info_len = funcs_sz;
+ hdr->line_info_off = funcs_sz;
+ hdr->line_info_len = lines_sz;
+ hdr->core_relo_off = funcs_sz + lines_sz;
+ hdr->core_relo_len = core_relos_sz;
+
+ if (funcs_sz) {
+ *(__u32 *)cur = func_rec_sz;
+ cur += sizeof(__u32);
+
+ for (i = 1; i < linker->sec_cnt; i++) {
+ struct dst_sec *sec = &linker->secs[i];
+
+ sz = emit_btf_ext_data(linker, cur, sec->sec_name, &sec->func_info);
+ if (sz < 0) {
+ err = sz;
+ goto out;
+ }
+
+ cur += sz;
+ }
+ }
+
+ if (lines_sz) {
+ *(__u32 *)cur = line_rec_sz;
+ cur += sizeof(__u32);
+
+ for (i = 1; i < linker->sec_cnt; i++) {
+ struct dst_sec *sec = &linker->secs[i];
+
+ sz = emit_btf_ext_data(linker, cur, sec->sec_name, &sec->line_info);
+ if (sz < 0) {
+ err = sz;
+ goto out;
+ }
+
+ cur += sz;
+ }
+ }
+
+ if (core_relos_sz) {
+ *(__u32 *)cur = core_relo_rec_sz;
+ cur += sizeof(__u32);
+
+ for (i = 1; i < linker->sec_cnt; i++) {
+ struct dst_sec *sec = &linker->secs[i];
+
+ sz = emit_btf_ext_data(linker, cur, sec->sec_name, &sec->core_relo_info);
+ if (sz < 0) {
+ err = sz;
+ goto out;
+ }
+
+ cur += sz;
+ }
+ }
+
+ linker->btf_ext = btf_ext__new(data, total_sz);
+ err = libbpf_get_error(linker->btf_ext);
+ if (err) {
+ linker->btf_ext = NULL;
+ pr_warn("failed to parse final .BTF.ext data: %d\n", err);
+ goto out;
+ }
+
+out:
+ free(data);
+ return err;
+}
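+
+/* A sketch of the resulting .BTF.ext image (each part is present only
+ * if it has data; as noted above, all offsets are relative to the end
+ * of the header):
+ *
+ *	struct btf_ext_header
+ *	func info:  __u32 record size, then btf_ext_info_sec + records, per section
+ *	line info:  __u32 record size, then btf_ext_info_sec + records, per section
+ *	core relos: __u32 record size, then btf_ext_info_sec + records, per section
+ */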
diff --git a/tools/lib/bpf/strset.c b/tools/lib/bpf/strset.c
new file mode 100644
index 000000000000..1fb8b49de1d6
--- /dev/null
+++ b/tools/lib/bpf/strset.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2021 Facebook */
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <linux/err.h>
+#include "hashmap.h"
+#include "libbpf_internal.h"
+#include "strset.h"
+
+struct strset {
+ void *strs_data;
+ size_t strs_data_len;
+ size_t strs_data_cap;
+ size_t strs_data_max_len;
+
+ /* lookup index for each unique string in strings set */
+ struct hashmap *strs_hash;
+};
+
+static size_t strset_hash_fn(const void *key, void *ctx)
+{
+ const struct strset *s = ctx;
+ const char *str = s->strs_data + (long)key;
+
+ return str_hash(str);
+}
+
+static bool strset_equal_fn(const void *key1, const void *key2, void *ctx)
+{
+ const struct strset *s = ctx;
+ const char *str1 = s->strs_data + (long)key1;
+ const char *str2 = s->strs_data + (long)key2;
+
+ return strcmp(str1, str2) == 0;
+}
+
+struct strset *strset__new(size_t max_data_sz, const char *init_data, size_t init_data_sz)
+{
+ struct strset *set = calloc(1, sizeof(*set));
+ struct hashmap *hash;
+ int err = -ENOMEM;
+
+ if (!set)
+ return ERR_PTR(-ENOMEM);
+
+ hash = hashmap__new(strset_hash_fn, strset_equal_fn, set);
+ if (IS_ERR(hash))
+ goto err_out;
+
+ set->strs_data_max_len = max_data_sz;
+ set->strs_hash = hash;
+
+ if (init_data) {
+ long off;
+
+ set->strs_data = malloc(init_data_sz);
+ if (!set->strs_data)
+ goto err_out;
+
+ memcpy(set->strs_data, init_data, init_data_sz);
+ set->strs_data_len = init_data_sz;
+ set->strs_data_cap = init_data_sz;
+
+ for (off = 0; off < set->strs_data_len; off += strlen(set->strs_data + off) + 1) {
+ /* hashmap__add() returns EEXIST if string with the same
+ * content already is in the hash map
+ */
+ err = hashmap__add(hash, (void *)off, (void *)off);
+ if (err == -EEXIST)
+ continue; /* duplicate */
+ if (err)
+ goto err_out;
+ }
+ }
+
+ return set;
+err_out:
+ strset__free(set);
+ return ERR_PTR(err);
+}
+
+void strset__free(struct strset *set)
+{
+ if (IS_ERR_OR_NULL(set))
+ return;
+
+ hashmap__free(set->strs_hash);
+ free(set->strs_data);
+}
+
+size_t strset__data_size(const struct strset *set)
+{
+ return set->strs_data_len;
+}
+
+const char *strset__data(const struct strset *set)
+{
+ return set->strs_data;
+}
+
+static void *strset_add_str_mem(struct strset *set, size_t add_sz)
+{
+ return libbpf_add_mem(&set->strs_data, &set->strs_data_cap, 1,
+ set->strs_data_len, set->strs_data_max_len, add_sz);
+}
+
+/* Find string offset that corresponds to a given string *s*.
+ * Returns:
+ * - >0 offset into string data, if string is found;
+ * - -ENOENT, if string is not in the string data;
+ * - <0, on any other error.
+ */
+int strset__find_str(struct strset *set, const char *s)
+{
+ long old_off, new_off, len;
+ void *p;
+
+ /* see strset__add_str() for why we do this */
+ len = strlen(s) + 1;
+ p = strset_add_str_mem(set, len);
+ if (!p)
+ return -ENOMEM;
+
+ new_off = set->strs_data_len;
+ memcpy(p, s, len);
+
+ if (hashmap__find(set->strs_hash, (void *)new_off, (void **)&old_off))
+ return old_off;
+
+ return -ENOENT;
+}
+
+/* Add a string s to the string data. If the string already exists, return its
+ * offset within string data.
+ * Returns:
+ * - > 0 offset into string data, on success;
+ * - < 0, on error.
+ */
+int strset__add_str(struct strset *set, const char *s)
+{
+ long old_off, new_off, len;
+ void *p;
+ int err;
+
+ /* Hashmap keys are always offsets within set->strs_data, so to even
+ * look up some string from the "outside", we need to first append it
+ * at the end, so that it can be addressed with an offset. Luckily,
+ * until set->strs_data_len is incremented, that string is just a piece
+ * of garbage for the rest of the code, so no harm, no foul. On the
+ * other hand, if the string is unique, it's already appended and
+ * ready to be used, only a simple set->strs_data_len increment away.
+ */
+ len = strlen(s) + 1;
+ p = strset_add_str_mem(set, len);
+ if (!p)
+ return -ENOMEM;
+
+ new_off = set->strs_data_len;
+ memcpy(p, s, len);
+
+ /* Now attempt to add the string, but only if the string with the same
+ * contents doesn't exist already (HASHMAP_ADD strategy). If such
+ * string exists, we'll get its offset in old_off (that's old_key).
+ */
+ err = hashmap__insert(set->strs_hash, (void *)new_off, (void *)new_off,
+ HASHMAP_ADD, (const void **)&old_off, NULL);
+ if (err == -EEXIST)
+ return old_off; /* duplicated string, return existing offset */
+ if (err)
+ return err;
+
+ set->strs_data_len += len; /* new unique string, adjust data length */
+ return new_off;
+}
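+
+/* Usage sketch (illustrative only; error handling elided):
+ *
+ *	struct strset *set = strset__new(INT_MAX, NULL, 0);
+ *	int off1 = strset__add_str(set, "foo");
+ *	int off2 = strset__add_str(set, "foo");
+ *
+ * Here off1 == off2, since the duplicate is deduplicated, while
+ * strset__find_str(set, "bar") returns -ENOENT because "bar" was never
+ * added; strset__free(set) releases everything.
+ */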
diff --git a/tools/lib/bpf/strset.h b/tools/lib/bpf/strset.h
new file mode 100644
index 000000000000..b6ddf77a83c2
--- /dev/null
+++ b/tools/lib/bpf/strset.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+/* Copyright (c) 2021 Facebook */
+#ifndef __LIBBPF_STRSET_H
+#define __LIBBPF_STRSET_H
+
+#include <stdbool.h>
+#include <stddef.h>
+
+struct strset;
+
+struct strset *strset__new(size_t max_data_sz, const char *init_data, size_t init_data_sz);
+void strset__free(struct strset *set);
+
+const char *strset__data(const struct strset *set);
+size_t strset__data_size(const struct strset *set);
+
+int strset__find_str(struct strset *set, const char *s);
+int strset__add_str(struct strset *set, const char *s);
+
+#endif /* __LIBBPF_STRSET_H */
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 007fe5d59438..6061431ee04c 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -28,6 +28,7 @@
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
+#include <linux/if_link.h>
#include "bpf.h"
#include "libbpf.h"
@@ -72,8 +73,10 @@ struct xsk_ctx {
int ifindex;
struct list_head list;
int prog_fd;
+ int link_fd;
int xsks_map_fd;
char ifname[IFNAMSIZ];
+ bool has_bpf_link;
};
struct xsk_socket {
@@ -411,7 +414,7 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
static const int log_buf_size = 16 * 1024;
struct xsk_ctx *ctx = xsk->ctx;
char log_buf[log_buf_size];
- int err, prog_fd;
+ int prog_fd;
/* This is the fallback C-program:
* SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
@@ -501,14 +504,41 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
return prog_fd;
}
- err = bpf_set_link_xdp_fd(xsk->ctx->ifindex, prog_fd,
- xsk->config.xdp_flags);
+ ctx->prog_fd = prog_fd;
+ return 0;
+}
+
+static int xsk_create_bpf_link(struct xsk_socket *xsk)
+{
+ DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
+ struct xsk_ctx *ctx = xsk->ctx;
+ __u32 prog_id = 0;
+ int link_fd;
+ int err;
+
+ err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id, xsk->config.xdp_flags);
if (err) {
- close(prog_fd);
+ pr_warn("getting XDP prog id failed\n");
return err;
}
- ctx->prog_fd = prog_fd;
+ /* if there's a netlink-based XDP prog loaded on the interface, bail
+ * out and ask the user to remove it themselves
+ */
+ if (prog_id) {
+ pr_warn("Netlink-based XDP prog detected, please unload it in order to launch AF_XDP prog\n");
+ return -EINVAL;
+ }
+
+ opts.flags = xsk->config.xdp_flags & ~(XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_REPLACE);
+
+ link_fd = bpf_link_create(ctx->prog_fd, ctx->ifindex, BPF_XDP, &opts);
+ if (link_fd < 0) {
+ pr_warn("bpf_link_create failed: %s\n", strerror(errno));
+ return link_fd;
+ }
+
+ ctx->link_fd = link_fd;
return 0;
}
@@ -627,7 +657,6 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
close(fd);
}
- err = 0;
if (ctx->xsks_map_fd == -1)
err = -ENOENT;
@@ -644,6 +673,98 @@ static int xsk_set_bpf_maps(struct xsk_socket *xsk)
&xsk->fd, 0);
}
+static int xsk_link_lookup(int ifindex, __u32 *prog_id, int *link_fd)
+{
+ struct bpf_link_info link_info;
+ __u32 link_len;
+ __u32 id = 0;
+ int err;
+ int fd;
+
+ while (true) {
+ err = bpf_link_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT) {
+ err = 0;
+ break;
+ }
+ pr_warn("can't get next link: %s\n", strerror(errno));
+ break;
+ }
+
+ fd = bpf_link_get_fd_by_id(id);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ continue;
+ pr_warn("can't get link by id (%u): %s\n", id, strerror(errno));
+ err = -errno;
+ break;
+ }
+
+ link_len = sizeof(struct bpf_link_info);
+ memset(&link_info, 0, link_len);
+ err = bpf_obj_get_info_by_fd(fd, &link_info, &link_len);
+ if (err) {
+ pr_warn("can't get link info: %s\n", strerror(errno));
+ close(fd);
+ break;
+ }
+ if (link_info.type == BPF_LINK_TYPE_XDP) {
+ if (link_info.xdp.ifindex == ifindex) {
+ *link_fd = fd;
+ if (prog_id)
+ *prog_id = link_info.prog_id;
+ break;
+ }
+ }
+ close(fd);
+ }
+
+ return err;
+}
+
+static bool xsk_probe_bpf_link(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
+ .flags = XDP_FLAGS_SKB_MODE);
+ struct bpf_load_program_attr prog_attr;
+ struct bpf_insn insns[2] = {
+ BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
+ BPF_EXIT_INSN()
+ };
+ int prog_fd, link_fd = -1;
+ int ifindex_lo = 1;
+ bool ret = false;
+ int err;
+
+ err = xsk_link_lookup(ifindex_lo, NULL, &link_fd);
+ if (err)
+ return ret;
+
+ if (link_fd >= 0)
+ return true;
+
+ memset(&prog_attr, 0, sizeof(prog_attr));
+ prog_attr.prog_type = BPF_PROG_TYPE_XDP;
+ prog_attr.insns = insns;
+ prog_attr.insns_cnt = ARRAY_SIZE(insns);
+ prog_attr.license = "GPL";
+
+ prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0);
+ if (prog_fd < 0)
+ return ret;
+
+ link_fd = bpf_link_create(prog_fd, ifindex_lo, BPF_XDP, &opts);
+ close(prog_fd);
+
+ if (link_fd >= 0) {
+ ret = true;
+ close(link_fd);
+ }
+
+ return ret;
+}
+
static int xsk_create_xsk_struct(int ifindex, struct xsk_socket *xsk)
{
char ifname[IFNAMSIZ];
@@ -665,64 +786,108 @@ static int xsk_create_xsk_struct(int ifindex, struct xsk_socket *xsk)
ctx->ifname[IFNAMSIZ - 1] = 0;
xsk->ctx = ctx;
+ xsk->ctx->has_bpf_link = xsk_probe_bpf_link();
return 0;
}
-static int __xsk_setup_xdp_prog(struct xsk_socket *_xdp,
- int *xsks_map_fd)
+static int xsk_init_xdp_res(struct xsk_socket *xsk,
+ int *xsks_map_fd)
{
- struct xsk_socket *xsk = _xdp;
struct xsk_ctx *ctx = xsk->ctx;
- __u32 prog_id = 0;
int err;
- err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id,
- xsk->config.xdp_flags);
+ err = xsk_create_bpf_maps(xsk);
if (err)
return err;
- if (!prog_id) {
- err = xsk_create_bpf_maps(xsk);
- if (err)
- return err;
+ err = xsk_load_xdp_prog(xsk);
+ if (err)
+ goto err_load_xdp_prog;
- err = xsk_load_xdp_prog(xsk);
- if (err) {
- goto err_load_xdp_prog;
- }
- } else {
- ctx->prog_fd = bpf_prog_get_fd_by_id(prog_id);
- if (ctx->prog_fd < 0)
- return -errno;
- err = xsk_lookup_bpf_maps(xsk);
- if (err) {
- close(ctx->prog_fd);
- return err;
- }
- }
+ if (ctx->has_bpf_link)
+ err = xsk_create_bpf_link(xsk);
+ else
+ err = bpf_set_link_xdp_fd(xsk->ctx->ifindex, ctx->prog_fd,
+ xsk->config.xdp_flags);
- if (xsk->rx) {
- err = xsk_set_bpf_maps(xsk);
- if (err) {
- if (!prog_id) {
- goto err_set_bpf_maps;
- } else {
- close(ctx->prog_fd);
- return err;
- }
- }
- }
- if (xsks_map_fd)
- *xsks_map_fd = ctx->xsks_map_fd;
+ if (err)
+ goto err_attach_xdp_prog;
- return 0;
+ if (!xsk->rx)
+ return err;
+
+ err = xsk_set_bpf_maps(xsk);
+ if (err)
+ goto err_set_bpf_maps;
+
+ return err;
err_set_bpf_maps:
+ if (ctx->has_bpf_link)
+ close(ctx->link_fd);
+ else
+ bpf_set_link_xdp_fd(ctx->ifindex, -1, 0);
+err_attach_xdp_prog:
close(ctx->prog_fd);
- bpf_set_link_xdp_fd(ctx->ifindex, -1, 0);
err_load_xdp_prog:
xsk_delete_bpf_maps(xsk);
+ return err;
+}
+
+static int xsk_lookup_xdp_res(struct xsk_socket *xsk, int *xsks_map_fd, int prog_id)
+{
+ struct xsk_ctx *ctx = xsk->ctx;
+ int err;
+
+ ctx->prog_fd = bpf_prog_get_fd_by_id(prog_id);
+ if (ctx->prog_fd < 0) {
+ err = -errno;
+ goto err_prog_fd;
+ }
+ err = xsk_lookup_bpf_maps(xsk);
+ if (err)
+ goto err_lookup_maps;
+
+ if (!xsk->rx)
+ return err;
+
+ err = xsk_set_bpf_maps(xsk);
+ if (err)
+ goto err_set_maps;
+
+ return err;
+
+err_set_maps:
+ close(ctx->xsks_map_fd);
+err_lookup_maps:
+ close(ctx->prog_fd);
+err_prog_fd:
+ if (ctx->has_bpf_link)
+ close(ctx->link_fd);
+ return err;
+}
+
+static int __xsk_setup_xdp_prog(struct xsk_socket *_xdp, int *xsks_map_fd)
+{
+ struct xsk_socket *xsk = _xdp;
+ struct xsk_ctx *ctx = xsk->ctx;
+ __u32 prog_id = 0;
+ int err;
+
+ if (ctx->has_bpf_link)
+ err = xsk_link_lookup(ctx->ifindex, &prog_id, &ctx->link_fd);
+ else
+ err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id, xsk->config.xdp_flags);
+
+ if (err)
+ return err;
+
+ err = !prog_id ? xsk_init_xdp_res(xsk, xsks_map_fd) :
+ xsk_lookup_xdp_res(xsk, xsks_map_fd, prog_id);
+
+ if (!err && xsks_map_fd)
+ *xsks_map_fd = ctx->xsks_map_fd;
return err;
}
@@ -907,6 +1072,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
}
}
xsk->ctx = ctx;
+ xsk->ctx->has_bpf_link = xsk_probe_bpf_link();
if (rx && !rx_setup_done) {
err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
@@ -1072,6 +1238,8 @@ void xsk_socket__delete(struct xsk_socket *xsk)
if (ctx->prog_fd != -1) {
xsk_delete_bpf_maps(xsk);
close(ctx->prog_fd);
+ if (ctx->has_bpf_link)
+ close(ctx->link_fd);
}
err = xsk_get_mmap_offsets(xsk->fd, &off);
diff --git a/tools/lib/bpf/xsk.h b/tools/lib/bpf/xsk.h
index e9f121f5d129..01c12dca9c10 100644
--- a/tools/lib/bpf/xsk.h
+++ b/tools/lib/bpf/xsk.h
@@ -3,7 +3,8 @@
/*
* AF_XDP user-space access library.
*
- * Copyright(c) 2018 - 2019 Intel Corporation.
+ * Copyright (c) 2018 - 2019 Intel Corporation.
+ * Copyright (c) 2019 Facebook
*
* Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
*/
@@ -13,15 +14,80 @@
#include <stdio.h>
#include <stdint.h>
+#include <stdbool.h>
#include <linux/if_xdp.h>
#include "libbpf.h"
-#include "libbpf_util.h"
#ifdef __cplusplus
extern "C" {
#endif
+/* Load-Acquire Store-Release barriers used by the XDP socket
+ * library. The following macros should *NOT* be considered part of
+ * the xsk.h API, and are subject to change at any time.
+ *
+ * LIBRARY INTERNAL
+ */
+
+#define __XSK_READ_ONCE(x) (*(volatile typeof(x) *)&x)
+#define __XSK_WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)
+
+#if defined(__i386__) || defined(__x86_64__)
+# define libbpf_smp_store_release(p, v) \
+ do { \
+ asm volatile("" : : : "memory"); \
+ __XSK_WRITE_ONCE(*p, v); \
+ } while (0)
+# define libbpf_smp_load_acquire(p) \
+ ({ \
+ typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \
+ asm volatile("" : : : "memory"); \
+ ___p1; \
+ })
+#elif defined(__aarch64__)
+# define libbpf_smp_store_release(p, v) \
+ asm volatile ("stlr %w1, %0" : "=Q" (*p) : "r" (v) : "memory")
+# define libbpf_smp_load_acquire(p) \
+ ({ \
+ typeof(*p) ___p1; \
+ asm volatile ("ldar %w0, %1" \
+ : "=r" (___p1) : "Q" (*p) : "memory"); \
+ ___p1; \
+ })
+#elif defined(__riscv)
+# define libbpf_smp_store_release(p, v) \
+ do { \
+ asm volatile ("fence rw,w" : : : "memory"); \
+ __XSK_WRITE_ONCE(*p, v); \
+ } while (0)
+# define libbpf_smp_load_acquire(p) \
+ ({ \
+ typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \
+ asm volatile ("fence r,rw" : : : "memory"); \
+ ___p1; \
+ })
+#endif
+
+#ifndef libbpf_smp_store_release
+#define libbpf_smp_store_release(p, v) \
+ do { \
+ __sync_synchronize(); \
+ __XSK_WRITE_ONCE(*p, v); \
+ } while (0)
+#endif
+
+#ifndef libbpf_smp_load_acquire
+#define libbpf_smp_load_acquire(p) \
+ ({ \
+ typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \
+ __sync_synchronize(); \
+ ___p1; \
+ })
+#endif
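+
+/* Pairing sketch (illustrative): in a single-producer/single-consumer
+ * ring, the producer's store-release of its index pairs with the
+ * consumer's load-acquire of that index:
+ *
+ *	producer:
+ *		ring[idx] = desc;
+ *		libbpf_smp_store_release(r->producer, idx + 1);
+ *	consumer:
+ *		avail = libbpf_smp_load_acquire(r->producer);
+ *		if (cons != avail)
+ *			desc = ring[cons];
+ *
+ * The release orders the descriptor write before the index update; the
+ * acquire orders the index read before the descriptor read. This is
+ * the contract used by xsk_ring_prod__submit() and xsk_cons_nb_avail()
+ * below.
+ */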
+
+/* LIBRARY INTERNAL -- END */
+
/* Do not access these members directly. Use the functions below. */
#define DEFINE_XSK_RING(name) \
struct name { \
@@ -96,7 +162,8 @@ static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
* this function. Without this optimization it would have been
* free_entries = r->cached_prod - r->cached_cons + r->size.
*/
- r->cached_cons = *r->consumer + r->size;
+ r->cached_cons = libbpf_smp_load_acquire(r->consumer);
+ r->cached_cons += r->size;
return r->cached_cons - r->cached_prod;
}
@@ -106,7 +173,7 @@ static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
__u32 entries = r->cached_prod - r->cached_cons;
if (entries == 0) {
- r->cached_prod = *r->producer;
+ r->cached_prod = libbpf_smp_load_acquire(r->producer);
entries = r->cached_prod - r->cached_cons;
}
@@ -129,9 +196,7 @@ static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
/* Make sure everything has been written to the ring before indicating
* this to the kernel by writing the producer pointer.
*/
- libbpf_smp_wmb();
-
- *prod->producer += nb;
+ libbpf_smp_store_release(prod->producer, *prod->producer + nb);
}
static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
@@ -139,11 +204,6 @@ static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __
__u32 entries = xsk_cons_nb_avail(cons, nb);
if (entries > 0) {
- /* Make sure we do not speculatively read the data before
- * we have received the packet buffers from the ring.
- */
- libbpf_smp_rmb();
-
*idx = cons->cached_cons;
cons->cached_cons += entries;
}
@@ -161,9 +221,8 @@ static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb)
/* Make sure data has been read before indicating we are done
* with the entries by updating the consumer pointer.
*/
- libbpf_smp_rwmb();
+ libbpf_smp_store_release(cons->consumer, *cons->consumer + nb);
- *cons->consumer += nb;
}
static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt
index 0c74c30ed23a..63ae5e0195ce 100644
--- a/tools/lib/perf/Documentation/libperf.txt
+++ b/tools/lib/perf/Documentation/libperf.txt
@@ -136,6 +136,9 @@ SYNOPSIS
struct perf_thread_map *threads);
void perf_evsel__close(struct perf_evsel *evsel);
void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu);
+ int perf_evsel__mmap(struct perf_evsel *evsel, int pages);
+ void perf_evsel__munmap(struct perf_evsel *evsel);
+ void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread);
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
struct perf_counts_values *count);
int perf_evsel__enable(struct perf_evsel *evsel);
diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
index 4dc06289f4c7..bd8c2f19ef74 100644
--- a/tools/lib/perf/evsel.c
+++ b/tools/lib/perf/evsel.c
@@ -11,10 +11,12 @@
#include <stdlib.h>
#include <internal/xyarray.h>
#include <internal/cpumap.h>
+#include <internal/mmap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/string.h>
#include <sys/ioctl.h>
+#include <sys/mman.h>
void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr)
{
@@ -38,6 +40,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
}
#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
+#define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
@@ -55,6 +58,13 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
return evsel->fd != NULL ? 0 : -ENOMEM;
}
+static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));
+
+ return evsel->mmap != NULL ? 0 : -ENOMEM;
+}
+
static int
sys_perf_event_open(struct perf_event_attr *attr,
pid_t pid, int cpu, int group_fd,
@@ -156,6 +166,72 @@ void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu)
perf_evsel__close_fd_cpu(evsel, cpu);
}
+void perf_evsel__munmap(struct perf_evsel *evsel)
+{
+ int cpu, thread;
+
+ if (evsel->fd == NULL || evsel->mmap == NULL)
+ return;
+
+ for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
+ for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
+ int fd = FD(evsel, cpu, thread);
+ struct perf_mmap *map = MMAP(evsel, cpu, thread);
+
+ if (fd < 0)
+ continue;
+
+ perf_mmap__munmap(map);
+ }
+ }
+
+ xyarray__delete(evsel->mmap);
+ evsel->mmap = NULL;
+}
+
+int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
+{
+ int ret, cpu, thread;
+ struct perf_mmap_param mp = {
+ .prot = PROT_READ | PROT_WRITE,
+ .mask = (pages * page_size) - 1,
+ };
+
+ if (evsel->fd == NULL || evsel->mmap)
+ return -EINVAL;
+
+ if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
+ return -ENOMEM;
+
+ for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
+ for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
+ int fd = FD(evsel, cpu, thread);
+ struct perf_mmap *map = MMAP(evsel, cpu, thread);
+
+ if (fd < 0)
+ continue;
+
+ perf_mmap__init(map, NULL, false, NULL);
+
+ ret = perf_mmap__mmap(map, &mp, fd, cpu);
+ if (ret) {
+ perf_evsel__munmap(evsel);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
+{
+ if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
+ return NULL;
+
+ return MMAP(evsel, cpu, thread)->base;
+}
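+
+/* Usage sketch (illustrative; see test_stat_user_read() in
+ * tests/test-evsel.c for a complete version). With pages == 0 the
+ * mask computation wraps so that only the perf_event_mmap_page control
+ * page is mapped:
+ *
+ *	perf_evsel__open(evsel, NULL, threads);
+ *	perf_evsel__mmap(evsel, 0);
+ *	pc = perf_evsel__mmap_base(evsel, 0, 0);
+ *	perf_evsel__read(evsel, 0, 0, &counts);
+ *	perf_evsel__munmap(evsel);
+ */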
+
int perf_evsel__read_size(struct perf_evsel *evsel)
{
u64 read_format = evsel->attr.read_format;
@@ -191,6 +267,10 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
if (FD(evsel, cpu, thread) < 0)
return -EINVAL;
+ if (MMAP(evsel, cpu, thread) &&
+ !perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
+ return 0;
+
if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
return -errno;
diff --git a/tools/lib/perf/include/internal/evsel.h b/tools/lib/perf/include/internal/evsel.h
index 1ffd083b235e..1c067d088bc6 100644
--- a/tools/lib/perf/include/internal/evsel.h
+++ b/tools/lib/perf/include/internal/evsel.h
@@ -41,6 +41,7 @@ struct perf_evsel {
struct perf_cpu_map *own_cpus;
struct perf_thread_map *threads;
struct xyarray *fd;
+ struct xyarray *mmap;
struct xyarray *sample_id;
u64 *id;
u32 ids;
diff --git a/tools/lib/perf/include/internal/mmap.h b/tools/lib/perf/include/internal/mmap.h
index be7556e0a2b2..5e3422f40ed5 100644
--- a/tools/lib/perf/include/internal/mmap.h
+++ b/tools/lib/perf/include/internal/mmap.h
@@ -11,6 +11,7 @@
#define PERF_SAMPLE_MAX_SIZE (1 << 16)
struct perf_mmap;
+struct perf_counts_values;
typedef void (*libperf_unmap_cb_t)(struct perf_mmap *map);
@@ -52,4 +53,6 @@ void perf_mmap__put(struct perf_mmap *map);
u64 perf_mmap__read_head(struct perf_mmap *map);
+int perf_mmap__read_self(struct perf_mmap *map, struct perf_counts_values *count);
+
#endif /* __LIBPERF_INTERNAL_MMAP_H */
diff --git a/tools/lib/perf/include/internal/tests.h b/tools/lib/perf/include/internal/tests.h
index 2093e8868a67..29425c2dabe1 100644
--- a/tools/lib/perf/include/internal/tests.h
+++ b/tools/lib/perf/include/internal/tests.h
@@ -3,11 +3,32 @@
#define __LIBPERF_INTERNAL_TESTS_H
#include <stdio.h>
+#include <unistd.h>
int tests_failed;
+int tests_verbose;
+
+static inline int get_verbose(char **argv, int argc)
+{
+ int c;
+ int verbose = 0;
+
+ while ((c = getopt(argc, argv, "v")) != -1) {
+ switch (c) {
+ case 'v':
+ verbose = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ return verbose;
+}
#define __T_START \
do { \
+ tests_verbose = get_verbose(argv, argc); \
fprintf(stdout, "- running %s...", __FILE__); \
fflush(NULL); \
tests_failed = 0; \
@@ -30,4 +51,15 @@ do {
} \
} while (0)
+#define __T_VERBOSE(...) \
+do { \
+ if (tests_verbose) { \
+ if (tests_verbose == 1) { \
+ fputc('\n', stderr); \
+ tests_verbose++; \
+ } \
+ fprintf(stderr, ##__VA_ARGS__); \
+ } \
+} while (0)
+
#endif /* __LIBPERF_INTERNAL_TESTS_H */
diff --git a/tools/lib/perf/include/internal/xyarray.h b/tools/lib/perf/include/internal/xyarray.h
index 51e35d6c8ec4..f10af3da7b21 100644
--- a/tools/lib/perf/include/internal/xyarray.h
+++ b/tools/lib/perf/include/internal/xyarray.h
@@ -18,11 +18,18 @@ struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
void xyarray__delete(struct xyarray *xy);
void xyarray__reset(struct xyarray *xy);
-static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
+static inline void *__xyarray__entry(struct xyarray *xy, int x, int y)
{
return &xy->contents[x * xy->row_size + y * xy->entry_size];
}
+static inline void *xyarray__entry(struct xyarray *xy, size_t x, size_t y)
+{
+ if (x >= xy->max_x || y >= xy->max_y)
+ return NULL;
+ return __xyarray__entry(xy, x, y);
+}
+
static inline int xyarray__max_y(struct xyarray *xy)
{
return xy->max_y;
diff --git a/tools/lib/perf/include/perf/bpf_perf.h b/tools/lib/perf/include/perf/bpf_perf.h
new file mode 100644
index 000000000000..e7cf6ba7b674
--- /dev/null
+++ b/tools/lib/perf/include/perf/bpf_perf.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __LIBPERF_BPF_PERF_H
+#define __LIBPERF_BPF_PERF_H
+
+#include <linux/types.h> /* for __u32 */
+
+/*
+ * bpf_perf uses a hashmap, the attr_map, to track all the leader programs.
+ * The hashmap is pinned in bpffs. flock() on this file is used to ensure
+ * no concurrent access to the attr_map. The key of attr_map is struct
+ * perf_event_attr, and the value is struct perf_event_attr_map_entry.
+ *
+ * struct perf_event_attr_map_entry contains two __u32 IDs: the bpf_link
+ * of the leader prog and the diff_map. Each perf-stat session holds a reference
+ * to the bpf_link to make sure the leader prog is attached to sched_switch
+ * tracepoint.
+ *
+ * Since the hashmap only contains IDs of the bpf_link and diff_map, it
+ * does not hold any references to the leader program. Once all perf-stat
+ * sessions of these events exit, the leader prog, its maps, and the
+ * perf_events will be freed.
+ */
+struct perf_event_attr_map_entry {
+ __u32 link_id;
+ __u32 diff_map_id;
+};
+
+/* default attr_map name */
+#define BPF_PERF_DEFAULT_ATTR_MAP_PATH "perf_attr_map"
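+
+/* Access sketch (illustrative; the pin path, the perf_event_attr key
+ * "attr", and all error handling are not prescribed by this header):
+ *
+ *	int fd = bpf_obj_get("/sys/fs/bpf/perf_attr_map");
+ *	struct perf_event_attr_map_entry entry;
+ *
+ *	flock(fd, LOCK_EX);
+ *	bpf_map_lookup_elem(fd, &attr, &entry);
+ *	flock(fd, LOCK_UN);
+ */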
+
+#endif /* __LIBPERF_BPF_PERF_H */
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index d82054225fcc..4d0c02ba3f7d 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -8,6 +8,8 @@
#include <linux/bpf.h>
#include <sys/types.h> /* pid_t */
+#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
+
struct perf_record_mmap {
struct perf_event_header header;
__u32 pid, tid;
@@ -346,8 +348,9 @@ struct perf_record_time_conv {
__u64 time_zero;
__u64 time_cycles;
__u64 time_mask;
- bool cap_user_time_zero;
- bool cap_user_time_short;
+ __u8 cap_user_time_zero;
+ __u8 cap_user_time_short;
+ __u8 reserved[6]; /* For alignment */
};
struct perf_record_header_feature {
diff --git a/tools/lib/perf/include/perf/evsel.h b/tools/lib/perf/include/perf/evsel.h
index c82ec39a4ad0..60eae25076d3 100644
--- a/tools/lib/perf/include/perf/evsel.h
+++ b/tools/lib/perf/include/perf/evsel.h
@@ -27,6 +27,9 @@ LIBPERF_API int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *
struct perf_thread_map *threads);
LIBPERF_API void perf_evsel__close(struct perf_evsel *evsel);
LIBPERF_API void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu);
+LIBPERF_API int perf_evsel__mmap(struct perf_evsel *evsel, int pages);
+LIBPERF_API void perf_evsel__munmap(struct perf_evsel *evsel);
+LIBPERF_API void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread);
LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
struct perf_counts_values *count);
LIBPERF_API int perf_evsel__enable(struct perf_evsel *evsel);
diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map
index 7be1af8a546c..c0c7ceb11060 100644
--- a/tools/lib/perf/libperf.map
+++ b/tools/lib/perf/libperf.map
@@ -23,6 +23,9 @@ LIBPERF_0.0.1 {
perf_evsel__disable;
perf_evsel__open;
perf_evsel__close;
+ perf_evsel__mmap;
+ perf_evsel__munmap;
+ perf_evsel__mmap_base;
perf_evsel__read;
perf_evsel__cpus;
perf_evsel__threads;
diff --git a/tools/lib/perf/mmap.c b/tools/lib/perf/mmap.c
index 79d5ed6c38cc..c89dfa5f67b3 100644
--- a/tools/lib/perf/mmap.c
+++ b/tools/lib/perf/mmap.c
@@ -8,9 +8,11 @@
#include <linux/perf_event.h>
#include <perf/mmap.h>
#include <perf/event.h>
+#include <perf/evsel.h>
#include <internal/mmap.h>
#include <internal/lib.h>
#include <linux/kernel.h>
+#include <linux/math64.h>
#include "internal.h"
void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
@@ -273,3 +275,89 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map)
return event;
}
+
+#if defined(__i386__) || defined(__x86_64__)
+static u64 read_perf_counter(unsigned int counter)
+{
+ unsigned int low, high;
+
+ asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
+
+ return low | ((u64)high) << 32;
+}
+
+static u64 read_timestamp(void)
+{
+ unsigned int low, high;
+
+ asm volatile("rdtsc" : "=a" (low), "=d" (high));
+
+ return low | ((u64)high) << 32;
+}
+#else
+static u64 read_perf_counter(unsigned int counter __maybe_unused) { return 0; }
+static u64 read_timestamp(void) { return 0; }
+#endif
+
+int perf_mmap__read_self(struct perf_mmap *map, struct perf_counts_values *count)
+{
+ struct perf_event_mmap_page *pc = map->base;
+ u32 seq, idx, time_mult = 0, time_shift = 0;
+ u64 cnt, cyc = 0, time_offset = 0, time_cycles = 0, time_mask = ~0ULL;
+
+ if (!pc || !pc->cap_user_rdpmc)
+ return -1;
+
+ do {
+ seq = READ_ONCE(pc->lock);
+ barrier();
+
+ count->ena = READ_ONCE(pc->time_enabled);
+ count->run = READ_ONCE(pc->time_running);
+
+ if (pc->cap_user_time && count->ena != count->run) {
+ cyc = read_timestamp();
+ time_mult = READ_ONCE(pc->time_mult);
+ time_shift = READ_ONCE(pc->time_shift);
+ time_offset = READ_ONCE(pc->time_offset);
+
+ if (pc->cap_user_time_short) {
+ time_cycles = READ_ONCE(pc->time_cycles);
+ time_mask = READ_ONCE(pc->time_mask);
+ }
+ }
+
+ idx = READ_ONCE(pc->index);
+ cnt = READ_ONCE(pc->offset);
+ if (pc->cap_user_rdpmc && idx) {
+ s64 evcnt = read_perf_counter(idx - 1);
+ u16 width = READ_ONCE(pc->pmc_width);
+
+ evcnt <<= 64 - width;
+ evcnt >>= 64 - width;
+ cnt += evcnt;
+ } else
+ return -1;
+
+ barrier();
+ } while (READ_ONCE(pc->lock) != seq);
+
+ if (count->ena != count->run) {
+ u64 delta;
+
+ /* Adjust for cap_usr_time_short, a nop if not */
+ cyc = time_cycles + ((cyc - time_cycles) & time_mask);
+
+ delta = time_offset + mul_u64_u32_shr(cyc, time_mult, time_shift);
+
+ count->ena += delta;
+ if (idx)
+ count->run += delta;
+
+ cnt = mul_u64_u64_div64(cnt, count->ena, count->run);
+ }
+
+ count->val = cnt;
+
+ return 0;
+}
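+
+/* The do/while loop above is the usual seqcount reader pattern: the
+ * kernel bumps pc->lock around updates to the page, so the reader
+ * retries whenever the value changes underneath it. A minimal sketch:
+ *
+ *	do {
+ *		seq = READ_ONCE(pc->lock);
+ *		barrier();
+ *		... copy the pc fields of interest ...
+ *		barrier();
+ *	} while (READ_ONCE(pc->lock) != seq);
+ */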
diff --git a/tools/lib/perf/tests/Makefile b/tools/lib/perf/tests/Makefile
index 96841775feaf..b536cc9a26dd 100644
--- a/tools/lib/perf/tests/Makefile
+++ b/tools/lib/perf/tests/Makefile
@@ -5,6 +5,8 @@ TESTS = test-cpumap test-threadmap test-evlist test-evsel
TESTS_SO := $(addsuffix -so,$(TESTS))
TESTS_A := $(addsuffix -a,$(TESTS))
+TEST_ARGS := $(if $(V),-v)
+
# Set compile option CFLAGS
ifdef EXTRA_CFLAGS
CFLAGS := $(EXTRA_CFLAGS)
@@ -28,9 +30,9 @@ all: $(TESTS_A) $(TESTS_SO)
run:
@echo "running static:"
- @for i in $(TESTS_A); do ./$$i; done
+ @for i in $(TESTS_A); do ./$$i $(TEST_ARGS); done
@echo "running dynamic:"
- @for i in $(TESTS_SO); do LD_LIBRARY_PATH=../ ./$$i; done
+ @for i in $(TESTS_SO); do LD_LIBRARY_PATH=../ ./$$i $(TEST_ARGS); done
clean:
$(call QUIET_CLEAN, tests)$(RM) $(TESTS_A) $(TESTS_SO)
diff --git a/tools/lib/perf/tests/test-evsel.c b/tools/lib/perf/tests/test-evsel.c
index 0ad82d7a2a51..288b5feaefe2 100644
--- a/tools/lib/perf/tests/test-evsel.c
+++ b/tools/lib/perf/tests/test-evsel.c
@@ -120,6 +120,70 @@ static int test_stat_thread_enable(void)
return 0;
}
+static int test_stat_user_read(int event)
+{
+ struct perf_counts_values counts = { .val = 0 };
+ struct perf_thread_map *threads;
+ struct perf_evsel *evsel;
+ struct perf_event_mmap_page *pc;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = event,
+ };
+ int err, i;
+
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel", evsel);
+
+ err = perf_evsel__open(evsel, NULL, threads);
+ __T("failed to open evsel", err == 0);
+
+ err = perf_evsel__mmap(evsel, 0);
+ __T("failed to mmap evsel", err == 0);
+
+ pc = perf_evsel__mmap_base(evsel, 0, 0);
+
+#if defined(__i386__) || defined(__x86_64__)
+ __T("userspace counter access not supported", pc->cap_user_rdpmc);
+ __T("userspace counter access not enabled", pc->index);
+ __T("userspace counter width not set", pc->pmc_width >= 32);
+#endif
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ __T("failed to read value for evsel", counts.val != 0);
+
+ for (i = 0; i < 5; i++) {
+ volatile int count = 0x10000 << i;
+ __u64 start, end, last = 0;
+
+ __T_VERBOSE("\tloop = %u, ", count);
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ start = counts.val;
+
+ while (count--) ;
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ end = counts.val;
+
+ __T("invalid counter data", (end - start) > last);
+ last = end - start;
+ __T_VERBOSE("count = %llu\n", end - start);
+ }
+
+ perf_evsel__munmap(evsel);
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+
+ perf_thread_map__put(threads);
+ return 0;
+}
+
int main(int argc, char **argv)
{
__T_START;
@@ -129,6 +193,8 @@ int main(int argc, char **argv)
test_stat_cpu();
test_stat_thread();
test_stat_thread_enable();
+ test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS);
+ test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES);
__T_END;
return tests_failed == 0 ? 0 : -1;
diff --git a/tools/memory-model/Documentation/access-marking.txt b/tools/memory-model/Documentation/access-marking.txt
new file mode 100644
index 000000000000..1ab189f51f55
--- /dev/null
+++ b/tools/memory-model/Documentation/access-marking.txt
@@ -0,0 +1,479 @@
+MARKING SHARED-MEMORY ACCESSES
+==============================
+
+This document provides guidelines for marking intentionally concurrent
+normal accesses to shared memory, that is, "normal" as in accesses that do
+not use read-modify-write atomic operations. It also describes how to
+document these accesses, both with comments and with special assertions
+processed by the Kernel Concurrency Sanitizer (KCSAN). This discussion
+builds on an earlier LWN article [1].
+
+
+ACCESS-MARKING OPTIONS
+======================
+
+The Linux kernel provides the following access-marking options:
+
+1. Plain C-language accesses (unmarked), for example, "a = b;"
+
+2. Data-race marking, for example, "data_race(a = b);"
+
+3. READ_ONCE(), for example, "a = READ_ONCE(b);"
+ The various forms of atomic_read() also fit in here.
+
+4. WRITE_ONCE(), for example, "WRITE_ONCE(a, b);"
+ The various forms of atomic_set() also fit in here.
+
+
+These may be used in combination, as shown in this admittedly improbable
+example:
+
+ WRITE_ONCE(a, b + data_race(c + d) + READ_ONCE(e));
+
+Neither plain C-language accesses nor data_race() (#1 and #2 above) place
+any sort of constraint on the compiler's choice of optimizations [2].
+In contrast, READ_ONCE() and WRITE_ONCE() (#3 and #4 above) restrict the
+compiler's use of code-motion and common-subexpression optimizations.
+Therefore, if a given access is involved in an intentional data race,
+using READ_ONCE() for loads and WRITE_ONCE() for stores is usually
+preferable to data_race(), which in turn is usually preferable to plain
+C-language accesses.
+
+KCSAN will complain about many types of data races involving plain
+C-language accesses, but marking all accesses involved in a given data
+race with one of data_race(), READ_ONCE(), or WRITE_ONCE() will prevent
+KCSAN from complaining. Of course, lack of KCSAN complaints does not
+imply correct code. Therefore, please take a thoughtful approach
+when responding to KCSAN complaints. Churning the code base with
+ill-considered additions of data_race(), READ_ONCE(), and WRITE_ONCE()
+is unhelpful.
+
+In fact, the following sections describe situations where use of
+data_race() and even plain C-language accesses is preferable to
+READ_ONCE() and WRITE_ONCE().
+
+
+Use of the data_race() Macro
+----------------------------
+
+Here are some situations where data_race() should be used instead of
+READ_ONCE() and WRITE_ONCE():
+
+1. Data-racy loads from shared variables whose values are used only
+ for diagnostic purposes.
+
+2. Data-racy reads whose values are checked against a marked reload.
+
+3. Reads whose values feed into error-tolerant heuristics.
+
+4. Writes setting values that feed into error-tolerant heuristics.
+
+
+Data-Racy Reads for Approximate Diagnostics
+
+Approximate diagnostics include lockdep reports, monitoring/statistics
+(including /proc and /sys output), WARN*()/BUG*() checks whose return
+values are ignored, and other situations where reads from shared variables
+are not an integral part of the core concurrency design.
+
+In fact, use of data_race() instead of READ_ONCE() for these diagnostic
+reads can enable better checking of the remaining accesses implementing
+the core concurrency design. For example, suppose that the core design
+prevents any non-diagnostic reads from shared variable x from running
+concurrently with updates to x. Then using plain C-language writes
+to x allows KCSAN to detect reads from x from within regions of code
+that fail to exclude the updates. In this case, it is important to use
+data_race() for the diagnostic reads because otherwise KCSAN would give
+false-positive warnings about these diagnostic reads.
+
+In theory, plain C-language loads can also be used for this use case.
+However, in practice this will have the disadvantage of causing KCSAN
+to generate false positives because KCSAN will have no way of knowing
+that the resulting data race was intentional.
+
+
+Data-Racy Reads That Are Checked Against Marked Reload
+
+The values from some reads are not implicitly trusted. They are instead
+fed into some operation that checks the full value against a later marked
+load from memory, which means that the occasional arbitrarily bogus value
+is not a problem. For example, if a bogus value is fed into cmpxchg(),
+all that happens is that this cmpxchg() fails, which normally results
+in a retry. Unless the race condition that resulted in the bogus value
+recurs, this retry will with high probability succeed, so no harm done.
+
+However, please keep in mind that a data_race() load feeding into
+a cmpxchg_relaxed() might still be subject to load fusing on some
+architectures. Therefore, it is best to capture the return value from
+the failing cmpxchg() for the next iteration of the loop, an approach
+that provides the compiler much less scope for mischievous optimizations.
+Capturing the return value from cmpxchg() also saves a memory reference
+in many cases.
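+
+For example, a sketch of this capture pattern (compare the
+xor_shift_foo() example later in this document):
+
+	int old, newold;
+
+	newold = data_race(foo);	/* Checked by cmpxchg(). */
+	do {
+		old = newold;
+		newold = cmpxchg(&foo, old, old + 1);
+	} while (newold != old);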
+
+In theory, plain C-language loads can also be used for this use case.
+However, in practice this will have the disadvantage of causing KCSAN
+to generate false positives because KCSAN will have no way of knowing
+that the resulting data race was intentional.
+
+
+Reads Feeding Into Error-Tolerant Heuristics
+
+Values from some reads feed into heuristics that can tolerate occasional
+errors. Such reads can use data_race(), thus allowing KCSAN to focus on
+the other accesses to the relevant shared variables. But please note
+that data_race() loads are subject to load fusing, which can result in
+consistent errors, which in turn are quite capable of breaking heuristics.
+Therefore use of data_race() should be limited to cases where some other
+code (such as a barrier() call) will force the occasional reload.
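+
+For example (a sketch, with good_enough() standing in for the
+heuristic):
+
+	for (;;) {
+		x = data_race(foo);
+		if (good_enough(x))
+			break;
+		barrier();	/* Forces foo to be reloaded. */
+	}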
+
+In theory, plain C-language loads can also be used for this use case.
+However, in practice this will have the disadvantage of causing KCSAN
+to generate false positives because KCSAN will have no way of knowing
+that the resulting data race was intentional.
+
+
+Writes Setting Values Feeding Into Error-Tolerant Heuristics
+
+The values read into error-tolerant heuristics come from somewhere,
+for example, from sysfs. This means that some code in sysfs writes
+to this same variable, and these writes can also use data_race().
+After all, if the heuristic can tolerate the occasional bogus value
+due to compiler-mangled reads, it can also tolerate the occasional
+compiler-mangled write, at least assuming that the proper value is in
+place once the write completes.
+
+Plain C-language stores can also be used for this use case. However,
+in kernels built with CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n, this
+will have the disadvantage of causing KCSAN to generate false positives
+because KCSAN will have no way of knowing that the resulting data race
+was intentional.
+
+
+Use of Plain C-Language Accesses
+--------------------------------
+
+Here are some example situations where plain C-language accesses should
+be used instead of READ_ONCE(), WRITE_ONCE(), and data_race():
+
+1. Accesses protected by mutual exclusion, including strict locking
+ and sequence locking.
+
+2. Initialization-time and cleanup-time accesses. This covers a
+ wide variety of situations, including the uniprocessor phase of
+ system boot, variables to be used by not-yet-spawned kthreads,
+ structures not yet published to reference-counted or RCU-protected
+ data structures, and the cleanup side of any of these situations.
+
+3. Per-CPU variables that are not accessed from other CPUs.
+
+4. Private per-task variables, including on-stack variables, some
+ fields in the task_struct structure, and task-private heap data.
+
+5. Any other loads for which there is not supposed to be a concurrent
+ store to that same variable.
+
+6. Any other stores for which there should be neither concurrent
+ loads nor concurrent stores to that same variable.
+
+ But note that KCSAN makes three explicit exceptions to this rule
+ by default, refraining from flagging plain C-language stores:
+
+ a. No matter what. You can override this default by building
+ with CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n.
+
+ b. When the store writes the value already contained in
+ that variable. You can override this default by building
+ with CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n.
+
+ c. When one of the stores is in an interrupt handler and
+ the other in the interrupted code. You can override this
+ default by building with CONFIG_KCSAN_INTERRUPT_WATCHER=y.
+
+Note that it is important to use plain C-language accesses in these cases,
+because doing otherwise prevents KCSAN from detecting violations of your
+code's synchronization rules.
+
+
+ACCESS-DOCUMENTATION OPTIONS
+============================
+
+It is important to comment marked accesses so that people reading your
+code, yourself included, are reminded of the synchronization design.
+However, it is even more important to comment plain C-language accesses
+that are intentionally involved in data races. Such comments are
+needed to remind people reading your code, again, yourself included,
+of how the compiler has been prevented from optimizing those accesses
+into concurrency bugs.
+
+It is also possible to tell KCSAN about your synchronization design.
+For example, ASSERT_EXCLUSIVE_ACCESS(foo) tells KCSAN that any
+concurrent access to variable foo by any other CPU is an error, even
+if that concurrent access is marked with READ_ONCE(). In addition,
+ASSERT_EXCLUSIVE_WRITER(foo) tells KCSAN that although it is OK for there
+to be concurrent reads from foo from other CPUs, it is an error for some
+other CPU to be concurrently writing to foo, even if that concurrent
+write is marked with data_race() or WRITE_ONCE().
+
+Note that although KCSAN will call out data races involving either
+ASSERT_EXCLUSIVE_ACCESS() or ASSERT_EXCLUSIVE_WRITER() on the one hand
+and data_race() writes on the other, KCSAN will not report the location
+of these data_race() writes.
+
+
+EXAMPLES
+========
+
+As noted earlier, the goal is to prevent the compiler from destroying
+your concurrent algorithm, to help the human reader, and to inform
+KCSAN of aspects of your concurrency design. This section looks at a
+few examples showing how this can be done.
+
+
+Lock Protection With Lockless Diagnostic Access
+-----------------------------------------------
+
+For example, suppose a shared variable "foo" is read only while a
+reader-writer spinlock is read-held, written only while that same
+spinlock is write-held, except that it is also read locklessly for
+diagnostic purposes. The code might look as follows:
+
+ int foo;
+ DEFINE_RWLOCK(foo_rwlock);
+
+ void update_foo(int newval)
+ {
+ write_lock(&foo_rwlock);
+ foo = newval;
+ do_something(newval);
+ write_unlock(&foo_rwlock);
+ }
+
+ int read_foo(void)
+ {
+ int ret;
+
+ read_lock(&foo_rwlock);
+ do_something_else();
+ ret = foo;
+ read_unlock(&foo_rwlock);
+ return ret;
+ }
+
+ int read_foo_diagnostic(void)
+ {
+ return data_race(foo);
+ }
+
+The reader-writer lock prevents the compiler from introducing concurrency
+bugs into any part of the main algorithm using foo, which means that
+the accesses to foo within both update_foo() and read_foo() can (and
+should) be plain C-language accesses. One benefit of making them
+plain C-language accesses is that KCSAN can detect any erroneous lockless
+reads from or updates to foo. The data_race() in read_foo_diagnostic()
+tells KCSAN that data races are expected, and should be silently
+ignored. This data_race() also tells the human reading the code that
+read_foo_diagnostic() might sometimes return a bogus value.
+
+However, please note that your kernel must be built with
+CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n in order for KCSAN to
+detect a buggy lockless write. If you need KCSAN to detect such a
+write even if that write did not change the value of foo, you also
+need CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n. If you need KCSAN to
+detect such a write happening in an interrupt handler running on the
+same CPU doing the legitimate lock-protected write, you also need
+CONFIG_KCSAN_INTERRUPT_WATCHER=y. With some or all of these Kconfig
+options set properly, KCSAN can be quite helpful, although it is not
+necessarily a full replacement for hardware watchpoints. On the other
+hand, neither are hardware watchpoints a full replacement for KCSAN
+because it is not always easy to tell a hardware watchpoint to trap
+conditionally on accesses.
+
+
+Lock-Protected Writes With Lockless Reads
+-----------------------------------------
+
+For another example, suppose a shared variable "foo" is updated only
+while holding a spinlock, but is read locklessly. The code might look
+as follows:
+
+ int foo;
+ DEFINE_SPINLOCK(foo_lock);
+
+ void update_foo(int newval)
+ {
+ spin_lock(&foo_lock);
+ WRITE_ONCE(foo, newval);
+ ASSERT_EXCLUSIVE_WRITER(foo);
+ do_something(newval);
+ spin_unlock(&foo_lock);
+ }
+
+ int read_foo(void)
+ {
+ do_something_else();
+ return READ_ONCE(foo);
+ }
+
+Because foo is read locklessly, all accesses are marked. The purpose
+of the ASSERT_EXCLUSIVE_WRITER() is to allow KCSAN to check for a buggy
+concurrent lockless write.
+
+
+Lockless Reads and Writes
+-------------------------
+
+For another example, suppose a shared variable "foo" is both read and
+updated locklessly. The code might look as follows:
+
+ int foo;
+
+ int update_foo(int newval)
+ {
+ int ret;
+
+ ret = xchg(&foo, newval);
+ do_something(newval);
+ return ret;
+ }
+
+ int read_foo(void)
+ {
+ do_something_else();
+ return READ_ONCE(foo);
+ }
+
+Because foo is accessed locklessly, all accesses are marked. It does
+not make sense to use ASSERT_EXCLUSIVE_WRITER() in this case because
+there really can be concurrent lockless writers. KCSAN would
+flag any concurrent plain C-language reads from foo, and given
+CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n, also any concurrent plain
+C-language writes to foo.
+
+
+Lockless Reads and Writes, But With Single-Threaded Initialization
+------------------------------------------------------------------
+
+For yet another example, suppose that foo is initialized in a
+single-threaded manner, but that a number of kthreads are then created
+that locklessly and concurrently access foo. Some snippets of this code
+might look as follows:
+
+ int foo;
+
+ void initialize_foo(int initval, int nkthreads)
+ {
+ int i;
+
+ foo = initval;
+ ASSERT_EXCLUSIVE_ACCESS(foo);
+ for (i = 0; i < nkthreads; i++)
+ kthread_run(access_foo_concurrently, ...);
+ }
+
+ /* Called from access_foo_concurrently(). */
+ int update_foo(int newval)
+ {
+ int ret;
+
+ ret = xchg(&foo, newval);
+ do_something(newval);
+ return ret;
+ }
+
+ /* Also called from access_foo_concurrently(). */
+ int read_foo(void)
+ {
+ do_something_else();
+ return READ_ONCE(foo);
+ }
+
+The initialize_foo() function uses a plain C-language write to foo because there
+are not supposed to be concurrent accesses during initialization. The
+ASSERT_EXCLUSIVE_ACCESS() allows KCSAN to flag buggy concurrent unmarked
+reads, and further allows KCSAN to flag buggy concurrent writes, even
+if: (1) Those writes are marked or
+(2) The kernel was built with CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y.
+
+
+Checking Stress-Test Race Coverage
+----------------------------------
+
+When designing stress tests it is important to ensure that race conditions
+of interest really do occur. For example, consider the following code
+fragment:
+
+ int foo;
+
+ int update_foo(int newval)
+ {
+ return xchg(&foo, newval);
+ }
+
+ int xor_shift_foo(int shift, int mask)
+ {
+ int old, new, newold;
+
+ newold = data_race(foo); /* Checked by cmpxchg(). */
+ do {
+ old = newold;
+ new = (old << shift) ^ mask;
+ newold = cmpxchg(&foo, old, new);
+ } while (newold != old);
+ return old;
+ }
+
+ int read_foo(void)
+ {
+ return READ_ONCE(foo);
+ }
+
+If it is possible for update_foo(), xor_shift_foo(), and read_foo() to be
+invoked concurrently, the stress test should force this concurrency to
+actually happen. KCSAN can evaluate the stress test when the above code
+is modified to read as follows:
+
+ int foo;
+
+ int update_foo(int newval)
+ {
+ ASSERT_EXCLUSIVE_ACCESS(foo);
+ return xchg(&foo, newval);
+ }
+
+ int xor_shift_foo(int shift, int mask)
+ {
+ int old, new, newold;
+
+ newold = data_race(foo); /* Checked by cmpxchg(). */
+ do {
+ old = newold;
+ new = (old << shift) ^ mask;
+ ASSERT_EXCLUSIVE_ACCESS(foo);
+ newold = cmpxchg(&foo, old, new);
+ } while (newold != old);
+ return old;
+ }
+
+
+ {
+ ASSERT_EXCLUSIVE_ACCESS(foo);
+ return READ_ONCE(foo);
+ }
+
+If a given stress-test run does not result in KCSAN complaints from
+each possible pair of ASSERT_EXCLUSIVE_ACCESS() invocations, the
+stress test needs improvement. If the stress test were to be evaluated
+on a regular basis, it would be wise to place the above instances of
+ASSERT_EXCLUSIVE_ACCESS() under #ifdef so that they do not result in
+false positives when not evaluating the stress test.
+
+
+REFERENCES
+==========
+
+[1] "Concurrency bugs should fear the big bad data-race detector (part 2)"
+ https://lwn.net/Articles/816854/
+
+[2] "Who's afraid of a big bad optimizing compiler?"
+ https://lwn.net/Articles/793253/
diff --git a/tools/memory-model/Documentation/glossary.txt b/tools/memory-model/Documentation/glossary.txt
index b2da6365be63..6f3d16dbf467 100644
--- a/tools/memory-model/Documentation/glossary.txt
+++ b/tools/memory-model/Documentation/glossary.txt
@@ -19,7 +19,7 @@ Address Dependency: When the address of a later memory access is computed
from the value returned by the rcu_dereference() on line 2, the
address dependency extends from that rcu_dereference() to that
"p->a". In rare cases, optimizing compilers can destroy address
- dependencies. Please see Documentation/RCU/rcu_dereference.txt
+ dependencies. Please see Documentation/RCU/rcu_dereference.rst
for more information.
See also "Control Dependency" and "Data Dependency".
diff --git a/tools/memory-model/Documentation/simple.txt b/tools/memory-model/Documentation/simple.txt
index 81e1a0ec5342..4c789ec8334f 100644
--- a/tools/memory-model/Documentation/simple.txt
+++ b/tools/memory-model/Documentation/simple.txt
@@ -189,7 +189,6 @@ Additional information may be found in these files:
Documentation/atomic_t.txt
Documentation/atomic_bitops.txt
-Documentation/core-api/atomic_ops.rst
Documentation/core-api/refcount-vs-atomic.rst
Reading code using these primitives is often also quite helpful.
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 549813cff8ab..cedf3ede7545 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -11,22 +11,15 @@
#include "../../../arch/x86/lib/inat.c"
#include "../../../arch/x86/lib/insn.c"
+#define CONFIG_64BIT 1
+#include <asm/nops.h>
+
#include <asm/orc_types.h>
#include <objtool/check.h>
#include <objtool/elf.h>
#include <objtool/arch.h>
#include <objtool/warn.h>
-
-static unsigned char op_to_cfi_reg[][2] = {
- {CFI_AX, CFI_R8},
- {CFI_CX, CFI_R9},
- {CFI_DX, CFI_R10},
- {CFI_BX, CFI_R11},
- {CFI_SP, CFI_R12},
- {CFI_BP, CFI_R13},
- {CFI_SI, CFI_R14},
- {CFI_DI, CFI_R15},
-};
+#include <arch/elf.h>
static int is_x86_64(const struct elf *elf)
{
@@ -83,6 +76,31 @@ unsigned long arch_jump_destination(struct instruction *insn)
return -1; \
else for (list_add_tail(&op->list, ops_list); op; op = NULL)
+/*
+ * Helpers to decode ModRM/SIB:
+ *
+ * r/m| AX CX DX BX | SP | BP | SI DI |
+ * | R8 R9 R10 R11 | R12 | R13 | R14 R15 |
+ * Mod+----------------+-----+-----+---------+
+ * 00 | [r/m] |[SIB]|[IP+]| [r/m] |
+ * 01 | [r/m + d8] |[S+d]| [r/m + d8] |
+ * 10 | [r/m + d32] |[S+D]| [r/m + d32] |
+ * 11 | r/ m |
+ */
+
+#define mod_is_mem() (modrm_mod != 3)
+#define mod_is_reg() (modrm_mod == 3)
+
+#define is_RIP() ((modrm_rm & 7) == CFI_BP && modrm_mod == 0)
+#define have_SIB() ((modrm_rm & 7) == CFI_SP && mod_is_mem())
+
+#define rm_is(reg) (have_SIB() ? \
+ sib_base == (reg) && sib_index == CFI_SP : \
+ modrm_rm == (reg))
+
+#define rm_is_mem(reg) (mod_is_mem() && !is_RIP() && rm_is(reg))
+#define rm_is_reg(reg) (mod_is_reg() && modrm_rm == (reg))
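+
+/* Worked example: "mov %rsp, %rbp" encodes as 48 89 e5; ModRM 0xe5 has
+ * mod=3 (register direct), reg=4 (%rsp, the source) and rm=5 (%rbp),
+ * so mod_is_reg() and rm_is_reg(CFI_BP) both hold.
+ */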
+
int arch_decode_instruction(const struct elf *elf, const struct section *sec,
unsigned long offset, unsigned int maxlen,
unsigned int *len, enum insn_type *type,
@@ -90,21 +108,22 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
struct list_head *ops_list)
{
struct insn insn;
- int x86_64, sign;
- unsigned char op1, op2, rex = 0, rex_b = 0, rex_r = 0, rex_w = 0,
- rex_x = 0, modrm = 0, modrm_mod = 0, modrm_rm = 0,
- modrm_reg = 0, sib = 0;
+ int x86_64, ret;
+ unsigned char op1, op2,
+ rex = 0, rex_b = 0, rex_r = 0, rex_w = 0, rex_x = 0,
+ modrm = 0, modrm_mod = 0, modrm_rm = 0, modrm_reg = 0,
+ sib = 0, /* sib_scale = 0, */ sib_index = 0, sib_base = 0;
struct stack_op *op = NULL;
struct symbol *sym;
+ u64 imm;
x86_64 = is_x86_64(elf);
if (x86_64 == -1)
return -1;
- insn_init(&insn, sec->data->d_buf + offset, maxlen, x86_64);
- insn_get_length(&insn);
-
- if (!insn_complete(&insn)) {
+ ret = insn_decode(&insn, sec->data->d_buf + offset, maxlen,
+ x86_64 ? INSN_MODE_64 : INSN_MODE_32);
+ if (ret < 0) {
WARN("can't decode instruction at %s:0x%lx", sec->name, offset);
return -1;
}
@@ -129,23 +148,27 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
if (insn.modrm.nbytes) {
modrm = insn.modrm.bytes[0];
modrm_mod = X86_MODRM_MOD(modrm);
- modrm_reg = X86_MODRM_REG(modrm);
- modrm_rm = X86_MODRM_RM(modrm);
+ modrm_reg = X86_MODRM_REG(modrm) + 8*rex_r;
+ modrm_rm = X86_MODRM_RM(modrm) + 8*rex_b;
}
- if (insn.sib.nbytes)
+ if (insn.sib.nbytes) {
sib = insn.sib.bytes[0];
+ /* sib_scale = X86_SIB_SCALE(sib); */
+ sib_index = X86_SIB_INDEX(sib) + 8*rex_x;
+ sib_base = X86_SIB_BASE(sib) + 8*rex_b;
+ }
switch (op1) {
case 0x1:
case 0x29:
- if (rex_w && !rex_b && modrm_mod == 3 && modrm_rm == 4) {
+ if (rex_w && rm_is_reg(CFI_SP)) {
/* add/sub reg, %rsp */
ADD_OP(op) {
op->src.type = OP_SRC_ADD;
- op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+ op->src.reg = modrm_reg;
op->dest.type = OP_DEST_REG;
op->dest.reg = CFI_SP;
}
@@ -157,7 +180,7 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
/* push reg */
ADD_OP(op) {
op->src.type = OP_SRC_REG;
- op->src.reg = op_to_cfi_reg[op1 & 0x7][rex_b];
+ op->src.reg = (op1 & 0x7) + 8*rex_b;
op->dest.type = OP_DEST_PUSH;
}
@@ -169,7 +192,7 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
ADD_OP(op) {
op->src.type = OP_SRC_POP;
op->dest.type = OP_DEST_REG;
- op->dest.reg = op_to_cfi_reg[op1 & 0x7][rex_b];
+ op->dest.reg = (op1 & 0x7) + 8*rex_b;
}
break;
@@ -187,12 +210,54 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
*type = INSN_JUMP_CONDITIONAL;
break;
- case 0x81:
- case 0x83:
- if (rex != 0x48)
+ case 0x80 ... 0x83:
+ /*
+ * 1000 00sw : mod OP r/m : immediate
+ *
+ * s - sign extend immediate
+ * w - imm8 / imm32
+ *
+ * OP: 000 ADD 100 AND
+ * 001 OR 101 SUB
+ * 010 ADC 110 XOR
+ * 011 SBB 111 CMP
+ */
+
+ /* 64bit only */
+ if (!rex_w)
+ break;
+
+ /* %rsp target only */
+ if (!rm_is_reg(CFI_SP))
break;
- if (modrm == 0xe4) {
+ imm = insn.immediate.value;
+ if (op1 & 2) { /* sign extend */
+ if (op1 & 1) { /* imm32 */
+ imm <<= 32;
+ imm = (s64)imm >> 32;
+ } else { /* imm8 */
+ imm <<= 56;
+ imm = (s64)imm >> 56;
+ }
+ }
+
+ switch (modrm_reg & 7) {
+ case 5:
+ imm = -imm;
+ /* fallthrough */
+ case 0:
+ /* add/sub imm, %rsp */
+ ADD_OP(op) {
+ op->src.type = OP_SRC_ADD;
+ op->src.reg = CFI_SP;
+ op->src.offset = imm;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_SP;
+ }
+ break;
+
+ case 4:
/* and imm, %rsp */
ADD_OP(op) {
op->src.type = OP_SRC_AND;
@@ -202,53 +267,48 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
op->dest.reg = CFI_SP;
}
break;
- }
- if (modrm == 0xc4)
- sign = 1;
- else if (modrm == 0xec)
- sign = -1;
- else
+ default:
+ /* WARN ? */
break;
-
- /* add/sub imm, %rsp */
- ADD_OP(op) {
- op->src.type = OP_SRC_ADD;
- op->src.reg = CFI_SP;
- op->src.offset = insn.immediate.value * sign;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = CFI_SP;
}
+
break;
case 0x89:
- if (rex_w && !rex_r && modrm_reg == 4) {
+ if (!rex_w)
+ break;
- if (modrm_mod == 3) {
+ if (modrm_reg == CFI_SP) {
+
+ if (mod_is_reg()) {
/* mov %rsp, reg */
ADD_OP(op) {
op->src.type = OP_SRC_REG;
op->src.reg = CFI_SP;
op->dest.type = OP_DEST_REG;
- op->dest.reg = op_to_cfi_reg[modrm_rm][rex_b];
+ op->dest.reg = modrm_rm;
}
break;
} else {
- /* skip nontrivial SIB */
- if (modrm_rm == 4 && !(sib == 0x24 && rex_b == rex_x))
- break;
-
/* skip RIP relative displacement */
- if (modrm_rm == 5 && modrm_mod == 0)
+ if (is_RIP())
break;
+ /* skip nontrivial SIB */
+ if (have_SIB()) {
+ modrm_rm = sib_base;
+ if (sib_index != CFI_SP)
+ break;
+ }
+
/* mov %rsp, disp(%reg) */
ADD_OP(op) {
op->src.type = OP_SRC_REG;
op->src.reg = CFI_SP;
op->dest.type = OP_DEST_REG_INDIRECT;
- op->dest.reg = op_to_cfi_reg[modrm_rm][rex_b];
+ op->dest.reg = modrm_rm;
op->dest.offset = insn.displacement.value;
}
break;
@@ -257,12 +317,12 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
break;
}
- if (rex_w && !rex_b && modrm_mod == 3 && modrm_rm == 4) {
+ if (rm_is_reg(CFI_SP)) {
/* mov reg, %rsp */
ADD_OP(op) {
op->src.type = OP_SRC_REG;
- op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+ op->src.reg = modrm_reg;
op->dest.type = OP_DEST_REG;
op->dest.reg = CFI_SP;
}
@@ -271,13 +331,15 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
/* fallthrough */
case 0x88:
- if (!rex_b &&
- (modrm_mod == 1 || modrm_mod == 2) && modrm_rm == 5) {
+ if (!rex_w)
+ break;
+
+ if (rm_is_mem(CFI_BP)) {
/* mov reg, disp(%rbp) */
ADD_OP(op) {
op->src.type = OP_SRC_REG;
- op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+ op->src.reg = modrm_reg;
op->dest.type = OP_DEST_REG_INDIRECT;
op->dest.reg = CFI_BP;
op->dest.offset = insn.displacement.value;
@@ -285,12 +347,12 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
break;
}
- if (rex_w && !rex_b && modrm_rm == 4 && sib == 0x24) {
+ if (rm_is_mem(CFI_SP)) {
/* mov reg, disp(%rsp) */
ADD_OP(op) {
op->src.type = OP_SRC_REG;
- op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+ op->src.reg = modrm_reg;
op->dest.type = OP_DEST_REG_INDIRECT;
op->dest.reg = CFI_SP;
op->dest.offset = insn.displacement.value;
@@ -301,7 +363,10 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
break;
case 0x8b:
- if (rex_w && !rex_b && modrm_mod == 1 && modrm_rm == 5) {
+ if (!rex_w)
+ break;
+
+ if (rm_is_mem(CFI_BP)) {
/* mov disp(%rbp), reg */
ADD_OP(op) {
@@ -309,11 +374,12 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
op->src.reg = CFI_BP;
op->src.offset = insn.displacement.value;
op->dest.type = OP_DEST_REG;
- op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
+ op->dest.reg = modrm_reg;
}
+ break;
+ }
- } else if (rex_w && !rex_b && sib == 0x24 &&
- modrm_mod != 3 && modrm_rm == 4) {
+ if (rm_is_mem(CFI_SP)) {
/* mov disp(%rsp), reg */
ADD_OP(op) {
@@ -321,75 +387,48 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
op->src.reg = CFI_SP;
op->src.offset = insn.displacement.value;
op->dest.type = OP_DEST_REG;
- op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
+ op->dest.reg = modrm_reg;
}
+ break;
}
break;
case 0x8d:
- if (sib == 0x24 && rex_w && !rex_b && !rex_x) {
-
- ADD_OP(op) {
- if (!insn.displacement.value) {
- /* lea (%rsp), reg */
- op->src.type = OP_SRC_REG;
- } else {
- /* lea disp(%rsp), reg */
- op->src.type = OP_SRC_ADD;
- op->src.offset = insn.displacement.value;
- }
- op->src.reg = CFI_SP;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
- }
-
- } else if (rex == 0x48 && modrm == 0x65) {
-
- /* lea disp(%rbp), %rsp */
- ADD_OP(op) {
- op->src.type = OP_SRC_ADD;
- op->src.reg = CFI_BP;
- op->src.offset = insn.displacement.value;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = CFI_SP;
- }
+ if (mod_is_reg()) {
+ WARN("invalid LEA encoding at %s:0x%lx", sec->name, offset);
+ break;
+ }
- } else if (rex == 0x49 && modrm == 0x62 &&
- insn.displacement.value == -8) {
+ /* skip non 64bit ops */
+ if (!rex_w)
+ break;
- /*
- * lea -0x8(%r10), %rsp
- *
- * Restoring rsp back to its original value after a
- * stack realignment.
- */
- ADD_OP(op) {
- op->src.type = OP_SRC_ADD;
- op->src.reg = CFI_R10;
- op->src.offset = -8;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = CFI_SP;
- }
+ /* skip RIP relative displacement */
+ if (is_RIP())
+ break;
- } else if (rex == 0x49 && modrm == 0x65 &&
- insn.displacement.value == -16) {
+ /* skip nontrivial SIB */
+ if (have_SIB()) {
+ modrm_rm = sib_base;
+ if (sib_index != CFI_SP)
+ break;
+ }
- /*
- * lea -0x10(%r13), %rsp
- *
- * Restoring rsp back to its original value after a
- * stack realignment.
- */
- ADD_OP(op) {
+ /* lea disp(%src), %dst */
+ ADD_OP(op) {
+ op->src.offset = insn.displacement.value;
+ if (!op->src.offset) {
+ /* lea (%src), %dst */
+ op->src.type = OP_SRC_REG;
+ } else {
+ /* lea disp(%src), %dst */
op->src.type = OP_SRC_ADD;
- op->src.reg = CFI_R13;
- op->src.offset = -16;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = CFI_SP;
}
+ op->src.reg = modrm_rm;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = modrm_reg;
}
-
break;
case 0x8f:
@@ -476,9 +515,17 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec,
* mov bp, sp
* pop bp
*/
- ADD_OP(op)
- op->dest.type = OP_DEST_LEAVE;
-
+ ADD_OP(op) {
+ op->src.type = OP_SRC_REG;
+ op->src.reg = CFI_BP;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_SP;
+ }
+ ADD_OP(op) {
+ op->src.type = OP_SRC_POP;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_BP;
+ }
break;
case 0xe3:
@@ -596,11 +643,11 @@ void arch_initial_func_cfi_state(struct cfi_init_state *state)
const char *arch_nop_insn(int len)
{
static const char nops[5][5] = {
- /* 1 */ { 0x90 },
- /* 2 */ { 0x66, 0x90 },
- /* 3 */ { 0x0f, 0x1f, 0x00 },
- /* 4 */ { 0x0f, 0x1f, 0x40, 0x00 },
- /* 5 */ { 0x0f, 0x1f, 0x44, 0x00, 0x00 },
+ { BYTES_NOP1 },
+ { BYTES_NOP2 },
+ { BYTES_NOP3 },
+ { BYTES_NOP4 },
+ { BYTES_NOP5 },
};
if (len < 1 || len > 5) {
@@ -611,6 +658,122 @@ const char *arch_nop_insn(int len)
return nops[len-1];
}
+/* asm/alternative.h ? */
+
+#define ALTINSTR_FLAG_INV (1 << 15)
+#define ALT_NOT(feat) ((feat) | ALTINSTR_FLAG_INV)
+
+struct alt_instr {
+ s32 instr_offset; /* original instruction */
+ s32 repl_offset; /* offset to replacement instruction */
+ u16 cpuid; /* cpuid bit set for replacement */
+ u8 instrlen; /* length of original instruction */
+ u8 replacementlen; /* length of new instruction */
+} __packed;
+
+static int elf_add_alternative(struct elf *elf,
+ struct instruction *orig, struct symbol *sym,
+ int cpuid, u8 orig_len, u8 repl_len)
+{
+ const int size = sizeof(struct alt_instr);
+ struct alt_instr *alt;
+ struct section *sec;
+ Elf_Scn *s;
+
+ sec = find_section_by_name(elf, ".altinstructions");
+ if (!sec) {
+ sec = elf_create_section(elf, ".altinstructions",
+ SHF_WRITE, size, 0);
+
+ if (!sec) {
+ WARN_ELF("elf_create_section");
+ return -1;
+ }
+ }
+
+ s = elf_getscn(elf->elf, sec->idx);
+ if (!s) {
+ WARN_ELF("elf_getscn");
+ return -1;
+ }
+
+ sec->data = elf_newdata(s);
+ if (!sec->data) {
+ WARN_ELF("elf_newdata");
+ return -1;
+ }
+
+ sec->data->d_size = size;
+ sec->data->d_align = 1;
+
+ alt = sec->data->d_buf = malloc(size);
+ if (!sec->data->d_buf) {
+ perror("malloc");
+ return -1;
+ }
+ memset(sec->data->d_buf, 0, size);
+
+ if (elf_add_reloc_to_insn(elf, sec, sec->sh.sh_size,
+ R_X86_64_PC32, orig->sec, orig->offset)) {
+ WARN("elf_create_reloc: alt_instr::instr_offset");
+ return -1;
+ }
+
+ if (elf_add_reloc(elf, sec, sec->sh.sh_size + 4,
+ R_X86_64_PC32, sym, 0)) {
+ WARN("elf_create_reloc: alt_instr::repl_offset");
+ return -1;
+ }
+
+ alt->cpuid = cpuid;
+ alt->instrlen = orig_len;
+ alt->replacementlen = repl_len;
+
+ sec->sh.sh_size += size;
+ sec->changed = true;
+
+ return 0;
+}
+
+#define X86_FEATURE_RETPOLINE ( 7*32+12)
+
+int arch_rewrite_retpolines(struct objtool_file *file)
+{
+ struct instruction *insn;
+ struct reloc *reloc;
+ struct symbol *sym;
+ char name[32] = "";
+
+ list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
+
+ if (!strcmp(insn->sec->name, ".text.__x86.indirect_thunk"))
+ continue;
+
+ reloc = insn->reloc;
+
+ sprintf(name, "__x86_indirect_alt_%s_%s",
+ insn->type == INSN_JUMP_DYNAMIC ? "jmp" : "call",
+ reloc->sym->name + 21);
+
+ sym = find_symbol_by_name(file->elf, name);
+ if (!sym) {
+ sym = elf_create_undef_symbol(file->elf, name);
+ if (!sym) {
+ WARN("elf_create_undef_symbol");
+ return -1;
+ }
+ }
+
+ if (elf_add_alternative(file->elf, insn, sym,
+ ALT_NOT(X86_FEATURE_RETPOLINE), 5, 5)) {
+ WARN("elf_add_alternative");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
int arch_decode_hint_reg(struct instruction *insn, u8 sp_reg)
{
struct cfi_reg *cfa = &insn->cfi.cfa;
@@ -646,3 +809,8 @@ int arch_decode_hint_reg(struct instruction *insn, u8 sp_reg)
return 0;
}
+
+bool arch_is_retpoline(struct symbol *sym)
+{
+ return !strncmp(sym->name, "__x86_indirect_", 15);
+}
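
Two details above are easy to sanity-check outside of objtool: the immediate
sign-extension in the 0x80 ... 0x83 handler, and the "+ 21" used to strip the
"__x86_indirect_thunk_" prefix in arch_rewrite_retpolines(). A minimal
standalone sketch (plain C; names are illustrative, not objtool's):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

/* Sign-extend the low 8 or 32 bits of imm, as in the 0x80..0x83 case. */
static u64 sign_extend_imm(u64 imm, int imm32)
{
	int shift = imm32 ? 32 : 56;

	imm <<= shift;			/* move the sign bit up to bit 63 */
	return (s64)imm >> shift;	/* arithmetic shift copies it down */
}

int main(void)
{
	/* 0xf0 as imm8 is -16; as imm32 it is just 240. */
	printf("%lld\n", (long long)sign_extend_imm(0xf0, 0));	/* -16 */
	printf("%lld\n", (long long)sign_extend_imm(0xf0, 1));	/* 240 */

	/* "reloc->sym->name + 21" assumes exactly this prefix length: */
	printf("%zu\n", strlen("__x86_indirect_thunk_"));	/* 21 */
	return 0;
}
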
diff --git a/tools/objtool/arch/x86/include/arch/cfi_regs.h b/tools/objtool/arch/x86/include/arch/cfi_regs.h
index 79bc517efba8..0579d22c433c 100644
--- a/tools/objtool/arch/x86/include/arch/cfi_regs.h
+++ b/tools/objtool/arch/x86/include/arch/cfi_regs.h
@@ -4,13 +4,13 @@
#define _OBJTOOL_CFI_REGS_H
#define CFI_AX 0
-#define CFI_DX 1
-#define CFI_CX 2
+#define CFI_CX 1
+#define CFI_DX 2
#define CFI_BX 3
-#define CFI_SI 4
-#define CFI_DI 5
-#define CFI_BP 6
-#define CFI_SP 7
+#define CFI_SP 4
+#define CFI_BP 5
+#define CFI_SI 6
+#define CFI_DI 7
#define CFI_R8 8
#define CFI_R9 9
#define CFI_R10 10
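
The renumbering above makes the CFI_* constants equal to the hardware
register encoding (rAX=0, rCX=1, ... rDI=7), which is what lets decode.c use
"X86_MODRM_REG(modrm) + 8*rex_r" directly instead of the old
op_to_cfi_reg[][] table. A quick illustrative check (not objtool code):

#include <assert.h>

/* Mirrors the new numbering in cfi_regs.h above. */
enum { CFI_AX, CFI_CX, CFI_DX, CFI_BX, CFI_SP, CFI_BP, CFI_SI, CFI_DI };

int main(void)
{
	/* "push %r12" encodes as 41 54: opcode 0x54, REX.B = 1. */
	unsigned char op1 = 0x54, rex_b = 1;

	/* (op1 & 0x7) + 8*rex_b must name r12, i.e. CFI_SP + 8 = 12. */
	assert((op1 & 0x7) + 8*rex_b == CFI_SP + 8);
	return 0;
}
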
diff --git a/tools/objtool/arch/x86/include/arch/special.h b/tools/objtool/arch/x86/include/arch/special.h
index d818b2bffa02..14271cca0c74 100644
--- a/tools/objtool/arch/x86/include/arch/special.h
+++ b/tools/objtool/arch/x86/include/arch/special.h
@@ -10,7 +10,7 @@
#define JUMP_ORIG_OFFSET 0
#define JUMP_NEW_OFFSET 4
-#define ALT_ENTRY_SIZE 13
+#define ALT_ENTRY_SIZE 12
#define ALT_ORIG_OFFSET 0
#define ALT_NEW_OFFSET 4
#define ALT_FEATURE_OFFSET 8
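
ALT_ENTRY_SIZE dropping from 13 to 12 matches the struct alt_instr declared
in decode.c above (two s32s, a u16 and two u8s). A quick compile-time check,
assuming the usual GCC/Clang packed-attribute semantics:

#include <stdint.h>

struct alt_instr {
	int32_t  instr_offset;		/* original instruction */
	int32_t  repl_offset;		/* offset to replacement */
	uint16_t cpuid;			/* cpuid bit for replacement */
	uint8_t  instrlen;		/* length of original insn */
	uint8_t  replacementlen;	/* length of new insn */
} __attribute__((packed));

_Static_assert(sizeof(struct alt_instr) == 12, "ALT_ENTRY_SIZE must be 12");

int main(void) { return 0; }
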
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index c3a85d8f6c5c..8b38b5d6fec7 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -15,16 +15,23 @@
#include <subcmd/parse-options.h>
#include <string.h>
+#include <stdlib.h>
#include <objtool/builtin.h>
#include <objtool/objtool.h>
-bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats, validate_dup, vmlinux, mcount, noinstr;
+bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats,
+ validate_dup, vmlinux, mcount, noinstr, backup;
static const char * const check_usage[] = {
"objtool check [<options>] file.o",
NULL,
};
+static const char * const env_usage[] = {
+ "OBJTOOL_ARGS=\"<options>\"",
+ NULL,
+};
+
const struct option check_options[] = {
OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"),
OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
@@ -37,20 +44,44 @@ const struct option check_options[] = {
OPT_BOOLEAN('n', "noinstr", &noinstr, "noinstr validation for vmlinux.o"),
OPT_BOOLEAN('l', "vmlinux", &vmlinux, "vmlinux.o validation"),
OPT_BOOLEAN('M', "mcount", &mcount, "generate __mcount_loc"),
+ OPT_BOOLEAN('B', "backup", &backup, "create .orig files before modification"),
OPT_END(),
};
+int cmd_parse_options(int argc, const char **argv, const char * const usage[])
+{
+ const char *envv[16] = { };
+ char *env;
+ int envc;
+
+ env = getenv("OBJTOOL_ARGS");
+ if (env) {
+ envv[0] = "OBJTOOL_ARGS";
+ for (envc = 1; envc < ARRAY_SIZE(envv); ) {
+ envv[envc++] = env;
+ env = strchr(env, ' ');
+ if (!env)
+ break;
+ *env = '\0';
+ env++;
+ }
+
+ parse_options(envc, envv, check_options, env_usage, 0);
+ }
+
+ argc = parse_options(argc, argv, check_options, usage, 0);
+ if (argc != 1)
+ usage_with_options(usage, check_options);
+ return argc;
+}
+
int cmd_check(int argc, const char **argv)
{
const char *objname;
struct objtool_file *file;
int ret;
- argc = parse_options(argc, argv, check_options, check_usage, 0);
-
- if (argc != 1)
- usage_with_options(check_usage, check_options);
-
+ argc = cmd_parse_options(argc, argv, check_usage);
objname = argv[0];
file = objtool_open_read(objname);
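
cmd_parse_options() above splits OBJTOOL_ARGS on single spaces, in place,
and feeds the pieces through the normal option parser (envv[0] plays the
role of argv[0]). The splitting loop in isolation (illustrative and
standalone; the buffer stands in for getenv()):

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) ((int)(sizeof(a) / sizeof((a)[0])))

int main(void)
{
	const char *envv[16] = { "OBJTOOL_ARGS" };
	char buf[] = "--no-fp --mcount --backup";	/* getenv() stand-in */
	char *env = buf;
	int envc;

	for (envc = 1; envc < ARRAY_SIZE(envv); ) {
		envv[envc++] = env;
		env = strchr(env, ' ');	/* find next separator */
		if (!env)
			break;
		*env++ = '\0';		/* terminate piece, advance */
	}

	for (int i = 1; i < envc; i++)
		printf("option %d: %s\n", i, envv[i]);
	return 0;
}
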
diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
index 8273bbf7cebb..17f8b9307738 100644
--- a/tools/objtool/builtin-orc.c
+++ b/tools/objtool/builtin-orc.c
@@ -34,10 +34,7 @@ int cmd_orc(int argc, const char **argv)
struct objtool_file *file;
int ret;
- argc = parse_options(argc, argv, check_options, orc_usage, 0);
- if (argc != 1)
- usage_with_options(orc_usage, check_options);
-
+ argc = cmd_parse_options(argc, argv, orc_usage);
objname = argv[0];
file = objtool_open_read(objname);
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 5e5388a38e2a..9ed1a4cd00dc 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -108,6 +108,18 @@ static struct instruction *prev_insn_same_sym(struct objtool_file *file,
for (insn = next_insn_same_sec(file, insn); insn; \
insn = next_insn_same_sec(file, insn))
+static bool is_jump_table_jump(struct instruction *insn)
+{
+ struct alt_group *alt_group = insn->alt_group;
+
+ if (insn->jump_table)
+ return true;
+
+ /* Retpoline alternative for a jump table? */
+ return alt_group && alt_group->orig_group &&
+ alt_group->orig_group->first_insn->jump_table;
+}
+
static bool is_sibling_call(struct instruction *insn)
{
/*
@@ -120,7 +132,7 @@ static bool is_sibling_call(struct instruction *insn)
/* An indirect jump is either a sibling call or a jump to a table. */
if (insn->type == INSN_JUMP_DYNAMIC)
- return list_empty(&insn->alts);
+ return !is_jump_table_jump(insn);
/* add_jump_destinations() sets insn->call_dest for sibling calls. */
return (is_static_jump(insn) && insn->call_dest);
@@ -433,8 +445,7 @@ reachable:
static int create_static_call_sections(struct objtool_file *file)
{
- struct section *sec, *reloc_sec;
- struct reloc *reloc;
+ struct section *sec;
struct static_call_site *site;
struct instruction *insn;
struct symbol *key_sym;
@@ -452,7 +463,7 @@ static int create_static_call_sections(struct objtool_file *file)
return 0;
idx = 0;
- list_for_each_entry(insn, &file->static_call_list, static_call_node)
+ list_for_each_entry(insn, &file->static_call_list, call_node)
idx++;
sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
@@ -460,36 +471,18 @@ static int create_static_call_sections(struct objtool_file *file)
if (!sec)
return -1;
- reloc_sec = elf_create_reloc_section(file->elf, sec, SHT_RELA);
- if (!reloc_sec)
- return -1;
-
idx = 0;
- list_for_each_entry(insn, &file->static_call_list, static_call_node) {
+ list_for_each_entry(insn, &file->static_call_list, call_node) {
site = (struct static_call_site *)sec->data->d_buf + idx;
memset(site, 0, sizeof(struct static_call_site));
/* populate reloc for 'addr' */
- reloc = malloc(sizeof(*reloc));
-
- if (!reloc) {
- perror("malloc");
- return -1;
- }
- memset(reloc, 0, sizeof(*reloc));
-
- insn_to_reloc_sym_addend(insn->sec, insn->offset, reloc);
- if (!reloc->sym) {
- WARN_FUNC("static call tramp: missing containing symbol",
- insn->sec, insn->offset);
+ if (elf_add_reloc_to_insn(file->elf, sec,
+ idx * sizeof(struct static_call_site),
+ R_X86_64_PC32,
+ insn->sec, insn->offset))
return -1;
- }
-
- reloc->type = R_X86_64_PC32;
- reloc->offset = idx * sizeof(struct static_call_site);
- reloc->sec = reloc_sec;
- elf_add_reloc(file->elf, reloc);
/* find key symbol */
key_name = strdup(insn->call_dest->name);
@@ -526,32 +519,21 @@ static int create_static_call_sections(struct objtool_file *file)
free(key_name);
/* populate reloc for 'key' */
- reloc = malloc(sizeof(*reloc));
- if (!reloc) {
- perror("malloc");
+ if (elf_add_reloc(file->elf, sec,
+ idx * sizeof(struct static_call_site) + 4,
+ R_X86_64_PC32, key_sym,
+ is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
return -1;
- }
- memset(reloc, 0, sizeof(*reloc));
- reloc->sym = key_sym;
- reloc->addend = is_sibling_call(insn) ? STATIC_CALL_SITE_TAIL : 0;
- reloc->type = R_X86_64_PC32;
- reloc->offset = idx * sizeof(struct static_call_site) + 4;
- reloc->sec = reloc_sec;
- elf_add_reloc(file->elf, reloc);
idx++;
}
- if (elf_rebuild_reloc_section(file->elf, reloc_sec))
- return -1;
-
return 0;
}
static int create_mcount_loc_sections(struct objtool_file *file)
{
- struct section *sec, *reloc_sec;
- struct reloc *reloc;
+ struct section *sec;
unsigned long *loc;
struct instruction *insn;
int idx;
@@ -574,49 +556,21 @@ static int create_mcount_loc_sections(struct objtool_file *file)
if (!sec)
return -1;
- reloc_sec = elf_create_reloc_section(file->elf, sec, SHT_RELA);
- if (!reloc_sec)
- return -1;
-
idx = 0;
list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node) {
loc = (unsigned long *)sec->data->d_buf + idx;
memset(loc, 0, sizeof(unsigned long));
- reloc = malloc(sizeof(*reloc));
- if (!reloc) {
- perror("malloc");
+ if (elf_add_reloc_to_insn(file->elf, sec,
+ idx * sizeof(unsigned long),
+ R_X86_64_64,
+ insn->sec, insn->offset))
return -1;
- }
- memset(reloc, 0, sizeof(*reloc));
-
- if (insn->sec->sym) {
- reloc->sym = insn->sec->sym;
- reloc->addend = insn->offset;
- } else {
- reloc->sym = find_symbol_containing(insn->sec, insn->offset);
-
- if (!reloc->sym) {
- WARN("missing symbol for insn at offset 0x%lx\n",
- insn->offset);
- return -1;
- }
-
- reloc->addend = insn->offset - reloc->sym->offset;
- }
-
- reloc->type = R_X86_64_64;
- reloc->offset = idx * sizeof(unsigned long);
- reloc->sec = reloc_sec;
- elf_add_reloc(file->elf, reloc);
idx++;
}
- if (elf_rebuild_reloc_section(file->elf, reloc_sec))
- return -1;
-
return 0;
}
@@ -850,6 +804,30 @@ static int add_ignore_alternatives(struct objtool_file *file)
return 0;
}
+__weak bool arch_is_retpoline(struct symbol *sym)
+{
+ return false;
+}
+
+#define NEGATIVE_RELOC ((void *)-1L)
+
+static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
+{
+ if (insn->reloc == NEGATIVE_RELOC)
+ return NULL;
+
+ if (!insn->reloc) {
+ insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
+ insn->offset, insn->len);
+ if (!insn->reloc) {
+ insn->reloc = NEGATIVE_RELOC;
+ return NULL;
+ }
+ }
+
+ return insn->reloc;
+}
+
/*
* Find the destination instructions for all jumps.
*/
@@ -864,16 +842,14 @@ static int add_jump_destinations(struct objtool_file *file)
if (!is_static_jump(insn))
continue;
- reloc = find_reloc_by_dest_range(file->elf, insn->sec,
- insn->offset, insn->len);
+ reloc = insn_reloc(file, insn);
if (!reloc) {
dest_sec = insn->sec;
dest_off = arch_jump_destination(insn);
} else if (reloc->sym->type == STT_SECTION) {
dest_sec = reloc->sym->sec;
dest_off = arch_dest_reloc_offset(reloc->addend);
- } else if (!strncmp(reloc->sym->name, "__x86_indirect_thunk_", 21) ||
- !strncmp(reloc->sym->name, "__x86_retpoline_", 16)) {
+ } else if (arch_is_retpoline(reloc->sym)) {
/*
* Retpoline jumps are really dynamic jumps in
* disguise, so convert them accordingly.
@@ -883,13 +859,16 @@ static int add_jump_destinations(struct objtool_file *file)
else
insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
+ list_add_tail(&insn->call_node,
+ &file->retpoline_call_list);
+
insn->retpoline_safe = true;
continue;
} else if (insn->func) {
/* internal or external sibling call (with reloc) */
insn->call_dest = reloc->sym;
if (insn->call_dest->static_call_tramp) {
- list_add_tail(&insn->static_call_node,
+ list_add_tail(&insn->call_node,
&file->static_call_list);
}
continue;
@@ -951,7 +930,7 @@ static int add_jump_destinations(struct objtool_file *file)
/* internal sibling call (without reloc) */
insn->call_dest = insn->jump_dest->func;
if (insn->call_dest->static_call_tramp) {
- list_add_tail(&insn->static_call_node,
+ list_add_tail(&insn->call_node,
&file->static_call_list);
}
}
@@ -995,8 +974,7 @@ static int add_call_destinations(struct objtool_file *file)
if (insn->type != INSN_CALL)
continue;
- reloc = find_reloc_by_dest_range(file->elf, insn->sec,
- insn->offset, insn->len);
+ reloc = insn_reloc(file, insn);
if (!reloc) {
dest_off = arch_jump_destination(insn);
insn->call_dest = find_call_destination(insn->sec, dest_off);
@@ -1026,9 +1004,29 @@ static int add_call_destinations(struct objtool_file *file)
dest_off);
return -1;
}
+
+ } else if (arch_is_retpoline(reloc->sym)) {
+ /*
+ * Retpoline calls are really dynamic calls in
+ * disguise, so convert them accordingly.
+ */
+ insn->type = INSN_CALL_DYNAMIC;
+ insn->retpoline_safe = true;
+
+ list_add_tail(&insn->call_node,
+ &file->retpoline_call_list);
+
+ remove_insn_ops(insn);
+ continue;
+
} else
insn->call_dest = reloc->sym;
+ if (insn->call_dest && insn->call_dest->static_call_tramp) {
+ list_add_tail(&insn->call_node,
+ &file->static_call_list);
+ }
+
/*
* Many compilers cannot disable KCOV with a function attribute
* so they need a little help, NOP out any KCOV calls from noinstr
@@ -1175,8 +1173,7 @@ static int handle_group_alt(struct objtool_file *file,
* alternatives code can adjust the relative offsets
* accordingly.
*/
- alt_reloc = find_reloc_by_dest_range(file->elf, insn->sec,
- insn->offset, insn->len);
+ alt_reloc = insn_reloc(file, insn);
if (alt_reloc &&
!arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
@@ -1751,6 +1748,11 @@ static void mark_rodata(struct objtool_file *file)
file->rodata = found;
}
+__weak int arch_rewrite_retpolines(struct objtool_file *file)
+{
+ return 0;
+}
+
static int decode_sections(struct objtool_file *file)
{
int ret;
@@ -1772,10 +1774,17 @@ static int decode_sections(struct objtool_file *file)
if (ret)
return ret;
+ /*
+	 * Must be before add_{jump,call}_destinations().
+ */
ret = read_static_call_tramps(file);
if (ret)
return ret;
+ /*
+ * Must be before add_special_section_alts() as that depends on
+ * jump_dest being set.
+ */
ret = add_jump_destinations(file);
if (ret)
return ret;
@@ -1784,6 +1793,10 @@ static int decode_sections(struct objtool_file *file)
if (ret)
return ret;
+ /*
+	 * Must be before add_call_destinations(); it changes INSN_CALL to
+ * INSN_JUMP.
+ */
ret = read_intra_function_calls(file);
if (ret)
return ret;
@@ -1808,6 +1821,15 @@ static int decode_sections(struct objtool_file *file)
if (ret)
return ret;
+ /*
+ * Must be after add_special_section_alts(), since this will emit
+	 * alternatives. Must be after add_{jump,call}_destinations(), since
+ * those create the call insn lists.
+ */
+ ret = arch_rewrite_retpolines(file);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -1959,8 +1981,9 @@ static void restore_reg(struct cfi_state *cfi, unsigned char reg)
* 41 5d pop %r13
* c3 retq
*/
-static int update_cfi_state(struct instruction *insn, struct cfi_state *cfi,
- struct stack_op *op)
+static int update_cfi_state(struct instruction *insn,
+ struct instruction *next_insn,
+ struct cfi_state *cfi, struct stack_op *op)
{
struct cfi_reg *cfa = &cfi->cfa;
struct cfi_reg *regs = cfi->regs;
@@ -2019,7 +2042,7 @@ static int update_cfi_state(struct instruction *insn, struct cfi_state *cfi,
}
else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
- cfa->base == CFI_BP) {
+ (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
/*
* mov %rbp, %rsp
@@ -2161,7 +2184,7 @@ static int update_cfi_state(struct instruction *insn, struct cfi_state *cfi,
break;
}
- if (op->dest.reg == cfi->cfa.base) {
+ if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
WARN_FUNC("unsupported stack register modification",
insn->sec, insn->offset);
return -1;
@@ -2216,7 +2239,7 @@ static int update_cfi_state(struct instruction *insn, struct cfi_state *cfi,
cfa->offset = 0;
cfi->drap_offset = -1;
- } else if (regs[op->dest.reg].offset == -cfi->stack_size) {
+ } else if (cfi->stack_size == -regs[op->dest.reg].offset) {
/* pop %reg */
restore_reg(cfi, op->dest.reg);
@@ -2357,26 +2380,6 @@ static int update_cfi_state(struct instruction *insn, struct cfi_state *cfi,
break;
- case OP_DEST_LEAVE:
- if ((!cfi->drap && cfa->base != CFI_BP) ||
- (cfi->drap && cfa->base != cfi->drap_reg)) {
- WARN_FUNC("leave instruction with modified stack frame",
- insn->sec, insn->offset);
- return -1;
- }
-
- /* leave (mov %rbp, %rsp; pop %rbp) */
-
- cfi->stack_size = -cfi->regs[CFI_BP].offset - 8;
- restore_reg(cfi, CFI_BP);
-
- if (!cfi->drap) {
- cfa->base = CFI_SP;
- cfa->offset -= 8;
- }
-
- break;
-
case OP_DEST_MEM:
if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
WARN_FUNC("unknown stack-related memory operation",
@@ -2433,13 +2436,15 @@ static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn
return 0;
}
-static int handle_insn_ops(struct instruction *insn, struct insn_state *state)
+static int handle_insn_ops(struct instruction *insn,
+ struct instruction *next_insn,
+ struct insn_state *state)
{
struct stack_op *op;
list_for_each_entry(op, &insn->stack_ops, list) {
- if (update_cfi_state(insn, &state->cfi, op))
+ if (update_cfi_state(insn, next_insn, &state->cfi, op))
return 1;
if (!insn->alt_group)
@@ -2722,7 +2727,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
return 0;
}
- if (handle_insn_ops(insn, &state))
+ if (handle_insn_ops(insn, next_insn, &state))
return 1;
switch (insn->type) {
@@ -2746,11 +2751,6 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
if (dead_end_function(file, insn->call_dest))
return 0;
- if (insn->type == INSN_CALL && insn->call_dest->static_call_tramp) {
- list_add_tail(&insn->static_call_node,
- &file->static_call_list);
- }
-
break;
case INSN_JUMP_CONDITIONAL:
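
insn_reloc() above memoizes both outcomes of the reloc lookup: a hit is
stored directly, and a miss is remembered with the (void *)-1 sentinel so
the relatively expensive find_reloc_by_dest_range() runs at most once per
instruction. The pattern in isolation, with a hypothetical lookup function
standing in:

#include <stddef.h>

#define NEGATIVE_RELOC ((void *)-1L)

struct reloc;
struct instruction {
	struct reloc *reloc;	/* NULL = not looked up yet */
};

/* Hypothetical stand-in for find_reloc_by_dest_range(). */
static struct reloc *slow_lookup(struct instruction *insn)
{
	(void)insn;
	return NULL;
}

static struct reloc *insn_reloc_cached(struct instruction *insn)
{
	if (insn->reloc == NEGATIVE_RELOC)
		return NULL;				/* cached miss */

	if (!insn->reloc) {
		insn->reloc = slow_lookup(insn);
		if (!insn->reloc) {
			insn->reloc = NEGATIVE_RELOC;	/* remember miss */
			return NULL;
		}
	}

	return insn->reloc;				/* cached hit */
}

int main(void)
{
	struct instruction insn = { NULL };

	insn_reloc_cached(&insn);	/* performs the lookup */
	insn_reloc_cached(&insn);	/* served from the sentinel */
	return 0;
}
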
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 93fa833a49a5..d08f5f3670f8 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -211,32 +211,6 @@ struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, uns
return find_reloc_by_dest_range(elf, sec, offset, 1);
}
-void insn_to_reloc_sym_addend(struct section *sec, unsigned long offset,
- struct reloc *reloc)
-{
- if (sec->sym) {
- reloc->sym = sec->sym;
- reloc->addend = offset;
- return;
- }
-
- /*
- * The Clang assembler strips section symbols, so we have to reference
- * the function symbol instead:
- */
- reloc->sym = find_symbol_containing(sec, offset);
- if (!reloc->sym) {
- /*
- * Hack alert. This happens when we need to reference the NOP
- * pad insn immediately after the function.
- */
- reloc->sym = find_symbol_containing(sec, offset - 1);
- }
-
- if (reloc->sym)
- reloc->addend = offset - reloc->sym->offset;
-}
-
static int read_sections(struct elf *elf)
{
Elf_Scn *s = NULL;
@@ -316,12 +290,39 @@ static int read_sections(struct elf *elf)
return 0;
}
+static void elf_add_symbol(struct elf *elf, struct symbol *sym)
+{
+ struct list_head *entry;
+ struct rb_node *pnode;
+
+ sym->type = GELF_ST_TYPE(sym->sym.st_info);
+ sym->bind = GELF_ST_BIND(sym->sym.st_info);
+
+ sym->offset = sym->sym.st_value;
+ sym->len = sym->sym.st_size;
+
+ rb_add(&sym->node, &sym->sec->symbol_tree, symbol_to_offset);
+ pnode = rb_prev(&sym->node);
+ if (pnode)
+ entry = &rb_entry(pnode, struct symbol, node)->list;
+ else
+ entry = &sym->sec->symbol_list;
+ list_add(&sym->list, entry);
+ elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
+ elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
+
+ /*
+ * Don't store empty STT_NOTYPE symbols in the rbtree. They
+ * can exist within a function, confusing the sorting.
+ */
+ if (!sym->len)
+ rb_erase(&sym->node, &sym->sec->symbol_tree);
+}
+
static int read_symbols(struct elf *elf)
{
struct section *symtab, *symtab_shndx, *sec;
struct symbol *sym, *pfunc;
- struct list_head *entry;
- struct rb_node *pnode;
int symbols_nr, i;
char *coldstr;
Elf_Data *shndx_data = NULL;
@@ -366,9 +367,6 @@ static int read_symbols(struct elf *elf)
goto err;
}
- sym->type = GELF_ST_TYPE(sym->sym.st_info);
- sym->bind = GELF_ST_BIND(sym->sym.st_info);
-
if ((sym->sym.st_shndx > SHN_UNDEF &&
sym->sym.st_shndx < SHN_LORESERVE) ||
(shndx_data && sym->sym.st_shndx == SHN_XINDEX)) {
@@ -381,32 +379,14 @@ static int read_symbols(struct elf *elf)
sym->name);
goto err;
}
- if (sym->type == STT_SECTION) {
+ if (GELF_ST_TYPE(sym->sym.st_info) == STT_SECTION) {
sym->name = sym->sec->name;
sym->sec->sym = sym;
}
} else
sym->sec = find_section_by_index(elf, 0);
- sym->offset = sym->sym.st_value;
- sym->len = sym->sym.st_size;
-
- rb_add(&sym->node, &sym->sec->symbol_tree, symbol_to_offset);
- pnode = rb_prev(&sym->node);
- if (pnode)
- entry = &rb_entry(pnode, struct symbol, node)->list;
- else
- entry = &sym->sec->symbol_list;
- list_add(&sym->list, entry);
- elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
- elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
-
- /*
- * Don't store empty STT_NOTYPE symbols in the rbtree. They
- * can exist within a function, confusing the sorting.
- */
- if (!sym->len)
- rb_erase(&sym->node, &sym->sec->symbol_tree);
+ elf_add_symbol(elf, sym);
}
if (stats)
@@ -473,12 +453,73 @@ err:
return -1;
}
-void elf_add_reloc(struct elf *elf, struct reloc *reloc)
+static struct section *elf_create_reloc_section(struct elf *elf,
+ struct section *base,
+ int reltype);
+
+int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
+ unsigned int type, struct symbol *sym, int addend)
{
- struct section *sec = reloc->sec;
+ struct reloc *reloc;
- list_add_tail(&reloc->list, &sec->reloc_list);
+ if (!sec->reloc && !elf_create_reloc_section(elf, sec, SHT_RELA))
+ return -1;
+
+ reloc = malloc(sizeof(*reloc));
+ if (!reloc) {
+ perror("malloc");
+ return -1;
+ }
+ memset(reloc, 0, sizeof(*reloc));
+
+ reloc->sec = sec->reloc;
+ reloc->offset = offset;
+ reloc->type = type;
+ reloc->sym = sym;
+ reloc->addend = addend;
+
+ list_add_tail(&reloc->list, &sec->reloc->reloc_list);
elf_hash_add(elf->reloc_hash, &reloc->hash, reloc_hash(reloc));
+
+ sec->reloc->changed = true;
+
+ return 0;
+}
+
+int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
+ unsigned long offset, unsigned int type,
+ struct section *insn_sec, unsigned long insn_off)
+{
+ struct symbol *sym;
+ int addend;
+
+ if (insn_sec->sym) {
+ sym = insn_sec->sym;
+ addend = insn_off;
+
+ } else {
+ /*
+ * The Clang assembler strips section symbols, so we have to
+ * reference the function symbol instead:
+ */
+ sym = find_symbol_containing(insn_sec, insn_off);
+ if (!sym) {
+ /*
+ * Hack alert. This happens when we need to reference
+ * the NOP pad insn immediately after the function.
+ */
+ sym = find_symbol_containing(insn_sec, insn_off - 1);
+ }
+
+ if (!sym) {
+ WARN("can't find symbol containing %s+0x%lx", insn_sec->name, insn_off);
+ return -1;
+ }
+
+ addend = insn_off - sym->offset;
+ }
+
+ return elf_add_reloc(elf, sec, offset, type, sym, addend);
}
static int read_rel_reloc(struct section *sec, int i, struct reloc *reloc, unsigned int *symndx)
@@ -558,7 +599,9 @@ static int read_relocs(struct elf *elf)
return -1;
}
- elf_add_reloc(elf, reloc);
+ list_add_tail(&reloc->list, &sec->reloc_list);
+ elf_hash_add(elf->reloc_hash, &reloc->hash, reloc_hash(reloc));
+
nr_reloc++;
}
max_reloc = max(max_reloc, nr_reloc);
@@ -636,13 +679,108 @@ err:
return NULL;
}
+static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
+{
+ Elf_Data *data;
+ Elf_Scn *s;
+ int len;
+
+ if (!strtab)
+ strtab = find_section_by_name(elf, ".strtab");
+ if (!strtab) {
+ WARN("can't find .strtab section");
+ return -1;
+ }
+
+ s = elf_getscn(elf->elf, strtab->idx);
+ if (!s) {
+ WARN_ELF("elf_getscn");
+ return -1;
+ }
+
+ data = elf_newdata(s);
+ if (!data) {
+ WARN_ELF("elf_newdata");
+ return -1;
+ }
+
+ data->d_buf = str;
+ data->d_size = strlen(str) + 1;
+ data->d_align = 1;
+
+ len = strtab->len;
+ strtab->len += data->d_size;
+ strtab->changed = true;
+
+ return len;
+}
+
+struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
+{
+ struct section *symtab;
+ struct symbol *sym;
+ Elf_Data *data;
+ Elf_Scn *s;
+
+ sym = malloc(sizeof(*sym));
+ if (!sym) {
+ perror("malloc");
+ return NULL;
+ }
+ memset(sym, 0, sizeof(*sym));
+
+ sym->name = strdup(name);
+
+ sym->sym.st_name = elf_add_string(elf, NULL, sym->name);
+ if (sym->sym.st_name == -1)
+ return NULL;
+
+ sym->sym.st_info = GELF_ST_INFO(STB_GLOBAL, STT_NOTYPE);
+ // st_other 0
+ // st_shndx 0
+ // st_value 0
+ // st_size 0
+
+ symtab = find_section_by_name(elf, ".symtab");
+ if (!symtab) {
+ WARN("can't find .symtab");
+ return NULL;
+ }
+
+ s = elf_getscn(elf->elf, symtab->idx);
+ if (!s) {
+ WARN_ELF("elf_getscn");
+ return NULL;
+ }
+
+ data = elf_newdata(s);
+ if (!data) {
+ WARN_ELF("elf_newdata");
+ return NULL;
+ }
+
+ data->d_buf = &sym->sym;
+ data->d_size = sizeof(sym->sym);
+ data->d_align = 1;
+
+ sym->idx = symtab->len / sizeof(sym->sym);
+
+ symtab->len += data->d_size;
+ symtab->changed = true;
+
+ sym->sec = find_section_by_index(elf, 0);
+
+ elf_add_symbol(elf, sym);
+
+ return sym;
+}
+
struct section *elf_create_section(struct elf *elf, const char *name,
unsigned int sh_flags, size_t entsize, int nr)
{
struct section *sec, *shstrtab;
size_t size = entsize * nr;
Elf_Scn *s;
- Elf_Data *data;
sec = malloc(sizeof(*sec));
if (!sec) {
@@ -699,7 +837,6 @@ struct section *elf_create_section(struct elf *elf, const char *name,
sec->sh.sh_addralign = 1;
sec->sh.sh_flags = SHF_ALLOC | sh_flags;
-
/* Add section name to .shstrtab (or .strtab for Clang) */
shstrtab = find_section_by_name(elf, ".shstrtab");
if (!shstrtab)
@@ -708,27 +845,9 @@ struct section *elf_create_section(struct elf *elf, const char *name,
WARN("can't find .shstrtab or .strtab section");
return NULL;
}
-
- s = elf_getscn(elf->elf, shstrtab->idx);
- if (!s) {
- WARN_ELF("elf_getscn");
- return NULL;
- }
-
- data = elf_newdata(s);
- if (!data) {
- WARN_ELF("elf_newdata");
+ sec->sh.sh_name = elf_add_string(elf, shstrtab, sec->name);
+ if (sec->sh.sh_name == -1)
return NULL;
- }
-
- data->d_buf = sec->name;
- data->d_size = strlen(name) + 1;
- data->d_align = 1;
-
- sec->sh.sh_name = shstrtab->len;
-
- shstrtab->len += strlen(name) + 1;
- shstrtab->changed = true;
list_add_tail(&sec->list, &elf->sections);
elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
@@ -799,7 +918,7 @@ static struct section *elf_create_rela_reloc_section(struct elf *elf, struct sec
return sec;
}
-struct section *elf_create_reloc_section(struct elf *elf,
+static struct section *elf_create_reloc_section(struct elf *elf,
struct section *base,
int reltype)
{
@@ -873,14 +992,11 @@ static int elf_rebuild_rela_reloc_section(struct section *sec, int nr)
return 0;
}
-int elf_rebuild_reloc_section(struct elf *elf, struct section *sec)
+static int elf_rebuild_reloc_section(struct elf *elf, struct section *sec)
{
struct reloc *reloc;
int nr;
- sec->changed = true;
- elf->changed = true;
-
nr = 0;
list_for_each_entry(reloc, &sec->reloc_list, list)
nr++;
@@ -944,9 +1060,15 @@ int elf_write(struct elf *elf)
struct section *sec;
Elf_Scn *s;
- /* Update section headers for changed sections: */
+ /* Update changed relocation sections and section headers: */
list_for_each_entry(sec, &elf->sections, list) {
if (sec->changed) {
+ if (sec->base &&
+ elf_rebuild_reloc_section(elf, sec)) {
+ WARN("elf_rebuild_reloc_section");
+ return -1;
+ }
+
s = elf_getscn(elf->elf, sec->idx);
if (!s) {
WARN_ELF("elf_getscn");
@@ -958,6 +1080,7 @@ int elf_write(struct elf *elf)
}
sec->changed = false;
+ elf->changed = true;
}
}
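
elf_add_reloc_to_insn() above reduces "section + instruction offset" to
"symbol + addend": with a section symbol the addend is the raw offset,
otherwise it is the offset within the containing function. A toy check of
the arithmetic (illustrative values only):

#include <assert.h>

int main(void)
{
	unsigned long sym_offset = 0x40;	/* function start in .text */
	unsigned long insn_off   = 0x5c;	/* instruction address */
	unsigned long addend     = insn_off - sym_offset;

	/* The reloc target sym + addend must land on the instruction. */
	assert(sym_offset + addend == insn_off);
	assert(addend == 0x1c);
	return 0;
}
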
diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h
index 6ff0685f5cc5..062bb6e9b865 100644
--- a/tools/objtool/include/objtool/arch.h
+++ b/tools/objtool/include/objtool/arch.h
@@ -35,7 +35,6 @@ enum op_dest_type {
OP_DEST_MEM,
OP_DEST_PUSH,
OP_DEST_PUSHF,
- OP_DEST_LEAVE,
};
struct op_dest {
@@ -86,4 +85,8 @@ const char *arch_nop_insn(int len);
int arch_decode_hint_reg(struct instruction *insn, u8 sp_reg);
+bool arch_is_retpoline(struct symbol *sym);
+
+int arch_rewrite_retpolines(struct objtool_file *file);
+
#endif /* _ARCH_H */
diff --git a/tools/objtool/include/objtool/builtin.h b/tools/objtool/include/objtool/builtin.h
index 2502bb27de17..15ac0b7d3d6a 100644
--- a/tools/objtool/include/objtool/builtin.h
+++ b/tools/objtool/include/objtool/builtin.h
@@ -8,7 +8,10 @@
#include <subcmd/parse-options.h>
extern const struct option check_options[];
-extern bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats, validate_dup, vmlinux, mcount, noinstr;
+extern bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats,
+ validate_dup, vmlinux, mcount, noinstr, backup;
+
+extern int cmd_parse_options(int argc, const char **argv, const char * const usage[]);
extern int cmd_check(int argc, const char **argv);
extern int cmd_orc(int argc, const char **argv);
diff --git a/tools/objtool/include/objtool/check.h b/tools/objtool/include/objtool/check.h
index f5be798107bc..56d50bc50c10 100644
--- a/tools/objtool/include/objtool/check.h
+++ b/tools/objtool/include/objtool/check.h
@@ -39,7 +39,7 @@ struct alt_group {
struct instruction {
struct list_head list;
struct hlist_node hash;
- struct list_head static_call_node;
+ struct list_head call_node;
struct list_head mcount_loc_node;
struct section *sec;
unsigned long offset;
@@ -56,6 +56,7 @@ struct instruction {
struct instruction *jump_dest;
struct instruction *first_jump_src;
struct reloc *jump_table;
+ struct reloc *reloc;
struct list_head alts;
struct symbol *func;
struct list_head stack_ops;
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
index e6890cc70a25..45e5ede363b0 100644
--- a/tools/objtool/include/objtool/elf.h
+++ b/tools/objtool/include/objtool/elf.h
@@ -122,12 +122,18 @@ static inline u32 reloc_hash(struct reloc *reloc)
struct elf *elf_open_read(const char *name, int flags);
struct section *elf_create_section(struct elf *elf, const char *name, unsigned int sh_flags, size_t entsize, int nr);
-struct section *elf_create_reloc_section(struct elf *elf, struct section *base, int reltype);
-void elf_add_reloc(struct elf *elf, struct reloc *reloc);
+
+int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
+ unsigned int type, struct symbol *sym, int addend);
+int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
+ unsigned long offset, unsigned int type,
+ struct section *insn_sec, unsigned long insn_off);
+
int elf_write_insn(struct elf *elf, struct section *sec,
unsigned long offset, unsigned int len,
const char *insn);
int elf_write_reloc(struct elf *elf, struct reloc *reloc);
+struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name);
int elf_write(struct elf *elf);
void elf_close(struct elf *elf);
@@ -140,9 +146,6 @@ struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, uns
struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *sec,
unsigned long offset, unsigned int len);
struct symbol *find_func_containing(struct section *sec, unsigned long offset);
-void insn_to_reloc_sym_addend(struct section *sec, unsigned long offset,
- struct reloc *reloc);
-int elf_rebuild_reloc_section(struct elf *elf, struct section *sec);
#define for_each_sec(file, sec) \
list_for_each_entry(sec, &file->elf->sections, list)
diff --git a/tools/objtool/include/objtool/objtool.h b/tools/objtool/include/objtool/objtool.h
index e68e37476c15..e4084afb2304 100644
--- a/tools/objtool/include/objtool/objtool.h
+++ b/tools/objtool/include/objtool/objtool.h
@@ -18,6 +18,7 @@ struct objtool_file {
struct elf *elf;
struct list_head insn_list;
DECLARE_HASHTABLE(insn_hash, 20);
+ struct list_head retpoline_call_list;
struct list_head static_call_list;
struct list_head mcount_loc_list;
bool ignore_unreachables, c_file, hints, rodata;
diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
index 7b97ce499405..e21db8bce493 100644
--- a/tools/objtool/objtool.c
+++ b/tools/objtool/objtool.c
@@ -17,6 +17,7 @@
#include <stdbool.h>
#include <string.h>
#include <stdlib.h>
+#include <unistd.h>
#include <subcmd/exec-cmd.h>
#include <subcmd/pager.h>
#include <linux/kernel.h>
@@ -44,6 +45,64 @@ bool help;
const char *objname;
static struct objtool_file file;
+static bool objtool_create_backup(const char *_objname)
+{
+ int len = strlen(_objname);
+ char *buf, *base, *name = malloc(len+6);
+ int s, d, l, t;
+
+ if (!name) {
+ perror("failed backup name malloc");
+ return false;
+ }
+
+ strcpy(name, _objname);
+ strcpy(name + len, ".orig");
+
+ d = open(name, O_CREAT|O_WRONLY|O_TRUNC, 0644);
+ if (d < 0) {
+ perror("failed to create backup file");
+ return false;
+ }
+
+ s = open(_objname, O_RDONLY);
+ if (s < 0) {
+ perror("failed to open orig file");
+ return false;
+ }
+
+ buf = malloc(4096);
+ if (!buf) {
+ perror("failed backup data malloc");
+ return false;
+ }
+
+ while ((l = read(s, buf, 4096)) > 0) {
+ base = buf;
+ do {
+ t = write(d, base, l);
+ if (t < 0) {
+ perror("failed backup write");
+ return false;
+ }
+ base += t;
+ l -= t;
+ } while (l);
+ }
+
+ if (l < 0) {
+ perror("failed backup read");
+ return false;
+ }
+
+ free(name);
+ free(buf);
+ close(d);
+ close(s);
+
+ return true;
+}
+
struct objtool_file *objtool_open_read(const char *_objname)
{
if (objname) {
@@ -59,8 +118,14 @@ struct objtool_file *objtool_open_read(const char *_objname)
if (!file.elf)
return NULL;
+ if (backup && !objtool_create_backup(objname)) {
+ WARN("can't create backup file");
+ return NULL;
+ }
+
INIT_LIST_HEAD(&file.insn_list);
hash_init(file.insn_hash);
+ INIT_LIST_HEAD(&file.retpoline_call_list);
INIT_LIST_HEAD(&file.static_call_list);
INIT_LIST_HEAD(&file.mcount_loc_list);
file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment");
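
objtool_create_backup() above copies the object in 4k chunks and loops on
write() because a single write may be short. The inner loop's invariant in a
standalone sketch (illustrative; error reporting trimmed):

#include <unistd.h>

/* Write all of buf[0..len) to fd, tolerating short writes. */
static int write_all(int fd, const char *buf, ssize_t len)
{
	while (len) {
		ssize_t t = write(fd, buf, len);

		if (t < 0)
			return -1;	/* caller reports the error */
		buf += t;		/* advance past what was written */
		len -= t;
	}
	return 0;
}

int main(void)
{
	return write_all(1, "hello\n", 6);	/* 1 = stdout */
}
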
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index 738aa5021bc4..dc9b7dd314b0 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -82,12 +82,11 @@ static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi)
}
static int write_orc_entry(struct elf *elf, struct section *orc_sec,
- struct section *ip_rsec, unsigned int idx,
+ struct section *ip_sec, unsigned int idx,
struct section *insn_sec, unsigned long insn_off,
struct orc_entry *o)
{
struct orc_entry *orc;
- struct reloc *reloc;
/* populate ORC data */
orc = (struct orc_entry *)orc_sec->data->d_buf + idx;
@@ -96,25 +95,9 @@ static int write_orc_entry(struct elf *elf, struct section *orc_sec,
orc->bp_offset = bswap_if_needed(orc->bp_offset);
/* populate reloc for ip */
- reloc = malloc(sizeof(*reloc));
- if (!reloc) {
- perror("malloc");
+ if (elf_add_reloc_to_insn(elf, ip_sec, idx * sizeof(int), R_X86_64_PC32,
+ insn_sec, insn_off))
return -1;
- }
- memset(reloc, 0, sizeof(*reloc));
-
- insn_to_reloc_sym_addend(insn_sec, insn_off, reloc);
- if (!reloc->sym) {
- WARN("missing symbol for insn at offset 0x%lx",
- insn_off);
- return -1;
- }
-
- reloc->type = R_X86_64_PC32;
- reloc->offset = idx * sizeof(int);
- reloc->sec = ip_rsec;
-
- elf_add_reloc(elf, reloc);
return 0;
}
@@ -153,7 +136,7 @@ static unsigned long alt_group_len(struct alt_group *alt_group)
int orc_create(struct objtool_file *file)
{
- struct section *sec, *ip_rsec, *orc_sec;
+ struct section *sec, *orc_sec;
unsigned int nr = 0, idx = 0;
struct orc_list_entry *entry;
struct list_head orc_list;
@@ -242,20 +225,14 @@ int orc_create(struct objtool_file *file)
sec = elf_create_section(file->elf, ".orc_unwind_ip", 0, sizeof(int), nr);
if (!sec)
return -1;
- ip_rsec = elf_create_reloc_section(file->elf, sec, SHT_RELA);
- if (!ip_rsec)
- return -1;
/* Write ORC entries to sections: */
list_for_each_entry(entry, &orc_list, list) {
- if (write_orc_entry(file->elf, orc_sec, ip_rsec, idx++,
+ if (write_orc_entry(file->elf, orc_sec, sec, idx++,
entry->insn_sec, entry->insn_off,
&entry->orc))
return -1;
}
- if (elf_rebuild_reloc_section(file->elf, ip_rsec))
- return -1;
-
return 0;
}
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index 2c7fbda7b055..07b21cfabf5c 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -106,6 +106,14 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
return -1;
}
+ /*
+ * Skip retpoline .altinstr_replacement... we already rewrite the
+ * instructions for retpolines anyway, see arch_is_retpoline()
+ * usage in add_{call,jump}_destinations().
+ */
+ if (arch_is_retpoline(new_reloc->sym))
+ return 1;
+
alt->new_sec = new_reloc->sym->sec;
alt->new_off = (unsigned int)new_reloc->addend;
@@ -154,7 +162,9 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
memset(alt, 0, sizeof(*alt));
ret = get_alt_entry(elf, entry, sec, idx, alt);
- if (ret)
+ if (ret > 0)
+ continue;
+ if (ret < 0)
return ret;
list_add_tail(&alt->list, alts);
diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh
index 606a4b5e929f..105a291ff8e7 100755
--- a/tools/objtool/sync-check.sh
+++ b/tools/objtool/sync-check.sh
@@ -10,17 +10,21 @@ FILES="include/linux/objtool.h"
if [ "$SRCARCH" = "x86" ]; then
FILES="$FILES
+arch/x86/include/asm/nops.h
arch/x86/include/asm/inat_types.h
arch/x86/include/asm/orc_types.h
arch/x86/include/asm/emulate_prefix.h
arch/x86/lib/x86-opcode-map.txt
arch/x86/tools/gen-insn-attr-x86.awk
include/linux/static_call_types.h
-arch/x86/include/asm/inat.h -I '^#include [\"<]\(asm/\)*inat_types.h[\">]'
-arch/x86/include/asm/insn.h -I '^#include [\"<]\(asm/\)*inat.h[\">]'
-arch/x86/lib/inat.c -I '^#include [\"<]\(../include/\)*asm/insn.h[\">]'
-arch/x86/lib/insn.c -I '^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]' -I '^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]'
"
+
+SYNC_CHECK_FILES='
+arch/x86/include/asm/inat.h
+arch/x86/include/asm/insn.h
+arch/x86/lib/inat.c
+arch/x86/lib/insn.c
+'
fi
check_2 () {
@@ -63,3 +67,9 @@ while read -r file_entry; do
done <<EOF
$FILES
EOF
+
+if [ "$SRCARCH" = "x86" ]; then
+ for i in $SYNC_CHECK_FILES; do
+ check $i '-I "^.*\/\*.*__ignore_sync_check__.*\*\/.*$"'
+ done
+fi
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index f3f84781fd74..e555e9729758 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -20,6 +20,7 @@ perf.data.old
output.svg
perf-archive
perf-with-kcore
+perf-iostat
tags
TAGS
cscope*
diff --git a/tools/perf/Documentation/intel-hybrid.txt b/tools/perf/Documentation/intel-hybrid.txt
new file mode 100644
index 000000000000..07f0aa3bf682
--- /dev/null
+++ b/tools/perf/Documentation/intel-hybrid.txt
@@ -0,0 +1,214 @@
+Intel hybrid support
+--------------------
+Support for Intel hybrid events within perf tools.
+
+Some Intel platforms, such as AlderLake, are hybrid platforms consisting
+of atom cpus and core cpus, and each cpu type has a dedicated event list.
+Some events are available only on core cpus, some only on atom cpus,
+and some are available on both.
+
+The kernel exports two new cpu pmus via sysfs:
+/sys/devices/cpu_core
+/sys/devices/cpu_atom
+
+A 'cpus' file is created under each directory. For example,
+
+cat /sys/devices/cpu_core/cpus
+0-15
+
+cat /sys/devices/cpu_atom/cpus
+16-23
+
+This indicates that cpu0-cpu15 are core cpus and cpu16-cpu23 are atom cpus.
+
+Quickstart
+
+List hybrid events
+------------------
+
+As before, use perf-list to list the symbolic events.
+
+perf list
+
+inst_retired.any
+ [Fixed Counter: Counts the number of instructions retired. Unit: cpu_atom]
+inst_retired.any
+ [Number of instructions retired. Fixed Counter - architectural event. Unit: cpu_core]
+
+The 'Unit: xxx' suffix is added to the brief description to indicate
+which pmu the event belongs to. The same event name can be supported
+on different pmus.
+
+Enable hybrid event with a specific pmu
+---------------------------------------
+
+To enable a core-only or atom-only event, the following syntax is supported:
+
+ cpu_core/<event name>/
+or
+ cpu_atom/<event name>/
+
+For example, count the 'cycles' event on core cpus.
+
+ perf stat -e cpu_core/cycles/
+
+Create two events for one hardware event automatically
+------------------------------------------------------
+
+When creating an event that is available on both atom and core, two
+events are created automatically: one for atom, the other for core.
+Most hardware events and cache events are available on both cpu_core
+and cpu_atom.
+
+Hardware events have pre-defined configs (e.g. 0 for cycles). But on a
+hybrid platform, the kernel needs to know where the event comes from
+(atom or core). The original perf event type PERF_TYPE_HARDWARE can't
+carry pmu information, so the type is extended to be a PMU-aware type.
+The PMU type ID is stored in attr.config[63:32].
+
+The PMU type ID is retrieved from sysfs:
+/sys/devices/cpu_atom/type
+/sys/devices/cpu_core/type
+
+The new attr.config layout for PERF_TYPE_HARDWARE:
+
+PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA
+ AA: hardware event ID
+ EEEEEEEE: PMU type ID
+
+Cache events are similar. The type PERF_TYPE_HW_CACHE is likewise extended
+to be a PMU-aware type, with the PMU type ID stored in attr.config[63:32].
+
+The new attr.config layout for PERF_TYPE_HW_CACHE:
+
+PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB
+ BB: hardware cache ID
+ CC: hardware cache op ID
+ DD: hardware cache op result ID
+ EEEEEEEE: PMU type ID
+
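+As an illustrative check (plain C, not taken from the perf sources), the
+documented 'cycles' config on the core pmu can be assembled as:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned long long pmu_type_id = 0x4;   /* cpu_core/type */
+          unsigned long long hw_event_id = 0x0;   /* cycles */
+
+          /* PMU type ID in config[63:32], event ID in config[31:0]. */
+          printf("config 0x%llx\n",
+                 (pmu_type_id << 32) | hw_event_id);  /* 0x400000000 */
+          return 0;
+  }
+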
+When enabling a hardware event without a specified pmu, such as
+perf stat -e cycles -a (system-wide in this example), two events
+are created automatically.
+
+ ------------------------------------------------------------
+ perf_event_attr:
+ size 120
+ config 0x400000000
+ sample_type IDENTIFIER
+ read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING
+ disabled 1
+ inherit 1
+ exclude_guest 1
+ ------------------------------------------------------------
+
+and
+
+ ------------------------------------------------------------
+ perf_event_attr:
+ size 120
+ config 0x800000000
+ sample_type IDENTIFIER
+ read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING
+ disabled 1
+ inherit 1
+ exclude_guest 1
+ ------------------------------------------------------------
+
+Type 0 is PERF_TYPE_HARDWARE.
+The 0x4 in 0x400000000 indicates the cpu_core pmu.
+The 0x8 in 0x800000000 indicates the cpu_atom pmu (the atom pmu type ID is
+assigned dynamically rather than being fixed).
+
+The kernel creates 'cycles' (0x400000000) on cpu0-cpu15 (core cpus),
+and creates 'cycles' (0x800000000) on cpu16-cpu23 (atom cpus).
+
+The perf-stat result displays two events:
+
+ Performance counter stats for 'system wide':
+
+ 6,744,979 cpu_core/cycles/
+ 1,965,552 cpu_atom/cycles/
+
+The first 'cycles' is the core event, the second is the atom event.
+
+Thread mode example:
+--------------------
+
+perf-stat reports the scaled counts for hybrid events, with a percentage
+displayed. The percentage is the event's running time / enabling time.
+
+In one example, 'triad_loop' runs on cpu16 (an atom core); the scaled
+value for core cycles is 233,066,666 and the percentage is 0.43%.
+
+perf stat -e cycles -- taskset -c 16 ./triad_loop
+
+As before, two events are created.
+
+------------------------------------------------------------
+perf_event_attr:
+ size 120
+ config 0x400000000
+ sample_type IDENTIFIER
+ read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING
+ disabled 1
+ inherit 1
+ enable_on_exec 1
+ exclude_guest 1
+------------------------------------------------------------
+
+and
+
+------------------------------------------------------------
+perf_event_attr:
+ size 120
+ config 0x800000000
+ sample_type IDENTIFIER
+ read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING
+ disabled 1
+ inherit 1
+ enable_on_exec 1
+ exclude_guest 1
+------------------------------------------------------------
+
+ Performance counter stats for 'taskset -c 16 ./triad_loop':
+
+ 233,066,666 cpu_core/cycles/ (0.43%)
+ 604,097,080 cpu_atom/cycles/ (99.57%)
+
+perf-record:
+------------
+
+If no '-e' is specified for perf record on a hybrid platform, two
+default 'cycles' events are created and added to the event list: one
+for core, the other for atom.
+
+perf-stat:
+----------
+
+If no '-e' is specified for perf stat on a hybrid platform, then
+besides the software events, the following events are created and
+added to the event list in order:
+
+cpu_core/cycles/,
+cpu_atom/cycles/,
+cpu_core/instructions/,
+cpu_atom/instructions/,
+cpu_core/branches/,
+cpu_atom/branches/,
+cpu_core/branch-misses/,
+cpu_atom/branch-misses/
+
+Of course, both perf-stat and perf-record support enabling a
+hybrid event with a specific pmu.
+
+e.g.
+perf stat -e cpu_core/cycles/
+perf stat -e cpu_atom/cycles/
+perf stat -e cpu_core/r1a/
+perf stat -e cpu_atom/L1-icache-loads/
+perf stat -e cpu_core/cycles/,cpu_atom/instructions/
+perf stat -e '{cpu_core/cycles/,cpu_core/instructions/}'
+
+But '{cpu_core/cycles/,cpu_atom/instructions/}' will emit a
+warning and disable grouping, because the pmus in the group
+do not match (cpu_core vs. cpu_atom).
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index 1b5042f134a8..80c1be5d566c 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -124,6 +124,13 @@ OPTIONS
--group::
Show event group information together
+--demangle::
+ Demangle symbol names to human-readable form. It's enabled by default;
+ disable with --no-demangle.
+
+--demangle-kernel::
+ Demangle kernel symbol names to human-readable form (for C++ kernels).
+
--percent-type::
Set annotation percent type from following choices:
global-period, local-period, global-hits, local-hits
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
index bb167e32a1d7..cd8ce6e8ec12 100644
--- a/tools/perf/Documentation/perf-buildid-cache.txt
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -57,7 +57,7 @@ OPTIONS
-u::
--update=::
Update specified file of the cache. Note that this doesn't remove
- older entires since those may be still needed for annotating old
+ older entries since those may be still needed for annotating old
(or remote) perf.data. Only if there is already a cache which has
exactly same build-id, that is replaced by new one. It can be used
to update kallsyms and kernel dso to vmlinux in order to support
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 153bde14bbe0..b0872c801866 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -123,6 +123,7 @@ Given a $HOME/.perfconfig like this:
queue-size = 0
children = true
group = true
+ skip-empty = true
[llvm]
dump-obj = true
@@ -393,6 +394,12 @@ annotate.*::
This option works with tui, stdio2 browsers.
+ annotate.demangle::
+ Demangle symbol names to human readable form. Default is 'true'.
+
+ annotate.demangle_kernel::
+ Demangle kernel symbol names to human readable form. Default is 'true'.
+
hist.*::
hist.percentage::
This option control the way to calculate overhead of filtered entries -
@@ -525,6 +532,10 @@ report.*::
0.07% 0.00% noploop ld-2.15.so [.] strcmp
0.03% 0.00% noploop [kernel.kallsyms] [k] timerqueue_del
+ report.skip-empty::
+		This option changes the default 'perf report --stat' behavior for
+		empty results. If set to true, 'perf report --stat' will not show
+		0 stats.
+
top.*::
top.children::
Same as 'report.children'. So if it is enabled, the output of 'top'
diff --git a/tools/perf/Documentation/perf-data.txt b/tools/perf/Documentation/perf-data.txt
index 726b9bc9e1a7..417bf17e265c 100644
--- a/tools/perf/Documentation/perf-data.txt
+++ b/tools/perf/Documentation/perf-data.txt
@@ -17,7 +17,7 @@ Data file related processing.
COMMANDS
--------
convert::
- Converts perf data file into another format (only CTF [1] format is support by now).
+ Converts perf data file into another format.
It's possible to set data-convert debug variable to get debug messages from conversion,
like:
perf --debug data-convert data convert ...
@@ -27,6 +27,9 @@ OPTIONS for 'convert'
--to-ctf::
Triggers the CTF conversion, specify the path of CTF data directory.
+--to-json::
+ Triggers JSON conversion. Specify the JSON filename to output.
+
--tod::
Convert time to wall clock time.
diff --git a/tools/perf/Documentation/perf-iostat.txt b/tools/perf/Documentation/perf-iostat.txt
new file mode 100644
index 000000000000..165176944031
--- /dev/null
+++ b/tools/perf/Documentation/perf-iostat.txt
@@ -0,0 +1,88 @@
+perf-iostat(1)
+===============
+
+NAME
+----
+perf-iostat - Show I/O performance metrics
+
+SYNOPSIS
+--------
+[verse]
+'perf iostat' list
+'perf iostat' <ports> -- <command> [<options>]
+
+DESCRIPTION
+-----------
+The mode is intended to provide four I/O performance metrics for each PCIe root port:
+
+- Inbound Read - I/O devices below root port read from the host memory, in MB
+
+- Inbound Write - I/O devices below root port write to the host memory, in MB
+
+- Outbound Read - CPU reads from I/O devices below root port, in MB
+
+- Outbound Write - CPU writes to I/O devices below root port, in MB
+
+OPTIONS
+-------
+<command>...::
+ Any command you can specify in a shell.
+
+list::
+ List all PCIe root ports.
+
+<ports>::
+	Select the root ports for monitoring. A comma-separated list is supported.
+
+EXAMPLES
+--------
+
+1. List all PCIe root ports (example for 2-S platform):
+
+ $ perf iostat list
+ S0-uncore_iio_0<0000:00>
+ S1-uncore_iio_0<0000:80>
+ S0-uncore_iio_1<0000:17>
+ S1-uncore_iio_1<0000:85>
+ S0-uncore_iio_2<0000:3a>
+ S1-uncore_iio_2<0000:ae>
+ S0-uncore_iio_3<0000:5d>
+ S1-uncore_iio_3<0000:d7>
+
+2. Collect metrics for all PCIe root ports:
+
+ $ perf iostat -- dd if=/dev/zero of=/dev/nvme0n1 bs=1M oflag=direct
+ 357708+0 records in
+ 357707+0 records out
+ 375083606016 bytes (375 GB, 349 GiB) copied, 215.974 s, 1.7 GB/s
+
+ Performance counter stats for 'system wide':
+
+ port Inbound Read(MB) Inbound Write(MB) Outbound Read(MB) Outbound Write(MB)
+ 0000:00 1 0 2 3
+ 0000:80 0 0 0 0
+ 0000:17 352552 43 0 21
+ 0000:85 0 0 0 0
+ 0000:3a 3 0 0 0
+ 0000:ae 0 0 0 0
+ 0000:5d 0 0 0 0
+ 0000:d7 0 0 0 0
+
+3. Collect metrics for comma-separated list of PCIe root ports:
+
+ $ perf iostat 0000:17,0:3a -- dd if=/dev/zero of=/dev/nvme0n1 bs=1M oflag=direct
+ 357708+0 records in
+ 357707+0 records out
+ 375083606016 bytes (375 GB, 349 GiB) copied, 197.08 s, 1.9 GB/s
+
+ Performance counter stats for 'system wide':
+
+ port Inbound Read(MB) Inbound Write(MB) Outbound Read(MB) Outbound Write(MB)
+ 0000:17 358559 44 0 22
+ 0000:3a 3 2 0 0
+
+ 197.081983474 seconds time elapsed
+
+SEE ALSO
+--------
+linkperf:perf-stat[1]
\ No newline at end of file
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index f3161c9673e9..d71bac847936 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -695,6 +695,7 @@ measurements:
wait -n ${perf_pid}
exit $?
+include::intel-hybrid.txt[]
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index f546b5e9db05..24efc0583c93 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -112,6 +112,8 @@ OPTIONS
- ins_lat: Instruction latency in core cycles. This is the global instruction
latency
- local_ins_lat: Local instruction latency version
+	- p_stage_cyc: The number of cycles spent in a pipeline stage. Currently
+	  supported only on powerpc.
By default, comm, dso and symbol keys are used.
(i.e. --sort comm,dso,symbol)
@@ -224,6 +226,9 @@ OPTIONS
--dump-raw-trace::
Dump raw trace in ASCII.
+--disable-order::
+ Disable raw trace ordering.
+
-g::
--call-graph=<print_type,threshold[,print_limit],order,sort_key[,branch],value>::
Display call chains using type, min percent threshold, print limit,
@@ -472,7 +477,7 @@ OPTIONS
but probably we'll make the default not to show the switch-on/off events
on the --group mode and if there is only one event besides the off/on ones,
go straight to the histogram browser, just like 'perf report' with no events
- explicitely specified does.
+ explicitly specified does.
--itrace::
Options for decoding instruction tracing data. The options are:
@@ -566,6 +571,9 @@ include::itrace.txt[]
sampled cycles
'Avg Cycles' - block average sampled cycles
+--skip-empty::
+ Do not print 0 results in the --stat output.
+
include::callchain-overhead-calculation.txt[]
SEE ALSO
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 08a1714494f8..45c2467e4eb2 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -93,6 +93,19 @@ report::
1.102235068 seconds time elapsed
+--bpf-counters::
+ Use BPF programs to aggregate readings from perf_events. This
+ allows multiple perf-stat sessions that are counting the same metric (cycles,
+ instructions, etc.) to share hardware counters.
+ To use BPF programs on common events by default, use
+ "perf config stat.bpf-counter-events=<list_of_events>".
+
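+	For example, two concurrent sessions counting the same event can
+	share a hardware counter (an illustrative invocation; any workload
+	works):
+
+	  perf stat --bpf-counters -e cycles -- sleep 1
+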
+--bpf-attr-map::
+ With option "--bpf-counters", different perf-stat sessions share
+ information about shared BPF programs and maps via a pinned hashmap.
+ Use "--bpf-attr-map" to specify the path of this pinned hashmap.
+ The default path is /sys/fs/bpf/perf_attr_map.
+
ifdef::HAVE_LIBPFM[]
--pfm-events events::
Select a PMU event using libpfm4 syntax (see http://perfmon2.sf.net)
@@ -142,7 +155,10 @@ Do not aggregate counts across all monitored CPUs.
-n::
--null::
- null run - don't start any counters
+null run - Don't start any counters.
+
+This can be useful to measure just elapsed wall-clock time - or to assess the
+raw overhead of perf stat itself, without running any counters.
-v::
--verbose::
@@ -468,6 +484,15 @@ convenient for post processing.
--summary::
Print summary for interval mode (-I).
+--no-csv-summary::
+Don't print 'summary' in the first column of CSV summary output.
+This option must be used with -x and --summary.
+
+This option can be enabled in perf config by setting the variable
+'stat.no-csv-summary'.
+
+$ perf config stat.no-csv-summary=true
+
EXAMPLES
--------
@@ -527,6 +552,8 @@ The fields are in this order:
Additional metrics may be printed with all earlier fields being empty.
+include::intel-hybrid.txt[]
+
SEE ALSO
--------
linkperf:perf-top[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index ee2024691d46..bba5ffb05463 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -317,7 +317,7 @@ Default is to monitor all CPUS.
but probably we'll make the default not to show the switch-on/off events
on the --group mode and if there is only one event besides the off/on ones,
go straight to the histogram browser, just like 'perf top' with no events
- explicitely specified does.
+ explicitly specified does.
--stitch-lbr::
Show callgraph with stitched LBRs, which may have more complete
diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt
index c130a3c46a90..9c330cdfa973 100644
--- a/tools/perf/Documentation/perf.txt
+++ b/tools/perf/Documentation/perf.txt
@@ -76,3 +76,15 @@ SEE ALSO
linkperf:perf-stat[1], linkperf:perf-top[1],
linkperf:perf-record[1], linkperf:perf-report[1],
linkperf:perf-list[1]
+
+linkperf:perf-annotate[1],linkperf:perf-archive[1],
+linkperf:perf-bench[1], linkperf:perf-buildid-cache[1],
+linkperf:perf-buildid-list[1], linkperf:perf-c2c[1],
+linkperf:perf-config[1], linkperf:perf-data[1], linkperf:perf-diff[1],
+linkperf:perf-evlist[1], linkperf:perf-ftrace[1],
+linkperf:perf-help[1], linkperf:perf-inject[1],
+linkperf:perf-intel-pt[1], linkperf:perf-kallsyms[1],
+linkperf:perf-kmem[1], linkperf:perf-kvm[1], linkperf:perf-lock[1],
+linkperf:perf-mem[1], linkperf:perf-probe[1], linkperf:perf-sched[1],
+linkperf:perf-script[1], linkperf:perf-test[1],
+linkperf:perf-trace[1], linkperf:perf-version[1]
diff --git a/tools/perf/Documentation/topdown.txt b/tools/perf/Documentation/topdown.txt
index 10f07f9455b8..c6302df4cf29 100644
--- a/tools/perf/Documentation/topdown.txt
+++ b/tools/perf/Documentation/topdown.txt
@@ -72,6 +72,7 @@ For example, the perf_event_attr structure can be initialized with
The Fixed counter 3 must be the leader of the group.
#include <linux/perf_event.h>
+#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
@@ -95,6 +96,11 @@ int slots_fd = perf_event_open(&slots, 0, -1, -1, 0);
if (slots_fd < 0)
... error ...
+/* Memory mapping the fd permits _rdpmc calls from userspace */
+void *slots_p = mmap(0, getpagesize(), PROT_READ, MAP_SHARED, slots_fd, 0);
+if (slots_p == MAP_FAILED)
+	... error ...
+
/*
* Open metrics event file descriptor for current task.
* Set slots event as the leader of the group.
@@ -110,6 +116,14 @@ int metrics_fd = perf_event_open(&metrics, 0, -1, slots_fd, 0);
if (metrics_fd < 0)
... error ...
+/* Memory mapping the fd permits _rdpmc calls from userspace */
+void *metrics_p = mmap(0, getpagesize(), PROT_READ, MAP_SHARED, metrics_fd, 0);
+if (metrics_p == MAP_FAILED)
+ ... error ...
+
+Note: the file descriptors returned by the perf_event_open calls must be memory
+mapped to permit calls to the _rdpmc instruction. Permission may also be granted
+by writing to the /sys/devices/cpu/rdpmc sysfs node.
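+
+Whether rdpmc is actually usable can also be checked through the mapped page;
+a sketch (added for illustration, not part of the original example):
+
+struct perf_event_mmap_page *pc = slots_p;
+if (!pc->cap_user_rdpmc)
+	... fall back to reading the fd with read() ...
+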
The RDPMC instruction (or _rdpmc compiler intrinsic) can now be used
to read slots and the topdown metrics at different points of the program:
@@ -141,6 +155,10 @@ as the parallelism and overlap in the CPU program execution will
cause too much measurement inaccuracy. For example instrumenting
individual basic blocks is definitely too fine grained.
+_rdpmc calls should not be mixed with reading the metrics and slots counters
+through system calls, as the kernel will reset these counters after each system
+call.
+
Decoding metrics values
=======================
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 5d7b947320fb..f05c4d48fd7e 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -20,4 +20,4 @@ tools/lib/bitmap.c
tools/lib/str_error_r.c
tools/lib/vsprintf.c
tools/lib/zalloc.c
-scripts/bpf_helpers_doc.py
+scripts/bpf_doc.py
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index b8fc7d972be9..f3fe360a35c6 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -100,7 +100,10 @@ clean:
# make -C tools/perf -f tests/make
#
build-test:
- @$(MAKE) SHUF=1 -f tests/make REUSE_FEATURES_DUMP=1 MK=Makefile SET_PARALLEL=1 --no-print-directory tarpkg out
+ @$(MAKE) SHUF=1 -f tests/make REUSE_FEATURES_DUMP=1 MK=Makefile SET_PARALLEL=1 --no-print-directory tarpkg make_static make_with_gtk2 out
+
+build-test-tarball:
+ @$(MAKE) -f tests/make REUSE_FEATURES_DUMP=1 MK=Makefile SET_PARALLEL=1 --no-print-directory out
#
# All other targets get passed through:
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index d8e59d31399a..0d6619064a83 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -32,7 +32,7 @@ ifneq ($(NO_SYSCALL_TABLE),1)
NO_SYSCALL_TABLE := 0
endif
else
- ifeq ($(SRCARCH),$(filter $(SRCARCH),powerpc arm64 s390))
+ ifeq ($(SRCARCH),$(filter $(SRCARCH),powerpc arm64 s390 mips))
NO_SYSCALL_TABLE := 0
endif
endif
@@ -87,6 +87,13 @@ ifeq ($(ARCH),s390)
CFLAGS += -fPIC -I$(OUTPUT)arch/s390/include/generated
endif
+ifeq ($(ARCH),mips)
+ NO_PERF_REGS := 0
+ CFLAGS += -I$(OUTPUT)arch/mips/include/generated
+ CFLAGS += -I../../arch/mips/include/uapi -I../../arch/mips/include/generated/uapi
+ LIBUNWIND_LIBS = -lunwind -lunwind-mips
+endif
+
ifeq ($(NO_PERF_REGS),0)
$(call detected,CONFIG_PERF_REGS)
endif
@@ -292,6 +299,9 @@ ifneq ($(TCMALLOC),)
endif
ifeq ($(FEATURES_DUMP),)
+# We will display the feature summary at the end of this Makefile.config,
+# using $(call feature_display_entries), as we may retry some feature
+# detection here; see the disassembler-four-args case, for instance.
+ FEATURE_DISPLAY_DEFERRED := 1
include $(srctree)/tools/build/Makefile.feature
else
include $(FEATURES_DUMP)
@@ -1072,6 +1082,15 @@ ifdef LIBPFM4
endif
endif
+ifdef LIBTRACEEVENT_DYNAMIC
+ $(call feature_check,libtraceevent)
+ ifeq ($(feature-libtraceevent), 1)
+ EXTLIBS += -ltraceevent
+ else
+ dummy := $(error Error: No libtraceevent devel library found, please install libtraceevent-devel);
+ endif
+endif
+
# Among the variables below, these:
# perfexecdir
# perf_include_dir
@@ -1208,3 +1227,13 @@ $(call detected_var,LIBDIR)
$(call detected_var,GTK_CFLAGS)
$(call detected_var,PERL_EMBED_CCOPTS)
$(call detected_var,PYTHON_EMBED_CCOPTS)
+
+# re-generate FEATURE-DUMP as we may have called feature_check, found out
+# extra libraries to add to LDFLAGS of some other test and then redo those
+# tests, see the block about libbfd, disassembler-four-args, for instance.
+$(shell rm -f $(FEATURE_DUMP_FILENAME))
+$(foreach feat,$(FEATURE_TESTS),$(shell echo "$(call feature_assign,$(feat))" >> $(FEATURE_DUMP_FILENAME)))
+
+ifeq ($(feature_display),1)
+ $(call feature_display_entries)
+endif
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index f6e609673de2..e47f04e5b51e 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -128,6 +128,8 @@ include ../scripts/utilities.mak
#
# Define BUILD_BPF_SKEL to enable BPF skeletons
#
+# Define LIBTRACEEVENT_DYNAMIC to enable libtraceevent dynamic linking
+#
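+# For example (illustrative): 'make LIBTRACEEVENT_DYNAMIC=1', which assumes
+# the libtraceevent devel files are installed (see Makefile.config)
+#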
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
@@ -283,6 +285,7 @@ SCRIPT_SH =
SCRIPT_SH += perf-archive.sh
SCRIPT_SH += perf-with-kcore.sh
+SCRIPT_SH += perf-iostat.sh
grep-libs = $(filter -l%,$(1))
strip-libs = $(filter-out -l%,$(1))
@@ -309,7 +312,6 @@ endif
LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
export LIBTRACEEVENT
-
LIBTRACEEVENT_DYNAMIC_LIST = $(PLUGINS_PATH)libtraceevent-dynamic-list
#
@@ -374,12 +376,15 @@ endif
export PERL_PATH
-PERFLIBS = $(LIBAPI) $(LIBTRACEEVENT) $(LIBSUBCMD) $(LIBPERF)
+PERFLIBS = $(LIBAPI) $(LIBSUBCMD) $(LIBPERF)
ifndef NO_LIBBPF
ifndef LIBBPF_DYNAMIC
PERFLIBS += $(LIBBPF)
endif
endif
+ifndef LIBTRACEEVENT_DYNAMIC
+ PERFLIBS += $(LIBTRACEEVENT)
+endif
# We choose to avoid "if .. else if .. else .. endif endif"
# because maintaining the nesting to match is a pain. If
@@ -948,6 +953,8 @@ endif
$(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
$(call QUIET_INSTALL, perf-with-kcore) \
$(INSTALL) $(OUTPUT)perf-with-kcore -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
+ $(call QUIET_INSTALL, perf-iostat) \
+ $(INSTALL) $(OUTPUT)perf-iostat -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
ifndef NO_LIBAUDIT
$(call QUIET_INSTALL, strace/groups) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(STRACE_GROUPS_INSTDIR_SQ)'; \
@@ -1007,6 +1014,7 @@ python-clean:
SKEL_OUT := $(abspath $(OUTPUT)util/bpf_skel)
SKEL_TMP_OUT := $(abspath $(SKEL_OUT)/.tmp)
SKELETONS := $(SKEL_OUT)/bpf_prog_profiler.skel.h
+SKELETONS += $(SKEL_OUT)/bperf_leader.skel.h $(SKEL_OUT)/bperf_follower.skel.h
ifdef BUILD_BPF_SKEL
BPFTOOL := $(SKEL_TMP_OUT)/bootstrap/bpftool
@@ -1021,7 +1029,7 @@ $(BPFTOOL): | $(SKEL_TMP_OUT)
OUTPUT=$(SKEL_TMP_OUT)/ bootstrap
$(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) | $(SKEL_TMP_OUT)
- $(QUIET_CLANG)$(CLANG) -g -O2 -target bpf $(BPF_INCLUDE) \
+ $(QUIET_CLANG)$(CLANG) -g -O2 -target bpf -Wall -Werror $(BPF_INCLUDE) \
-c $(filter util/bpf_skel/%.bpf.c,$^) -o $@ && $(LLVM_STRIP) -g $@
$(SKEL_OUT)/%.skel.h: $(SKEL_TMP_OUT)/%.bpf.o | $(BPFTOOL)
@@ -1041,7 +1049,7 @@ bpf-skel-clean:
$(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS)
clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBPERF)-clean fixdep-clean python-clean bpf-skel-clean
- $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
+ $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(OUTPUT)perf-iostat $(LANG_BINDINGS)
$(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
$(Q)$(RM) $(OUTPUT).config-detected
$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)pmu-events/jevents $(OUTPUT)$(LIBJVMTI).so
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index c25c878fd06c..d942f118d32c 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -67,6 +67,7 @@ static int cs_etm_set_context_id(struct auxtrace_record *itr,
char path[PATH_MAX];
int err = -EINVAL;
u32 val;
+ u64 contextid;
ptr = container_of(itr, struct cs_etm_recording, itr);
cs_etm_pmu = ptr->cs_etm_pmu;
@@ -86,25 +87,59 @@ static int cs_etm_set_context_id(struct auxtrace_record *itr,
goto out;
}
+	/* The user has configured PID tracing, respect it. */
+ contextid = evsel->core.attr.config &
+ (BIT(ETM_OPT_CTXTID) | BIT(ETM_OPT_CTXTID2));
+
/*
- * TRCIDR2.CIDSIZE, bit [9-5], indicates whether contextID tracing
- * is supported:
- * 0b00000 Context ID tracing is not supported.
- * 0b00100 Maximum of 32-bit Context ID size.
- * All other values are reserved.
+ * If user doesn't configure the contextid format, parse PMU format and
+ * enable PID tracing according to the "contextid" format bits:
+ *
+ * If bit ETM_OPT_CTXTID is set, trace CONTEXTIDR_EL1;
+ * If bit ETM_OPT_CTXTID2 is set, trace CONTEXTIDR_EL2.
*/
- val = BMVAL(val, 5, 9);
- if (!val || val != 0x4) {
- err = -EINVAL;
- goto out;
+ if (!contextid)
+ contextid = perf_pmu__format_bits(&cs_etm_pmu->format,
+ "contextid");
+
+ if (contextid & BIT(ETM_OPT_CTXTID)) {
+ /*
+ * TRCIDR2.CIDSIZE, bit [9-5], indicates whether contextID
+ * tracing is supported:
+ * 0b00000 Context ID tracing is not supported.
+ * 0b00100 Maximum of 32-bit Context ID size.
+ * All other values are reserved.
+ */
+ val = BMVAL(val, 5, 9);
+ if (!val || val != 0x4) {
+ pr_err("%s: CONTEXTIDR_EL1 isn't supported\n",
+ CORESIGHT_ETM_PMU_NAME);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (contextid & BIT(ETM_OPT_CTXTID2)) {
+ /*
+ * TRCIDR2.VMIDOPT[30:29] != 0 and
+ * TRCIDR2.VMIDSIZE[14:10] == 0b00100 (32bit virtual contextid)
+ * We can't support CONTEXTIDR in VMID if the size of the
+ * virtual context id is < 32bit.
+ * Any value of VMIDSIZE >= 4 (i.e, > 32bit) is fine for us.
+ */
+ if (!BMVAL(val, 29, 30) || BMVAL(val, 10, 14) < 4) {
+ pr_err("%s: CONTEXTIDR_EL2 isn't supported\n",
+ CORESIGHT_ETM_PMU_NAME);
+ err = -EINVAL;
+ goto out;
+ }
}
/* All good, let the kernel know */
- evsel->core.attr.config |= (1 << ETM_OPT_CTXTID);
+ evsel->core.attr.config |= contextid;
err = 0;
out:
-
return err;
}
@@ -173,17 +208,17 @@ static int cs_etm_set_option(struct auxtrace_record *itr,
!cpu_map__has(online_cpus, i))
continue;
- if (option & ETM_SET_OPT_CTXTID) {
+ if (option & BIT(ETM_OPT_CTXTID)) {
err = cs_etm_set_context_id(itr, evsel, i);
if (err)
goto out;
}
- if (option & ETM_SET_OPT_TS) {
+ if (option & BIT(ETM_OPT_TS)) {
err = cs_etm_set_timestamp(itr, evsel, i);
if (err)
goto out;
}
- if (option & ~(ETM_SET_OPT_MASK))
+ if (option & ~(BIT(ETM_OPT_CTXTID) | BIT(ETM_OPT_TS)))
/* Nothing else is currently supported */
goto out;
}
@@ -343,7 +378,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
}
- /* Snapshost size can't be bigger than the auxtrace area */
+ /* Snapshot size can't be bigger than the auxtrace area */
if (opts->auxtrace_snapshot_size >
opts->auxtrace_mmap_pages * (size_t)page_size) {
pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
@@ -410,7 +445,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
evsel__set_sample_bit(cs_etm_evsel, CPU);
err = cs_etm_set_option(itr, cs_etm_evsel,
- ETM_SET_OPT_CTXTID | ETM_SET_OPT_TS);
+ BIT(ETM_OPT_CTXTID) | BIT(ETM_OPT_TS));
if (err)
goto out;
}
@@ -489,7 +524,9 @@ static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
config |= BIT(ETM4_CFG_BIT_TS);
if (config_opts & BIT(ETM_OPT_RETSTK))
config |= BIT(ETM4_CFG_BIT_RETSTK);
-
+ if (config_opts & BIT(ETM_OPT_CTXTID2))
+ config |= BIT(ETM4_CFG_BIT_VMID) |
+ BIT(ETM4_CFG_BIT_VMID_OPT);
return config;
}
@@ -576,7 +613,7 @@ static void cs_etm_get_metadata(int cpu, u32 *offset,
struct auxtrace_record *itr,
struct perf_record_auxtrace_info *info)
{
- u32 increment;
+ u32 increment, nr_trc_params;
u64 magic;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
@@ -611,6 +648,7 @@ static void cs_etm_get_metadata(int cpu, u32 *offset,
/* How much space was used */
increment = CS_ETMV4_PRIV_MAX;
+ nr_trc_params = CS_ETMV4_PRIV_MAX - CS_ETMV4_TRCCONFIGR;
} else {
magic = __perf_cs_etmv3_magic;
/* Get configuration register */
@@ -628,11 +666,13 @@ static void cs_etm_get_metadata(int cpu, u32 *offset,
/* How much space was used */
increment = CS_ETM_PRIV_MAX;
+ nr_trc_params = CS_ETM_PRIV_MAX - CS_ETM_ETMCR;
}
/* Build generic header portion */
info->priv[*offset + CS_ETM_MAGIC] = magic;
info->priv[*offset + CS_ETM_CPU] = cpu;
+ info->priv[*offset + CS_ETM_NR_TRC_PARAMS] = nr_trc_params;
/* Where the next CPU entry should start from */
*offset += increment;
}
@@ -678,7 +718,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
/* First fill out the session header */
info->type = PERF_AUXTRACE_CS_ETM;
- info->priv[CS_HEADER_VERSION_0] = 0;
+ info->priv[CS_HEADER_VERSION] = CS_HEADER_CURRENT_VERSION;
info->priv[CS_PMU_TYPE_CPUS] = type << 32;
info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;
diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
index ead2f2275eee..9fcb4e68add9 100644
--- a/tools/perf/arch/arm64/util/Build
+++ b/tools/perf/arch/arm64/util/Build
@@ -2,6 +2,7 @@ perf-y += header.o
perf-y += machine.o
perf-y += perf_regs.o
perf-y += tsc.o
+perf-y += pmu.o
perf-y += kvm-stat.o
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
diff --git a/tools/perf/arch/arm64/util/kvm-stat.c b/tools/perf/arch/arm64/util/kvm-stat.c
index 50376b9062c1..2303256b7d05 100644
--- a/tools/perf/arch/arm64/util/kvm-stat.c
+++ b/tools/perf/arch/arm64/util/kvm-stat.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <memory.h>
-#include "../../util/evsel.h"
-#include "../../util/kvm-stat.h"
+#include "../../../util/evsel.h"
+#include "../../../util/kvm-stat.h"
#include "arm64_exception_types.h"
#include "debug.h"
diff --git a/tools/perf/arch/arm64/util/machine.c b/tools/perf/arch/arm64/util/machine.c
index 40c5e0b5bda8..7e7714290a87 100644
--- a/tools/perf/arch/arm64/util/machine.c
+++ b/tools/perf/arch/arm64/util/machine.c
@@ -6,11 +6,11 @@
#include "debug.h"
#include "symbol.h"
-/* On arm64, kernel text segment start at high memory address,
+/* On arm64, kernel text segment starts at high memory address,
* for example 0xffff 0000 8xxx xxxx. Modules start at a low memory
- * address, like 0xffff 0000 00ax xxxx. When only samll amount of
+ * address, like 0xffff 0000 00ax xxxx. When only a small amount of
* memory is used by modules, gap between end of module's text segment
- * and start of kernel text segment may be reach 2G.
+ * and start of kernel text segment may reach 2G.
* Therefore do not fill this gap and do not assign it to the kernel dso map.
*/
diff --git a/tools/perf/arch/arm64/util/perf_regs.c b/tools/perf/arch/arm64/util/perf_regs.c
index 2518cde18b34..476b037eea1c 100644
--- a/tools/perf/arch/arm64/util/perf_regs.c
+++ b/tools/perf/arch/arm64/util/perf_regs.c
@@ -108,7 +108,7 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
/* [sp], [sp, NUM] or [sp,NUM] */
new_len = 7; /* + ( % s p ) NULL */
- /* If the arugment is [sp], need to fill offset '0' */
+ /* If the argument is [sp], need to fill offset '0' */
if (rm[2].rm_so == -1)
new_len += 1;
else
diff --git a/tools/perf/arch/arm64/util/pmu.c b/tools/perf/arch/arm64/util/pmu.c
new file mode 100644
index 000000000000..2234fbd0a912
--- /dev/null
+++ b/tools/perf/arch/arm64/util/pmu.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "../../../util/cpumap.h"
+#include "../../../util/pmu.h"
+
+struct pmu_events_map *pmu_events_map__find(void)
+{
+ struct perf_pmu *pmu = NULL;
+
+ while ((pmu = perf_pmu__scan(pmu))) {
+ if (!is_pmu_core(pmu->name))
+ continue;
+
+ /*
+ * The cpumap should cover all CPUs. Otherwise, some CPUs may
+ * not support some events or have different event IDs.
+ */
+ if (pmu->cpus->nr != cpu__max_cpu())
+ return NULL;
+
+ return perf_pmu__find_map(pmu);
+ }
+
+ return NULL;
+}
diff --git a/tools/perf/arch/arm64/util/unwind-libunwind.c b/tools/perf/arch/arm64/util/unwind-libunwind.c
index 1495a9523a23..5aecf88e3de6 100644
--- a/tools/perf/arch/arm64/util/unwind-libunwind.c
+++ b/tools/perf/arch/arm64/util/unwind-libunwind.c
@@ -4,9 +4,9 @@
#ifndef REMOTE_UNWIND_LIBUNWIND
#include <libunwind.h>
#include "perf_regs.h"
-#include "../../util/unwind.h"
+#include "../../../util/unwind.h"
#endif
-#include "../../util/debug.h"
+#include "../../../util/debug.h"
int LIBUNWIND__ARCH_REG_ID(int regnum)
{
diff --git a/tools/perf/arch/mips/Makefile b/tools/perf/arch/mips/Makefile
new file mode 100644
index 000000000000..8bc09072e3d6
--- /dev/null
+++ b/tools/perf/arch/mips/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+endif
+
+# Syscall table generation for perf
+out := $(OUTPUT)arch/mips/include/generated/asm
+header := $(out)/syscalls_n64.c
+sysprf := $(srctree)/tools/perf/arch/mips/entry/syscalls
+sysdef := $(sysprf)/syscall_n64.tbl
+systbl := $(sysprf)/mksyscalltbl
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
+
+$(header): $(sysdef) $(systbl)
+ $(Q)$(SHELL) '$(systbl)' $(sysdef) > $@
+
+clean::
+ $(call QUIET_CLEAN, mips) $(RM) $(header)
+
+archheaders: $(header)
diff --git a/tools/perf/arch/mips/entry/syscalls/mksyscalltbl b/tools/perf/arch/mips/entry/syscalls/mksyscalltbl
new file mode 100644
index 000000000000..fb1f49451af6
--- /dev/null
+++ b/tools/perf/arch/mips/entry/syscalls/mksyscalltbl
@@ -0,0 +1,32 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate system call table for perf. Derived from
+# s390 script.
+#
+# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+# Changed by: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+SYSCALL_TBL=$1
+
+if ! test -r $SYSCALL_TBL; then
+ echo "Could not read input file" >&2
+ exit 1
+fi
+
+create_table()
+{
+ local max_nr nr abi sc discard
+
+ echo 'static const char *syscalltbl_mips_n64[] = {'
+ while read nr abi sc discard; do
+ printf '\t[%d] = "%s",\n' $nr $sc
+ max_nr=$nr
+ done
+ echo '};'
+ echo "#define SYSCALLTBL_MIPS_N64_MAX_ID $max_nr"
+}
+
+grep -E "^[[:digit:]]+[[:space:]]+(n64)" $SYSCALL_TBL \
+ |sort -k1 -n \
+ |create_table
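+
+# For illustration, the generated header looks roughly like this (a sketch
+# derived from the syscall_n64.tbl entries that follow in this patch):
+#
+#   static const char *syscalltbl_mips_n64[] = {
+#           [0] = "read",
+#           [1] = "write",
+#           ...
+#           [441] = "epoll_pwait2",
+#   };
+#   #define SYSCALLTBL_MIPS_N64_MAX_ID 441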
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
new file mode 100644
index 000000000000..91649690b52f
--- /dev/null
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -0,0 +1,358 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for mips
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "n64" for this file.
+#
+0 n64 read sys_read
+1 n64 write sys_write
+2 n64 open sys_open
+3 n64 close sys_close
+4 n64 stat sys_newstat
+5 n64 fstat sys_newfstat
+6 n64 lstat sys_newlstat
+7 n64 poll sys_poll
+8 n64 lseek sys_lseek
+9 n64 mmap sys_mips_mmap
+10 n64 mprotect sys_mprotect
+11 n64 munmap sys_munmap
+12 n64 brk sys_brk
+13 n64 rt_sigaction sys_rt_sigaction
+14 n64 rt_sigprocmask sys_rt_sigprocmask
+15 n64 ioctl sys_ioctl
+16 n64 pread64 sys_pread64
+17 n64 pwrite64 sys_pwrite64
+18 n64 readv sys_readv
+19 n64 writev sys_writev
+20 n64 access sys_access
+21 n64 pipe sysm_pipe
+22 n64 _newselect sys_select
+23 n64 sched_yield sys_sched_yield
+24 n64 mremap sys_mremap
+25 n64 msync sys_msync
+26 n64 mincore sys_mincore
+27 n64 madvise sys_madvise
+28 n64 shmget sys_shmget
+29 n64 shmat sys_shmat
+30 n64 shmctl sys_old_shmctl
+31 n64 dup sys_dup
+32 n64 dup2 sys_dup2
+33 n64 pause sys_pause
+34 n64 nanosleep sys_nanosleep
+35 n64 getitimer sys_getitimer
+36 n64 setitimer sys_setitimer
+37 n64 alarm sys_alarm
+38 n64 getpid sys_getpid
+39 n64 sendfile sys_sendfile64
+40 n64 socket sys_socket
+41 n64 connect sys_connect
+42 n64 accept sys_accept
+43 n64 sendto sys_sendto
+44 n64 recvfrom sys_recvfrom
+45 n64 sendmsg sys_sendmsg
+46 n64 recvmsg sys_recvmsg
+47 n64 shutdown sys_shutdown
+48 n64 bind sys_bind
+49 n64 listen sys_listen
+50 n64 getsockname sys_getsockname
+51 n64 getpeername sys_getpeername
+52 n64 socketpair sys_socketpair
+53 n64 setsockopt sys_setsockopt
+54 n64 getsockopt sys_getsockopt
+55 n64 clone __sys_clone
+56 n64 fork __sys_fork
+57 n64 execve sys_execve
+58 n64 exit sys_exit
+59 n64 wait4 sys_wait4
+60 n64 kill sys_kill
+61 n64 uname sys_newuname
+62 n64 semget sys_semget
+63 n64 semop sys_semop
+64 n64 semctl sys_old_semctl
+65 n64 shmdt sys_shmdt
+66 n64 msgget sys_msgget
+67 n64 msgsnd sys_msgsnd
+68 n64 msgrcv sys_msgrcv
+69 n64 msgctl sys_old_msgctl
+70 n64 fcntl sys_fcntl
+71 n64 flock sys_flock
+72 n64 fsync sys_fsync
+73 n64 fdatasync sys_fdatasync
+74 n64 truncate sys_truncate
+75 n64 ftruncate sys_ftruncate
+76 n64 getdents sys_getdents
+77 n64 getcwd sys_getcwd
+78 n64 chdir sys_chdir
+79 n64 fchdir sys_fchdir
+80 n64 rename sys_rename
+81 n64 mkdir sys_mkdir
+82 n64 rmdir sys_rmdir
+83 n64 creat sys_creat
+84 n64 link sys_link
+85 n64 unlink sys_unlink
+86 n64 symlink sys_symlink
+87 n64 readlink sys_readlink
+88 n64 chmod sys_chmod
+89 n64 fchmod sys_fchmod
+90 n64 chown sys_chown
+91 n64 fchown sys_fchown
+92 n64 lchown sys_lchown
+93 n64 umask sys_umask
+94 n64 gettimeofday sys_gettimeofday
+95 n64 getrlimit sys_getrlimit
+96 n64 getrusage sys_getrusage
+97 n64 sysinfo sys_sysinfo
+98 n64 times sys_times
+99 n64 ptrace sys_ptrace
+100 n64 getuid sys_getuid
+101 n64 syslog sys_syslog
+102 n64 getgid sys_getgid
+103 n64 setuid sys_setuid
+104 n64 setgid sys_setgid
+105 n64 geteuid sys_geteuid
+106 n64 getegid sys_getegid
+107 n64 setpgid sys_setpgid
+108 n64 getppid sys_getppid
+109 n64 getpgrp sys_getpgrp
+110 n64 setsid sys_setsid
+111 n64 setreuid sys_setreuid
+112 n64 setregid sys_setregid
+113 n64 getgroups sys_getgroups
+114 n64 setgroups sys_setgroups
+115 n64 setresuid sys_setresuid
+116 n64 getresuid sys_getresuid
+117 n64 setresgid sys_setresgid
+118 n64 getresgid sys_getresgid
+119 n64 getpgid sys_getpgid
+120 n64 setfsuid sys_setfsuid
+121 n64 setfsgid sys_setfsgid
+122 n64 getsid sys_getsid
+123 n64 capget sys_capget
+124 n64 capset sys_capset
+125 n64 rt_sigpending sys_rt_sigpending
+126 n64 rt_sigtimedwait sys_rt_sigtimedwait
+127 n64 rt_sigqueueinfo sys_rt_sigqueueinfo
+128 n64 rt_sigsuspend sys_rt_sigsuspend
+129 n64 sigaltstack sys_sigaltstack
+130 n64 utime sys_utime
+131 n64 mknod sys_mknod
+132 n64 personality sys_personality
+133 n64 ustat sys_ustat
+134 n64 statfs sys_statfs
+135 n64 fstatfs sys_fstatfs
+136 n64 sysfs sys_sysfs
+137 n64 getpriority sys_getpriority
+138 n64 setpriority sys_setpriority
+139 n64 sched_setparam sys_sched_setparam
+140 n64 sched_getparam sys_sched_getparam
+141 n64 sched_setscheduler sys_sched_setscheduler
+142 n64 sched_getscheduler sys_sched_getscheduler
+143 n64 sched_get_priority_max sys_sched_get_priority_max
+144 n64 sched_get_priority_min sys_sched_get_priority_min
+145 n64 sched_rr_get_interval sys_sched_rr_get_interval
+146 n64 mlock sys_mlock
+147 n64 munlock sys_munlock
+148 n64 mlockall sys_mlockall
+149 n64 munlockall sys_munlockall
+150 n64 vhangup sys_vhangup
+151 n64 pivot_root sys_pivot_root
+152 n64 _sysctl sys_ni_syscall
+153 n64 prctl sys_prctl
+154 n64 adjtimex sys_adjtimex
+155 n64 setrlimit sys_setrlimit
+156 n64 chroot sys_chroot
+157 n64 sync sys_sync
+158 n64 acct sys_acct
+159 n64 settimeofday sys_settimeofday
+160 n64 mount sys_mount
+161 n64 umount2 sys_umount
+162 n64 swapon sys_swapon
+163 n64 swapoff sys_swapoff
+164 n64 reboot sys_reboot
+165 n64 sethostname sys_sethostname
+166 n64 setdomainname sys_setdomainname
+167 n64 create_module sys_ni_syscall
+168 n64 init_module sys_init_module
+169 n64 delete_module sys_delete_module
+170 n64 get_kernel_syms sys_ni_syscall
+171 n64 query_module sys_ni_syscall
+172 n64 quotactl sys_quotactl
+173 n64 nfsservctl sys_ni_syscall
+174 n64 getpmsg sys_ni_syscall
+175 n64 putpmsg sys_ni_syscall
+176 n64 afs_syscall sys_ni_syscall
+# 177 reserved for security
+177 n64 reserved177 sys_ni_syscall
+178 n64 gettid sys_gettid
+179 n64 readahead sys_readahead
+180 n64 setxattr sys_setxattr
+181 n64 lsetxattr sys_lsetxattr
+182 n64 fsetxattr sys_fsetxattr
+183 n64 getxattr sys_getxattr
+184 n64 lgetxattr sys_lgetxattr
+185 n64 fgetxattr sys_fgetxattr
+186 n64 listxattr sys_listxattr
+187 n64 llistxattr sys_llistxattr
+188 n64 flistxattr sys_flistxattr
+189 n64 removexattr sys_removexattr
+190 n64 lremovexattr sys_lremovexattr
+191 n64 fremovexattr sys_fremovexattr
+192 n64 tkill sys_tkill
+193 n64 reserved193 sys_ni_syscall
+194 n64 futex sys_futex
+195 n64 sched_setaffinity sys_sched_setaffinity
+196 n64 sched_getaffinity sys_sched_getaffinity
+197 n64 cacheflush sys_cacheflush
+198 n64 cachectl sys_cachectl
+199 n64 sysmips __sys_sysmips
+200 n64 io_setup sys_io_setup
+201 n64 io_destroy sys_io_destroy
+202 n64 io_getevents sys_io_getevents
+203 n64 io_submit sys_io_submit
+204 n64 io_cancel sys_io_cancel
+205 n64 exit_group sys_exit_group
+206 n64 lookup_dcookie sys_lookup_dcookie
+207 n64 epoll_create sys_epoll_create
+208 n64 epoll_ctl sys_epoll_ctl
+209 n64 epoll_wait sys_epoll_wait
+210 n64 remap_file_pages sys_remap_file_pages
+211 n64 rt_sigreturn sys_rt_sigreturn
+212 n64 set_tid_address sys_set_tid_address
+213 n64 restart_syscall sys_restart_syscall
+214 n64 semtimedop sys_semtimedop
+215 n64 fadvise64 sys_fadvise64_64
+216 n64 timer_create sys_timer_create
+217 n64 timer_settime sys_timer_settime
+218 n64 timer_gettime sys_timer_gettime
+219 n64 timer_getoverrun sys_timer_getoverrun
+220 n64 timer_delete sys_timer_delete
+221 n64 clock_settime sys_clock_settime
+222 n64 clock_gettime sys_clock_gettime
+223 n64 clock_getres sys_clock_getres
+224 n64 clock_nanosleep sys_clock_nanosleep
+225 n64 tgkill sys_tgkill
+226 n64 utimes sys_utimes
+227 n64 mbind sys_mbind
+228 n64 get_mempolicy sys_get_mempolicy
+229 n64 set_mempolicy sys_set_mempolicy
+230 n64 mq_open sys_mq_open
+231 n64 mq_unlink sys_mq_unlink
+232 n64 mq_timedsend sys_mq_timedsend
+233 n64 mq_timedreceive sys_mq_timedreceive
+234 n64 mq_notify sys_mq_notify
+235 n64 mq_getsetattr sys_mq_getsetattr
+236 n64 vserver sys_ni_syscall
+237 n64 waitid sys_waitid
+# 238 was sys_setaltroot
+239 n64 add_key sys_add_key
+240 n64 request_key sys_request_key
+241 n64 keyctl sys_keyctl
+242 n64 set_thread_area sys_set_thread_area
+243 n64 inotify_init sys_inotify_init
+244 n64 inotify_add_watch sys_inotify_add_watch
+245 n64 inotify_rm_watch sys_inotify_rm_watch
+246 n64 migrate_pages sys_migrate_pages
+247 n64 openat sys_openat
+248 n64 mkdirat sys_mkdirat
+249 n64 mknodat sys_mknodat
+250 n64 fchownat sys_fchownat
+251 n64 futimesat sys_futimesat
+252 n64 newfstatat sys_newfstatat
+253 n64 unlinkat sys_unlinkat
+254 n64 renameat sys_renameat
+255 n64 linkat sys_linkat
+256 n64 symlinkat sys_symlinkat
+257 n64 readlinkat sys_readlinkat
+258 n64 fchmodat sys_fchmodat
+259 n64 faccessat sys_faccessat
+260 n64 pselect6 sys_pselect6
+261 n64 ppoll sys_ppoll
+262 n64 unshare sys_unshare
+263 n64 splice sys_splice
+264 n64 sync_file_range sys_sync_file_range
+265 n64 tee sys_tee
+266 n64 vmsplice sys_vmsplice
+267 n64 move_pages sys_move_pages
+268 n64 set_robust_list sys_set_robust_list
+269 n64 get_robust_list sys_get_robust_list
+270 n64 kexec_load sys_kexec_load
+271 n64 getcpu sys_getcpu
+272 n64 epoll_pwait sys_epoll_pwait
+273 n64 ioprio_set sys_ioprio_set
+274 n64 ioprio_get sys_ioprio_get
+275 n64 utimensat sys_utimensat
+276 n64 signalfd sys_signalfd
+277 n64 timerfd sys_ni_syscall
+278 n64 eventfd sys_eventfd
+279 n64 fallocate sys_fallocate
+280 n64 timerfd_create sys_timerfd_create
+281 n64 timerfd_gettime sys_timerfd_gettime
+282 n64 timerfd_settime sys_timerfd_settime
+283 n64 signalfd4 sys_signalfd4
+284 n64 eventfd2 sys_eventfd2
+285 n64 epoll_create1 sys_epoll_create1
+286 n64 dup3 sys_dup3
+287 n64 pipe2 sys_pipe2
+288 n64 inotify_init1 sys_inotify_init1
+289 n64 preadv sys_preadv
+290 n64 pwritev sys_pwritev
+291 n64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+292 n64 perf_event_open sys_perf_event_open
+293 n64 accept4 sys_accept4
+294 n64 recvmmsg sys_recvmmsg
+295 n64 fanotify_init sys_fanotify_init
+296 n64 fanotify_mark sys_fanotify_mark
+297 n64 prlimit64 sys_prlimit64
+298 n64 name_to_handle_at sys_name_to_handle_at
+299 n64 open_by_handle_at sys_open_by_handle_at
+300 n64 clock_adjtime sys_clock_adjtime
+301 n64 syncfs sys_syncfs
+302 n64 sendmmsg sys_sendmmsg
+303 n64 setns sys_setns
+304 n64 process_vm_readv sys_process_vm_readv
+305 n64 process_vm_writev sys_process_vm_writev
+306 n64 kcmp sys_kcmp
+307 n64 finit_module sys_finit_module
+308 n64 getdents64 sys_getdents64
+309 n64 sched_setattr sys_sched_setattr
+310 n64 sched_getattr sys_sched_getattr
+311 n64 renameat2 sys_renameat2
+312 n64 seccomp sys_seccomp
+313 n64 getrandom sys_getrandom
+314 n64 memfd_create sys_memfd_create
+315 n64 bpf sys_bpf
+316 n64 execveat sys_execveat
+317 n64 userfaultfd sys_userfaultfd
+318 n64 membarrier sys_membarrier
+319 n64 mlock2 sys_mlock2
+320 n64 copy_file_range sys_copy_file_range
+321 n64 preadv2 sys_preadv2
+322 n64 pwritev2 sys_pwritev2
+323 n64 pkey_mprotect sys_pkey_mprotect
+324 n64 pkey_alloc sys_pkey_alloc
+325 n64 pkey_free sys_pkey_free
+326 n64 statx sys_statx
+327 n64 rseq sys_rseq
+328 n64 io_pgetevents sys_io_pgetevents
+# 329 through 423 are reserved to sync up with other architectures
+424 n64 pidfd_send_signal sys_pidfd_send_signal
+425 n64 io_uring_setup sys_io_uring_setup
+426 n64 io_uring_enter sys_io_uring_enter
+427 n64 io_uring_register sys_io_uring_register
+428 n64 open_tree sys_open_tree
+429 n64 move_mount sys_move_mount
+430 n64 fsopen sys_fsopen
+431 n64 fsconfig sys_fsconfig
+432 n64 fsmount sys_fsmount
+433 n64 fspick sys_fspick
+434 n64 pidfd_open sys_pidfd_open
+435 n64 clone3 __sys_clone3
+436 n64 close_range sys_close_range
+437 n64 openat2 sys_openat2
+438 n64 pidfd_getfd sys_pidfd_getfd
+439 n64 faccessat2 sys_faccessat2
+440 n64 process_madvise sys_process_madvise
+441 n64 epoll_pwait2 sys_epoll_pwait2
diff --git a/tools/perf/arch/mips/include/dwarf-regs-table.h b/tools/perf/arch/mips/include/dwarf-regs-table.h
new file mode 100644
index 000000000000..5badbcd3c5ec
--- /dev/null
+++ b/tools/perf/arch/mips/include/dwarf-regs-table.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * dwarf-regs-table.h : Mapping of DWARF debug register numbers into
+ * register names.
+ *
+ * Copyright (C) 2013 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifdef DEFINE_DWARF_REGSTR_TABLE
+#undef REG_DWARFNUM_NAME
+#define REG_DWARFNUM_NAME(reg, idx) [idx] = "$" #reg
+static const char * const mips_regstr_tbl[] = {
+ "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$9",
+ "$10", "$11", "$12", "$13", "$14", "$15", "$16", "$17", "$18", "$19",
+	"$20", "$21", "$22", "$23", "$24", "$25", "$26", "$27", "$28", "$29",
+ "$30", "$31",
+ REG_DWARFNUM_NAME(hi, 64),
+ REG_DWARFNUM_NAME(lo, 65),
+};
+#endif
diff --git a/tools/perf/arch/mips/include/perf_regs.h b/tools/perf/arch/mips/include/perf_regs.h
new file mode 100644
index 000000000000..ee73b36a14d1
--- /dev/null
+++ b/tools/perf/arch/mips/include/perf_regs.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_PERF_REGS_H
+#define ARCH_PERF_REGS_H
+
+#include <stdlib.h>
+#include <linux/types.h>
+#include <asm/perf_regs.h>
+
+#define PERF_REGS_MAX PERF_REG_MIPS_MAX
+#define PERF_REG_IP PERF_REG_MIPS_PC
+#define PERF_REG_SP PERF_REG_MIPS_R29
+
+#define PERF_REGS_MASK ((1ULL << PERF_REG_MIPS_MAX) - 1)
+
+static inline const char *__perf_reg_name(int id)
+{
+ switch (id) {
+ case PERF_REG_MIPS_PC:
+ return "PC";
+ case PERF_REG_MIPS_R1:
+ return "$1";
+ case PERF_REG_MIPS_R2:
+ return "$2";
+ case PERF_REG_MIPS_R3:
+ return "$3";
+ case PERF_REG_MIPS_R4:
+ return "$4";
+ case PERF_REG_MIPS_R5:
+ return "$5";
+ case PERF_REG_MIPS_R6:
+ return "$6";
+ case PERF_REG_MIPS_R7:
+ return "$7";
+ case PERF_REG_MIPS_R8:
+ return "$8";
+ case PERF_REG_MIPS_R9:
+ return "$9";
+ case PERF_REG_MIPS_R10:
+ return "$10";
+ case PERF_REG_MIPS_R11:
+ return "$11";
+ case PERF_REG_MIPS_R12:
+ return "$12";
+ case PERF_REG_MIPS_R13:
+ return "$13";
+ case PERF_REG_MIPS_R14:
+ return "$14";
+ case PERF_REG_MIPS_R15:
+ return "$15";
+ case PERF_REG_MIPS_R16:
+ return "$16";
+ case PERF_REG_MIPS_R17:
+ return "$17";
+ case PERF_REG_MIPS_R18:
+ return "$18";
+ case PERF_REG_MIPS_R19:
+ return "$19";
+ case PERF_REG_MIPS_R20:
+ return "$20";
+ case PERF_REG_MIPS_R21:
+ return "$21";
+ case PERF_REG_MIPS_R22:
+ return "$22";
+ case PERF_REG_MIPS_R23:
+ return "$23";
+ case PERF_REG_MIPS_R24:
+ return "$24";
+ case PERF_REG_MIPS_R25:
+ return "$25";
+ case PERF_REG_MIPS_R28:
+ return "$28";
+ case PERF_REG_MIPS_R29:
+ return "$29";
+ case PERF_REG_MIPS_R30:
+ return "$30";
+ case PERF_REG_MIPS_R31:
+ return "$31";
+ default:
+ break;
+ }
+ return NULL;
+}
+
+#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/mips/util/Build b/tools/perf/arch/mips/util/Build
new file mode 100644
index 000000000000..51c8900a9a10
--- /dev/null
+++ b/tools/perf/arch/mips/util/Build
@@ -0,0 +1,3 @@
+perf-y += perf_regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
diff --git a/tools/perf/arch/mips/util/dwarf-regs.c b/tools/perf/arch/mips/util/dwarf-regs.c
new file mode 100644
index 000000000000..25c13a91c2a7
--- /dev/null
+++ b/tools/perf/arch/mips/util/dwarf-regs.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwarf-regs.c : Mapping of DWARF debug register numbers into register names.
+ *
+ * Copyright (C) 2013 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <stdio.h>
+#include <dwarf-regs.h>
+
+static const char *mips_gpr_names[32] = {
+ "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$9",
+ "$10", "$11", "$12", "$13", "$14", "$15", "$16", "$17", "$18", "$19",
+ "$20", "$21", "$22", "$23", "$24", "$25", "$26", "$27", "$28", "$29",
+ "$30", "$31"
+};
+
+const char *get_arch_regstr(unsigned int n)
+{
+ if (n < 32)
+ return mips_gpr_names[n];
+ if (n == 64)
+ return "hi";
+ if (n == 65)
+ return "lo";
+ return NULL;
+}
diff --git a/tools/perf/arch/mips/util/perf_regs.c b/tools/perf/arch/mips/util/perf_regs.c
new file mode 100644
index 000000000000..2864e2e3776d
--- /dev/null
+++ b/tools/perf/arch/mips/util/perf_regs.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../util/perf_regs.h"
+
+const struct sample_reg sample_reg_masks[] = {
+ SMPL_REG_END
+};
diff --git a/tools/perf/arch/mips/util/unwind-libunwind.c b/tools/perf/arch/mips/util/unwind-libunwind.c
new file mode 100644
index 000000000000..0d8c99c29da6
--- /dev/null
+++ b/tools/perf/arch/mips/util/unwind-libunwind.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <errno.h>
+#include <libunwind.h>
+#include "perf_regs.h"
+#include "../../util/unwind.h"
+#include "util/debug.h"
+
+int libunwind__arch_reg_id(int regnum)
+{
+ switch (regnum) {
+ case UNW_MIPS_R1 ... UNW_MIPS_R25:
+ return regnum - UNW_MIPS_R1 + PERF_REG_MIPS_R1;
+ case UNW_MIPS_R28 ... UNW_MIPS_R31:
+ return regnum - UNW_MIPS_R28 + PERF_REG_MIPS_R28;
+ case UNW_MIPS_PC:
+ return PERF_REG_MIPS_PC;
+ default:
+ pr_err("unwind: invalid reg id %d\n", regnum);
+ return -EINVAL;
+ }
+}
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
index b7945e5a543b..8a79c4126e5b 100644
--- a/tools/perf/arch/powerpc/util/Build
+++ b/tools/perf/arch/powerpc/util/Build
@@ -4,6 +4,8 @@ perf-y += kvm-stat.o
perf-y += perf_regs.o
perf-y += mem-events.o
perf-y += sym-handling.o
+perf-y += evsel.o
+perf-y += event.o
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_DWARF) += skip-callchain-idx.o
diff --git a/tools/perf/arch/powerpc/util/event.c b/tools/perf/arch/powerpc/util/event.c
new file mode 100644
index 000000000000..3bf441257466
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/event.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/zalloc.h>
+
+#include "../../../util/event.h"
+#include "../../../util/synthetic-events.h"
+#include "../../../util/machine.h"
+#include "../../../util/tool.h"
+#include "../../../util/map.h"
+#include "../../../util/debug.h"
+
+void arch_perf_parse_sample_weight(struct perf_sample *data,
+ const __u64 *array, u64 type)
+{
+ union perf_sample_weight weight;
+
+ weight.full = *array;
+ if (type & PERF_SAMPLE_WEIGHT)
+ data->weight = weight.full;
+ else {
+ data->weight = weight.var1_dw;
+ data->ins_lat = weight.var2_w;
+ data->p_stage_cyc = weight.var3_w;
+ }
+}
+
+void arch_perf_synthesize_sample_weight(const struct perf_sample *data,
+ __u64 *array, u64 type)
+{
+ *array = data->weight;
+
+ if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
+ *array &= 0xffffffff;
+ *array |= ((u64)data->ins_lat << 32);
+ }
+}
+
+const char *arch_perf_header_entry(const char *se_header)
+{
+ if (!strcmp(se_header, "Local INSTR Latency"))
+ return "Finish Cyc";
+ else if (!strcmp(se_header, "Pipeline Stage Cycle"))
+ return "Dispatch Cyc";
+ return se_header;
+}
+
+int arch_support_sort_key(const char *sort_key)
+{
+ if (!strcmp(sort_key, "p_stage_cyc"))
+ return 1;
+ return 0;
+}
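+
+/*
+ * For reference, a sketch of the uapi layout the helpers above rely on
+ * (the little-endian variant of union perf_sample_weight from
+ * linux/perf_event.h):
+ *
+ *	union perf_sample_weight {
+ *		__u64 full;
+ *		struct {
+ *			__u32 var1_dw;
+ *			__u16 var2_w;
+ *			__u16 var3_w;
+ *		};
+ *	};
+ */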
diff --git a/tools/perf/arch/powerpc/util/evsel.c b/tools/perf/arch/powerpc/util/evsel.c
new file mode 100644
index 000000000000..2f733cdc8dbb
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/evsel.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include "util/evsel.h"
+
+void arch_evsel__set_sample_weight(struct evsel *evsel)
+{
+ evsel__set_sample_bit(evsel, WEIGHT_STRUCT);
+}
diff --git a/tools/perf/arch/powerpc/util/kvm-stat.c b/tools/perf/arch/powerpc/util/kvm-stat.c
index eed9e5a42935..16510686c138 100644
--- a/tools/perf/arch/powerpc/util/kvm-stat.c
+++ b/tools/perf/arch/powerpc/util/kvm-stat.c
@@ -176,7 +176,7 @@ int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
}
/*
- * Incase of powerpc architecture, pmu registers are programmable
+ * In case of powerpc architecture, pmu registers are programmable
* by guest kernel. So monitoring guest via host may not provide
* valid samples with default 'cycles' event. It is better to use
* 'trace_imc/trace_cycles' event for guest profiling, since it
diff --git a/tools/perf/arch/powerpc/util/utils_header.h b/tools/perf/arch/powerpc/util/utils_header.h
index 5788eb1f1fe3..2baeb1c1ae85 100644
--- a/tools/perf/arch/powerpc/util/utils_header.h
+++ b/tools/perf/arch/powerpc/util/utils_header.h
@@ -10,6 +10,6 @@
#define SPRN_PVR 0x11F /* Processor Version Register */
#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
-#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revison field */
+#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */
#endif /* __PERF_UTIL_HEADER_H */
diff --git a/tools/perf/arch/x86/tests/bp-modify.c b/tools/perf/arch/x86/tests/bp-modify.c
index adcacf1b6609..dffcf9b52153 100644
--- a/tools/perf/arch/x86/tests/bp-modify.c
+++ b/tools/perf/arch/x86/tests/bp-modify.c
@@ -73,7 +73,7 @@ static int bp_modify1(void)
/*
* The parent does following steps:
* - creates a new breakpoint (id 0) for bp_2 function
- * - changes that breakponit to bp_1 function
+ * - changes that breakpoint to bp_1 function
* - waits for the breakpoint to hit and checks
* it has proper rip of bp_1 function
* - detaches the child
diff --git a/tools/perf/arch/x86/tests/insn-x86.c b/tools/perf/arch/x86/tests/insn-x86.c
index 4f75ae990140..0262b0d8ccf5 100644
--- a/tools/perf/arch/x86/tests/insn-x86.c
+++ b/tools/perf/arch/x86/tests/insn-x86.c
@@ -96,13 +96,12 @@ static int get_branch(const char *branch_str)
static int test_data_item(struct test_data *dat, int x86_64)
{
struct intel_pt_insn intel_pt_insn;
+ int op, branch, ret;
struct insn insn;
- int op, branch;
- insn_init(&insn, dat->data, MAX_INSN_SIZE, x86_64);
- insn_get_length(&insn);
-
- if (!insn_complete(&insn)) {
+ ret = insn_decode(&insn, dat->data, MAX_INSN_SIZE,
+ x86_64 ? INSN_MODE_64 : INSN_MODE_32);
+ if (ret < 0) {
pr_debug("Failed to decode: %s\n", dat->asm_rep);
return -1;
}
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index 0c72d418932e..dbeb04cb336e 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -9,6 +9,7 @@ perf-y += event.o
perf-y += evlist.o
perf-y += mem-events.o
perf-y += evsel.o
+perf-y += iostat.o
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
diff --git a/tools/perf/arch/x86/util/archinsn.c b/tools/perf/arch/x86/util/archinsn.c
index 34d600c51044..546feda08428 100644
--- a/tools/perf/arch/x86/util/archinsn.c
+++ b/tools/perf/arch/x86/util/archinsn.c
@@ -11,7 +11,7 @@ void arch_fetch_insn(struct perf_sample *sample,
struct machine *machine)
{
struct insn insn;
- int len;
+ int len, ret;
bool is64bit = false;
if (!sample->ip)
@@ -19,8 +19,9 @@ void arch_fetch_insn(struct perf_sample *sample,
len = thread__memcpy(thread, machine, sample->insn, sample->ip, sizeof(sample->insn), &is64bit);
if (len <= 0)
return;
- insn_init(&insn, sample->insn, len, is64bit);
- insn_get_length(&insn);
- if (insn_complete(&insn) && insn.length <= len)
+
+ ret = insn_decode(&insn, sample->insn, len,
+ is64bit ? INSN_MODE_64 : INSN_MODE_32);
+ if (ret >= 0 && insn.length <= len)
sample->insn_len = insn.length;
}
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index a6420c647959..6df0dc00d73a 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -776,6 +776,12 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
}
}
+ if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) {
+ u32 aux_watermark = opts->auxtrace_mmap_pages * page_size / 4;
+
+ intel_pt_evsel->core.attr.aux_watermark = aux_watermark;
+ }
+
intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
"tsc", &tsc_bit);
diff --git a/tools/perf/arch/x86/util/iostat.c b/tools/perf/arch/x86/util/iostat.c
new file mode 100644
index 000000000000..d63acb782b63
--- /dev/null
+++ b/tools/perf/arch/x86/util/iostat.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * perf iostat
+ *
+ * Copyright (C) 2020, Intel Corporation
+ *
+ * Authors: Alexander Antonov <alexander.antonov@linux.intel.com>
+ */
+
+#include <api/fs/fs.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <regex.h>
+#include "util/cpumap.h"
+#include "util/debug.h"
+#include "util/iostat.h"
+#include "util/counts.h"
+#include "path.h"
+
+#ifndef MAX_PATH
+#define MAX_PATH 1024
+#endif
+
+#define UNCORE_IIO_PMU_PATH "devices/uncore_iio_%d"
+#define SYSFS_UNCORE_PMU_PATH "%s/"UNCORE_IIO_PMU_PATH
+#define PLATFORM_MAPPING_PATH UNCORE_IIO_PMU_PATH"/die%d"
+
+/*
+ * Each metric requires one IIO event, which increments at every 4B transfer
+ * in the corresponding direction. The formulas to compute metrics are generic:
+ * #EventCount * 4B / (1024 * 1024)
+ */
+static const char * const iostat_metrics[] = {
+ "Inbound Read(MB)",
+ "Inbound Write(MB)",
+ "Outbound Read(MB)",
+ "Outbound Write(MB)",
+};
+
+static inline int iostat_metrics_count(void)
+{
+ return sizeof(iostat_metrics) / sizeof(char *);
+}
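+
+/*
+ * Illustrative only: per the formula above, a raw event count converts to
+ * megabytes as
+ *
+ *	mb = (double)count * 4 / (1024 * 1024);
+ */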
+
+static const char *iostat_metric_by_idx(int idx)
+{
+ return *(iostat_metrics + idx % iostat_metrics_count());
+}
+
+struct iio_root_port {
+ u32 domain;
+ u8 bus;
+ u8 die;
+ u8 pmu_idx;
+ int idx;
+};
+
+struct iio_root_ports_list {
+ struct iio_root_port **rps;
+ int nr_entries;
+};
+
+static struct iio_root_ports_list *root_ports;
+
+static void iio_root_port_show(FILE *output,
+ const struct iio_root_port * const rp)
+{
+ if (output && rp)
+ fprintf(output, "S%d-uncore_iio_%d<%04x:%02x>\n",
+ rp->die, rp->pmu_idx, rp->domain, rp->bus);
+}
+
+static struct iio_root_port *iio_root_port_new(u32 domain, u8 bus,
+ u8 die, u8 pmu_idx)
+{
+ struct iio_root_port *p = calloc(1, sizeof(*p));
+
+ if (p) {
+ p->domain = domain;
+ p->bus = bus;
+ p->die = die;
+ p->pmu_idx = pmu_idx;
+ }
+ return p;
+}
+
+static void iio_root_ports_list_free(struct iio_root_ports_list *list)
+{
+ int idx;
+
+ if (list) {
+ for (idx = 0; idx < list->nr_entries; idx++)
+ free(list->rps[idx]);
+ free(list->rps);
+ free(list);
+ }
+}
+
+static struct iio_root_port *iio_root_port_find_by_notation(
+ const struct iio_root_ports_list * const list, u32 domain, u8 bus)
+{
+ int idx;
+ struct iio_root_port *rp;
+
+ if (list) {
+ for (idx = 0; idx < list->nr_entries; idx++) {
+ rp = list->rps[idx];
+ if (rp && rp->domain == domain && rp->bus == bus)
+ return rp;
+ }
+ }
+ return NULL;
+}
+
+static int iio_root_ports_list_insert(struct iio_root_ports_list *list,
+ struct iio_root_port * const rp)
+{
+ struct iio_root_port **tmp_buf;
+
+ if (list && rp) {
+ rp->idx = list->nr_entries++;
+ tmp_buf = realloc(list->rps,
+ list->nr_entries * sizeof(*list->rps));
+ if (!tmp_buf) {
+ pr_err("Failed to realloc memory\n");
+ return -ENOMEM;
+ }
+ tmp_buf[rp->idx] = rp;
+ list->rps = tmp_buf;
+ }
+ return 0;
+}
+
+static int iio_mapping(u8 pmu_idx, struct iio_root_ports_list * const list)
+{
+ char *buf;
+ char path[MAX_PATH];
+ u32 domain;
+ u8 bus;
+ struct iio_root_port *rp;
+ size_t size;
+ int ret;
+
+ for (int die = 0; die < cpu__max_node(); die++) {
+ scnprintf(path, MAX_PATH, PLATFORM_MAPPING_PATH, pmu_idx, die);
+ if (sysfs__read_str(path, &buf, &size) < 0) {
+ if (pmu_idx)
+ goto out;
+ pr_err("Mode iostat is not supported\n");
+ return -1;
+ }
+ ret = sscanf(buf, "%04x:%02hhx", &domain, &bus);
+ free(buf);
+ if (ret != 2) {
+ pr_err("Invalid mapping data: iio_%d; die%d\n",
+ pmu_idx, die);
+ return -1;
+ }
+ rp = iio_root_port_new(domain, bus, die, pmu_idx);
+ if (!rp || iio_root_ports_list_insert(list, rp)) {
+ free(rp);
+ return -ENOMEM;
+ }
+ }
+out:
+ return 0;
+}
+
+static u8 iio_pmu_count(void)
+{
+ u8 pmu_idx = 0;
+ char path[MAX_PATH];
+ const char *sysfs = sysfs__mountpoint();
+
+ if (sysfs) {
+ for (;; pmu_idx++) {
+ snprintf(path, sizeof(path), SYSFS_UNCORE_PMU_PATH,
+ sysfs, pmu_idx);
+ if (access(path, F_OK) != 0)
+ break;
+ }
+ }
+ return pmu_idx;
+}
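+
+/*
+ * For example (assuming the usual /sys mount point), the loop above
+ * stops at the first missing directory: a system exposing
+ * /sys/devices/uncore_iio_0 .. /sys/devices/uncore_iio_5 yields a
+ * count of 6 IIO PMUs.
+ */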
+
+static int iio_root_ports_scan(struct iio_root_ports_list **list)
+{
+ int ret = -ENOMEM;
+ struct iio_root_ports_list *tmp_list;
+ u8 pmu_count = iio_pmu_count();
+
+ if (!pmu_count) {
+ pr_err("Unsupported uncore pmu configuration\n");
+ return -1;
+ }
+
+ tmp_list = calloc(1, sizeof(*tmp_list));
+ if (!tmp_list)
+ goto err;
+
+ for (u8 pmu_idx = 0; pmu_idx < pmu_count; pmu_idx++) {
+ ret = iio_mapping(pmu_idx, tmp_list);
+ if (ret)
+ break;
+ }
+err:
+ if (!ret)
+ *list = tmp_list;
+ else
+ iio_root_ports_list_free(tmp_list);
+
+ return ret;
+}
+
+static int iio_root_port_parse_str(u32 *domain, u8 *bus, char *str)
+{
+ int ret;
+ regex_t regex;
+ /*
+ * Expected format: domain:bus
+ * Valid domain range [0:ffff]
+ * Valid bus range [0:ff]
+ * Example: 0000:af, 0:3d, 01:7
+ */
+ regcomp(&regex, "^([a-f0-9A-F]{1,}):([a-f0-9A-F]{1,2})", REG_EXTENDED);
+ ret = regexec(&regex, str, 0, NULL, 0);
+ if (ret || sscanf(str, "%08x:%02hhx", domain, bus) != 2)
+ pr_warning("Unrecognized root port format: %s\n"
+ "Please use the following format:\n"
+ "\t [domain]:[bus]\n"
+ "\t for example: 0000:3d\n", str);
+
+ regfree(&regex);
+ return ret;
+}
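+
+/*
+ * Parsing sketch (illustrative): "0000:af", "0:3d" and "01:7" all pass
+ * the regex above; for "0000:3d", sscanf() then yields domain = 0x0000
+ * and bus = 0x3d.
+ */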
+
+static int iio_root_ports_list_filter(struct iio_root_ports_list **list,
+ const char *filter)
+{
+ char *tok, *tmp, *filter_copy = NULL;
+ struct iio_root_port *rp;
+ u32 domain;
+ u8 bus;
+ int ret = -ENOMEM;
+ struct iio_root_ports_list *tmp_list = calloc(1, sizeof(*tmp_list));
+
+ if (!tmp_list)
+ goto err;
+
+ filter_copy = strdup(filter);
+ if (!filter_copy)
+ goto err;
+
+ for (tok = strtok_r(filter_copy, ",", &tmp); tok;
+ tok = strtok_r(NULL, ",", &tmp)) {
+ if (!iio_root_port_parse_str(&domain, &bus, tok)) {
+ rp = iio_root_port_find_by_notation(*list, domain, bus);
+ if (rp) {
+ (*list)->rps[rp->idx] = NULL;
+ ret = iio_root_ports_list_insert(tmp_list, rp);
+ if (ret) {
+ free(rp);
+ goto err;
+ }
+ } else if (!iio_root_port_find_by_notation(tmp_list,
+ domain, bus))
+ pr_warning("Root port %04x:%02x were not found\n",
+ domain, bus);
+ }
+ }
+
+ if (tmp_list->nr_entries == 0) {
+ pr_err("Requested root ports were not found\n");
+ ret = -EINVAL;
+ }
+err:
+ iio_root_ports_list_free(*list);
+ if (ret)
+ iio_root_ports_list_free(tmp_list);
+ else
+ *list = tmp_list;
+
+ free(filter_copy);
+ return ret;
+}
+
+static int iostat_event_group(struct evlist *evl,
+ struct iio_root_ports_list *list)
+{
+ int ret;
+ int idx;
+ const char *iostat_cmd_template =
+ "{uncore_iio_%x/event=0x83,umask=0x04,ch_mask=0xF,fc_mask=0x07/,\
+ uncore_iio_%x/event=0x83,umask=0x01,ch_mask=0xF,fc_mask=0x07/,\
+ uncore_iio_%x/event=0xc0,umask=0x04,ch_mask=0xF,fc_mask=0x07/,\
+ uncore_iio_%x/event=0xc0,umask=0x01,ch_mask=0xF,fc_mask=0x07/}";
+ const int len_template = strlen(iostat_cmd_template) + 1;
+ struct evsel *evsel = NULL;
+ int metrics_count = iostat_metrics_count();
+ char *iostat_cmd = calloc(len_template, 1);
+
+ if (!iostat_cmd)
+ return -ENOMEM;
+
+ for (idx = 0; idx < list->nr_entries; idx++) {
+ sprintf(iostat_cmd, iostat_cmd_template,
+ list->rps[idx]->pmu_idx, list->rps[idx]->pmu_idx,
+ list->rps[idx]->pmu_idx, list->rps[idx]->pmu_idx);
+ ret = parse_events(evl, iostat_cmd, NULL);
+ if (ret)
+ goto err;
+ }
+
+ evlist__for_each_entry(evl, evsel) {
+ evsel->priv = list->rps[evsel->idx / metrics_count];
+ }
+ list->nr_entries = 0;
+err:
+ iio_root_ports_list_free(list);
+ free(iostat_cmd);
+ return ret;
+}
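+
+/*
+ * For reference, with pmu_idx 0 the template above expands to one group
+ * of the four IIO bandwidth events on that PMU (sketch):
+ * {uncore_iio_0/event=0x83,umask=0x04,ch_mask=0xF,fc_mask=0x07/, ...,
+ *  uncore_iio_0/event=0xc0,umask=0x01,ch_mask=0xF,fc_mask=0x07/}
+ */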
+
+int iostat_prepare(struct evlist *evlist, struct perf_stat_config *config)
+{
+ if (evlist->core.nr_entries > 0) {
+ pr_warning("The -e and -M options are not supported."
+ "All chosen events/metrics will be dropped\n");
+ evlist__delete(evlist);
+ evlist = evlist__new();
+ if (!evlist)
+ return -ENOMEM;
+ }
+
+ config->metric_only = true;
+ config->aggr_mode = AGGR_GLOBAL;
+
+ return iostat_event_group(evlist, root_ports);
+}
+
+int iostat_parse(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ int ret;
+ struct perf_stat_config *config = (struct perf_stat_config *)opt->data;
+
+ ret = iio_root_ports_scan(&root_ports);
+ if (!ret) {
+ config->iostat_run = true;
+ if (!str)
+ iostat_mode = IOSTAT_RUN;
+ else if (!strcmp(str, "list"))
+ iostat_mode = IOSTAT_LIST;
+ else {
+ iostat_mode = IOSTAT_RUN;
+ ret = iio_root_ports_list_filter(&root_ports, str);
+ }
+ }
+ return ret;
+}
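+
+/*
+ * Option handling sketch (illustrative): "--iostat" runs on all
+ * discovered root ports, "--iostat=list" only lists them, and
+ * "--iostat=0000:3d,0000:af" restricts the run to the given ports.
+ */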
+
+void iostat_list(struct evlist *evlist, struct perf_stat_config *config)
+{
+ struct evsel *evsel;
+ struct iio_root_port *rp = NULL;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (rp != evsel->priv) {
+ rp = evsel->priv;
+ iio_root_port_show(config->output, rp);
+ }
+ }
+}
+
+void iostat_release(struct evlist *evlist)
+{
+ struct evsel *evsel;
+ struct iio_root_port *rp = NULL;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (rp != evsel->priv) {
+ rp = evsel->priv;
+ free(evsel->priv);
+ }
+ }
+}
+
+void iostat_prefix(struct evlist *evlist,
+ struct perf_stat_config *config,
+ char *prefix, struct timespec *ts)
+{
+ struct iio_root_port *rp = evlist->selected->priv;
+
+ if (rp) {
+ if (ts)
+ sprintf(prefix, "%6lu.%09lu%s%04x:%02x%s",
+ ts->tv_sec, ts->tv_nsec,
+ config->csv_sep, rp->domain, rp->bus,
+ config->csv_sep);
+ else
+ sprintf(prefix, "%04x:%02x%s", rp->domain, rp->bus,
+ config->csv_sep);
+ }
+}
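+
+/*
+ * Output sketch (illustrative, default whitespace separator):
+ *   "     1.001020003 0000:3d " in interval mode, or
+ *   "0000:3d " when no timestamp is printed.
+ */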
+
+void iostat_print_header_prefix(struct perf_stat_config *config)
+{
+ if (config->csv_output)
+ fputs("port,", config->output);
+ else if (config->interval)
+ fprintf(config->output, "# time port ");
+ else
+ fprintf(config->output, " port ");
+}
+
+void iostat_print_metric(struct perf_stat_config *config, struct evsel *evsel,
+ struct perf_stat_output_ctx *out)
+{
+ double iostat_value = 0;
+ u64 prev_count_val = 0;
+ const char *iostat_metric = iostat_metric_by_idx(evsel->idx);
+ u8 die = ((struct iio_root_port *)evsel->priv)->die;
+ struct perf_counts_values *count = perf_counts(evsel->counts, die, 0);
+
+ if (count->run && count->ena) {
+ if (evsel->prev_raw_counts && !out->force_header) {
+ struct perf_counts_values *prev_count =
+ perf_counts(evsel->prev_raw_counts, die, 0);
+
+ prev_count_val = prev_count->val;
+ prev_count->val = count->val;
+ }
+ iostat_value = (count->val - prev_count_val) /
+ ((double) count->run / count->ena);
+ }
+ out->print_metric(config, out->ctx, NULL, "%8.0f", iostat_metric,
+ iostat_value / (256 * 1024));
+}
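+
+/*
+ * Worked example (illustrative): with no multiplexing (run == ena) and
+ * an interval delta of 262144000 transfers, the printed value is
+ * 262144000 / (256 * 1024) = 1000 MB.
+ */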
+
+void iostat_print_counters(struct evlist *evlist,
+ struct perf_stat_config *config, struct timespec *ts,
+ char *prefix, iostat_print_counter_t print_cnt_cb)
+{
+ void *perf_device = NULL;
+ struct evsel *counter = evlist__first(evlist);
+
+ evlist__set_selected(evlist, counter);
+ iostat_prefix(evlist, config, prefix, ts);
+ fprintf(config->output, "%s", prefix);
+ evlist__for_each_entry(evlist, counter) {
+ perf_device = evlist->selected->priv;
+ if (perf_device && perf_device != counter->priv) {
+ evlist__set_selected(evlist, counter);
+ iostat_prefix(evlist, config, prefix, ts);
+ fprintf(config->output, "\n%s", prefix);
+ }
+ print_cnt_cb(config, counter, prefix);
+ }
+ fputc('\n', config->output);
+}
diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c
index fca81b39b09f..207c56805c55 100644
--- a/tools/perf/arch/x86/util/perf_regs.c
+++ b/tools/perf/arch/x86/util/perf_regs.c
@@ -165,7 +165,7 @@ static int sdt_init_op_regex(void)
/*
* Max x86 register name length is 5(ex: %r15d). So, 6th char
* should always contain NULL. This helps to find register name
- * length using strlen, insted of maintaing one more variable.
+ * length using strlen, instead of maintaining one more variable.
*/
#define SDT_REG_NAME_SIZE 6
@@ -207,7 +207,7 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
* and displacement 0 (Both sign and displacement 0 are
* optional so it may be empty). Use one more character
* to hold last NULL so that strlen can be used to find
- * prefix length, instead of maintaing one more variable.
+ * prefix length, instead of maintaining one more variable.
*/
char prefix[3] = {0};
diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
index 0a0ff1247c83..79d13dbc0a47 100644
--- a/tools/perf/bench/epoll-wait.c
+++ b/tools/perf/bench/epoll-wait.c
@@ -17,7 +17,7 @@
* While the second model, enabled via --multiq option, uses multiple
* queueing (which refers to one epoll instance per worker). For example,
* short lived tcp connections in a high throughput httpd server will
- * ditribute the accept()'ing connections across CPUs. In this case each
+ * distribute the accept()'ing connections across CPUs. In this case each
* worker does a limited amount of processing.
*
* [queue A] ---> [worker]
@@ -198,7 +198,7 @@ static void *workerfn(void *arg)
do {
/*
- * Block undefinitely waiting for the IN event.
+ * Block indefinitely waiting for the IN event.
* In order to stress the epoll_wait(2) syscall,
* call it event per event, instead of a larger
* batch (max)limit.
diff --git a/tools/perf/bench/inject-buildid.c b/tools/perf/bench/inject-buildid.c
index 280227e3ffd7..55d373b75791 100644
--- a/tools/perf/bench/inject-buildid.c
+++ b/tools/perf/bench/inject-buildid.c
@@ -372,7 +372,7 @@ static int inject_build_id(struct bench_data *data, u64 *max_rss)
len += synthesize_flush(data);
}
- /* tihs makes the child to finish */
+ /* this makes the child to finish */
close(data->input_pipe[1]);
wait4(data->pid, &status, 0, &rusage);
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 20b87e29c96f..f2640179ada9 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -42,7 +42,7 @@
#endif
/*
- * Regular printout to the terminal, supressed if -q is specified:
+ * Regular printout to the terminal, suppressed if -q is specified:
*/
#define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index a23ba6bb99b6..49627a7bed7c 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -239,7 +239,7 @@ static int evsel__add_sample(struct evsel *evsel, struct perf_sample *sample,
}
/*
- * XXX filtered samples can still have branch entires pointing into our
+ * XXX filtered samples can still have branch entries pointing into our
* symbol and are missed.
*/
process_branch_stack(sample->branch_stack, al, sample);
@@ -374,13 +374,6 @@ find_next:
} else {
hist_entry__tty_annotate(he, evsel, ann);
nd = rb_next(nd);
- /*
- * Since we have a hist_entry per IP for the same
- * symbol, free he->ms.sym->src to signal we already
- * processed this symbol.
- */
- zfree(&notes->src->cycles_hist);
- zfree(&notes->src);
}
}
}
@@ -411,8 +404,8 @@ static int __cmd_annotate(struct perf_annotate *ann)
goto out;
if (dump_trace) {
- perf_session__fprintf_nr_events(session, stdout);
- evlist__fprintf_nr_events(session->evlist, stdout);
+ perf_session__fprintf_nr_events(session, stdout, false);
+ evlist__fprintf_nr_events(session->evlist, stdout, false);
goto out;
}
@@ -425,7 +418,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
total_nr_samples = 0;
evlist__for_each_entry(session->evlist, pos) {
struct hists *hists = evsel__hists(pos);
- u32 nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ u32 nr_samples = hists->stats.nr_samples;
if (nr_samples > 0) {
total_nr_samples += nr_samples;
@@ -538,6 +531,10 @@ int cmd_annotate(int argc, const char **argv)
"Strip first N entries of source file path name in programs (with --prefix)"),
OPT_STRING(0, "objdump", &annotate.opts.objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
+ OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
+ "Enable symbol demangling"),
+ OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
+ "Enable kernel symbol demangling"),
OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
"Show event group information together"),
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
@@ -619,14 +616,22 @@ int cmd_annotate(int argc, const char **argv)
setup_browser(true);
- if ((use_browser == 1 || annotate.use_stdio2) && annotate.has_br_stack) {
+ /*
+ * Events of different processes may correspond to the same
+ * symbol, we do not care about the processes in annotate,
+ * set sort order to avoid repeated output.
+ */
+ sort_order = "dso,symbol";
+
+ /*
+ * Set SORT_MODE__BRANCH so that annotate display IPC/Cycle
+ * if branch info is in perf data in TUI mode.
+ */
+ if ((use_browser == 1 || annotate.use_stdio2) && annotate.has_br_stack)
sort__mode = SORT_MODE__BRANCH;
- if (setup_sorting(annotate.session->evlist) < 0)
- usage_with_options(annotate_usage, options);
- } else {
- if (setup_sorting(NULL) < 0)
- usage_with_options(annotate_usage, options);
- }
+
+ if (setup_sorting(NULL) < 0)
+ usage_with_options(annotate_usage, options);
ret = __cmd_annotate(&annotate);
diff --git a/tools/perf/builtin-daemon.c b/tools/perf/builtin-daemon.c
index 7c4a9d424a64..61929f63a047 100644
--- a/tools/perf/builtin-daemon.c
+++ b/tools/perf/builtin-daemon.c
@@ -6,7 +6,6 @@
#include <linux/zalloc.h>
#include <linux/string.h>
#include <linux/limits.h>
-#include <linux/string.h>
#include <string.h>
#include <sys/file.h>
#include <signal.h>
@@ -24,8 +23,6 @@
#include <sys/signalfd.h>
#include <sys/wait.h>
#include <poll.h>
-#include <sys/stat.h>
-#include <time.h>
#include "builtin.h"
#include "perf.h"
#include "debug.h"
diff --git a/tools/perf/builtin-data.c b/tools/perf/builtin-data.c
index 8d23b8d6ee8e..15ca23675ef0 100644
--- a/tools/perf/builtin-data.c
+++ b/tools/perf/builtin-data.c
@@ -7,7 +7,6 @@
#include "debug.h"
#include <subcmd/parse-options.h>
#include "data-convert.h"
-#include "data-convert-bt.h"
typedef int (*data_cmd_fn_t)(int argc, const char **argv);
@@ -55,7 +54,8 @@ static const char * const data_convert_usage[] = {
static int cmd_data_convert(int argc, const char **argv)
{
- const char *to_ctf = NULL;
+ const char *to_json = NULL;
+ const char *to_ctf = NULL;
struct perf_data_convert_opts opts = {
.force = false,
.all = false,
@@ -63,6 +63,7 @@ static int cmd_data_convert(int argc, const char **argv)
const struct option options[] = {
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_STRING('i', "input", &input_name, "file", "input file name"),
+ OPT_STRING(0, "to-json", &to_json, NULL, "Convert to JSON format"),
#ifdef HAVE_LIBBABELTRACE_SUPPORT
OPT_STRING(0, "to-ctf", &to_ctf, NULL, "Convert to CTF format"),
OPT_BOOLEAN(0, "tod", &opts.tod, "Convert time to wall clock time"),
@@ -72,11 +73,6 @@ static int cmd_data_convert(int argc, const char **argv)
OPT_END()
};
-#ifndef HAVE_LIBBABELTRACE_SUPPORT
- pr_err("No conversion support compiled in. perf should be compiled with environment variables LIBBABELTRACE=1 and LIBBABELTRACE_DIR=/path/to/libbabeltrace/\n");
- return -1;
-#endif
-
argc = parse_options(argc, argv, options,
data_convert_usage, 0);
if (argc) {
@@ -84,11 +80,25 @@ static int cmd_data_convert(int argc, const char **argv)
return -1;
}
+ if (to_json && to_ctf) {
+ pr_err("You cannot specify both --to-ctf and --to-json.\n");
+ return -1;
+ }
+ if (!to_json && !to_ctf) {
+ pr_err("You must specify one of --to-ctf or --to-json.\n");
+ return -1;
+ }
+
+ if (to_json)
+ return bt_convert__perf2json(input_name, to_json, &opts);
+
if (to_ctf) {
#ifdef HAVE_LIBBABELTRACE_SUPPORT
return bt_convert__perf2ctf(input_name, to_ctf, &opts);
#else
- pr_err("The libbabeltrace support is not compiled in.\n");
+ pr_err("The libbabeltrace support is not compiled in. perf should be "
+ "compiled with environment variables LIBBABELTRACE=1 and "
+ "LIBBABELTRACE_DIR=/path/to/libbabeltrace/\n");
return -1;
#endif
}
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 878e04b1fab7..f52b3a799e76 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -1796,7 +1796,7 @@ static int ui_init(void)
data__for_each_file(i, d) {
/*
- * Baseline or compute realted columns:
+ * Baseline or compute related columns:
*
* PERF_HPP_DIFF__BASELINE
* PERF_HPP_DIFF__DELTA
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index a2f1e53f37a7..01326e370009 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -49,7 +49,7 @@ struct lock_stat {
/*
* FIXME: evsel__intval() returns u64,
- * so address of lockdep_map should be dealed as 64bit.
+ * so address of lockdep_map should be treated as 64bit.
* Is there a better solution?
*/
void *addr; /* address of lockdep_map, used as ID */
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 35465d1db6dd..3337b5f93336 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -47,6 +47,8 @@
#include "util/util.h"
#include "util/pfm.h"
#include "util/clockid.h"
+#include "util/pmu-hybrid.h"
+#include "util/evlist-hybrid.h"
#include "asm/bug.h"
#include "perf.h"
@@ -1603,6 +1605,32 @@ static void hit_auxtrace_snapshot_trigger(struct record *rec)
}
}
+static void record__uniquify_name(struct record *rec)
+{
+ struct evsel *pos;
+ struct evlist *evlist = rec->evlist;
+ char *new_name;
+ int ret;
+
+ if (!perf_pmu__has_hybrid())
+ return;
+
+ evlist__for_each_entry(evlist, pos) {
+ if (!evsel__is_hybrid(pos))
+ continue;
+
+ if (strchr(pos->name, '/'))
+ continue;
+
+ ret = asprintf(&new_name, "%s/%s/",
+ pos->pmu_name, pos->name);
+ if (ret) {
+ free(pos->name);
+ pos->name = new_name;
+ }
+ }
+}
+
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
int err;
@@ -1707,6 +1735,8 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
if (data->is_pipe && rec->evlist->core.nr_entries == 1)
rec->opts.sample_id = true;
+ record__uniquify_name(rec);
+
if (record__open(rec) != 0) {
err = -1;
goto out_child;
@@ -1977,9 +2007,13 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
record__auxtrace_snapshot_exit(rec);
if (forks && workload_exec_errno) {
- char msg[STRERR_BUFSIZE];
+ char msg[STRERR_BUFSIZE], strevsels[2048];
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
- pr_err("Workload failed: %s\n", emsg);
+
+ evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels);
+
+ pr_err("Failed to collect '%s' for the '%s' workload: %s\n",
+ strevsels, argv[0], emsg);
err = -1;
goto out_child;
}
@@ -2786,10 +2820,19 @@ int cmd_record(int argc, const char **argv)
if (record.opts.overwrite)
record.opts.tail_synthesize = true;
- if (rec->evlist->core.nr_entries == 0 &&
- __evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
- pr_err("Not enough memory for event selector list\n");
- goto out;
+ if (rec->evlist->core.nr_entries == 0) {
+ if (perf_pmu__has_hybrid()) {
+ err = evlist__add_default_hybrid(rec->evlist,
+ !record.opts.no_samples);
+ } else {
+ err = __evlist__add_default(rec->evlist,
+ !record.opts.no_samples);
+ }
+
+ if (err < 0) {
+ pr_err("Not enough memory for event selector list\n");
+ goto out;
+ }
}
if (rec->opts.target.tid && !rec->opts.no_inherit_set)
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 2a845d6cac09..36f9ccfeb38a 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -84,6 +84,8 @@ struct report {
bool nonany_branch_mode;
bool group_set;
bool stitch_lbr;
+ bool disable_order;
+ bool skip_empty;
int max_stack;
struct perf_read_values show_threads_values;
struct annotation_options annotation_opts;
@@ -134,6 +136,11 @@ static int report__config(const char *var, const char *value, void *cb)
return 0;
}
+ if (!strcmp(var, "report.skip-empty")) {
+ rep->skip_empty = perf_config_bool(var, value);
+ return 0;
+ }
+
return 0;
}
@@ -435,7 +442,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
{
size_t ret;
char unit;
- unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ unsigned long nr_samples = hists->stats.nr_samples;
u64 nr_events = hists->stats.total_period;
struct evsel *evsel = hists_to_evsel(hists);
char buf[512];
@@ -463,7 +470,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
nr_samples += pos_hists->stats.nr_non_filtered_samples;
nr_events += pos_hists->stats.total_non_filtered_period;
} else {
- nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ nr_samples += pos_hists->stats.nr_samples;
nr_events += pos_hists->stats.total_period;
}
}
@@ -529,6 +536,9 @@ static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, c
if (symbol_conf.event_group && !evsel__is_group_leader(pos))
continue;
+ if (rep->skip_empty && !hists->stats.nr_samples)
+ continue;
+
hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
if (rep->total_cycles_mode) {
@@ -707,9 +717,22 @@ static void report__output_resort(struct report *rep)
ui_progress__finish();
}
+static int count_sample_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct evsel *evsel,
+ struct machine *machine __maybe_unused)
+{
+ struct hists *hists = evsel__hists(evsel);
+
+ hists__inc_nr_events(hists);
+ return 0;
+}
+
static void stats_setup(struct report *rep)
{
memset(&rep->tool, 0, sizeof(rep->tool));
+ rep->tool.sample = count_sample_event;
rep->tool.no_warn = true;
}
@@ -717,7 +740,8 @@ static int stats_print(struct report *rep)
{
struct perf_session *session = rep->session;
- perf_session__fprintf_nr_events(session, stdout);
+ perf_session__fprintf_nr_events(session, stdout, rep->skip_empty);
+ evlist__fprintf_nr_events(session->evlist, stdout, rep->skip_empty);
return 0;
}
@@ -929,8 +953,10 @@ static int __cmd_report(struct report *rep)
perf_session__fprintf_dsos(session, stdout);
if (dump_trace) {
- perf_session__fprintf_nr_events(session, stdout);
- evlist__fprintf_nr_events(session->evlist, stdout);
+ perf_session__fprintf_nr_events(session, stdout,
+ rep->skip_empty);
+ evlist__fprintf_nr_events(session->evlist, stdout,
+ rep->skip_empty);
return 0;
}
}
@@ -1139,6 +1165,7 @@ int cmd_report(int argc, const char **argv)
.pretty_printing_style = "normal",
.socket_filter = -1,
.annotation_opts = annotation__default_options,
+ .skip_empty = true,
};
const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
@@ -1296,6 +1323,10 @@ int cmd_report(int argc, const char **argv)
OPTS_EVSWITCH(&report.evswitch),
OPT_BOOLEAN(0, "total-cycles", &report.total_cycles_mode,
"Sort all blocks by 'Sampled Cycles%'"),
+ OPT_BOOLEAN(0, "disable-order", &report.disable_order,
+ "Disable raw trace ordering"),
+ OPT_BOOLEAN(0, "skip-empty", &report.skip_empty,
+ "Do not display empty (or dummy) events in the output"),
OPT_END()
};
struct perf_data data = {
@@ -1329,7 +1360,7 @@ int cmd_report(int argc, const char **argv)
if (report.mmaps_mode)
report.tasks_mode = true;
- if (dump_trace)
+ if (dump_trace && report.disable_order)
report.tool.ordered_events = false;
if (quiet)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 69c769b04a61..954ce2f594e9 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1712,7 +1712,7 @@ static int perf_sched__process_fork_event(struct perf_tool *tool,
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- /* run the fork event through the perf machineruy */
+ /* run the fork event through the perf machinery */
perf_event__process_fork(tool, event, sample, machine);
/* and then run additional processing needed for this command */
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 5915f19cee55..1280cbfad4db 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -314,8 +314,7 @@ static inline struct evsel_script *evsel_script(struct evsel *evsel)
return (struct evsel_script *)evsel->priv;
}
-static struct evsel_script *perf_evsel_script__new(struct evsel *evsel,
- struct perf_data *data)
+static struct evsel_script *evsel_script__new(struct evsel *evsel, struct perf_data *data)
{
struct evsel_script *es = zalloc(sizeof(*es));
@@ -335,7 +334,7 @@ out_free:
return NULL;
}
-static void perf_evsel_script__delete(struct evsel_script *es)
+static void evsel_script__delete(struct evsel_script *es)
{
zfree(&es->filename);
fclose(es->fp);
@@ -343,7 +342,7 @@ static void perf_evsel_script__delete(struct evsel_script *es)
free(es);
}
-static int perf_evsel_script__fprintf(struct evsel_script *es, FILE *fp)
+static int evsel_script__fprintf(struct evsel_script *es, FILE *fp)
{
struct stat st;
@@ -2219,8 +2218,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
if (!evsel->priv) {
if (scr->per_event_dump) {
- evsel->priv = perf_evsel_script__new(evsel,
- scr->session->data);
+ evsel->priv = evsel_script__new(evsel, scr->session->data);
} else {
es = zalloc(sizeof(*es));
if (!es)
@@ -2475,7 +2473,7 @@ static void perf_script__fclose_per_event_dump(struct perf_script *script)
evlist__for_each_entry(evlist, evsel) {
if (!evsel->priv)
break;
- perf_evsel_script__delete(evsel->priv);
+ evsel_script__delete(evsel->priv);
evsel->priv = NULL;
}
}
@@ -2488,14 +2486,14 @@ static int perf_script__fopen_per_event_dump(struct perf_script *script)
/*
* Already setup? I.e. we may be called twice in cases like
* Intel PT, one for the intel_pt// and dummy events, then
- * for the evsels syntheized from the auxtrace info.
+ * for the evsels synthesized from the auxtrace info.
*
* See perf_script__process_auxtrace_info.
*/
if (evsel->priv != NULL)
continue;
- evsel->priv = perf_evsel_script__new(evsel, script->session->data);
+ evsel->priv = evsel_script__new(evsel, script->session->data);
if (evsel->priv == NULL)
goto out_err_fclose;
}
@@ -2530,8 +2528,8 @@ static void perf_script__exit_per_event_dump_stats(struct perf_script *script)
evlist__for_each_entry(script->session->evlist, evsel) {
struct evsel_script *es = evsel->priv;
- perf_evsel_script__fprintf(es, stdout);
- perf_evsel_script__delete(es);
+ evsel_script__fprintf(es, stdout);
+ evsel_script__delete(es);
evsel->priv = NULL;
}
}
@@ -3085,7 +3083,7 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
*
* Fixme: All existing "xxx-record" are all in good formats "-e event ",
* which is covered well now. And new parsing code should be added to
- * cover the future complexing formats like event groups etc.
+ * cover the future complex formats like event groups etc.
*/
static int check_ev_match(char *dir_name, char *scriptname,
struct perf_session *session)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 2e2e4a8345ea..5a830ae09418 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -48,6 +48,7 @@
#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
+#include "util/evlist-hybrid.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
@@ -68,6 +69,8 @@
#include "util/affinity.h"
#include "util/pfm.h"
#include "util/bpf_counter.h"
+#include "util/iostat.h"
+#include "util/pmu-hybrid.h"
#include "asm/bug.h"
#include <linux/time64.h>
@@ -160,6 +163,7 @@ static const char *smi_cost_attrs = {
};
static struct evlist *evsel_list;
+static bool all_counters_use_bpf = true;
static struct target target = {
.uid = UINT_MAX,
@@ -212,7 +216,8 @@ static struct perf_stat_config stat_config = {
.walltime_nsecs_stats = &walltime_nsecs_stats,
.big_num = true,
.ctl_fd = -1,
- .ctl_fd_ack = -1
+ .ctl_fd_ack = -1,
+ .iostat_run = false,
};
static bool cpus_map_matched(struct evsel *a, struct evsel *b)
@@ -239,6 +244,9 @@ static void evlist__check_cpu_maps(struct evlist *evlist)
struct evsel *evsel, *pos, *leader;
char buf[1024];
+ if (evlist__has_hybrid(evlist))
+ evlist__warn_hybrid_group(evlist);
+
evlist__for_each_entry(evlist, evsel) {
leader = evsel->leader;
@@ -399,6 +407,9 @@ static int read_affinity_counters(struct timespec *rs)
struct affinity affinity;
int i, ncpus, cpu;
+ if (all_counters_use_bpf)
+ return 0;
+
if (affinity__setup(&affinity) < 0)
return -1;
@@ -413,6 +424,8 @@ static int read_affinity_counters(struct timespec *rs)
evlist__for_each_entry(evsel_list, counter) {
if (evsel__cpu_iter_skip(counter, cpu))
continue;
+ if (evsel__is_bpf(counter))
+ continue;
if (!counter->err) {
counter->err = read_counter_cpu(counter, rs,
counter->cpu_iter - 1);
@@ -429,6 +442,9 @@ static int read_bpf_map_counters(void)
int err;
evlist__for_each_entry(evsel_list, counter) {
+ if (!evsel__is_bpf(counter))
+ continue;
+
err = bpf_counter__read(counter);
if (err)
return err;
@@ -439,14 +455,10 @@ static int read_bpf_map_counters(void)
static void read_counters(struct timespec *rs)
{
struct evsel *counter;
- int err;
if (!stat_config.stop_read_counter) {
- if (target__has_bpf(&target))
- err = read_bpf_map_counters();
- else
- err = read_affinity_counters(rs);
- if (err < 0)
+ if (read_bpf_map_counters() ||
+ read_affinity_counters(rs))
return;
}
@@ -535,12 +547,13 @@ static int enable_counters(void)
struct evsel *evsel;
int err;
- if (target__has_bpf(&target)) {
- evlist__for_each_entry(evsel_list, evsel) {
- err = bpf_counter__enable(evsel);
- if (err)
- return err;
- }
+ evlist__for_each_entry(evsel_list, evsel) {
+ if (!evsel__is_bpf(evsel))
+ continue;
+
+ err = bpf_counter__enable(evsel);
+ if (err)
+ return err;
}
if (stat_config.initial_delay < 0) {
@@ -784,14 +797,20 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
if (affinity__setup(&affinity) < 0)
return -1;
- if (target__has_bpf(&target)) {
- evlist__for_each_entry(evsel_list, counter) {
- if (bpf_counter__load(counter, &target))
- return -1;
- }
+ evlist__for_each_entry(evsel_list, counter) {
+ if (bpf_counter__load(counter, &target))
+ return -1;
+ if (!evsel__is_bpf(counter))
+ all_counters_use_bpf = false;
}
evlist__for_each_cpu (evsel_list, i, cpu) {
+ /*
+ * bperf calls evsel__open_per_cpu() in bperf__load(), so
+ * no need to call it again here.
+ */
+ if (target.use_bpf)
+ break;
affinity__set(&affinity, cpu);
evlist__for_each_entry(evsel_list, counter) {
@@ -799,6 +818,8 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
continue;
if (counter->reset_group || counter->errored)
continue;
+ if (evsel__is_bpf(counter))
+ continue;
try_again:
if (create_perf_stat_counter(counter, &stat_config, &target,
counter->cpu_iter - 1) < 0) {
@@ -925,15 +946,15 @@ try_again_reset:
/*
* Enable counters and exec the command:
*/
- t0 = rdclock();
- clock_gettime(CLOCK_MONOTONIC, &ref_time);
-
if (forks) {
evlist__start_workload(evsel_list);
err = enable_counters();
if (err)
return -1;
+ t0 = rdclock();
+ clock_gettime(CLOCK_MONOTONIC, &ref_time);
+
if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
status = dispatch_events(forks, timeout, interval, &times);
if (child_pid != -1) {
@@ -954,6 +975,10 @@ try_again_reset:
err = enable_counters();
if (err)
return -1;
+
+ t0 = rdclock();
+ clock_gettime(CLOCK_MONOTONIC, &ref_time);
+
status = dispatch_events(forks, timeout, interval, &times);
}
@@ -1083,6 +1108,11 @@ void perf_stat__set_big_num(int set)
stat_config.big_num = (set != 0);
}
+void perf_stat__set_no_csv_summary(int set)
+{
+ stat_config.no_csv_summary = (set != 0);
+}
+
static int stat__set_big_num(const struct option *opt __maybe_unused,
const char *s __maybe_unused, int unset)
{
@@ -1146,6 +1176,10 @@ static struct option stat_options[] = {
#ifdef HAVE_BPF_SKEL
OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
"stat events on existing bpf program id"),
+ OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
+ "use bpf program to count events"),
+ OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
+ "path to perf_event_attr map"),
#endif
OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
"system-wide collection from all CPUs"),
@@ -1235,6 +1269,8 @@ static struct option stat_options[] = {
"threads of same physical core"),
OPT_BOOLEAN(0, "summary", &stat_config.summary,
"print summary for interval mode"),
+ OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
+ "don't print 'summary' for CSV summary output"),
OPT_BOOLEAN(0, "quiet", &stat_config.quiet,
"don't print output (useful with record)"),
#ifdef HAVE_LIBPFM
@@ -1247,6 +1283,9 @@ static struct option stat_options[] = {
"\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
"\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
parse_control_option),
+ OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
+ "measure I/O performance metrics provided by arch/platform",
+ iostat_parse),
OPT_END()
};
@@ -1605,6 +1644,12 @@ static int add_default_attributes(void)
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
};
+ struct perf_event_attr default_sw_attrs[] = {
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
+};
/*
* Detailed stats (-d), covering the L1 and last level data caches:
@@ -1705,7 +1750,7 @@ static int add_default_attributes(void)
bzero(&errinfo, sizeof(errinfo));
if (transaction_run) {
/* Handle -T as -M transaction. Once platform specific metrics
- * support has been added to the json files, all archictures
+ * support has been added to the json files, all architectures
* will use this approach. To determine transaction support
* on an architecture test for such a metric name.
*/
@@ -1841,6 +1886,28 @@ setup_metrics:
}
if (!evsel_list->core.nr_entries) {
+ if (perf_pmu__has_hybrid()) {
+ const char *hybrid_str = "cycles,instructions,branches,branch-misses";
+
+ if (target__has_cpu(&target))
+ default_sw_attrs[0].config = PERF_COUNT_SW_CPU_CLOCK;
+
+ if (evlist__add_default_attrs(evsel_list,
+ default_sw_attrs) < 0) {
+ return -1;
+ }
+
+ err = parse_events(evsel_list, hybrid_str, &errinfo);
+ if (err) {
+ fprintf(stderr,
+ "Cannot set up hybrid events %s: %d\n",
+ hybrid_str, err);
+ parse_events_print_error(&errinfo, hybrid_str);
+ return -1;
+ }
+ return err;
+ }
+
if (target__has_cpu(&target))
default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
@@ -2320,6 +2387,17 @@ int cmd_stat(int argc, const char **argv)
goto out;
}
+ if (stat_config.iostat_run) {
+ status = iostat_prepare(evsel_list, &stat_config);
+ if (status)
+ goto out;
+ if (iostat_mode == IOSTAT_LIST) {
+ iostat_list(evsel_list, &stat_config);
+ goto out;
+ } else if (verbose)
+ iostat_list(evsel_list, &stat_config);
+ }
+
if (add_default_attributes())
goto out;
@@ -2357,6 +2435,9 @@ int cmd_stat(int argc, const char **argv)
evlist__check_cpu_maps(evsel_list);
+ if (perf_pmu__has_hybrid())
+ stat_config.no_merge = true;
+
/*
* Initialize thread_map with comm names,
* so we could print it out on output.
@@ -2459,7 +2540,7 @@ int cmd_stat(int argc, const char **argv)
/*
* We synthesize the kernel mmap record just so that older tools
* don't emit warnings about not being able to resolve symbols
- * due to /proc/sys/kernel/kptr_restrict settings and instear provide
+ * due to /proc/sys/kernel/kptr_restrict settings and instead provide
* a saner message about no samples being in the perf.data file.
*
* This also serves to suppress a warning about f_header.data.size == 0
@@ -2495,6 +2576,9 @@ int cmd_stat(int argc, const char **argv)
perf_stat__exit_aggr_mode();
evlist__free_stats(evsel_list);
out:
+ if (stat_config.iostat_run)
+ iostat_release(evsel_list);
+
zfree(&stat_config.walltime_run);
if (smi_cost && smi_reset)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 3673c04d16b6..69cb3635f5ef 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -328,13 +328,13 @@ static void perf_top__print_sym_table(struct perf_top *top)
printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
if (!top->record_opts.overwrite &&
- (hists->stats.nr_lost_warned !=
- hists->stats.nr_events[PERF_RECORD_LOST])) {
- hists->stats.nr_lost_warned =
- hists->stats.nr_events[PERF_RECORD_LOST];
+ (top->evlist->stats.nr_lost_warned !=
+ top->evlist->stats.nr_events[PERF_RECORD_LOST])) {
+ top->evlist->stats.nr_lost_warned =
+ top->evlist->stats.nr_events[PERF_RECORD_LOST];
color_fprintf(stdout, PERF_COLOR_RED,
"WARNING: LOST %d chunks, Check IO/CPU overload",
- hists->stats.nr_lost_warned);
+ top->evlist->stats.nr_lost_warned);
++printed;
}
@@ -852,11 +852,9 @@ static void
perf_top__process_lost(struct perf_top *top, union perf_event *event,
struct evsel *evsel)
{
- struct hists *hists = evsel__hists(evsel);
-
top->lost += event->lost.lost;
top->lost_total += event->lost.lost;
- hists->stats.total_lost += event->lost.lost;
+ evsel->evlist->stats.total_lost += event->lost.lost;
}
static void
@@ -864,11 +862,9 @@ perf_top__process_lost_samples(struct perf_top *top,
union perf_event *event,
struct evsel *evsel)
{
- struct hists *hists = evsel__hists(evsel);
-
top->lost += event->lost_samples.lost;
top->lost_total += event->lost_samples.lost;
- hists->stats.total_lost_samples += event->lost_samples.lost;
+ evsel->evlist->stats.total_lost_samples += event->lost_samples.lost;
}
static u64 last_timestamp;
@@ -1205,7 +1201,7 @@ static int deliver_event(struct ordered_events *qe,
} else if (event->header.type == PERF_RECORD_LOST_SAMPLES) {
perf_top__process_lost_samples(top, event, evsel);
} else if (event->header.type < PERF_RECORD_MAX) {
- hists__inc_nr_events(evsel__hists(evsel), event->header.type);
+ events_stats__inc(&session->evlist->stats, event->header.type);
machine__process_event(machine, event, &sample);
} else
++session->evlist->stats.nr_unknown_events;
@@ -1607,7 +1603,7 @@ int cmd_top(int argc, const char **argv)
if (status) {
/*
* Some arches do not provide a get_cpuid(), so just use pr_debug, otherwise
- * warn the user explicitely.
+ * warn the user explicitly.
*/
eprintf(status == ENOSYS ? 1 : 0, verbose,
"Couldn't read the cpuid for this machine: %s\n",
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index dded93a2bc89..dd8ff287e930 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -75,6 +75,13 @@ include/uapi/asm-generic/mman-common.h
include/uapi/asm-generic/unistd.h
'
+SYNC_CHECK_FILES='
+arch/x86/include/asm/inat.h
+arch/x86/include/asm/insn.h
+arch/x86/lib/inat.c
+arch/x86/lib/insn.c
+'
+
# These copies are under tools/perf/trace/beauty/ as they are not used in
# building object files, only by scripts in tools/perf/trace/beauty/ to generate
# tables that then get included in .c files for things like id->string syscall
@@ -129,6 +136,10 @@ for i in $FILES; do
check $i -B
done
+for i in $SYNC_CHECK_FILES; do
+ check $i '-I "^.*\/\*.*__ignore_sync_check__.*\*\/.*$"'
+done
+
# diff with extra ignore lines
check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))"'
check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"'
@@ -137,15 +148,12 @@ check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"'
check include/linux/build_bug.h '-I "^#\(ifndef\|endif\)\( \/\/\)* static_assert$"'
check include/linux/ctype.h '-I "isdigit("'
check lib/ctype.c '-I "^EXPORT_SYMBOL" -I "^#include <linux/export.h>" -B'
-check arch/x86/include/asm/inat.h '-I "^#include [\"<]\(asm/\)*inat_types.h[\">]"'
-check arch/x86/include/asm/insn.h '-I "^#include [\"<]\(asm/\)*inat.h[\">]"'
-check arch/x86/lib/inat.c '-I "^#include [\"<]\(../include/\)*asm/insn.h[\">]"'
-check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]" -I "^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]"'
# diff non-symmetric files
check_2 tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscalls/syscall_64.tbl
check_2 tools/perf/arch/powerpc/entry/syscalls/syscall.tbl arch/powerpc/kernel/syscalls/syscall.tbl
check_2 tools/perf/arch/s390/entry/syscalls/syscall.tbl arch/s390/kernel/syscalls/syscall.tbl
+check_2 tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl arch/mips/kernel/syscalls/syscall_n64.tbl
for i in $BEAUTY_FILES; do
beauty_check $i -B
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index 825a12e8d694..4aa034aefa33 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -14,6 +14,7 @@ perf-config mainporcelain common
perf-evlist mainporcelain common
perf-ftrace mainporcelain common
perf-inject mainporcelain common
+perf-iostat mainporcelain common
perf-kallsyms mainporcelain common
perf-kmem mainporcelain common
perf-kvm mainporcelain common
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
index b80437971d80..a262dcd020f4 100644
--- a/tools/perf/examples/bpf/augmented_raw_syscalls.c
+++ b/tools/perf/examples/bpf/augmented_raw_syscalls.c
@@ -262,7 +262,7 @@ int sys_enter(struct syscall_enter_args *args)
/*
* Jump to syscall specific augmenter, even if the default one,
* "!raw_syscalls:unaugmented" that will just return 1 to return the
- * unagmented tracepoint payload.
+ * unaugmented tracepoint payload.
*/
bpf_tail_call(args, &syscalls_sys_enter, augmented_args->args.syscall_nr);
@@ -282,7 +282,7 @@ int sys_exit(struct syscall_exit_args *args)
/*
* Jump to syscall specific return augmenter, even if the default one,
* "!raw_syscalls:unaugmented" that will just return 1 to return the
- * unagmented tracepoint payload.
+ * unaugmented tracepoint payload.
*/
bpf_tail_call(args, &syscalls_sys_exit, exit_args.syscall_nr);
/*
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index 88108598d6e9..526dcaf9f079 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -390,7 +390,7 @@ jvmti_write_code(void *agent, char const *sym,
rec.p.total_size += size;
/*
- * If JVM is multi-threaded, nultiple concurrent calls to agent
+ * If JVM is multi-threaded, multiple concurrent calls to agent
* may be possible, so protect file writes
*/
flockfile(fp);
@@ -457,7 +457,7 @@ jvmti_write_debug_info(void *agent, uint64_t code,
rec.p.total_size = size;
/*
- * If JVM is multi-threaded, nultiple concurrent calls to agent
+ * If JVM is multi-threaded, multiple concurrent calls to agent
* may be possible, so protect file writes
*/
flockfile(fp);
diff --git a/tools/perf/perf-iostat.sh b/tools/perf/perf-iostat.sh
new file mode 100644
index 000000000000..e562f252d56f
--- /dev/null
+++ b/tools/perf/perf-iostat.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# perf iostat
+# Alexander Antonov <alexander.antonov@linux.intel.com>
+
+if [[ "$1" == "list" ]] || [[ "$1" =~ ([a-f0-9A-F]{1,}):([a-f0-9A-F]{1,2})(,)? ]]; then
+ DELIMITER="="
+else
+ DELIMITER=" "
+fi
+
+perf stat --iostat$DELIMITER$*
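+#
+# Usage sketch (illustrative):
+#   "perf iostat list"    expands to "perf stat --iostat=list"
+#   "perf iostat 0000:3d" expands to "perf stat --iostat=0000:3d"
+#   any other first argument keeps the space delimiter, e.g.
+#   "perf iostat -- ls"   expands to "perf stat --iostat -- ls"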
diff --git a/tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json b/tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json
index 75376c7cc072..913fb200ea52 100644
--- a/tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json
+++ b/tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json
@@ -210,12 +210,24 @@
"BriefDescription": "Attributable Level 2 data TLB refill"
},
{
+ "PublicDescription": "Attributable Level 2 instruction TLB refill.",
+ "EventCode": "0x2E",
+ "EventName": "L2I_TLB_REFILL",
+ "BriefDescription": "Attributable Level 2 instruction TLB refill."
+ },
+ {
"PublicDescription": "Attributable Level 2 data or unified TLB access",
"EventCode": "0x2F",
"EventName": "L2D_TLB",
"BriefDescription": "Attributable Level 2 data or unified TLB access"
},
{
+ "PublicDescription": "Attributable Level 2 instruction TLB access.",
+ "EventCode": "0x30",
+ "EventName": "L2I_TLB",
+ "BriefDescription": "Attributable Level 2 instruction TLB access."
+ },
+ {
"PublicDescription": "Access to another socket in a multi-socket system",
"EventCode": "0x31",
"EventName": "REMOTE_ACCESS",
@@ -244,5 +256,221 @@
"EventCode": "0x37",
"EventName": "LL_CACHE_MISS_RD",
"BriefDescription": "Last level cache miss, read"
+ },
+ {
+ "PublicDescription": "SIMD Instruction architecturally executed.",
+ "EventCode": "0x8000",
+ "EventName": "SIMD_INST_RETIRED",
+ "BriefDescription": "SIMD Instruction architecturally executed."
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, SVE.",
+ "EventCode": "0x8002",
+ "EventName": "SVE_INST_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, SVE."
+ },
+ {
+ "PublicDescription": "Microarchitectural operation, Operations speculatively executed.",
+ "EventCode": "0x8008",
+ "EventName": "UOP_SPEC",
+ "BriefDescription": "Microarchitectural operation, Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE Math accelerator Operations speculatively executed.",
+ "EventCode": "0x800E",
+ "EventName": "SVE_MATH_SPEC",
+ "BriefDescription": "SVE Math accelerator Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Floating-point Operations speculatively executed.",
+ "EventCode": "0x8010",
+ "EventName": "FP_SPEC",
+ "BriefDescription": "Floating-point Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Floating-point FMA Operations speculatively executed.",
+ "EventCode": "0x8028",
+ "EventName": "FP_FMA_SPEC",
+ "BriefDescription": "Floating-point FMA Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Floating-point reciprocal estimate Operations speculatively executed.",
+ "EventCode": "0x8034",
+ "EventName": "FP_RECPE_SPEC",
+ "BriefDescription": "Floating-point reciprocal estimate Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "floating-point convert Operations speculatively executed.",
+ "EventCode": "0x8038",
+ "EventName": "FP_CVT_SPEC",
+ "BriefDescription": "floating-point convert Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Advanced SIMD and SVE integer Operations speculatively executed.",
+ "EventCode": "0x8043",
+ "EventName": "ASE_SVE_INT_SPEC",
+ "BriefDescription": "Advanced SIMD and SVE integer Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE predicated Operations speculatively executed.",
+ "EventCode": "0x8074",
+ "EventName": "SVE_PRED_SPEC",
+ "BriefDescription": "SVE predicated Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE MOVPRFX Operations speculatively executed.",
+ "EventCode": "0x807C",
+ "EventName": "SVE_MOVPRFX_SPEC",
+ "BriefDescription": "SVE MOVPRFX Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE MOVPRFX unfused Operations speculatively executed.",
+ "EventCode": "0x807F",
+ "EventName": "SVE_MOVPRFX_U_SPEC",
+ "BriefDescription": "SVE MOVPRFX unfused Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Advanced SIMD and SVE load Operations speculatively executed.",
+ "EventCode": "0x8085",
+ "EventName": "ASE_SVE_LD_SPEC",
+ "BriefDescription": "Advanced SIMD and SVE load Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Advanced SIMD and SVE store Operations speculatively executed.",
+ "EventCode": "0x8086",
+ "EventName": "ASE_SVE_ST_SPEC",
+ "BriefDescription": "Advanced SIMD and SVE store Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Prefetch Operations speculatively executed.",
+ "EventCode": "0x8087",
+ "EventName": "PRF_SPEC",
+ "BriefDescription": "Prefetch Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "General-purpose register load Operations speculatively executed.",
+ "EventCode": "0x8089",
+ "EventName": "BASE_LD_REG_SPEC",
+ "BriefDescription": "General-purpose register load Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "General-purpose register store Operations speculatively executed.",
+ "EventCode": "0x808A",
+ "EventName": "BASE_ST_REG_SPEC",
+ "BriefDescription": "General-purpose register store Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE unpredicated load register Operations speculatively executed.",
+ "EventCode": "0x8091",
+ "EventName": "SVE_LDR_REG_SPEC",
+ "BriefDescription": "SVE unpredicated load register Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE unpredicated store register Operations speculatively executed.",
+ "EventCode": "0x8092",
+ "EventName": "SVE_STR_REG_SPEC",
+ "BriefDescription": "SVE unpredicated store register Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE load predicate register Operations speculatively executed.",
+ "EventCode": "0x8095",
+ "EventName": "SVE_LDR_PREG_SPEC",
+ "BriefDescription": "SVE load predicate register Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE store predicate register Operations speculatively executed.",
+ "EventCode": "0x8096",
+ "EventName": "SVE_STR_PREG_SPEC",
+ "BriefDescription": "SVE store predicate register Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE contiguous prefetch element Operations speculatively executed.",
+ "EventCode": "0x809F",
+ "EventName": "SVE_PRF_CONTIG_SPEC",
+ "BriefDescription": "SVE contiguous prefetch element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Advanced SIMD and SVE contiguous load multiple vector Operations speculatively executed.",
+ "EventCode": "0x80A5",
+ "EventName": "ASE_SVE_LD_MULTI_SPEC",
+ "BriefDescription": "Advanced SIMD and SVE contiguous load multiple vector Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Advanced SIMD and SVE contiguous store multiple vector Operations speculatively executed.",
+ "EventCode": "0x80A6",
+ "EventName": "ASE_SVE_ST_MULTI_SPEC",
+ "BriefDescription": "Advanced SIMD and SVE contiguous store multiple vector Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE gather-load Operations speculatively executed.",
+ "EventCode": "0x80AD",
+ "EventName": "SVE_LD_GATHER_SPEC",
+ "BriefDescription": "SVE gather-load Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE scatter-store Operations speculatively executed.",
+ "EventCode": "0x80AE",
+ "EventName": "SVE_ST_SCATTER_SPEC",
+ "BriefDescription": "SVE scatter-store Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE gather-prefetch Operations speculatively executed.",
+ "EventCode": "0x80AF",
+ "EventName": "SVE_PRF_GATHER_SPEC",
+ "BriefDescription": "SVE gather-prefetch Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE First-fault load Operations speculatively executed.",
+ "EventCode": "0x80BC",
+ "EventName": "SVE_LDFF_SPEC",
+ "BriefDescription": "SVE First-fault load Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Scalable floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C0",
+ "EventName": "FP_SCALE_OPS_SPEC",
+ "BriefDescription": "Scalable floating-point element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Non-scalable floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C1",
+ "EventName": "FP_FIXED_OPS_SPEC",
+ "BriefDescription": "Non-scalable floating-point element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Scalable half-precision floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C2",
+ "EventName": "FP_HP_SCALE_OPS_SPEC",
+ "BriefDescription": "Scalable half-precision floating-point element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Non-scalable half-precision floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C3",
+ "EventName": "FP_HP_FIXED_OPS_SPEC",
+ "BriefDescription": "Non-scalable half-precision floating-point element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Scalable single-precision floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C4",
+ "EventName": "FP_SP_SCALE_OPS_SPEC",
+ "BriefDescription": "Scalable single-precision floating-point element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Non-scalable single-precision floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C5",
+ "EventName": "FP_SP_FIXED_OPS_SPEC",
+ "BriefDescription": "Non-scalable single-precision floating-point element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Scalable double-precision floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C6",
+ "EventName": "FP_DP_SCALE_OPS_SPEC",
+ "BriefDescription": "Scalable double-precision floating-point element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Non-scalable double-precision floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C7",
+ "EventName": "FP_DP_FIXED_OPS_SPEC",
+ "BriefDescription": "Non-scalable double-precision floating-point element Operations speculatively executed."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/branch.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/branch.json
new file mode 100644
index 000000000000..b011af11bf94
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/branch.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/bus.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/bus.json
new file mode 100644
index 000000000000..084e88d7df73
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/bus.json
@@ -0,0 +1,62 @@
+[
+ {
+ "PublicDescription": "This event counts read transactions from tofu controller to measured CMG.",
+ "EventCode": "0x314",
+ "EventName": "BUS_READ_TOTAL_TOFU",
+ "BriefDescription": "This event counts read transactions from tofu controller to measured CMG."
+ },
+ {
+ "PublicDescription": "This event counts read transactions from PCI controller to measured CMG.",
+ "EventCode": "0x315",
+ "EventName": "BUS_READ_TOTAL_PCI",
+ "BriefDescription": "This event counts read transactions from PCI controller to measured CMG."
+ },
+ {
+ "PublicDescription": "This event counts read transactions from measured CMG local memory to measured CMG.",
+ "EventCode": "0x316",
+ "EventName": "BUS_READ_TOTAL_MEM",
+ "BriefDescription": "This event counts read transactions from measured CMG local memory to measured CMG."
+ },
+ {
+ "PublicDescription": "This event counts write transactions from measured CMG to CMG0, if measured CMG is not CMG0.",
+ "EventCode": "0x318",
+ "EventName": "BUS_WRITE_TOTAL_CMG0",
+ "BriefDescription": "This event counts write transactions from measured CMG to CMG0, if measured CMG is not CMG0."
+ },
+ {
+ "PublicDescription": "This event counts write transactions from measured CMG to CMG1, if measured CMG is not CMG1.",
+ "EventCode": "0x319",
+ "EventName": "BUS_WRITE_TOTAL_CMG1",
+ "BriefDescription": "This event counts write transactions from measured CMG to CMG1, if measured CMG is not CMG1."
+ },
+ {
+ "PublicDescription": "This event counts write transactions from measured CMG to CMG2, if measured CMG is not CMG2.",
+ "EventCode": "0x31A",
+ "EventName": "BUS_WRITE_TOTAL_CMG2",
+ "BriefDescription": "This event counts write transactions from measured CMG to CMG2, if measured CMG is not CMG2."
+ },
+ {
+ "PublicDescription": "This event counts write transactions from measured CMG to CMG3, if measured CMG is not CMG3.",
+ "EventCode": "0x31B",
+ "EventName": "BUS_WRITE_TOTAL_CMG3",
+ "BriefDescription": "This event counts write transactions from measured CMG to CMG3, if measured CMG is not CMG3."
+ },
+ {
+ "PublicDescription": "This event counts write transactions from measured CMG to tofu controller.",
+ "EventCode": "0x31C",
+ "EventName": "BUS_WRITE_TOTAL_TOFU",
+ "BriefDescription": "This event counts write transactions from measured CMG to tofu controller."
+ },
+ {
+ "PublicDescription": "This event counts write transactions from measured CMG to PCI controller.",
+ "EventCode": "0x31D",
+ "EventName": "BUS_WRITE_TOTAL_PCI",
+ "BriefDescription": "This event counts write transactions from measured CMG to PCI controller."
+ },
+ {
+ "PublicDescription": "This event counts write transactions from measured CMG to measured CMG local memory.",
+ "EventCode": "0x31E",
+ "EventName": "BUS_WRITE_TOTAL_MEM",
+ "BriefDescription": "This event counts write transactions from measured CMG to measured CMG local memory."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cache.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cache.json
new file mode 100644
index 000000000000..2e341a951a10
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cache.json
@@ -0,0 +1,128 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "L2I_TLB"
+ },
+ {
+ "PublicDescription": "This event counts L1D_CACHE_REFILL caused by software or hardware prefetch.",
+ "EventCode": "0x49",
+ "EventName": "L1D_CACHE_REFILL_PRF",
+ "BriefDescription": "This event counts L1D_CACHE_REFILL caused by software or hardware prefetch."
+ },
+ {
+ "PublicDescription": "This event counts L2D_CACHE_REFILL caused by software or hardware prefetch.",
+ "EventCode": "0x59",
+ "EventName": "L2D_CACHE_REFILL_PRF",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL caused by software or hardware prefetch."
+ },
+ {
+ "PublicDescription": "This event counts L1D_CACHE_REFILL caused by demand access.",
+ "EventCode": "0x200",
+ "EventName": "L1D_CACHE_REFILL_DM",
+ "BriefDescription": "This event counts L1D_CACHE_REFILL caused by demand access."
+ },
+ {
+ "PublicDescription": "This event counts L1D_CACHE_REFILL caused by hardware prefetch.",
+ "EventCode": "0x202",
+ "EventName": "L1D_CACHE_REFILL_HWPRF",
+ "BriefDescription": "This event counts L1D_CACHE_REFILL caused by hardware prefetch."
+ },
+ {
+ "PublicDescription": "This event counts outstanding L1D cache miss requests per cycle.",
+ "EventCode": "0x208",
+ "EventName": "L1_MISS_WAIT",
+ "BriefDescription": "This event counts outstanding L1D cache miss requests per cycle."
+ },
+ {
+ "PublicDescription": "This event counts outstanding L1I cache miss requests per cycle.",
+ "EventCode": "0x209",
+ "EventName": "L1I_MISS_WAIT",
+ "BriefDescription": "This event counts outstanding L1I cache miss requests per cycle."
+ },
+ {
+ "PublicDescription": "This event counts L2D_CACHE_REFILL caused by demand access.",
+ "EventCode": "0x300",
+ "EventName": "L2D_CACHE_REFILL_DM",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL caused by demand access."
+ },
+ {
+ "PublicDescription": "This event counts L2D_CACHE_REFILL caused by hardware prefetch.",
+ "EventCode": "0x302",
+ "EventName": "L2D_CACHE_REFILL_HWPRF",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL caused by hardware prefetch."
+ },
+ {
+ "PublicDescription": "This event counts outstanding L2 cache miss requests per cycle.",
+ "EventCode": "0x308",
+ "EventName": "L2_MISS_WAIT",
+ "BriefDescription": "This event counts outstanding L2 cache miss requests per cycle."
+ },
+ {
+ "PublicDescription": "This event counts the number of times of L2 cache miss.",
+ "EventCode": "0x309",
+ "EventName": "L2_MISS_COUNT",
+ "BriefDescription": "This event counts the number of times of L2 cache miss."
+ },
+ {
+ "PublicDescription": "This event counts operations where demand access hits an L2 cache refill buffer allocated by software or hardware prefetch.",
+ "EventCode": "0x325",
+ "EventName": "L2D_SWAP_DM",
+ "BriefDescription": "This event counts operations where demand access hits an L2 cache refill buffer allocated by software or hardware prefetch."
+ },
+ {
+ "PublicDescription": "This event counts operations where software or hardware prefetch hits an L2 cache refill buffer allocated by demand access.",
+ "EventCode": "0x326",
+ "EventName": "L2D_CACHE_MIBMCH_PRF",
+ "BriefDescription": "This event counts operations where software or hardware prefetch hits an L2 cache refill buffer allocated by demand access."
+ },
+ {
+ "PublicDescription": "This event counts operations where demand access hits an L2 cache refill buffer allocated by software or hardware prefetch.",
+ "EventCode": "0x396",
+ "EventName": "L2D_CACHE_SWAP_LOCAL",
+ "BriefDescription": "This event counts operations where demand access hits an L2 cache refill buffer allocated by software or hardware prefetch."
+ },
+ {
+ "PublicDescription": "This event counts energy consumption per cycle of L2 cache.",
+ "EventCode": "0x3E0",
+ "EventName": "EA_L2",
+ "BriefDescription": "This event counts energy consumption per cycle of L2 cache."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cycle.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cycle.json
new file mode 100644
index 000000000000..b16484628290
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cycle.json
@@ -0,0 +1,5 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/exception.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/exception.json
new file mode 100644
index 000000000000..348749c154c0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/exception.json
@@ -0,0 +1,29 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/instruction.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/instruction.json
new file mode 100644
index 000000000000..6d258b1080cf
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/instruction.json
@@ -0,0 +1,131 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed zero blocking operations due to the 'DC ZVA' instruction.",
+ "EventCode": "0x9F",
+ "EventName": "DCZVA_SPEC",
+ "BriefDescription": "This event counts architecturally executed zero blocking operations due to the 'DC ZVA' instruction."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed floating-point move operations.",
+ "EventCode": "0x105",
+ "EventName": "FP_MV_SPEC",
+ "BriefDescription": "This event counts architecturally executed floating-point move operations."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed operations that using predicate register.",
+ "EventCode": "0x108",
+ "EventName": "PRD_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that using predicate register."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed inter-element manipulation operations.",
+ "EventCode": "0x109",
+ "EventName": "IEL_SPEC",
+ "BriefDescription": "This event counts architecturally executed inter-element manipulation operations."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed inter-register manipulation operations.",
+ "EventCode": "0x10A",
+ "EventName": "IREG_SPEC",
+ "BriefDescription": "This event counts architecturally executed inter-register manipulation operations."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed NOSIMD load operations that using SIMD&FP registers.",
+ "EventCode": "0x112",
+ "EventName": "FP_LD_SPEC",
+ "BriefDescription": "This event counts architecturally executed NOSIMD load operations that using SIMD&FP registers."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed NOSIMD store operations that using SIMD&FP registers.",
+ "EventCode": "0x113",
+ "EventName": "FP_ST_SPEC",
+ "BriefDescription": "This event counts architecturally executed NOSIMD store operations that using SIMD&FP registers."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed SIMD broadcast floating-point load operations.",
+ "EventCode": "0x11A",
+ "EventName": "BC_LD_SPEC",
+ "BriefDescription": "This event counts architecturally executed SIMD broadcast floating-point load operations."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed instructions, excluding the MOVPRFX instruction.",
+ "EventCode": "0x121",
+ "EventName": "EFFECTIVE_INST_SPEC",
+ "BriefDescription": "This event counts architecturally executed instructions, excluding the MOVPRFX instruction."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed operations that uses 'pre-index' as its addressing mode.",
+ "EventCode": "0x123",
+ "EventName": "PRE_INDEX_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that uses 'pre-index' as its addressing mode."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed operations that uses 'post-index' as its addressing mode.",
+ "EventCode": "0x124",
+ "EventName": "POST_INDEX_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that uses 'post-index' as its addressing mode."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/memory.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/memory.json
new file mode 100644
index 000000000000..c1f6479e92b4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/memory.json
@@ -0,0 +1,8 @@
+[
+ {
+ "PublicDescription": "This event counts energy consumption per cycle of CMG local memory.",
+ "EventCode": "0x3E8",
+ "EventName": "EA_MEMORY",
+ "BriefDescription": "This event counts energy consumption per cycle of CMG local memory."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/other.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/other.json
new file mode 100644
index 000000000000..10c823ac26cc
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/other.json
@@ -0,0 +1,188 @@
+[
+ {
+ "PublicDescription": "This event counts the occurrence count of the micro-operation split.",
+ "EventCode": "0x139",
+ "EventName": "UOP_SPLIT",
+ "BriefDescription": "This event counts the occurrence count of the micro-operation split."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no operation was committed because the oldest and uncommitted load/store/prefetch operation waits for memory access.",
+ "EventCode": "0x180",
+ "EventName": "LD_COMP_WAIT_L2_MISS",
+ "BriefDescription": "This event counts every cycle that no operation was committed because the oldest and uncommitted load/store/prefetch operation waits for memory access."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for memory access.",
+ "EventCode": "0x181",
+ "EventName": "LD_COMP_WAIT_L2_MISS_EX",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for memory access."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted load/store/prefetch operation waits for L2 cache access.",
+ "EventCode": "0x182",
+ "EventName": "LD_COMP_WAIT_L1_MISS",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted load/store/prefetch operation waits for L2 cache access."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for L2 cache access.",
+ "EventCode": "0x183",
+ "EventName": "LD_COMP_WAIT_L1_MISS_EX",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for L2 cache access."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted load/store/prefetch operation waits for L1D cache, L2 cache and memory access.",
+ "EventCode": "0x184",
+ "EventName": "LD_COMP_WAIT",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted load/store/prefetch operation waits for L1D cache, L2 cache and memory access."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for L1D cache, L2 cache and memory access.",
+ "EventCode": "0x185",
+ "EventName": "LD_COMP_WAIT_EX",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for L1D cache, L2 cache and memory access."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed due to the lack of an available prefetch port.",
+ "EventCode": "0x186",
+ "EventName": "LD_COMP_WAIT_PFP_BUSY",
+ "BriefDescription": "This event counts every cycle that no instruction was committed due to the lack of an available prefetch port."
+ },
+ {
+ "PublicDescription": "This event counts the LD_COMP_WAIT_PFP_BUSY caused by an integer load operation.",
+ "EventCode": "0x187",
+ "EventName": "LD_COMP_WAIT_PFP_BUSY_EX",
+ "BriefDescription": "This event counts the LD_COMP_WAIT_PFP_BUSY caused by an integer load operation."
+ },
+ {
+ "PublicDescription": "This event counts the LD_COMP_WAIT_PFP_BUSY caused by a software prefetch instruction.",
+ "EventCode": "0x188",
+ "EventName": "LD_COMP_WAIT_PFP_BUSY_SWPF",
+ "BriefDescription": "This event counts the LD_COMP_WAIT_PFP_BUSY caused by a software prefetch instruction."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed and the oldest and uncommitted instruction is an integer or floating-point/SIMD instruction.",
+ "EventCode": "0x189",
+ "EventName": "EU_COMP_WAIT",
+ "BriefDescription": "This event counts every cycle that no instruction was committed and the oldest and uncommitted instruction is an integer or floating-point/SIMD instruction."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed and the oldest and uncommitted instruction is a floating-point/SIMD instruction.",
+ "EventCode": "0x18A",
+ "EventName": "FL_COMP_WAIT",
+ "BriefDescription": "This event counts every cycle that no instruction was committed and the oldest and uncommitted instruction is a floating-point/SIMD instruction."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed and the oldest and uncommitted instruction is a branch instruction.",
+ "EventCode": "0x18B",
+ "EventName": "BR_COMP_WAIT",
+ "BriefDescription": "This event counts every cycle that no instruction was committed and the oldest and uncommitted instruction is a branch instruction."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed because the CSE is empty.",
+ "EventCode": "0x18C",
+ "EventName": "ROB_EMPTY",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the CSE is empty."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed because the CSE is empty and the store port (SP) is full.",
+ "EventCode": "0x18D",
+ "EventName": "ROB_EMPTY_STQ_BUSY",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the CSE is empty and the store port (SP) is full."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that the instruction unit is halted by the WFE/WFI instruction.",
+ "EventCode": "0x18E",
+ "EventName": "WFE_WFI_CYCLE",
+ "BriefDescription": "This event counts every cycle that the instruction unit is halted by the WFE/WFI instruction."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed, but counts at the time when commits MOVPRFX only.",
+ "EventCode": "0x190",
+ "EventName": "_0INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that no instruction was committed, but counts at the time when commits MOVPRFX only."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that one instruction is committed.",
+ "EventCode": "0x191",
+ "EventName": "_1INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that one instruction is committed."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that two instructions are committed.",
+ "EventCode": "0x192",
+ "EventName": "_2INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that two instructions are committed."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that three instructions are committed.",
+ "EventCode": "0x193",
+ "EventName": "_3INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that three instructions are committed."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that four instructions are committed.",
+ "EventCode": "0x194",
+ "EventName": "_4INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that four instructions are committed."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that only any micro-operations are committed.",
+ "EventCode": "0x198",
+ "EventName": "UOP_ONLY_COMMIT",
+ "BriefDescription": "This event counts every cycle that only any micro-operations are committed."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that only the MOVPRFX instruction is committed.",
+ "EventCode": "0x199",
+ "EventName": "SINGLE_MOVPRFX_COMMIT",
+ "BriefDescription": "This event counts every cycle that only the MOVPRFX instruction is committed."
+ },
+ {
+ "PublicDescription": "This event counts energy consumption per cycle of core.",
+ "EventCode": "0x1E0",
+ "EventName": "EA_CORE",
+ "BriefDescription": "This event counts energy consumption per cycle of core."
+ },
+ {
+ "PublicDescription": "This event counts streaming prefetch requests to L1D cache generated by hardware prefetcher.",
+ "EventCode": "0x230",
+ "EventName": "L1HWPF_STREAM_PF",
+ "BriefDescription": "This event counts streaming prefetch requests to L1D cache generated by hardware prefetcher."
+ },
+ {
+ "PublicDescription": "This event counts allocation type prefetch injection requests to L1D cache generated by hardware prefetcher.",
+ "EventCode": "0x231",
+ "EventName": "L1HWPF_INJ_ALLOC_PF",
+ "BriefDescription": "This event counts allocation type prefetch injection requests to L1D cache generated by hardware prefetcher."
+ },
+ {
+ "PublicDescription": "This event counts non-allocation type prefetch injection requests to L1D cache generated by hardware prefetcher.",
+ "EventCode": "0x232",
+ "EventName": "L1HWPF_INJ_NOALLOC_PF",
+ "BriefDescription": "This event counts non-allocation type prefetch injection requests to L1D cache generated by hardware prefetcher."
+ },
+ {
+ "PublicDescription": "This event counts streaming prefetch requests to L2 cache generated by hardware prefecher.",
+ "EventCode": "0x233",
+ "EventName": "L2HWPF_STREAM_PF",
+ "BriefDescription": "This event counts streaming prefetch requests to L2 cache generated by hardware prefecher."
+ },
+ {
+ "PublicDescription": "This event counts allocation type prefetch injection requests to L2 cache generated by hardware prefetcher.",
+ "EventCode": "0x234",
+ "EventName": "L2HWPF_INJ_ALLOC_PF",
+ "BriefDescription": "This event counts allocation type prefetch injection requests to L2 cache generated by hardware prefetcher."
+ },
+ {
+ "PublicDescription": "This event counts non-allocation type prefetch injection requests to L2 cache generated by hardware prefetcher.",
+ "EventCode": "0x235",
+ "EventName": "L2HWPF_INJ_NOALLOC_PF",
+ "BriefDescription": "This event counts non-allocation type prefetch injection requests to L2 cache generated by hardware prefetcher."
+ },
+ {
+ "PublicDescription": "This event counts prefetch requests to L2 cache generated by the other causes.",
+ "EventCode": "0x236",
+ "EventName": "L2HWPF_OTHER",
+ "BriefDescription": "This event counts prefetch requests to L2 cache generated by the other causes."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/pipeline.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/pipeline.json
new file mode 100644
index 000000000000..dd7c97a9972b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/pipeline.json
@@ -0,0 +1,194 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of EAGA pipeline.",
+ "EventCode": "0x1A0",
+ "EventName": "EAGA_VAL",
+ "BriefDescription": "This event counts valid cycles of EAGA pipeline."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of EAGB pipeline.",
+ "EventCode": "0x1A1",
+ "EventName": "EAGB_VAL",
+ "BriefDescription": "This event counts valid cycles of EAGB pipeline."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of EXA pipeline.",
+ "EventCode": "0x1A2",
+ "EventName": "EXA_VAL",
+ "BriefDescription": "This event counts valid cycles of EXA pipeline."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of EXB pipeline.",
+ "EventCode": "0x1A3",
+ "EventName": "EXB_VAL",
+ "BriefDescription": "This event counts valid cycles of EXB pipeline."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of FLA pipeline.",
+ "EventCode": "0x1A4",
+ "EventName": "FLA_VAL",
+ "BriefDescription": "This event counts valid cycles of FLA pipeline."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of FLB pipeline.",
+ "EventCode": "0x1A5",
+ "EventName": "FLB_VAL",
+ "BriefDescription": "This event counts valid cycles of FLB pipeline."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of PRX pipeline.",
+ "EventCode": "0x1A6",
+ "EventName": "PRX_VAL",
+ "BriefDescription": "This event counts valid cycles of PRX pipeline."
+ },
+ {
+ "PublicDescription": "This event counts the number of 1's in the predicate bits of request in FLA pipeline, where it is corrected so that it becomes 16 when all bits are 1.",
+ "EventCode": "0x1B4",
+ "EventName": "FLA_VAL_PRD_CNT",
+ "BriefDescription": "This event counts the number of 1's in the predicate bits of request in FLA pipeline, where it is corrected so that it becomes 16 when all bits are 1."
+ },
+ {
+ "PublicDescription": "This event counts the number of 1's in the predicate bits of request in FLB pipeline, where it is corrected so that it becomes 16 when all bits are 1.",
+ "EventCode": "0x1B5",
+ "EventName": "FLB_VAL_PRD_CNT",
+ "BriefDescription": "This event counts the number of 1's in the predicate bits of request in FLB pipeline, where it is corrected so that it becomes 16 when all bits are 1."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of L1D cache pipeline#0.",
+ "EventCode": "0x240",
+ "EventName": "L1_PIPE0_VAL",
+ "BriefDescription": "This event counts valid cycles of L1D cache pipeline#0."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of L1D cache pipeline#1.",
+ "EventCode": "0x241",
+ "EventName": "L1_PIPE1_VAL",
+ "BriefDescription": "This event counts valid cycles of L1D cache pipeline#1."
+ },
+ {
+ "PublicDescription": "This event counts requests in L1D cache pipeline#0 that its sce bit of tagged address is 1.",
+ "EventCode": "0x250",
+ "EventName": "L1_PIPE0_VAL_IU_TAG_ADRS_SCE",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#0 that its sce bit of tagged address is 1."
+ },
+ {
+ "PublicDescription": "This event counts requests in L1D cache pipeline#0 that its pfe bit of tagged address is 1.",
+ "EventCode": "0x251",
+ "EventName": "L1_PIPE0_VAL_IU_TAG_ADRS_PFE",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#0 that its pfe bit of tagged address is 1."
+ },
+ {
+ "PublicDescription": "This event counts requests in L1D cache pipeline#1 that its sce bit of tagged address is 1.",
+ "EventCode": "0x252",
+ "EventName": "L1_PIPE1_VAL_IU_TAG_ADRS_SCE",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#1 that its sce bit of tagged address is 1."
+ },
+ {
+ "PublicDescription": "This event counts requests in L1D cache pipeline#1 that its pfe bit of tagged address is 1.",
+ "EventCode": "0x253",
+ "EventName": "L1_PIPE1_VAL_IU_TAG_ADRS_PFE",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#1 that its pfe bit of tagged address is 1."
+ },
+ {
+ "PublicDescription": "This event counts completed requests in L1D cache pipeline#0.",
+ "EventCode": "0x260",
+ "EventName": "L1_PIPE0_COMP",
+ "BriefDescription": "This event counts completed requests in L1D cache pipeline#0."
+ },
+ {
+ "PublicDescription": "This event counts completed requests in L1D cache pipeline#1.",
+ "EventCode": "0x261",
+ "EventName": "L1_PIPE1_COMP",
+ "BriefDescription": "This event counts completed requests in L1D cache pipeline#1."
+ },
+ {
+ "PublicDescription": "This event counts completed requests in L1I cache pipeline.",
+ "EventCode": "0x268",
+ "EventName": "L1I_PIPE_COMP",
+ "BriefDescription": "This event counts completed requests in L1I cache pipeline."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of L1I cache pipeline.",
+ "EventCode": "0x269",
+ "EventName": "L1I_PIPE_VAL",
+ "BriefDescription": "This event counts valid cycles of L1I cache pipeline."
+ },
+ {
+ "PublicDescription": "This event counts aborted requests in L1D pipelines that due to store-load interlock.",
+ "EventCode": "0x274",
+ "EventName": "L1_PIPE_ABORT_STLD_INTLK",
+ "BriefDescription": "This event counts aborted requests in L1D pipelines that due to store-load interlock."
+ },
+ {
+ "PublicDescription": "This event counts requests in L1D cache pipeline#0 that its sector cache ID is not 0.",
+ "EventCode": "0x2A0",
+ "EventName": "L1_PIPE0_VAL_IU_NOT_SEC0",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#0 that its sector cache ID is not 0."
+ },
+ {
+ "PublicDescription": "This event counts requests in L1D cache pipeline#1 that its sector cache ID is not 0.",
+ "EventCode": "0x2A1",
+ "EventName": "L1_PIPE1_VAL_IU_NOT_SEC0",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#1 that its sector cache ID is not 0."
+ },
+ {
+ "PublicDescription": "This event counts the number of times where 2 elements of the gather instructions became 2 flows because 2 elements could not be combined.",
+ "EventCode": "0x2B0",
+ "EventName": "L1_PIPE_COMP_GATHER_2FLOW",
+ "BriefDescription": "This event counts the number of times where 2 elements of the gather instructions became 2 flows because 2 elements could not be combined."
+ },
+ {
+ "PublicDescription": "This event counts the number of times where 2 elements of the gather instructions became 1 flow because 2 elements could be combined.",
+ "EventCode": "0x2B1",
+ "EventName": "L1_PIPE_COMP_GATHER_1FLOW",
+ "BriefDescription": "This event counts the number of times where 2 elements of the gather instructions became 1 flow because 2 elements could be combined."
+ },
+ {
+ "PublicDescription": "This event counts the number of times where 2 elements of the gather instructions became 0 flow because both predicate values are 0.",
+ "EventCode": "0x2B2",
+ "EventName": "L1_PIPE_COMP_GATHER_0FLOW",
+ "BriefDescription": "This event counts the number of times where 2 elements of the gather instructions became 0 flow because both predicate values are 0."
+ },
+ {
+ "PublicDescription": "This event counts the number of flows of the scatter instructions.",
+ "EventCode": "0x2B3",
+ "EventName": "L1_PIPE_COMP_SCATTER_1FLOW",
+ "BriefDescription": "This event counts the number of flows of the scatter instructions."
+ },
+ {
+ "PublicDescription": "This event counts the number of 1's in the predicate bits of request in L1D cache pipeline#0, where it is corrected so that it becomes 16 when all bits are 1.",
+ "EventCode": "0x2B8",
+ "EventName": "L1_PIPE0_COMP_PRD_CNT",
+ "BriefDescription": "This event counts the number of 1's in the predicate bits of request in L1D cache pipeline#0, where it is corrected so that it becomes 16 when all bits are 1."
+ },
+ {
+ "PublicDescription": "This event counts the number of 1's in the predicate bits of request in L1D cache pipeline#1, where it is corrected so that it becomes 16 when all bits are 1.",
+ "EventCode": "0x2B9",
+ "EventName": "L1_PIPE1_COMP_PRD_CNT",
+ "BriefDescription": "This event counts the number of 1's in the predicate bits of request in L1D cache pipeline#1, where it is corrected so that it becomes 16 when all bits are 1."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of L2 cache pipeline.",
+ "EventCode": "0x330",
+ "EventName": "L2_PIPE_VAL",
+ "BriefDescription": "This event counts valid cycles of L2 cache pipeline."
+ },
+ {
+ "PublicDescription": "This event counts completed requests in L2 cache pipeline.",
+ "EventCode": "0x350",
+ "EventName": "L2_PIPE_COMP_ALL",
+ "BriefDescription": "This event counts completed requests in L2 cache pipeline."
+ },
+ {
+ "PublicDescription": "This event counts operations where software or hardware prefetch hits an L2 cache refill buffer allocated by demand access.",
+ "EventCode": "0x370",
+ "EventName": "L2_PIPE_COMP_PF_L2MIB_MCH",
+ "BriefDescription": "This event counts operations where software or hardware prefetch hits an L2 cache refill buffer allocated by demand access."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/sve.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/sve.json
new file mode 100644
index 000000000000..dc1b95e42c32
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/sve.json
@@ -0,0 +1,110 @@
+[
+ {
+ "ArchStdEvent": "SIMD_INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "SVE_INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "UOP_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_MATH_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_FMA_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_RECPE_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_CVT_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_MOVPRFX_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_MOVPRFX_U_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "PRF_SPEC"
+ },
+ {
+ "ArchStdEvent": "BASE_LD_REG_SPEC"
+ },
+ {
+ "ArchStdEvent": "BASE_ST_REG_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDR_REG_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_STR_REG_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDR_PREG_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_STR_PREG_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRF_CONTIG_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_LD_MULTI_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_ST_MULTI_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LD_GATHER_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_ST_SCATTER_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRF_GATHER_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SCALE_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_FIXED_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_HP_SCALE_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_HP_FIXED_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SP_SCALE_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SP_FIXED_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_DP_SCALE_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_DP_FIXED_OPS_SPEC"
+ }
+]
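Once perf's jevents build picks up the JSON files above together with the mapfile entry added below, the A64FX events should become selectable by alias name (aliases are typically exposed in lower case, and perf list should show them). An illustrative invocation, with the event choice and workload being assumptions rather than anything taken from this patch:

    perf stat -e l1d_cache_refill_prf,ea_l2 -- ./workload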
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json
new file mode 100644
index 000000000000..dda8e59149d2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json
@@ -0,0 +1,233 @@
+[
+ {
+ "MetricExpr": "FETCH_BUBBLE / (4 * CPU_CYCLES)",
+ "PublicDescription": "Frontend bound L1 topdown metric",
+ "BriefDescription": "Frontend bound L1 topdown metric",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "frontend_bound"
+ },
+ {
+ "MetricExpr": "(INST_SPEC - INST_RETIRED) / (4 * CPU_CYCLES)",
+ "PublicDescription": "Bad Speculation L1 topdown metric",
+ "BriefDescription": "Bad Speculation L1 topdown metric",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "bad_speculation"
+ },
+ {
+ "MetricExpr": "INST_RETIRED / (CPU_CYCLES * 4)",
+ "PublicDescription": "Retiring L1 topdown metric",
+ "BriefDescription": "Retiring L1 topdown metric",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "retiring"
+ },
+ {
+ "MetricExpr": "1 - (frontend_bound + bad_speculation + retiring)",
+ "PublicDescription": "Backend Bound L1 topdown metric",
+ "BriefDescription": "Backend Bound L1 topdown metric",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "backend_bound"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x201d@ / CPU_CYCLES",
+ "PublicDescription": "Fetch latency bound L2 topdown metric",
+ "BriefDescription": "Fetch latency bound L2 topdown metric",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "fetch_latency_bound"
+ },
+ {
+ "MetricExpr": "frontend_bound - fetch_latency_bound",
+ "PublicDescription": "Fetch bandwidth bound L2 topdown metric",
+ "BriefDescription": "Fetch bandwidth bound L2 topdown metric",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "fetch_bandwidth_bound"
+ },
+ {
+ "MetricExpr": "(bad_speculation * BR_MIS_PRED) / (BR_MIS_PRED + armv8_pmuv3_0@event\\=0x2013@)",
+ "PublicDescription": "Branch mispredicts L2 topdown metric",
+ "BriefDescription": "Branch mispredicts L2 topdown metric",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "branch_mispredicts"
+ },
+ {
+ "MetricExpr": "bad_speculation - branch_mispredicts",
+ "PublicDescription": "Machine clears L2 topdown metric",
+ "BriefDescription": "Machine clears L2 topdown metric",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "machine_clears"
+ },
+ {
+ "MetricExpr": "(EXE_STALL_CYCLE - (MEM_STALL_ANYLOAD + armv8_pmuv3_0@event\\=0x7005@)) / CPU_CYCLES",
+ "PublicDescription": "Core bound L2 topdown metric",
+ "BriefDescription": "Core bound L2 topdown metric",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "core_bound"
+ },
+ {
+ "MetricExpr": "(MEM_STALL_ANYLOAD + armv8_pmuv3_0@event\\=0x7005@) / CPU_CYCLES",
+ "PublicDescription": "Memory bound L2 topdown metric",
+ "BriefDescription": "Memory bound L2 topdown metric",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "memory_bound"
+ },
+ {
+ "MetricExpr": "(((L2I_TLB - L2I_TLB_REFILL) * 15) + (L2I_TLB_REFILL * 100)) / CPU_CYCLES",
+ "PublicDescription": "Idle by itlb miss L3 topdown metric",
+ "BriefDescription": "Idle by itlb miss L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "idle_by_itlb_miss"
+ },
+ {
+ "MetricExpr": "(((L2I_CACHE - L2I_CACHE_REFILL) * 15) + (L2I_CACHE_REFILL * 100)) / CPU_CYCLES",
+ "PublicDescription": "Idle by icache miss L3 topdown metric",
+ "BriefDescription": "Idle by icache miss L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "idle_by_icache_miss"
+ },
+ {
+ "MetricExpr": "(BR_MIS_PRED * 5) / CPU_CYCLES",
+ "PublicDescription": "BP misp flush L3 topdown metric",
+ "BriefDescription": "BP misp flush L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "bp_misp_flush"
+ },
+ {
+ "MetricExpr": "(armv8_pmuv3_0@event\\=0x2013@ * 5) / CPU_CYCLES",
+ "PublicDescription": "OOO flush L3 topdown metric",
+ "BriefDescription": "OOO flush L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "ooo_flush"
+ },
+ {
+ "MetricExpr": "(armv8_pmuv3_0@event\\=0x1001@ * 5) / CPU_CYCLES",
+ "PublicDescription": "Static predictor flush L3 topdown metric",
+ "BriefDescription": "Static predictor flush L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "sp_flush"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x1010@ / BR_MIS_PRED",
+ "PublicDescription": "Indirect branch L3 topdown metric",
+ "BriefDescription": "Indirect branch L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "indirect_branch"
+ },
+ {
+ "MetricExpr": "(armv8_pmuv3_0@event\\=0x1014@ + armv8_pmuv3_0@event\\=0x1018@) / BR_MIS_PRED",
+ "PublicDescription": "Push branch L3 topdown metric",
+ "BriefDescription": "Push branch L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "push_branch"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x100c@ / BR_MIS_PRED",
+ "PublicDescription": "Pop branch L3 topdown metric",
+ "BriefDescription": "Pop branch L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "pop_branch"
+ },
+ {
+ "MetricExpr": "(BR_MIS_PRED - armv8_pmuv3_0@event\\=0x1010@ - armv8_pmuv3_0@event\\=0x1014@ - armv8_pmuv3_0@event\\=0x1018@ - armv8_pmuv3_0@event\\=0x100c@) / BR_MIS_PRED",
+ "PublicDescription": "Other branch L3 topdown metric",
+ "BriefDescription": "Other branch L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "other_branch"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x2012@ / armv8_pmuv3_0@event\\=0x2013@",
+ "PublicDescription": "Nuke flush L3 topdown metric",
+ "BriefDescription": "Nuke flush L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "nuke_flush"
+ },
+ {
+ "MetricExpr": "1 - nuke_flush",
+ "PublicDescription": "Other flush L3 topdown metric",
+ "BriefDescription": "Other flush L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "other_flush"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x2010@ / CPU_CYCLES",
+ "PublicDescription": "Sync stall L3 topdown metric",
+ "BriefDescription": "Sync stall L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "sync_stall"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x2004@ / CPU_CYCLES",
+ "PublicDescription": "Rob stall L3 topdown metric",
+ "BriefDescription": "Rob stall L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "rob_stall"
+ },
+ {
+ "MetricExpr": "(armv8_pmuv3_0@event\\=0x2006@ + armv8_pmuv3_0@event\\=0x2007@ + armv8_pmuv3_0@event\\=0x2008@) / CPU_CYCLES",
+ "PublicDescription": "Ptag stall L3 topdown metric",
+ "BriefDescription": "Ptag stall L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "ptag_stall"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x201e@ / CPU_CYCLES",
+ "PublicDescription": "SaveOpQ stall L3 topdown metric",
+ "BriefDescription": "SaveOpQ stall L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "saveopq_stall"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x2005@ / CPU_CYCLES",
+ "PublicDescription": "PC buffer stall L3 topdown metric",
+ "BriefDescription": "PC buffer stall L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "pc_buffer_stall"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x7002@ / CPU_CYCLES",
+ "PublicDescription": "Divider L3 topdown metric",
+ "BriefDescription": "Divider L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "divider"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x7003@ / CPU_CYCLES",
+ "PublicDescription": "FSU stall L3 topdown metric",
+ "BriefDescription": "FSU stall L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "fsu_stall"
+ },
+ {
+ "MetricExpr": "core_bound - divider - fsu_stall",
+ "PublicDescription": "EXE ports util L3 topdown metric",
+ "BriefDescription": "EXE ports util L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "exe_ports_util"
+ },
+ {
+ "MetricExpr": "(MEM_STALL_ANYLOAD - MEM_STALL_L1MISS) / CPU_CYCLES",
+ "PublicDescription": "L1 bound L3 topdown metric",
+ "BriefDescription": "L1 bound L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "l1_bound"
+ },
+ {
+ "MetricExpr": "(MEM_STALL_L1MISS - MEM_STALL_L2MISS) / CPU_CYCLES",
+ "PublicDescription": "L2 bound L3 topdown metric",
+ "BriefDescription": "L2 bound L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "l2_bound"
+ },
+ {
+ "MetricExpr": "MEM_STALL_L2MISS / CPU_CYCLES",
+ "PublicDescription": "Mem bound L3 topdown metric",
+ "BriefDescription": "Mem bound L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "mem_bound"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x7005@ / CPU_CYCLES",
+ "PublicDescription": "Store bound L3 topdown metric",
+ "BriefDescription": "Store bound L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "store_bound"
+ }
+]
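The four TopDownL1 expressions above partition 4 issue slots per cycle into frontend-bound, bad-speculation, retiring and backend-bound fractions. A minimal C sketch of the same arithmetic, using made-up counter values purely for illustration (in practice perf reads the counters and evaluates the MetricExprs itself):

    #include <stdio.h>

    int main(void)
    {
        /* Assumed sample counts; not real measurements. */
        double cpu_cycles   = 1.0e9;   /* CPU_CYCLES   */
        double fetch_bubble = 8.0e8;   /* FETCH_BUBBLE */
        double inst_spec    = 2.4e9;   /* INST_SPEC    */
        double inst_retired = 2.0e9;   /* INST_RETIRED */

        double slots = 4.0 * cpu_cycles;   /* 4 slots per cycle */
        double frontend_bound  = fetch_bubble / slots;
        double bad_speculation = (inst_spec - inst_retired) / slots;
        double retiring        = inst_retired / slots;
        double backend_bound   =
                1.0 - (frontend_bound + bad_speculation + retiring);

        printf("frontend_bound  %.3f\n", frontend_bound);
        printf("bad_speculation %.3f\n", bad_speculation);
        printf("retiring        %.3f\n", retiring);
        printf("backend_bound   %.3f\n", backend_bound);
        return 0;
    }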
diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv
index 0d609149b82a..c43591d831b8 100644
--- a/tools/perf/pmu-events/arch/arm64/mapfile.csv
+++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv
@@ -20,5 +20,6 @@
0x00000000410fd0c0,v1,arm/cortex-a76-n1,core
0x00000000420f5160,v1,cavium/thunderx2,core
0x00000000430f0af0,v1,cavium/thunderx2,core
+0x00000000460f0010,v1,fujitsu/a64fx,core
0x00000000480fd010,v1,hisilicon/hip08,core
0x00000000500f0000,v1,ampere/emag,core
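For reference, the first field of each arm64 row is matched against the CPU's MIDR_EL1 value. The new A64FX key 0x00000000460f0010 carries implementer 0x46 (Fujitsu) and part number 0x001. A small sketch decoding the key under the architectural MIDR field layout, for illustration only:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t midr = 0x460f0010u;  /* first field of the new a64fx row */

        printf("implementer  0x%02x\n", (unsigned)((midr >> 24) & 0xff)); /* 0x46: Fujitsu */
        printf("variant      0x%x\n",   (unsigned)((midr >> 20) & 0xf));
        printf("architecture 0x%x\n",   (unsigned)((midr >> 16) & 0xf));
        printf("partnum      0x%03x\n", (unsigned)((midr >> 4) & 0xfff)); /* 0x001: A64FX */
        printf("revision     0x%x\n",   (unsigned)(midr & 0xf));
        return 0;
    }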
diff --git a/tools/perf/pmu-events/arch/powerpc/mapfile.csv b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
index 229150e7ab7d..4abdfc3f9692 100644
--- a/tools/perf/pmu-events/arch/powerpc/mapfile.csv
+++ b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
@@ -15,3 +15,4 @@
# Power8 entries
004[bcd][[:xdigit:]]{4},1,power8,core
004e[[:xdigit:]]{4},1,power9,core
+0080[[:xdigit:]]{4},1,power10,core
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/cache.json b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
new file mode 100644
index 000000000000..616f29098c71
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
@@ -0,0 +1,47 @@
+[
+ {
+ "EventCode": "1003C",
+ "EventName": "PM_EXEC_STALL_DMISS_L2L3",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from either the local L2 or local L3."
+ },
+ {
+ "EventCode": "34056",
+ "EventName": "PM_EXEC_STALL_LOAD_FINISH",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ."
+ },
+ {
+ "EventCode": "3006C",
+ "EventName": "PM_RUN_CYC_SMT2_MODE",
+ "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT2 mode."
+ },
+ {
+ "EventCode": "300F4",
+ "EventName": "PM_RUN_INST_CMPL_CONC",
+ "BriefDescription": "PowerPC instructions completed by this thread when all threads in the core had the run-latch set."
+ },
+ {
+ "EventCode": "4C016",
+ "EventName": "PM_EXEC_STALL_DMISS_L2L3_CONFLICT",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, with a dispatch conflict."
+ },
+ {
+ "EventCode": "4D014",
+ "EventName": "PM_EXEC_STALL_LOAD",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a load instruction executing in the Load Store Unit."
+ },
+ {
+ "EventCode": "4D016",
+ "EventName": "PM_EXEC_STALL_PTESYNC",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a PTESYNC instruction executing in the Load Store Unit."
+ },
+ {
+ "EventCode": "401EA",
+ "EventName": "PM_THRESH_EXC_128",
+ "BriefDescription": "Threshold counter exceeded a value of 128."
+ },
+ {
+ "EventCode": "400F6",
+ "EventName": "PM_BR_MPRED_CMPL",
+ "BriefDescription": "A mispredicted branch completed. Includes direction and target."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
new file mode 100644
index 000000000000..703cd431ae5b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
@@ -0,0 +1,7 @@
+[
+ {
+ "EventCode": "4016E",
+ "EventName": "PM_THRESH_NOT_MET",
+ "BriefDescription": "Threshold counter did not meet threshold."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
new file mode 100644
index 000000000000..eac8609dcc90
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
@@ -0,0 +1,217 @@
+[
+ {
+ "EventCode": "10004",
+ "EventName": "PM_EXEC_STALL_TRANSLATION",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss or ERAT miss and waited for it to resolve."
+ },
+ {
+ "EventCode": "10010",
+ "EventName": "PM_PMC4_OVERFLOW",
+ "BriefDescription": "The event selected for PMC4 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "10020",
+ "EventName": "PM_PMC4_REWIND",
+ "BriefDescription": "The speculative event selected for PMC4 rewinds and the counter for PMC4 is not charged."
+ },
+ {
+ "EventCode": "10038",
+ "EventName": "PM_DISP_STALL_TRANSLATION",
+ "BriefDescription": "Cycles when dispatch was stalled for this thread because the MMU was handling a translation miss."
+ },
+ {
+ "EventCode": "1003A",
+ "EventName": "PM_DISP_STALL_BR_MPRED_IC_L2",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2 after suffering a branch mispredict."
+ },
+ {
+ "EventCode": "1E050",
+ "EventName": "PM_DISP_STALL_HELD_STF_MAPPER_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR."
+ },
+ {
+ "EventCode": "1F054",
+ "EventName": "PM_DTLB_HIT",
+ "BriefDescription": "The PTE required by the instruction was resident in the TLB (data TLB access). When MMCR1[16]=0 this event counts only demand hits. When MMCR1[16]=1 this event includes demand and prefetch. Applies to both HPT and RPT."
+ },
+ {
+ "EventCode": "101E8",
+ "EventName": "PM_THRESH_EXC_256",
+ "BriefDescription": "Threshold counter exceeded a count of 256."
+ },
+ {
+ "EventCode": "101EC",
+ "EventName": "PM_THRESH_MET",
+ "BriefDescription": "Threshold exceeded."
+ },
+ {
+ "EventCode": "100F2",
+ "EventName": "PM_1PLUS_PPC_CMPL",
+ "BriefDescription": "Cycles in which at least one instruction is completed by this thread."
+ },
+ {
+ "EventCode": "100F6",
+ "EventName": "PM_IERAT_MISS",
+ "BriefDescription": "IERAT Reloaded to satisfy an IERAT miss. All page sizes are counted by this event."
+ },
+ {
+ "EventCode": "100F8",
+ "EventName": "PM_DISP_STALL_CYC",
+ "BriefDescription": "Cycles the ICT has no itags assigned to this thread (no instructions were dispatched during these cycles)."
+ },
+ {
+ "EventCode": "20114",
+ "EventName": "PM_MRK_L2_RC_DISP",
+ "BriefDescription": "Marked instruction RC dispatched in L2."
+ },
+ {
+ "EventCode": "2C010",
+ "EventName": "PM_EXEC_STALL_LSU",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Load Store Unit. This does not include simple fixed point instructions."
+ },
+ {
+ "EventCode": "2C016",
+ "EventName": "PM_DISP_STALL_IERAT_ONLY_MISS",
+ "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction ERAT miss."
+ },
+ {
+ "EventCode": "2C01E",
+ "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3 after suffering a branch mispredict."
+ },
+ {
+ "EventCode": "2D01A",
+ "EventName": "PM_DISP_STALL_IC_MISS",
+ "BriefDescription": "Cycles when dispatch was stalled for this thread due to an Icache Miss."
+ },
+ {
+ "EventCode": "2D01C",
+ "EventName": "PM_CMPL_STALL_STCX",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a stcx waiting for resolution from the nest before completing."
+ },
+ {
+ "EventCode": "2E018",
+ "EventName": "PM_DISP_STALL_FETCH",
+ "BriefDescription": "Cycles when dispatch was stalled for this thread because Fetch was being held."
+ },
+ {
+ "EventCode": "2E01A",
+ "EventName": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the XVFC mapper/SRB was full."
+ },
+ {
+ "EventCode": "2C142",
+ "EventName": "PM_MRK_XFER_FROM_SRC_PMC2",
+ "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "24050",
+ "EventName": "PM_IOPS_DISP",
+ "BriefDescription": "Internal Operations dispatched. PM_IOPS_DISP / PM_INST_DISP will show the average number of internal operations per PowerPC instruction."
+ },
+ {
+ "EventCode": "2405E",
+ "EventName": "PM_ISSUE_CANCEL",
+ "BriefDescription": "An instruction issued and the issue was later cancelled. Only one cancel per PowerPC instruction."
+ },
+ {
+ "EventCode": "200FA",
+ "EventName": "PM_BR_TAKEN_CMPL",
+ "BriefDescription": "Branch Taken instruction completed."
+ },
+ {
+ "EventCode": "30012",
+ "EventName": "PM_FLUSH_COMPLETION",
+ "BriefDescription": "The instruction that was next to complete (oldest in the pipeline) did not complete because it suffered a flush."
+ },
+ {
+ "EventCode": "30014",
+ "EventName": "PM_EXEC_STALL_STORE",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store instruction executing in the Load Store Unit."
+ },
+ {
+ "EventCode": "30018",
+ "EventName": "PM_DISP_STALL_HELD_SCOREBOARD_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch while waiting on the Scoreboard. This event combines VSCR and FPSCR together."
+ },
+ {
+ "EventCode": "30026",
+ "EventName": "PM_EXEC_STALL_STORE_MISS",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store whose cache line was not resident in the L1 and was waiting for allocation of the missing line into the L1."
+ },
+ {
+ "EventCode": "3012A",
+ "EventName": "PM_MRK_L2_RC_DONE",
+ "BriefDescription": "L2 RC machine completed the transaction for the marked instruction."
+ },
+ {
+ "EventCode": "3F046",
+ "EventName": "PM_ITLB_HIT_1G",
+ "BriefDescription": "Instruction TLB hit (IERAT reload) page size 1G, which implies Radix Page Table translation is in use. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "34058",
+ "EventName": "PM_DISP_STALL_BR_MPRED_ICMISS",
+ "BriefDescription": "Cycles when dispatch was stalled after a mispredicted branch resulted in an instruction cache miss."
+ },
+ {
+ "EventCode": "3D05C",
+ "EventName": "PM_DISP_STALL_HELD_RENAME_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC."
+ },
+ {
+ "EventCode": "3E052",
+ "EventName": "PM_DISP_STALL_IC_L3",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3."
+ },
+ {
+ "EventCode": "3E054",
+ "EventName": "PM_LD_MISS_L1",
+ "BriefDescription": "Load Missed L1, counted at execution time (can be greater than loads finished). LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
+ },
+ {
+ "EventCode": "301EA",
+ "EventName": "PM_THRESH_EXC_1024",
+ "BriefDescription": "Threshold counter exceeded a value of 1024."
+ },
+ {
+ "EventCode": "300FA",
+ "EventName": "PM_INST_FROM_L3MISS",
+ "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
+ },
+ {
+ "EventCode": "40006",
+ "EventName": "PM_ISSUE_KILL",
+ "BriefDescription": "Cycles in which an instruction or group of instructions were cancelled after being issued. This event increments once per occurrence, regardless of how many instructions are included in the issue group."
+ },
+ {
+ "EventCode": "40116",
+ "EventName": "PM_MRK_LARX_FIN",
+ "BriefDescription": "Marked load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
+ },
+ {
+ "EventCode": "4C010",
+ "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3MISS",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from sources beyond the local L3 after suffering a mispredicted branch."
+ },
+ {
+ "EventCode": "4D01E",
+ "EventName": "PM_DISP_STALL_BR_MPRED",
+ "BriefDescription": "Cycles when dispatch was stalled for this thread due to a mispredicted branch."
+ },
+ {
+ "EventCode": "4E010",
+ "EventName": "PM_DISP_STALL_IC_L3MISS",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from any source beyond the local L3."
+ },
+ {
+ "EventCode": "4E01A",
+ "EventName": "PM_DISP_STALL_HELD_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any reason."
+ },
+ {
+ "EventCode": "44056",
+ "EventName": "PM_VECTOR_ST_CMPL",
+ "BriefDescription": "Vector store instructions completed."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/locks.json b/tools/perf/pmu-events/arch/powerpc/power10/locks.json
new file mode 100644
index 000000000000..016d8de0e14a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/locks.json
@@ -0,0 +1,12 @@
+[
+ {
+ "EventCode": "1E058",
+ "EventName": "PM_STCX_FAIL_FIN",
+ "BriefDescription": "Conditional store instruction (STCX) failed. LARX and STCX are instructions used to acquire a lock."
+ },
+ {
+ "EventCode": "4E050",
+ "EventName": "PM_STCX_PASS_FIN",
+ "BriefDescription": "Conditional store instruction (STCX) passed. LARX and STCX are instructions used to acquire a lock."
+ }
+]
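Several descriptions here and in marked.json note that LARX and STCX are instructions used to acquire a lock: a load-and-reserve followed by a store-conditional that fails if the reservation was lost, retried in a loop. PM_STCX_PASS_FIN counts the successful stores, PM_STCX_FAIL_FIN the failed retries. A hedged sketch of the classic pattern in GCC inline asm for ppc64, illustrative only and not code from this patch (assumes the lock word is 0 when free):

    /* Spin until the lock word at *lock is atomically changed 0 -> 1. */
    static inline void spin_acquire(volatile unsigned int *lock)
    {
        unsigned int tmp;

        __asm__ __volatile__(
            "1: lwarx   %0,0,%2\n"   /* load word and reserve (larx)     */
            "   cmpwi   %0,0\n"      /* lock already held?               */
            "   bne-    1b\n"        /* yes: spin                        */
            "   stwcx.  %1,0,%2\n"   /* conditional store of 1 (stcx)    */
            "   bne-    1b\n"        /* reservation lost: retry          */
            "   isync\n"             /* acquire barrier                  */
            : "=&r" (tmp)
            : "r" (1u), "r" (lock)
            : "cr0", "memory");
    }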
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/marked.json b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
new file mode 100644
index 000000000000..93a5a5910648
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
@@ -0,0 +1,147 @@
+[
+ {
+ "EventCode": "1002C",
+ "EventName": "PM_LD_PREFETCH_CACHE_LINE_MISS",
+ "BriefDescription": "The L1 cache was reloaded with a line that fulfills a prefetch request."
+ },
+ {
+ "EventCode": "10132",
+ "EventName": "PM_MRK_INST_ISSUED",
+ "BriefDescription": "Marked instruction issued. Note that stores always get issued twice, the address gets issued to the LSU and the data gets issued to the VSU. Also, issues can sometimes get killed/cancelled and cause multiple sequential issues for the same instruction."
+ },
+ {
+ "EventCode": "101E0",
+ "EventName": "PM_MRK_INST_DISP",
+ "BriefDescription": "The thread has dispatched a randomly sampled marked instruction."
+ },
+ {
+ "EventCode": "101E2",
+ "EventName": "PM_MRK_BR_TAKEN_CMPL",
+ "BriefDescription": "Marked Branch Taken instruction completed."
+ },
+ {
+ "EventCode": "20112",
+ "EventName": "PM_MRK_NTF_FIN",
+ "BriefDescription": "The marked instruction became the oldest in the pipeline before it finished. It excludes instructions that finish at dispatch."
+ },
+ {
+ "EventCode": "2C01C",
+ "EventName": "PM_EXEC_STALL_DMISS_OFF_CHIP",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a remote chip."
+ },
+ {
+ "EventCode": "20138",
+ "EventName": "PM_MRK_ST_NEST",
+ "BriefDescription": "A store has been sampled/marked and is at the point of execution where it has completed in the core and can no longer be flushed. At this point the store is sent to the L2."
+ },
+ {
+ "EventCode": "2013A",
+ "EventName": "PM_MRK_BRU_FIN",
+ "BriefDescription": "Marked Branch instruction finished."
+ },
+ {
+ "EventCode": "2C144",
+ "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC2",
+ "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[15:27]."
+ },
+ {
+ "EventCode": "24156",
+ "EventName": "PM_MRK_STCX_FIN",
+ "BriefDescription": "Marked conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
+ },
+ {
+ "EventCode": "24158",
+ "EventName": "PM_MRK_INST",
+ "BriefDescription": "An instruction was marked. Includes both Random Instruction Sampling (RIS) at decode time and Random Event Sampling (RES) at the time the configured event happens."
+ },
+ {
+ "EventCode": "2415C",
+ "EventName": "PM_MRK_BR_CMPL",
+ "BriefDescription": "A marked branch completed. All branches are included."
+ },
+ {
+ "EventCode": "200FD",
+ "EventName": "PM_L1_ICACHE_MISS",
+ "BriefDescription": "Demand iCache Miss."
+ },
+ {
+ "EventCode": "30130",
+ "EventName": "PM_MRK_INST_FIN",
+ "BriefDescription": "marked instruction finished. Excludes instructions that finish at dispatch. Note that stores always finish twice since the address gets issued to the LSU and the data gets issued to the VSU."
+ },
+ {
+ "EventCode": "34146",
+ "EventName": "PM_MRK_LD_CMPL",
+ "BriefDescription": "Marked loads completed."
+ },
+ {
+ "EventCode": "3E158",
+ "EventName": "PM_MRK_STCX_FAIL",
+ "BriefDescription": "Marked conditional store instruction (STCX) failed. LARX and STCX are instructions used to acquire a lock."
+ },
+ {
+ "EventCode": "3E15A",
+ "EventName": "PM_MRK_ST_FIN",
+ "BriefDescription": "The marked instruction was a store of any kind."
+ },
+ {
+ "EventCode": "30068",
+ "EventName": "PM_L1_ICACHE_RELOADED_PREF",
+ "BriefDescription": "Counts all Icache prefetch reloads ( includes demand turned into prefetch)."
+ },
+ {
+ "EventCode": "301E4",
+ "EventName": "PM_MRK_BR_MPRED_CMPL",
+ "BriefDescription": "Marked Branch Mispredicted. Includes direction and target."
+ },
+ {
+ "EventCode": "300F6",
+ "EventName": "PM_LD_DEMAND_MISS_L1",
+ "BriefDescription": "The L1 cache was reloaded with a line that fulfills a demand miss request. Counted at reload time, before finish."
+ },
+ {
+ "EventCode": "300FE",
+ "EventName": "PM_DATA_FROM_L3MISS",
+ "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
+ },
+ {
+ "EventCode": "40012",
+ "EventName": "PM_L1_ICACHE_RELOADED_ALL",
+ "BriefDescription": "Counts all Icache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
+ },
+ {
+ "EventCode": "40134",
+ "EventName": "PM_MRK_INST_TIMEO",
+ "BriefDescription": "Marked instruction finish timeout (instruction was lost)."
+ },
+ {
+ "EventCode": "4003C",
+ "EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
+ },
+ {
+ "EventCode": "4505A",
+ "EventName": "PM_SP_FLOP_CMPL",
+ "BriefDescription": "Single Precision floating point instructions completed."
+ },
+ {
+ "EventCode": "4D058",
+ "EventName": "PM_VECTOR_FLOP_CMPL",
+ "BriefDescription": "Vector floating point instructions completed."
+ },
+ {
+ "EventCode": "4D05A",
+ "EventName": "PM_NON_MATH_FLOP_CMPL",
+ "BriefDescription": "Non Math instructions completed."
+ },
+ {
+ "EventCode": "401E0",
+ "EventName": "PM_MRK_INST_CMPL",
+ "BriefDescription": "marked instruction completed."
+ },
+ {
+ "EventCode": "400FE",
+ "EventName": "PM_DATA_FROM_MEMORY",
+ "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss."
+ }
+]
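The marked branch events above compose the same way. A hypothetical metrics-style entry (not part of this patch) for the marked branch misprediction rate:

    {
        "BriefDescription": "Marked branches mispredicted (direction or target) as a percentage of marked branches completed",
        "MetricExpr": "PM_MRK_BR_MPRED_CMPL / PM_MRK_BR_CMPL * 100",
        "MetricGroup": "marked",
        "MetricName": "mrk_br_mpred_percent"
    }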
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/memory.json b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
new file mode 100644
index 000000000000..b01141eeebee
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
@@ -0,0 +1,192 @@
+[
+ {
+ "EventCode": "1000A",
+ "EventName": "PM_PMC3_REWIND",
+ "BriefDescription": "The speculative event selected for PMC3 rewinds and the counter for PMC3 is not charged."
+ },
+ {
+ "EventCode": "1C040",
+ "EventName": "PM_XFER_FROM_SRC_PMC1",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "1C142",
+ "EventName": "PM_MRK_XFER_FROM_SRC_PMC1",
+ "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "1C144",
+ "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC1",
+ "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[0:12]."
+ },
+ {
+ "EventCode": "1C056",
+ "EventName": "PM_DERAT_MISS_4K",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 4K. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "1C058",
+ "EventName": "PM_DTLB_MISS_16G",
+ "BriefDescription": "Data TLB reload (after a miss) page size 16G. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "1C05C",
+ "EventName": "PM_DTLB_MISS_2M",
+ "BriefDescription": "Data TLB reload (after a miss) page size 2M. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "1E056",
+ "EventName": "PM_EXEC_STALL_STORE_PIPE",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the store unit. This does not include cycles spent handling store misses, PTESYNC instructions or TLBIE instructions."
+ },
+ {
+ "EventCode": "1F150",
+ "EventName": "PM_MRK_ST_L2_CYC",
+ "BriefDescription": "Cycles from L2 RC dispatch to L2 RC completion."
+ },
+ {
+ "EventCode": "10062",
+ "EventName": "PM_LD_L3MISS_PEND_CYC",
+ "BriefDescription": "Cycles L3 miss was pending for this thread."
+ },
+ {
+ "EventCode": "20010",
+ "EventName": "PM_PMC1_OVERFLOW",
+ "BriefDescription": "The event selected for PMC1 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "2001A",
+ "EventName": "PM_ITLB_HIT",
+ "BriefDescription": "The PTE required to translate the instruction address was resident in the TLB (instruction TLB access/IERAT reload). Applies to both HPT and RPT. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "2003E",
+ "EventName": "PM_PTESYNC_FIN",
+ "BriefDescription": "Ptesync instruction finished in the store unit. Only one ptesync can finish at a time."
+ },
+ {
+ "EventCode": "2C040",
+ "EventName": "PM_XFER_FROM_SRC_PMC2",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "2C054",
+ "EventName": "PM_DERAT_MISS_64K",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "2C056",
+ "EventName": "PM_DTLB_MISS_4K",
+ "BriefDescription": "Data TLB reload (after a miss) page size 4K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "2D154",
+ "EventName": "PM_MRK_DERAT_MISS_64K",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "200F6",
+ "EventName": "PM_DERAT_MISS",
+ "BriefDescription": "DERAT Reloaded to satisfy a DERAT miss. All page sizes are counted by this event. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "3000A",
+ "EventName": "PM_DISP_STALL_ITLB_MISS",
+ "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction TLB miss."
+ },
+ {
+ "EventCode": "30016",
+ "EventName": "PM_EXEC_STALL_DERAT_DTLB_MISS",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss and waited for it resolve."
+ },
+ {
+ "EventCode": "3C040",
+ "EventName": "PM_XFER_FROM_SRC_PMC3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "3C142",
+ "EventName": "PM_MRK_XFER_FROM_SRC_PMC3",
+ "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "3C144",
+ "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC3",
+ "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[30:42]."
+ },
+ {
+ "EventCode": "3C054",
+ "EventName": "PM_DERAT_MISS_16M",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16M. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "3C056",
+ "EventName": "PM_DTLB_MISS_64K",
+ "BriefDescription": "Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "3C058",
+ "EventName": "PM_LARX_FIN",
+ "BriefDescription": "Load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
+ },
+ {
+ "EventCode": "301E2",
+ "EventName": "PM_MRK_ST_CMPL",
+ "BriefDescription": "Marked store completed and sent to nest. Note that this count excludes cache-inhibited stores."
+ },
+ {
+ "EventCode": "300FC",
+ "EventName": "PM_DTLB_MISS",
+ "BriefDescription": "The DPTEG required for the load/store instruction in execution was missing from the TLB. It includes pages of all sizes for demand and prefetch activity."
+ },
+ {
+ "EventCode": "4D02C",
+ "EventName": "PM_PMC1_REWIND",
+ "BriefDescription": "The speculative event selected for PMC1 rewinds and the counter for PMC1 is not charged."
+ },
+ {
+ "EventCode": "4003E",
+ "EventName": "PM_LD_CMPL",
+ "BriefDescription": "Loads completed."
+ },
+ {
+ "EventCode": "4C040",
+ "EventName": "PM_XFER_FROM_SRC_PMC4",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "4C142",
+ "EventName": "PM_MRK_XFER_FROM_SRC_PMC4",
+ "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "4C144",
+ "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC4",
+ "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[45:57]."
+ },
+ {
+ "EventCode": "4C056",
+ "EventName": "PM_DTLB_MISS_16M",
+ "BriefDescription": "Data TLB reload (after a miss) page size 16M. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "4C05A",
+ "EventName": "PM_DTLB_MISS_1G",
+ "BriefDescription": "Data TLB reload (after a miss) page size 1G. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "4C15E",
+ "EventName": "PM_MRK_DTLB_MISS_64K",
+ "BriefDescription": "Marked Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "4D056",
+ "EventName": "PM_NON_FMA_FLOP_CMPL",
+ "BriefDescription": "Non FMA instruction completed."
+ },
+ {
+ "EventCode": "40164",
+ "EventName": "PM_MRK_DERAT_MISS_2M",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ }
+]
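As one example of how these events are typically consumed, DERAT misses can be normalized per completed run instruction, in the style of the power8/power9 metrics files (illustrative entry, not part of this patch; PM_RUN_INST_CMPL is defined in others.json below):

    {
        "BriefDescription": "DERAT misses per 100 run instructions, all page sizes",
        "MetricExpr": "PM_DERAT_MISS / PM_RUN_INST_CMPL * 100",
        "MetricGroup": "translation",
        "MetricName": "derat_miss_rate_percent"
    }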
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/others.json b/tools/perf/pmu-events/arch/powerpc/power10/others.json
new file mode 100644
index 000000000000..a119e56cbf1c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/others.json
@@ -0,0 +1,297 @@
+[
+ {
+ "EventCode": "10016",
+ "EventName": "PM_VSU0_ISSUE",
+ "BriefDescription": "VSU instructions issued to VSU pipe 0."
+ },
+ {
+ "EventCode": "1001C",
+ "EventName": "PM_ULTRAVISOR_INST_CMPL",
+ "BriefDescription": "PowerPC instructions that completed while the thread was in ultravisor state."
+ },
+ {
+ "EventCode": "100F0",
+ "EventName": "PM_CYC",
+ "BriefDescription": "Processor cycles."
+ },
+ {
+ "EventCode": "10134",
+ "EventName": "PM_MRK_ST_DONE_L2",
+ "BriefDescription": "Marked stores completed in L2 (RC machine done)."
+ },
+ {
+ "EventCode": "1505E",
+ "EventName": "PM_LD_HIT_L1",
+ "BriefDescription": "Loads that finished without experiencing an L1 miss."
+ },
+ {
+ "EventCode": "1D05E",
+ "EventName": "PM_DISP_STALL_HELD_HALT_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management."
+ },
+ {
+ "EventCode": "1E054",
+ "EventName": "PM_EXEC_STALL_DMISS_L21_L31",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip."
+ },
+ {
+ "EventCode": "1E05A",
+ "EventName": "PM_CMPL_STALL_LWSYNC",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a lwsync waiting to complete."
+ },
+ {
+ "EventCode": "1F056",
+ "EventName": "PM_DISP_SS0_2_INSTR_CYC",
+ "BriefDescription": "Cycles in which Superslice 0 dispatches either 1 or 2 instructions."
+ },
+ {
+ "EventCode": "1F15C",
+ "EventName": "PM_MRK_STCX_L2_CYC",
+ "BriefDescription": "Cycles spent in the nest portion of a marked Stcx instruction. It starts counting when the operation starts to drain to the L2 and it stops counting when the instruction retires from the Instruction Completion Table (ICT) in the Instruction Sequencing Unit (ISU)."
+ },
+ {
+ "EventCode": "10066",
+ "EventName": "PM_ADJUNCT_CYC",
+ "BriefDescription": "Cycles in which the thread is in Adjunct state. MSR[S HV PR] bits = 011."
+ },
+ {
+ "EventCode": "101E4",
+ "EventName": "PM_MRK_L1_ICACHE_MISS",
+ "BriefDescription": "Marked Instruction suffered an icache Miss."
+ },
+ {
+ "EventCode": "101EA",
+ "EventName": "PM_MRK_L1_RELOAD_VALID",
+ "BriefDescription": "Marked demand reload."
+ },
+ {
+ "EventCode": "100F4",
+ "EventName": "PM_FLOP_CMPL",
+ "BriefDescription": "Floating Point Operations Completed. Includes any type. It counts once for each 1, 2, 4 or 8 flop instruction. Use PM_1|2|4|8_FLOP_CMPL events to count flops."
+ },
+ {
+ "EventCode": "100FA",
+ "EventName": "PM_RUN_LATCH_ANY_THREAD_CYC",
+ "BriefDescription": "Cycles when at least one thread has the run latch set."
+ },
+ {
+ "EventCode": "100FC",
+ "EventName": "PM_LD_REF_L1",
+ "BriefDescription": "All L1 D cache load references counted at finish, gated by reject. In P9 and earlier this event counted only cacheable loads but in P10 both cacheable and non-cacheable loads are included."
+ },
+ {
+ "EventCode": "20006",
+ "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
+ },
+ {
+ "EventCode": "2000C",
+ "EventName": "PM_RUN_LATCH_ALL_THREADS_CYC",
+ "BriefDescription": "Cycles when the run latch is set for all threads."
+ },
+ {
+ "EventCode": "2E010",
+ "EventName": "PM_ADJUNCT_INST_CMPL",
+ "BriefDescription": "PowerPC instructions that completed while the thread is in Adjunct state."
+ },
+ {
+ "EventCode": "2E014",
+ "EventName": "PM_STCX_FIN",
+ "BriefDescription": "Conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
+ },
+ {
+ "EventCode": "20130",
+ "EventName": "PM_MRK_INST_DECODED",
+ "BriefDescription": "An instruction was marked at decode time. Random Instruction Sampling (RIS) only."
+ },
+ {
+ "EventCode": "20132",
+ "EventName": "PM_MRK_DFU_ISSUE",
+ "BriefDescription": "The marked instruction was a decimal floating point operation issued to the VSU. Measured at issue time."
+ },
+ {
+ "EventCode": "20134",
+ "EventName": "PM_MRK_FXU_ISSUE",
+ "BriefDescription": "The marked instruction was a fixed point operation issued to the VSU. Measured at issue time."
+ },
+ {
+ "EventCode": "2505C",
+ "EventName": "PM_VSU_ISSUE",
+ "BriefDescription": "At least one VSU instruction was issued to one of the VSU pipes. Up to 4 per cycle. Includes fixed point operations."
+ },
+ {
+ "EventCode": "2F054",
+ "EventName": "PM_DISP_SS1_2_INSTR_CYC",
+ "BriefDescription": "Cycles in which Superslice 1 dispatches either 1 or 2 instructions."
+ },
+ {
+ "EventCode": "2F056",
+ "EventName": "PM_DISP_SS1_4_INSTR_CYC",
+ "BriefDescription": "Cycles in which Superslice 1 dispatches either 3 or 4 instructions."
+ },
+ {
+ "EventCode": "2006C",
+ "EventName": "PM_RUN_CYC_SMT4_MODE",
+ "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT4 mode."
+ },
+ {
+ "EventCode": "201E0",
+ "EventName": "PM_MRK_DATA_FROM_MEMORY",
+ "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss for a marked load."
+ },
+ {
+ "EventCode": "201E4",
+ "EventName": "PM_MRK_DATA_FROM_L3MISS",
+ "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked load."
+ },
+ {
+ "EventCode": "201E8",
+ "EventName": "PM_THRESH_EXC_512",
+ "BriefDescription": "Threshold counter exceeded a value of 512."
+ },
+ {
+ "EventCode": "200F2",
+ "EventName": "PM_INST_DISP",
+ "BriefDescription": "PowerPC instructions dispatched."
+ },
+ {
+ "EventCode": "30132",
+ "EventName": "PM_MRK_VSU_FIN",
+ "BriefDescription": "VSU marked instructions finished. Excludes simple FX instructions issued to the Store Unit."
+ },
+ {
+ "EventCode": "30038",
+ "EventName": "PM_EXEC_STALL_DMISS_LMEM",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local memory, local OpenCapp cache, or local OpenCapp memory."
+ },
+ {
+ "EventCode": "3F04A",
+ "EventName": "PM_LSU_ST5_FIN",
+ "BriefDescription": "LSU Finished an internal operation in ST2 port."
+ },
+ {
+ "EventCode": "34054",
+ "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict."
+ },
+ {
+ "EventCode": "3405A",
+ "EventName": "PM_PRIVILEGED_INST_CMPL",
+ "BriefDescription": "PowerPC Instructions that completed while the thread is in Privileged state."
+ },
+ {
+ "EventCode": "3F150",
+ "EventName": "PM_MRK_ST_DRAIN_CYC",
+ "BriefDescription": "cycles to drain st from core to L2."
+ },
+ {
+ "EventCode": "3F054",
+ "EventName": "PM_DISP_SS0_4_INSTR_CYC",
+ "BriefDescription": "Cycles in which Superslice 0 dispatches either 3 or 4 instructions."
+ },
+ {
+ "EventCode": "3F056",
+ "EventName": "PM_DISP_SS0_8_INSTR_CYC",
+ "BriefDescription": "Cycles in which Superslice 0 dispatches either 5, 6, 7 or 8 instructions."
+ },
+ {
+ "EventCode": "30162",
+ "EventName": "PM_MRK_ISSUE_DEPENDENT_LOAD",
+ "BriefDescription": "The marked instruction was dependent on a load. It is eligible for issue kill."
+ },
+ {
+ "EventCode": "40114",
+ "EventName": "PM_MRK_START_PROBE_NOP_DISP",
+ "BriefDescription": "Marked Start probe nop dispatched. Instruction AND R0,R0,R0."
+ },
+ {
+ "EventCode": "4001C",
+ "EventName": "PM_VSU_FIN",
+ "BriefDescription": "VSU instructions finished."
+ },
+ {
+ "EventCode": "4C01A",
+ "EventName": "PM_EXEC_STALL_DMISS_OFF_NODE",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a distant chip."
+ },
+ {
+ "EventCode": "4D012",
+ "EventName": "PM_PMC3_SAVED",
+ "BriefDescription": "The conditions for the speculative event selected for PMC3 are met and PMC3 is charged."
+ },
+ {
+ "EventCode": "4D022",
+ "EventName": "PM_HYPERVISOR_INST_CMPL",
+ "BriefDescription": "PowerPC instructions that completed while the thread is in hypervisor state."
+ },
+ {
+ "EventCode": "4D026",
+ "EventName": "PM_ULTRAVISOR_CYC",
+ "BriefDescription": "Cycles when the thread is in Ultravisor state. MSR[S HV PR]=110."
+ },
+ {
+ "EventCode": "4D028",
+ "EventName": "PM_PRIVILEGED_CYC",
+ "BriefDescription": "Cycles when the thread is in Privileged state. MSR[S HV PR]=x00."
+ },
+ {
+ "EventCode": "40030",
+ "EventName": "PM_INST_FIN",
+ "BriefDescription": "Instructions finished."
+ },
+ {
+ "EventCode": "44146",
+ "EventName": "PM_MRK_STCX_CORE_CYC",
+ "BriefDescription": "Cycles spent in the core portion of a marked Stcx instruction. It starts counting when the instruction is decoded and stops counting when it drains into the L2."
+ },
+ {
+ "EventCode": "44054",
+ "EventName": "PM_VECTOR_LD_CMPL",
+ "BriefDescription": "Vector load instructions completed."
+ },
+ {
+ "EventCode": "45054",
+ "EventName": "PM_FMA_CMPL",
+ "BriefDescription": "Two floating point instructions completed (FMA class of instructions: fmadd, fnmadd, fmsub, fnmsub). Scalar instructions only."
+ },
+ {
+ "EventCode": "45056",
+ "EventName": "PM_SCALAR_FLOP_CMPL",
+ "BriefDescription": "Scalar floating point instructions completed."
+ },
+ {
+ "EventCode": "4505C",
+ "EventName": "PM_MATH_FLOP_CMPL",
+ "BriefDescription": "Math floating point instructions completed."
+ },
+ {
+ "EventCode": "4D05E",
+ "EventName": "PM_BR_CMPL",
+ "BriefDescription": "A branch completed. All branches are included."
+ },
+ {
+ "EventCode": "4E15E",
+ "EventName": "PM_MRK_INST_FLUSHED",
+ "BriefDescription": "The marked instruction was flushed."
+ },
+ {
+ "EventCode": "401E6",
+ "EventName": "PM_MRK_INST_FROM_L3MISS",
+ "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "401E8",
+ "EventName": "PM_MRK_DATA_FROM_L2MISS",
+ "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss for a marked load."
+ },
+ {
+ "EventCode": "400F0",
+ "EventName": "PM_LD_DEMAND_MISS_L1_FIN",
+ "BriefDescription": "Load Missed L1, counted at finish time."
+ },
+ {
+ "EventCode": "400FA",
+ "EventName": "PM_RUN_INST_CMPL",
+ "BriefDescription": "Completed PowerPC instructions gated by the run latch."
+ }
+]
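PM_RUN_INST_CMPL and PM_RUN_CYC (the latter defined in pipeline.json below) are the usual building blocks for run-latch-gated IPC. A hypothetical metrics-style sketch (not part of this patch):

    {
        "BriefDescription": "Instructions per cycle while the run latch is set",
        "MetricExpr": "PM_RUN_INST_CMPL / PM_RUN_CYC",
        "MetricGroup": "general",
        "MetricName": "run_ipc"
    }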
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
new file mode 100644
index 000000000000..b61b5cc157ee
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
@@ -0,0 +1,297 @@
+[
+ {
+ "EventCode": "100FE",
+ "EventName": "PM_INST_CMPL",
+ "BriefDescription": "PowerPC instructions completed."
+ },
+ {
+ "EventCode": "10006",
+ "EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason."
+ },
+ {
+ "EventCode": "1000C",
+ "EventName": "PM_LSU_LD0_FIN",
+ "BriefDescription": "LSU Finished an internal operation in LD0 port."
+ },
+ {
+ "EventCode": "1000E",
+ "EventName": "PM_MMA_ISSUED",
+ "BriefDescription": "MMA instructions issued."
+ },
+ {
+ "EventCode": "10012",
+ "EventName": "PM_LSU_ST0_FIN",
+ "BriefDescription": "LSU Finished an internal operation in ST0 port."
+ },
+ {
+ "EventCode": "10014",
+ "EventName": "PM_LSU_ST4_FIN",
+ "BriefDescription": "LSU Finished an internal operation in ST4 port."
+ },
+ {
+ "EventCode": "10018",
+ "EventName": "PM_IC_DEMAND_CYC",
+ "BriefDescription": "Cycles in which an instruction reload is pending to satisfy a demand miss."
+ },
+ {
+ "EventCode": "10022",
+ "EventName": "PM_PMC2_SAVED",
+ "BriefDescription": "The conditions for the speculative event selected for PMC2 are met and PMC2 is charged."
+ },
+ {
+ "EventCode": "10024",
+ "EventName": "PM_PMC5_OVERFLOW",
+ "BriefDescription": "The event selected for PMC5 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "10058",
+ "EventName": "PM_EXEC_STALL_FIN_AT_DISP",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline finished at dispatch and did not require execution in the LSU, BRU or VSU."
+ },
+ {
+ "EventCode": "1005A",
+ "EventName": "PM_FLUSH_MPRED",
+ "BriefDescription": "A flush occurred due to a mispredicted branch. Includes target and direction."
+ },
+ {
+ "EventCode": "1C05A",
+ "EventName": "PM_DERAT_MISS_2M",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M. Implies radix translation. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "10064",
+ "EventName": "PM_DISP_STALL_IC_L2",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2."
+ },
+ {
+ "EventCode": "10068",
+ "EventName": "PM_BR_FIN",
+ "BriefDescription": "A branch instruction finished. Includes predicted/mispredicted/unconditional."
+ },
+ {
+ "EventCode": "1006A",
+ "EventName": "PM_FX_LSU_FIN",
+ "BriefDescription": "Simple fixed point instruction issued to the store unit. Measured at finish time."
+ },
+ {
+ "EventCode": "1006C",
+ "EventName": "PM_RUN_CYC_ST_MODE",
+ "BriefDescription": "Cycles when the run latch is set and the core is in ST mode."
+ },
+ {
+ "EventCode": "20004",
+ "EventName": "PM_ISSUE_STALL",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was dispatched but not issued yet."
+ },
+ {
+ "EventCode": "2000A",
+ "EventName": "PM_HYPERVISOR_CYC",
+ "BriefDescription": "Cycles when the thread is in Hypervisor state. MSR[S HV PR]=010."
+ },
+ {
+ "EventCode": "2000E",
+ "EventName": "PM_LSU_LD1_FIN",
+ "BriefDescription": "LSU Finished an internal operation in LD1 port."
+ },
+ {
+ "EventCode": "2C014",
+ "EventName": "PM_CMPL_STALL_SPECIAL",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline required special handling before completing."
+ },
+ {
+ "EventCode": "2C018",
+ "EventName": "PM_EXEC_STALL_DMISS_L3MISS",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a source beyond the local L2 or local L3."
+ },
+ {
+ "EventCode": "2D010",
+ "EventName": "PM_LSU_ST1_FIN",
+ "BriefDescription": "LSU Finished an internal operation in ST1 port."
+ },
+ {
+ "EventCode": "2D012",
+ "EventName": "PM_VSU1_ISSUE",
+ "BriefDescription": "VSU instructions issued to VSU pipe 1."
+ },
+ {
+ "EventCode": "2D018",
+ "EventName": "PM_EXEC_STALL_VSU",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the VSU (includes FXU, VSU, CRU)."
+ },
+ {
+ "EventCode": "2E01E",
+ "EventName": "PM_EXEC_STALL_NTC_FLUSH",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children."
+ },
+ {
+ "EventCode": "2013C",
+ "EventName": "PM_MRK_FX_LSU_FIN",
+ "BriefDescription": "The marked instruction was simple fixed point that was issued to the store unit. Measured at finish time."
+ },
+ {
+ "EventCode": "2405A",
+ "EventName": "PM_NTC_FIN",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline (NTC) finishes. Note that instructions can finish out of order, therefore not all the instructions that finish have a Next-to-complete status."
+ },
+ {
+ "EventCode": "201E2",
+ "EventName": "PM_MRK_LD_MISS_L1",
+ "BriefDescription": "Marked DL1 Demand Miss counted at finish time."
+ },
+ {
+ "EventCode": "200F4",
+ "EventName": "PM_RUN_CYC",
+ "BriefDescription": "Processor cycles gated by the run latch."
+ },
+ {
+ "EventCode": "30004",
+ "EventName": "PM_DISP_STALL_FLUSH",
+ "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
+ },
+ {
+ "EventCode": "30008",
+ "EventName": "PM_EXEC_STALL",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting to finish in one of the execution units (BRU, LSU, VSU). Only cycles between issue and finish are counted in this category."
+ },
+ {
+ "EventCode": "3001A",
+ "EventName": "PM_LSU_ST2_FIN",
+ "BriefDescription": "LSU Finished an internal operation in ST2 port."
+ },
+ {
+ "EventCode": "30020",
+ "EventName": "PM_PMC2_REWIND",
+ "BriefDescription": "The speculative event selected for PMC2 rewinds and the counter for PMC2 is not charged."
+ },
+ {
+ "EventCode": "30022",
+ "EventName": "PM_PMC4_SAVED",
+ "BriefDescription": "The conditions for the speculative event selected for PMC4 are met and PMC4 is charged."
+ },
+ {
+ "EventCode": "30024",
+ "EventName": "PM_PMC6_OVERFLOW",
+ "BriefDescription": "The event selected for PMC6 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "30028",
+ "EventName": "PM_CMPL_STALL_MEM_ECC",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC."
+ },
+ {
+ "EventCode": "30036",
+ "EventName": "PM_EXEC_STALL_SIMPLE_FX",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a simple fixed point instruction executing in the Load Store Unit."
+ },
+ {
+ "EventCode": "3003A",
+ "EventName": "PM_CMPL_STALL_EXCEPTION",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was not allowed to complete because it was interrupted by ANY exception, which has to be serviced before the instruction can complete."
+ },
+ {
+ "EventCode": "3F044",
+ "EventName": "PM_VSU2_ISSUE",
+ "BriefDescription": "VSU instructions issued to VSU pipe 2."
+ },
+ {
+ "EventCode": "30058",
+ "EventName": "PM_TLBIE_FIN",
+ "BriefDescription": "TLBIE instructions finished in the LSU. Two TLBIEs can finish each cycle. All will be counted."
+ },
+ {
+ "EventCode": "3D058",
+ "EventName": "PM_SCALAR_FSQRT_FDIV_ISSUE",
+ "BriefDescription": "Scalar versions of four floating point operations: fdiv,fsqrt (xvdivdp, xvdivsp, xvsqrtdp, xvsqrtsp)."
+ },
+ {
+ "EventCode": "30066",
+ "EventName": "PM_LSU_FIN",
+ "BriefDescription": "LSU Finished an internal operation (up to 4 per cycle)."
+ },
+ {
+ "EventCode": "40004",
+ "EventName": "PM_FXU_ISSUE",
+ "BriefDescription": "A fixed point instruction was issued to the VSU."
+ },
+ {
+ "EventCode": "40008",
+ "EventName": "PM_NTC_ALL_FIN",
+ "BriefDescription": "Cycles in which both instructions in the ICT entry pair show as finished. These are the cycles between finish and completion for the oldest pair of instructions in the pipeline."
+ },
+ {
+ "EventCode": "40010",
+ "EventName": "PM_PMC3_OVERFLOW",
+ "BriefDescription": "The event selected for PMC3 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "4C012",
+ "EventName": "PM_EXEC_STALL_DERAT_ONLY_MISS",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered an ERAT miss and waited for it resolve."
+ },
+ {
+ "EventCode": "4C018",
+ "EventName": "PM_CMPL_STALL",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline cannot complete because the thread was blocked for any reason."
+ },
+ {
+ "EventCode": "4C01E",
+ "EventName": "PM_LSU_ST3_FIN",
+ "BriefDescription": "LSU Finished an internal operation in ST3 port."
+ },
+ {
+ "EventCode": "4D018",
+ "EventName": "PM_EXEC_STALL_BRU",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Branch unit."
+ },
+ {
+ "EventCode": "4D01A",
+ "EventName": "PM_CMPL_STALL_HWSYNC",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a hwsync waiting for response from L2 before completing."
+ },
+ {
+ "EventCode": "4D01C",
+ "EventName": "PM_EXEC_STALL_TLBIEL",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIEL instruction executing in the Load Store Unit. TLBIEL instructions have lower overhead than TLBIE instructions because they don't get set to the nest."
+ },
+ {
+ "EventCode": "4E012",
+ "EventName": "PM_EXEC_STALL_UNKNOWN",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline completed without an ntf_type pulse. The ntf_pulse was missed by the ISU because the NTF finishes and completions came too close together."
+ },
+ {
+ "EventCode": "4D020",
+ "EventName": "PM_VSU3_ISSUE",
+ "BriefDescription": "VSU instruction was issued to VSU pipe 3."
+ },
+ {
+ "EventCode": "40132",
+ "EventName": "PM_MRK_LSU_FIN",
+ "BriefDescription": "LSU marked instruction finish."
+ },
+ {
+ "EventCode": "45058",
+ "EventName": "PM_IC_MISS_CMPL",
+ "BriefDescription": "Non-speculative icache miss, counted at completion."
+ },
+ {
+ "EventCode": "4D050",
+ "EventName": "PM_VSU_NON_FLOP_CMPL",
+ "BriefDescription": "Non-floating point VSU instructions completed."
+ },
+ {
+ "EventCode": "4D052",
+ "EventName": "PM_2FLOP_CMPL",
+ "BriefDescription": "Double Precision vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg completed."
+ },
+ {
+ "EventCode": "400F2",
+ "EventName": "PM_1PLUS_PPC_DISP",
+ "BriefDescription": "Cycles at least one Instr Dispatched."
+ },
+ {
+ "EventCode": "400F8",
+ "EventName": "PM_FLUSH",
+ "BriefDescription": "Flush (any type)."
+ }
+]
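The stall events in this file form a cycles-accounting hierarchy (dispatch stalls, PM_EXEC_STALL between issue and finish, PM_CMPL_STALL between finish and completion). A hypothetical entry (not part of this patch) normalizing one level of that breakdown:

    {
        "BriefDescription": "Percentage of run cycles in which the oldest instruction was stalled between issue and finish",
        "MetricExpr": "PM_EXEC_STALL / PM_RUN_CYC * 100",
        "MetricGroup": "cpi_breakdown",
        "MetricName": "exec_stall_percent"
    }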
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
new file mode 100644
index 000000000000..ea122a91ceb0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
@@ -0,0 +1,22 @@
+[
+ {
+ "EventCode": "301E8",
+ "EventName": "PM_THRESH_EXC_64",
+ "BriefDescription": "Threshold counter exceeded a value of 64."
+ },
+ {
+ "EventCode": "45050",
+ "EventName": "PM_1FLOP_CMPL",
+ "BriefDescription": "One floating point instruction completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
+ },
+ {
+ "EventCode": "45052",
+ "EventName": "PM_4FLOP_CMPL",
+ "BriefDescription": "Four floating point instructions completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
+ },
+ {
+ "EventCode": "4D054",
+ "EventName": "PM_8FLOP_CMPL",
+ "BriefDescription": "Four Double Precision vector instructions completed."
+ }
+]
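Per the hint in PM_FLOP_CMPL's description ("Use PM_1|2|4|8_FLOP_CMPL events to count flops"), total flops follow by weighting each class; PM_2FLOP_CMPL is defined in pipeline.json above. A sketch whose entry and name are illustrative, not part of this patch:

    {
        "BriefDescription": "Total floating point operations completed, weighted by flop class",
        "MetricExpr": "PM_1FLOP_CMPL + 2 * PM_2FLOP_CMPL + 4 * PM_4FLOP_CMPL + 8 * PM_8FLOP_CMPL",
        "MetricGroup": "general",
        "MetricName": "total_flops_cmpl"
    }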
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/translation.json b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
new file mode 100644
index 000000000000..5a714e3dd71a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
@@ -0,0 +1,57 @@
+[
+ {
+ "EventCode": "1F15E",
+ "EventName": "PM_MRK_START_PROBE_NOP_CMPL",
+ "BriefDescription": "Marked Start probe nop (AND R0,R0,R0) completed."
+ },
+ {
+ "EventCode": "20016",
+ "EventName": "PM_ST_FIN",
+ "BriefDescription": "Store finish count. Includes speculative activity."
+ },
+ {
+ "EventCode": "20018",
+ "EventName": "PM_ST_FWD",
+ "BriefDescription": "Store forwards that finished."
+ },
+ {
+ "EventCode": "2011C",
+ "EventName": "PM_MRK_NTF_CYC",
+ "BriefDescription": "Cycles during which the marked instruction is the oldest in the pipeline (NTF or NTC)."
+ },
+ {
+ "EventCode": "2E01C",
+ "EventName": "PM_EXEC_STALL_TLBIE",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIE instruction executing in the Load Store Unit."
+ },
+ {
+ "EventCode": "201E6",
+ "EventName": "PM_THRESH_EXC_32",
+ "BriefDescription": "Threshold counter exceeded a value of 32."
+ },
+ {
+ "EventCode": "200F0",
+ "EventName": "PM_ST_CMPL",
+ "BriefDescription": "Stores completed from S2Q (2nd-level store queue). This event includes regular stores, stcx and cache inhibited stores. The following operations are excluded (pteupdate, snoop tlbie complete, store atomics, miso, load atomic payloads, tlbie, tlbsync, slbieg, isync, msgsnd, slbiag, cpabort, copy, tcheck, tend, stsync, dcbst, icbi, dcbf, hwsync, lwsync, ptesync, eieio, msgsync)."
+ },
+ {
+ "EventCode": "200FE",
+ "EventName": "PM_DATA_FROM_L2MISS",
+ "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss."
+ },
+ {
+ "EventCode": "30010",
+ "EventName": "PM_PMC2_OVERFLOW",
+ "BriefDescription": "The event selected for PMC2 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "4D010",
+ "EventName": "PM_PMC1_SAVED",
+ "BriefDescription": "The conditions for the speculative event selected for PMC1 are met and PMC1 is charged."
+ },
+ {
+ "EventCode": "4D05C",
+ "EventName": "PM_DPP_FLOP_CMPL",
+ "BriefDescription": "Double-Precision or Quad-Precision instructions completed."
+ }
+]
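PM_ST_FWD and PM_ST_FIN from this file combine into a store-forwarding rate. A hypothetical metrics-style entry (not part of this patch):

    {
        "BriefDescription": "Store forwards as a percentage of finished stores",
        "MetricExpr": "PM_ST_FWD / PM_ST_FIN * 100",
        "MetricGroup": "general",
        "MetricName": "st_fwd_percent"
    }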
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/metrics.json b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
index fc4aa6c2ddc9..4e25525b7da6 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
@@ -885,37 +885,37 @@
"MetricName": "flush_rate_percent"
},
{
- "BriefDescription": "GCT slot utilization (11 to 14) as a % of cycles this thread had atleast 1 slot valid",
+ "BriefDescription": "GCT slot utilization (11 to 14) as a % of cycles this thread had at least 1 slot valid",
"MetricExpr": "PM_GCT_UTIL_11_14_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
"MetricGroup": "general",
"MetricName": "gct_util_11to14_slots_percent"
},
{
- "BriefDescription": "GCT slot utilization (15 to 17) as a % of cycles this thread had atleast 1 slot valid",
+ "BriefDescription": "GCT slot utilization (15 to 17) as a % of cycles this thread had at least 1 slot valid",
"MetricExpr": "PM_GCT_UTIL_15_17_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
"MetricGroup": "general",
"MetricName": "gct_util_15to17_slots_percent"
},
{
- "BriefDescription": "GCT slot utilization 18+ as a % of cycles this thread had atleast 1 slot valid",
+ "BriefDescription": "GCT slot utilization 18+ as a % of cycles this thread had at least 1 slot valid",
"MetricExpr": "PM_GCT_UTIL_18_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
"MetricGroup": "general",
"MetricName": "gct_util_18plus_slots_percent"
},
{
- "BriefDescription": "GCT slot utilization (1 to 2) as a % of cycles this thread had atleast 1 slot valid",
+ "BriefDescription": "GCT slot utilization (1 to 2) as a % of cycles this thread had at least 1 slot valid",
"MetricExpr": "PM_GCT_UTIL_1_2_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
"MetricGroup": "general",
"MetricName": "gct_util_1to2_slots_percent"
},
{
- "BriefDescription": "GCT slot utilization (3 to 6) as a % of cycles this thread had atleast 1 slot valid",
+ "BriefDescription": "GCT slot utilization (3 to 6) as a % of cycles this thread had at least 1 slot valid",
"MetricExpr": "PM_GCT_UTIL_3_6_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
"MetricGroup": "general",
"MetricName": "gct_util_3to6_slots_percent"
},
{
- "BriefDescription": "GCT slot utilization (7 to 10) as a % of cycles this thread had atleast 1 slot valid",
+ "BriefDescription": "GCT slot utilization (7 to 10) as a % of cycles this thread had at least 1 slot valid",
"MetricExpr": "PM_GCT_UTIL_7_10_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
"MetricGroup": "general",
"MetricName": "gct_util_7to10_slots_percent"
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/metrics.json b/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
index f8784c608479..db86ba36224d 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
@@ -1210,156 +1210,24 @@
"MetricName": "inst_from_rmem_percent"
},
{
- "BriefDescription": "%L2 Modified CO Cache read Utilization (4 pclks per disp attempt)",
- "MetricExpr": "((PM_L2_CASTOUT_MOD/2)*4)/ PM_RUN_CYC * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_co_m_rd_util"
- },
- {
- "BriefDescription": "L2 dcache invalidates per run inst (per core)",
- "MetricExpr": "(PM_L2_DC_INV / 2) / PM_RUN_INST_CMPL * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_dc_inv_rate_percent"
- },
- {
"BriefDescription": "Demand load misses as a % of L2 LD dispatches (per thread)",
"MetricExpr": "PM_L1_DCACHE_RELOAD_VALID / (PM_L2_LD / 2) * 100",
"MetricGroup": "l2_stats",
"MetricName": "l2_dem_ld_disp_percent"
},
{
- "BriefDescription": "L2 Icache invalidates per run inst (per core)",
- "MetricExpr": "(PM_L2_IC_INV / 2) / PM_RUN_INST_CMPL * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_ic_inv_rate_percent"
- },
- {
- "BriefDescription": "L2 Inst misses as a % of total L2 Inst dispatches (per thread)",
- "MetricExpr": "PM_L2_INST_MISS / PM_L2_INST * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_inst_miss_ratio_percent"
- },
- {
- "BriefDescription": "Average number of cycles between L2 Load hits",
- "MetricExpr": "(PM_L2_LD_HIT / PM_RUN_CYC) / 2",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_ld_hit_frequency"
- },
- {
- "BriefDescription": "Average number of cycles between L2 Load misses",
- "MetricExpr": "(PM_L2_LD_MISS / PM_RUN_CYC) / 2",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_ld_miss_frequency"
- },
- {
- "BriefDescription": "L2 Load misses as a % of total L2 Load dispatches (per thread)",
- "MetricExpr": "PM_L2_LD_MISS / PM_L2_LD * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_ld_miss_ratio_percent"
- },
- {
- "BriefDescription": "% L2 load disp attempts Cache read Utilization (4 pclks per disp attempt)",
- "MetricExpr": "((PM_L2_RCLD_DISP/2)*4)/ PM_RUN_CYC * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_ld_rd_util"
- },
- {
- "BriefDescription": "L2 load misses that require a cache write (4 pclks per disp attempt) % of pclks",
- "MetricExpr": "((( PM_L2_LD_DISP - PM_L2_LD_HIT)/2)*4)/ PM_RUN_CYC * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_ldmiss_wr_util"
- },
- {
- "BriefDescription": "L2 local pump prediction success",
- "MetricExpr": "PM_L2_LOC_GUESS_CORRECT / (PM_L2_LOC_GUESS_CORRECT + PM_L2_LOC_GUESS_WRONG) * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_local_pred_correct_percent"
- },
- {
- "BriefDescription": "L2 COs that were in M,Me,Mu state as a % of all L2 COs",
- "MetricExpr": "PM_L2_CASTOUT_MOD / (PM_L2_CASTOUT_MOD + PM_L2_CASTOUT_SHR) * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_mod_co_percent"
- },
- {
- "BriefDescription": "% of L2 Load RC dispatch atampts that failed because of address collisions and cclass conflicts",
- "MetricExpr": "(PM_L2_RCLD_DISP_FAIL_ADDR )/ PM_L2_RCLD_DISP * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_rc_ld_disp_addr_fail_percent"
- },
- {
- "BriefDescription": "% of L2 Load RC dispatch attempts that failed",
- "MetricExpr": "(PM_L2_RCLD_DISP_FAIL_ADDR + PM_L2_RCLD_DISP_FAIL_OTHER)/ PM_L2_RCLD_DISP * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_rc_ld_disp_fail_percent"
- },
- {
- "BriefDescription": "% of L2 Store RC dispatch atampts that failed because of address collisions and cclass conflicts",
- "MetricExpr": "PM_L2_RCST_DISP_FAIL_ADDR / PM_L2_RCST_DISP * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_rc_st_disp_addr_fail_percent"
- },
- {
- "BriefDescription": "% of L2 Store RC dispatch attempts that failed",
- "MetricExpr": "(PM_L2_RCST_DISP_FAIL_ADDR + PM_L2_RCST_DISP_FAIL_OTHER)/ PM_L2_RCST_DISP * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_rc_st_disp_fail_percent"
- },
- {
- "BriefDescription": "L2 Cache Read Utilization (per core)",
- "MetricExpr": "(((PM_L2_RCLD_DISP/2)*4)/ PM_RUN_CYC * 100) + (((PM_L2_RCST_DISP/2)*4)/PM_RUN_CYC * 100) + (((PM_L2_CASTOUT_MOD/2)*4)/PM_RUN_CYC * 100)",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_rd_util_percent"
- },
- {
- "BriefDescription": "L2 COs that were in T,Te,Si,S state as a % of all L2 COs",
- "MetricExpr": "PM_L2_CASTOUT_SHR / (PM_L2_CASTOUT_MOD + PM_L2_CASTOUT_SHR) * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_shr_co_percent"
- },
- {
"BriefDescription": "L2 Store misses as a % of total L2 Store dispatches (per thread)",
"MetricExpr": "PM_L2_ST_MISS / PM_L2_ST * 100",
"MetricGroup": "l2_stats",
"MetricName": "l2_st_miss_ratio_percent"
},
{
- "BriefDescription": "% L2 store disp attempts Cache read Utilization (4 pclks per disp attempt)",
- "MetricExpr": "((PM_L2_RCST_DISP/2)*4) / PM_RUN_CYC * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_st_rd_util"
- },
- {
"BriefDescription": "L2 stores that require a cache write (4 pclks per disp attempt) % of pclks",
"MetricExpr": "((PM_L2_ST_DISP/2)*4) / PM_RUN_CYC * 100",
"MetricGroup": "l2_stats",
"MetricName": "l2_st_wr_util"
},
{
- "BriefDescription": "L2 Cache Write Utilization (per core)",
- "MetricExpr": "((((PM_L2_LD_DISP - PM_L2_LD_HIT)/2)*4) / PM_RUN_CYC * 100) + (((PM_L2_ST_DISP/2)*4) / PM_RUN_CYC * 100)",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_wr_util_percent"
- },
- {
- "BriefDescription": "Average number of cycles between L3 Load hits",
- "MetricExpr": "(PM_L3_LD_HIT / PM_RUN_CYC) / 2",
- "MetricGroup": "l3_stats",
- "MetricName": "l3_ld_hit_frequency"
- },
- {
- "BriefDescription": "Average number of cycles between L3 Load misses",
- "MetricExpr": "(PM_L3_LD_MISS / PM_RUN_CYC) / 2",
- "MetricGroup": "l3_stats",
- "MetricName": "l3_ld_miss_frequency"
- },
- {
- "BriefDescription": "Average number of Write-in machines used. 1 of 8 WI machines is sampled every L3 cycle",
- "MetricExpr": "(PM_L3_WI_USAGE / PM_RUN_CYC) * 8",
- "MetricGroup": "l3_stats",
- "MetricName": "l3_wi_usage"
- },
- {
"BriefDescription": "Average icache miss latency",
"MetricExpr": "PM_IC_DEMAND_CYC / PM_IC_DEMAND_REQ",
"MetricGroup": "latency",
@@ -1823,7 +1691,7 @@
"MetricName": "custom_secs"
},
{
- "BriefDescription": "Percentage Cycles atleast one instruction dispatched",
+ "BriefDescription": "Percentage Cycles at least one instruction dispatched",
"MetricExpr": "PM_1PLUS_PPC_DISP / PM_CYC * 100",
"MetricName": "cycles_atleast_one_inst_dispatched_percent"
},
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
index 4ea7ec4f496e..0d46cb82bd52 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
@@ -38,31 +38,31 @@
"EventName": "ic_fetch_stall.ic_stall_any",
"EventCode": "0x87",
"BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ic_fetch_stall.ic_stall_dq_empty",
"EventCode": "0x87",
"BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ic_fetch_stall.ic_stall_back_pressure",
"EventCode": "0x87",
"BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ic_cache_inval.l2_invalidating_probe",
"EventCode": "0x8c",
"BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS). The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ic_cache_inval.fill_invalidated",
"EventCode": "0x8c",
"BriefDescription": "IC line invalidated due to overwriting fill response. The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "bp_tlb_rel",
@@ -97,25 +97,25 @@
"EventName": "l2_request_g1.change_to_x",
"EventCode": "0x60",
"BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache state change requests. Request change to writable, check L2 for current state.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "l2_request_g1.prefetch_l2_cmd",
"EventCode": "0x60",
"BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). PrefetchL2Cmd.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_request_g1.l2_hw_pf",
"EventCode": "0x60",
"BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). L2 Prefetcher. All prefetches accepted by L2 pipeline, hit or miss. Types of PF and L2 hit/miss broken out in a separate perfmon event.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "l2_request_g1.group2",
"EventCode": "0x60",
"BriefDescription": "Miscellaneous events covered in more detail by l2_request_g2 (PMCx061).",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_request_g1.all_no_prefetch",
@@ -150,31 +150,31 @@
"EventName": "l2_request_g2.ic_rd_sized_nc",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Instruction cache read sized non-cacheable.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "l2_request_g2.smc_inval",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Self-modifying code invalidates.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_request_g2.bus_locks_originator",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus locks.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "l2_request_g2.bus_locks_responses",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus lock response.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_latency.l2_cycles_waiting_on_fills",
"EventCode": "0x62",
"BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_wcb_req.wcb_write",
@@ -192,13 +192,13 @@
"EventName": "l2_wcb_req.zero_byte_store",
"EventCode": "0x63",
"BriefDescription": "LS to L2 WCB zero byte store requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_wcb_req.cl_zero",
"EventCode": "0x63",
"BriefDescription": "LS to L2 WCB cache line zeroing requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_cache_req_stat.ls_rd_blk_cs",
@@ -228,37 +228,37 @@
"EventName": "l2_cache_req_stat.ls_rd_blk_c",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache request miss in L2 (all types).",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "l2_cache_req_stat.ic_fill_hit_x",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit modifiable line in L2.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_cache_req_stat.ic_fill_hit_s",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit clean line in L2.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "l2_cache_req_stat.ic_fill_miss",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_cache_req_stat.ic_access_in_l2",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache requests in L2.",
- "UMask": "0x7"
+ "UMask": "0x07"
},
{
"EventName": "l2_cache_req_stat.ic_dc_miss_in_l2",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2 and Data cache request miss in L2 (all types).",
- "UMask": "0x9"
+ "UMask": "0x09"
},
{
"EventName": "l2_cache_req_stat.ic_dc_hit_in_l2",
@@ -270,12 +270,12 @@
"EventName": "l2_fill_pending.l2_fill_busy",
"EventCode": "0x6d",
"BriefDescription": "Cycles with fill pending from L2. Total cycles spent with one or more fill requests in flight from L2.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_pf_hit_l2",
"EventCode": "0x70",
- "BriefDescription": "L2 prefetch hit in L2.",
+ "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
"UMask": "0xff"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/core.json b/tools/perf/pmu-events/arch/x86/amdzen1/core.json
index 653b11b23399..4dceeabc4a9f 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen1/core.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/core.json
@@ -68,21 +68,21 @@
"EventCode": "0xcb",
"BriefDescription": "SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
"PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ex_ret_mmx_fp_instr.mmx_instr",
"EventCode": "0xcb",
"BriefDescription": "MMX instructions.",
"PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. MMX instructions.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ex_ret_mmx_fp_instr.x87_instr",
"EventCode": "0xcb",
"BriefDescription": "x87 instructions.",
"PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. x87 instructions.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ex_ret_cond",
@@ -103,19 +103,19 @@
"EventName": "ex_tagged_ibs_ops.ibs_count_rollover",
"EventCode": "0x1cf",
"BriefDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
"EventCode": "0x1cf",
"BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
"EventCode": "0x1cf",
"BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ex_ret_fus_brnch_inst",
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/floating-point.json b/tools/perf/pmu-events/arch/x86/amdzen1/floating-point.json
index a35542bd3b36..3995b528ebd6 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen1/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/floating-point.json
@@ -39,35 +39,35 @@
"EventCode": "0x00",
"BriefDescription": "Total number uOps assigned to all fpu pipes.",
"PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to all pipes.",
- "UMask": "0xf"
+ "UMask": "0x0f"
},
{
"EventName": "fpu_pipe_assignment.total3",
"EventCode": "0x00",
"BriefDescription": "Total number of fp uOps on pipe 3.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one-cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 3.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fpu_pipe_assignment.total2",
"EventCode": "0x00",
"BriefDescription": "Total number of fp uOps on pipe 2.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 2.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fpu_pipe_assignment.total1",
"EventCode": "0x00",
"BriefDescription": "Total number of fp uOps on pipe 1.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 1.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fpu_pipe_assignment.total0",
"EventCode": "0x00",
"BriefDescription": "Total number of fp uOps on pipe 0.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 0.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_sched_empty",
@@ -79,28 +79,28 @@
"EventCode": "0x02",
"BriefDescription": "All Ops.",
"PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8.",
- "UMask": "0x7"
+ "UMask": "0x07"
},
{
"EventName": "fp_retx87_fp_ops.div_sqr_r_ops",
"EventCode": "0x02",
"BriefDescription": "Divide and square root Ops.",
"PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Divide and square root Ops.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_retx87_fp_ops.mul_ops",
"EventCode": "0x02",
"BriefDescription": "Multiply Ops.",
"PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Multiply Ops.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_retx87_fp_ops.add_sub_ops",
"EventCode": "0x02",
"BriefDescription": "Add/subtract Ops.",
"PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Add/subtract Ops.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_ret_sse_avx_ops.all",
@@ -142,83 +142,83 @@
"EventCode": "0x03",
"BriefDescription": "Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
"PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fp_ret_sse_avx_ops.sp_div_flops",
"EventCode": "0x03",
"BriefDescription": "Single-precision divide/square root FLOPS.",
"PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision divide/square root FLOPS.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_ret_sse_avx_ops.sp_mult_flops",
"EventCode": "0x03",
"BriefDescription": "Single-precision multiply FLOPS.",
"PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision multiply FLOPS.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_ret_sse_avx_ops.sp_add_sub_flops",
"EventCode": "0x03",
"BriefDescription": "Single-precision add/subtract FLOPS.",
"PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision add/subtract FLOPS.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_num_mov_elim_scal_op.optimized",
"EventCode": "0x04",
"BriefDescription": "Number of Scalar Ops optimized.",
"PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Scalar Ops optimized.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fp_num_mov_elim_scal_op.opt_potential",
"EventCode": "0x04",
"BriefDescription": "Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
"PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_num_mov_elim_scal_op.sse_mov_ops_elim",
"EventCode": "0x04",
"BriefDescription": "Number of SSE Move Ops eliminated.",
"PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops eliminated.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_num_mov_elim_scal_op.sse_mov_ops",
"EventCode": "0x04",
"BriefDescription": "Number of SSE Move Ops.",
"PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_retired_ser_ops.x87_ctrl_ret",
"EventCode": "0x05",
"BriefDescription": "x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
"PublicDescription": "The number of serializing Ops retired. x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fp_retired_ser_ops.x87_bot_ret",
"EventCode": "0x05",
"BriefDescription": "x87 bottom-executing uOps retired.",
"PublicDescription": "The number of serializing Ops retired. x87 bottom-executing uOps retired.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_retired_ser_ops.sse_ctrl_ret",
"EventCode": "0x05",
"BriefDescription": "SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
"PublicDescription": "The number of serializing Ops retired. SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_retired_ser_ops.sse_bot_ret",
"EventCode": "0x05",
"BriefDescription": "SSE bottom-executing uOps retired.",
"PublicDescription": "The number of serializing Ops retired. SSE bottom-executing uOps retired.",
- "UMask": "0x1"
+ "UMask": "0x01"
}
]
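
The fp_ret_sse_avx_ops.* descriptions above spell out the counting rules (a multiply-add contributes 2 FLOPS; up to 64 events may be logged per cycle), which makes the MFLOPS arithmetic trivial once a count and an interval are in hand. A minimal sketch, assuming the count and elapsed time come from an external reading such as perf stat:

    def mflops(flop_count: int, elapsed_seconds: float) -> float:
        # Multiply-adds are already counted as 2 FLOPS by the event,
        # so the raw count needs no extra scaling.
        return flop_count / (elapsed_seconds * 1e6)

    print(mflops(12_800_000_000, 4.0))  # 3200.0
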
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/memory.json b/tools/perf/pmu-events/arch/x86/amdzen1/memory.json
index b33a3c308019..385022fb026e 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen1/memory.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/memory.json
@@ -3,25 +3,25 @@
"EventName": "ls_locks.bus_lock",
"EventCode": "0x25",
"BriefDescription": "Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_dispatch.ld_st_dispatch",
"EventCode": "0x29",
"BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed. Load-op-Stores.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_dispatch.store_dispatch",
"EventCode": "0x29",
"BriefDescription": "Counts the number of stores dispatched to the LS unit. Unit Masks ADDed.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_dispatch.ld_dispatch",
"EventCode": "0x29",
"BriefDescription": "Counts the number of loads dispatched to the LS unit. Unit Masks ADDed.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_stlf",
@@ -37,13 +37,13 @@
"EventName": "ls_mab_alloc.dc_prefetcher",
"EventCode": "0x41",
"BriefDescription": "LS MAB allocates by type - DC prefetcher.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_mab_alloc.stores",
"EventCode": "0x41",
"BriefDescription": "LS MAB allocates by type - stores.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_mab_alloc.loads",
@@ -85,61 +85,61 @@
"EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Reload of a page of 1G size.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Reload of a page of 2M size.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Reload of a page of 32K size.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Reload of a page of 4K size.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_tablewalker.iside",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks on I-side.",
- "UMask": "0xc"
+ "UMask": "0x0c"
},
{
"EventName": "ls_tablewalker.ic_type1",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks IC Type 1.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_tablewalker.ic_type0",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks IC Type 0.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_tablewalker.dside",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks on D-side.",
- "UMask": "0x3"
+ "UMask": "0x03"
},
{
"EventName": "ls_tablewalker.dc_type1",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks DC Type 1.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_tablewalker.dc_type0",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks DC Type 0.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_misal_accesses",
@@ -150,31 +150,31 @@
"EventName": "ls_pref_instr_disp.prefetch_nta",
"EventCode": "0x4b",
"BriefDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_pref_instr_disp.store_prefetch_w",
"EventCode": "0x4b",
"BriefDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_pref_instr_disp.load_prefetch_w",
"EventCode": "0x4b",
"BriefDescription": "Software Prefetch Instructions Dispatched. Prefetch, Prefetch_T0_T1_T2.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_inef_sw_pref.mab_mch_cnt",
"EventCode": "0x52",
"BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a match on an already-allocated miss request buffer.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
"EventCode": "0x52",
"BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a DC hit.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_not_halted_cyc",
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/other.json b/tools/perf/pmu-events/arch/x86/amdzen1/other.json
index ff780098d36e..7626986ce1fb 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen1/other.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/other.json
@@ -3,13 +3,13 @@
"EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
"EventCode": "0x28a",
"BriefDescription": "OC Mode Switch. OC to IC mode switch.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
"EventCode": "0x28a",
"BriefDescription": "OC Mode Switch. IC to OC mode switch.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "de_dis_dispatch_token_stalls0.retire_token_stall",
@@ -33,24 +33,24 @@
"EventName": "de_dis_dispatch_token_stalls0.alsq3_0_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 3_0 Tokens unavailable.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "de_dis_dispatch_token_stalls0.alsq3_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 3 Tokens unavailable.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "de_dis_dispatch_token_stalls0.alsq2_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 2 Tokens unavailable.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "de_dis_dispatch_token_stalls0.alsq1_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 1 Tokens unavailable.",
- "UMask": "0x1"
+ "UMask": "0x01"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
index 2cfe2d2f3bfd..bf5083c1c260 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
@@ -10,7 +10,7 @@
"EventName": "all_dc_accesses",
"EventCode": "0x29",
"BriefDescription": "All L1 Data Cache Accesses",
- "UMask": "0x7"
+ "UMask": "0x07"
},
{
"MetricName": "all_l2_cache_accesses",
@@ -79,10 +79,10 @@
"UMask": "0x70"
},
{
- "MetricName": "l2_cache_hits_from_l2_hwpf",
+ "EventName": "l2_cache_hits_from_l2_hwpf",
+ "EventCode": "0x70",
"BriefDescription": "L2 Cache Hits from L2 HWPF",
- "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
- "MetricGroup": "l2_cache"
+ "UMask": "0xff"
},
{
"EventName": "l3_accesses",
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/branch.json b/tools/perf/pmu-events/arch/x86/amdzen2/branch.json
index ef4166a66288..84fb43fa59ad 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen2/branch.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/branch.json
@@ -24,25 +24,25 @@
"EventName": "bp_l1_tlb_fetch_hit",
"EventCode": "0x94",
"BriefDescription": "The number of instruction fetches that hit in the L1 ITLB.",
- "UMask": "0xFF"
+ "UMask": "0xff"
},
{
"EventName": "bp_l1_tlb_fetch_hit.if1g",
"EventCode": "0x94",
"BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. Instruction fetches to a 1GB page.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "bp_l1_tlb_fetch_hit.if2m",
"EventCode": "0x94",
"BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. Instruction fetches to a 2MB page.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "bp_l1_tlb_fetch_hit.if4k",
"EventCode": "0x94",
"BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. Instruction fetches to a 4KB page.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "bp_tlb_rel",
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
index f61b982f83ca..c858fb9477e3 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
@@ -27,25 +27,25 @@
"EventName": "l2_request_g1.change_to_x",
"EventCode": "0x60",
"BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache state change requests. Request change to writable, check L2 for current state.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "l2_request_g1.prefetch_l2_cmd",
"EventCode": "0x60",
"BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). PrefetchL2Cmd.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_request_g1.l2_hw_pf",
"EventCode": "0x60",
"BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). L2 Prefetcher. All prefetches accepted by L2 pipeline, hit or miss. Types of PF and L2 hit/miss broken out in a separate perfmon event.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "l2_request_g1.group2",
"EventCode": "0x60",
"BriefDescription": "Miscellaneous events covered in more detail by l2_request_g2 (PMCx061).",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_request_g1.all_no_prefetch",
@@ -80,31 +80,31 @@
"EventName": "l2_request_g2.ic_rd_sized_nc",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Instruction cache read sized non-cacheable.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "l2_request_g2.smc_inval",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Self-modifying code invalidates.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_request_g2.bus_locks_originator",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus locks.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "l2_request_g2.bus_locks_responses",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus lock response.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_latency.l2_cycles_waiting_on_fills",
"EventCode": "0x62",
"BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_wcb_req.wcb_write",
@@ -122,13 +122,13 @@
"EventName": "l2_wcb_req.zero_byte_store",
"EventCode": "0x63",
"BriefDescription": "LS to L2 WCB zero byte store requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_wcb_req.cl_zero",
"EventCode": "0x63",
"BriefDescription": "LS to L2 WCB cache line zeroing requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_cache_req_stat.ls_rd_blk_cs",
@@ -158,37 +158,37 @@
"EventName": "l2_cache_req_stat.ls_rd_blk_c",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache request miss in L2 (all types).",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "l2_cache_req_stat.ic_fill_hit_x",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit modifiable line in L2.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_cache_req_stat.ic_fill_hit_s",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit clean line in L2.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "l2_cache_req_stat.ic_fill_miss",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_cache_req_stat.ic_access_in_l2",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache requests in L2.",
- "UMask": "0x7"
+ "UMask": "0x07"
},
{
"EventName": "l2_cache_req_stat.ic_dc_miss_in_l2",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2 and Data cache request miss in L2 (all types).",
- "UMask": "0x9"
+ "UMask": "0x09"
},
{
"EventName": "l2_cache_req_stat.ic_dc_hit_in_l2",
@@ -200,12 +200,12 @@
"EventName": "l2_fill_pending.l2_fill_busy",
"EventCode": "0x6d",
"BriefDescription": "Cycles with fill pending from L2. Total cycles spent with one or more fill requests in flight from L2.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_pf_hit_l2",
"EventCode": "0x70",
- "BriefDescription": "L2 prefetch hit in L2.",
+ "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
"UMask": "0xff"
},
{
@@ -255,19 +255,19 @@
"EventName": "bp_l1_tlb_miss_l2_tlb_miss.if1g",
"EventCode": "0x85",
"BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs. Instruction fetches to a 1GB page.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "bp_l1_tlb_miss_l2_tlb_miss.if2m",
"EventCode": "0x85",
"BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs. Instruction fetches to a 2MB page.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "bp_l1_tlb_miss_l2_tlb_miss.if4k",
"EventCode": "0x85",
"BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs. Instruction fetches to a 4KB page.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "bp_snp_re_sync",
@@ -278,43 +278,43 @@
"EventName": "ic_fetch_stall.ic_stall_any",
"EventCode": "0x87",
"BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ic_fetch_stall.ic_stall_dq_empty",
"EventCode": "0x87",
"BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ic_fetch_stall.ic_stall_back_pressure",
"EventCode": "0x87",
"BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ic_cache_inval.l2_invalidating_probe",
"EventCode": "0x8c",
"BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS). The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ic_cache_inval.fill_invalidated",
"EventCode": "0x8c",
"BriefDescription": "IC line invalidated due to overwriting fill response. The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
"EventCode": "0x28a",
"BriefDescription": "OC Mode Switch. OC to IC mode switch.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
"EventCode": "0x28a",
"BriefDescription": "OC Mode Switch. IC to OC mode switch.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l3_request_g1.caching_l3_cache_accesses",
@@ -353,7 +353,7 @@
},
{
"EventName": "xi_ccx_sdp_req1.all_l3_miss_req_typs",
- "EventCode": "0x9A",
+ "EventCode": "0x9a",
"BriefDescription": "All L3 Miss Request Types. Ignores SliceMask and ThreadMask.",
"UMask": "0x3f",
"Unit": "L3PMC"
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/core.json b/tools/perf/pmu-events/arch/x86/amdzen2/core.json
index 4b75183da94a..bed14829f0bc 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen2/core.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/core.json
@@ -68,21 +68,21 @@
"EventCode": "0xcb",
"BriefDescription": "SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
"PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ex_ret_mmx_fp_instr.mmx_instr",
"EventCode": "0xcb",
"BriefDescription": "MMX instructions.",
"PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. MMX instructions.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ex_ret_mmx_fp_instr.x87_instr",
"EventCode": "0xcb",
"BriefDescription": "x87 instructions.",
"PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. x87 instructions.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ex_ret_cond",
@@ -108,19 +108,19 @@
"EventName": "ex_tagged_ibs_ops.ibs_count_rollover",
"EventCode": "0x1cf",
"BriefDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
"EventCode": "0x1cf",
"BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
"EventCode": "0x1cf",
"BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ex_ret_fus_brnch_inst",
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json b/tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json
index 622a0c420e46..91ed96f2580b 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json
@@ -4,35 +4,35 @@
"EventCode": "0x00",
"BriefDescription": "Total number of fp uOps.",
"PublicDescription": "Total number of fp uOps. The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS.",
- "UMask": "0xf"
+ "UMask": "0x0f"
},
{
"EventName": "fpu_pipe_assignment.total3",
"EventCode": "0x00",
"BriefDescription": "Total number uOps assigned to pipe 3.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one-cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 3.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fpu_pipe_assignment.total2",
"EventCode": "0x00",
"BriefDescription": "Total number uOps assigned to pipe 2.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 2.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fpu_pipe_assignment.total1",
"EventCode": "0x00",
"BriefDescription": "Total number uOps assigned to pipe 1.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 1.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fpu_pipe_assignment.total0",
"EventCode": "0x00",
"BriefDescription": "Total number of fp uOps on pipe 0.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 0.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_ret_sse_avx_ops.all",
@@ -45,96 +45,96 @@
"EventCode": "0x03",
"BriefDescription": "Multiply-add FLOPS. Multiply-add counts as 2 FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
"PublicDescription": "",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fp_ret_sse_avx_ops.div_flops",
"EventCode": "0x03",
"BriefDescription": "Divide/square root FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_ret_sse_avx_ops.mult_flops",
"EventCode": "0x03",
"BriefDescription": "Multiply FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_ret_sse_avx_ops.add_sub_flops",
"EventCode": "0x03",
"BriefDescription": "Add/subtract FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_num_mov_elim_scal_op.optimized",
"EventCode": "0x04",
"BriefDescription": "Number of Scalar Ops optimized. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fp_num_mov_elim_scal_op.opt_potential",
"EventCode": "0x04",
"BriefDescription": "Number of Ops that are candidates for optimization (have Z-bit either set or pass). This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_num_mov_elim_scal_op.sse_mov_ops_elim",
"EventCode": "0x04",
"BriefDescription": "Number of SSE Move Ops eliminated. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_num_mov_elim_scal_op.sse_mov_ops",
"EventCode": "0x04",
"BriefDescription": "Number of SSE Move Ops. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_retired_ser_ops.sse_bot_ret",
"EventCode": "0x05",
"BriefDescription": "SSE bottom-executing uOps retired. The number of serializing Ops retired.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fp_retired_ser_ops.sse_ctrl_ret",
"EventCode": "0x05",
"BriefDescription": "The number of serializing Ops retired. SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_retired_ser_ops.x87_bot_ret",
"EventCode": "0x05",
"BriefDescription": "x87 bottom-executing uOps retired. The number of serializing Ops retired.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_retired_ser_ops.x87_ctrl_ret",
"EventCode": "0x05",
"BriefDescription": "x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits. The number of serializing Ops retired.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_disp_faults.ymm_spill_fault",
"EventCode": "0x0e",
"BriefDescription": "Floating Point Dispatch Faults. YMM spill fault.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fp_disp_faults.ymm_fill_fault",
"EventCode": "0x0e",
"BriefDescription": "Floating Point Dispatch Faults. YMM fill fault.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_disp_faults.xmm_fill_fault",
"EventCode": "0x0e",
"BriefDescription": "Floating Point Dispatch Faults. XMM fill fault.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_disp_faults.x87_fill_fault",
"EventCode": "0x0e",
"BriefDescription": "Floating Point Dispatch Faults. x87 fill fault.",
- "UMask": "0x1"
+ "UMask": "0x01"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/memory.json b/tools/perf/pmu-events/arch/x86/amdzen2/memory.json
index 715046b339cb..89822b9ddb79 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen2/memory.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/memory.json
@@ -4,31 +4,31 @@
"EventCode": "0x24",
"BriefDescription": "Non-forwardable conflict; used to reduce STLI's via software. All reasons. Store To Load Interlock (STLI) are loads that were unable to complete because of a possible match with an older store, and the older store could not do STLF for some reason.",
"PublicDescription" : "Store-to-load conflicts: A load was unable to complete due to a non-forwardable conflict with an older store. Most commonly, a load's address range partially but not completely overlaps with an uncompleted older store. Software can avoid this problem by using same-size and same-alignment loads and stores when accessing the same data. Vector/SIMD code is particularly susceptible to this problem; software should construct wide vector stores by manipulating vector elements in registers using shuffle/blend/swap instructions prior to storing to memory, instead of using narrow element-by-element stores.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_locks.spec_lock_hi_spec",
"EventCode": "0x25",
"BriefDescription": "Retired lock instructions. High speculative cacheable lock speculation succeeded.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_locks.spec_lock_lo_spec",
"EventCode": "0x25",
"BriefDescription": "Retired lock instructions. Low speculative cacheable lock speculation succeeded.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_locks.non_spec_lock",
"EventCode": "0x25",
"BriefDescription": "Retired lock instructions. Non-speculative lock succeeded.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_locks.bus_lock",
"EventCode": "0x25",
"BriefDescription": "Retired lock instructions. Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type. Comparable to legacy bus lock.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_ret_cl_flush",
@@ -44,33 +44,33 @@
"EventName": "ls_dispatch.ld_st_dispatch",
"EventCode": "0x29",
"BriefDescription": "Dispatch of a single op that performs a load from and store to the same memory address. Number of single ops that do load/store to an address.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_dispatch.store_dispatch",
"EventCode": "0x29",
"BriefDescription": "Number of stores dispatched. Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_dispatch.ld_dispatch",
"EventCode": "0x29",
"BriefDescription": "Number of loads dispatched. Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_smi_rx",
- "EventCode": "0x2B",
+ "EventCode": "0x2b",
"BriefDescription": "Number of SMIs received."
},
{
"EventName": "ls_int_taken",
- "EventCode": "0x2C",
+ "EventCode": "0x2c",
"BriefDescription": "Number of interrupts taken."
},
{
"EventName": "ls_rdtsc",
- "EventCode": "0x2D",
+ "EventCode": "0x2d",
"BriefDescription": "Number of reads of the TSC (RDTSC instructions). The count is speculative."
},
{
@@ -93,19 +93,19 @@
"EventName": "ls_mab_alloc.dc_prefetcher",
"EventCode": "0x41",
"BriefDescription": "LS MAB Allocates by Type. DC prefetcher.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_mab_alloc.stores",
"EventCode": "0x41",
"BriefDescription": "LS MAB Allocates by Type. Stores.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_mab_alloc.loads",
"EventCode": "0x41",
"BriefDescription": "LS MAB Allocates by Type. Loads.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_refills_from_sys.ls_mabresp_rmt_dram",
@@ -123,19 +123,19 @@
"EventName": "ls_refills_from_sys.ls_mabresp_lcl_dram",
"EventCode": "0x43",
"BriefDescription": "Demand Data Cache Fills by Data Source. DRAM or IO from this thread's die.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_refills_from_sys.ls_mabresp_lcl_cache",
"EventCode": "0x43",
"BriefDescription": "Demand Data Cache Fills by Data Source. Hit in cache; local CCX (not Local L2), or Remote CCX and the address's Home Node is on this thread's die.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_refills_from_sys.ls_mabresp_lcl_l2",
"EventCode": "0x43",
"BriefDescription": "Demand Data Cache Fills by Data Source. Local L2 hit.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_l1_d_tlb_miss.all",
@@ -171,61 +171,61 @@
"EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Miss. DTLB reload to a 1G page that hit in the L2 TLB.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Miss. DTLB reload to a 2M page that hit in the L2 TLB.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Miss. DTLB reload hit a coalesced page.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Miss. DTLB reload to a 4K page that hit in the L2 TLB.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_tablewalker.iside",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks on I-side.",
- "UMask": "0xc"
+ "UMask": "0x0c"
},
{
"EventName": "ls_tablewalker.ic_type1",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks IC Type 1.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_tablewalker.ic_type0",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks IC Type 0.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_tablewalker.dside",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks on D-side.",
- "UMask": "0x3"
+ "UMask": "0x03"
},
{
"EventName": "ls_tablewalker.dc_type1",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks DC Type 1.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_tablewalker.dc_type0",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks DC Type 0.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_misal_accesses",
@@ -242,31 +242,31 @@
"EventName": "ls_pref_instr_disp.prefetch_nta",
"EventCode": "0x4b",
"BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). PrefetchNTA instruction. See docAPM3 PREFETCHlevel.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_pref_instr_disp.prefetch_w",
"EventCode": "0x4b",
"BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). See docAPM3 PREFETCHW.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_pref_instr_disp.prefetch",
"EventCode": "0x4b",
"BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). Prefetch_T0_T1_T2. PrefetchT0, T1 and T2 instructions. See docAPM3 PREFETCHlevel.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_inef_sw_pref.mab_mch_cnt",
"EventCode": "0x52",
"BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a match on an already-allocated miss request buffer.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
"EventCode": "0x52",
"BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a DC hit.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_sw_pf_dc_fill.ls_mabresp_rmt_dram",
@@ -284,49 +284,49 @@
"EventName": "ls_sw_pf_dc_fill.ls_mabresp_lcl_dram",
"EventCode": "0x59",
"BriefDescription": "Software Prefetch Data Cache Fills by Data Source. DRAM or IO from this thread's die. From DRAM (home node local).",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_sw_pf_dc_fill.ls_mabresp_lcl_cache",
"EventCode": "0x59",
"BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From another cache (home node local).",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_sw_pf_dc_fill.ls_mabresp_lcl_l2",
"EventCode": "0x59",
"BriefDescription": "Software Prefetch Data Cache Fills by Data Source. Local L2 hit.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_hw_pf_dc_fill.ls_mabresp_rmt_dram",
- "EventCode": "0x5A",
+ "EventCode": "0x5a",
"BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From DRAM (home node remote).",
"UMask": "0x40"
},
{
"EventName": "ls_hw_pf_dc_fill.ls_mabresp_rmt_cache",
- "EventCode": "0x5A",
+ "EventCode": "0x5a",
"BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From another cache (home node remote).",
"UMask": "0x10"
},
{
"EventName": "ls_hw_pf_dc_fill.ls_mabresp_lcl_dram",
- "EventCode": "0x5A",
+ "EventCode": "0x5a",
"BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From DRAM (home node local).",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_hw_pf_dc_fill.ls_mabresp_lcl_cache",
- "EventCode": "0x5A",
+ "EventCode": "0x5a",
"BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From another cache (home node local).",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_hw_pf_dc_fill.ls_mabresp_lcl_l2",
- "EventCode": "0x5A",
+ "EventCode": "0x5a",
"BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. Local L2 hit.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_not_halted_cyc",
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/other.json b/tools/perf/pmu-events/arch/x86/amdzen2/other.json
index e94994d4a60e..1bdf106ca785 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen2/other.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/other.json
@@ -14,13 +14,13 @@
"EventName": "de_dis_uops_from_decoder.opcache_dispatched",
"EventCode": "0xaa",
"BriefDescription": "Count of dispatched Ops from OpCache.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "de_dis_uops_from_decoder.decoder_dispatched",
"EventCode": "0xaa",
"BriefDescription": "Count of dispatched Ops from Decoder.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "de_dis_dispatch_token_stalls1.fp_misc_rsrc_stall",
@@ -50,25 +50,25 @@
"EventName": "de_dis_dispatch_token_stalls1.int_sched_misc_token_stall",
"EventCode": "0xae",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Integer Scheduler miscellaneous resource stall.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "de_dis_dispatch_token_stalls1.store_queue_token_stall",
"EventCode": "0xae",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Store queue resource stall. Applies to all ops with store semantics.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "de_dis_dispatch_token_stalls1.load_queue_token_stall",
"EventCode": "0xae",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Load queue resource stall. Applies to all ops with load semantics.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "de_dis_dispatch_token_stalls1.int_phy_reg_file_token_stall",
"EventCode": "0xae",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Integer Physical Register File resource stall. Applies to all ops that have an integer destination register.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "de_dis_dispatch_token_stalls0.sc_agu_dispatch_stall",
@@ -92,24 +92,24 @@
"EventName": "de_dis_dispatch_token_stalls0.alu_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALU tokens total unavailable.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "de_dis_dispatch_token_stalls0.alsq3_0_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ3_0_TokenStall.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "de_dis_dispatch_token_stalls0.alsq2_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 2 Tokens unavailable.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "de_dis_dispatch_token_stalls0.alsq1_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 1 Tokens unavailable.",
- "UMask": "0x1"
+ "UMask": "0x01"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
index 2ef91e25e661..a71694a043ba 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
@@ -10,7 +10,7 @@
"EventName": "all_dc_accesses",
"EventCode": "0x29",
"BriefDescription": "All L1 Data Cache Accesses",
- "UMask": "0x7"
+ "UMask": "0x07"
},
{
"MetricName": "all_l2_cache_accesses",
@@ -79,10 +79,10 @@
"UMask": "0x70"
},
{
- "MetricName": "l2_cache_hits_from_l2_hwpf",
+ "EventName": "l2_cache_hits_from_l2_hwpf",
+ "EventCode": "0x70",
"BriefDescription": "L2 Cache Hits from L2 HWPF",
- "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
- "MetricGroup": "l2_cache"
+ "UMask": "0xff"
},
{
"EventName": "l3_accesses",
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/branch.json b/tools/perf/pmu-events/arch/x86/amdzen3/branch.json
new file mode 100644
index 000000000000..018a7fe94fb9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/branch.json
@@ -0,0 +1,53 @@
+[
+ {
+ "EventName": "bp_l1_btb_correct",
+ "EventCode": "0x8a",
+ "BriefDescription": "L1 Branch Prediction Overrides Existing Prediction (speculative)."
+ },
+ {
+ "EventName": "bp_l2_btb_correct",
+ "EventCode": "0x8b",
+ "BriefDescription": "L2 Branch Prediction Overrides Existing Prediction (speculative)."
+ },
+ {
+ "EventName": "bp_dyn_ind_pred",
+ "EventCode": "0x8e",
+ "BriefDescription": "Dynamic Indirect Predictions.",
+ "PublicDescription": "The number of times a branch used the indirect predictor to make a prediction."
+ },
+ {
+ "EventName": "bp_de_redirect",
+ "EventCode": "0x91",
+ "BriefDescription": "Decode Redirects",
+ "PublicDescription": "The number of times the instruction decoder overrides the predicted target."
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.if1g",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. L1 Instruction TLB hit (1G page size).",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.if2m",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. L1 Instruction TLB hit (2M page size).",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.if4k",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. L1 Instrcution TLB hit (4K or 16K page size).",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "bp_tlb_rel",
+ "EventCode": "0x99",
+ "BriefDescription": "The number of ITLB reload requests."
+ }
+]
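
As the new bp_l1_tlb_fetch_hit entries show, sub-event unit masks are single bits, so a combined event is simply the OR of its parts (the catch-all entries use 0xff to select every bit, defined or not). A small sketch using masks taken from elsewhere in this patch:

    IF4K, IF2M, IF1G = 0x01, 0x02, 0x04
    assert IF4K | IF2M | IF1G == 0x07   # hit at any page size
    assert 0x08 | 0x04 == 0x0c          # ls_tablewalker.iside = ic_type1 | ic_type0
    assert 0x02 | 0x01 == 0x03          # ls_tablewalker.dside = dc_type1 | dc_type0
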
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/cache.json b/tools/perf/pmu-events/arch/x86/amdzen3/cache.json
new file mode 100644
index 000000000000..fa1d7499a2e3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/cache.json
@@ -0,0 +1,402 @@
+[
+ {
+ "EventName": "l2_request_g1.rd_blk_l",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache reads (including hardware and software prefetch).",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g1.rd_blk_x",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache stores.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g1.ls_rd_blk_c_s",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache shared reads.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g1.cacheable_ic_read",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Instruction cache reads.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g1.change_to_x",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache state change requests. Request change to writable, check L2 for current state.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "l2_request_g1.prefetch_l2_cmd",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). PrefetchL2Cmd.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_request_g1.l2_hw_pf",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). L2 Prefetcher. All prefetches accepted by L2 pipeline, hit or miss. Types of PF and L2 hit/miss broken out in a separate perfmon event.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "l2_request_g1.group2",
+ "EventCode": "0x60",
+ "BriefDescription": "Miscellaneous events covered in more detail by l2_request_g2 (PMCx061).",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_request_g1.all_no_prefetch",
+ "EventCode": "0x60",
+ "UMask": "0xf9"
+ },
+ {
+ "EventName": "l2_request_g2.group1",
+ "EventCode": "0x61",
+ "BriefDescription": "Miscellaneous events covered in more detail by l2_request_g1 (PMCx060).",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Data cache read sized.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Data cache read sized non-cacheable.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Instruction cache read sized.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Instruction cache read sized non-cacheable.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "l2_request_g2.smc_inval",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Self-modifying code invalidates.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_originator",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus locks.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_responses",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus lock response.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_latency.l2_cycles_waiting_on_fills",
+ "EventCode": "0x62",
+ "BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_write",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB write requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) write requests.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_close",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB close requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) close requests.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_wcb_req.zero_byte_store",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB zero byte store requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_wcb_req.cl_zero",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB cache line zeroing requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_cs",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache shared read hit in L2",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache read hit in L2. Modifiable.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache read hit non-modifiable line in L2.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache store or state change hit in L2.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_c",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache request miss in L2 (all types). Use l2_cache_misses_from_dc_misses instead.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit modifiable line in L2.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit non-modifiable line in L2.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_miss",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2. Use l2_cache_misses_from_ic_miss instead.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_access_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache requests in L2.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_dc_miss_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2 and Data cache request miss in L2 (all types).",
+ "UMask": "0x09"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_dc_hit_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request hit in L2 and Data cache request hit in L2 (all types).",
+ "UMask": "0xf6"
+ },
+ {
+ "EventName": "l2_fill_pending.l2_fill_busy",
+ "EventCode": "0x6d",
+ "BriefDescription": "Cycles with fill pending from L2. Total cycles spent with one or more fill requests in flight from L2.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_pf_hit_l2",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_hit_l3",
+ "EventCode": "0x71",
+ "BriefDescription": "L2 prefetcher hits in L3. Counts all L2 prefetches accepted by the L2 pipeline which miss the L2 cache and hit the L3.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_l3",
+ "EventCode": "0x72",
+ "BriefDescription": "L2 prefetcher misses in L3. Counts all L2 prefetches accepted by the L2 pipeline which miss the L2 and the L3 caches.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ic_fw32",
+ "EventCode": "0x80",
+ "BriefDescription": "The number of 32B fetch windows transferred from IC pipe to DE instruction decoder (includes non-cacheable and cacheable fill responses)."
+ },
+ {
+ "EventName": "ic_fw32_miss",
+ "EventCode": "0x81",
+ "BriefDescription": "The number of 32B fetch windows tried to read the L1 IC and missed in the full tag."
+ },
+ {
+ "EventName": "ic_cache_fill_l2",
+ "EventCode": "0x82",
+ "BriefDescription": "Instruction Cache Refills from L2. The number of 64 byte instruction cache line was fulfilled from the L2 cache."
+ },
+ {
+ "EventName": "ic_cache_fill_sys",
+ "EventCode": "0x83",
+ "BriefDescription": "Instruction Cache Refills from System. The number of 64 byte instruction cache line fulfilled from system memory or another cache."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_hit",
+ "EventCode": "0x84",
+ "BriefDescription": "L1 ITLB Miss, L2 ITLB Hit. The number of instruction fetches that miss in the L1 ITLB but hit in the L2 ITLB."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.coalesced_4k",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of valid fills into the ITLB originating from the LS Page-Table Walker. Tablewalk requests are issued for L1-ITLB and L2-ITLB misses. Walk for >4K Coalesced page.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if1g",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of valid fills into the ITLB originating from the LS Page-Table Walker. Tablewalk requests are issued for L1-ITLB and L2-ITLB misses. Walk for 1G page.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if2m",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of valid fills into the ITLB originating from the LS Page-Table Walker. Tablewalk requests are issued for L1-ITLB and L2-ITLB misses. Walk for 2M page.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if4k",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of valid fills into the ITLB originating from the LS Page-Table Walker. Tablewalk requests are issued for L1-ITLB and L2-ITLB misses. Walk to 4K page.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "bp_snp_re_sync",
+ "EventCode": "0x86",
+ "BriefDescription": "The number of pipeline restarts caused by invalidating probes that hit on the instruction stream currently being executed. This would happen if the active instruction stream was being modified by another processor in an MP system - typically a highly unlikely event."
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_any",
+ "EventCode": "0x87",
+ "BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_dq_empty",
+ "EventCode": "0x87",
+ "BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_back_pressure",
+ "EventCode": "0x87",
+ "BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ic_cache_inval.l2_invalidating_probe",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS). The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ic_cache_inval.fill_invalidated",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to overwriting fill response. The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ic_tag_hit_miss.all_instruction_cache_accesses",
+ "EventCode": "0x18e",
+ "BriefDescription": "All Instruction Cache Accesses. Counts various IC tag related hit and miss events.",
+ "UMask": "0x1f"
+ },
+ {
+ "EventName": "ic_tag_hit_miss.instruction_cache_miss",
+ "EventCode": "0x18e",
+ "BriefDescription": "Instruction Cache Miss. Counts various IC tag related hit and miss events.",
+ "UMask": "0x18"
+ },
+ {
+ "EventName": "ic_tag_hit_miss.instruction_cache_hit",
+ "EventCode": "0x18e",
+ "BriefDescription": "Instruction Cache Hit. Counts various IC tag related hit and miss events.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "OC Mode Switch. OC to IC mode switch.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "OC Mode Switch. IC to OC mode switch.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "op_cache_hit_miss.all_op_cache_accesses",
+ "EventCode": "0x28f",
+ "BriefDescription": "All Op Cache accesses. Counts Op Cache micro-tag hit/miss events",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "op_cache_hit_miss.op_cache_miss",
+ "EventCode": "0x28f",
+ "BriefDescription": "Op Cache Miss. Counts Op Cache micro-tag hit/miss events",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "op_cache_hit_miss.op_cache_hit",
+ "EventCode": "0x28f",
+ "BriefDescription": "Op Cache Hit. Counts Op Cache micro-tag hit/miss events",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "l3_request_g1.caching_l3_cache_accesses",
+ "EventCode": "0x01",
+ "BriefDescription": "Caching: L3 cache accesses",
+ "UMask": "0x80",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_lookup_state.all_l3_req_typs",
+ "EventCode": "0x04",
+ "BriefDescription": "All L3 Request Types. All L3 cache Requests",
+ "UMask": "0xff",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_comb_clstr_state.other_l3_miss_typs",
+ "EventCode": "0x06",
+ "BriefDescription": "Other L3 Miss Request Types",
+ "UMask": "0xfe",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_comb_clstr_state.request_miss",
+ "EventCode": "0x06",
+ "BriefDescription": "L3 cache misses",
+ "UMask": "0x01",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "xi_sys_fill_latency",
+ "EventCode": "0x90",
+ "BriefDescription": "L3 Cache Miss Latency. Total cycles for all transactions divided by 16. Ignores SliceMask and ThreadMask.",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "xi_ccx_sdp_req1",
+ "EventCode": "0x9a",
+ "BriefDescription": "L3 Misses by Request Type. Ignores SliceID, EnAllSlices, CoreID, EnAllCores and ThreadMask. Requires unit mask 0xFF to engage event for counting.",
+ "UMask": "0xff",
+ "Unit": "L3PMC"
+ }
+]
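
Each entry above carries the two fields perf needs to program a core PMC: EventCode and UMask. Below is a minimal perf_event_open() sketch, assuming the usual AMD64 PERF_CTL raw-config layout (EventSelect[7:0] in config bits 7:0, UnitMask in bits 15:8, EventSelect[11:8] in bits 35:32); amd_raw_config() is a hypothetical helper, not part of this tree:

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical helper: fold EventCode/UMask into a raw config,
 * assuming the AMD64 PERF_CTL bit layout described above. */
static uint64_t amd_raw_config(unsigned int event, unsigned int umask)
{
        return (event & 0xffULL) |
               ((uint64_t)(umask & 0xff) << 8) |
               ((uint64_t)(event & 0xf00) << 24);      /* -> bits 35:32 */
}

int main(void)
{
        struct perf_event_attr attr;
        long long count = 0;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        /* l2_cache_req_stat.ls_rd_blk_c: PMCx064, UMask 0x08 */
        attr.config = amd_raw_config(0x64, 0x08);
        attr.disabled = 1;

        fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... workload under measurement ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("l2_cache_req_stat.ls_rd_blk_c: %lld\n", count);
        return 0;
}
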
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/core.json b/tools/perf/pmu-events/arch/x86/amdzen3/core.json
new file mode 100644
index 000000000000..4e27a2be359e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/core.json
@@ -0,0 +1,137 @@
+[
+ {
+ "EventName": "ex_ret_instr",
+ "EventCode": "0xc0",
+ "BriefDescription": "Retired Instructions."
+ },
+ {
+ "EventName": "ex_ret_ops",
+ "EventCode": "0xc1",
+ "BriefDescription": "Retired Ops. Use macro_ops_retired instead.",
+ "PublicDescription": "The number of macro-ops retired."
+ },
+ {
+ "EventName": "ex_ret_brn",
+ "EventCode": "0xc2",
+ "BriefDescription": "Retired Branch Instructions.",
+ "PublicDescription": "The number of branch instructions retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+ },
+ {
+ "EventName": "ex_ret_brn_misp",
+ "EventCode": "0xc3",
+ "BriefDescription": "Retired Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of retired branch instructions, that were mispredicted."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn",
+ "EventCode": "0xc4",
+ "BriefDescription": "Retired Taken Branch Instructions.",
+ "PublicDescription": "The number of taken branches that were retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn_misp",
+ "EventCode": "0xc5",
+ "BriefDescription": "Retired Taken Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of retired taken branch instructions that were mispredicted."
+ },
+ {
+ "EventName": "ex_ret_brn_far",
+ "EventCode": "0xc6",
+ "BriefDescription": "Retired Far Control Transfers.",
+ "PublicDescription": "The number of far control transfers retired including far call/jump/return, IRET, SYSCALL and SYSRET, plus exceptions and interrupts. Far control transfers are not subject to branch prediction."
+ },
+ {
+ "EventName": "ex_ret_brn_resync",
+ "EventCode": "0xc7",
+ "BriefDescription": "Retired Branch Resyncs.",
+ "PublicDescription": "The number of resync branches. These reflect pipeline restarts due to certain microcode assists and events such as writes to the active instruction stream, among other things. Each occurrence reflects a restart penalty similar to a branch mispredict. This is relatively rare."
+ },
+ {
+ "EventName": "ex_ret_near_ret",
+ "EventCode": "0xc8",
+ "BriefDescription": "Retired Near Returns.",
+ "PublicDescription": "The number of near return instructions (RET or RET Iw) retired."
+ },
+ {
+ "EventName": "ex_ret_near_ret_mispred",
+ "EventCode": "0xc9",
+ "BriefDescription": "Retired Near Returns Mispredicted.",
+ "PublicDescription": "The number of near returns retired that were not correctly predicted by the return address predictor. Each such mispredict incurs the same penalty as a mispredicted conditional branch instruction."
+ },
+ {
+ "EventName": "ex_ret_brn_ind_misp",
+ "EventCode": "0xca",
+ "BriefDescription": "Retired Indirect Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of indirect branches retired that were not correctly predicted. Each such mispredict incurs the same penalty as a mispredicted conditional branch instruction. Note that only EX mispredicts are counted."
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.sse_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.mmx_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "MMX instructions.",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. MMX instructions.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.x87_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "x87 instructions.",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. x87 instructions.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ex_ret_ind_brch_instr",
+ "EventCode": "0xcc",
+ "BriefDescription": "Retired Indirect Branch Instructions. The number of indirect branches retired."
+ },
+ {
+ "EventName": "ex_ret_cond",
+ "EventCode": "0xd1",
+ "BriefDescription": "Retired Conditional Branch Instructions."
+ },
+ {
+ "EventName": "ex_div_busy",
+ "EventCode": "0xd3",
+ "BriefDescription": "Div Cycles Busy count."
+ },
+ {
+ "EventName": "ex_div_count",
+ "EventCode": "0xd4",
+ "BriefDescription": "Div Op Count."
+ },
+ {
+ "EventName": "ex_ret_msprd_brnch_instr_dir_msmtch",
+ "EventCode": "0x1c7",
+ "BriefDescription": "Retired Mispredicted Branch Instructions due to Direction Mismatch",
+ "PublicDescription": "The number of retired conditional branch instructions that were not correctly predicted because of a branch direction mismatch."
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_count_rollover",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ex_ret_fused_instr",
+ "EventCode": "0x1d0",
+ "BriefDescription": "Counts retired Fused Instructions."
+ }
+]
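
ex_ret_instr pairs naturally with the not-halted cycle count (ls_not_halted_cyc, PMCx076, defined in memory.json below) to derive IPC. A sketch of the division, with hypothetical names:

#include <stdint.h>

/* Sketch: instructions per not-halted cycle from deltas of
 * ex_ret_instr and ls_not_halted_cyc over the same interval. */
static double ipc(uint64_t retired_instr, uint64_t not_halted_cycles)
{
        return not_halted_cycles ?
               (double)retired_instr / (double)not_halted_cycles : 0.0;
}
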
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/data-fabric.json b/tools/perf/pmu-events/arch/x86/amdzen3/data-fabric.json
new file mode 100644
index 000000000000..40271df40015
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/data-fabric.json
@@ -0,0 +1,98 @@
+[
+ {
+ "EventName": "remote_outbound_data_controller_0",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 0",
+ "EventCode": "0x7c7",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_outbound_data_controller_1",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 1",
+ "EventCode": "0x807",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_outbound_data_controller_2",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 2",
+ "EventCode": "0x847",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_outbound_data_controller_3",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 3",
+ "EventCode": "0x887",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_0",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x07",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_1",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x47",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_2",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x87",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_3",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0xc7",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_4",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x107",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_5",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x147",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_6",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x187",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_7",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x1c7",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ }
+]
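
Every dram_channel_data_controller_N increment is one 64-byte DRAM request, which is where the 6.1e-5MiB ScaleUnit in recommended.json below comes from (64 / 2^20 ≈ 6.1e-5). A bandwidth sketch under that assumption, with hypothetical names:

#include <stdint.h>

/* Sketch: NPS1 die DRAM bandwidth in MiB/s. counts[] holds
 * dram_channel_data_controller_0..7 deltas over elapsed_s seconds;
 * each count is assumed to be one 64-byte request. */
static double nps1_dram_bw_mib(const uint64_t counts[8], double elapsed_s)
{
        uint64_t total = 0;

        for (int i = 0; i < 8; i++)
                total += counts[i];
        return (double)total * 64.0 / (1024.0 * 1024.0) / elapsed_s;
}
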
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/floating-point.json b/tools/perf/pmu-events/arch/x86/amdzen3/floating-point.json
new file mode 100644
index 000000000000..98cfcb9c78ec
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/floating-point.json
@@ -0,0 +1,139 @@
+[
+ {
+ "EventName": "fpu_pipe_assignment.total",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number of fp uOps.",
+ "PublicDescription": "Total number of fp uOps. The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS.",
+ "UMask": "0x0f"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total3",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number uOps assigned to pipe 3.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one-cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 3.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total2",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number uOps assigned to pipe 2.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 2.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total1",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number uOps assigned to pipe 1.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 1.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total0",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number of fp uOps on pipe 0.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 0.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.all",
+ "EventCode": "0x03",
+ "BriefDescription": "All FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.mac_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Multiply-Accumulate FLOPs. Each MAC operation is counted as 2 FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPs. The number of events logged per cycle can vary from 0 to 64. This event requires the use of the MergeEvent since it can count above 15 events per cycle. See 2.1.17.3 [Large Increment per Cycle Events]. It does not provide a useful count without the use of the MergeEvent.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.div_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Divide/square root FLOPs. This is a retire-based event. The number of retired SSE/AVX FLOPs. The number of events logged per cycle can vary from 0 to 64. This event requires the use of the MergeEvent since it can count above 15 events per cycle. See 2.1.17.3 [Large Increment per Cycle Events]. It does not provide a useful count without the use of the MergeEvent.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.mult_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Multiply FLOPs. This is a retire-based event. The number of retired SSE/AVX FLOPs. The number of events logged per cycle can vary from 0 to 64. This event requires the use of the MergeEvent since it can count above 15 events per cycle. See 2.1.17.3 [Large Increment per Cycle Events]. It does not provide a useful count without the use of the MergeEvent.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.add_sub_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Add/subtract FLOPs. This is a retire-based event. The number of retired SSE/AVX FLOPs. The number of events logged per cycle can vary from 0 to 64. This event requires the use of the MergeEvent since it can count above 15 events per cycle. See 2.1.17.3 [Large Increment per Cycle Events]. It does not provide a useful count without the use of the MergeEvent.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.optimized",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of Scalar Ops optimized. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.opt_potential",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of Ops that are candidates for optimization (have Z-bit either set or pass). This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops_elim",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of SSE Move Ops eliminated. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of SSE Move Ops. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "SSE/AVX bottom-executing ops retired. The number of serializing Ops retired.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "SSE/AVX control word mispredict traps. The number of serializing Ops retired.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "x87 bottom-executing ops retired. The number of serializing Ops retired.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits. The number of serializing Ops retired.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_disp_faults.ymm_spill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. YMM spill fault.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_disp_faults.ymm_fill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. YMM fill fault.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_disp_faults.xmm_fill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. XMM fill fault.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_disp_faults.x87_fill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. x87 fill fault.",
+ "UMask": "0x01"
+ }
+]
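
Since fp_ret_sse_avx_ops.all counts retired FLOPs directly (MAC operations already contribute 2 each, per the mac_flops entry), a FLOP rate is just the MergeEvent-qualified count over elapsed time. A sketch, with hypothetical names:

#include <stdint.h>

/* Sketch: GFLOPS from a MergeEvent-qualified fp_ret_sse_avx_ops.all
 * delta; MAC operations already count as 2 FLOPs in hardware. */
static double gflops(uint64_t retired_flops, double elapsed_s)
{
        return (double)retired_flops / elapsed_s / 1e9;
}
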
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/memory.json b/tools/perf/pmu-events/arch/x86/amdzen3/memory.json
new file mode 100644
index 000000000000..a2833955dcd2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/memory.json
@@ -0,0 +1,428 @@
+[
+ {
+ "EventName": "ls_bad_status2.stli_other",
+ "EventCode": "0x24",
+ "BriefDescription": "Non-forwardable conflict; used to reduce STLI's via software. All reasons. Store To Load Interlock (STLI) are loads that were unable to complete because of a possible match with an older store, and the older store could not do STLF for some reason.",
+ "PublicDescription" : "Store-to-load conflicts: A load was unable to complete due to a non-forwardable conflict with an older store. Most commonly, a load's address range partially but not completely overlaps with an uncompleted older store. Software can avoid this problem by using same-size and same-alignment loads and stores when accessing the same data. Vector/SIMD code is particularly susceptible to this problem; software should construct wide vector stores by manipulating vector elements in registers using shuffle/blend/swap instructions prior to storing to memory, instead of using narrow element-by-element stores.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_locks.spec_lock_hi_spec",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. High speculative cacheable lock speculation succeeded.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_locks.spec_lock_lo_spec",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. Low speculative cacheable lock speculation succeeded.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_locks.non_spec_lock",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. Non-speculative lock succeeded.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_locks.bus_lock",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. Comparable to legacy bus lock.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_ret_cl_flush",
+ "EventCode": "0x26",
+ "BriefDescription": "The number of retired CLFLUSH instructions. This is a non-speculative event."
+ },
+ {
+ "EventName": "ls_ret_cpuid",
+ "EventCode": "0x27",
+ "BriefDescription": "The number of CPUID instructions retired."
+ },
+ {
+ "EventName": "ls_dispatch.ld_st_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Load-op-Store Dispatch. Dispatch of a single op that performs a load from and store to the same memory address. Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_dispatch.store_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Dispatch of a single op that performs a memory store. Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_dispatch.ld_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Dispatch of a single op that performs a memory load. Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_smi_rx",
+ "EventCode": "0x2b",
+ "BriefDescription": "Counts the number of SMIs received."
+ },
+ {
+ "EventName": "ls_int_taken",
+ "EventCode": "0x2c",
+ "BriefDescription": "Counts the number of interrupts taken."
+ },
+ {
+ "EventName": "ls_rdtsc",
+ "EventCode": "0x2d",
+ "BriefDescription": "Number of reads of the TSC (RDTSC instructions). The count is speculative."
+ },
+ {
+ "EventName": "ls_stlf",
+ "EventCode": "0x35",
+ "BriefDescription": "Number of STLF hits."
+ },
+ {
+ "EventName": "ls_st_commit_cancel2.st_commit_cancel_wcb_full",
+ "EventCode": "0x37",
+ "BriefDescription": "A non-cacheable store and the non-cacheable commit buffer is full.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_dc_accesses",
+ "EventCode": "0x40",
+ "BriefDescription": "Number of accesses to the dcache for load/store references.",
+ "PublicDescription": "The number of accesses to the data cache for load and store references. This may include certain microcode scratchpad accesses, although these are generally rare. Each increment represents an eight-byte access, although the instruction may only be accessing a portion of that. This event is a speculative event."
+ },
+ {
+ "EventName": "ls_mab_alloc.all_allocations",
+ "EventCode": "0x41",
+ "BriefDescription": "All Allocations. Counts when a LS pipe allocates a MAB entry.",
+ "UMask": "0x7f"
+ },
+ {
+ "EventName": "ls_mab_alloc.hardware_prefetcher_allocations",
+ "EventCode": "0x41",
+ "BriefDescription": "Hardware Prefetcher Allocations. Counts when a LS pipe allocates a MAB entry.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_mab_alloc.load_store_allocations",
+ "EventCode": "0x41",
+ "BriefDescription": "Load Store Allocations. Counts when a LS pipe allocates a MAB entry.",
+ "UMask": "0x3f"
+ },
+ {
+ "EventName": "ls_mab_alloc.dc_prefetcher",
+ "EventCode": "0x41",
+ "BriefDescription": "LS MAB Allocates by Type. DC prefetcher.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_mab_alloc.stores",
+ "EventCode": "0x41",
+ "BriefDescription": "LS MAB Allocates by Type. Stores.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_mab_alloc.loads",
+ "EventCode": "0x41",
+ "BriefDescription": "LS MAB Allocates by Type. Loads.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.mem_io_remote",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From DRAM or IO connected in different Node.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.ext_cache_remote",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From CCX Cache in different Node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.mem_io_local",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From DRAM or IO connected in same node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.ext_cache_local",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From cache of different CCX in same node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.int_cache",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From L3 or different L2 in same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.lcl_l2",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From Local L2 to the core.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.mem_io_remote",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From DRAM or IO connected in different Node.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.ext_cache_remote",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From CCX Cache in different Node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.mem_io_local",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From DRAM or IO connected in same node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.ext_cache_local",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From cache of different CCX in same node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.int_cache",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From L3 or different L2 in same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.lcl_l2",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From Local L2 to the core.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.all",
+ "EventCode": "0x45",
+ "BriefDescription": "All L1 DTLB Misses or Reloads. Use l1_dtlb_misses instead.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 1G page that also missed in the L2 TLB.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 2M page that also missed in the L2 TLB.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload coalesced page that also missed in the L2 TLB.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 4K page that missed the L2 TLB.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 1G page that hit in the L2 TLB.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 2M page that hit in the L2 TLB.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a coalesced page that hit in the L2 TLB.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 4K page that hit in the L2 TLB.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_tablewalker.iside",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks on I-side.",
+ "UMask": "0x0c"
+ },
+ {
+ "EventName": "ls_tablewalker.ic_type1",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks IC Type 1.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_tablewalker.ic_type0",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks IC Type 0.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_tablewalker.dside",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks on D-side.",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "ls_tablewalker.dc_type1",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks DC Type 1.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_tablewalker.dc_type0",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks DC Type 0.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_misal_loads.ma4k",
+ "EventCode": "0x47",
+ "BriefDescription": "The number of 4KB misaligned (i.e., page crossing) loads.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_misal_loads.ma64",
+ "EventCode": "0x47",
+ "BriefDescription": "The number of 64B misaligned (i.e., cacheline crossing) loads.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_pref_instr_disp",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative).",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch_nta",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). PrefetchNTA instruction. See docAPM3 PREFETCHlevel.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch_w",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). PrefetchW instruction. See docAPM3 PREFETCHW.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). PrefetchT0, T1 and T2 instructions. See docAPM3 PREFETCHlevel.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.mab_mch_cnt",
+ "EventCode": "0x52",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a match on an already-allocated miss request buffer.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
+ "EventCode": "0x52",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a DC hit.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.mem_io_remote",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From DRAM or IO connected in different Node.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.ext_cache_remote",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From CCX Cache in different Node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.mem_io_local",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From DRAM or IO connected in same node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.ext_cache_local",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From cache of different CCX in same node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.int_cache",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From L3 or different L2 in same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.lcl_l2",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From Local L2 to the core.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.mem_io_remote",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From DRAM or IO connected in different Node.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.ext_cache_remote",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From CCX Cache in different Node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.mem_io_local",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From DRAM or IO connected in same node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.ext_cache_local",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From cache of different CCX in same node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.int_cache",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From L3 or different L2 in same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.lcl_l2",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From Local L2 to the core.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_alloc_mab_count",
+ "EventCode": "0x5f",
+ "BriefDescription": "Count of Allocated Mabs",
+ "PublicDescription": "This event counts the in-flight L1 data cache misses (allocated Miss Address Buffers) divided by 4 and rounded down each cycle unless used with the MergeEvent functionality. If the MergeEvent is used, it counts the exact number of outstanding L1 data cache misses. See 2.1.17.3 [Large Increment per Cycle Events]."
+ },
+ {
+ "EventName": "ls_not_halted_cyc",
+ "EventCode": "0x76",
+ "BriefDescription": "Cycles not in Halt."
+ },
+ {
+ "EventName": "ls_tlb_flush.all_tlb_flushes",
+ "EventCode": "0x78",
+ "BriefDescription": "All TLB Flushes. Requires unit mask 0xFF to engage event for counting. Use all_tlbs_flushed instead",
+ "UMask": "0xff"
+ }
+]
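
The ls_bad_status2.stli_other entry at the top of this file describes the software fix for store-to-load interlocks: use same-size, same-alignment loads and stores on the same data. An illustrative pair of patterns, assuming a Zen-like forwarder; whether the narrow stores survive compilation depends on optimization flags:

#include <stdint.h>

typedef union {
        uint64_t q;
        uint32_t d[2];
} pair_t;

/* STLI-prone: two 4-byte stores followed by an 8-byte load that only
 * partially overlaps each one, so store-to-load forwarding fails and
 * the load is counted by ls_bad_status2.stli_other. */
uint64_t stli_prone(volatile pair_t *p, uint32_t lo, uint32_t hi)
{
        p->d[0] = lo;
        p->d[1] = hi;
        return p->q;
}

/* STLF-friendly: combine the halves in a register, then use a
 * same-size, same-alignment store/load pair the forwarder can satisfy. */
uint64_t stlf_friendly(volatile pair_t *p, uint32_t lo, uint32_t hi)
{
        p->q = ((uint64_t)hi << 32) | lo;
        return p->q;
}
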
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/other.json b/tools/perf/pmu-events/arch/x86/amdzen3/other.json
new file mode 100644
index 000000000000..7da5d0791ea3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/other.json
@@ -0,0 +1,103 @@
+[
+ {
+ "EventName": "de_dis_uop_queue_empty_di0",
+ "EventCode": "0xa9",
+ "BriefDescription": "Cycles where the Micro-Op Queue is empty."
+ },
+ {
+ "EventName": "de_dis_cops_from_decoder.disp_op_type.any_integer_dispatch",
+ "EventCode": "0xab",
+ "BriefDescription": "Any Integer dispatch. Types of Oops Dispatched from Decoder.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "de_dis_cops_from_decoder.disp_op_type.any_fp_dispatch",
+ "EventCode": "0xab",
+ "BriefDescription": "Any FP dispatch. Types of Oops Dispatched from Decoder.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.fp_flush_recovery_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. FP Flush recovery stall.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.fp_sch_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. FP scheduler resource stall. Applies to ops that use the FP scheduler.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.fp_reg_file_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. Floating point register file resource stall. Applies to all FP ops that have a destination register.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.taken_brnch_buffer_rsrc",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. Taken branch buffer resource stall.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.int_sched_misc_token_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Integer Scheduler miscellaneous resource stall.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.store_queue_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. Store Queue resource stall. Applies to all ops with store semantics.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.load_queue_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. Load Queue resource stall. Applies to all ops with load semantics.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.int_phy_reg_file_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. Integer Physical Register File resource stall. Integer Physical Register File, applies to all ops that have an integer destination register.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.retire_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Insufficient Retire Queue tokens available.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.agsq_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. AGSQ Tokens unavailable.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch3_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. No tokens for Integer Scheduler Queue 3 available.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch2_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. No tokens for Integer Scheduler Queue 2 available.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch1_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. No tokens for Integer Scheduler Queue 1 available.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch0_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. No tokens for Integer Scheduler Queue 0 available.",
+ "UMask": "0x01"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen3/recommended.json
new file mode 100644
index 000000000000..988cf68ae825
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/recommended.json
@@ -0,0 +1,214 @@
+[
+ {
+ "MetricName": "branch_misprediction_ratio",
+ "BriefDescription": "Execution-Time Branch Misprediction Ratio (Non-Speculative)",
+ "MetricExpr": "d_ratio(ex_ret_brn_misp, ex_ret_brn)",
+ "MetricGroup": "branch_prediction",
+ "ScaleUnit": "100%"
+ },
+ {
+ "EventName": "all_data_cache_accesses",
+ "EventCode": "0x29",
+ "BriefDescription": "All L1 Data Cache Accesses",
+ "UMask": "0x07"
+ },
+ {
+ "MetricName": "all_l2_cache_accesses",
+ "BriefDescription": "All L2 Cache Accesses",
+ "MetricExpr": "l2_request_g1.all_no_prefetch + l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "EventName": "l2_cache_accesses_from_ic_misses",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 Cache Accesses from L1 Instruction Cache Misses (including prefetch)",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_cache_accesses_from_dc_misses",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 Cache Accesses from L1 Data Cache Misses (including prefetch)",
+ "UMask": "0xe8"
+ },
+ {
+ "MetricName": "l2_cache_accesses_from_l2_hwpf",
+ "BriefDescription": "L2 Cache Accesses from L2 HWPF",
+ "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "all_l2_cache_misses",
+ "BriefDescription": "All L2 Cache Misses",
+ "MetricExpr": "l2_cache_req_stat.ic_dc_miss_in_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "EventName": "l2_cache_misses_from_ic_miss",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Misses from L1 Instruction Cache Misses",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_cache_misses_from_dc_misses",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Misses from L1 Data Cache Misses",
+ "UMask": "0x08"
+ },
+ {
+ "MetricName": "l2_cache_misses_from_l2_hwpf",
+ "BriefDescription": "L2 Cache Misses from L2 Cache HWPF",
+ "MetricExpr": "l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "all_l2_cache_hits",
+ "BriefDescription": "All L2 Cache Hits",
+ "MetricExpr": "l2_cache_req_stat.ic_dc_hit_in_l2 + l2_pf_hit_l2",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "EventName": "l2_cache_hits_from_ic_misses",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Hits from L1 Instruction Cache Misses",
+ "UMask": "0x06"
+ },
+ {
+ "EventName": "l2_cache_hits_from_dc_misses",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Hits from L1 Data Cache Misses",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "l2_cache_hits_from_l2_hwpf",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 Cache Hits from L2 Cache HWPF",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l3_cache_accesses",
+ "EventCode": "0x04",
+ "BriefDescription": "L3 Cache Accesses",
+ "UMask": "0xff",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_misses",
+ "EventCode": "0x04",
+ "BriefDescription": "L3 Misses (includes cacheline state change requests)",
+ "UMask": "0x01",
+ "Unit": "L3PMC"
+ },
+ {
+ "MetricName": "l3_read_miss_latency",
+ "BriefDescription": "Average L3 Read Miss Latency (in core clocks)",
+ "MetricExpr": "(xi_sys_fill_latency * 16) / xi_ccx_sdp_req1",
+ "MetricGroup": "l3_cache",
+ "ScaleUnit": "1core clocks"
+ },
+ {
+ "MetricName": "op_cache_fetch_miss_ratio",
+ "BriefDescription": "Op Cache (64B) Fetch Miss Ratio",
+ "MetricExpr": "d_ratio(op_cache_hit_miss.op_cache_miss, op_cache_hit_miss.all_op_cache_accesses)",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "ic_fetch_miss_ratio",
+ "BriefDescription": "Instruction Cache (32B) Fetch Miss Ratio",
+ "MetricExpr": "d_ratio(ic_tag_hit_miss.instruction_cache_miss, ic_tag_hit_miss.all_instruction_cache_accesses)",
+ "MetricGroup": "l2_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "EventName": "l1_data_cache_fills_from_memory",
+ "EventCode": "0x44",
+ "BriefDescription": "L1 Data Cache Fills: From Memory",
+ "UMask": "0x48"
+ },
+ {
+ "EventName": "l1_data_cache_fills_from_remote_node",
+ "EventCode": "0x44",
+ "BriefDescription": "L1 Data Cache Fills: From Remote Node",
+ "UMask": "0x50"
+ },
+ {
+ "EventName": "l1_data_cache_fills_from_within_same_ccx",
+ "EventCode": "0x44",
+ "BriefDescription": "L1 Data Cache Fills: From within same CCX",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "l1_data_cache_fills_from_external_ccx_cache",
+ "EventCode": "0x44",
+ "BriefDescription": "L1 Data Cache Fills: From External CCX Cache",
+ "UMask": "0x14"
+ },
+ {
+ "EventName": "l1_data_cache_fills_all",
+ "EventCode": "0x44",
+ "BriefDescription": "L1 Data Cache Fills: All",
+ "UMask": "0xff"
+ },
+ {
+ "MetricName": "l1_itlb_misses",
+ "BriefDescription": "L1 ITLB Misses",
+ "MetricExpr": "bp_l1_tlb_miss_l2_tlb_hit + bp_l1_tlb_miss_l2_tlb_miss",
+ "MetricGroup": "tlb"
+ },
+ {
+ "EventName": "l2_itlb_misses",
+ "EventCode": "0x85",
+ "BriefDescription": "L2 ITLB Misses & Instruction page walks",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "l1_dtlb_misses",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Misses",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_dtlb_misses",
+ "EventCode": "0x45",
+ "BriefDescription": "L2 DTLB Misses & Data page walks",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "all_tlbs_flushed",
+ "EventCode": "0x78",
+ "BriefDescription": "All TLBs Flushed",
+ "UMask": "0xff"
+ },
+ {
+ "MetricName": "macro_ops_dispatched",
+ "BriefDescription": "Macro-ops Dispatched",
+ "MetricExpr": "de_dis_cops_from_decoder.disp_op_type.any_integer_dispatch + de_dis_cops_from_decoder.disp_op_type.any_fp_dispatch",
+ "MetricGroup": "decoder"
+ },
+ {
+ "EventName": "sse_avx_stalls",
+ "EventCode": "0x0e",
+ "BriefDescription": "Mixed SSE/AVX Stalls",
+ "UMask": "0x0e"
+ },
+ {
+ "EventName": "macro_ops_retired",
+ "EventCode": "0xc1",
+ "BriefDescription": "Macro-ops Retired"
+ },
+ {
+ "MetricName": "all_remote_links_outbound",
+ "BriefDescription": "Approximate: Outbound data bytes for all Remote Links for a node (die)",
+ "MetricExpr": "remote_outbound_data_controller_0 + remote_outbound_data_controller_1 + remote_outbound_data_controller_2 + remote_outbound_data_controller_3",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "3e-5MiB"
+ },
+ {
+ "MetricName": "nps1_die_to_dram",
+ "BriefDescription": "Approximate: Combined DRAM B/bytes of all channels on a NPS1 node (die) (may need --metric-no-group)",
+ "MetricExpr": "dram_channel_data_controller_0 + dram_channel_data_controller_1 + dram_channel_data_controller_2 + dram_channel_data_controller_3 + dram_channel_data_controller_4 + dram_channel_data_controller_5 + dram_channel_data_controller_6 + dram_channel_data_controller_7",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.1e-5MiB"
+ }
+]
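
Several of the MetricExprs above rely on d_ratio(). As a point of reference, here is a minimal C sketch of its divide-or-zero semantics, assuming the usual behaviour of perf's metric expression evaluator (a zero denominator yields 0 rather than a division error):

/* Sketch of d_ratio(num, den) as used in the metric expressions above:
 * an ordinary ratio, except a zero denominator evaluates to 0. */
static double d_ratio(double num, double den)
{
	return den != 0.0 ? num / den : 0.0;
}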
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 2f2a209e87e1..0a6a8c7f937f 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -24,6 +24,7 @@ GenuineIntel-6-1F,v2,nehalemep,core
GenuineIntel-6-1A,v2,nehalemep,core
GenuineIntel-6-2E,v2,nehalemex,core
GenuineIntel-6-[4589]E,v24,skylake,core
+GenuineIntel-6-A[56],v24,skylake,core
GenuineIntel-6-37,v13,silvermont,core
GenuineIntel-6-4D,v13,silvermont,core
GenuineIntel-6-4C,v13,silvermont,core
@@ -35,7 +36,9 @@ GenuineIntel-6-55-[01234],v1,skylakex,core
GenuineIntel-6-55-[56789ABCDEF],v1,cascadelakex,core
GenuineIntel-6-7D,v1,icelake,core
GenuineIntel-6-7E,v1,icelake,core
+GenuineIntel-6-8[CD],v1,icelake,core
+GenuineIntel-6-A7,v1,icelake,core
GenuineIntel-6-86,v1,tremontx,core
AuthenticAMD-23-([12][0-9A-F]|[0-9A-F]),v2,amdzen1,core
AuthenticAMD-23-[[:xdigit:]]+,v1,amdzen2,core
-AuthenticAMD-25-[[:xdigit:]]+,v1,amdzen2,core
+AuthenticAMD-25-[[:xdigit:]]+,v1,amdzen3,core
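
The cpuid column of mapfile.csv is an extended regular expression matched against the CPUID string perf computes at runtime, so the hunk above reroutes every family-0x19 (Zen3) part from the amdzen2 tables to the new amdzen3 ones. A minimal standalone sketch of that matching, assuming POSIX regex and the whole-string check perf applies:

/* Hypothetical sketch (not perf code): match a runtime CPUID string
 * against a mapfile.csv cpuid pattern with POSIX extended regexes. */
#include <regex.h>
#include <stdio.h>
#include <string.h>

static int cpuid_matches(const char *pattern, const char *cpuid)
{
	regex_t re;
	regmatch_t m;
	int ok = 0;

	if (regcomp(&re, pattern, REG_EXTENDED))
		return 0;
	/* require the whole CPUID string to match, not a substring */
	if (!regexec(&re, cpuid, 1, &m, 0))
		ok = (m.rm_eo - m.rm_so) == (regoff_t)strlen(cpuid);
	regfree(&re);
	return ok;
}

int main(void)
{
	/* 1: a family-0x19 part now selects the amdzen3 tables */
	printf("%d\n", cpuid_matches("AuthenticAMD-25-[[:xdigit:]]+",
				     "AuthenticAMD-25-01"));
	return 0;
}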
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index e1f3f5c8c550..ed4f0bd72e5a 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -285,6 +285,8 @@ static struct map {
{ "imx8_ddr", "imx8_ddr" },
{ "L3PMC", "amd_l3" },
{ "DFPMC", "amd_df" },
+ { "cpu_core", "cpu_core" },
+ { "cpu_atom", "cpu_atom" },
{}
};
@@ -1149,7 +1151,7 @@ static int process_one_file(const char *fpath, const struct stat *sb,
* and directory tree could result in build failure due to table
* names not being found.
*
- * Atleast for now, be strict with processing JSON file names.
+ * At least for now, be strict with processing JSON file names.
* i.e. if JSON file name cannot be mapped to C-style table name,
* fail.
*/
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
index ea0c8b90a783..a0cfc7fe5908 100644
--- a/tools/perf/scripts/python/netdev-times.py
+++ b/tools/perf/scripts/python/netdev-times.py
@@ -356,7 +356,7 @@ def handle_irq_softirq_exit(event_info):
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
- # merge information realted to a NET_RX softirq
+ # merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index dd39ce9b0277..9b40a25376ae 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -34,6 +34,7 @@
#include "event.h"
#include "util.h"
#include "tests.h"
+#include "pmu.h"
#define ENV "PERF_TEST_ATTR"
@@ -184,6 +185,9 @@ int test__attr(struct test *test __maybe_unused, int subtest __maybe_unused)
char path_dir[PATH_MAX];
char *exec_path;
+ if (perf_pmu__has_hybrid())
+ return TEST_SKIP;
+
/* First try development tree tests. */
if (!lstat("./tests", &st))
return run_dir("./tests", "./perf");
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index cc9fbcedb364..ef37353636d8 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -225,11 +225,11 @@ int test__bp_signal(struct test *test __maybe_unused, int subtest __maybe_unused
*
* The test case checks the following error conditions:
* - we get stuck in signal handler because of debug
- * exception being triggered receursively due to
+ * exception being triggered recursively due to
* the wrong RF EFLAG management
*
* - we never trigger the sig_handler breakpoint due
- * to the rong RF EFLAG management
+ * to the wrong RF EFLAG management
*
*/
@@ -242,7 +242,7 @@ int test__bp_signal(struct test *test __maybe_unused, int subtest __maybe_unused
ioctl(fd3, PERF_EVENT_IOC_ENABLE, 0);
/*
- * Kick off the test by trigering 'fd1'
+ * Kick off the test by triggering 'fd1'
* breakpoint.
*/
test_function();
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 2fdc7b2f996e..9866cddebf23 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -658,7 +658,7 @@ static int do_test_code_reading(bool try_kcore)
/*
* Both cpus and threads are now owned by evlist
* and will be freed by following perf_evlist__set_maps
- * call. Getting refference to keep them alive.
+ * call. Getting reference to keep them alive.
*/
perf_cpu_map__get(cpus);
perf_thread_map__get(threads);
diff --git a/tools/perf/tests/demangle-ocaml-test.c b/tools/perf/tests/demangle-ocaml-test.c
index a273ed5163d7..0043be812355 100644
--- a/tools/perf/tests/demangle-ocaml-test.c
+++ b/tools/perf/tests/demangle-ocaml-test.c
@@ -19,14 +19,14 @@ int test__demangle_ocaml(struct test *test __maybe_unused, int subtest __maybe_u
{ "main",
NULL },
{ "camlStdlib__array__map_154",
- "Stdlib.array.map" },
+ "Stdlib.array.map_154" },
{ "camlStdlib__anon_fn$5bstdlib$2eml$3a334$2c0$2d$2d54$5d_1453",
- "Stdlib.anon_fn[stdlib.ml:334,0--54]" },
+ "Stdlib.anon_fn[stdlib.ml:334,0--54]_1453" },
{ "camlStdlib__bytes__$2b$2b_2205",
- "Stdlib.bytes.++" },
+ "Stdlib.bytes.++_2205" },
};
- for (i = 0; i < sizeof(test_cases) / sizeof(test_cases[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
buf = ocaml_demangle_sym(test_cases[i].mangled);
if ((buf == NULL && test_cases[i].demangled != NULL)
|| (buf != NULL && test_cases[i].demangled == NULL)
diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c
index f7f3e5b4c180..b74cf80d1f10 100644
--- a/tools/perf/tests/evsel-roundtrip-name.c
+++ b/tools/perf/tests/evsel-roundtrip-name.c
@@ -4,6 +4,7 @@
#include "parse-events.h"
#include "tests.h"
#include "debug.h"
+#include "pmu.h"
#include <errno.h>
#include <linux/kernel.h>
@@ -62,7 +63,8 @@ static int perf_evsel__roundtrip_cache_name_test(void)
return ret;
}
-static int __perf_evsel__name_array_test(const char *names[], int nr_names)
+static int __perf_evsel__name_array_test(const char *names[], int nr_names,
+ int distance)
{
int i, err;
struct evsel *evsel;
@@ -82,9 +84,9 @@ static int __perf_evsel__name_array_test(const char *names[], int nr_names)
err = 0;
evlist__for_each_entry(evlist, evsel) {
- if (strcmp(evsel__name(evsel), names[evsel->idx])) {
+ if (strcmp(evsel__name(evsel), names[evsel->idx / distance])) {
--err;
- pr_debug("%s != %s\n", evsel__name(evsel), names[evsel->idx]);
+ pr_debug("%s != %s\n", evsel__name(evsel), names[evsel->idx / distance]);
}
}
@@ -93,18 +95,21 @@ out_delete_evlist:
return err;
}
-#define perf_evsel__name_array_test(names) \
- __perf_evsel__name_array_test(names, ARRAY_SIZE(names))
+#define perf_evsel__name_array_test(names, distance) \
+ __perf_evsel__name_array_test(names, ARRAY_SIZE(names), distance)
int test__perf_evsel__roundtrip_name_test(struct test *test __maybe_unused, int subtest __maybe_unused)
{
int err = 0, ret = 0;
- err = perf_evsel__name_array_test(evsel__hw_names);
+ if (perf_pmu__has_hybrid())
+ return perf_evsel__name_array_test(evsel__hw_names, 2);
+
+ err = perf_evsel__name_array_test(evsel__hw_names, 1);
if (err)
ret = err;
- err = __perf_evsel__name_array_test(evsel__sw_names, PERF_COUNT_SW_DUMMY + 1);
+ err = __perf_evsel__name_array_test(evsel__sw_names, PERF_COUNT_SW_DUMMY + 1, 1);
if (err)
ret = err;
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 3f2e1a581247..890cb1f5bf53 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -47,7 +47,7 @@ static struct sample fake_samples[] = {
};
/*
- * Will be casted to struct ip_callchain which has all 64 bit entries
+ * Will be cast to struct ip_callchain which has all 64 bit entries
* of nr and ips[].
*/
static u64 fake_callchains[][10] = {
@@ -297,7 +297,7 @@ out:
return err;
}
-/* callcain + NO children */
+/* callchain + NO children */
static int test2(struct evsel *evsel, struct machine *machine)
{
int err;
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 123e07d35b55..ca6120cd1d90 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -150,13 +150,13 @@ int test__hists_filter(struct test *test __maybe_unused, int subtest __maybe_unu
}
TEST_ASSERT_VAL("Invalid nr samples",
- hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10);
+ hists->stats.nr_samples == 10);
TEST_ASSERT_VAL("Invalid nr hist entries",
hists->nr_entries == 9);
TEST_ASSERT_VAL("Invalid total period",
hists->stats.total_period == 1000);
TEST_ASSERT_VAL("Unmatched nr samples",
- hists->stats.nr_events[PERF_RECORD_SAMPLE] ==
+ hists->stats.nr_samples ==
hists->stats.nr_non_filtered_samples);
TEST_ASSERT_VAL("Unmatched nr hist entries",
hists->nr_entries == hists->nr_non_filtered_entries);
@@ -175,7 +175,7 @@ int test__hists_filter(struct test *test __maybe_unused, int subtest __maybe_unu
/* normal stats should be invariant */
TEST_ASSERT_VAL("Invalid nr samples",
- hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10);
+ hists->stats.nr_samples == 10);
TEST_ASSERT_VAL("Invalid nr hist entries",
hists->nr_entries == 9);
TEST_ASSERT_VAL("Invalid total period",
@@ -204,7 +204,7 @@ int test__hists_filter(struct test *test __maybe_unused, int subtest __maybe_unu
/* normal stats should be invariant */
TEST_ASSERT_VAL("Invalid nr samples",
- hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10);
+ hists->stats.nr_samples == 10);
TEST_ASSERT_VAL("Invalid nr hist entries",
hists->nr_entries == 9);
TEST_ASSERT_VAL("Invalid total period",
@@ -239,7 +239,7 @@ int test__hists_filter(struct test *test __maybe_unused, int subtest __maybe_unu
/* normal stats should be invariant */
TEST_ASSERT_VAL("Invalid nr samples",
- hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10);
+ hists->stats.nr_samples == 10);
TEST_ASSERT_VAL("Invalid nr hist entries",
hists->nr_entries == 9);
TEST_ASSERT_VAL("Invalid total period",
@@ -268,7 +268,7 @@ int test__hists_filter(struct test *test __maybe_unused, int subtest __maybe_unu
/* normal stats should be invariant */
TEST_ASSERT_VAL("Invalid nr samples",
- hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10);
+ hists->stats.nr_samples == 10);
TEST_ASSERT_VAL("Invalid nr hist entries",
hists->nr_entries == 9);
TEST_ASSERT_VAL("Invalid total period",
@@ -299,7 +299,7 @@ int test__hists_filter(struct test *test __maybe_unused, int subtest __maybe_unu
/* normal stats should be invariant */
TEST_ASSERT_VAL("Invalid nr samples",
- hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10);
+ hists->stats.nr_samples == 10);
TEST_ASSERT_VAL("Invalid nr hist entries",
hists->nr_entries == 9);
TEST_ASSERT_VAL("Invalid total period",
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index a90fa043c066..94bd5d215d94 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -155,7 +155,6 @@ run += make_no_syscall_tbl
run += make_with_babeltrace
run += make_with_clangllvm
run += make_with_libpfm4
-run += make_with_gtk2
run += make_help
run += make_doc
run += make_perf_o
@@ -172,7 +171,6 @@ run += make_install_prefix_slash
# run += make_install_info
# run += make_install_pdf
run += make_minimal
-run += make_static
ifneq ($(call has,ctags),)
run += make_tags
@@ -307,6 +305,26 @@ $(run):
$(call test,$@) && \
rm -rf $@ $$TMP_DEST || (cat $@ ; false)
+make_with_gtk2:
+ $(call clean)
+ @TMP_DEST=$$(mktemp -d); \
+ cmd="cd $(PERF) && $(MAKE_F) $($@) $(PARALLEL_OPT) $(O_OPT) DESTDIR=$$TMP_DEST"; \
+ printf "%*.*s: %s\n" $(max_width) $(max_width) "$@" "$$cmd" && echo $$cmd > $@ && \
+ ( eval $$cmd ) >> $@ 2>&1; \
+ echo " test: $(call test,$@)" >> $@ 2>&1; \
+ $(call test,$@) && \
+ rm -rf $@ $$TMP_DEST || (cat $@ ; false)
+
+make_static:
+ $(call clean)
+ @TMP_DEST=$$(mktemp -d); \
+ cmd="cd $(PERF) && $(MAKE_F) $($@) $(PARALLEL_OPT) $(O_OPT) DESTDIR=$$TMP_DEST"; \
+ printf "%*.*s: %s\n" $(max_width) $(max_width) "$@" "$$cmd" && echo $$cmd > $@ && \
+ ( eval $$cmd ) >> $@ 2>&1; \
+ echo " test: $(call test,$@)" >> $@ 2>&1; \
+ $(call test,$@) && \
+ rm -rf $@ $$TMP_DEST || (cat $@ ; false)
+
$(run_O):
$(call clean)
@TMP_O=$$(mktemp -d); \
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index a7f6661e6112..0f113b2b36a3 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -20,7 +20,7 @@
#if defined(__s390x__)
/* Return true if kvm module is available and loaded. Test this
- * and retun success when trace point kvm_s390_create_vm
+ * and return success when trace point kvm_s390_create_vm
* exists. Otherwise this test always fails.
*/
static bool kvm_s390_create_vm_valid(void)
@@ -1512,6 +1512,124 @@ static int test__all_tracepoints(struct evlist *evlist)
return test__checkevent_tracepoint_multi(evlist);
}
+static int test__hybrid_hw_event_with_pmu(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
+ return 0;
+}
+
+static int test__hybrid_hw_group_event(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0xc0 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ return 0;
+}
+
+static int test__hybrid_sw_hw_group_event(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ return 0;
+}
+
+static int test__hybrid_hw_sw_group_event(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ return 0;
+}
+
+static int test__hybrid_group_modifier1(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0xc0 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ return 0;
+}
+
+static int test__hybrid_raw1(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
+
+ /* The type of the second event is a random value */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
+ return 0;
+}
+
+static int test__hybrid_raw2(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
+ return 0;
+}
+
+static int test__hybrid_cache_event(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x2 == (evsel->core.attr.config & 0xffffffff));
+
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x10002 == (evsel->core.attr.config & 0xffffffff));
+ return 0;
+}
+
struct evlist_test {
const char *name;
__u32 type;
@@ -1868,6 +1986,54 @@ static struct terms_test test__terms[] = {
},
};
+static struct evlist_test test__hybrid_events[] = {
+ {
+ .name = "cpu_core/cpu-cycles/",
+ .check = test__hybrid_hw_event_with_pmu,
+ .id = 0,
+ },
+ {
+ .name = "{cpu_core/cpu-cycles/,cpu_core/instructions/}",
+ .check = test__hybrid_hw_group_event,
+ .id = 1,
+ },
+ {
+ .name = "{cpu-clock,cpu_core/cpu-cycles/}",
+ .check = test__hybrid_sw_hw_group_event,
+ .id = 2,
+ },
+ {
+ .name = "{cpu_core/cpu-cycles/,cpu-clock}",
+ .check = test__hybrid_hw_sw_group_event,
+ .id = 3,
+ },
+ {
+ .name = "{cpu_core/cpu-cycles/k,cpu_core/instructions/u}",
+ .check = test__hybrid_group_modifier1,
+ .id = 4,
+ },
+ {
+ .name = "r1a",
+ .check = test__hybrid_raw1,
+ .id = 5,
+ },
+ {
+ .name = "cpu_core/r1a/",
+ .check = test__hybrid_raw2,
+ .id = 6,
+ },
+ {
+ .name = "cpu_core/config=10,config1,config2=3,period=1000/u",
+ .check = test__checkevent_pmu,
+ .id = 7,
+ },
+ {
+ .name = "cpu_core/LLC-loads/,cpu_atom/LLC-load-misses/",
+ .check = test__hybrid_cache_event,
+ .id = 8,
+ },
+};
+
static int test_event(struct evlist_test *e)
{
struct parse_events_error err;
@@ -2035,6 +2201,11 @@ do { \
ret2 = ret1; \
} while (0)
+ if (perf_pmu__has_hybrid()) {
+ TEST_EVENTS(test__hybrid_events);
+ return ret2;
+ }
+
TEST_EVENTS(test__events);
if (test_pmu())
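
The hybrid checks above expect a hardware event opened through a named PMU to carry that PMU's dynamic sysfs type (which, per the assertions, numerically equals PERF_TYPE_RAW, i.e. 4, for cpu_core on these machines) and the raw encoding in attr.config. A hypothetical standalone sketch of what "cpu_core/cpu-cycles/" boils down to; the sysfs path and the 0x3c encoding are taken from the tests above:

/* Hypothetical sketch: open "cycles" on the big-core PMU of a hybrid
 * machine by using that PMU's dynamic type from sysfs, which is what
 * the "cpu_core/cpu-cycles/" syntax resolves to. */
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned int type = 0;
	FILE *f = fopen("/sys/devices/cpu_core/type", "r"); /* e.g. 4 */

	if (!f || fscanf(f, "%u", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;	/* the cpu_core PMU, not PERF_TYPE_HARDWARE */
	attr.config = 0x3c;	/* raw encoding of cpu-cycles, per the test */

	/* count cycles for the current thread on any CPU */
	return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0) < 0;
}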
diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c
index 6dc1db1626ad..4f6f4904e852 100644
--- a/tools/perf/tests/parse-metric.c
+++ b/tools/perf/tests/parse-metric.c
@@ -11,6 +11,7 @@
#include "debug.h"
#include "expr.h"
#include "stat.h"
+#include "pmu.h"
static struct pmu_event pme_test[] = {
{
@@ -98,7 +99,7 @@ static u64 find_value(const char *name, struct value *values)
if (!strcmp(name, v->event))
return v->val;
v++;
- };
+ }
return 0;
}
@@ -186,7 +187,7 @@ static int __compute_metric(const char *name, struct value *vals,
*ratio2 = compute_single(&metric_events, evlist, &st, name2);
out:
- /* ... clenup. */
+ /* ... cleanup. */
metricgroup__rblist_exit(&metric_events);
runtime_stat__exit(&st);
evlist__free_stats(evlist);
@@ -372,10 +373,13 @@ int test__parse_metric(struct test *test __maybe_unused, int subtest __maybe_unu
{
TEST_ASSERT_VAL("IPC failed", test_ipc() == 0);
TEST_ASSERT_VAL("frontend failed", test_frontend() == 0);
- TEST_ASSERT_VAL("cache_miss_cycles failed", test_cache_miss_cycles() == 0);
TEST_ASSERT_VAL("DCache_L2 failed", test_dcache_l2() == 0);
TEST_ASSERT_VAL("recursion fail failed", test_recursion_fail() == 0);
- TEST_ASSERT_VAL("test metric group", test_metric_group() == 0);
TEST_ASSERT_VAL("Memory bandwidth", test_memory_bandwidth() == 0);
+
+ if (!perf_pmu__has_hybrid()) {
+ TEST_ASSERT_VAL("cache_miss_cycles failed", test_cache_miss_cycles() == 0);
+ TEST_ASSERT_VAL("test metric group", test_metric_group() == 0);
+ }
return 0;
}
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
index 680c3cffb128..85d75b9b25a1 100644
--- a/tools/perf/tests/perf-time-to-tsc.c
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -20,6 +20,7 @@
#include "tsc.h"
#include "mmap.h"
#include "tests.h"
+#include "pmu.h"
#define CHECK__(x) { \
while ((x) < 0) { \
@@ -88,6 +89,17 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
evsel->core.attr.disabled = 1;
evsel->core.attr.enable_on_exec = 0;
+ /*
+ * For hybrid "cycles:u", it creates two events.
+ * Init the second evsel here.
+ */
+ if (perf_pmu__has_hybrid()) {
+ evsel = evsel__next(evsel);
+ evsel->core.attr.comm = 1;
+ evsel->core.attr.disabled = 1;
+ evsel->core.attr.enable_on_exec = 0;
+ }
+
CHECK__(evlist__open(evlist));
CHECK__(evlist__mmap(evlist, UINT_MAX));
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
index 0ca6a5a53523..b8aff8fb50d8 100644
--- a/tools/perf/tests/pmu-events.c
+++ b/tools/perf/tests/pmu-events.c
@@ -12,6 +12,7 @@
#include "util/evlist.h"
#include "util/expr.h"
#include "util/parse-events.h"
+#include "metricgroup.h"
struct perf_pmu_test_event {
/* used for matching against events from generated pmu-events.c */
@@ -471,9 +472,74 @@ static void expr_failure(const char *msg,
pr_debug("On expression %s\n", pe->metric_expr);
}
+struct metric {
+ struct list_head list;
+ struct metric_ref metric_ref;
+};
+
+static int resolve_metric_simple(struct expr_parse_ctx *pctx,
+ struct list_head *compound_list,
+ struct pmu_events_map *map,
+ const char *metric_name)
+{
+ struct hashmap_entry *cur, *cur_tmp;
+ struct metric *metric, *tmp;
+ size_t bkt;
+ bool all;
+ int rc;
+
+ do {
+ all = true;
+ hashmap__for_each_entry_safe((&pctx->ids), cur, cur_tmp, bkt) {
+ struct metric_ref *ref;
+ struct pmu_event *pe;
+
+ pe = metricgroup__find_metric(cur->key, map);
+ if (!pe)
+ continue;
+
+ if (!strcmp(metric_name, (char *)cur->key)) {
+ pr_warning("Recursion detected for metric %s\n", metric_name);
+ rc = -1;
+ goto out_err;
+ }
+
+ all = false;
+
+ /* The metric key itself needs to go out... */
+ expr__del_id(pctx, cur->key);
+
+ metric = malloc(sizeof(*metric));
+ if (!metric) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
+
+ ref = &metric->metric_ref;
+ ref->metric_name = pe->metric_name;
+ ref->metric_expr = pe->metric_expr;
+ list_add_tail(&metric->list, compound_list);
+
+ rc = expr__find_other(pe->metric_expr, NULL, pctx, 0);
+ if (rc)
+ goto out_err;
+ break; /* The hashmap has been modified, so restart */
+ }
+ } while (!all);
+
+ return 0;
+
+out_err:
+ list_for_each_entry_safe(metric, tmp, compound_list, list)
+ free(metric);
+
+ return rc;
+}
+
static int test_parsing(void)
{
- struct pmu_events_map *cpus_map = perf_pmu__find_map(NULL);
+ struct pmu_events_map *cpus_map = pmu_events_map__find();
struct pmu_events_map *map;
struct pmu_event *pe;
int i, j, k;
@@ -488,7 +554,9 @@ static int test_parsing(void)
break;
j = 0;
for (;;) {
+ struct metric *metric, *tmp;
struct hashmap_entry *cur;
+ LIST_HEAD(compound_list);
size_t bkt;
pe = &map->table[j++];
@@ -504,6 +572,13 @@ static int test_parsing(void)
continue;
}
+ if (resolve_metric_simple(&ctx, &compound_list, map,
+ pe->metric_name)) {
+ expr_failure("Could not resolve metrics", map, pe);
+ ret++;
+ goto exit; /* Don't tolerate errors due to severity */
+ }
+
/*
* Add all ids with a made up value. The value may
* trigger divide by zero when subtracted and so try to
@@ -519,6 +594,11 @@ static int test_parsing(void)
ret++;
}
+ list_for_each_entry_safe(metric, tmp, &compound_list, list) {
+ expr__add_ref(&ctx, &metric->metric_ref);
+ free(metric);
+ }
+
if (expr__parse(&result, &ctx, pe->metric_expr, 0)) {
expr_failure("Parse failed", map, pe);
ret++;
@@ -527,6 +607,7 @@ static int test_parsing(void)
}
}
/* TODO: fail when not ok */
+exit:
return ret == 0 ? TEST_OK : TEST_SKIP;
}
diff --git a/tools/perf/tests/shell/buildid.sh b/tools/perf/tests/shell/buildid.sh
index 416af614bbe0..f05670d1e39e 100755
--- a/tools/perf/tests/shell/buildid.sh
+++ b/tools/perf/tests/shell/buildid.sh
@@ -14,18 +14,56 @@ if ! [ -x "$(command -v cc)" ]; then
exit 2
fi
+# check what we need to test windows binaries
+add_pe=1
+run_pe=1
+if ! perf version --build-options | grep -q 'libbfd: .* on '; then
+ echo "WARNING: perf not built with libbfd. PE binaries will not be tested."
+ add_pe=0
+ run_pe=0
+fi
+if ! which wine > /dev/null; then
+ echo "WARNING: wine not found. PE binaries will not be run."
+ run_pe=0
+fi
+
+# set up wine
+if [ ${run_pe} -eq 1 ]; then
+ wineprefix=$(mktemp -d /tmp/perf.wineprefix.XXX)
+ export WINEPREFIX=${wineprefix}
+ # clear display variables to prevent wine from popping up dialogs
+ unset DISPLAY
+ unset WAYLAND_DISPLAY
+fi
+
ex_md5=$(mktemp /tmp/perf.ex.MD5.XXX)
ex_sha1=$(mktemp /tmp/perf.ex.SHA1.XXX)
+ex_pe=$(dirname $0)/../pe-file.exe
echo 'int main(void) { return 0; }' | cc -Wl,--build-id=sha1 -o ${ex_sha1} -x c -
echo 'int main(void) { return 0; }' | cc -Wl,--build-id=md5 -o ${ex_md5} -x c -
-echo "test binaries: ${ex_sha1} ${ex_md5}"
+echo "test binaries: ${ex_sha1} ${ex_md5} ${ex_pe}"
check()
{
- id=`readelf -n ${1} 2>/dev/null | grep 'Build ID' | awk '{print $3}'`
-
+ case $1 in
+ *.exe)
+ # We don't have a tool that can pull a nicely formatted build-id out of
+ # a PE file, but we can extract the whole section with objcopy and
+ # format it ourselves. The .buildid section is a Debug Directory
+ # containing a CodeView entry:
+ # https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#debug-directory-image-only
+ # https://github.com/dotnet/runtime/blob/da94c022576a5c3bbc0e896f006565905eb137f9/docs/design/specs/PE-COFF.md
+ # The build-id starts at byte 33 and must be rearranged into a GUID.
+ id=`objcopy -O binary --only-section=.buildid $1 /dev/stdout | \
+ cut -c 33-48 | hexdump -ve '/1 "%02x"' | \
+ sed 's@^\(..\)\(..\)\(..\)\(..\)\(..\)\(..\)\(..\)\(..\)\(.*\)0a$@\4\3\2\1\6\5\8\7\9@'`
+ ;;
+ *)
+ id=`readelf -n ${1} 2>/dev/null | grep 'Build ID' | awk '{print $3}'`
+ ;;
+ esac
echo "build id: ${id}"
link=${build_id_dir}/.build-id/${id:0:2}/${id:2}
@@ -50,7 +88,7 @@ check()
exit 1
fi
- ${perf} buildid-cache -l | grep $id
+ ${perf} buildid-cache -l | grep ${id}
if [ $? -ne 0 ]; then
echo "failed: ${id} is not reported by \"perf buildid-cache -l\""
exit 1
@@ -79,16 +117,20 @@ test_record()
{
data=$(mktemp /tmp/perf.data.XXX)
build_id_dir=$(mktemp -d /tmp/perf.debug.XXX)
+ log=$(mktemp /tmp/perf.log.XXX)
perf="perf --buildid-dir ${build_id_dir}"
- ${perf} record --buildid-all -o ${data} ${1}
+ echo "running: perf record $@"
+ ${perf} record --buildid-all -o ${data} $@ &> ${log}
if [ $? -ne 0 ]; then
- echo "failed: record ${1}"
+ echo "failed: record $@"
+ echo "see log: ${log}"
exit 1
fi
- check ${1}
+ check ${@: -1}
+ rm -f ${log}
rm -rf ${build_id_dir}
rm -rf ${data}
}
@@ -96,12 +138,21 @@ test_record()
# add binaries manually via perf buildid-cache -a
test_add ${ex_sha1}
test_add ${ex_md5}
+if [ ${add_pe} -eq 1 ]; then
+ test_add ${ex_pe}
+fi
# add binaries via perf record post processing
test_record ${ex_sha1}
test_record ${ex_md5}
+if [ ${run_pe} -eq 1 ]; then
+ test_record wine ${ex_pe}
+fi
# cleanup
rm ${ex_sha1} ${ex_md5}
+if [ ${run_pe} -eq 1 ]; then
+ rm -r ${wineprefix}
+fi
exit ${err}
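
The sed expression in check() reorders the GUID fields by hand. For reference, the same byte shuffle as a minimal C sketch: the first dword and the two following words of a Windows GUID are stored little-endian, while the trailing eight bytes keep their order.

/* Minimal sketch of the GUID byte shuffle done by the sed above. */
#include <stdio.h>

static void guid_to_hex(const unsigned char g[16], char out[33])
{
	static const int order[16] = { 3, 2, 1, 0, 5, 4, 7, 6,
				       8, 9, 10, 11, 12, 13, 14, 15 };
	int i;

	for (i = 0; i < 16; i++)
		sprintf(out + 2 * i, "%02x", g[order[i]]);
}

int main(void)
{
	const unsigned char g[16] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc,
				      0xde, 0xf0, 1, 2, 3, 4, 5, 6, 7, 8 };
	char hex[33];

	guid_to_hex(g, hex);
	puts(hex);	/* prints 78563412bc9af0de0102030405060708 */
	return 0;
}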
diff --git a/tools/perf/tests/shell/daemon.sh b/tools/perf/tests/shell/daemon.sh
index 58984380b211..45fc24af5b07 100755
--- a/tools/perf/tests/shell/daemon.sh
+++ b/tools/perf/tests/shell/daemon.sh
@@ -98,6 +98,23 @@ check_line_other()
fi
}
+daemon_exit()
+{
+ local config=$1
+
+ local line=`perf daemon --config ${config} -x: | head -1`
+ local pid=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $1 }'`
+
+ # Reset trap handler.
+ trap - SIGINT SIGTERM
+
+ # stop daemon
+ perf daemon stop --config ${config}
+
+ # ... and wait for the pid to go away
+ tail --pid=${pid} -f /dev/null
+}
+
daemon_start()
{
local config=$1
@@ -105,29 +122,24 @@ daemon_start()
perf daemon start --config ${config}
+ # Clean up daemon if interrupted.
+ trap "echo 'FAILED: Signal caught'; daemon_exit ${config}; exit 1" SIGINT SIGTERM
+
# wait for the session to ping
local state="FAIL"
+ local retries=0
while [ "${state}" != "OK" ]; do
state=`perf daemon ping --config ${config} --session ${session} | awk '{ print $1 }'`
sleep 0.05
+ retries=$((${retries} +1))
+ if [ ${retries} -ge 600 ]; then
+ echo "FAILED: Timeout waiting for daemon to ping"
+ daemon_exit ${config}
+ exit 1
+ fi
done
}
-daemon_exit()
-{
- local base=$1
- local config=$2
-
- local line=`perf daemon --config ${config} -x: | head -1`
- local pid=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $1 }'`
-
- # stop daemon
- perf daemon stop --config ${config}
-
- # ... and wait for the pid to go away
- tail --pid=${pid} -f /dev/null
-}
-
test_list()
{
echo "test daemon list"
@@ -171,7 +183,7 @@ EOF
${base}/session-time/ack "0"
# stop daemon
- daemon_exit ${base} ${config}
+ daemon_exit ${config}
rm -rf ${base}
rm -f ${config}
@@ -288,7 +300,7 @@ EOF
done
# stop daemon
- daemon_exit ${base} ${config}
+ daemon_exit ${config}
rm -rf ${base}
rm -f ${config}
@@ -333,7 +345,7 @@ EOF
fi
# stop daemon
- daemon_exit ${base} ${config}
+ daemon_exit ${config}
# check that sessions are gone
if [ -d "/proc/${pid_size}" ]; then
@@ -374,7 +386,7 @@ EOF
perf daemon signal --config ${config}
# stop daemon
- daemon_exit ${base} ${config}
+ daemon_exit ${config}
# count is 2 perf.data for signals and 1 for perf record finished
count=`ls ${base}/session-test/ | grep perf.data | wc -l`
@@ -420,7 +432,7 @@ EOF
fi
# stop daemon
- daemon_exit ${base} ${config}
+ daemon_exit ${config}
rm -rf ${base}
rm -f ${config}
@@ -457,7 +469,7 @@ EOF
fi
# stop daemon
- daemon_exit ${base} ${config}
+ daemon_exit ${config}
rm -rf ${base}
rm -f ${config}
diff --git a/tools/perf/tests/shell/stat+csv_summary.sh b/tools/perf/tests/shell/stat+csv_summary.sh
new file mode 100755
index 000000000000..5571ff75eb42
--- /dev/null
+++ b/tools/perf/tests/shell/stat+csv_summary.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+# perf stat csv summary test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+#
+# 1.001364330 9224197 cycles 8012885033 100.00
+# summary 9224197 cycles 8012885033 100.00
+#
+perf stat -e cycles -x' ' -I1000 --interval-count 1 --summary 2>&1 | \
+grep -e summary | \
+while read summary num event run pct
+do
+ if [ $summary != "summary" ]; then
+ exit 1
+ fi
+done
+
+#
+# 1.001360298 9148534 cycles 8012853854 100.00
+#9148534 cycles 8012853854 100.00
+#
+perf stat -e cycles -x' ' -I1000 --interval-count 1 --summary --no-csv-summary 2>&1 | \
+grep -e summary | \
+while read num event run pct
+do
+ exit 1
+done
+
+exit 0
diff --git a/tools/perf/tests/shell/stat+shadow_stat.sh b/tools/perf/tests/shell/stat+shadow_stat.sh
index ebebd3596cf9..e6e35fc6c882 100755
--- a/tools/perf/tests/shell/stat+shadow_stat.sh
+++ b/tools/perf/tests/shell/stat+shadow_stat.sh
@@ -7,6 +7,9 @@ set -e
# skip if system-wide mode is forbidden
perf stat -a true > /dev/null 2>&1 || exit 2
+# skip if on hybrid platform
+perf stat -a -e cycles sleep 1 2>&1 | grep -e cpu_core && exit 2
+
test_global_aggr()
{
perf stat -a --no-big-num -e cycles,instructions sleep 1 2>&1 | \
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
new file mode 100755
index 000000000000..22eb31e48ca7
--- /dev/null
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+# perf stat --bpf-counters test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# check whether $2 is within +/- 10% of $1
+compare_number()
+{
+ first_num=$1
+ second_num=$2
+
+ # upper bound is first_num * 110%
+ upper=$(( $first_num + $first_num / 10 ))
+ # lower bound is first_num * 90%
+ lower=$(( $first_num - $first_num / 10 ))
+
+ if [ $second_num -gt $upper ] || [ $second_num -lt $lower ]; then
+ echo "The difference between $first_num and $second_num are greater than 10%."
+ exit 1
+ fi
+}
+
+# skip if --bpf-counters is not supported
+perf stat --bpf-counters true > /dev/null 2>&1 || exit 2
+
+base_cycles=$(perf stat --no-big-num -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
+bpf_cycles=$(perf stat --no-big-num --bpf-counters -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
+
+compare_number $base_cycles $bpf_cycles
+exit 0
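
For reference, compare_number() translated to C with the same integer-only arithmetic as the shell above (a hypothetical helper, not part of the patch):

/* Check that b is within +/-10% of a, using integer math only. */
static int within_ten_percent(long long a, long long b)
{
	long long upper = a + a / 10;	/* a * 110% */
	long long lower = a - a / 10;	/* a *  90% */

	return b >= lower && b <= upper;
}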
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index 3ebaa758df77..62c0ec21aaa8 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -18,6 +18,7 @@
#include "record.h"
#include "tests.h"
#include "util/mmap.h"
+#include "pmu.h"
static int spin_sleep(void)
{
@@ -371,7 +372,10 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
cpu_clocks_evsel = evlist__last(evlist);
/* Second event */
- err = parse_events(evlist, "cycles:u", NULL);
+ if (perf_pmu__has_hybrid())
+ err = parse_events(evlist, "cpu_core/cycles/u", NULL);
+ else
+ err = parse_events(evlist, "cycles:u", NULL);
if (err) {
pr_debug("Failed to parse event cycles:u\n");
goto out_err;
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index 74748ed75b2c..ec4e3b21b831 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -8,6 +8,7 @@
#include "session.h"
#include "evlist.h"
#include "debug.h"
+#include "pmu.h"
#include <linux/err.h>
#define TEMPL "/tmp/perf-test-XXXXXX"
@@ -40,8 +41,16 @@ static int session_write_header(char *path)
session = perf_session__new(&data, false, NULL);
TEST_ASSERT_VAL("can't get session", !IS_ERR(session));
- session->evlist = evlist__new_default();
- TEST_ASSERT_VAL("can't get evlist", session->evlist);
+ if (!perf_pmu__has_hybrid()) {
+ session->evlist = evlist__new_default();
+ TEST_ASSERT_VAL("can't get evlist", session->evlist);
+ } else {
+ struct parse_events_error err;
+
+ session->evlist = evlist__new();
+ TEST_ASSERT_VAL("can't get evlist", session->evlist);
+ parse_events(session->evlist, "cpu_core/cycles/", &err);
+ }
perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
perf_header__set_feat(&session->header, HEADER_NRCPUS);
@@ -80,7 +89,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
* CPU 1 is on core_id 1 and physical_package_id 3
*
* Core_id and physical_package_id are platform and architecture
- * dependend and might have higher numbers than the CPU id.
+ * dependent and might have higher numbers than the CPU id.
* This actually depends on the configuration.
*
* In this case process_cpu_topology() prints error message:
diff --git a/tools/perf/trace/beauty/fsconfig.sh b/tools/perf/trace/beauty/fsconfig.sh
index 83fb24df05c9..bc6ef7bb7a5f 100755
--- a/tools/perf/trace/beauty/fsconfig.sh
+++ b/tools/perf/trace/beauty/fsconfig.sh
@@ -10,8 +10,7 @@ fi
linux_mount=${linux_header_dir}/mount.h
printf "static const char *fsconfig_cmds[] = {\n"
-regex='^[[:space:]]*+FSCONFIG_([[:alnum:]_]+)[[:space:]]*=[[:space:]]*([[:digit:]]+)[[:space:]]*,[[:space:]]*.*'
-egrep $regex ${linux_mount} | \
- sed -r "s/$regex/\2 \1/g" | \
- xargs printf "\t[%s] = \"%s\",\n"
+ms='[[:space:]]*'
+sed -nr "s/^${ms}FSCONFIG_([[:alnum:]_]+)${ms}=${ms}([[:digit:]]+)${ms},.*/\t[\2] = \"\1\",/p" \
+ ${linux_mount}
printf "};\n"
diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
index 385894b4a8bb..b8fc5c53ba6f 100644
--- a/tools/perf/trace/beauty/include/linux/socket.h
+++ b/tools/perf/trace/beauty/include/linux/socket.h
@@ -85,7 +85,7 @@ struct mmsghdr {
/*
* POSIX 1003.1g - ancillary data object information
- * Ancillary data consits of a sequence of pairs of
+ * Ancillary data consists of a sequence of pairs of
* (cmsghdr, cmsg_data[])
*/
diff --git a/tools/perf/trace/beauty/tracepoints/x86_msr.sh b/tools/perf/trace/beauty/tracepoints/x86_msr.sh
index 27ee1ea1fe94..9b0614a87831 100755
--- a/tools/perf/trace/beauty/tracepoints/x86_msr.sh
+++ b/tools/perf/trace/beauty/tracepoints/x86_msr.sh
@@ -15,7 +15,7 @@ x86_msr_index=${arch_x86_header_dir}/msr-index.h
printf "static const char *x86_MSRs[] = {\n"
regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MSR_([[:alnum:]][[:alnum:]_]+)[[:space:]]+(0x00000[[:xdigit:]]+)[[:space:]]*.*'
-egrep $regex ${x86_msr_index} | egrep -v 'MSR_(ATOM|P[46]|IA32_(TSCDEADLINE|UCODE_REV)|IDT_FCR4)' | \
+egrep $regex ${x86_msr_index} | egrep -v 'MSR_(ATOM|P[46]|IA32_(TSC_DEADLINE|UCODE_REV)|IDT_FCR4)' | \
sed -r "s/$regex/\2 \1/g" | sort -n | \
xargs printf "\t[%s] = \"%s\",\n"
printf "};\n\n"
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 35b82caf8090..ad0a70f0edaf 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -381,6 +381,25 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
return true;
}
+#define SYM_TITLE_MAX_SIZE (PATH_MAX + 64)
+
+static void annotate_browser__show_full_location(struct ui_browser *browser)
+{
+ struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
+ struct disasm_line *cursor = disasm_line(ab->selection);
+ struct annotation_line *al = &cursor->al;
+
+ if (al->offset != -1)
+ ui_helpline__puts("Only available for source code lines.");
+ else if (al->fileloc == NULL)
+ ui_helpline__puts("No source file location.");
+ else {
+ char help_line[SYM_TITLE_MAX_SIZE];
+ sprintf(help_line, "Source file location: %s", al->fileloc);
+ ui_helpline__puts(help_line);
+ }
+}
+
static void ui_browser__init_asm_mode(struct ui_browser *browser)
{
struct annotation *notes = browser__annotation(browser);
@@ -388,8 +407,6 @@ static void ui_browser__init_asm_mode(struct ui_browser *browser)
browser->nr_entries = notes->nr_asm_entries;
}
-#define SYM_TITLE_MAX_SIZE (PATH_MAX + 64)
-
static int sym_title(struct symbol *sym, struct map *map, char *title,
size_t sz, int percent_type)
{
@@ -398,7 +415,7 @@ static int sym_title(struct symbol *sym, struct map *map, char *title,
}
/*
- * This can be called from external jumps, i.e. jumps from one functon
+ * This can be called from external jumps, i.e. jumps from one function
* to another, like from the kernel's entry_SYSCALL_64 function to the
* swapgs_restore_regs_and_return_to_usermode() function.
*
@@ -747,6 +764,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
"c Show min/max cycle\n"
"/ Search string\n"
"k Toggle line numbers\n"
+ "l Show full source file location\n"
"P Print to [symbol_name].annotation file.\n"
"r Run available scripts\n"
"p Toggle percent type [local/global]\n"
@@ -760,6 +778,9 @@ static int annotate_browser__run(struct annotate_browser *browser,
case 'k':
notes->options->show_linenr = !notes->options->show_linenr;
continue;
+ case 'l':
+ annotate_browser__show_full_location(&browser->b);
+ continue;
case 'H':
nd = browser->curr_hot;
break;
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 3b9818ee9546..b72ee6822222 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -117,7 +117,7 @@ static void hist_browser__update_rows(struct hist_browser *hb)
browser->rows -= browser->extra_title_lines;
/*
* Verify if we were at the last line and that line isn't
- * visibe because we now show the header line(s).
+ * visible because we now show the header line(s).
*/
index_row = browser->index - browser->top_idx;
if (index_row >= browser->rows)
@@ -682,6 +682,7 @@ static int hist_browser__handle_hotkey(struct hist_browser *browser, bool warn_l
switch (key) {
case K_TIMER: {
struct hist_browser_timer *hbt = browser->hbt;
+ struct evsel *evsel = hists_to_evsel(browser->hists);
u64 nr_entries;
WARN_ON_ONCE(!hbt);
@@ -696,10 +697,10 @@ static int hist_browser__handle_hotkey(struct hist_browser *browser, bool warn_l
ui_browser__update_nr_entries(&browser->b, nr_entries);
if (warn_lost_event &&
- (browser->hists->stats.nr_lost_warned !=
- browser->hists->stats.nr_events[PERF_RECORD_LOST])) {
- browser->hists->stats.nr_lost_warned =
- browser->hists->stats.nr_events[PERF_RECORD_LOST];
+ (evsel->evlist->stats.nr_lost_warned !=
+ evsel->evlist->stats.nr_events[PERF_RECORD_LOST])) {
+ evsel->evlist->stats.nr_lost_warned =
+ evsel->evlist->stats.nr_events[PERF_RECORD_LOST];
ui_browser__warn_lost_events(&browser->b);
}
@@ -3416,7 +3417,7 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
struct evsel *evsel = list_entry(entry, struct evsel, core.node);
struct hists *hists = evsel__hists(evsel);
bool current_entry = ui_browser__is_current_entry(browser, row);
- unsigned long nr_events = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ unsigned long nr_events = hists->stats.nr_samples;
const char *ev_name = evsel__name(evsel);
char bf[256], unit;
const char *warn = " ";
@@ -3432,7 +3433,7 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
for_each_group_member(pos, evsel) {
struct hists *pos_hists = evsel__hists(pos);
- nr_events += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ nr_events += pos_hists->stats.nr_samples;
}
}
@@ -3441,7 +3442,7 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
unit, unit == ' ' ? "" : " ", ev_name);
ui_browser__printf(browser, "%s", bf);
- nr_events = hists->stats.nr_events[PERF_RECORD_LOST];
+ nr_events = evsel->evlist->stats.nr_events[PERF_RECORD_LOST];
if (nr_events != 0) {
menu->lost_events = true;
if (!current_entry)
@@ -3647,7 +3648,7 @@ static int block_hists_browser__title(struct hist_browser *browser, char *bf,
{
struct hists *hists = evsel__hists(browser->block_evsel);
const char *evname = evsel__name(browser->block_evsel);
- unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ unsigned long nr_samples = hists->stats.nr_samples;
int ret;
ret = scnprintf(bf, size, "# Samples: %lu", nr_samples);
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 2ab2af4d4849..f36270485168 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -897,10 +897,12 @@ out:
return ret;
}
-size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
+size_t events_stats__fprintf(struct events_stats *stats, FILE *fp,
+ bool skip_empty)
{
int i;
size_t ret = 0;
+ u32 total = stats->nr_events[0];
for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
const char *name;
@@ -908,8 +910,17 @@ size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
name = perf_event__name(i);
if (!strcmp(name, "UNKNOWN"))
continue;
+ if (skip_empty && !stats->nr_events[i])
+ continue;
- ret += fprintf(fp, "%16s events: %10d\n", name, stats->nr_events[i]);
+ if (i && total) {
+ ret += fprintf(fp, "%16s events: %10d (%4.1f%%)\n",
+ name, stats->nr_events[i],
+ 100.0 * stats->nr_events[i] / total);
+ } else {
+ ret += fprintf(fp, "%16s events: %10d\n",
+ name, stats->nr_events[i]);
+ }
}
return ret;
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index e3e12f9d4733..8c0d9f368ebc 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -10,6 +10,7 @@ perf-y += db-export.o
perf-y += env.o
perf-y += event.o
perf-y += evlist.o
+perf-y += evlist-hybrid.o
perf-y += sideband_evlist.o
perf-y += evsel.o
perf-y += evsel_fprintf.o
@@ -23,6 +24,7 @@ perf-y += llvm-utils.o
perf-y += mmap.o
perf-y += memswap.o
perf-y += parse-events.o
+perf-y += parse-events-hybrid.o
perf-y += perf_regs.o
perf-y += path.o
perf-y += print_binary.o
@@ -69,6 +71,7 @@ perf-y += parse-events-bison.o
perf-y += pmu.o
perf-y += pmu-flex.o
perf-y += pmu-bison.o
+perf-y += pmu-hybrid.o
perf-y += trace-event-read.o
perf-y += trace-event-info.o
perf-y += trace-event-scripting.o
@@ -102,6 +105,7 @@ perf-y += rwsem.o
perf-y += thread-stack.o
perf-y += spark.o
perf-y += topdown.o
+perf-y += iostat.o
perf-y += stream.o
perf-$(CONFIG_AUXTRACE) += auxtrace.o
perf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
@@ -164,6 +168,7 @@ perf-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o
perf-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o
perf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
+perf-y += data-convert-json.o
perf-y += scripting-engines/
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index e60841b86d27..abe1499a9164 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1161,6 +1161,7 @@ struct annotate_args {
s64 offset;
char *line;
int line_nr;
+ char *fileloc;
};
static void annotation_line__init(struct annotation_line *al,
@@ -1170,6 +1171,7 @@ static void annotation_line__init(struct annotation_line *al,
al->offset = args->offset;
al->line = strdup(args->line);
al->line_nr = args->line_nr;
+ al->fileloc = args->fileloc;
al->data_nr = nr;
}
@@ -1366,7 +1368,6 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
{
struct disasm_line *dl = container_of(al, struct disasm_line, al);
static const char *prev_line;
- static const char *prev_color;
if (al->offset != -1) {
double max_percent = 0.0;
@@ -1405,20 +1406,6 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
color = get_percent_color(max_percent);
- /*
- * Also color the filename and line if needed, with
- * the same color than the percentage. Don't print it
- * twice for close colored addr with the same filename:line
- */
- if (al->path) {
- if (!prev_line || strcmp(prev_line, al->path)
- || color != prev_color) {
- color_fprintf(stdout, color, " %s", al->path);
- prev_line = al->path;
- prev_color = color;
- }
- }
-
for (i = 0; i < nr_percent; i++) {
struct annotation_data *data = &al->data[i];
double percent;
@@ -1439,6 +1426,19 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
printf(" : ");
disasm_line__print(dl, start, addr_fmt_width);
+
+ /*
+ * Also color the filename and line if needed, with
+ * the same color than the percentage. Don't print it
+ * twice for close colored addr with the same filename:line
+ */
+ if (al->path) {
+ if (!prev_line || strcmp(prev_line, al->path)) {
+ color_fprintf(stdout, color, " // %s", al->path);
+ prev_line = al->path;
+ }
+ }
+
printf("\n");
} else if (max_lines && printed >= max_lines)
return 1;
@@ -1454,7 +1454,7 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
if (!*al->line)
printf(" %*s:\n", width, " ");
else
- printf(" %*s: %*s %s\n", width, " ", addr_fmt_width, " ", al->line);
+ printf(" %*s: %-*d %s\n", width, " ", addr_fmt_width, al->line_nr, al->line);
}
return 0;
@@ -1482,7 +1482,7 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
*/
static int symbol__parse_objdump_line(struct symbol *sym,
struct annotate_args *args,
- char *parsed_line, int *line_nr)
+ char *parsed_line, int *line_nr, char **fileloc)
{
struct map *map = args->ms.map;
struct annotation *notes = symbol__annotation(sym);
@@ -1494,6 +1494,7 @@ static int symbol__parse_objdump_line(struct symbol *sym,
/* /filename:linenr ? Save line number and ignore. */
if (regexec(&file_lineno, parsed_line, 2, match, 0) == 0) {
*line_nr = atoi(parsed_line + match[1].rm_so);
+ *fileloc = strdup(parsed_line);
return 0;
}
@@ -1513,6 +1514,7 @@ static int symbol__parse_objdump_line(struct symbol *sym,
args->offset = offset;
args->line = parsed_line;
args->line_nr = *line_nr;
+ args->fileloc = *fileloc;
args->ms.sym = sym;
dl = disasm_line__new(args);
@@ -1807,6 +1809,7 @@ static int symbol__disassemble_bpf(struct symbol *sym,
args->offset = -1;
args->line = strdup(srcline);
args->line_nr = 0;
+ args->fileloc = NULL;
args->ms.sym = sym;
dl = disasm_line__new(args);
if (dl) {
@@ -1818,6 +1821,7 @@ static int symbol__disassemble_bpf(struct symbol *sym,
args->offset = pc;
args->line = buf + prev_buf_size;
args->line_nr = 0;
+ args->fileloc = NULL;
args->ms.sym = sym;
dl = disasm_line__new(args);
if (dl)
@@ -1852,6 +1856,7 @@ symbol__disassemble_bpf_image(struct symbol *sym,
args->offset = -1;
args->line = strdup("to be implemented");
args->line_nr = 0;
+ args->fileloc = NULL;
dl = disasm_line__new(args);
if (dl)
annotation_line__add(&dl->al, &notes->src->source);
@@ -1933,6 +1938,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
bool delete_extract = false;
bool decomp = false;
int lineno = 0;
+ char *fileloc = NULL;
int nline;
char *line;
size_t line_len;
@@ -2060,7 +2066,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
* See disasm_line__new() and struct disasm_line::line_nr.
*/
if (symbol__parse_objdump_line(sym, args, expanded_line,
- &lineno) < 0)
+ &lineno, &fileloc) < 0)
break;
nline++;
}
@@ -3144,6 +3150,10 @@ static int annotation__config(const char *var, const char *value, void *data)
opt->use_offset = perf_config_bool("use_offset", value);
} else if (!strcmp(var, "annotate.disassembler_style")) {
opt->disassembler_style = value;
+ } else if (!strcmp(var, "annotate.demangle")) {
+ symbol_conf.demangle = perf_config_bool("demangle", value);
+ } else if (!strcmp(var, "annotate.demangle_kernel")) {
+ symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
} else {
pr_debug("%s variable unknown, ignoring...", var);
}
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 096cdaf21b01..3757416bcf46 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -84,6 +84,7 @@ struct annotation_options {
print_lines,
full_path,
show_linenr,
+ show_fileloc,
show_nr_jumps,
show_minmax_cycle,
show_asm_raw,
@@ -136,6 +137,7 @@ struct annotation_line {
s64 offset;
char *line;
int line_nr;
+ char *fileloc;
int jump_sources;
float ipc;
u64 cycles;
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 9087f1bffd3d..fbb3c4057c30 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -671,7 +671,7 @@ int bpf__probe(struct bpf_object *obj)
* After probing, let's consider prologue, which
* adds program fetcher to BPF programs.
*
- * hook_load_preprocessorr() hooks pre-processor
+ * hook_load_preprocessor() hooks pre-processor
* to bpf_program, let it generate prologue
* dynamically during loading.
*/
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index 04f89120b323..ddb52f748c8e 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -5,6 +5,7 @@
#include <assert.h>
#include <limits.h>
#include <unistd.h>
+#include <sys/file.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
@@ -12,14 +13,24 @@
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
+#include <api/fs/fs.h>
+#include <perf/bpf_perf.h>
#include "bpf_counter.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
+#include "evlist.h"
#include "target.h"
+#include "cpumap.h"
+#include "thread_map.h"
#include "bpf_skel/bpf_prog_profiler.skel.h"
+#include "bpf_skel/bperf_u.h"
+#include "bpf_skel/bperf_leader.skel.h"
+#include "bpf_skel/bperf_follower.skel.h"
+
+#define ATTR_MAP_SIZE 16
static inline void *u64_to_ptr(__u64 ptr)
{
@@ -204,6 +215,17 @@ static int bpf_program_profiler__enable(struct evsel *evsel)
return 0;
}
+static int bpf_program_profiler__disable(struct evsel *evsel)
+{
+ struct bpf_counter *counter;
+
+ list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
+ assert(counter->skel != NULL);
+ bpf_prog_profiler_bpf__detach(counter->skel);
+ }
+ return 0;
+}
+
static int bpf_program_profiler__read(struct evsel *evsel)
{
// perf_cpu_map uses /sys/devices/system/cpu/online
@@ -269,22 +291,527 @@ static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu,
struct bpf_counter_ops bpf_program_profiler_ops = {
.load = bpf_program_profiler__load,
.enable = bpf_program_profiler__enable,
+ .disable = bpf_program_profiler__disable,
.read = bpf_program_profiler__read,
.destroy = bpf_program_profiler__destroy,
.install_pe = bpf_program_profiler__install_pe,
};
+static __u32 bpf_link_get_id(int fd)
+{
+ struct bpf_link_info link_info = {0};
+ __u32 link_info_len = sizeof(link_info);
+
+ bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
+ return link_info.id;
+}
+
+static __u32 bpf_link_get_prog_id(int fd)
+{
+ struct bpf_link_info link_info = {0};
+ __u32 link_info_len = sizeof(link_info);
+
+ bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
+ return link_info.prog_id;
+}
+
+static __u32 bpf_map_get_id(int fd)
+{
+ struct bpf_map_info map_info = {0};
+ __u32 map_info_len = sizeof(map_info);
+
+ bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
+ return map_info.id;
+}
+
+static bool bperf_attr_map_compatible(int attr_map_fd)
+{
+ struct bpf_map_info map_info = {0};
+ __u32 map_info_len = sizeof(map_info);
+ int err;
+
+ err = bpf_obj_get_info_by_fd(attr_map_fd, &map_info, &map_info_len);
+
+ if (err)
+ return false;
+ return (map_info.key_size == sizeof(struct perf_event_attr)) &&
+ (map_info.value_size == sizeof(struct perf_event_attr_map_entry));
+}
+
+static int bperf_lock_attr_map(struct target *target)
+{
+ char path[PATH_MAX];
+ int map_fd, err;
+
+ if (target->attr_map) {
+ scnprintf(path, PATH_MAX, "%s", target->attr_map);
+ } else {
+ scnprintf(path, PATH_MAX, "%s/fs/bpf/%s", sysfs__mountpoint(),
+ BPF_PERF_DEFAULT_ATTR_MAP_PATH);
+ }
+
+ if (access(path, F_OK)) {
+ map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
+ sizeof(struct perf_event_attr),
+ sizeof(struct perf_event_attr_map_entry),
+ ATTR_MAP_SIZE, 0);
+ if (map_fd < 0)
+ return -1;
+
+ err = bpf_obj_pin(map_fd, path);
+ if (err) {
+ /* someone pinned the map in parallel? */
+ close(map_fd);
+ map_fd = bpf_obj_get(path);
+ if (map_fd < 0)
+ return -1;
+ }
+ } else {
+ map_fd = bpf_obj_get(path);
+ if (map_fd < 0)
+ return -1;
+ }
+
+ if (!bperf_attr_map_compatible(map_fd)) {
+ close(map_fd);
+ return -1;
+
+ }
+ err = flock(map_fd, LOCK_EX);
+ if (err) {
+ close(map_fd);
+ return -1;
+ }
+ return map_fd;
+}
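
bperf_lock_attr_map() hands out an flock()-ed fd to a hash map pinned in bpffs and keyed by struct perf_event_attr; the value layout it insists on is defined in tools/lib/perf/include/perf/bpf_perf.h (added elsewhere in this series, see the diffstat). A sketch of that entry, assumed from the link_id/diff_map_id usage below:

/* Assumed layout of the pinned map's value, per perf/bpf_perf.h:
 * one leader bpf_link plus the map holding its latest counter deltas. */
struct perf_event_attr_map_entry {
	__u32 link_id;		/* id of the bpf_link pinning the leader prog */
	__u32 diff_map_id;	/* id of the leader's diff_readings map */
};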
+
+/* trigger the leader program on a cpu */
+static int bperf_trigger_reading(int prog_fd, int cpu)
+{
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .ctx_in = NULL,
+ .ctx_size_in = 0,
+ .flags = BPF_F_TEST_RUN_ON_CPU,
+ .cpu = cpu,
+ .retval = 0,
+ );
+
+ return bpf_prog_test_run_opts(prog_fd, &opts);
+}
+
+static int bperf_check_target(struct evsel *evsel,
+ struct target *target,
+ enum bperf_filter_type *filter_type,
+ __u32 *filter_entry_cnt)
+{
+ if (evsel->leader->core.nr_members > 1) {
+ pr_err("bpf managed perf events do not yet support groups.\n");
+ return -1;
+ }
+
+ /* determine filter type based on target */
+ if (target->system_wide) {
+ *filter_type = BPERF_FILTER_GLOBAL;
+ *filter_entry_cnt = 1;
+ } else if (target->cpu_list) {
+ *filter_type = BPERF_FILTER_CPU;
+ *filter_entry_cnt = perf_cpu_map__nr(evsel__cpus(evsel));
+ } else if (target->tid) {
+ *filter_type = BPERF_FILTER_PID;
+ *filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
+ } else if (target->pid || evsel->evlist->workload.pid != -1) {
+ *filter_type = BPERF_FILTER_TGID;
+ *filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
+ } else {
+ pr_err("bpf managed perf events do not yet support these targets.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static struct perf_cpu_map *all_cpu_map;
+
+static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
+ struct perf_event_attr_map_entry *entry)
+{
+ struct bperf_leader_bpf *skel = bperf_leader_bpf__open();
+ int link_fd, diff_map_fd, err;
+ struct bpf_link *link = NULL;
+
+ if (!skel) {
+ pr_err("Failed to open leader skeleton\n");
+ return -1;
+ }
+
+ bpf_map__resize(skel->maps.events, libbpf_num_possible_cpus());
+ err = bperf_leader_bpf__load(skel);
+ if (err) {
+ pr_err("Failed to load leader skeleton\n");
+ goto out;
+ }
+
+ err = -1;
+ link = bpf_program__attach(skel->progs.on_switch);
+ if (!link) {
+ pr_err("Failed to attach leader program\n");
+ goto out;
+ }
+
+ link_fd = bpf_link__fd(link);
+ diff_map_fd = bpf_map__fd(skel->maps.diff_readings);
+ entry->link_id = bpf_link_get_id(link_fd);
+ entry->diff_map_id = bpf_map_get_id(diff_map_fd);
+ err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, entry, BPF_ANY);
+ assert(err == 0);
+
+ evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry->link_id);
+ assert(evsel->bperf_leader_link_fd >= 0);
+
+ /*
+ * save leader_skel for install_pe, which is called within
+ * following evsel__open_per_cpu call
+ */
+ evsel->leader_skel = skel;
+ evsel__open_per_cpu(evsel, all_cpu_map, -1);
+
+out:
+ bperf_leader_bpf__destroy(skel);
+ bpf_link__destroy(link);
+ return err;
+}
+
+static int bperf__load(struct evsel *evsel, struct target *target)
+{
+ struct perf_event_attr_map_entry entry = {0xffffffff, 0xffffffff};
+ int attr_map_fd, diff_map_fd = -1, err;
+ enum bperf_filter_type filter_type;
+ __u32 filter_entry_cnt, i;
+
+ if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
+ return -1;
+
+ if (!all_cpu_map) {
+ all_cpu_map = perf_cpu_map__new(NULL);
+ if (!all_cpu_map)
+ return -1;
+ }
+
+ evsel->bperf_leader_prog_fd = -1;
+ evsel->bperf_leader_link_fd = -1;
+
+ /*
+ * Step 1: hold a fd on the leader program and the bpf_link; if
+ * the program is already gone, reload it.
+ * Use flock() to ensure exclusive access to the perf_event_attr
+ * map.
+ */
+ attr_map_fd = bperf_lock_attr_map(target);
+ if (attr_map_fd < 0) {
+ pr_err("Failed to lock perf_event_attr map\n");
+ return -1;
+ }
+
+ err = bpf_map_lookup_elem(attr_map_fd, &evsel->core.attr, &entry);
+ if (err) {
+ err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, &entry, BPF_ANY);
+ if (err)
+ goto out;
+ }
+
+ evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
+ if (evsel->bperf_leader_link_fd < 0 &&
+ bperf_reload_leader_program(evsel, attr_map_fd, &entry))
+ goto out;
+
+ /*
+ * The bpf_link holds reference to the leader program, and the
+ * leader program holds reference to the maps. Therefore, if
+ * link_id is valid, diff_map_id should also be valid.
+ */
+ evsel->bperf_leader_prog_fd = bpf_prog_get_fd_by_id(
+ bpf_link_get_prog_id(evsel->bperf_leader_link_fd));
+ assert(evsel->bperf_leader_prog_fd >= 0);
+
+ diff_map_fd = bpf_map_get_fd_by_id(entry.diff_map_id);
+ assert(diff_map_fd >= 0);
+
+ /*
+ * bperf uses BPF_PROG_TEST_RUN to get accurate readings. Check
+ * whether the kernel supports it.
+ */
+ err = bperf_trigger_reading(evsel->bperf_leader_prog_fd, 0);
+ if (err) {
+ pr_err("The kernel does not support test_run for raw_tp BPF programs.\n"
+ "Therefore, --use-bpf might show inaccurate readings\n");
+ goto out;
+ }
+
+ /* Step 2: load the follower skeleton */
+ evsel->follower_skel = bperf_follower_bpf__open();
+ if (!evsel->follower_skel) {
+ pr_err("Failed to open follower skeleton\n");
+ goto out;
+ }
+
+ /* attach fexit program to the leader program */
+ bpf_program__set_attach_target(evsel->follower_skel->progs.fexit_XXX,
+ evsel->bperf_leader_prog_fd, "on_switch");
+
+ /* connect to leader diff_reading map */
+ bpf_map__reuse_fd(evsel->follower_skel->maps.diff_readings, diff_map_fd);
+
+ /* set up reading map */
+ bpf_map__set_max_entries(evsel->follower_skel->maps.accum_readings,
+ filter_entry_cnt);
+ /* set up follower filter based on target */
+ bpf_map__set_max_entries(evsel->follower_skel->maps.filter,
+ filter_entry_cnt);
+ err = bperf_follower_bpf__load(evsel->follower_skel);
+ if (err) {
+ pr_err("Failed to load follower skeleton\n");
+ bperf_follower_bpf__destroy(evsel->follower_skel);
+ evsel->follower_skel = NULL;
+ goto out;
+ }
+
+ for (i = 0; i < filter_entry_cnt; i++) {
+ int filter_map_fd;
+ __u32 key;
+
+ if (filter_type == BPERF_FILTER_PID ||
+ filter_type == BPERF_FILTER_TGID)
+ key = evsel->core.threads->map[i].pid;
+ else if (filter_type == BPERF_FILTER_CPU)
+ key = evsel->core.cpus->map[i];
+ else
+ break;
+
+ filter_map_fd = bpf_map__fd(evsel->follower_skel->maps.filter);
+ bpf_map_update_elem(filter_map_fd, &key, &i, BPF_ANY);
+ }
+
+ evsel->follower_skel->bss->type = filter_type;
+
+ err = bperf_follower_bpf__attach(evsel->follower_skel);
+
+out:
+ if (err && evsel->bperf_leader_link_fd >= 0)
+ close(evsel->bperf_leader_link_fd);
+ if (err && evsel->bperf_leader_prog_fd >= 0)
+ close(evsel->bperf_leader_prog_fd);
+ if (diff_map_fd >= 0)
+ close(diff_map_fd);
+
+ flock(attr_map_fd, LOCK_UN);
+ close(attr_map_fd);
+
+ return err;
+}
+
+static int bperf__install_pe(struct evsel *evsel, int cpu, int fd)
+{
+ struct bperf_leader_bpf *skel = evsel->leader_skel;
+
+ return bpf_map_update_elem(bpf_map__fd(skel->maps.events),
+ &cpu, &fd, BPF_ANY);
+}
+
+/*
+ * Trigger the leader prog on each cpu, so the accum_readings map gets
+ * the latest readings.
+ */
+static int bperf_sync_counters(struct evsel *evsel)
+{
+ int num_cpu, i, cpu;
+
+ num_cpu = all_cpu_map->nr;
+ for (i = 0; i < num_cpu; i++) {
+ cpu = all_cpu_map->map[i];
+ bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu);
+ }
+ return 0;
+}
+
+static int bperf__enable(struct evsel *evsel)
+{
+ evsel->follower_skel->bss->enabled = 1;
+ return 0;
+}
+
+static int bperf__disable(struct evsel *evsel)
+{
+ evsel->follower_skel->bss->enabled = 0;
+ return 0;
+}
+
+static int bperf__read(struct evsel *evsel)
+{
+ struct bperf_follower_bpf *skel = evsel->follower_skel;
+ __u32 num_cpu_bpf = cpu__max_cpu();
+ struct bpf_perf_event_value values[num_cpu_bpf];
+ int reading_map_fd, err = 0;
+ __u32 i, j, num_cpu;
+
+ bperf_sync_counters(evsel);
+ reading_map_fd = bpf_map__fd(skel->maps.accum_readings);
+
+ for (i = 0; i < bpf_map__max_entries(skel->maps.accum_readings); i++) {
+ __u32 cpu;
+
+ err = bpf_map_lookup_elem(reading_map_fd, &i, values);
+ if (err)
+ goto out;
+ switch (evsel->follower_skel->bss->type) {
+ case BPERF_FILTER_GLOBAL:
+ assert(i == 0);
+
+ num_cpu = all_cpu_map->nr;
+ for (j = 0; j < num_cpu; j++) {
+ cpu = all_cpu_map->map[j];
+ perf_counts(evsel->counts, cpu, 0)->val = values[cpu].counter;
+ perf_counts(evsel->counts, cpu, 0)->ena = values[cpu].enabled;
+ perf_counts(evsel->counts, cpu, 0)->run = values[cpu].running;
+ }
+ break;
+ case BPERF_FILTER_CPU:
+ cpu = evsel->core.cpus->map[i];
+ perf_counts(evsel->counts, i, 0)->val = values[cpu].counter;
+ perf_counts(evsel->counts, i, 0)->ena = values[cpu].enabled;
+ perf_counts(evsel->counts, i, 0)->run = values[cpu].running;
+ break;
+ case BPERF_FILTER_PID:
+ case BPERF_FILTER_TGID:
+ perf_counts(evsel->counts, 0, i)->val = 0;
+ perf_counts(evsel->counts, 0, i)->ena = 0;
+ perf_counts(evsel->counts, 0, i)->run = 0;
+
+ for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
+ perf_counts(evsel->counts, 0, i)->val += values[cpu].counter;
+ perf_counts(evsel->counts, 0, i)->ena += values[cpu].enabled;
+ perf_counts(evsel->counts, 0, i)->run += values[cpu].running;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+out:
+ return err;
+}
+
+static int bperf__destroy(struct evsel *evsel)
+{
+ bperf_follower_bpf__destroy(evsel->follower_skel);
+ close(evsel->bperf_leader_prog_fd);
+ close(evsel->bperf_leader_link_fd);
+ return 0;
+}
+
+/*
+ * bperf: share hardware PMCs with BPF
+ *
+ * perf uses performance monitoring counters (PMC) to monitor system
+ * performance. The PMCs are limited hardware resources. For example,
+ * Intel CPUs have 3x fixed PMCs and 4x programmable PMCs per cpu.
+ *
+ * Modern data center systems use these PMCs in many different ways:
+ * system level monitoring, (maybe nested) container level monitoring, per
+ * process monitoring, profiling (in sample mode), etc. In some cases,
+ * there are more active perf_events than available hardware PMCs. To allow
+ * all perf_events to have a chance to run, it is necessary to do expensive
+ * time multiplexing of events.
+ *
+ * On the other hand, many monitoring tools count the common metrics
+ * (cycles, instructions). It is a waste to have multiple tools create
+ * multiple perf_events of "cycles" and occupy multiple PMCs.
+ *
+ * bperf tries to reduce such waste by allowing multiple perf_events of
+ * "cycles" or "instructions" (at different scopes) to share PMUs. Instead
+ * of having each perf-stat session read its own perf_events, bperf uses
+ * BPF programs to read the perf_events and aggregate readings into BPF
+ * maps. The perf-stat session(s) then read the values from these BPF maps.
+ *
+ * ||
+ * shared progs and maps <- || -> per session progs and maps
+ * ||
+ * --------------- ||
+ * | perf_events | ||
+ * --------------- fexit || -----------------
+ * | --------||----> | follower prog |
+ * --------------- / || --- -----------------
+ * cs -> | leader prog |/ ||/ | |
+ * --> --------------- /|| -------------- ------------------
+ * / | | / || | filter map | | accum_readings |
+ * / ------------ ------------ || -------------- ------------------
+ * | | prev map | | diff map | || |
+ * | ------------ ------------ || |
+ * \ || |
+ * = \ ==================================================== | ============
+ * \ / user space
+ * \ /
+ * \ /
+ * BPF_PROG_TEST_RUN BPF_MAP_LOOKUP_ELEM
+ * \ /
+ * \ /
+ * \------ perf-stat ----------------------/
+ *
+ * The figure above shows the architecture of bperf. Note that the figure
+ * is divided into 3 regions: shared progs and maps (top left), per session
+ * progs and maps (top right), and user space (bottom).
+ *
+ * The leader prog is triggered on each context switch (cs). The leader
+ * prog reads perf_events and stores the difference (current_reading -
+ * previous_reading) in the diff map. For the same metric, e.g. "cycles",
+ * multiple perf-stat sessions share the same leader prog.
+ *
+ * Each perf-stat session creates a follower prog as a fexit program on
+ * the leader prog. It is possible to attach up to BPF_MAX_TRAMP_PROGS
+ * (38) follower progs to the same leader prog. The follower prog checks
+ * the current task and processor ID to decide whether to add the value
+ * from the diff map to its accumulated readings map (accum_readings).
+ *
+ * Finally, perf-stat user space reads the value from the accum_readings map.
+ *
+ * Besides context switches, it is also necessary to trigger the leader prog
+ * before perf-stat reads the value. Otherwise, the accum_readings map may
+ * not have the latest readings from the perf_events. This is achieved by
+ * running the leader prog on each CPU via sys_bpf(BPF_PROG_TEST_RUN).
+ *
+ * The comment before the definition of struct perf_event_attr_map_entry
+ * describes how different perf-stat sessions share information about
+ * the leader prog.
+ */
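
To make the locking and sharing protocol concrete, here is a rough user-space sketch of how a later perf-stat session joins an existing leader. This is illustrative only, a simplified view of bperf_lock_attr_map() and bperf__load() above: the pin path is hard-coded, the entry struct is redeclared locally, and error handling is trimmed.

#include <unistd.h>
#include <sys/file.h>           /* flock() */
#include <linux/perf_event.h>
#include <bpf/bpf.h>

/* mirrors the entry type stored in the pinned perf_event_attr map */
struct attr_map_entry {
	__u32 link_id;
	__u32 diff_map_id;
};

/* Return a link fd that keeps the shared leader prog alive, or -1. */
static int join_existing_leader(struct perf_event_attr *attr)
{
	struct attr_map_entry entry;
	int attr_map_fd, link_fd = -1;

	/* the attr map is pinned in bpffs so that sessions can find it */
	attr_map_fd = bpf_obj_get("/sys/fs/bpf/perf_attr_map");
	if (attr_map_fd < 0)
		return -1;

	/* flock() serializes sessions racing on the same attr map */
	if (flock(attr_map_fd, LOCK_EX) == 0) {
		/* a hit means some session already loaded a leader prog */
		if (!bpf_map_lookup_elem(attr_map_fd, attr, &entry))
			link_fd = bpf_link_get_fd_by_id(entry.link_id);
		flock(attr_map_fd, LOCK_UN);
	}
	close(attr_map_fd);
	return link_fd;	/* on a miss the caller must load a new leader */
}

On a miss, the caller takes the bperf_reload_leader_program() path above while still holding the flock, which is what prevents two sessions from loading duplicate leaders for the same perf_event_attr.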
+
+struct bpf_counter_ops bperf_ops = {
+ .load = bperf__load,
+ .enable = bperf__enable,
+ .disable = bperf__disable,
+ .read = bperf__read,
+ .install_pe = bperf__install_pe,
+ .destroy = bperf__destroy,
+};
+
+static inline bool bpf_counter_skip(struct evsel *evsel)
+{
+ return list_empty(&evsel->bpf_counter_list) &&
+ evsel->follower_skel == NULL;
+}
+
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd)
{
- if (list_empty(&evsel->bpf_counter_list))
+ if (bpf_counter_skip(evsel))
return 0;
return evsel->bpf_counter_ops->install_pe(evsel, cpu, fd);
}
int bpf_counter__load(struct evsel *evsel, struct target *target)
{
- if (target__has_bpf(target))
+ if (target->bpf_str)
evsel->bpf_counter_ops = &bpf_program_profiler_ops;
+ else if (target->use_bpf || evsel->bpf_counter ||
+ evsel__match_bpf_counter_events(evsel->name))
+ evsel->bpf_counter_ops = &bperf_ops;
if (evsel->bpf_counter_ops)
return evsel->bpf_counter_ops->load(evsel, target);
@@ -293,21 +820,28 @@ int bpf_counter__load(struct evsel *evsel, struct target *target)
int bpf_counter__enable(struct evsel *evsel)
{
- if (list_empty(&evsel->bpf_counter_list))
+ if (bpf_counter_skip(evsel))
return 0;
return evsel->bpf_counter_ops->enable(evsel);
}
+int bpf_counter__disable(struct evsel *evsel)
+{
+ if (bpf_counter_skip(evsel))
+ return 0;
+ return evsel->bpf_counter_ops->disable(evsel);
+}
+
int bpf_counter__read(struct evsel *evsel)
{
- if (list_empty(&evsel->bpf_counter_list))
+ if (bpf_counter_skip(evsel))
return -EAGAIN;
return evsel->bpf_counter_ops->read(evsel);
}
void bpf_counter__destroy(struct evsel *evsel)
{
- if (list_empty(&evsel->bpf_counter_list))
+ if (bpf_counter_skip(evsel))
return;
evsel->bpf_counter_ops->destroy(evsel);
evsel->bpf_counter_ops = NULL;
diff --git a/tools/perf/util/bpf_counter.h b/tools/perf/util/bpf_counter.h
index 2eca210e5dc1..d6d907c3dcf9 100644
--- a/tools/perf/util/bpf_counter.h
+++ b/tools/perf/util/bpf_counter.h
@@ -18,6 +18,7 @@ typedef int (*bpf_counter_evsel_install_pe_op)(struct evsel *evsel,
struct bpf_counter_ops {
bpf_counter_evsel_target_op load;
bpf_counter_evsel_op enable;
+ bpf_counter_evsel_op disable;
bpf_counter_evsel_op read;
bpf_counter_evsel_op destroy;
bpf_counter_evsel_install_pe_op install_pe;
@@ -32,13 +33,14 @@ struct bpf_counter {
int bpf_counter__load(struct evsel *evsel, struct target *target);
int bpf_counter__enable(struct evsel *evsel);
+int bpf_counter__disable(struct evsel *evsel);
int bpf_counter__read(struct evsel *evsel);
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
#else /* HAVE_BPF_SKEL */
-#include<linux/err.h>
+#include <linux/err.h>
static inline int bpf_counter__load(struct evsel *evsel __maybe_unused,
struct target *target __maybe_unused)
@@ -51,6 +53,11 @@ static inline int bpf_counter__enable(struct evsel *evsel __maybe_unused)
return 0;
}
+static inline int bpf_counter__disable(struct evsel *evsel __maybe_unused)
+{
+ return 0;
+}
+
static inline int bpf_counter__read(struct evsel *evsel __maybe_unused)
{
return -EAGAIN;
diff --git a/tools/perf/util/bpf_skel/bperf.h b/tools/perf/util/bpf_skel/bperf.h
new file mode 100644
index 000000000000..186a5551ddb9
--- /dev/null
+++ b/tools/perf/util/bpf_skel/bperf.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2021 Facebook
+
+#ifndef __BPERF_STAT_H
+#define __BPERF_STAT_H
+
+typedef struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+ __uint(max_entries, 1);
+} reading_map;
+
+#endif /* __BPERF_STAT_H */
diff --git a/tools/perf/util/bpf_skel/bperf_follower.bpf.c b/tools/perf/util/bpf_skel/bperf_follower.bpf.c
new file mode 100644
index 000000000000..b8fa3cb2da23
--- /dev/null
+++ b/tools/perf/util/bpf_skel/bperf_follower.bpf.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2021 Facebook
+#include <linux/bpf.h>
+#include <linux/perf_event.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bperf.h"
+#include "bperf_u.h"
+
+reading_map diff_readings SEC(".maps");
+reading_map accum_readings SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} filter SEC(".maps");
+
+enum bperf_filter_type type = 0;
+int enabled = 0;
+
+SEC("fexit/XXX")
+int BPF_PROG(fexit_XXX)
+{
+ struct bpf_perf_event_value *diff_val, *accum_val;
+ __u32 filter_key, zero = 0;
+ __u32 *accum_key;
+
+ if (!enabled)
+ return 0;
+
+ switch (type) {
+ case BPERF_FILTER_GLOBAL:
+ accum_key = &zero;
+ goto do_add;
+ case BPERF_FILTER_CPU:
+ filter_key = bpf_get_smp_processor_id();
+ break;
+ case BPERF_FILTER_PID:
+ filter_key = bpf_get_current_pid_tgid() & 0xffffffff;
+ break;
+ case BPERF_FILTER_TGID:
+ filter_key = bpf_get_current_pid_tgid() >> 32;
+ break;
+ default:
+ return 0;
+ }
+
+ accum_key = bpf_map_lookup_elem(&filter, &filter_key);
+ if (!accum_key)
+ return 0;
+
+do_add:
+ diff_val = bpf_map_lookup_elem(&diff_readings, &zero);
+ if (!diff_val)
+ return 0;
+
+ accum_val = bpf_map_lookup_elem(&accum_readings, accum_key);
+ if (!accum_val)
+ return 0;
+
+ accum_val->counter += diff_val->counter;
+ accum_val->enabled += diff_val->enabled;
+ accum_val->running += diff_val->running;
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/perf/util/bpf_skel/bperf_leader.bpf.c b/tools/perf/util/bpf_skel/bperf_leader.bpf.c
new file mode 100644
index 000000000000..4f70d1459e86
--- /dev/null
+++ b/tools/perf/util/bpf_skel/bperf_leader.bpf.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2021 Facebook
+#include <linux/bpf.h>
+#include <linux/perf_event.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bperf.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(int));
+ __uint(map_flags, BPF_F_PRESERVE_ELEMS);
+} events SEC(".maps");
+
+reading_map prev_readings SEC(".maps");
+reading_map diff_readings SEC(".maps");
+
+SEC("raw_tp/sched_switch")
+int BPF_PROG(on_switch)
+{
+ struct bpf_perf_event_value val, *prev_val, *diff_val;
+ __u32 key = bpf_get_smp_processor_id();
+ __u32 zero = 0;
+ long err;
+
+ prev_val = bpf_map_lookup_elem(&prev_readings, &zero);
+ if (!prev_val)
+ return 0;
+
+ diff_val = bpf_map_lookup_elem(&diff_readings, &zero);
+ if (!diff_val)
+ return 0;
+
+ err = bpf_perf_event_read_value(&events, key, &val, sizeof(val));
+ if (err)
+ return 0;
+
+ diff_val->counter = val.counter - prev_val->counter;
+ diff_val->enabled = val.enabled - prev_val->enabled;
+ diff_val->running = val.running - prev_val->running;
+ *prev_val = val;
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/perf/util/bpf_skel/bperf_u.h b/tools/perf/util/bpf_skel/bperf_u.h
new file mode 100644
index 000000000000..1ce0c2c905c1
--- /dev/null
+++ b/tools/perf/util/bpf_skel/bperf_u.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2021 Facebook
+
+#ifndef __BPERF_STAT_U_H
+#define __BPERF_STAT_U_H
+
+enum bperf_filter_type {
+ BPERF_FILTER_GLOBAL = 1,
+ BPERF_FILTER_CPU,
+ BPERF_FILTER_PID,
+ BPERF_FILTER_TGID,
+};
+
+#endif /* __BPERF_STAT_U_H */
diff --git a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
index c7cec92d0236..ab12b4c4ece2 100644
--- a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
+++ b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
@@ -52,7 +52,7 @@ int BPF_PROG(fentry_XXX)
static inline void
fexit_update_maps(struct bpf_perf_event_value *after)
{
- struct bpf_perf_event_value *before, diff, *accum;
+ struct bpf_perf_event_value *before, diff;
__u32 zero = 0;
before = bpf_map_lookup_elem(&fentry_readings, &zero);
@@ -78,7 +78,6 @@ int BPF_PROG(fexit_XXX)
{
struct bpf_perf_event_value reading;
__u32 cpu = bpf_get_smp_processor_id();
- __u32 one = 1, zero = 0;
int err;
/* read all events before updating the maps, to reduce error */
diff --git a/tools/perf/util/call-path.h b/tools/perf/util/call-path.h
index 6b3229106f16..5875cfc8106e 100644
--- a/tools/perf/util/call-path.h
+++ b/tools/perf/util/call-path.h
@@ -23,7 +23,7 @@
* @children: tree of call paths of functions called
*
* In combination with the call_return structure, the call_path structure
- * defines a context-sensitve call-graph.
+ * defines a context-sensitive call-graph.
*/
struct call_path {
struct call_path *parent;
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 1b60985690bb..8e2777133bd9 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -877,7 +877,7 @@ append_chain_children(struct callchain_node *root,
if (!node)
return -1;
- /* lookup in childrens */
+ /* lookup in children */
while (*p) {
enum match_result ret;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 6984c77068a3..63d472b336de 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -18,6 +18,7 @@
#include "util/hist.h" /* perf_hist_config */
#include "util/llvm-utils.h" /* perf_llvm_config */
#include "util/stat.h" /* perf_stat__set_big_num */
+#include "util/evsel.h" /* evsel__hw_names, evsel__use_bpf_counters */
#include "build-id.h"
#include "debug.h"
#include "config.h"
@@ -457,6 +458,12 @@ static int perf_stat_config(const char *var, const char *value)
if (!strcmp(var, "stat.big-num"))
perf_stat__set_big_num(perf_config_bool(var, value));
+ if (!strcmp(var, "stat.no-csv-summary"))
+ perf_stat__set_no_csv_summary(perf_config_bool(var, value));
+
+ if (!strcmp(var, "stat.bpf-counter-events"))
+ evsel__bpf_counter_events = strdup(value);
+
/* Add other config variables here. */
return 0;
}
@@ -699,7 +706,7 @@ static int collect_config(const char *var, const char *value,
/* perf_config_set can contain both user and system config items.
* So we should know where each value is from.
* The classification would be needed when a particular config file
- * is overwrited by setting feature i.e. set_config().
+ * is overwritten by setting feature i.e. set_config().
*/
if (strcmp(config_file_name, perf_etc_perfconfig()) == 0) {
section->from_system_config = true;
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 3f4bc4050477..059bcec3f651 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -6,6 +6,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/zalloc.h>
@@ -316,7 +317,7 @@ cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
* This is the first timestamp we've seen since the beginning of traces
* or a discontinuity. Since timestamps packets are generated *after*
* range packets have been generated, we need to estimate the time at
- * which instructions started by substracting the number of instructions
+ * which instructions started by subtracting the number of instructions
* executed to the timestamp.
*/
packet_queue->timestamp = elem->timestamp - packet_queue->instr_count;
@@ -491,13 +492,42 @@ cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id)
{
- pid_t tid;
+ pid_t tid = -1;
+ static u64 pid_fmt;
+ int ret;
- /* Ignore PE_CONTEXT packets that don't have a valid contextID */
- if (!elem->context.ctxt_id_valid)
+ /*
+ * As all the ETMs run at the same exception level, the system should
+ * have the same PID format across CPUs. So cache the PID format and
+ * reuse it for subsequent decoding.
+ */
+ if (!pid_fmt) {
+ ret = cs_etm__get_pid_fmt(trace_chan_id, &pid_fmt);
+ if (ret)
+ return OCSD_RESP_FATAL_SYS_ERR;
+ }
+
+ /*
+ * Process the PE_CONTEXT packets if we have a valid contextID or VMID.
+ * If the kernel is running at EL2, the PID is traced in CONTEXTIDR_EL2
+ * as VMID; bit ETM_OPT_CTXTID2 is set in this case.
+ */
+ switch (pid_fmt) {
+ case BIT(ETM_OPT_CTXTID):
+ if (elem->context.ctxt_id_valid)
+ tid = elem->context.context_id;
+ break;
+ case BIT(ETM_OPT_CTXTID2):
+ if (elem->context.vmid_valid)
+ tid = elem->context.vmid;
+ break;
+ default:
+ break;
+ }
+
+ if (tid == -1)
return OCSD_RESP_CONT;
- tid = elem->context.context_id;
if (cs_etm__etmq_set_tid(etmq, tid, trace_chan_id))
return OCSD_RESP_FATAL_SYS_ERR;
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index a2a369e2fbb6..7e63e7dedc33 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
+#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/log2.h>
@@ -156,11 +157,52 @@ int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
return 0;
}
+/*
+ * The returned PID format is encoded in two bits:
+ *
+ * Bit ETM_OPT_CTXTID: CONTEXTIDR or CONTEXTIDR_EL1 is traced;
+ * Bit ETM_OPT_CTXTID2: CONTEXTIDR_EL2 is traced.
+ *
+ * It's possible that the two bits ETM_OPT_CTXTID and ETM_OPT_CTXTID2
+ * are enabled at the same time when the session runs on an EL2 kernel.
+ * This means both CONTEXTIDR_EL1 and CONTEXTIDR_EL2 will be recorded
+ * in the trace data; in this case the tool selectively uses
+ * CONTEXTIDR_EL2 as the PID.
+ */
+int cs_etm__get_pid_fmt(u8 trace_chan_id, u64 *pid_fmt)
+{
+ struct int_node *inode;
+ u64 *metadata, val;
+
+ inode = intlist__find(traceid_list, trace_chan_id);
+ if (!inode)
+ return -EINVAL;
+
+ metadata = inode->priv;
+
+ if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
+ val = metadata[CS_ETM_ETMCR];
+ /* CONTEXTIDR is traced */
+ if (val & BIT(ETM_OPT_CTXTID))
+ *pid_fmt = BIT(ETM_OPT_CTXTID);
+ } else {
+ val = metadata[CS_ETMV4_TRCCONFIGR];
+ /* CONTEXTIDR_EL2 is traced */
+ if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT)))
+ *pid_fmt = BIT(ETM_OPT_CTXTID2);
+ /* CONTEXTIDR_EL1 is traced */
+ else if (val & BIT(ETM4_CFG_BIT_CTXTID))
+ *pid_fmt = BIT(ETM_OPT_CTXTID);
+ }
+
+ return 0;
+}
+
void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
u8 trace_chan_id)
{
/*
- * Wnen a timestamp packet is encountered the backend code
+ * When a timestamp packet is encountered the backend code
* is stopped so that the front end has time to process packets
* that were accumulated in the traceID queue. Since there can
* be more than one channel per cs_etm_queue, we need to specify
@@ -1655,7 +1697,7 @@ static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
* | 1 1 0 1 1 1 1 1 | imm8 |
* +-----------------+--------+
*
- * According to the specifiction, it only defines SVC for T32
+ * According to the specification, it only defines SVC for T32
* with 16 bits instruction and has no definition for 32bits;
* so below only read 2 bytes as instruction size for T32.
*/
@@ -1887,7 +1929,7 @@ static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
/*
* If the previous packet is an exception return packet
- * and the return address just follows SVC instuction,
+ * and the return address just follows SVC instruction,
* it needs to calibrate the previous packet sample flags
* as PERF_IP_FLAG_SYSCALLRET.
*/
@@ -1961,7 +2003,7 @@ static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
* contain exception type related info so we cannot decide
* the exception type purely based on exception return packet.
* If we record the exception number from exception packet and
- * reuse it for excpetion return packet, this is not reliable
+ * reuse it for exception return packet, this is not reliable
* due the trace can be discontinuity or the interrupt can
* be nested, thus the recorded exception number cannot be
* used for exception return packet for these two cases.
@@ -2435,7 +2477,7 @@ static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
}
static const char * const cs_etm_global_header_fmts[] = {
- [CS_HEADER_VERSION_0] = " Header version %llx\n",
+ [CS_HEADER_VERSION] = " Header version %llx\n",
[CS_PMU_TYPE_CPUS] = " PMU type/num cpus %llx\n",
[CS_ETM_SNAPSHOT] = " Snapshot %llx\n",
};
@@ -2443,6 +2485,7 @@ static const char * const cs_etm_global_header_fmts[] = {
static const char * const cs_etm_priv_fmts[] = {
[CS_ETM_MAGIC] = " Magic number %llx\n",
[CS_ETM_CPU] = " CPU %lld\n",
+ [CS_ETM_NR_TRC_PARAMS] = " NR_TRC_PARAMS %llx\n",
[CS_ETM_ETMCR] = " ETMCR %llx\n",
[CS_ETM_ETMTRACEIDR] = " ETMTRACEIDR %llx\n",
[CS_ETM_ETMCCER] = " ETMCCER %llx\n",
@@ -2452,6 +2495,7 @@ static const char * const cs_etm_priv_fmts[] = {
static const char * const cs_etmv4_priv_fmts[] = {
[CS_ETM_MAGIC] = " Magic number %llx\n",
[CS_ETM_CPU] = " CPU %lld\n",
+ [CS_ETM_NR_TRC_PARAMS] = " NR_TRC_PARAMS %llx\n",
[CS_ETMV4_TRCCONFIGR] = " TRCCONFIGR %llx\n",
[CS_ETMV4_TRCTRACEIDR] = " TRCTRACEIDR %llx\n",
[CS_ETMV4_TRCIDR0] = " TRCIDR0 %llx\n",
@@ -2461,26 +2505,167 @@ static const char * const cs_etmv4_priv_fmts[] = {
[CS_ETMV4_TRCAUTHSTATUS] = " TRCAUTHSTATUS %llx\n",
};
-static void cs_etm__print_auxtrace_info(__u64 *val, int num)
+static const char * const param_unk_fmt =
+ " Unknown parameter [%d] %llx\n";
+static const char * const magic_unk_fmt =
+ " Magic number Unknown %llx\n";
+
+static int cs_etm__print_cpu_metadata_v0(__u64 *val, int *offset)
{
- int i, j, cpu = 0;
+ int i = *offset, j, nr_params = 0, fmt_offset;
+ __u64 magic;
- for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
- fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
+ /* check magic value */
+ magic = val[i + CS_ETM_MAGIC];
+ if ((magic != __perf_cs_etmv3_magic) &&
+ (magic != __perf_cs_etmv4_magic)) {
+ /* failure - note bad magic value */
+ fprintf(stdout, magic_unk_fmt, magic);
+ return -EINVAL;
+ }
+
+ /* print common header block */
+ fprintf(stdout, cs_etm_priv_fmts[CS_ETM_MAGIC], val[i++]);
+ fprintf(stdout, cs_etm_priv_fmts[CS_ETM_CPU], val[i++]);
+
+ if (magic == __perf_cs_etmv3_magic) {
+ nr_params = CS_ETM_NR_TRC_PARAMS_V0;
+ fmt_offset = CS_ETM_ETMCR;
+ /* after common block, offset format index past NR_PARAMS */
+ for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
+ fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
+ } else if (magic == __perf_cs_etmv4_magic) {
+ nr_params = CS_ETMV4_NR_TRC_PARAMS_V0;
+ fmt_offset = CS_ETMV4_TRCCONFIGR;
+ /* after common block, offset format index past NR_PARAMS */
+ for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
+ fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
+ }
+ *offset = i;
+ return 0;
+}
+
+static int cs_etm__print_cpu_metadata_v1(__u64 *val, int *offset)
+{
+ int i = *offset, j, total_params = 0;
+ __u64 magic;
+
+ magic = val[i + CS_ETM_MAGIC];
+ /* total params to print is NR_PARAMS + common block size for v1 */
+ total_params = val[i + CS_ETM_NR_TRC_PARAMS] + CS_ETM_COMMON_BLK_MAX_V1;
- for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
- if (val[i] == __perf_cs_etmv3_magic)
- for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
+ if (magic == __perf_cs_etmv3_magic) {
+ for (j = 0; j < total_params; j++, i++) {
+ /* if newer record - could be excess params */
+ if (j >= CS_ETM_PRIV_MAX)
+ fprintf(stdout, param_unk_fmt, j, val[i]);
+ else
fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
- else if (val[i] == __perf_cs_etmv4_magic)
- for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
+ }
+ } else if (magic == __perf_cs_etmv4_magic) {
+ for (j = 0; j < total_params; j++, i++) {
+ /* if newer record - could be excess params */
+ if (j >= CS_ETMV4_PRIV_MAX)
+ fprintf(stdout, param_unk_fmt, j, val[i]);
+ else
fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
- else
- /* failure.. return */
+ }
+ } else {
+ /* failure - note bad magic value and error out */
+ fprintf(stdout, magic_unk_fmt, magic);
+ return -EINVAL;
+ }
+ *offset = i;
+ return 0;
+}
+
+static void cs_etm__print_auxtrace_info(__u64 *val, int num)
+{
+ int i, cpu = 0, version, err;
+
+ /* bail out early on bad header version */
+ version = val[0];
+ if (version > CS_HEADER_CURRENT_VERSION) {
+ /* failure.. return */
+ fprintf(stdout, " Unknown Header Version = %x, ", version);
+ fprintf(stdout, "Version supported <= %x\n", CS_HEADER_CURRENT_VERSION);
+ return;
+ }
+
+ for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
+ fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
+
+ for (i = CS_HEADER_VERSION_MAX; cpu < num; cpu++) {
+ if (version == 0)
+ err = cs_etm__print_cpu_metadata_v0(val, &i);
+ else if (version == 1)
+ err = cs_etm__print_cpu_metadata_v1(val, &i);
+ if (err)
return;
}
}
+/*
+ * Read a single cpu parameter block from the auxtrace_info priv block.
+ *
+ * For version 1 there is a per cpu nr_params entry. If we are handling
+ * a version 1 file, then there may be fewer, the same, or more params
+ * indicated by this value than the compile time number we understand.
+ *
+ * For a version 0 info block, there is a fixed number of params, and we
+ * need to fill out the nr_params value in the metadata we create.
+ */
+static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
+ int out_blk_size, int nr_params_v0)
+{
+ u64 *metadata = NULL;
+ int hdr_version;
+ int nr_in_params, nr_out_params, nr_cmn_params;
+ int i, k;
+
+ metadata = zalloc(sizeof(*metadata) * out_blk_size);
+ if (!metadata)
+ return NULL;
+
+ /* read block current index & version */
+ i = *buff_in_offset;
+ hdr_version = buff_in[CS_HEADER_VERSION];
+
+ if (!hdr_version) {
+ /* read version 0 info block into a version 1 metadata block */
+ nr_in_params = nr_params_v0;
+ metadata[CS_ETM_MAGIC] = buff_in[i + CS_ETM_MAGIC];
+ metadata[CS_ETM_CPU] = buff_in[i + CS_ETM_CPU];
+ metadata[CS_ETM_NR_TRC_PARAMS] = nr_in_params;
+ /* version 0 has 2 common params */
+ nr_cmn_params = 2;
+ /* remaining block params at offset +1 from source */
+ for (k = CS_ETM_COMMON_BLK_MAX_V1 - 1; k < nr_in_params + nr_cmn_params; k++)
+ metadata[k + 1] = buff_in[i + k];
+ } else {
+ /* read version 1 info block - input and output nr_params may differ */
+ /* version 1 has 3 common params */
+ nr_cmn_params = 3;
+ nr_in_params = buff_in[i + CS_ETM_NR_TRC_PARAMS];
+
+ /* if input has more params than output - skip excess */
+ nr_out_params = nr_in_params + nr_cmn_params;
+ if (nr_out_params > out_blk_size)
+ nr_out_params = out_blk_size;
+
+ for (k = CS_ETM_MAGIC; k < nr_out_params; k++)
+ metadata[k] = buff_in[i + k];
+
+ /* record the actual nr params we copied */
+ metadata[CS_ETM_NR_TRC_PARAMS] = nr_out_params - nr_cmn_params;
+ }
+
+ /* adjust in offset by number of in params used */
+ i += nr_in_params + nr_cmn_params;
+ *buff_in_offset = i;
+ return metadata;
+}
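
As a worked example of the conversion above (register values hypothetical): a version 0 ETMv3 input block is six words, and becomes a seven-word version 1 metadata block with NR_TRC_PARAMS inserted after the two common words.

/*
 *   in:  { ETMV3_MAGIC, cpu,      0x8000, 0x0010, 0x0, 0x4100f250 }
 *          magic        cpu       ETMCR   TRACEIDR CCER IDR
 *
 *   out: { ETMV3_MAGIC, cpu,  4,  0x8000, 0x0010, 0x0, 0x4100f250 }
 *          magic        cpu  nr   ETMCR   TRACEIDR CCER IDR
 *
 * The input offset then advances by 6 (4 trace params + 2 common words),
 * which lands on the next cpu's magic number.
 */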
+
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{
@@ -2492,11 +2677,12 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
int info_header_size;
int total_size = auxtrace_info->header.size;
int priv_size = 0;
- int num_cpu;
- int err = 0, idx = -1;
- int i, j, k;
+ int num_cpu, trcidr_idx;
+ int err = 0;
+ int i, j;
u64 *ptr, *hdr = NULL;
u64 **metadata = NULL;
+ u64 hdr_version;
/*
* sizeof(auxtrace_info_event::type) +
@@ -2512,16 +2698,21 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
/* First the global part */
ptr = (u64 *) auxtrace_info->priv;
- /* Look for version '0' of the header */
- if (ptr[0] != 0)
+ /* Look for version of the header */
+ hdr_version = ptr[0];
+ if (hdr_version > CS_HEADER_CURRENT_VERSION) {
+ /* print routine will print an error on bad version */
+ if (dump_trace)
+ cs_etm__print_auxtrace_info(auxtrace_info->priv, 0);
return -EINVAL;
+ }
- hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
+ hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_MAX);
if (!hdr)
return -ENOMEM;
/* Extract header information - see cs-etm.h for format */
- for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
+ for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
hdr[i] = ptr[i];
num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
@@ -2552,35 +2743,31 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
*/
for (j = 0; j < num_cpu; j++) {
if (ptr[i] == __perf_cs_etmv3_magic) {
- metadata[j] = zalloc(sizeof(*metadata[j]) *
- CS_ETM_PRIV_MAX);
- if (!metadata[j]) {
- err = -ENOMEM;
- goto err_free_metadata;
- }
- for (k = 0; k < CS_ETM_PRIV_MAX; k++)
- metadata[j][k] = ptr[i + k];
+ metadata[j] =
+ cs_etm__create_meta_blk(ptr, &i,
+ CS_ETM_PRIV_MAX,
+ CS_ETM_NR_TRC_PARAMS_V0);
/* The traceID is our handle */
- idx = metadata[j][CS_ETM_ETMTRACEIDR];
- i += CS_ETM_PRIV_MAX;
+ trcidr_idx = CS_ETM_ETMTRACEIDR;
+
} else if (ptr[i] == __perf_cs_etmv4_magic) {
- metadata[j] = zalloc(sizeof(*metadata[j]) *
- CS_ETMV4_PRIV_MAX);
- if (!metadata[j]) {
- err = -ENOMEM;
- goto err_free_metadata;
- }
- for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
- metadata[j][k] = ptr[i + k];
+ metadata[j] =
+ cs_etm__create_meta_blk(ptr, &i,
+ CS_ETMV4_PRIV_MAX,
+ CS_ETMV4_NR_TRC_PARAMS_V0);
/* The traceID is our handle */
- idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
- i += CS_ETMV4_PRIV_MAX;
+ trcidr_idx = CS_ETMV4_TRCTRACEIDR;
+ }
+
+ if (!metadata[j]) {
+ err = -ENOMEM;
+ goto err_free_metadata;
}
/* Get an RB node for this CPU */
- inode = intlist__findnew(traceid_list, idx);
+ inode = intlist__findnew(traceid_list, metadata[j][trcidr_idx]);
/* Something went wrong, no need to continue */
if (!inode) {
@@ -2601,7 +2788,7 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
}
/*
- * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
+ * Each of CS_HEADER_VERSION_MAX, CS_ETM_PRIV_MAX and
* CS_ETMV4_PRIV_MAX mark how many double words are in the
* global metadata, and each cpu's metadata respectively.
* The following tests if the correct number of double words was
@@ -2703,6 +2890,12 @@ err_free_traceid_list:
intlist__delete(traceid_list);
err_free_hdr:
zfree(&hdr);
-
+ /*
+ * At this point we have at least a valid header. Dump the rest of
+ * the info section - the print routines will error out on structural
+ * issues.
+ */
+ if (dump_trace)
+ cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
return err;
}
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 4ad925d6d799..36428918411e 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -12,28 +12,43 @@
struct perf_session;
-/* Versionning header in case things need tro change in the future. That way
+/*
+ * Versioning header in case things need to change in the future. That way
* decoding of old snapshot is still possible.
*/
enum {
/* Starting with 0x0 */
- CS_HEADER_VERSION_0,
+ CS_HEADER_VERSION,
/* PMU->type (32 bit), total # of CPUs (32 bit) */
CS_PMU_TYPE_CPUS,
CS_ETM_SNAPSHOT,
- CS_HEADER_VERSION_0_MAX,
+ CS_HEADER_VERSION_MAX,
};
+/*
+ * Update the version for new format.
+ *
+ * The new version 1 format adds a param count to the per cpu metadata.
+ * This allows new metadata parameters to be added easily.
+ * It requires that new params are always added after the current ones.
+ * It also allows a client reader to handle file versions that differ, by
+ * checking the number of params in the file against the number expected.
+ */
+#define CS_HEADER_CURRENT_VERSION 1
+
/* Beginning of header common to both ETMv3 and V4 */
enum {
CS_ETM_MAGIC,
CS_ETM_CPU,
+ /* Number of trace config params in following ETM specific block */
+ CS_ETM_NR_TRC_PARAMS,
+ CS_ETM_COMMON_BLK_MAX_V1,
};
/* ETMv3/PTM metadata */
enum {
/* Dynamic, configurable parameters */
- CS_ETM_ETMCR = CS_ETM_CPU + 1,
+ CS_ETM_ETMCR = CS_ETM_COMMON_BLK_MAX_V1,
CS_ETM_ETMTRACEIDR,
/* RO, taken from sysFS */
CS_ETM_ETMCCER,
@@ -41,10 +56,13 @@ enum {
CS_ETM_PRIV_MAX,
};
+/* define fixed version 0 length - allow new format reader to read old files. */
+#define CS_ETM_NR_TRC_PARAMS_V0 (CS_ETM_ETMIDR - CS_ETM_ETMCR + 1)
+
/* ETMv4 metadata */
enum {
/* Dynamic, configurable parameters */
- CS_ETMV4_TRCCONFIGR = CS_ETM_CPU + 1,
+ CS_ETMV4_TRCCONFIGR = CS_ETM_COMMON_BLK_MAX_V1,
CS_ETMV4_TRCTRACEIDR,
/* RO, taken from sysFS */
CS_ETMV4_TRCIDR0,
@@ -55,9 +73,12 @@ enum {
CS_ETMV4_PRIV_MAX,
};
+/* define fixed version 0 length - allow new format reader to read old files. */
+#define CS_ETMV4_NR_TRC_PARAMS_V0 (CS_ETMV4_TRCAUTHSTATUS - CS_ETMV4_TRCCONFIGR + 1)
+
/*
* ETMv3 exception encoding number:
- * See Embedded Trace Macrocell spcification (ARM IHI 0014Q)
+ * See Embedded Trace Macrocell specification (ARM IHI 0014Q)
* table 7-12 Encoding of Exception[3:0] for non-ARMv7-M processors.
*/
enum {
@@ -162,7 +183,7 @@ struct cs_etm_packet_queue {
#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb)
-#define CS_ETM_HEADER_SIZE (CS_HEADER_VERSION_0_MAX * sizeof(u64))
+#define CS_ETM_HEADER_SIZE (CS_HEADER_VERSION_MAX * sizeof(u64))
#define __perf_cs_etmv3_magic 0x3030303030303030ULL
#define __perf_cs_etmv4_magic 0x4040404040404040ULL
@@ -173,6 +194,7 @@ struct cs_etm_packet_queue {
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session);
int cs_etm__get_cpu(u8 trace_chan_id, int *cpu);
+int cs_etm__get_pid_fmt(u8 trace_chan_id, u64 *pid_fmt);
int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
pid_t tid, u8 trace_chan_id);
bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq);
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 8b67bd97d122..cace349fb700 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -21,7 +21,7 @@
#include <babeltrace/ctf/events.h>
#include <traceevent/event-parse.h>
#include "asm/bug.h"
-#include "data-convert-bt.h"
+#include "data-convert.h"
#include "session.h"
#include "debug.h"
#include "tool.h"
@@ -949,7 +949,7 @@ static char *change_name(char *name, char *orig_name, int dup)
/*
* Add '_' prefix to potential keyword. According to
* Mathieu Desnoyers (https://lore.kernel.org/lkml/1074266107.40857.1422045946295.JavaMail.zimbra@efficios.com),
- * futher CTF spec updating may require us to use '$'.
+ * further CTF spec updating may require us to use '$'.
*/
if (dup < 0)
len = strlen(name) + sizeof("_");
diff --git a/tools/perf/util/data-convert-bt.h b/tools/perf/util/data-convert-bt.h
deleted file mode 100644
index 821674d63c4e..000000000000
--- a/tools/perf/util/data-convert-bt.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DATA_CONVERT_BT_H
-#define __DATA_CONVERT_BT_H
-#include "data-convert.h"
-#ifdef HAVE_LIBBABELTRACE_SUPPORT
-
-int bt_convert__perf2ctf(const char *input_name, const char *to_ctf,
- struct perf_data_convert_opts *opts);
-
-#endif /* HAVE_LIBBABELTRACE_SUPPORT */
-#endif /* __DATA_CONVERT_BT_H */
diff --git a/tools/perf/util/data-convert-json.c b/tools/perf/util/data-convert-json.c
new file mode 100644
index 000000000000..355cd1948bdf
--- /dev/null
+++ b/tools/perf/util/data-convert-json.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * JSON export.
+ *
+ * Copyright (C) 2021, CodeWeavers Inc. <nfraser@codeweavers.com>
+ */
+
+#include "data-convert.h"
+
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "linux/compiler.h"
+#include "linux/err.h"
+#include "util/auxtrace.h"
+#include "util/debug.h"
+#include "util/dso.h"
+#include "util/event.h"
+#include "util/evsel.h"
+#include "util/evlist.h"
+#include "util/header.h"
+#include "util/map.h"
+#include "util/session.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/tool.h"
+
+struct convert_json {
+ struct perf_tool tool;
+ FILE *out;
+ bool first;
+ u64 events_count;
+};
+
+// Outputs a JSON-encoded string surrounded by quotes with characters escaped.
+static void output_json_string(FILE *out, const char *s)
+{
+ fputc('"', out);
+ while (*s) {
+ switch (*s) {
+
+ // required escapes with special forms as per RFC 8259
+ case '"': fputs("\\\"", out); break;
+ case '\\': fputs("\\\\", out); break;
+ case '\b': fputs("\\b", out); break;
+ case '\f': fputs("\\f", out); break;
+ case '\n': fputs("\\n", out); break;
+ case '\r': fputs("\\r", out); break;
+ case '\t': fputs("\\t", out); break;
+
+ default:
+ // all other control characters must be escaped by hex code
+ if (*s <= 0x1f)
+ fprintf(out, "\\u%04x", *s);
+ else
+ fputc(*s, out);
+ break;
+ }
+
+ ++s;
+ }
+ fputc('"', out);
+}
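
A brief worked example of the escaping (input hypothetical):

/*
 * For instance, the call
 *
 *   output_json_string(out, "tab\there \"quoted\"");
 *
 * writes the following to out, surrounding quotes included:
 *
 *   "tab\there \"quoted\""
 */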
+
+// Outputs an optional comma, newline and indentation to delimit a new value
+// from the previous one in a JSON object or array.
+static void output_json_delimiters(FILE *out, bool comma, int depth)
+{
+ int i;
+
+ if (comma)
+ fputc(',', out);
+ fputc('\n', out);
+ for (i = 0; i < depth; ++i)
+ fputc('\t', out);
+}
+
+// Outputs a printf format string (with delimiter) as a JSON value.
+__printf(4, 5)
+static void output_json_format(FILE *out, bool comma, int depth, const char *format, ...)
+{
+ va_list args;
+
+ output_json_delimiters(out, comma, depth);
+ va_start(args, format);
+ vfprintf(out, format, args);
+ va_end(args);
+}
+
+// Outputs a JSON key-value pair where the value is a string.
+static void output_json_key_string(FILE *out, bool comma, int depth,
+ const char *key, const char *value)
+{
+ output_json_delimiters(out, comma, depth);
+ output_json_string(out, key);
+ fputs(": ", out);
+ output_json_string(out, value);
+}
+
+// Outputs a JSON key-value pair where the value is a printf format string.
+__printf(5, 6)
+static void output_json_key_format(FILE *out, bool comma, int depth,
+ const char *key, const char *format, ...)
+{
+ va_list args;
+
+ output_json_delimiters(out, comma, depth);
+ output_json_string(out, key);
+ fputs(": ", out);
+ va_start(args, format);
+ vfprintf(out, format, args);
+ va_end(args);
+}
+
+static void output_sample_callchain_entry(struct perf_tool *tool,
+ u64 ip, struct addr_location *al)
+{
+ struct convert_json *c = container_of(tool, struct convert_json, tool);
+ FILE *out = c->out;
+
+ output_json_format(out, false, 4, "{");
+ output_json_key_format(out, false, 5, "ip", "\"0x%" PRIx64 "\"", ip);
+
+ if (al && al->sym && al->sym->namelen) {
+ fputc(',', out);
+ output_json_key_string(out, false, 5, "symbol", al->sym->name);
+
+ if (al->map && al->map->dso) {
+ const char *dso = al->map->dso->short_name;
+
+ if (dso && strlen(dso) > 0) {
+ fputc(',', out);
+ output_json_key_string(out, false, 5, "dso", dso);
+ }
+ }
+ }
+
+ output_json_format(out, false, 4, "}");
+}
+
+static int process_sample_event(struct perf_tool *tool,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample,
+ struct evsel *evsel __maybe_unused,
+ struct machine *machine)
+{
+ struct convert_json *c = container_of(tool, struct convert_json, tool);
+ FILE *out = c->out;
+ struct addr_location al, tal;
+ u8 cpumode = PERF_RECORD_MISC_USER;
+
+ if (machine__resolve(machine, &al, sample) < 0) {
+ pr_err("Sample resolution failed!\n");
+ return -1;
+ }
+
+ ++c->events_count;
+
+ if (c->first)
+ c->first = false;
+ else
+ fputc(',', out);
+ output_json_format(out, false, 2, "{");
+
+ output_json_key_format(out, false, 3, "timestamp", "%" PRIi64, sample->time);
+ output_json_key_format(out, true, 3, "pid", "%i", al.thread->pid_);
+ output_json_key_format(out, true, 3, "tid", "%i", al.thread->tid);
+
+ if (al.thread->cpu >= 0)
+ output_json_key_format(out, true, 3, "cpu", "%i", al.thread->cpu);
+
+ output_json_key_string(out, true, 3, "comm", thread__comm_str(al.thread));
+
+ output_json_key_format(out, true, 3, "callchain", "[");
+ if (sample->callchain) {
+ unsigned int i;
+ bool ok;
+ bool first_callchain = true;
+
+ for (i = 0; i < sample->callchain->nr; ++i) {
+ u64 ip = sample->callchain->ips[i];
+
+ if (ip >= PERF_CONTEXT_MAX) {
+ switch (ip) {
+ case PERF_CONTEXT_HV:
+ cpumode = PERF_RECORD_MISC_HYPERVISOR;
+ break;
+ case PERF_CONTEXT_KERNEL:
+ cpumode = PERF_RECORD_MISC_KERNEL;
+ break;
+ case PERF_CONTEXT_USER:
+ cpumode = PERF_RECORD_MISC_USER;
+ break;
+ default:
+ pr_debug("invalid callchain context: %"
+ PRId64 "\n", (s64) ip);
+ break;
+ }
+ continue;
+ }
+
+ if (first_callchain)
+ first_callchain = false;
+ else
+ fputc(',', out);
+
+ ok = thread__find_symbol(al.thread, cpumode, ip, &tal);
+ output_sample_callchain_entry(tool, ip, ok ? &tal : NULL);
+ }
+ } else {
+ output_sample_callchain_entry(tool, sample->ip, &al);
+ }
+ output_json_format(out, false, 3, "]");
+
+ output_json_format(out, false, 2, "}");
+ return 0;
+}
+
+static void output_headers(struct perf_session *session, struct convert_json *c)
+{
+ struct stat st;
+ struct perf_header *header = &session->header;
+ int ret;
+ int fd = perf_data__fd(session->data);
+ int i;
+ FILE *out = c->out;
+
+ output_json_key_format(out, false, 2, "header-version", "%u", header->version);
+
+ ret = fstat(fd, &st);
+ if (ret >= 0) {
+ time_t stctime = st.st_mtime;
+ char buf[256];
+
+ strftime(buf, sizeof(buf), "%FT%TZ", gmtime(&stctime));
+ output_json_key_string(out, true, 2, "captured-on", buf);
+ } else {
+ pr_debug("Failed to get mtime of source file, not writing captured-on");
+ }
+
+ output_json_key_format(out, true, 2, "data-offset", "%" PRIu64, header->data_offset);
+ output_json_key_format(out, true, 2, "data-size", "%" PRIu64, header->data_size);
+ output_json_key_format(out, true, 2, "feat-offset", "%" PRIu64, header->feat_offset);
+
+ output_json_key_string(out, true, 2, "hostname", header->env.hostname);
+ output_json_key_string(out, true, 2, "os-release", header->env.os_release);
+ output_json_key_string(out, true, 2, "arch", header->env.arch);
+
+ output_json_key_string(out, true, 2, "cpu-desc", header->env.cpu_desc);
+ output_json_key_string(out, true, 2, "cpuid", header->env.cpuid);
+ output_json_key_format(out, true, 2, "nrcpus-online", "%u", header->env.nr_cpus_online);
+ output_json_key_format(out, true, 2, "nrcpus-avail", "%u", header->env.nr_cpus_avail);
+
+ if (header->env.clock.enabled) {
+ output_json_key_format(out, true, 2, "clockid",
+ "%u", header->env.clock.clockid);
+ output_json_key_format(out, true, 2, "clock-time",
+ "%" PRIu64, header->env.clock.clockid_ns);
+ output_json_key_format(out, true, 2, "real-time",
+ "%" PRIu64, header->env.clock.tod_ns);
+ }
+
+ output_json_key_string(out, true, 2, "perf-version", header->env.version);
+
+ output_json_key_format(out, true, 2, "cmdline", "[");
+ for (i = 0; i < header->env.nr_cmdline; i++) {
+ output_json_delimiters(out, i != 0, 3);
+ output_json_string(c->out, header->env.cmdline_argv[i]);
+ }
+ output_json_format(out, false, 2, "]");
+}
+
+int bt_convert__perf2json(const char *input_name, const char *output_name,
+ struct perf_data_convert_opts *opts __maybe_unused)
+{
+ struct perf_session *session;
+ int fd;
+ int ret = -1;
+
+ struct convert_json c = {
+ .tool = {
+ .sample = process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ .comm = perf_event__process_comm,
+ .namespaces = perf_event__process_namespaces,
+ .cgroup = perf_event__process_cgroup,
+ .exit = perf_event__process_exit,
+ .fork = perf_event__process_fork,
+ .lost = perf_event__process_lost,
+ .tracing_data = perf_event__process_tracing_data,
+ .build_id = perf_event__process_build_id,
+ .id_index = perf_event__process_id_index,
+ .auxtrace_info = perf_event__process_auxtrace_info,
+ .auxtrace = perf_event__process_auxtrace,
+ .event_update = perf_event__process_event_update,
+ .ordered_events = true,
+ .ordering_requires_timestamps = true,
+ },
+ .first = true,
+ .events_count = 0,
+ };
+
+ struct perf_data data = {
+ .mode = PERF_DATA_MODE_READ,
+ .path = input_name,
+ .force = opts->force,
+ };
+
+ if (opts->all) {
+ pr_err("--all is currently unsupported for JSON output.\n");
+ goto err;
+ }
+ if (opts->tod) {
+ pr_err("--tod is currently unsupported for JSON output.\n");
+ goto err;
+ }
+
+ fd = open(output_name, O_CREAT | O_WRONLY | (opts->force ? O_TRUNC : O_EXCL), 0666);
+ if (fd == -1) {
+ if (errno == EEXIST)
+ pr_err("Output file exists. Use --force to overwrite it.\n");
+ else
+ pr_err("Error opening output file!\n");
+ goto err;
+ }
+
+ c.out = fdopen(fd, "w");
+ if (!c.out) {
+ fprintf(stderr, "Error opening output file!\n");
+ close(fd);
+ goto err;
+ }
+
+ session = perf_session__new(&data, false, &c.tool);
+ if (IS_ERR(session)) {
+ fprintf(stderr, "Error creating perf session!\n");
+ goto err_fclose;
+ }
+
+ if (symbol__init(&session->header.env) < 0) {
+ fprintf(stderr, "Symbol init error!\n");
+ goto err_session_delete;
+ }
+
+ // The opening brace is printed manually because it isn't delimited from a
+ // previous value (i.e. we don't want a leading newline)
+ fputc('{', c.out);
+
+ // Version number for future-proofing. Most additions should be able to be
+ // done in a backwards-compatible way so this should only need to be bumped
+ // if some major breaking change must be made.
+ output_json_format(c.out, false, 1, "\"linux-perf-json-version\": 1");
+
+ // Output headers
+ output_json_format(c.out, true, 1, "\"headers\": {");
+ output_headers(session, &c);
+ output_json_format(c.out, false, 1, "}");
+
+ // Output samples
+ output_json_format(c.out, true, 1, "\"samples\": [");
+ perf_session__process_events(session);
+ output_json_format(c.out, false, 1, "]");
+ output_json_format(c.out, false, 0, "}");
+ fputc('\n', c.out);
+
+ fprintf(stderr,
+ "[ perf data convert: Converted '%s' into JSON data '%s' ]\n",
+ data.path, output_name);
+
+ fprintf(stderr,
+ "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples) ]\n",
+ (ftell(c.out)) / 1024.0 / 1024.0, c.events_count);
+
+ ret = 0;
+err_session_delete:
+ perf_session__delete(session);
+err_fclose:
+ fclose(c.out);
+err:
+ return ret;
+}
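
For reference, the emitted document has roughly the following shape; the example is abridged and all values are illustrative:

{
	"linux-perf-json-version": 1,
	"headers": {
		"header-version": 1,
		"captured-on": "2021-04-07T09:00:00Z",
		"hostname": "buildbox",
		"arch": "x86_64",
		"cmdline": [
			"perf", "record", "-g", "./a.out"
		]
	},
	"samples": [
		{
			"timestamp": 100000000123,
			"pid": 4242,
			"tid": 4242,
			"comm": "a.out",
			"callchain": [
				{
					"ip": "0x401234",
					"symbol": "main",
					"dso": "a.out"
				}
			]
		}
	]
}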
diff --git a/tools/perf/util/data-convert.h b/tools/perf/util/data-convert.h
index feab5f114e37..1b4c5f598415 100644
--- a/tools/perf/util/data-convert.h
+++ b/tools/perf/util/data-convert.h
@@ -2,10 +2,20 @@
#ifndef __DATA_CONVERT_H
#define __DATA_CONVERT_H
+#include <stdbool.h>
+
struct perf_data_convert_opts {
bool force;
bool all;
bool tod;
};
+#ifdef HAVE_LIBBABELTRACE_SUPPORT
+int bt_convert__perf2ctf(const char *input_name, const char *to_ctf,
+ struct perf_data_convert_opts *opts);
+#endif /* HAVE_LIBBABELTRACE_SUPPORT */
+
+int bt_convert__perf2json(const char *input_name, const char *output_name,
+ struct perf_data_convert_opts *opts);
+
#endif /* __DATA_CONVERT_H */
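
Assuming the builtin-data wiring added elsewhere in this series, a typical conversion session looks like this (paths and counts hypothetical):

$ perf record -g -- sleep 1
$ perf data convert --to-json perf.data.json
[ perf data convert: Converted 'perf.data' into JSON data 'perf.data.json' ]
[ perf data convert: Converted and wrote 0.095 MB (128 samples) ]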
diff --git a/tools/perf/util/demangle-java.c b/tools/perf/util/demangle-java.c
index 39c05200ed65..ddf33d58bcd3 100644
--- a/tools/perf/util/demangle-java.c
+++ b/tools/perf/util/demangle-java.c
@@ -147,7 +147,7 @@ error:
* Demangle Java function signature (openJDK, not GCJ)
* input:
* str: string to parse. String is not modified
- * flags: comobination of JAVA_DEMANGLE_* flags to modify demangling
+ * flags: combination of JAVA_DEMANGLE_* flags to modify demangling
* return:
* if input can be demangled, then a newly allocated string is returned.
* if input cannot be demangled, then NULL is returned
@@ -164,7 +164,7 @@ java_demangle_sym(const char *str, int flags)
if (!str)
return NULL;
- /* find start of retunr type */
+ /* find start of return type */
p = strrchr(str, ')');
if (!p)
return NULL;
diff --git a/tools/perf/util/demangle-ocaml.c b/tools/perf/util/demangle-ocaml.c
index 3df14e67c622..9d707bb60b4b 100644
--- a/tools/perf/util/demangle-ocaml.c
+++ b/tools/perf/util/demangle-ocaml.c
@@ -64,17 +64,5 @@ ocaml_demangle_sym(const char *sym)
}
result[j] = '\0';
- /* scan backwards to remove an "_" followed by decimal digits */
- if (j != 0 && isdigit(result[j - 1])) {
- while (--j) {
- if (!isdigit(result[j])) {
- break;
- }
- }
- if (result[j] == '_') {
- result[j] = '\0';
- }
- }
-
return result;
}
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index cd2fe64a3c5d..52e7101c5609 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -216,7 +216,7 @@ struct dso {
/* dso__for_each_symbol - iterate over the symbols of given type
*
- * @dso: the 'struct dso *' in which symbols itereated
+ * @dso: the 'struct dso *' in which symbols are iterated
* @pos: the 'struct symbol *' to use as a loop cursor
* @n: the 'struct rb_node *' to use as a temporary storage
*/
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 7b2d471a6419..b2f4920e19a6 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -91,7 +91,7 @@ static Dwarf_Line *cu_getsrc_die(Dwarf_Die *cu_die, Dwarf_Addr addr)
return NULL;
} while (laddr == addr);
l++;
- /* Going foward to find the statement line */
+ /* Going forward to find the statement line */
do {
line = dwarf_onesrcline(lines, l++);
if (!line || dwarf_lineaddr(line, &laddr) != 0 ||
@@ -177,7 +177,7 @@ int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
* die_get_linkage_name - Get the linkage name of the object
* @dw_die: A DIE of the object
*
- * Get the linkage name attiribute of given @dw_die.
+ * Get the linkage name attribute of given @dw_die.
* For C++ binary, the linkage name will be the mangled symbol.
*/
const char *die_get_linkage_name(Dwarf_Die *dw_die)
@@ -739,7 +739,7 @@ static int __die_walk_instances_cb(Dwarf_Die *inst, void *data)
* @data: user data
*
* Walk on the instances of give @in_die. @in_die must be an inlined function
- * declartion. This returns the return value of @callback if it returns
+ * declaration. This returns the return value of @callback if it returns
* non-zero value, or -ENOENT if there is no instance.
*/
int die_walk_instances(Dwarf_Die *or_die, int (*callback)(Dwarf_Die *, void *),
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index 506006e0cf66..cb99646843a9 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -22,7 +22,7 @@ const char *cu_get_comp_dir(Dwarf_Die *cu_die);
int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
const char **fname, int *lineno);
-/* Walk on funcitons at given address */
+/* Walk on functions at given address */
int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
int (*callback)(Dwarf_Die *, void *), void *data);
diff --git a/tools/perf/util/dwarf-regs.c b/tools/perf/util/dwarf-regs.c
index 1b49ecee5aff..3fa4486742cd 100644
--- a/tools/perf/util/dwarf-regs.c
+++ b/tools/perf/util/dwarf-regs.c
@@ -24,6 +24,7 @@
#include "../arch/s390/include/dwarf-regs-table.h"
#include "../arch/sparc/include/dwarf-regs-table.h"
#include "../arch/xtensa/include/dwarf-regs-table.h"
+#include "../arch/mips/include/dwarf-regs-table.h"
#define __get_dwarf_regstr(tbl, n) (((n) < ARRAY_SIZE(tbl)) ? (tbl)[(n)] : NULL)
@@ -53,6 +54,8 @@ const char *get_dwarf_regstr(unsigned int n, unsigned int machine)
return __get_dwarf_regstr(sparc_regstr_tbl, n);
case EM_XTENSA:
return __get_dwarf_regstr(xtensa_regstr_tbl, n);
+ case EM_MIPS:
+ return __get_dwarf_regstr(mips_regstr_tbl, n);
default:
pr_err("ELF MACHINE %x is not supported.\n", machine);
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index f603edbbbc6f..8a62fb39e365 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -147,6 +147,7 @@ struct perf_sample {
u8 cpumode;
u16 misc;
u16 ins_lat;
+ u16 p_stage_cyc;
bool no_hw_idx; /* No hw_idx collected in branch_stack */
char insn[MAX_INSN];
void *raw_data;
@@ -427,5 +428,7 @@ char *get_page_size_name(u64 size, char *str);
void arch_perf_parse_sample_weight(struct perf_sample *data, const __u64 *array, u64 type);
void arch_perf_synthesize_sample_weight(const struct perf_sample *data, __u64 *array, u64 type);
+const char *arch_perf_header_entry(const char *se_header);
+int arch_support_sort_key(const char *sort_key);
#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/events_stats.h b/tools/perf/util/events_stats.h
index 859cb34fcff2..3480bafd414b 100644
--- a/tools/perf/util/events_stats.h
+++ b/tools/perf/util/events_stats.h
@@ -21,20 +21,17 @@
* all struct perf_record_lost_samples.lost fields reported.
*
* The total_period is needed because by default auto-freq is used, so
- * multipling nr_events[PERF_EVENT_SAMPLE] by a frequency isn't possible to get
+ * multiplying nr_events[PERF_EVENT_SAMPLE] by a frequency isn't possible to get
 * the total number of low level events, it is necessary to sum all struct
* perf_record_sample.period and stash the result in total_period.
*/
struct events_stats {
- u64 total_period;
- u64 total_non_filtered_period;
u64 total_lost;
u64 total_lost_samples;
u64 total_aux_lost;
u64 total_aux_partial;
u64 total_invalid_chains;
u32 nr_events[PERF_RECORD_HEADER_MAX];
- u32 nr_non_filtered_samples;
u32 nr_lost_warned;
u32 nr_unknown_events;
u32 nr_invalid_chains;
@@ -44,8 +41,16 @@ struct events_stats {
u32 nr_proc_map_timeout;
};
+struct hists_stats {
+ u64 total_period;
+ u64 total_non_filtered_period;
+ u32 nr_samples;
+ u32 nr_non_filtered_samples;
+};
+
void events_stats__inc(struct events_stats *stats, u32 type);
-size_t events_stats__fprintf(struct events_stats *stats, FILE *fp);
+size_t events_stats__fprintf(struct events_stats *stats, FILE *fp,
+ bool skip_empty);
#endif /* __PERF_EVENTS_STATS_ */
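
The hunks above split the per-histogram counters out of events_stats into a new
hists_stats and thread a skip_empty flag through the printing helper. A minimal
standalone sketch of the resulting behaviour, using hypothetical demo_ names in
place of the perf types:

    /* Sketch only: empty histograms are skipped when skip_empty is set. */
    #include <stdio.h>

    struct demo_hists_stats {
            unsigned int nr_samples;
    };

    static int demo_fprintf_nr_events(const char *ev_name,
                                      const struct demo_hists_stats *stats,
                                      FILE *fp, int skip_empty)
    {
            if (skip_empty && !stats->nr_samples)
                    return 0;       /* print nothing for eventless hists */
            return fprintf(fp, "%s stats:\n%16s events: %10u\n",
                           ev_name, "SAMPLE", stats->nr_samples);
    }

    int main(void)
    {
            struct demo_hists_stats idle = { 0 }, busy = { 42 };

            demo_fprintf_nr_events("dummy", &idle, stdout, 1);  /* silent */
            demo_fprintf_nr_events("cycles", &busy, stdout, 1);
            return 0;
    }
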
diff --git a/tools/perf/util/evlist-hybrid.c b/tools/perf/util/evlist-hybrid.c
new file mode 100644
index 000000000000..db3f5fbdebe1
--- /dev/null
+++ b/tools/perf/util/evlist-hybrid.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <errno.h>
+#include <inttypes.h>
+#include "cpumap.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "../perf.h"
+#include "util/pmu-hybrid.h"
+#include "util/evlist-hybrid.h"
+#include "debug.h"
+#include <unistd.h>
+#include <stdlib.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <perf/evlist.h>
+#include <perf/evsel.h>
+#include <perf/cpumap.h>
+
+int evlist__add_default_hybrid(struct evlist *evlist, bool precise)
+{
+ struct evsel *evsel;
+ struct perf_pmu *pmu;
+ __u64 config;
+ struct perf_cpu_map *cpus;
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ config = PERF_COUNT_HW_CPU_CYCLES |
+ ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT);
+ evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
+ config);
+ if (!evsel)
+ return -ENOMEM;
+
+ cpus = perf_cpu_map__get(pmu->cpus);
+ evsel->core.cpus = cpus;
+ evsel->core.own_cpus = perf_cpu_map__get(cpus);
+ evsel->pmu_name = strdup(pmu->name);
+ evlist__add(evlist, evsel);
+ }
+
+ return 0;
+}
+
+static bool group_hybrid_conflict(struct evsel *leader)
+{
+ struct evsel *pos, *prev = NULL;
+
+ for_each_group_evsel(pos, leader) {
+ if (!evsel__is_hybrid(pos))
+ continue;
+
+ if (prev && strcmp(prev->pmu_name, pos->pmu_name))
+ return true;
+
+ prev = pos;
+ }
+
+ return false;
+}
+
+void evlist__warn_hybrid_group(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel__is_group_leader(evsel) &&
+ evsel->core.nr_members > 1 &&
+ group_hybrid_conflict(evsel)) {
+ pr_warning("WARNING: events in group from "
+ "different hybrid PMUs!\n");
+ return;
+ }
+ }
+}
+
+bool evlist__has_hybrid(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->pmu_name &&
+ perf_pmu__is_hybrid(evsel->pmu_name)) {
+ return true;
+ }
+ }
+
+ return false;
+}
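
group_hybrid_conflict() flags a group whose hybrid members come from different
PMUs by remembering the previous PMU name and comparing as it walks. A
standalone sketch of that scan over plain strings (the PMU names are
illustrative; NULL marks a non-hybrid member):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool names_conflict(const char *pmu_names[], int n)
    {
            const char *prev = NULL;

            for (int i = 0; i < n; i++) {
                    if (!pmu_names[i])      /* non-hybrid member: ignore */
                            continue;
                    if (prev && strcmp(prev, pmu_names[i]))
                            return true;    /* two different hybrid PMUs */
                    prev = pmu_names[i];
            }
            return false;
    }

    int main(void)
    {
            const char *group[] = { "cpu_core", NULL, "cpu_atom" };

            printf("%s\n", names_conflict(group, 3) ? "conflict" : "ok");
            return 0;
    }
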
diff --git a/tools/perf/util/evlist-hybrid.h b/tools/perf/util/evlist-hybrid.h
new file mode 100644
index 000000000000..19f74b4c340a
--- /dev/null
+++ b/tools/perf/util/evlist-hybrid.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_EVLIST_HYBRID_H
+#define __PERF_EVLIST_HYBRID_H
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include "evlist.h"
+#include <unistd.h>
+
+int evlist__add_default_hybrid(struct evlist *evlist, bool precise);
+void evlist__warn_hybrid_group(struct evlist *evlist);
+bool evlist__has_hybrid(struct evlist *evlist);
+
+#endif /* __PERF_EVLIST_HYBRID_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 882cd1f721d9..6e5c41528c7d 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -17,6 +17,7 @@
#include "evsel.h"
#include "debug.h"
#include "units.h"
+#include "bpf_counter.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
@@ -25,6 +26,7 @@
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include "util/evsel_fprintf.h"
+#include "util/evlist-hybrid.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
@@ -36,6 +38,7 @@
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
+#include <sys/prctl.h>
#include <linux/bitops.h>
#include <linux/hash.h>
@@ -246,8 +249,10 @@ void evlist__set_leader(struct evlist *evlist)
int __evlist__add_default(struct evlist *evlist, bool precise)
{
- struct evsel *evsel = evsel__new_cycles(precise);
+ struct evsel *evsel;
+ evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
+ PERF_COUNT_HW_CPU_CYCLES);
if (evsel == NULL)
return -ENOMEM;
@@ -420,6 +425,9 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
if (affinity__setup(&affinity) < 0)
return;
+ evlist__for_each_entry(evlist, pos)
+ bpf_counter__disable(pos);
+
/* Disable 'immediate' events last */
for (imm = 0; imm <= 1; imm++) {
evlist__for_each_cpu(evlist, i, cpu) {
@@ -1209,7 +1217,7 @@ bool evlist__valid_read_format(struct evlist *evlist)
}
}
- /* PERF_SAMPLE_READ imples PERF_FORMAT_ID. */
+ /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
if ((sample_type & PERF_SAMPLE_READ) &&
!(read_format & PERF_FORMAT_ID)) {
return false;
@@ -1406,6 +1414,13 @@ int evlist__prepare_workload(struct evlist *evlist, struct target *target, const
fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
/*
+ * Change the name of this process not to confuse --exclude-perf users
+	 * who see 'perf' in the window up to the execvp() and think that
+ * perf samples are not being excluded.
+ */
+ prctl(PR_SET_NAME, "perf-exec");
+
+ /*
* Tell the parent we're ready to go
*/
close(child_ready_pipe[1]);
@@ -2130,3 +2145,22 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
}
return NULL;
}
+
+int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
+{
+ struct evsel *evsel;
+ int printed = 0;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel__is_dummy_event(evsel))
+ continue;
+ if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) {
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel));
+ } else {
+ printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : "");
+ break;
+ }
+ }
+
+ return printed;
+}
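
evlist__scnprintf_evsels() joins the non-dummy event names with commas while
they fit and falls back to "..." once the buffer runs short. A standalone
sketch of the same truncation rule, with plain snprintf standing in for
scnprintf and the space check done against the remaining buffer:

    #include <stdio.h>
    #include <string.h>

    static int join_names(char *bf, size_t size, const char **names, int n)
    {
            int printed = 0;

            for (int i = 0; i < n; i++) {
                    if (size - printed > strlen(names[i]) + (printed ? 2 : 1)) {
                            printed += snprintf(bf + printed, size - printed,
                                                "%s%s", printed ? "," : "",
                                                names[i]);
                    } else {
                            printed += snprintf(bf + printed, size - printed,
                                                "%s...", printed ? "," : "");
                            break;
                    }
            }
            return printed;
    }

    int main(void)
    {
            const char *names[] = { "cycles", "instructions", "branches" };
            char buf[16];

            join_names(buf, sizeof(buf), names, 3);
            printf("%s\n", buf);    /* "cycles,..." with a 16-byte buffer */
            return 0;
    }
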
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index b695ffaae519..a8b97b50cceb 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -365,4 +365,6 @@ int evlist__ctlfd_ack(struct evlist *evlist);
#define EVLIST_DISABLED_MSG "Events disabled\n"
struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);
+
+int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf);
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 7ecbc8e2fbfa..4a3cd1b5bb33 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -47,6 +47,7 @@
#include "memswap.h"
#include "util.h"
#include "hashmap.h"
+#include "pmu-hybrid.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include <internal/xyarray.h>
@@ -295,11 +296,11 @@ static bool perf_event_can_profile_kernel(void)
return perf_event_paranoid_check(1);
}
-struct evsel *evsel__new_cycles(bool precise)
+struct evsel *evsel__new_cycles(bool precise, __u32 type, __u64 config)
{
struct perf_event_attr attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES,
+ .type = type,
+ .config = config,
.exclude_kernel = !perf_event_can_profile_kernel(),
};
struct evsel *evsel;
@@ -492,6 +493,28 @@ const char *evsel__hw_names[PERF_COUNT_HW_MAX] = {
"ref-cycles",
};
+char *evsel__bpf_counter_events;
+
+bool evsel__match_bpf_counter_events(const char *name)
+{
+ int name_len;
+ bool match;
+ char *ptr;
+
+ if (!evsel__bpf_counter_events)
+ return false;
+
+ ptr = strstr(evsel__bpf_counter_events, name);
+ name_len = strlen(name);
+
+ /* check name matches a full token in evsel__bpf_counter_events */
+ match = (ptr != NULL) &&
+ ((ptr == evsel__bpf_counter_events) || (*(ptr - 1) == ',')) &&
+ ((*(ptr + name_len) == ',') || (*(ptr + name_len) == '\0'));
+
+ return match;
+}
+
static const char *__evsel__hw_name(u64 config)
{
if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config])
@@ -621,7 +644,7 @@ const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_AL
#define COP(x) (1 << x)
/*
- * cache operartion stat
+ * cache operation stat
* L1I : Read and prefetch only
* ITLB and BPU : Read-only
*/
@@ -2275,7 +2298,7 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
/*
* Undo swap of u64, then swap on individual u32s,
* get the size of the raw area and undo all of the
- * swap. The pevent interface handles endianity by
+ * swap. The pevent interface handles endianness by
* itself.
*/
if (swapped) {
@@ -2797,3 +2820,8 @@ void evsel__zero_per_pkg(struct evsel *evsel)
hashmap__clear(evsel->per_pkg_mask);
}
}
+
+bool evsel__is_hybrid(struct evsel *evsel)
+{
+ return evsel->pmu_name && perf_pmu__is_hybrid(evsel->pmu_name);
+}
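
evsel__match_bpf_counter_events() accepts a name only as a full comma-delimited
token of the configured list. A standalone sketch of that boundary test (the
list contents are illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool match_token(const char *list, const char *name)
    {
            const char *ptr = strstr(list, name);
            size_t len = strlen(name);

            return ptr &&
                   (ptr == list || *(ptr - 1) == ',') &&
                   (*(ptr + len) == ',' || *(ptr + len) == '\0');
    }

    int main(void)
    {
            const char *list = "cycles,instructions";

            printf("%d %d\n", match_token(list, "cycles"),   /* 1 */
                              match_token(list, "cycle"));   /* 0 */
            return 0;
    }

The boundary checks are what make "cycle" fail against "cycles,instructions"
even though strstr() finds a substring.
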
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 6026487353dd..75cf5dbfe208 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -20,6 +20,8 @@ union perf_event;
struct bpf_counter_ops;
struct target;
struct hashmap;
+struct bperf_leader_bpf;
+struct bperf_follower_bpf;
typedef int (evsel__sb_cb_t)(union perf_event *event, void *data);
@@ -80,6 +82,7 @@ struct evsel {
bool auto_merge_stats;
bool collect_stat;
bool weak_group;
+ bool bpf_counter;
int bpf_fd;
struct bpf_object *bpf_obj;
};
@@ -113,6 +116,7 @@ struct evsel {
bool merged_stat;
bool reset_group;
bool errored;
+ bool use_config_name;
struct hashmap *per_pkg_mask;
struct evsel *leader;
struct list_head config_terms;
@@ -130,8 +134,24 @@ struct evsel {
* See also evsel__has_callchain().
*/
__u64 synth_sample_type;
- struct list_head bpf_counter_list;
+
+ /*
+ * bpf_counter_ops serves two use cases:
+	 * 1. perf-stat -b: counting events used by BPF programs
+	 * 2. perf-stat --use-bpf: use BPF programs to aggregate counts
+ */
struct bpf_counter_ops *bpf_counter_ops;
+
+ /* for perf-stat -b */
+ struct list_head bpf_counter_list;
+
+ /* for perf-stat --use-bpf */
+ int bperf_leader_prog_fd;
+ int bperf_leader_link_fd;
+ union {
+ struct bperf_leader_bpf *leader_skel;
+ struct bperf_follower_bpf *follower_skel;
+ };
};
struct perf_missing_features {
@@ -157,7 +177,6 @@ struct perf_missing_features {
extern struct perf_missing_features perf_missing_features;
struct perf_cpu_map;
-struct target;
struct thread_map;
struct record_opts;
@@ -202,7 +221,7 @@ static inline struct evsel *evsel__newtp(const char *sys, const char *name)
return evsel__newtp_idx(sys, name, 0);
}
-struct evsel *evsel__new_cycles(bool precise);
+struct evsel *evsel__new_cycles(bool precise, __u32 type, __u64 config);
struct tep_event *event_format__new(const char *sys, const char *name);
@@ -222,6 +241,11 @@ void evsel__calc_id_pos(struct evsel *evsel);
bool evsel__is_cache_op_valid(u8 type, u8 op);
+static inline bool evsel__is_bpf(struct evsel *evsel)
+{
+ return evsel->bpf_counter_ops != NULL;
+}
+
#define EVSEL__MAX_ALIASES 8
extern const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES];
@@ -229,6 +253,9 @@ extern const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALI
extern const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES];
extern const char *evsel__hw_names[PERF_COUNT_HW_MAX];
extern const char *evsel__sw_names[PERF_COUNT_SW_MAX];
+extern char *evsel__bpf_counter_events;
+bool evsel__match_bpf_counter_events(const char *name);
+
int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size);
const char *evsel__name(struct evsel *evsel);
@@ -435,4 +462,5 @@ struct perf_env *evsel__env(struct evsel *evsel);
int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
void evsel__zero_per_pkg(struct evsel *evsel);
+bool evsel__is_hybrid(struct evsel *evsel);
#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/expr.h b/tools/perf/util/expr.h
index dcf8d19b83c8..85df3e4771e4 100644
--- a/tools/perf/util/expr.h
+++ b/tools/perf/util/expr.h
@@ -3,7 +3,7 @@
#define PARSE_CTX_H 1
// There are fixes that need to land upstream before we can use libbpf's headers,
-// for now use our copy uncoditionally, since the data structures at this point
+// for now use our copy unconditionally, since the data structures at this point
// are exactly the same, no problem.
//#ifdef HAVE_LIBBPF_SUPPORT
//#include <bpf/hashmap.h>
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 20effdff76ce..aa1e42518d37 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -127,7 +127,7 @@ static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
return 0;
}
-/* Return: 0 if succeded, -ERR if failed. */
+/* Return: 0 if succeeded, -ERR if failed. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
if (!ff->buf)
@@ -135,7 +135,7 @@ int do_write(struct feat_fd *ff, const void *buf, size_t size)
return __do_write_buf(ff, buf, size);
}
-/* Return: 0 if succeded, -ERR if failed. */
+/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
u64 *p = (u64 *) set;
@@ -154,7 +154,7 @@ static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
return 0;
}
-/* Return: 0 if succeded, -ERR if failed. */
+/* Return: 0 if succeeded, -ERR if failed. */
int write_padded(struct feat_fd *ff, const void *bf,
size_t count, size_t count_aligned)
{
@@ -170,7 +170,7 @@ int write_padded(struct feat_fd *ff, const void *bf,
#define string_size(str) \
(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
-/* Return: 0 if succeded, -ERR if failed. */
+/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_string(struct feat_fd *ff, const char *str)
{
u32 len, olen;
@@ -266,7 +266,7 @@ static char *do_read_string(struct feat_fd *ff)
return NULL;
}
-/* Return: 0 if succeded, -ERR if failed. */
+/* Return: 0 if succeeded, -ERR if failed. */
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
{
unsigned long *set;
@@ -2874,7 +2874,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
int err = -1;
if (ff->ph->needs_swap) {
- pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
+ pr_warning("interpreting bpf_prog_info from systems with endianness is not yet supported\n");
return 0;
}
@@ -2942,7 +2942,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
int err = -1;
if (ff->ph->needs_swap) {
- pr_warning("interpreting btf from systems with endianity is not yet supported\n");
+ pr_warning("interpreting btf from systems with endianness is not yet supported\n");
return 0;
}
@@ -3481,11 +3481,11 @@ static const size_t attr_pipe_abi_sizes[] = {
};
/*
- * In the legacy pipe format, there is an implicit assumption that endiannesss
+ * In the legacy pipe format, there is an implicit assumption that endianness
* between host recording the samples, and host parsing the samples is the
* same. This is not always the case given that the pipe output may always be
* redirected into a file and analyzed on a different machine with possibly a
- * different endianness and perf_event ABI revsions in the perf tool itself.
+ * different endianness and perf_event ABI revisions in the perf tool itself.
*/
static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
{
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index c82f5fc26af8..65fe65ba03c2 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -211,6 +211,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
+ hists__new_col_len(hists, HISTC_P_STAGE_CYC, 13);
if (symbol_conf.nanosecs)
hists__new_col_len(hists, HISTC_TIME, 16);
else
@@ -289,13 +290,14 @@ static long hist_time(unsigned long htime)
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
- u64 weight, u64 ins_lat)
+ u64 weight, u64 ins_lat, u64 p_stage_cyc)
{
he_stat->period += period;
he_stat->weight += weight;
he_stat->nr_events += 1;
he_stat->ins_lat += ins_lat;
+ he_stat->p_stage_cyc += p_stage_cyc;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
@@ -308,6 +310,7 @@ static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
dest->nr_events += src->nr_events;
dest->weight += src->weight;
dest->ins_lat += src->ins_lat;
+ dest->p_stage_cyc += src->p_stage_cyc;
}
static void he_stat__decay(struct he_stat *he_stat)
@@ -597,6 +600,7 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
u64 period = entry->stat.period;
u64 weight = entry->stat.weight;
u64 ins_lat = entry->stat.ins_lat;
+ u64 p_stage_cyc = entry->stat.p_stage_cyc;
bool leftmost = true;
p = &hists->entries_in->rb_root.rb_node;
@@ -615,11 +619,11 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
if (!cmp) {
if (sample_self) {
- he_stat__add_period(&he->stat, period, weight, ins_lat);
+ he_stat__add_period(&he->stat, period, weight, ins_lat, p_stage_cyc);
hist_entry__add_callchain_period(he, period);
}
if (symbol_conf.cumulate_callchain)
- he_stat__add_period(he->stat_acc, period, weight, ins_lat);
+ he_stat__add_period(he->stat_acc, period, weight, ins_lat, p_stage_cyc);
/*
* This mem info was allocated from sample__resolve_mem
@@ -731,6 +735,7 @@ __hists__add_entry(struct hists *hists,
.period = sample->period,
.weight = sample->weight,
.ins_lat = sample->ins_lat,
+ .p_stage_cyc = sample->p_stage_cyc,
},
.parent = sym_parent,
.filtered = symbol__parent_filter(sym_parent) | al->filtered,
@@ -2320,14 +2325,19 @@ void events_stats__inc(struct events_stats *stats, u32 type)
++stats->nr_events[type];
}
-void hists__inc_nr_events(struct hists *hists, u32 type)
+static void hists_stats__inc(struct hists_stats *stats)
{
- events_stats__inc(&hists->stats, type);
+ ++stats->nr_samples;
+}
+
+void hists__inc_nr_events(struct hists *hists)
+{
+ hists_stats__inc(&hists->stats);
}
void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
- events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
+ hists_stats__inc(&hists->stats);
if (!filtered)
hists->stats.nr_non_filtered_samples++;
}
@@ -2666,14 +2676,21 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
}
}
-size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
+size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
+ bool skip_empty)
{
struct evsel *pos;
size_t ret = 0;
evlist__for_each_entry(evlist, pos) {
+ struct hists *hists = evsel__hists(pos);
+
+ if (skip_empty && !hists->stats.nr_samples)
+ continue;
+
ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
- ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
+ ret += fprintf(fp, "%16s events: %10d\n",
+ "SAMPLE", hists->stats.nr_samples);
}
return ret;
@@ -2693,7 +2710,7 @@ int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool sh
const struct dso *dso = hists->dso_filter;
struct thread *thread = hists->thread_filter;
int socket_id = hists->socket_filter;
- unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ unsigned long nr_samples = hists->stats.nr_samples;
u64 nr_events = hists->stats.total_period;
struct evsel *evsel = hists_to_evsel(hists);
const char *ev_name = evsel__name(evsel);
@@ -2720,7 +2737,7 @@ int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool sh
nr_samples += pos_hists->stats.nr_non_filtered_samples;
nr_events += pos_hists->stats.total_non_filtered_period;
} else {
- nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ nr_samples += pos_hists->stats.nr_samples;
nr_events += pos_hists->stats.total_period;
}
}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 3c537232294b..5343b62476e6 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -75,6 +75,7 @@ enum hist_column {
HISTC_MEM_BLOCKED,
HISTC_LOCAL_INS_LAT,
HISTC_GLOBAL_INS_LAT,
+ HISTC_P_STAGE_CYC,
HISTC_NR_COLS, /* Last entry */
};
@@ -95,7 +96,7 @@ struct hists {
const char *uid_filter_str;
const char *symbol_filter_str;
pthread_mutex_t lock;
- struct events_stats stats;
+ struct hists_stats stats;
u64 event_stream;
u16 col_len[HISTC_NR_COLS];
bool has_callchains;
@@ -195,13 +196,14 @@ struct hist_entry *hists__get_entry(struct hists *hists, int idx);
u64 hists__total_period(struct hists *hists);
void hists__reset_stats(struct hists *hists);
void hists__inc_stats(struct hists *hists, struct hist_entry *h);
-void hists__inc_nr_events(struct hists *hists, u32 type);
+void hists__inc_nr_events(struct hists *hists);
void hists__inc_nr_samples(struct hists *hists, bool filtered);
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
int max_cols, float min_pcnt, FILE *fp,
bool ignore_callchains);
-size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp);
+size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
+ bool skip_empty);
void hists__filter_by_dso(struct hists *hists);
void hists__filter_by_thread(struct hists *hists);
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
index 2f6cc7eea251..593f20e9774c 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
@@ -169,11 +169,13 @@ int intel_pt_get_insn(const unsigned char *buf, size_t len, int x86_64,
struct intel_pt_insn *intel_pt_insn)
{
struct insn insn;
+ int ret;
- insn_init(&insn, buf, len, x86_64);
- insn_get_length(&insn);
- if (!insn_complete(&insn) || insn.length > len)
+ ret = insn_decode(&insn, buf, len,
+ x86_64 ? INSN_MODE_64 : INSN_MODE_32);
+ if (ret < 0 || insn.length > len)
return -1;
+
intel_pt_insn_decoder(&insn, intel_pt_insn);
if (insn.length < INTEL_PT_INSN_BUF_SZ)
memcpy(intel_pt_insn->buf, buf, insn.length);
@@ -194,12 +196,13 @@ const char *dump_insn(struct perf_insn *x, uint64_t ip __maybe_unused,
u8 *inbuf, int inlen, int *lenp)
{
struct insn insn;
- int n, i;
+ int n, i, ret;
int left;
- insn_init(&insn, inbuf, inlen, x->is64bit);
- insn_get_length(&insn);
- if (!insn_complete(&insn) || insn.length > inlen)
+ ret = insn_decode(&insn, inbuf, inlen,
+ x->is64bit ? INSN_MODE_64 : INSN_MODE_32);
+
+ if (ret < 0 || insn.length > inlen)
return "<bad>";
if (lenp)
*lenp = insn.length;
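
The conversion above folds the old insn_init()/insn_get_length()/
insn_complete() sequence into one insn_decode() call whose return value carries
the failure. A standalone sketch of the resulting calling pattern, with a
hypothetical demo_decode() standing in for the real decoder:

    #include <stdio.h>

    struct demo_insn { int length; };

    /* stand-in for insn_decode(): fails via the return value */
    static int demo_decode(struct demo_insn *insn,
                           const unsigned char *buf, int len)
    {
            if (!buf || len <= 0)
                    return -1;
            insn->length = 1;       /* pretend a one-byte instruction */
            return 0;
    }

    int main(void)
    {
            unsigned char byte = 0x90;      /* x86 NOP, illustrative */
            struct demo_insn insn;
            int ret = demo_decode(&insn, &byte, 1);

            if (ret < 0 || insn.length > 1) /* same shape as the hunk */
                    return 1;
            printf("decoded %d byte(s)\n", insn.length);
            return 0;
    }
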
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index f6e28ac231b7..8658d42ce57a 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -3569,7 +3569,7 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
/*
 * Since this thread will not be kept in any rbtree nor in a
* list, initialize its list node so that at thread__put() the
- * current thread lifetime assuption is kept and we don't segfault
+ * current thread lifetime assumption is kept and we don't segfault
* at list_del_init().
*/
INIT_LIST_HEAD(&pt->unknown_thread->node);
diff --git a/tools/perf/util/iostat.c b/tools/perf/util/iostat.c
new file mode 100644
index 000000000000..57dd49da28fe
--- /dev/null
+++ b/tools/perf/util/iostat.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "util/iostat.h"
+#include "util/debug.h"
+
+enum iostat_mode_t iostat_mode = IOSTAT_NONE;
+
+__weak int iostat_prepare(struct evlist *evlist __maybe_unused,
+ struct perf_stat_config *config __maybe_unused)
+{
+ return -1;
+}
+
+__weak int iostat_parse(const struct option *opt __maybe_unused,
+ const char *str __maybe_unused,
+ int unset __maybe_unused)
+{
+ pr_err("iostat mode is not supported on current platform\n");
+ return -1;
+}
+
+__weak void iostat_list(struct evlist *evlist __maybe_unused,
+ struct perf_stat_config *config __maybe_unused)
+{
+}
+
+__weak void iostat_release(struct evlist *evlist __maybe_unused)
+{
+}
+
+__weak void iostat_print_header_prefix(struct perf_stat_config *config __maybe_unused)
+{
+}
+
+__weak void iostat_print_metric(struct perf_stat_config *config __maybe_unused,
+ struct evsel *evsel __maybe_unused,
+ struct perf_stat_output_ctx *out __maybe_unused)
+{
+}
+
+__weak void iostat_prefix(struct evlist *evlist __maybe_unused,
+ struct perf_stat_config *config __maybe_unused,
+ char *prefix __maybe_unused,
+ struct timespec *ts __maybe_unused)
+{
+}
+
+__weak void iostat_print_counters(struct evlist *evlist __maybe_unused,
+ struct perf_stat_config *config __maybe_unused,
+ struct timespec *ts __maybe_unused,
+ char *prefix __maybe_unused,
+ iostat_print_counter_t print_cnt_cb __maybe_unused)
+{
+}
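
All of the iostat entry points above are defined __weak, so these generic stubs
bind only when no platform supplies a strong definition. A standalone sketch of
the same linker behaviour with the GCC/Clang attribute (the function name is
hypothetical):

    #include <stdio.h>

    __attribute__((weak)) int iostat_demo_parse(const char *str)
    {
            (void)str;
            fprintf(stderr, "iostat mode is not supported on current platform\n");
            return -1;
    }

    int main(void)
    {
            /* Linking another object that defines a strong
             * iostat_demo_parse() would silently replace this stub. */
            return iostat_demo_parse("list") < 0 ? 1 : 0;
    }
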
diff --git a/tools/perf/util/iostat.h b/tools/perf/util/iostat.h
new file mode 100644
index 000000000000..23c1c46a331a
--- /dev/null
+++ b/tools/perf/util/iostat.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * perf iostat
+ *
+ * Copyright (C) 2020, Intel Corporation
+ *
+ * Authors: Alexander Antonov <alexander.antonov@linux.intel.com>
+ */
+
+#ifndef _IOSTAT_H
+#define _IOSTAT_H
+
+#include <subcmd/parse-options.h>
+#include "util/stat.h"
+#include "util/parse-events.h"
+#include "util/evlist.h"
+
+struct option;
+struct perf_stat_config;
+struct evlist;
+struct timespec;
+
+enum iostat_mode_t {
+ IOSTAT_NONE = -1,
+ IOSTAT_RUN = 0,
+ IOSTAT_LIST = 1
+};
+
+extern enum iostat_mode_t iostat_mode;
+
+typedef void (*iostat_print_counter_t)(struct perf_stat_config *, struct evsel *, char *);
+
+int iostat_prepare(struct evlist *evlist, struct perf_stat_config *config);
+int iostat_parse(const struct option *opt, const char *str,
+ int unset __maybe_unused);
+void iostat_list(struct evlist *evlist, struct perf_stat_config *config);
+void iostat_release(struct evlist *evlist);
+void iostat_prefix(struct evlist *evlist, struct perf_stat_config *config,
+ char *prefix, struct timespec *ts);
+void iostat_print_header_prefix(struct perf_stat_config *config);
+void iostat_print_metric(struct perf_stat_config *config, struct evsel *evsel,
+ struct perf_stat_output_ctx *out);
+void iostat_print_counters(struct evlist *evlist,
+ struct perf_stat_config *config, struct timespec *ts,
+ char *prefix, iostat_print_counter_t print_cnt_cb);
+
+#endif /* _IOSTAT_H */
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 9760d8e7b386..917a9c707371 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -396,21 +396,31 @@ static pid_t jr_entry_tid(struct jit_buf_desc *jd, union jr_entry *jr)
static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
{
- struct perf_tsc_conversion tc;
+ struct perf_tsc_conversion tc = { .time_shift = 0, };
+ struct perf_record_time_conv *time_conv = &jd->session->time_conv;
if (!jd->use_arch_timestamp)
return timestamp;
- tc.time_shift = jd->session->time_conv.time_shift;
- tc.time_mult = jd->session->time_conv.time_mult;
- tc.time_zero = jd->session->time_conv.time_zero;
- tc.time_cycles = jd->session->time_conv.time_cycles;
- tc.time_mask = jd->session->time_conv.time_mask;
- tc.cap_user_time_zero = jd->session->time_conv.cap_user_time_zero;
- tc.cap_user_time_short = jd->session->time_conv.cap_user_time_short;
+ tc.time_shift = time_conv->time_shift;
+ tc.time_mult = time_conv->time_mult;
+ tc.time_zero = time_conv->time_zero;
- if (!tc.cap_user_time_zero)
- return 0;
+ /*
+	 * The event TIME_CONV was extended with the fields starting at
+	 * "time_cycles" when cap_user_time_short became supported. For
+	 * backward compatibility, check the event size and only assign
+	 * these extended fields if they are contained in the event.
+ */
+ if (event_contains(*time_conv, time_cycles)) {
+ tc.time_cycles = time_conv->time_cycles;
+ tc.time_mask = time_conv->time_mask;
+ tc.cap_user_time_zero = time_conv->cap_user_time_zero;
+ tc.cap_user_time_short = time_conv->cap_user_time_short;
+
+ if (!tc.cap_user_time_zero)
+ return 0;
+ }
return tsc_to_perf_time(timestamp, &tc);
}
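
event_contains() makes the backward-compatibility check above work by comparing
the recorded header size against the offset of the field in question. A
standalone sketch of that size-versus-offset probe, with hypothetical demo_
types rather than the real perf_record_time_conv layout:

    #include <stddef.h>
    #include <stdio.h>

    struct demo_header { unsigned short size; };

    struct demo_time_conv {
            struct demo_header header;
            unsigned long long time_zero;   /* original fields ... */
            unsigned long long time_cycles; /* extension starts here */
    };

    /* field is present only if the recorded size reaches past its offset */
    #define demo_contains(obj, mem) \
            ((obj).header.size > offsetof(struct demo_time_conv, mem))

    int main(void)
    {
            struct demo_time_conv old_event = {
                    .header = { offsetof(struct demo_time_conv, time_cycles) },
            };

            printf("has time_cycles: %d\n",
                   demo_contains(old_event, time_cycles));
            return 0;   /* prints 0: the old, shorter event lacks the field */
    }
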
diff --git a/tools/perf/util/levenshtein.c b/tools/perf/util/levenshtein.c
index a217ecf0359d..6a6712635aa4 100644
--- a/tools/perf/util/levenshtein.c
+++ b/tools/perf/util/levenshtein.c
@@ -30,7 +30,7 @@
*
* It does so by calculating the costs of the path ending in characters
* i (in string1) and j (in string2), respectively, given that the last
- * operation is a substition, a swap, a deletion, or an insertion.
+ * operation is a substitution, a swap, a deletion, or an insertion.
*
* This implementation allows the costs to be weighted:
*
diff --git a/tools/perf/util/libunwind/arm64.c b/tools/perf/util/libunwind/arm64.c
index 6b4e5a0892f8..c397be0c2e32 100644
--- a/tools/perf/util/libunwind/arm64.c
+++ b/tools/perf/util/libunwind/arm64.c
@@ -4,7 +4,7 @@
* generic one.
*
* The function 'LIBUNWIND__ARCH_REG_ID' name is set according to arch
- * name and the defination of this function is included directly from
+ * name and the definition of this function is included directly from
* 'arch/arm64/util/unwind-libunwind.c', to make sure that this function
* is defined no matter what arch the host is.
*
diff --git a/tools/perf/util/libunwind/x86_32.c b/tools/perf/util/libunwind/x86_32.c
index 21c216c40a3b..b2b92d030aef 100644
--- a/tools/perf/util/libunwind/x86_32.c
+++ b/tools/perf/util/libunwind/x86_32.c
@@ -4,7 +4,7 @@
* generic one.
*
* The function 'LIBUNWIND__ARCH_REG_ID' name is set according to arch
- * name and the defination of this function is included directly from
+ * name and the definition of this function is included directly from
* 'arch/x86/util/unwind-libunwind.c', to make sure that this function
* is defined no matter what arch the host is.
*
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index dbdffb6673fe..3ceaf7ef3301 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -471,7 +471,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
/*
 * This is optional work. Even if it fails we can continue our
- * work. Needn't to check error return.
+ * work. Needn't check error return.
*/
llvm__get_kbuild_opts(&kbuild_dir, &kbuild_include_opts);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index b5c2d8be4144..3ff4936a15a4 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -905,7 +905,7 @@ static struct map *machine__addnew_module_map(struct machine *machine, u64 start
maps__insert(&machine->kmaps, map);
- /* Put the map here because maps__insert alread got it */
+ /* Put the map here because maps__insert already got it */
map__put(map);
out:
/* put the dso here, corresponding to machine__findnew_module_dso */
@@ -1952,7 +1952,7 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
* maps because that is what the kernel just did.
*
* But when synthesizing, this should not be done. If we do, we end up
- * with overlapping maps as we process the sythesized MMAP2 events that
+ * with overlapping maps as we process the synthesized MMAP2 events that
* get delivered shortly thereafter.
*
* Use the FORK event misc flags in an internal way to signal this
@@ -2038,8 +2038,8 @@ int machine__process_event(struct machine *machine, union perf_event *event,
static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
if (!regexec(regex, sym->name, 0, NULL, 0))
- return 1;
- return 0;
+ return true;
+ return false;
}
static void ip__resolve_ams(struct thread *thread,
@@ -2518,7 +2518,7 @@ static bool has_stitched_lbr(struct thread *thread,
/*
* Check if there are identical LBRs between two samples.
- * Identicall LBRs must have same from, to and flags values. Also,
+ * Identical LBRs must have same from, to and flags values. Also,
* they have to be saved in the same LBR registers (same physical
* index).
*
@@ -2588,7 +2588,7 @@ err:
}
/*
- * Recolve LBR callstack chain sample
+ * Resolve LBR callstack chain sample
* Return:
* 1 on success get LBR callchain information
* 0 no available LBR callchain information, should try fp
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 9f32825c98d8..d32f5b28c1fb 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -75,7 +75,7 @@ struct thread;
/* map__for_each_symbol - iterate over the symbols in the given map
*
- * @map: the 'struct map *' in which symbols itereated
+ * @map: the 'struct map *' in which symbols are iterated
* @pos: the 'struct symbol *' to use as a loop cursor
* @n: the 'struct rb_node *' to use as a temporary storage
* Note: caller must ensure map->dso is not NULL (map is loaded).
@@ -86,7 +86,7 @@ struct thread;
/* map__for_each_symbol_with_name - iterate over the symbols in the given map
* that have the given name
*
- * @map: the 'struct map *' in which symbols itereated
+ * @map: the 'struct map *' in which symbols are iterated
* @sym_name: the symbol name
* @pos: the 'struct symbol *' to use as a loop cursor
*/
diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
index 755cef7e0625..cacdebd65b8a 100644
--- a/tools/perf/util/mem-events.h
+++ b/tools/perf/util/mem-events.h
@@ -44,7 +44,6 @@ bool is_mem_loads_aux_event(struct evsel *leader);
void perf_mem_events__list(void);
-struct mem_info;
int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
@@ -81,7 +80,7 @@ struct c2c_stats {
u32 rmt_dram; /* count of loads miss to remote DRAM */
u32 blk_data; /* count of loads blocked by data */
u32 blk_addr; /* count of loads blocked by address conflict */
- u32 nomap; /* count of load/stores with no phys adrs */
+ u32 nomap; /* count of load/stores with no phys addrs */
u32 noparse; /* count of unparsable data sources */
};
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 26c990e32378..8336dd8e8098 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -181,7 +181,7 @@ static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
* @pctx: the parse context for the metric expression.
* @metric_no_merge: don't attempt to share events for the metric with other
* metrics.
- * @has_constraint: is there a contraint on the group of events? In which case
+ * @has_constraint: is there a constraint on the group of events? In which case
* the events won't be grouped.
* @metric_events: out argument, null terminated array of evsel's associated
* with the metric.
@@ -618,7 +618,7 @@ static int metricgroup__print_sys_event_iter(struct pmu_event *pe, void *data)
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
bool raw, bool details)
{
- struct pmu_events_map *map = perf_pmu__find_map(NULL);
+ struct pmu_events_map *map = pmu_events_map__find();
struct pmu_event *pe;
int i;
struct rblist groups;
@@ -900,7 +900,8 @@ static int __add_metric(struct list_head *metric_list,
(match_metric(__pe->metric_group, __metric) || \
match_metric(__pe->metric_name, __metric)))
-static struct pmu_event *find_metric(const char *metric, struct pmu_events_map *map)
+struct pmu_event *metricgroup__find_metric(const char *metric,
+ struct pmu_events_map *map)
{
struct pmu_event *pe;
int i;
@@ -985,7 +986,7 @@ static int __resolve_metric(struct metric *m,
struct expr_id *parent;
struct pmu_event *pe;
- pe = find_metric(cur->key, map);
+ pe = metricgroup__find_metric(cur->key, map);
if (!pe)
continue;
@@ -1253,8 +1254,7 @@ int metricgroup__parse_groups(const struct option *opt,
struct rblist *metric_events)
{
struct evlist *perf_evlist = *(struct evlist **)opt->value;
- struct pmu_events_map *map = perf_pmu__find_map(NULL);
-
+ struct pmu_events_map *map = pmu_events_map__find();
return parse_groups(perf_evlist, str, metric_no_group,
metric_no_merge, NULL, metric_events, map);
@@ -1273,7 +1273,7 @@ int metricgroup__parse_groups_test(struct evlist *evlist,
bool metricgroup__has_metric(const char *metric)
{
- struct pmu_events_map *map = perf_pmu__find_map(NULL);
+ struct pmu_events_map *map = pmu_events_map__find();
struct pmu_event *pe;
int i;
diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
index ed1b9392e624..cc4a92492a61 100644
--- a/tools/perf/util/metricgroup.h
+++ b/tools/perf/util/metricgroup.h
@@ -9,7 +9,6 @@
struct evlist;
struct evsel;
-struct evlist;
struct option;
struct rblist;
struct pmu_events_map;
@@ -44,7 +43,8 @@ int metricgroup__parse_groups(const struct option *opt,
bool metric_no_group,
bool metric_no_merge,
struct rblist *metric_events);
-
+struct pmu_event *metricgroup__find_metric(const char *metric,
+ struct pmu_events_map *map);
int metricgroup__parse_groups_test(struct evlist *evlist,
struct pmu_events_map *map,
const char *str,
diff --git a/tools/perf/util/parse-events-hybrid.c b/tools/perf/util/parse-events-hybrid.c
new file mode 100644
index 000000000000..10160ab126f9
--- /dev/null
+++ b/tools/perf/util/parse-events-hybrid.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/err.h>
+#include <linux/zalloc.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/param.h>
+#include "evlist.h"
+#include "evsel.h"
+#include "parse-events.h"
+#include "parse-events-hybrid.h"
+#include "debug.h"
+#include "pmu.h"
+#include "pmu-hybrid.h"
+#include "perf.h"
+
+static void config_hybrid_attr(struct perf_event_attr *attr,
+ int type, int pmu_type)
+{
+ /*
+ * attr.config layout for type PERF_TYPE_HARDWARE and
+ * PERF_TYPE_HW_CACHE
+ *
+ * PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA
+ * AA: hardware event ID
+ * EEEEEEEE: PMU type ID
+ * PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB
+ * BB: hardware cache ID
+ * CC: hardware cache op ID
+ * DD: hardware cache op result ID
+ * EEEEEEEE: PMU type ID
+	 * If the PMU type ID is 0, PERF_TYPE_RAW will be applied.
+ */
+ attr->type = type;
+ attr->config = attr->config | ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
+}
+
+static int create_event_hybrid(__u32 config_type, int *idx,
+ struct list_head *list,
+ struct perf_event_attr *attr, char *name,
+ struct list_head *config_terms,
+ struct perf_pmu *pmu)
+{
+ struct evsel *evsel;
+ __u32 type = attr->type;
+ __u64 config = attr->config;
+
+ config_hybrid_attr(attr, config_type, pmu->type);
+ evsel = parse_events__add_event_hybrid(list, idx, attr, name,
+ pmu, config_terms);
+ if (evsel)
+ evsel->pmu_name = strdup(pmu->name);
+ else
+ return -ENOMEM;
+
+ attr->type = type;
+ attr->config = config;
+ return 0;
+}
+
+static int pmu_cmp(struct parse_events_state *parse_state,
+ struct perf_pmu *pmu)
+{
+ if (!parse_state->hybrid_pmu_name)
+ return 0;
+
+ return strcmp(parse_state->hybrid_pmu_name, pmu->name);
+}
+
+static int add_hw_hybrid(struct parse_events_state *parse_state,
+ struct list_head *list, struct perf_event_attr *attr,
+ char *name, struct list_head *config_terms)
+{
+ struct perf_pmu *pmu;
+ int ret;
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ if (pmu_cmp(parse_state, pmu))
+ continue;
+
+ ret = create_event_hybrid(PERF_TYPE_HARDWARE,
+ &parse_state->idx, list, attr, name,
+ config_terms, pmu);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int create_raw_event_hybrid(int *idx, struct list_head *list,
+ struct perf_event_attr *attr, char *name,
+ struct list_head *config_terms,
+ struct perf_pmu *pmu)
+{
+ struct evsel *evsel;
+
+ attr->type = pmu->type;
+ evsel = parse_events__add_event_hybrid(list, idx, attr, name,
+ pmu, config_terms);
+ if (evsel)
+ evsel->pmu_name = strdup(pmu->name);
+ else
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int add_raw_hybrid(struct parse_events_state *parse_state,
+ struct list_head *list, struct perf_event_attr *attr,
+ char *name, struct list_head *config_terms)
+{
+ struct perf_pmu *pmu;
+ int ret;
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ if (pmu_cmp(parse_state, pmu))
+ continue;
+
+ ret = create_raw_event_hybrid(&parse_state->idx, list, attr,
+ name, config_terms, pmu);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
+ struct list_head *list,
+ struct perf_event_attr *attr,
+ char *name, struct list_head *config_terms,
+ bool *hybrid)
+{
+ *hybrid = false;
+ if (attr->type == PERF_TYPE_SOFTWARE)
+ return 0;
+
+ if (!perf_pmu__has_hybrid())
+ return 0;
+
+ *hybrid = true;
+ if (attr->type != PERF_TYPE_RAW) {
+ return add_hw_hybrid(parse_state, list, attr, name,
+ config_terms);
+ }
+
+ return add_raw_hybrid(parse_state, list, attr, name,
+ config_terms);
+}
+
+int parse_events__add_cache_hybrid(struct list_head *list, int *idx,
+ struct perf_event_attr *attr, char *name,
+ struct list_head *config_terms,
+ bool *hybrid,
+ struct parse_events_state *parse_state)
+{
+ struct perf_pmu *pmu;
+ int ret;
+
+ *hybrid = false;
+ if (!perf_pmu__has_hybrid())
+ return 0;
+
+ *hybrid = true;
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ if (pmu_cmp(parse_state, pmu))
+ continue;
+
+ ret = create_event_hybrid(PERF_TYPE_HW_CACHE, idx, list,
+ attr, name, config_terms, pmu);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
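
config_hybrid_attr() above packs the dynamic PMU type id into the upper 32 bits
of attr.config, matching the 0xEEEEEEEE000000AA layout in its comment. A
standalone sketch of the encoding arithmetic; the shift of 32 follows from that
layout, while the type id of 8 is just an example value:

    #include <inttypes.h>
    #include <stdio.h>

    #define DEMO_PMU_TYPE_SHIFT 32  /* EEEEEEEE occupies bits 63..32 */

    int main(void)
    {
            uint64_t hw_id = 0;     /* e.g. PERF_COUNT_HW_CPU_CYCLES */
            uint64_t pmu_type = 8;  /* e.g. a dynamic hybrid PMU type */
            uint64_t config = hw_id | (pmu_type << DEMO_PMU_TYPE_SHIFT);

            printf("config = %#018" PRIx64 "\n", config);
            return 0;       /* prints config = 0x0000000800000000 */
    }
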
diff --git a/tools/perf/util/parse-events-hybrid.h b/tools/perf/util/parse-events-hybrid.h
new file mode 100644
index 000000000000..f33bd67aa851
--- /dev/null
+++ b/tools/perf/util/parse-events-hybrid.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_PARSE_EVENTS_HYBRID_H
+#define __PERF_PARSE_EVENTS_HYBRID_H
+
+#include <linux/list.h>
+#include <stdbool.h>
+#include <linux/types.h>
+#include <linux/perf_event.h>
+#include <string.h>
+
+int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
+ struct list_head *list,
+ struct perf_event_attr *attr,
+ char *name, struct list_head *config_terms,
+ bool *hybrid);
+
+int parse_events__add_cache_hybrid(struct list_head *list, int *idx,
+ struct perf_event_attr *attr, char *name,
+ struct list_head *config_terms,
+ bool *hybrid,
+ struct parse_events_state *parse_state);
+
+#endif /* __PERF_PARSE_EVENTS_HYBRID_H */
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index c0c0fab22cb8..4dad14265b81 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -37,6 +37,8 @@
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/pfm.h"
+#include "util/parse-events-hybrid.h"
+#include "util/pmu-hybrid.h"
#include "perf.h"
#define MAX_NAME_LEN 100
@@ -47,6 +49,9 @@ extern int parse_events_debug;
int parse_events_parse(void *parse_state, void *scanner);
static int get_config_terms(struct list_head *head_config,
struct list_head *head_terms __maybe_unused);
+static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
+ const char *str, char *pmu_name,
+ struct list_head *list);
static struct perf_pmu_event_symbol *perf_pmu_events_list;
/*
@@ -452,14 +457,16 @@ static int config_attr(struct perf_event_attr *attr,
int parse_events_add_cache(struct list_head *list, int *idx,
char *type, char *op_result1, char *op_result2,
struct parse_events_error *err,
- struct list_head *head_config)
+ struct list_head *head_config,
+ struct parse_events_state *parse_state)
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
char name[MAX_NAME_LEN], *config_name;
int cache_type = -1, cache_op = -1, cache_result = -1;
char *op_result[2] = { op_result1, op_result2 };
- int i, n;
+ int i, n, ret;
+ bool hybrid;
/*
* No fallback - if we cannot get a clear cache type
@@ -519,6 +526,13 @@ int parse_events_add_cache(struct list_head *list, int *idx,
if (get_config_terms(head_config, &config_terms))
return -ENOMEM;
}
+
+ ret = parse_events__add_cache_hybrid(list, idx, &attr,
+ config_name ? : name, &config_terms,
+ &hybrid, parse_state);
+ if (hybrid)
+ return ret;
+
return add_event(list, idx, &attr, config_name ? : name, &config_terms);
}
@@ -846,9 +860,9 @@ split_bpf_config_terms(struct list_head *evt_head_config,
struct parse_events_term *term, *temp;
/*
- * Currectly, all possible user config term
+	 * Currently, all possible user config terms
 	 * belong to the bpf object. parse_events__is_hardcoded_term()
- * happends to be a good flag.
+ * happens to be a good flag.
*
* See parse_events_config_bpf() and
* config_term_tracepoint().
@@ -898,7 +912,7 @@ int parse_events_load_bpf(struct parse_events_state *parse_state,
/*
* Caller doesn't know anything about obj_head_config,
- * so combine them together again before returnning.
+ * so combine them together again before returning.
*/
if (head_config)
list_splice_tail(&obj_head_config, head_config);
@@ -1185,10 +1199,10 @@ do { \
}
/*
- * Check term availbility after basic checking so
+ * Check term availability after basic checking so
* PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
*
- * If check availbility at the entry of this function,
+ * If check availability at the entry of this function,
* user will see "'<sysfs term>' is not usable in 'perf stat'"
* if an invalid config term is provided for legacy events
* (for example, instructions/badterm/...), which is confusing.
@@ -1419,6 +1433,8 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
+ bool hybrid;
+ int ret;
memset(&attr, 0, sizeof(attr));
attr.type = type;
@@ -1433,6 +1449,12 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
return -ENOMEM;
}
+ ret = parse_events__add_numeric_hybrid(parse_state, list, &attr,
+ get_config_name(head_config),
+ &config_terms, &hybrid);
+ if (hybrid)
+ return ret;
+
return add_event(list, &parse_state->idx, &attr,
get_config_name(head_config), &config_terms);
}
@@ -1456,6 +1478,33 @@ static bool config_term_percore(struct list_head *config_terms)
return false;
}
+static int parse_events__inside_hybrid_pmu(struct parse_events_state *parse_state,
+ struct list_head *list, char *name,
+ struct list_head *head_config)
+{
+ struct parse_events_term *term;
+ int ret = -1;
+
+ if (parse_state->fake_pmu || !head_config || list_empty(head_config) ||
+ !perf_pmu__is_hybrid(name)) {
+ return -1;
+ }
+
+ /*
+	 * Bail out if there is more than one term in the list.
+ */
+ if (head_config->next && head_config->next->next != head_config)
+ return -1;
+
+ term = list_first_entry(head_config, struct parse_events_term, list);
+ if (term && term->config && strcmp(term->config, "event")) {
+ ret = parse_events__with_hybrid_pmu(parse_state, term->config,
+ name, list);
+ }
+
+ return ret;
+}
+
int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, char *name,
struct list_head *head_config,
@@ -1549,6 +1598,11 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
return -ENOMEM;
+ if (!parse_events__inside_hybrid_pmu(parse_state, list, name,
+ head_config)) {
+ return 0;
+ }
+
if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
struct evsel_config_term *pos, *tmp;
@@ -1567,6 +1621,9 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
if (!evsel)
return -ENOMEM;
+ if (evsel->name)
+ evsel->use_config_name = true;
+
evsel->pmu_name = name ? strdup(name) : NULL;
evsel->use_uncore_alias = use_uncore_alias;
evsel->percore = config_term_percore(&evsel->config_terms);
@@ -1804,6 +1861,7 @@ struct event_modifier {
int pinned;
int weak;
int exclusive;
+ int bpf_counter;
};
static int get_event_modifier(struct event_modifier *mod, char *str,
@@ -1824,6 +1882,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
int exclude = eu | ek | eh;
int exclude_GH = evsel ? evsel->exclude_GH : 0;
int weak = 0;
+ int bpf_counter = 0;
memset(mod, 0, sizeof(*mod));
@@ -1867,6 +1926,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
exclusive = 1;
} else if (*str == 'W') {
weak = 1;
+ } else if (*str == 'b') {
+ bpf_counter = 1;
} else
break;
@@ -1898,6 +1959,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
mod->sample_read = sample_read;
mod->pinned = pinned;
mod->weak = weak;
+ mod->bpf_counter = bpf_counter;
mod->exclusive = exclusive;
return 0;
@@ -1912,7 +1974,7 @@ static int check_modifier(char *str)
char *p = str;
/* The sizeof includes 0 byte as well. */
- if (strlen(str) > (sizeof("ukhGHpppPSDIWe") - 1))
+ if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1))
return -1;
while (*p) {
@@ -1953,6 +2015,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
evsel->sample_read = mod.sample_read;
evsel->precise_max = mod.precise_max;
evsel->weak_group = mod.weak;
+ evsel->bpf_counter = mod.bpf_counter;
if (evsel__is_group_leader(evsel)) {
evsel->core.attr.pinned = mod.pinned;
@@ -2162,6 +2225,33 @@ int parse_events_terms(struct list_head *terms, const char *str)
return ret;
}
+static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
+ const char *str, char *pmu_name,
+ struct list_head *list)
+{
+ struct parse_events_state ps = {
+ .list = LIST_HEAD_INIT(ps.list),
+ .stoken = PE_START_EVENTS,
+ .hybrid_pmu_name = pmu_name,
+ .idx = parse_state->idx,
+ };
+ int ret;
+
+ ret = parse_events__scanner(str, &ps);
+ perf_pmu__parse_cleanup();
+
+ if (!ret) {
+ if (!list_empty(&ps.list)) {
+ list_splice(&ps.list, list);
+ parse_state->idx = ps.idx;
+ return 0;
+ } else
+ return -1;
+ }
+
+ return ret;
+}
+
int __parse_events(struct evlist *evlist, const char *str,
struct parse_events_error *err, struct perf_pmu *fake_pmu)
{
@@ -3185,3 +3275,12 @@ char *parse_events_formats_error_string(char *additional_terms)
fail:
return NULL;
}
+
+struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
+ struct perf_event_attr *attr,
+ char *name, struct perf_pmu *pmu,
+ struct list_head *config_terms)
+{
+ return __add_event(list, idx, attr, true, name, pmu,
+ config_terms, false, NULL);
+}
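
parse_events__inside_hybrid_pmu() above only handles a single config term,
which it detects with the circular-list test head->next->next == head. A
standalone sketch of that test on a minimal doubly-linked list (demo types, not
the kernel's list.h):

    #include <stdbool.h>
    #include <stdio.h>

    struct node { struct node *next, *prev; };

    static void list_init(struct node *h) { h->next = h->prev = h; }

    static void list_add(struct node *h, struct node *n)
    {
            n->next = h->next;
            n->prev = h;
            h->next->prev = n;
            h->next = n;
    }

    /* exactly one entry: non-empty, and the first entry loops back */
    static bool has_single_entry(struct node *head)
    {
            return head->next != head && head->next->next == head;
    }

    int main(void)
    {
            struct node head, a, b;

            list_init(&head);
            list_add(&head, &a);
            printf("one entry: %d\n", has_single_entry(&head));    /* 1 */
            list_add(&head, &b);
            printf("one entry: %d\n", has_single_entry(&head));    /* 0 */
            return 0;
    }
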
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index e80c9b74f2f2..bf6e41aa9b6a 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -138,6 +138,7 @@ struct parse_events_state {
struct list_head *terms;
int stoken;
struct perf_pmu *fake_pmu;
+ char *hybrid_pmu_name;
};
void parse_events__handle_error(struct parse_events_error *err, int idx,
@@ -188,7 +189,8 @@ int parse_events_add_tool(struct parse_events_state *parse_state,
int parse_events_add_cache(struct list_head *list, int *idx,
char *type, char *op_result1, char *op_result2,
struct parse_events_error *error,
- struct list_head *head_config);
+ struct list_head *head_config,
+ struct parse_events_state *parse_state);
int parse_events_add_breakpoint(struct list_head *list, int *idx,
u64 addr, char *type, u64 len);
int parse_events_add_pmu(struct parse_events_state *parse_state,
@@ -263,4 +265,9 @@ static inline bool is_sdt_event(char *str __maybe_unused)
int perf_pmu__test_parse_init(void);
+struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
+ struct perf_event_attr *attr,
+ char *name, struct perf_pmu *pmu,
+ struct list_head *config_terms);
+
#endif /* __PERF_PARSE_EVENTS_H */
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 0b36285a9435..fb8646cc3e83 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -210,7 +210,7 @@ name_tag [\'][a-zA-Z_*?\[\]][a-zA-Z0-9_*?\-,\.\[\]:=]*[\']
name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.:]*
drv_cfg_term [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)?
/* If you add a modifier you need to update check_modifier() */
-modifier_event [ukhpPGHSDIWe]+
+modifier_event [ukhpPGHSDIWeb]+
modifier_bp [rwx]{1,3}
%%
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index d57ac86ce7ca..aba12a4d488e 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -454,7 +454,8 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT opt_e
list = alloc_list();
ABORT_ON(!list);
- err = parse_events_add_cache(list, &parse_state->idx, $1, $3, $5, error, $6);
+ err = parse_events_add_cache(list, &parse_state->idx, $1, $3, $5, error, $6,
+ parse_state);
parse_events_terms__delete($6);
free($1);
free($3);
@@ -475,7 +476,8 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT opt_event_config
list = alloc_list();
ABORT_ON(!list);
- err = parse_events_add_cache(list, &parse_state->idx, $1, $3, NULL, error, $4);
+ err = parse_events_add_cache(list, &parse_state->idx, $1, $3, NULL, error, $4,
+ parse_state);
parse_events_terms__delete($4);
free($1);
free($3);
@@ -495,7 +497,8 @@ PE_NAME_CACHE_TYPE opt_event_config
list = alloc_list();
ABORT_ON(!list);
- err = parse_events_add_cache(list, &parse_state->idx, $1, NULL, NULL, error, $2);
+ err = parse_events_add_cache(list, &parse_state->idx, $1, NULL, NULL, error, $2,
+ parse_state);
parse_events_terms__delete($2);
free($1);
if (err) {
diff --git a/tools/perf/util/pmu-hybrid.c b/tools/perf/util/pmu-hybrid.c
new file mode 100644
index 000000000000..f51ccaac60ee
--- /dev/null
+++ b/tools/perf/util/pmu-hybrid.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/list.h>
+#include <linux/compiler.h>
+#include <linux/string.h>
+#include <linux/zalloc.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include <locale.h>
+#include <api/fs/fs.h>
+#include "fncache.h"
+#include "pmu-hybrid.h"
+
+LIST_HEAD(perf_pmu__hybrid_pmus);
+
+bool perf_pmu__hybrid_mounted(const char *name)
+{
+ char path[PATH_MAX];
+ const char *sysfs;
+ FILE *file;
+ int n, cpu;
+
+ if (strncmp(name, "cpu_", 4))
+ return false;
+
+ sysfs = sysfs__mountpoint();
+ if (!sysfs)
+ return false;
+
+ snprintf(path, PATH_MAX, CPUS_TEMPLATE_CPU, sysfs, name);
+ if (!file_available(path))
+ return false;
+
+ file = fopen(path, "r");
+ if (!file)
+ return false;
+
+ n = fscanf(file, "%u", &cpu);
+ fclose(file);
+ if (n <= 0)
+ return false;
+
+ return true;
+}
+
+struct perf_pmu *perf_pmu__find_hybrid_pmu(const char *name)
+{
+ struct perf_pmu *pmu;
+
+ if (!name)
+ return NULL;
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ if (!strcmp(name, pmu->name))
+ return pmu;
+ }
+
+ return NULL;
+}
+
+bool perf_pmu__is_hybrid(const char *name)
+{
+ return perf_pmu__find_hybrid_pmu(name) != NULL;
+}
+
+char *perf_pmu__hybrid_type_to_pmu(const char *type)
+{
+ char *pmu_name = NULL;
+
+ if (asprintf(&pmu_name, "cpu_%s", type) < 0)
+ return NULL;
+
+ if (perf_pmu__is_hybrid(pmu_name))
+ return pmu_name;
+
+ /*
+	 * The PMU may not have been scanned yet, so check sysfs directly.
+ */
+ if (perf_pmu__hybrid_mounted(pmu_name))
+ return pmu_name;
+
+ free(pmu_name);
+ return NULL;
+}
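
perf_pmu__hybrid_type_to_pmu() maps a user-facing hybrid type such as "core" to
its sysfs PMU name by prefixing "cpu_" before validating it. A standalone
sketch of the mapping step (validation against the scanned PMU list and sysfs
is elided):

    #define _GNU_SOURCE     /* for asprintf() */
    #include <stdio.h>
    #include <stdlib.h>

    static char *type_to_pmu(const char *type)
    {
            char *pmu_name = NULL;

            if (asprintf(&pmu_name, "cpu_%s", type) < 0)
                    return NULL;
            return pmu_name;        /* caller validates and frees */
    }

    int main(void)
    {
            char *name = type_to_pmu("core");

            printf("%s\n", name ? name : "(none)");  /* cpu_core */
            free(name);
            return 0;
    }
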
diff --git a/tools/perf/util/pmu-hybrid.h b/tools/perf/util/pmu-hybrid.h
new file mode 100644
index 000000000000..d0fa7bc50a76
--- /dev/null
+++ b/tools/perf/util/pmu-hybrid.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PMU_HYBRID_H
+#define __PMU_HYBRID_H
+
+#include <linux/perf_event.h>
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <stdbool.h>
+#include "pmu.h"
+
+extern struct list_head perf_pmu__hybrid_pmus;
+
+#define perf_pmu__for_each_hybrid_pmu(pmu) \
+ list_for_each_entry(pmu, &perf_pmu__hybrid_pmus, hybrid_list)
+
+bool perf_pmu__hybrid_mounted(const char *name);
+
+struct perf_pmu *perf_pmu__find_hybrid_pmu(const char *name);
+bool perf_pmu__is_hybrid(const char *name);
+char *perf_pmu__hybrid_type_to_pmu(const char *type);
+
+#endif /* __PMU_HYBRID_H */
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 46fd0f998484..88c8ecdc60b0 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -25,6 +25,7 @@
#include "string2.h"
#include "strbuf.h"
#include "fncache.h"
+#include "pmu-hybrid.h"
struct perf_pmu perf_pmu__fake;
@@ -39,6 +40,7 @@ int perf_pmu_parse(struct list_head *list, char *name);
extern FILE *perf_pmu_in;
static LIST_HEAD(pmus);
+static bool hybrid_scanned;
/*
* Parse & process all the sysfs attributes located under
@@ -283,6 +285,7 @@ void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
zfree(&newalias->str);
zfree(&newalias->metric_expr);
zfree(&newalias->metric_name);
+ zfree(&newalias->pmu_name);
parse_events_terms__purge(&newalias->terms);
free(newalias);
}
@@ -297,6 +300,10 @@ static bool perf_pmu_merge_alias(struct perf_pmu_alias *newalias,
list_for_each_entry(a, alist, list) {
if (!strcasecmp(newalias->name, a->name)) {
+ if (newalias->pmu_name && a->pmu_name &&
+ !strcasecmp(newalias->pmu_name, a->pmu_name)) {
+ continue;
+ }
perf_pmu_update_alias(a, newalias);
perf_pmu_free_alias(newalias);
return true;
@@ -306,18 +313,27 @@ static bool perf_pmu_merge_alias(struct perf_pmu_alias *newalias,
}
static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
- char *desc, char *val,
- char *long_desc, char *topic,
- char *unit, char *perpkg,
- char *metric_expr,
- char *metric_name,
- char *deprecated)
+ char *desc, char *val, struct pmu_event *pe)
{
struct parse_events_term *term;
struct perf_pmu_alias *alias;
int ret;
int num;
char newval[256];
+ char *long_desc = NULL, *topic = NULL, *unit = NULL, *perpkg = NULL,
+ *metric_expr = NULL, *metric_name = NULL, *deprecated = NULL,
+ *pmu_name = NULL;
+
+ if (pe) {
+ long_desc = (char *)pe->long_desc;
+ topic = (char *)pe->topic;
+ unit = (char *)pe->unit;
+ perpkg = (char *)pe->perpkg;
+ metric_expr = (char *)pe->metric_expr;
+ metric_name = (char *)pe->metric_name;
+ deprecated = (char *)pe->deprecated;
+ pmu_name = (char *)pe->pmu;
+ }
alias = malloc(sizeof(*alias));
if (!alias)
@@ -382,6 +398,7 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
}
alias->per_pkg = perpkg && sscanf(perpkg, "%d", &num) == 1 && num == 1;
alias->str = strdup(newval);
+ alias->pmu_name = pmu_name ? strdup(pmu_name) : NULL;
if (deprecated)
alias->deprecated = true;
@@ -406,8 +423,7 @@ static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FI
/* Remove trailing newline from sysfs file */
strim(buf);
- return __perf_pmu__new_alias(list, dir, name, NULL, buf, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL);
+ return __perf_pmu__new_alias(list, dir, name, NULL, buf, NULL);
}
static inline bool pmu_alias_info_file(char *name)
@@ -599,7 +615,6 @@ static struct perf_cpu_map *__pmu_cpumask(const char *path)
*/
#define SYS_TEMPLATE_ID "./bus/event_source/devices/%s/identifier"
#define CPUS_TEMPLATE_UNCORE "%s/bus/event_source/devices/%s/cpumask"
-#define CPUS_TEMPLATE_CPU "%s/bus/event_source/devices/%s/cpus"
static struct perf_cpu_map *pmu_cpumask(const char *name)
{
@@ -631,6 +646,9 @@ static bool pmu_is_uncore(const char *name)
char path[PATH_MAX];
const char *sysfs;
+ if (perf_pmu__hybrid_mounted(name))
+ return false;
+
sysfs = sysfs__mountpoint();
snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name);
return file_available(path);
@@ -717,6 +735,11 @@ struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu)
return map;
}
+struct pmu_events_map *__weak pmu_events_map__find(void)
+{
+ return perf_pmu__find_map(NULL);
+}
+
bool pmu_uncore_alias_match(const char *pmu_name, const char *name)
{
char *tmp = NULL, *tok, *str;
@@ -793,11 +816,7 @@ new_alias:
/* need type casts to override 'const' */
__perf_pmu__new_alias(head, NULL, (char *)pe->name,
(char *)pe->desc, (char *)pe->event,
- (char *)pe->long_desc, (char *)pe->topic,
- (char *)pe->unit, (char *)pe->perpkg,
- (char *)pe->metric_expr,
- (char *)pe->metric_name,
- (char *)pe->deprecated);
+ pe);
}
}
@@ -864,13 +883,7 @@ static int pmu_add_sys_aliases_iter_fn(struct pmu_event *pe, void *data)
(char *)pe->name,
(char *)pe->desc,
(char *)pe->event,
- (char *)pe->long_desc,
- (char *)pe->topic,
- (char *)pe->unit,
- (char *)pe->perpkg,
- (char *)pe->metric_expr,
- (char *)pe->metric_name,
- (char *)pe->deprecated);
+ pe);
}
return 0;
@@ -942,6 +955,7 @@ static struct perf_pmu *pmu_lookup(const char *name)
pmu->is_uncore = pmu_is_uncore(name);
if (pmu->is_uncore)
pmu->id = pmu_id(name);
+ pmu->is_hybrid = perf_pmu__hybrid_mounted(name);
pmu->max_precise = pmu_max_precise(name);
pmu_add_cpu_aliases(&aliases, pmu);
pmu_add_sys_aliases(&aliases, pmu);
@@ -953,6 +967,9 @@ static struct perf_pmu *pmu_lookup(const char *name)
list_splice(&aliases, &pmu->aliases);
list_add_tail(&pmu->list, &pmus);
+ if (pmu->is_hybrid)
+ list_add_tail(&pmu->hybrid_list, &perf_pmu__hybrid_pmus);
+
pmu->default_config = perf_pmu__get_default_config(pmu);
return pmu;
@@ -1069,7 +1086,7 @@ int perf_pmu__format_type(struct list_head *formats, const char *name)
/*
* Sets value based on the format definition (format parameter)
- * and unformated value (value parameter).
+ * and unformatted value (value parameter).
*/
static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
bool zero)
@@ -1408,7 +1425,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
}
/*
- * if no unit or scale foundin aliases, then
+ * if no unit or scale found in aliases, then
* set defaults as for evsel
* unit cannot left to NULL
*/
@@ -1845,3 +1862,13 @@ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
"'%llx' not supported by kernel)!\n",
name ?: "N/A", buf, config);
}
+
+bool perf_pmu__has_hybrid(void)
+{
+ if (!hybrid_scanned) {
+ hybrid_scanned = true;
+ perf_pmu__scan(NULL);
+ }
+
+ return !list_empty(&perf_pmu__hybrid_pmus);
+}
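
perf_pmu__has_hybrid() scans sysfs at most once: the first call runs perf_pmu__scan(NULL), which populates perf_pmu__hybrid_pmus as a side effect of pmu_lookup(), and later calls only test the list. The scan-once idiom in isolation (a sketch with illustrative names, not perf code):

	#include <stdbool.h>

	static bool probe_hardware(void) { return true; }	/* stand-in for the sysfs walk */

	static bool scanned;
	static bool have_feature;

	static bool feature_present(void)
	{
		if (!scanned) {		/* pay the probe cost only once */
			scanned = true;
			have_feature = probe_hardware();
		}
		return have_feature;
	}
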
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 160b0f561771..a790ef758171 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -5,6 +5,7 @@
#include <linux/bitmap.h>
#include <linux/compiler.h>
#include <linux/perf_event.h>
+#include <linux/list.h>
#include <stdbool.h>
#include "parse-events.h"
#include "pmu-events/pmu-events.h"
@@ -19,6 +20,7 @@ enum {
#define PERF_PMU_FORMAT_BITS 64
#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/"
+#define CPUS_TEMPLATE_CPU "%s/bus/event_source/devices/%s/cpus"
struct perf_event_attr;
@@ -34,6 +36,7 @@ struct perf_pmu {
__u32 type;
bool selectable;
bool is_uncore;
+ bool is_hybrid;
bool auxtrace;
int max_precise;
struct perf_event_attr *default_config;
@@ -42,6 +45,7 @@ struct perf_pmu {
struct list_head aliases; /* HEAD struct perf_pmu_alias -> list */
struct list_head caps; /* HEAD struct perf_pmu_caps -> list */
struct list_head list; /* ELEM */
+ struct list_head hybrid_list;
};
extern struct perf_pmu perf_pmu__fake;
@@ -72,6 +76,7 @@ struct perf_pmu_alias {
bool deprecated;
char *metric_expr;
char *metric_name;
+ char *pmu_name;
};
struct perf_pmu *perf_pmu__find(const char *name);
@@ -114,6 +119,7 @@ void pmu_add_cpu_aliases_map(struct list_head *head, struct perf_pmu *pmu,
struct pmu_events_map *map);
struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu);
+struct pmu_events_map *pmu_events_map__find(void);
bool pmu_uncore_alias_match(const char *pmu_name, const char *name);
void perf_pmu_free_alias(struct perf_pmu_alias *alias);
@@ -126,4 +132,6 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu);
void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
char *name);
+bool perf_pmu__has_hybrid(void);
+
#endif /* __PMU_H */
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index a9cff3a50ddf..a78c8d59a555 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -3228,7 +3228,7 @@ errout:
return err;
}
-/* Concatinate two arrays */
+/* Concatenate two arrays */
static void *memcat(void *a, size_t sz_a, void *b, size_t sz_b)
{
void *ret;
@@ -3258,7 +3258,7 @@ concat_probe_trace_events(struct probe_trace_event **tevs, int *ntevs,
if (*ntevs + ntevs2 > probe_conf.max_probes)
ret = -E2BIG;
else {
- /* Concatinate the array of probe_trace_event */
+ /* Concatenate the array of probe_trace_event */
new_tevs = memcat(*tevs, (*ntevs) * sizeof(**tevs),
*tevs2, ntevs2 * sizeof(**tevs2));
if (!new_tevs)
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 1b118c9c86a6..866f2d514d72 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -164,7 +164,7 @@ static struct probe_trace_arg_ref *alloc_trace_arg_ref(long offs)
/*
* Convert a location into trace_arg.
* If tvar == NULL, this just checks variable can be converted.
- * If fentry == true and vr_die is a parameter, do huristic search
+ * If fentry == true and vr_die is a parameter, do heuristic search
* for the location fuzzed by function entry mcount.
*/
static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
@@ -498,7 +498,7 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
" nor array.\n", varname);
return -EINVAL;
}
- /* While prcessing unnamed field, we don't care about this */
+ /* While processing unnamed field, we don't care about this */
if (field->ref && dwarf_diename(vr_die)) {
pr_err("Semantic error: %s must be referred by '.'\n",
field->name);
@@ -1832,7 +1832,7 @@ static int line_range_walk_cb(const char *fname, int lineno,
(lf->lno_s > lineno || lf->lno_e < lineno))
return 0;
- /* Make sure this line can be reversable */
+ /* Make sure this line can be reversible */
if (cu_find_lineinfo(&lf->cu_die, addr, &__fname, &__lineno) > 0
&& (lineno != __lineno || strcmp(fname, __fname)))
return 0;
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 845dd46e3c61..d7c976671e3a 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -37,3 +37,5 @@ util/units.c
util/affinity.c
util/rwsem.c
util/hashmap.c
+util/pmu-hybrid.c
+util/fncache.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 278abecb5bdf..412f8e79e409 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -90,6 +90,7 @@ int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
*/
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
+int bpf_counter__disable(struct evsel *evsel);
void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
@@ -100,6 +101,11 @@ int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_
return 0;
}
+int bpf_counter__disable(struct evsel *evsel __maybe_unused)
+{
+ return 0;
+}
+
/*
* Support debug printing even though util/debug.c is not linked. That means
* implementing 'verbose' and 'eprintf'.
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
index 078a71773565..8130b56aa04b 100644
--- a/tools/perf/util/s390-cpumsf.c
+++ b/tools/perf/util/s390-cpumsf.c
@@ -45,7 +45,7 @@
* the data portion is mmap()'ed.
*
* To sort the queues in chronological order, all queue access is controlled
- * by the auxtrace_heap. This is basicly a stack, each stack element has two
+ * by the auxtrace_heap. This is basically a stack, each stack element has two
* entries, the queue number and a time stamp. However the stack is sorted by
* the time stamps. The highest time stamp is at the bottom the lowest
* (nearest) time stamp is at the top. That sort order is maintained at all
@@ -65,11 +65,11 @@
* stamp of the last processed entry of the auxtrace_buffer replaces the
* current auxtrace_heap top.
*
- * 3. Auxtrace_queues might run of out data and are feeded by the
+ * 3. Auxtrace_queues might run out of data and are fed by the
* PERF_RECORD_AUXTRACE handling, see s390_cpumsf_process_auxtrace_event().
*
* Event Generation
- * Each sampling-data entry in the auxilary trace data generates a perf sample.
+ * Each sampling-data entry in the auxiliary trace data generates a perf sample.
* This sample is filled
* with data from the auxtrace such as PID/TID, instruction address, CPU state,
* etc. This sample is processed with perf_session__deliver_synth_event() to
@@ -575,7 +575,7 @@ static unsigned long long get_trailer_time(const unsigned char *buf)
* pointer to the queue, the second parameter is the time stamp. This
* is the time stamp:
* - of the event that triggered this processing.
- * - or the time stamp when the last proccesing of this queue stopped.
+ * - or the time stamp when the last processing of this queue stopped.
* In this case it stopped at a 4KB page boundary and record the
* position on where to continue processing on the next invocation
* (see buffer->use_data and buffer->use_size).
@@ -640,7 +640,7 @@ static int s390_cpumsf_samples(struct s390_cpumsf_queue *sfq, u64 *ts)
goto out;
}
- pos += dsdes; /* Skip diagnositic entry */
+ pos += dsdes; /* Skip diagnostic entry */
/* Check for trailer entry */
if (!s390_cpumsf_reached_trailer(bsdes + dsdes, pos)) {
diff --git a/tools/perf/util/s390-sample-raw.c b/tools/perf/util/s390-sample-raw.c
index cfcf8d534d76..08ec3c3ae0ee 100644
--- a/tools/perf/util/s390-sample-raw.c
+++ b/tools/perf/util/s390-sample-raw.c
@@ -160,11 +160,9 @@ static void s390_cpumcfdg_dump(struct perf_sample *sample)
const char *color = PERF_COLOR_BLUE;
struct cf_ctrset_entry *cep, ce;
struct pmu_events_map *map;
- struct perf_pmu pmu;
u64 *p;
- memset(&pmu, 0, sizeof(pmu));
- map = perf_pmu__find_map(&pmu);
+ map = pmu_events_map__find();
while (offset < len) {
cep = (struct cf_ctrset_entry *)(buf + offset);
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index c83c2c6564e0..4e4aa4c97ac5 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -1531,7 +1531,7 @@ static void set_table_handlers(struct tables *tables)
* Attempt to use the call path root from the call return
* processor, if the call return processor is in use. Otherwise,
* we allocate a new call path root. This prevents exporting
- * duplicate call path ids when both are in use simultaniously.
+ * duplicate call path ids when both are in use simultaneously.
*/
if (tables->dbe.crp)
tables->dbe.cpr = tables->dbe.crp->cpr;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 859832a82496..a12cf4f0e97a 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -29,6 +29,7 @@
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
+#include "tsc.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
@@ -451,6 +452,16 @@ static int process_stat_round_stub(struct perf_session *perf_session __maybe_unu
return 0;
}
+static int process_event_time_conv_stub(struct perf_session *perf_session __maybe_unused,
+ union perf_event *event)
+{
+ if (dump_trace)
+ perf_event__fprintf_time_conv(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused,
u64 file_offset __maybe_unused)
@@ -532,7 +543,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
if (tool->stat_round == NULL)
tool->stat_round = process_stat_round_stub;
if (tool->time_conv == NULL)
- tool->time_conv = process_event_op2_stub;
+ tool->time_conv = process_event_time_conv_stub;
if (tool->feature == NULL)
tool->feature = process_event_op2_stub;
if (tool->compressed == NULL)
@@ -949,6 +960,19 @@ static void perf_event__stat_round_swap(union perf_event *event,
event->stat_round.time = bswap_64(event->stat_round.time);
}
+static void perf_event__time_conv_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
+{
+ event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
+ event->time_conv.time_mult = bswap_64(event->time_conv.time_mult);
+ event->time_conv.time_zero = bswap_64(event->time_conv.time_zero);
+
+ if (event_contains(event->time_conv, time_cycles)) {
+ event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
+ event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
+ }
+}
+
typedef void (*perf_event__swap_op)(union perf_event *event,
bool sample_id_all);
@@ -985,7 +1009,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_STAT] = perf_event__stat_swap,
[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
- [PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
+ [PERF_RECORD_TIME_CONV] = perf_event__time_conv_swap,
[PERF_RECORD_HEADER_MAX] = NULL,
};
@@ -1069,7 +1093,7 @@ static void callchain__lbr_callstack_printf(struct perf_sample *sample)
* in "to" register.
* For example, there is a call stack
* "A"->"B"->"C"->"D".
- * The LBR registers will recorde like
+ * The LBR registers will be recorded like
* "C"->"D", "B"->"C", "A"->"B".
* So only the first "to" register and all "from"
* registers are needed to construct the whole stack.
@@ -1302,8 +1326,10 @@ static void dump_sample(struct evsel *evsel, union perf_event *event,
if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
printf("... weight: %" PRIu64 "", sample->weight);
- if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT)
+ if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
printf(",0x%"PRIx16"", sample->ins_lat);
+ printf(",0x%"PRIx16"", sample->p_stage_cyc);
+ }
printf("\n");
}
@@ -1584,7 +1610,7 @@ static s64 perf_session__process_user_event(struct perf_session *session,
return tool->event_update(tool, event, &session->evlist);
case PERF_RECORD_HEADER_EVENT_TYPE:
/*
- * Depreceated, but we need to handle it for sake
+ * Deprecated, but we need to handle it for sake
* of old data files create in pipe mode.
*/
return 0;
@@ -2350,7 +2376,8 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp
return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}
-size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
+size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp,
+ bool skip_empty)
{
size_t ret;
const char *msg = "";
@@ -2360,7 +2387,7 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
- ret += events_stats__fprintf(&session->evlist->stats, fp);
+ ret += events_stats__fprintf(&session->evlist->stats, fp, skip_empty);
return ret;
}
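
perf_event__time_conv_swap() above relies on event_contains() to decide whether the on-disk record actually carries the newer fields; older perf.data files ship a shorter PERF_RECORD_TIME_CONV. The underlying idea is a size-versus-offset test, roughly (a paraphrase, not the verbatim perf macro):

	#include <stddef.h>

	/*
	 * A record carries a member only if its on-disk size extends
	 * past that member's offset within the record layout.
	 */
	#define record_contains(obj, mem) \
		((obj).header.size > offsetof(__typeof__(obj), mem))
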
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index f76480166d38..e31ba4c92a6c 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -113,7 +113,8 @@ size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp);
size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
bool (fn)(struct dso *dso, int parm), int parm);
-size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
+size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp,
+ bool skip_empty);
struct evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 552b590485bf..88ce47f2547e 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -25,6 +25,7 @@
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include "annotate.h"
+#include "event.h"
#include "time-utils.h"
#include "cgroup.h"
#include "machine.h"
@@ -36,7 +37,7 @@ const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
-const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat";
+const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,p_stage_cyc";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
@@ -45,6 +46,8 @@ const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
+const char *dynamic_headers[] = {"local_ins_lat", "p_stage_cyc"};
+const char *arch_specific_sort_keys[] = {"p_stage_cyc"};
/*
* Replaces all occurrences of a char used with the:
@@ -1408,6 +1411,25 @@ struct sort_entry sort_global_ins_lat = {
.se_width_idx = HISTC_GLOBAL_INS_LAT,
};
+static int64_t
+sort__global_p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return left->stat.p_stage_cyc - right->stat.p_stage_cyc;
+}
+
+static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%-*u", width, he->stat.p_stage_cyc);
+}
+
+struct sort_entry sort_p_stage_cyc = {
+ .se_header = "Pipeline Stage Cycle",
+ .se_cmp = sort__global_p_stage_cyc_cmp,
+ .se_snprintf = hist_entry__p_stage_cyc_snprintf,
+ .se_width_idx = HISTC_P_STAGE_CYC,
+};
+
struct sort_entry sort_mem_daddr_sym = {
.se_header = "Data Symbol",
.se_cmp = sort__daddr_cmp,
@@ -1816,6 +1838,21 @@ struct sort_dimension {
int taken;
};
+int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
+{
+ return 0;
+}
+
+const char * __weak arch_perf_header_entry(const char *se_header)
+{
+ return se_header;
+}
+
+static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
+{
+ sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
+}
+
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
static struct sort_dimension common_sort_dimensions[] = {
@@ -1841,6 +1878,7 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
+ DIM(SORT_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_p_stage_cyc),
};
#undef DIM
@@ -2739,7 +2777,20 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
struct evlist *evlist,
int level)
{
- unsigned int i;
+ unsigned int i, j;
+
+ /*
+ * Check to see if there are any arch-specific
+ * sort dimensions not applicable to the current
+ * architecture. If so, skip that sort key since
+ * we don't want to display it in the output fields.
+ */
+ for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
+ if (!strcmp(arch_specific_sort_keys[j], tok) &&
+ !arch_support_sort_key(tok)) {
+ return 0;
+ }
+ }
for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
struct sort_dimension *sd = &common_sort_dimensions[i];
@@ -2747,6 +2798,11 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
if (strncasecmp(tok, sd->name, strlen(tok)))
continue;
+ for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
+ if (!strcmp(dynamic_headers[j], sd->name))
+ sort_dimension_add_dynamic_header(sd);
+ }
+
if (sd->entry == &sort_parent) {
int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
if (ret) {
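
The __weak arch_support_sort_key() and arch_perf_header_entry() above are generic fallbacks; an architecture opts in by supplying strong definitions. A sketch of what such an override could look like (file placement, strings, and the renamed header are illustrative):

	/* e.g. tools/perf/arch/<arch>/util/<file>.c (hypothetical) */
	#include <string.h>

	int arch_support_sort_key(const char *sort_key)
	{
		return !strcmp(sort_key, "p_stage_cyc");	/* this arch supports it */
	}

	const char *arch_perf_header_entry(const char *se_header)
	{
		if (!strcmp(se_header, "Pipeline Stage Cycle"))
			return "Dispatch Cyc";	/* arch-specific column title */
		return se_header;
	}
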
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 63f67a3f3630..87a092645aa7 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -51,6 +51,7 @@ struct he_stat {
u64 period_guest_us;
u64 weight;
u64 ins_lat;
+ u64 p_stage_cyc;
u32 nr_events;
};
@@ -234,6 +235,7 @@ enum sort_type {
SORT_CODE_PAGE_SIZE,
SORT_LOCAL_INS_LAT,
SORT_GLOBAL_INS_LAT,
+ SORT_PIPELINE_STAGE_CYC,
/* branch stack specific sort keys */
__SORT_BRANCH_STACK,
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 7f09cdaf5b60..a76fff5e7d83 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -17,6 +17,8 @@
#include "cgroup.h"
#include <api/fs/fs.h>
#include "util.h"
+#include "iostat.h"
+#include "pmu-hybrid.h"
#define CNTR_NOT_SUPPORTED "<not supported>"
#define CNTR_NOT_COUNTED "<not counted>"
@@ -310,6 +312,11 @@ static void print_metric_header(struct perf_stat_config *config,
struct outstate *os = ctx;
char tbuf[1024];
+ /* In case of iostat, print metric header for first root port only */
+ if (config->iostat_run &&
+ os->evsel->priv != os->evsel->evlist->selected->priv)
+ return;
+
if (!valid_only_metric(unit))
return;
unit = fixunit(tbuf, os->evsel, unit);
@@ -439,6 +446,12 @@ static void printout(struct perf_stat_config *config, struct aggr_cpu_id id, int
if (counter->cgrp)
os.nfields++;
}
+
+ if (!config->no_csv_summary && config->csv_output &&
+ config->summary && !config->interval) {
+ fprintf(config->output, "%16s%s", "summary", config->csv_sep);
+ }
+
if (run == 0 || ena == 0 || counter->counts->scaled == -1) {
if (config->metric_only) {
pm(config, &os, NULL, "", "", 0);
@@ -526,6 +539,7 @@ static void uniquify_event_name(struct evsel *counter)
{
char *new_name;
char *config;
+ int ret = 0;
if (counter->uniquified_name ||
!counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
@@ -540,8 +554,17 @@ static void uniquify_event_name(struct evsel *counter)
counter->name = new_name;
}
} else {
- if (asprintf(&new_name,
- "%s [%s]", counter->name, counter->pmu_name) > 0) {
+ if (perf_pmu__has_hybrid()) {
+ if (!counter->use_config_name) {
+ ret = asprintf(&new_name, "%s/%s/",
+ counter->pmu_name, counter->name);
+ }
+ } else {
+ ret = asprintf(&new_name, "%s [%s]",
+ counter->name, counter->pmu_name);
+ }
+
+ if (ret) {
free(counter->name);
counter->name = new_name;
}
@@ -644,6 +667,9 @@ static void print_counter_aggrdata(struct perf_stat_config *config,
if (!collect_data(config, counter, aggr_cb, &ad))
return;
+ if (perf_pmu__has_hybrid() && ad.ena == 0)
+ return;
+
nr = ad.nr;
ena = ad.ena;
run = ad.run;
@@ -952,8 +978,11 @@ static void print_metric_headers(struct perf_stat_config *config,
if (config->csv_output) {
if (config->interval)
fputs("time,", config->output);
- fputs(aggr_header_csv[config->aggr_mode], config->output);
+ if (!config->iostat_run)
+ fputs(aggr_header_csv[config->aggr_mode], config->output);
}
+ if (config->iostat_run)
+ iostat_print_header_prefix(config);
/* Print metrics headers only */
evlist__for_each_entry(evlist, counter) {
@@ -983,7 +1012,8 @@ static void print_interval(struct perf_stat_config *config,
if (config->interval_clear)
puts(CONSOLE_CLEAR);
- sprintf(prefix, "%6lu.%09lu%s", (unsigned long) ts->tv_sec, ts->tv_nsec, config->csv_sep);
+ if (!config->iostat_run)
+ sprintf(prefix, "%6lu.%09lu%s", (unsigned long) ts->tv_sec, ts->tv_nsec, config->csv_sep);
if ((num_print_interval == 0 && !config->csv_output) || config->interval_clear) {
switch (config->aggr_mode) {
@@ -1019,9 +1049,11 @@ static void print_interval(struct perf_stat_config *config,
break;
case AGGR_GLOBAL:
default:
- fprintf(output, "# time");
- if (!metric_only)
- fprintf(output, " counts %*s events\n", unit_width, "unit");
+ if (!config->iostat_run) {
+ fprintf(output, "# time");
+ if (!metric_only)
+ fprintf(output, " counts %*s events\n", unit_width, "unit");
+ }
case AGGR_UNSET:
break;
}
@@ -1214,6 +1246,9 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf
struct evsel *counter;
char buf[64], *prefix = NULL;
+ if (config->iostat_run)
+ evlist->selected = evlist__first(evlist);
+
if (interval)
print_interval(config, evlist, prefix = buf, ts);
else
@@ -1226,7 +1261,7 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf
print_metric_headers(config, evlist, prefix, false);
if (num_print_iv++ == 25)
num_print_iv = 0;
- if (config->aggr_mode == AGGR_GLOBAL && prefix)
+ if (config->aggr_mode == AGGR_GLOBAL && prefix && !config->iostat_run)
fprintf(config->output, "%s", prefix);
}
@@ -1243,11 +1278,16 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf
}
break;
case AGGR_GLOBAL:
- evlist__for_each_entry(evlist, counter) {
- print_counter_aggr(config, counter, prefix);
+ if (config->iostat_run)
+ iostat_print_counters(evlist, config, ts, prefix = buf,
+ print_counter_aggr);
+ else {
+ evlist__for_each_entry(evlist, counter) {
+ print_counter_aggr(config, counter, prefix);
+ }
+ if (metric_only)
+ fputc('\n', config->output);
}
- if (metric_only)
- fputc('\n', config->output);
break;
case AGGR_NONE:
if (metric_only)
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 6ccf21a72f06..39967a45f55b 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -9,7 +9,9 @@
#include "expr.h"
#include "metricgroup.h"
#include "cgroup.h"
+#include "units.h"
#include <linux/zalloc.h>
+#include "iostat.h"
/*
* AGGR_GLOBAL: Use CPU 0
@@ -961,7 +963,9 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
struct metric_event *me;
int num = 1;
- if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
+ if (config->iostat_run) {
+ iostat_print_metric(config, evsel, out);
+ } else if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);
if (total) {
@@ -1270,18 +1274,15 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
evsel->name, evsel->metric_name, NULL, 1, cpu, out, st);
} else if (runtime_stat_n(st, STAT_NSECS, cpu, &rsd) != 0) {
- char unit = 'M';
- char unit_buf[10];
+ char unit = ' ';
+ char unit_buf[10] = "/sec";
total = runtime_stat_avg(st, STAT_NSECS, cpu, &rsd);
-
if (total)
- ratio = 1000.0 * avg / total;
- if (ratio < 0.001) {
- ratio *= 1000;
- unit = 'K';
- }
- snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
+ ratio = convert_unit_double(1000000000.0 * avg / total, &unit);
+
+ if (unit != ' ')
+ snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
print_smi_cost(config, cpu, out, st, &rsd);
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index c400f8dde017..2db46b9bebd0 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -76,8 +76,7 @@ double rel_stddev_stats(double stddev, double avg)
return pct;
}
-bool __perf_evsel_stat__is(struct evsel *evsel,
- enum perf_stat_evsel_id id)
+bool __perf_stat_evsel__is(struct evsel *evsel, enum perf_stat_evsel_id id)
{
struct perf_stat_evsel *ps = evsel->stats;
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index d85c292148bb..32c8527de347 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -128,10 +128,12 @@ struct perf_stat_config {
bool all_user;
bool percore_show_thread;
bool summary;
+ bool no_csv_summary;
bool metric_no_group;
bool metric_no_merge;
bool stop_read_counter;
bool quiet;
+ bool iostat_run;
FILE *output;
unsigned int interval;
unsigned int timeout;
@@ -160,6 +162,7 @@ struct perf_stat_config {
};
void perf_stat__set_big_num(int set);
+void perf_stat__set_no_csv_summary(int set);
void update_stats(struct stats *stats, u64 val);
double avg_stats(struct stats *stats);
@@ -187,11 +190,10 @@ struct perf_aggr_thread_value {
u64 ena;
};
-bool __perf_evsel_stat__is(struct evsel *evsel,
- enum perf_stat_evsel_id id);
+bool __perf_stat_evsel__is(struct evsel *evsel, enum perf_stat_evsel_id id);
#define perf_stat_evsel__is(evsel, id) \
- __perf_evsel_stat__is(evsel, PERF_STAT_EVSEL_ID__ ## id)
+ __perf_stat_evsel__is(evsel, PERF_STAT_EVSEL_ID__ ## id)
extern struct runtime_stat rt_stat;
extern struct stats walltime_nsecs_stats;
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
index ea94d8628980..be94d7046fa0 100644
--- a/tools/perf/util/strbuf.h
+++ b/tools/perf/util/strbuf.h
@@ -12,7 +12,7 @@
* build complex strings/buffers whose final size isn't easily known.
*
* It is NOT legal to copy the ->buf pointer away.
- * `strbuf_detach' is the operation that detachs a buffer from its shell
+ * `strbuf_detach' is the operation that detaches a buffer from its shell
* while keeping the shell valid wrt its invariants.
*
* 2. the ->buf member is a byte array that has at least ->len + 1 bytes
diff --git a/tools/perf/util/strfilter.h b/tools/perf/util/strfilter.h
index e0c25a40f796..c05aca9ca582 100644
--- a/tools/perf/util/strfilter.h
+++ b/tools/perf/util/strfilter.h
@@ -8,8 +8,8 @@
/* A node of string filter */
struct strfilter_node {
- struct strfilter_node *l; /* Tree left branche (for &,|) */
- struct strfilter_node *r; /* Tree right branche (for !,&,|) */
+ struct strfilter_node *l; /* Tree left branch (for &,|) */
+ struct strfilter_node *r; /* Tree right branch (for !,&,|) */
const char *p; /* Operator or rule */
};
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 6dff843fd883..4c56aa837434 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1058,7 +1058,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
curr_dso->symtab_type = dso->symtab_type;
maps__insert(kmaps, curr_map);
/*
- * Add it before we drop the referece to curr_map, i.e. while
+ * Add it before we drop the reference to curr_map, i.e. while
* we still are sure to have a reference to this DSO via
* *curr_map->dso.
*/
diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
index 35c936ce33ef..2664fb65e47a 100644
--- a/tools/perf/util/symbol_fprintf.c
+++ b/tools/perf/util/symbol_fprintf.c
@@ -68,7 +68,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
- fprintf(fp, "%s\n", pos->sym.name);
+ ret += fprintf(fp, "%s\n", pos->sym.name);
}
return ret;
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index dff178103ce5..35aa0c0f7cd9 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -1211,7 +1211,7 @@ static size_t mask_size(struct perf_cpu_map *map, int *max)
*max = 0;
for (i = 0; i < map->nr; i++) {
- /* bit possition of the cpu is + 1 */
+ /* bit position of the cpu is + 1 */
int bit = map->map[i] + 1;
if (bit > *max)
@@ -1237,7 +1237,7 @@ void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int
* mask = size of 'struct perf_record_record_cpu_map' +
* maximum cpu bit converted to size of longs
*
- * and finaly + the size of 'struct perf_record_cpu_map_data'.
+ * and finally + the size of 'struct perf_record_cpu_map_data'.
*/
size_cpus = cpus_size(map);
size_mask = mask_size(map, max);
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 03bd99d3be16..a2e906858891 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -34,6 +34,10 @@ static const char **syscalltbl_native = syscalltbl_powerpc_32;
#include <asm/syscalls.c>
const int syscalltbl_native_max_id = SYSCALLTBL_ARM64_MAX_ID;
static const char **syscalltbl_native = syscalltbl_arm64;
+#elif defined(__mips__)
+#include <asm/syscalls_n64.c>
+const int syscalltbl_native_max_id = SYSCALLTBL_MIPS_N64_MAX_ID;
+static const char **syscalltbl_native = syscalltbl_mips_n64;
#endif
struct syscall {
diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
index f132c6c2eef8..4ff56217f2a6 100644
--- a/tools/perf/util/target.h
+++ b/tools/perf/util/target.h
@@ -16,6 +16,8 @@ struct target {
bool uses_mmap;
bool default_per_cpu;
bool per_thread;
+ bool use_bpf;
+ const char *attr_map;
};
enum target_errno {
@@ -64,11 +66,6 @@ static inline bool target__has_cpu(struct target *target)
return target->system_wide || target->cpu_list;
}
-static inline bool target__has_bpf(struct target *target)
-{
- return target->bpf_str;
-}
-
static inline bool target__none(struct target *target)
{
return !target__has_task(target) && !target__has_cpu(target);
diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h
index 3bc47a42af8e..b3cd09beb62f 100644
--- a/tools/perf/util/thread-stack.h
+++ b/tools/perf/util/thread-stack.h
@@ -16,7 +16,6 @@ struct comm;
struct ip_callchain;
struct symbol;
struct dso;
-struct comm;
struct perf_sample;
struct addr_location;
struct call_path;
diff --git a/tools/perf/util/tsc.c b/tools/perf/util/tsc.c
index 62b4c75c966c..f19791d46e99 100644
--- a/tools/perf/util/tsc.c
+++ b/tools/perf/util/tsc.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
+#include <inttypes.h>
+#include <string.h>
#include <linux/compiler.h>
#include <linux/perf_event.h>
@@ -110,3 +112,31 @@ u64 __weak rdtsc(void)
{
return 0;
}
+
+size_t perf_event__fprintf_time_conv(union perf_event *event, FILE *fp)
+{
+ struct perf_record_time_conv *tc = (struct perf_record_time_conv *)event;
+ size_t ret;
+
+ ret = fprintf(fp, "\n... Time Shift %" PRI_lu64 "\n", tc->time_shift);
+ ret += fprintf(fp, "... Time Muliplier %" PRI_lu64 "\n", tc->time_mult);
+ ret += fprintf(fp, "... Time Zero %" PRI_lu64 "\n", tc->time_zero);
+
+ /*
+ * The TIME_CONV event was extended with the fields from "time_cycles"
+ * onward when cap_user_time_short is supported; for backward compatibility,
+ * print the extended fields only if they are contained in the event.
+ */
+ if (event_contains(*tc, time_cycles)) {
+ ret += fprintf(fp, "... Time Cycles %" PRI_lu64 "\n",
+ tc->time_cycles);
+ ret += fprintf(fp, "... Time Mask %#" PRI_lx64 "\n",
+ tc->time_mask);
+ ret += fprintf(fp, "... Cap Time Zero %" PRId32 "\n",
+ tc->cap_user_time_zero);
+ ret += fprintf(fp, "... Cap Time Short %" PRId32 "\n",
+ tc->cap_user_time_short);
+ }
+
+ return ret;
+}
diff --git a/tools/perf/util/tsc.h b/tools/perf/util/tsc.h
index 72a15419f3b3..7d83a31732a7 100644
--- a/tools/perf/util/tsc.h
+++ b/tools/perf/util/tsc.h
@@ -4,6 +4,8 @@
#include <linux/types.h>
+#include "event.h"
+
struct perf_tsc_conversion {
u16 time_shift;
u32 time_mult;
@@ -24,4 +26,6 @@ u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc);
u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc);
u64 rdtsc(void);
+size_t perf_event__fprintf_time_conv(union perf_event *event, FILE *fp);
+
#endif // __PERF_TSC_H
diff --git a/tools/perf/util/units.c b/tools/perf/util/units.c
index a46762aec4c9..32c39cfe209b 100644
--- a/tools/perf/util/units.c
+++ b/tools/perf/util/units.c
@@ -33,28 +33,35 @@ unsigned long parse_tag_value(const char *str, struct parse_tag *tags)
return (unsigned long) -1;
}
-unsigned long convert_unit(unsigned long value, char *unit)
+double convert_unit_double(double value, char *unit)
{
*unit = ' ';
- if (value > 1000) {
- value /= 1000;
+ if (value > 1000.0) {
+ value /= 1000.0;
*unit = 'K';
}
- if (value > 1000) {
- value /= 1000;
+ if (value > 1000.0) {
+ value /= 1000.0;
*unit = 'M';
}
- if (value > 1000) {
- value /= 1000;
+ if (value > 1000.0) {
+ value /= 1000.0;
*unit = 'G';
}
return value;
}
+unsigned long convert_unit(unsigned long value, char *unit)
+{
+ double v = convert_unit_double((double)value, unit);
+
+ return (unsigned long)v;
+}
+
int unit_number__scnprintf(char *buf, size_t size, u64 n)
{
char unit[4] = "BKMG";
diff --git a/tools/perf/util/units.h b/tools/perf/util/units.h
index 99263b6a23f7..ea43e74e3240 100644
--- a/tools/perf/util/units.h
+++ b/tools/perf/util/units.h
@@ -12,6 +12,7 @@ struct parse_tag {
unsigned long parse_tag_value(const char *str, struct parse_tag *tags);
+double convert_unit_double(double value, char *unit);
unsigned long convert_unit(unsigned long value, char *unit);
int unit_number__scnprintf(char *buf, size_t size, u64 n);
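
A quick check of the new double-precision helper (a hypothetical standalone test, not part of the patch): 1,234,567 is divided by 1000 twice, so it lands in the 'M' bucket.

	#include <assert.h>
	#include <stdio.h>
	#include "units.h"	/* declares convert_unit_double() */

	int main(void)
	{
		char unit;
		double v = convert_unit_double(1234567.0, &unit);

		printf("%.3f%c\n", v, unit);	/* "1.235M" */
		assert(unit == 'M');
		return 0;
	}
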
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 9aededc0bc06..71a353349181 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -82,7 +82,7 @@ UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
#define DW_EH_PE_funcrel 0x40 /* start-of-procedure-relative */
#define DW_EH_PE_aligned 0x50 /* aligned pointer */
-/* Flags intentionaly not handled, since they're not needed:
+/* Flags intentionally not handled, since they're not needed:
* #define DW_EH_PE_indirect 0x80
* #define DW_EH_PE_uleb128 0x01
* #define DW_EH_PE_udata2 0x02
diff --git a/tools/power/acpi/common/cmfsize.c b/tools/power/acpi/common/cmfsize.c
index 9ea2c0aeb86c..185b8c588e1d 100644
--- a/tools/power/acpi/common/cmfsize.c
+++ b/tools/power/acpi/common/cmfsize.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
*
- * Module Name: cfsize - Common get file size function
+ * Module Name: cmfsize - Common get file size function
*
* Copyright (C) 2000 - 2021, Intel Corp.
*
diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py
index 81f4b8abbdf7..ffd50953a024 100755
--- a/tools/power/pm-graph/sleepgraph.py
+++ b/tools/power/pm-graph/sleepgraph.py
@@ -6819,7 +6819,7 @@ if __name__ == '__main__':
sysvals.outdir = val
sysvals.notestrun = True
if(os.path.isdir(val) == False):
- doError('%s is not accesible' % val)
+ doError('%s is not accessible' % val)
elif(arg == '-filter'):
try:
val = next(args)
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
index 582feb88eca3..ab940c508ef0 100644
--- a/tools/power/x86/intel-speed-select/isst-config.c
+++ b/tools/power/x86/intel-speed-select/isst-config.c
@@ -15,7 +15,7 @@ struct process_cmd_struct {
int arg;
};
-static const char *version_str = "v1.8";
+static const char *version_str = "v1.9";
static const int supported_api_ver = 1;
static struct isst_if_platform_info isst_platform_info;
static char *progname;
@@ -381,6 +381,18 @@ static void set_cpu_online_offline(int cpu, int state)
close(fd);
}
+static void force_all_cpus_online(void)
+{
+ int i;
+
+ fprintf(stderr, "Forcing all CPUs online\n");
+
+ for (i = 0; i < topo_max_cpus; ++i)
+ set_cpu_online_offline(i, 1);
+
+ unlink("/var/run/isst_cpu_topology.dat");
+}
+
#define MAX_PACKAGE_COUNT 8
#define MAX_DIE_PER_PACKAGE 2
static void for_each_online_package_in_set(void (*callback)(int, void *, void *,
@@ -959,6 +971,10 @@ static void isst_print_extended_platform_info(void)
fprintf(outf, "Intel(R) SST-BF (feature base-freq) is not supported\n");
ret = isst_read_pm_config(i, &cp_state, &cp_cap);
+ if (ret) {
+ fprintf(outf, "Intel(R) SST-CP (feature core-power) status is unknown\n");
+ return;
+ }
if (cp_cap)
fprintf(outf, "Intel(R) SST-CP (feature core-power) is supported\n");
else
@@ -2763,6 +2779,7 @@ static void usage(void)
printf("\t[-f|--format] : output format [json|text]. Default: text\n");
printf("\t[-h|--help] : Print help\n");
printf("\t[-i|--info] : Print platform information\n");
+ printf("\t[-a|--all-cpus-online] : Force online every CPU in the system\n");
printf("\t[-o|--out] : Output file\n");
printf("\t\t\tDefault : stderr\n");
printf("\t[-p|--pause] : Delay between two mail box commands in milliseconds\n");
@@ -2791,7 +2808,6 @@ static void usage(void)
static void print_version(void)
{
fprintf(outf, "Version %s\n", version_str);
- fprintf(outf, "Build date %s time %s\n", __DATE__, __TIME__);
exit(0);
}
@@ -2800,11 +2816,12 @@ static void cmdline(int argc, char **argv)
const char *pathname = "/dev/isst_interface";
char *ptr;
FILE *fp;
- int opt;
+ int opt, force_cpus_online = 0;
int option_index = 0;
int ret;
static struct option long_options[] = {
+ { "all-cpus-online", no_argument, 0, 'a' },
{ "cpu", required_argument, 0, 'c' },
{ "debug", no_argument, 0, 'd' },
{ "format", required_argument, 0, 'f' },
@@ -2840,9 +2857,12 @@ static void cmdline(int argc, char **argv)
}
progname = argv[0];
- while ((opt = getopt_long_only(argc, argv, "+c:df:hio:v", long_options,
+ while ((opt = getopt_long_only(argc, argv, "+c:df:hio:va", long_options,
&option_index)) != -1) {
switch (opt) {
+ case 'a':
+ force_cpus_online = 1;
+ break;
case 'c':
parse_cpu_command(optarg);
break;
@@ -2892,6 +2912,8 @@ static void cmdline(int argc, char **argv)
exit(0);
}
set_max_cpu_num();
+ if (force_cpus_online)
+ force_all_cpus_online();
store_cpu_topology();
set_cpu_present_cpu_mask();
set_cpu_target_cpu_mask();
diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
index 8e54ce47648e..3bf1820c0da1 100644
--- a/tools/power/x86/intel-speed-select/isst-display.c
+++ b/tools/power/x86/intel-speed-select/isst-display.c
@@ -25,10 +25,14 @@ static void printcpulist(int str_len, char *str, int mask_size,
index = snprintf(&str[curr_index],
str_len - curr_index, ",");
curr_index += index;
+ if (curr_index >= str_len)
+ break;
}
index = snprintf(&str[curr_index], str_len - curr_index, "%d",
i);
curr_index += index;
+ if (curr_index >= str_len)
+ break;
first = 0;
}
}
@@ -64,10 +68,14 @@ static void printcpumask(int str_len, char *str, int mask_size,
index = snprintf(&str[curr_index], str_len - curr_index, "%08x",
mask[i]);
curr_index += index;
+ if (curr_index >= str_len)
+ break;
if (i) {
strncat(&str[curr_index], ",", str_len - curr_index);
curr_index++;
}
+ if (curr_index >= str_len)
+ break;
}
free(mask);
@@ -185,7 +193,7 @@ static void _isst_pbf_display_information(int cpu, FILE *outf, int level,
int disp_level)
{
char header[256];
- char value[256];
+ char value[512];
snprintf(header, sizeof(header), "speed-select-base-freq-properties");
format_and_print(outf, disp_level, header, NULL);
@@ -349,7 +357,7 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
struct isst_pkg_ctdp *pkg_dev)
{
char header[256];
- char value[256];
+ char value[512];
static int level;
int i;
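
The new bounds checks matter because snprintf() returns the length the formatted string would have had, not the number of bytes actually stored, so curr_index can walk past the end of the buffer once truncation starts. The guard in miniature (self-contained; names are illustrative):

	#include <stdio.h>

	/* Append integers to str, stopping as soon as snprintf() truncates. */
	static void append_ids(char *str, int str_len, const int *ids, int n)
	{
		int curr_index = 0, i;

		str[0] = '\0';
		for (i = 0; i < n; i++) {
			int index = snprintf(&str[curr_index], str_len - curr_index,
					     i ? ",%d" : "%d", ids[i]);
			curr_index += index;	/* may overshoot str_len on truncation */
			if (curr_index >= str_len)
				break;		/* same guard as the patch adds */
		}
	}
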
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index a402f32a145c..25adfec2cb39 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -39,8 +39,6 @@ EXTRA_WARNINGS += -Wundef
EXTRA_WARNINGS += -Wwrite-strings
EXTRA_WARNINGS += -Wformat
-CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
-
# Makefiles suck: This macro sets a default value of $(2) for the
# variable named by $(1), unless the variable has been set by
# environment or command line. This is necessary for CC and AR
@@ -52,12 +50,22 @@ define allow-override
$(eval $(1) = $(2)))
endef
+ifneq ($(LLVM),)
+$(call allow-override,CC,clang)
+$(call allow-override,AR,llvm-ar)
+$(call allow-override,LD,ld.lld)
+$(call allow-override,CXX,clang++)
+$(call allow-override,STRIP,llvm-strip)
+else
# Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix.
$(call allow-override,CC,$(CROSS_COMPILE)gcc)
$(call allow-override,AR,$(CROSS_COMPILE)ar)
$(call allow-override,LD,$(CROSS_COMPILE)ld)
$(call allow-override,CXX,$(CROSS_COMPILE)g++)
$(call allow-override,STRIP,$(CROSS_COMPILE)strip)
+endif
+
+CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
ifneq ($(LLVM),)
HOSTAR ?= llvm-ar
@@ -86,7 +94,8 @@ endif
# in newer systems.
# Needed for the __raw_cmpxchg in tools/arch/x86/include/asm/cmpxchg.h
#
-# See https://lkml.org/lkml/2006/11/28/253 and https://gcc.gnu.org/gcc-4.8/changes.html,
+# See https://lore.kernel.org/lkml/9a8748490611281710g78402fbeh8ff7fcc162dbcbca@mail.gmail.com/
+# and https://gcc.gnu.org/gcc-4.8/changes.html,
# that takes into account Linus's comments (search for Wshadow) for the reasoning about
# -Wshadow not being interesting before gcc 4.8.
diff --git a/tools/spi/Makefile b/tools/spi/Makefile
index ada881afb489..0aa6dbd31fb8 100644
--- a/tools/spi/Makefile
+++ b/tools/spi/Makefile
@@ -25,11 +25,12 @@ include $(srctree)/tools/build/Makefile.include
#
# We need the following to be outside of kernel tree
#
-$(OUTPUT)include/linux/spi/spidev.h: ../../include/uapi/linux/spi/spidev.h
+$(OUTPUT)include/linux/spi: ../../include/uapi/linux/spi
mkdir -p $(OUTPUT)include/linux/spi 2>&1 || true
ln -sf $(CURDIR)/../../include/uapi/linux/spi/spidev.h $@
+ ln -sf $(CURDIR)/../../include/uapi/linux/spi/spi.h $@
-prepare: $(OUTPUT)include/linux/spi/spidev.h
+prepare: $(OUTPUT)include/linux/spi
#
# spidev_test
diff --git a/tools/testing/ktest/examples/vmware.conf b/tools/testing/ktest/examples/vmware.conf
new file mode 100644
index 000000000000..61958163d242
--- /dev/null
+++ b/tools/testing/ktest/examples/vmware.conf
@@ -0,0 +1,137 @@
+#
+# This config is an example usage of ktest.pl with a vmware guest
+#
+# VMware Setup:
+# -------------
+# - Edit the Virtual Machine ("Edit virtual machine settings")
+# - Add a Serial Port
+# - You almost certainly want it set to "Connect at power on"
+# - Select "Use socket (named pipe)"
+# - Select a name that you'll recognize, like 'ktestserialpipe'
+# - From: Server
+# - To: A Virtual Machine
+# - Save
+# - Make sure you note the name, it will be in the base directory of the
+#   virtual machine (where the "disks" are stored). The default
+# is /var/lib/vmware/<virtual machine name>/<the name you entered above>
+#
+# - Make note of the path to the VM
+# </End VMware setup>
+#
+# The guest is called 'Guest' and this would be something that
+# could be run on the host to test a virtual machine target.
+
+MACHINE = Guest
+
+# Name of the serial pipe you set in the VMware settings
+VMWARE_SERIAL_NAME = <the name you entered above>
+
+# Define a variable of the name of the VM
+# Noting this needs to be the name of the kmx file, and usually, the
+# name of the directory that it's in. If the directory and name
+# differ change the VMWARE_VM_DIR accordingly.
+# Please ommit the .kmx extension
+VMWARE_VM_NAME = <virtual machine name>
+
+# VM dir name. This is usually the same as the virtual machine's name,
+# but that is not always the case. Change it if they differ
+VMWARE_VM_DIR = ${VMWARE_VM_NAME}
+
+# Base directory that the Virtual machine is contained in
+# /var/lib/vmware is the default on Linux
+VMWARE_VM_BASE_DIR = /var/lib/vmware/${VMWARE_VM_DIR}
+
+# Use ncat to read the unix pipe. Anything that can read the Unix Pipe
+# and output its contents to stdout will work
+CONSOLE = /usr/bin/ncat -U ${VMWARE_VM_BASE_DIR}/${VMWARE_SERIAL_NAME}
+
+# Define what version of Workstation you are using
+# This is used by vmrun to use the appropriate pieces to
+# test this. In all likelihood you want 'ws' or 'player'
+# Valid options:
+# ws - Workstation (Windows or Linux host)
+# fusion - Fusion (Mac host)
+# player - Using VMware Player (Windows or Linux host)
+# Note: vmrun has to run directly on the host machine
+VMWARE_HOST_TYPE = ws
+
+# VMware provides `vmrun` to allow you to do certain things to the virtual machine
+# This should hard reset the VM and force a boot
+VMWARE_POWER_CYCLE = /usr/bin/vmrun -T ${VMWARE_HOST_TYPE} reset ${VMWARE_VM_BASE_DIR}/${VMWARE_VM_NAME}.vmx nogui
+
+#*************************************#
+# This part is the same as test.conf #
+#*************************************#
+
+# The include files will set up the type of test to run. Just set TEST to
+# which test you want to run.
+#
+# TESTS = patchcheck, randconfig, boot, test, config-bisect, bisect, min-config
+#
+# See the include/*.conf files that define these tests
+#
+TEST := patchcheck
+
+# Some tests may have more than one test to run. Define MULTI := 1 to run
+# the extra tests.
+MULTI := 0
+
+# In case you want to differentiate which type of system you are testing
+BITS := 64
+
+# REBOOT = none, error, fail, empty
+# See include/defaults.conf
+REBOOT := empty
+
+
+# The defaults file will set up various settings that can be used by all
+# machine configs.
+INCLUDE include/defaults.conf
+
+
+#*************************************#
+# Now we are different from test.conf #
+#*************************************#
+
+
+# The example here assumes that Guest is running a Fedora release
+# that uses dracut for its initfs. The POST_INSTALL will be executed
+# after the install of the kernel and modules are complete.
+#
+POST_INSTALL = ${SSH} /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
+
+# Guests sometimes get stuck on reboot. We wait 3 seconds after running
+# the reboot command and then do a full power-cycle of the guest.
+# This forces the guest to restart.
+#
+POWERCYCLE_AFTER_REBOOT = 3
+
+# We do the same after the halt command, but this time we wait 20 seconds.
+POWEROFF_AFTER_HALT = 20
+
+
+# As the defaults.conf file already has a POWER_CYCLE option defined,
+# and options can not be defined in the same section more than once
+# (all DEFAULTS sections are considered the same), we use a
+# DEFAULTS OVERRIDE section to tell ktest.pl to ignore the previously
+# defined options in favor of the options set in the OVERRIDE section.
+#
+DEFAULTS OVERRIDE
+
+# Instead of using the default POWER_CYCLE option defined in
+# defaults.conf, we use vmrun to hard reset the guest.
+# Crude, but effective.
+#
+#
+POWER_CYCLE = ${VMWARE_POWER_CYCLE}
+
+
+DEFAULTS
+
+# The following files each handle a different test case.
+# Having them included allows you to set up more than one machine and share
+# the same tests.
+INCLUDE include/patchcheck.conf
+INCLUDE include/tests.conf
+INCLUDE include/bisect.conf
+INCLUDE include/min-config.conf
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 4e2450964517..09d1578f9d66 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -24,7 +24,7 @@ my %evals;
#default opts
my %default = (
- "MAILER" => "sendmail", # default mailer
+ "MAILER" => "sendmail", # default mailer
"EMAIL_ON_ERROR" => 1,
"EMAIL_WHEN_FINISHED" => 1,
"EMAIL_WHEN_CANCELED" => 0,
@@ -36,15 +36,15 @@ my %default = (
"CLOSE_CONSOLE_SIGNAL" => "INT",
"TIMEOUT" => 120,
"TMP_DIR" => "/tmp/ktest/\${MACHINE}",
- "SLEEP_TIME" => 60, # sleep time between tests
+ "SLEEP_TIME" => 60, # sleep time between tests
"BUILD_NOCLEAN" => 0,
"REBOOT_ON_ERROR" => 0,
"POWEROFF_ON_ERROR" => 0,
"REBOOT_ON_SUCCESS" => 1,
"POWEROFF_ON_SUCCESS" => 0,
"BUILD_OPTIONS" => "",
- "BISECT_SLEEP_TIME" => 60, # sleep time between bisects
- "PATCHCHECK_SLEEP_TIME" => 60, # sleep time between patch checks
+ "BISECT_SLEEP_TIME" => 60, # sleep time between bisects
+ "PATCHCHECK_SLEEP_TIME" => 60, # sleep time between patch checks
"CLEAR_LOG" => 0,
"BISECT_MANUAL" => 0,
"BISECT_SKIP" => 1,
@@ -512,6 +512,69 @@ $config_help{"REBOOT_SCRIPT"} = << "EOF"
EOF
;
+# used with process_expression()
+my $d = 0;
+
+# defined before get_test_name()
+my $in_die = 0;
+
+# defined before process_warning_line()
+my $check_build_re = ".*:.*(warning|error|Error):.*";
+my $utf8_quote = "\\x{e2}\\x{80}(\\x{98}|\\x{99})";
+
+# defined before child_finished()
+my $child_done;
+
+# config_ignore holds the configs that were set (or unset) for
+# a good config and we will ignore these configs for the rest
+# of a config bisect. These configs stay as they were.
+my %config_ignore;
+
+# config_set holds what all configs were set as.
+my %config_set;
+
+# config_off holds the set of configs that the bad config had disabled.
+# We need to record them and set them in the .config when running
+# olddefconfig, because olddefconfig keeps the defaults.
+my %config_off;
+
+# config_off_tmp holds a set of configs to turn off for now
+my @config_off_tmp;
+
+# config_list is the set of configs that are being tested
+my %config_list;
+my %null_config;
+
+my %dependency;
+
+# found above run_config_bisect()
+my $pass = 1;
+
+# found above add_dep()
+
+my %depends;
+my %depcount;
+my $iflevel = 0;
+my @ifdeps;
+
+# prevent recursion
+my %read_kconfigs;
+
+# found above test_this_config()
+my %min_configs;
+my %keep_configs;
+my %save_configs;
+my %processed_configs;
+my %nochange_config;
+
+#
+# These are first defined here, main function later on
+#
+sub run_command;
+sub start_monitor;
+sub end_monitor;
+sub wait_for_monitor;
+
sub _logit {
if (defined($opt{"LOG_FILE"})) {
print LOG @_;
@@ -537,7 +600,7 @@ sub read_prompt {
my $ans;
for (;;) {
- if ($cancel) {
+ if ($cancel) {
print "$prompt [y/n/C] ";
} else {
print "$prompt [Y/n] ";
@@ -760,7 +823,7 @@ sub process_variables {
# remove the space added in the beginning
$retval =~ s/ //;
- return "$retval"
+ return "$retval";
}
sub set_value {
@@ -863,7 +926,6 @@ sub value_defined {
defined($opt{$2});
}
-my $d = 0;
sub process_expression {
my ($name, $val) = @_;
@@ -978,7 +1040,6 @@ sub __read_config {
$override = 0;
if ($type eq "TEST_START") {
-
if ($num_tests_set) {
die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
}
@@ -1048,7 +1109,6 @@ sub __read_config {
$test_num = $old_test_num;
$repeat = $old_repeat;
}
-
} elsif (/^\s*ELSE\b(.*)$/) {
if (!$if) {
die "$name: $.: ELSE found with out matching IF section\n$_";
@@ -1095,7 +1155,7 @@ sub __read_config {
}
}
}
-
+
if ( ! -r $file ) {
die "$name: $.: Can't read file $file\n$_";
}
@@ -1186,13 +1246,13 @@ sub __read_config {
}
sub get_test_case {
- print "What test case would you like to run?\n";
- print " (build, install or boot)\n";
- print " Other tests are available but require editing ktest.conf\n";
- print " (see tools/testing/ktest/sample.conf)\n";
- my $ans = <STDIN>;
- chomp $ans;
- $default{"TEST_TYPE"} = $ans;
+ print "What test case would you like to run?\n";
+ print " (build, install or boot)\n";
+ print " Other tests are available but require editing ktest.conf\n";
+ print " (see tools/testing/ktest/sample.conf)\n";
+ my $ans = <STDIN>;
+ chomp $ans;
+ $default{"TEST_TYPE"} = $ans;
}
sub read_config {
@@ -1368,11 +1428,6 @@ sub eval_option {
return $option;
}
-sub run_command;
-sub start_monitor;
-sub end_monitor;
-sub wait_for_monitor;
-
sub reboot {
my ($time) = @_;
my $powercycle = 0;
@@ -1457,8 +1512,6 @@ sub do_not_reboot {
($test_type eq "config_bisect" && $opt{"CONFIG_BISECT_TYPE[$i]"} eq "build");
}
-my $in_die = 0;
-
sub get_test_name() {
my $name;
@@ -1471,7 +1524,6 @@ sub get_test_name() {
}
sub dodie {
-
# avoid recursion
return if ($in_die);
$in_die = 1;
@@ -1481,10 +1533,8 @@ sub dodie {
doprint "CRITICAL FAILURE... [TEST $i] ", @_, "\n";
if ($reboot_on_error && !do_not_reboot) {
-
doprint "REBOOTING\n";
reboot_to_good;
-
} elsif ($poweroff_on_error && defined($power_off)) {
doprint "POWERING OFF\n";
`$power_off`;
@@ -1519,13 +1569,14 @@ sub dodie {
close O;
close L;
}
- send_email("KTEST: critical failure for test $i [$name]",
- "Your test started at $script_start_time has failed with:\n@_\n", $log_file);
+
+ send_email("KTEST: critical failure for test $i [$name]",
+ "Your test started at $script_start_time has failed with:\n@_\n", $log_file);
}
if ($monitor_cnt) {
- # restore terminal settings
- system("stty $stty_orig");
+ # restore terminal settings
+ system("stty $stty_orig");
}
if (defined($post_test)) {
@@ -1709,81 +1760,81 @@ sub wait_for_monitor {
}
sub save_logs {
- my ($result, $basedir) = @_;
- my @t = localtime;
- my $date = sprintf "%04d%02d%02d%02d%02d%02d",
- 1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0];
+ my ($result, $basedir) = @_;
+ my @t = localtime;
+ my $date = sprintf "%04d%02d%02d%02d%02d%02d",
+ 1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0];
- my $type = $build_type;
- if ($type =~ /useconfig/) {
- $type = "useconfig";
- }
+ my $type = $build_type;
+ if ($type =~ /useconfig/) {
+ $type = "useconfig";
+ }
- my $dir = "$machine-$test_type-$type-$result-$date";
+ my $dir = "$machine-$test_type-$type-$result-$date";
- $dir = "$basedir/$dir";
+ $dir = "$basedir/$dir";
- if (!-d $dir) {
- mkpath($dir) or
- dodie "can't create $dir";
- }
+ if (!-d $dir) {
+ mkpath($dir) or
+ dodie "can't create $dir";
+ }
- my %files = (
- "config" => $output_config,
- "buildlog" => $buildlog,
- "dmesg" => $dmesg,
- "testlog" => $testlog,
- );
+ my %files = (
+ "config" => $output_config,
+ "buildlog" => $buildlog,
+ "dmesg" => $dmesg,
+ "testlog" => $testlog,
+ );
- while (my ($name, $source) = each(%files)) {
- if (-f "$source") {
- cp "$source", "$dir/$name" or
- dodie "failed to copy $source";
- }
+ while (my ($name, $source) = each(%files)) {
+ if (-f "$source") {
+ cp "$source", "$dir/$name" or
+ dodie "failed to copy $source";
}
+ }
- doprint "*** Saved info to $dir ***\n";
+ doprint "*** Saved info to $dir ***\n";
}
sub fail {
- if ($die_on_failure) {
- dodie @_;
- }
+ if ($die_on_failure) {
+ dodie @_;
+ }
- doprint "FAILED\n";
+ doprint "FAILED\n";
- my $i = $iteration;
+ my $i = $iteration;
- # no need to reboot for just building.
- if (!do_not_reboot) {
- doprint "REBOOTING\n";
- reboot_to_good $sleep_time;
- }
+ # no need to reboot for just building.
+ if (!do_not_reboot) {
+ doprint "REBOOTING\n";
+ reboot_to_good $sleep_time;
+ }
- my $name = "";
+ my $name = "";
- if (defined($test_name)) {
- $name = " ($test_name)";
- }
+ if (defined($test_name)) {
+ $name = " ($test_name)";
+ }
- print_times;
+ print_times;
- doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
- doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
- doprint "KTEST RESULT: TEST $i$name Failed: ", @_, "\n";
- doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
- doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+ doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+ doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+ doprint "KTEST RESULT: TEST $i$name Failed: ", @_, "\n";
+ doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+ doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
- if (defined($store_failures)) {
- save_logs "fail", $store_failures;
- }
+ if (defined($store_failures)) {
+ save_logs "fail", $store_failures;
+ }
- if (defined($post_test)) {
- run_command $post_test;
- }
+ if (defined($post_test)) {
+ run_command $post_test;
+ }
- return 1;
+ return 1;
}
sub run_command {
@@ -1915,8 +1966,8 @@ sub _get_grub_index {
my ($command, $target, $skip) = @_;
return if (defined($grub_number) && defined($last_grub_menu) &&
- $last_grub_menu eq $grub_menu && defined($last_machine) &&
- $last_machine eq $machine);
+ $last_grub_menu eq $grub_menu && defined($last_machine) &&
+ $last_machine eq $machine);
doprint "Find $reboot_type menu ... ";
$grub_number = -1;
@@ -1924,8 +1975,8 @@ sub _get_grub_index {
my $ssh_grub = $ssh_exec;
$ssh_grub =~ s,\$SSH_COMMAND,$command,g;
- open(IN, "$ssh_grub |")
- or dodie "unable to execute $command";
+ open(IN, "$ssh_grub |") or
+ dodie "unable to execute $command";
my $found = 0;
@@ -1969,9 +2020,9 @@ sub get_grub_index {
$target = '^menuentry.*' . $grub_menu_qt;
$skip = '^menuentry\s|^submenu\s';
} elsif ($reboot_type eq "grub2bls") {
- $command = $grub_bls_get;
- $target = '^title=.*' . $grub_menu_qt;
- $skip = '^title=';
+ $command = $grub_bls_get;
+ $target = '^title=.*' . $grub_menu_qt;
+ $skip = '^title=';
} else {
return;
}
@@ -1979,8 +2030,7 @@ sub get_grub_index {
_get_grub_index($command, $target, $skip);
}
-sub wait_for_input
-{
+sub wait_for_input {
my ($fp, $time) = @_;
my $start_time;
my $rin;
@@ -2096,7 +2146,6 @@ sub monitor {
my $version_found = 0;
while (!$done) {
-
if ($bug && defined($stop_after_failure) &&
$stop_after_failure >= 0) {
my $time = $stop_after_failure - (time - $failure_start);
@@ -2349,9 +2398,6 @@ sub start_monitor_and_install {
return monitor;
}
-my $check_build_re = ".*:.*(warning|error|Error):.*";
-my $utf8_quote = "\\x{e2}\\x{80}(\\x{98}|\\x{99})";
-
sub process_warning_line {
my ($line) = @_;
@@ -2394,7 +2440,7 @@ sub check_buildlog {
while (<IN>) {
if (/$check_build_re/) {
my $warning = process_warning_line $_;
-
+
$warnings_list{$warning} = 1;
}
}
@@ -2571,7 +2617,6 @@ sub build {
run_command "mv $outputdir/config_temp $output_config" or
dodie "moving config_temp";
}
-
} elsif (!$noclean) {
unlink "$output_config";
run_command "$make mrproper" or
@@ -2594,6 +2639,9 @@ sub build {
# Run old config regardless, to enforce min configurations
make_oldconfig;
+	if (not defined($build_options)) {
+ $build_options = "";
+ }
my $build_ret = run_command "$make $build_options", $buildlog;
if (defined($post_build)) {
@@ -2649,14 +2697,15 @@ sub success {
print_times;
- doprint "\n\n*******************************************\n";
- doprint "*******************************************\n";
- doprint "KTEST RESULT: TEST $i$name SUCCESS!!!! **\n";
- doprint "*******************************************\n";
- doprint "*******************************************\n";
+ doprint "\n\n";
+ doprint "*******************************************\n";
+ doprint "*******************************************\n";
+ doprint "KTEST RESULT: TEST $i$name SUCCESS!!!! **\n";
+ doprint "*******************************************\n";
+ doprint "*******************************************\n";
if (defined($store_successes)) {
- save_logs "success", $store_successes;
+ save_logs "success", $store_successes;
}
if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) {
@@ -2698,8 +2747,6 @@ sub child_run_test {
exit $run_command_status;
}
-my $child_done;
-
sub child_finished {
$child_done = 1;
}
@@ -3031,7 +3078,6 @@ sub bisect {
}
if ($do_check) {
-
# get current HEAD
my $head = get_sha1("HEAD");
@@ -3071,13 +3117,11 @@ sub bisect {
run_command "git bisect replay $replay" or
dodie "failed to run replay";
} else {
-
run_command "git bisect good $good" or
dodie "could not set bisect good to $good";
run_git_bisect "git bisect bad $bad" or
dodie "could not set bisect bad to $bad";
-
}
if (defined($start)) {
@@ -3103,35 +3147,13 @@ sub bisect {
success $i;
}
-# config_ignore holds the configs that were set (or unset) for
-# a good config and we will ignore these configs for the rest
-# of a config bisect. These configs stay as they were.
-my %config_ignore;
-
-# config_set holds what all configs were set as.
-my %config_set;
-
-# config_off holds the set of configs that the bad config had disabled.
-# We need to record them and set them in the .config when running
-# olddefconfig, because olddefconfig keeps the defaults.
-my %config_off;
-
-# config_off_tmp holds a set of configs to turn off for now
-my @config_off_tmp;
-
-# config_list is the set of configs that are being tested
-my %config_list;
-my %null_config;
-
-my %dependency;
-
sub assign_configs {
my ($hash, $config) = @_;
doprint "Reading configs from $config\n";
- open (IN, $config)
- or dodie "Failed to read $config";
+ open (IN, $config) or
+ dodie "Failed to read $config";
while (<IN>) {
chomp;
@@ -3219,8 +3241,6 @@ sub config_bisect_end {
doprint "***************************************\n\n";
}
-my $pass = 1;
-
sub run_config_bisect {
my ($good, $bad, $last_result) = @_;
my $reset = "";
@@ -3243,13 +3263,13 @@ sub run_config_bisect {
$ret = run_config_bisect_test $config_bisect_type;
if ($ret) {
- doprint "NEW GOOD CONFIG ($pass)\n";
+ doprint "NEW GOOD CONFIG ($pass)\n";
system("cp $output_config $tmpdir/good_config.tmp.$pass");
$pass++;
# Return 3 for good config
return 3;
} else {
- doprint "NEW BAD CONFIG ($pass)\n";
+ doprint "NEW BAD CONFIG ($pass)\n";
system("cp $output_config $tmpdir/bad_config.tmp.$pass");
$pass++;
# Return 4 for bad config
@@ -3284,10 +3304,11 @@ sub config_bisect {
if (!defined($config_bisect_exec)) {
# First check the location that ktest.pl ran
- my @locations = ( "$pwd/config-bisect.pl",
- "$dirname/config-bisect.pl",
- "$builddir/tools/testing/ktest/config-bisect.pl",
- undef );
+ my @locations = (
+ "$pwd/config-bisect.pl",
+ "$dirname/config-bisect.pl",
+ "$builddir/tools/testing/ktest/config-bisect.pl",
+ undef );
foreach my $loc (@locations) {
doprint "loc = $loc\n";
$config_bisect_exec = $loc;
@@ -3368,7 +3389,7 @@ sub config_bisect {
} while ($ret == 3 || $ret == 4);
if ($ret == 2) {
- config_bisect_end "$good_config.tmp", "$bad_config.tmp";
+ config_bisect_end "$good_config.tmp", "$bad_config.tmp";
}
return $ret if ($ret < 0);
@@ -3511,14 +3532,6 @@ sub patchcheck {
return 1;
}
-my %depends;
-my %depcount;
-my $iflevel = 0;
-my @ifdeps;
-
-# prevent recursion
-my %read_kconfigs;
-
sub add_dep {
# $config depends on $dep
my ($config, $dep) = @_;
@@ -3548,7 +3561,6 @@ sub read_kconfig {
my $cont = 0;
my $line;
-
if (! -f $kconfig) {
doprint "file $kconfig does not exist, skipping\n";
return;
@@ -3630,8 +3642,8 @@ sub read_kconfig {
sub read_depends {
# find out which arch this is by the kconfig file
- open (IN, $output_config)
- or dodie "Failed to read $output_config";
+ open (IN, $output_config) or
+ dodie "Failed to read $output_config";
my $arch;
while (<IN>) {
if (m,Linux/(\S+)\s+\S+\s+Kernel Configuration,) {
@@ -3657,7 +3669,7 @@ sub read_depends {
if (! -f $kconfig && $arch =~ /\d$/) {
my $orig = $arch;
- # some subarchs have numbers, truncate them
+ # some subarchs have numbers, truncate them
$arch =~ s/\d*$//;
$kconfig = "$builddir/arch/$arch/Kconfig";
if (! -f $kconfig) {
@@ -3706,7 +3718,6 @@ sub get_depends {
my @configs;
while ($dep =~ /[$valid]/) {
-
if ($dep =~ /^[^$valid]*([$valid]+)/) {
my $conf = "CONFIG_" . $1;
@@ -3721,12 +3732,6 @@ sub get_depends {
return @configs;
}
-my %min_configs;
-my %keep_configs;
-my %save_configs;
-my %processed_configs;
-my %nochange_config;
-
sub test_this_config {
my ($config) = @_;
@@ -3852,7 +3857,7 @@ sub make_min_config {
foreach my $config (@config_keys) {
my $kconfig = chomp_config $config;
if (!defined $depcount{$kconfig}) {
- $depcount{$kconfig} = 0;
+ $depcount{$kconfig} = 0;
}
}
@@ -3887,7 +3892,6 @@ sub make_min_config {
my $take_two = 0;
while (!$done) {
-
my $config;
my $found;
@@ -3898,7 +3902,7 @@ sub make_min_config {
# Sort keys by who is most dependent on
@test_configs = sort { $depcount{chomp_config($b)} <=> $depcount{chomp_config($a)} }
- @test_configs ;
+ @test_configs ;
# Put configs that did not modify the config at the end.
my $reset = 1;
@@ -3954,13 +3958,13 @@ sub make_min_config {
my $failed = 0;
build "oldconfig" or $failed = 1;
if (!$failed) {
- start_monitor_and_install or $failed = 1;
+ start_monitor_and_install or $failed = 1;
- if ($type eq "test" && !$failed) {
- do_run_test or $failed = 1;
- }
+ if ($type eq "test" && !$failed) {
+ do_run_test or $failed = 1;
+ }
- end_monitor;
+ end_monitor;
}
$in_bisect = 0;
@@ -3974,8 +3978,8 @@ sub make_min_config {
# update new ignore configs
if (defined($ignore_config)) {
- open (OUT, ">$temp_config")
- or dodie "Can't write to $temp_config";
+ open (OUT, ">$temp_config") or
+ dodie "Can't write to $temp_config";
foreach my $config (keys %save_configs) {
print OUT "$save_configs{$config}\n";
}
@@ -4002,8 +4006,8 @@ sub make_min_config {
}
# Save off all the current mandatory configs
- open (OUT, ">$temp_config")
- or dodie "Can't write to $temp_config";
+ open (OUT, ">$temp_config") or
+ dodie "Can't write to $temp_config";
foreach my $config (keys %keep_configs) {
print OUT "$keep_configs{$config}\n";
}
@@ -4041,7 +4045,6 @@ sub make_warnings_file {
open(IN, $buildlog) or dodie "Can't open $buildlog";
while (<IN>) {
-
# Some compilers use UTF-8 extended for quotes
# for distcc heterogeneous systems, this causes issues
s/$utf8_quote/'/g;
@@ -4057,98 +4060,6 @@ sub make_warnings_file {
success $i;
}
-$#ARGV < 1 or die "ktest.pl version: $VERSION\n usage: ktest.pl [config-file]\n";
-
-if ($#ARGV == 0) {
- $ktest_config = $ARGV[0];
- if (! -f $ktest_config) {
- print "$ktest_config does not exist.\n";
- if (!read_yn "Create it?") {
- exit 0;
- }
- }
-}
-
-if (! -f $ktest_config) {
- $newconfig = 1;
- get_test_case;
- open(OUT, ">$ktest_config") or die "Can not create $ktest_config";
- print OUT << "EOF"
-# Generated by ktest.pl
-#
-
-# PWD is a ktest.pl variable that will result in the process working
-# directory that ktest.pl is executed in.
-
-# THIS_DIR is automatically assigned the PWD of the path that generated
-# the config file. It is best to use this variable when assigning other
-# directory paths within this directory. This allows you to easily
-# move the test cases to other locations or to other machines.
-#
-THIS_DIR := $variable{"PWD"}
-
-# Define each test with TEST_START
-# The config options below it will override the defaults
-TEST_START
-TEST_TYPE = $default{"TEST_TYPE"}
-
-DEFAULTS
-EOF
-;
- close(OUT);
-}
-read_config $ktest_config;
-
-if (defined($opt{"LOG_FILE"})) {
- $opt{"LOG_FILE"} = eval_option("LOG_FILE", $opt{"LOG_FILE"}, -1);
-}
-
-# Append any configs entered in manually to the config file.
-my @new_configs = keys %entered_configs;
-if ($#new_configs >= 0) {
- print "\nAppending entered in configs to $ktest_config\n";
- open(OUT, ">>$ktest_config") or die "Can not append to $ktest_config";
- foreach my $config (@new_configs) {
- print OUT "$config = $entered_configs{$config}\n";
- $opt{$config} = process_variables($entered_configs{$config});
- }
-}
-
-if (defined($opt{"LOG_FILE"})) {
- if ($opt{"CLEAR_LOG"}) {
- unlink $opt{"LOG_FILE"};
- }
- open(LOG, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}";
- LOG->autoflush(1);
-}
-
-doprint "\n\nSTARTING AUTOMATED TESTS\n\n";
-
-for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) {
-
- if (!$i) {
- doprint "DEFAULT OPTIONS:\n";
- } else {
- doprint "\nTEST $i OPTIONS";
- if (defined($repeat_tests{$i})) {
- $repeat = $repeat_tests{$i};
- doprint " ITERATE $repeat";
- }
- doprint "\n";
- }
-
- foreach my $option (sort keys %opt) {
-
- if ($option =~ /\[(\d+)\]$/) {
- next if ($i != $1);
- } else {
- next if ($i);
- }
-
- doprint "$option = $opt{$option}\n";
- }
-}
-
sub option_defined {
my ($option) = @_;
@@ -4261,7 +4172,6 @@ sub do_send_mail {
}
sub send_email {
-
if (defined($mailto)) {
if (!defined($mailer)) {
doprint "No email sent: email or mailer not specified in config.\n";
@@ -4274,12 +4184,103 @@ sub send_email {
sub cancel_test {
if ($email_when_canceled) {
my $name = get_test_name;
- send_email("KTEST: Your [$name] test was cancelled",
- "Your test started at $script_start_time was cancelled: sig int");
+ send_email("KTEST: Your [$name] test was cancelled",
+ "Your test started at $script_start_time was cancelled: sig int");
}
die "\nCaught Sig Int, test interrupted: $!\n"
}
+$#ARGV < 1 or die "ktest.pl version: $VERSION\n usage: ktest.pl [config-file]\n";
+
+if ($#ARGV == 0) {
+ $ktest_config = $ARGV[0];
+ if (! -f $ktest_config) {
+ print "$ktest_config does not exist.\n";
+ if (!read_yn "Create it?") {
+ exit 0;
+ }
+ }
+}
+
+if (! -f $ktest_config) {
+ $newconfig = 1;
+ get_test_case;
+ open(OUT, ">$ktest_config") or die "Can not create $ktest_config";
+ print OUT << "EOF"
+# Generated by ktest.pl
+#
+
+# PWD is a ktest.pl variable that will result in the process working
+# directory that ktest.pl is executed in.
+
+# THIS_DIR is automatically assigned the PWD of the path that generated
+# the config file. It is best to use this variable when assigning other
+# directory paths within this directory. This allows you to easily
+# move the test cases to other locations or to other machines.
+#
+THIS_DIR := $variable{"PWD"}
+
+# Define each test with TEST_START
+# The config options below it will override the defaults
+TEST_START
+TEST_TYPE = $default{"TEST_TYPE"}
+
+DEFAULTS
+EOF
+;
+ close(OUT);
+}
+read_config $ktest_config;
+
+if (defined($opt{"LOG_FILE"})) {
+ $opt{"LOG_FILE"} = eval_option("LOG_FILE", $opt{"LOG_FILE"}, -1);
+}
+
+# Append any configs entered in manually to the config file.
+my @new_configs = keys %entered_configs;
+if ($#new_configs >= 0) {
+ print "\nAppending entered in configs to $ktest_config\n";
+ open(OUT, ">>$ktest_config") or die "Can not append to $ktest_config";
+ foreach my $config (@new_configs) {
+ print OUT "$config = $entered_configs{$config}\n";
+ $opt{$config} = process_variables($entered_configs{$config});
+ }
+}
+
+if (defined($opt{"LOG_FILE"})) {
+ if ($opt{"CLEAR_LOG"}) {
+ unlink $opt{"LOG_FILE"};
+ }
+ open(LOG, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}";
+ LOG->autoflush(1);
+}
+
+doprint "\n\nSTARTING AUTOMATED TESTS\n\n";
+
+for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) {
+
+ if (!$i) {
+ doprint "DEFAULT OPTIONS:\n";
+ } else {
+ doprint "\nTEST $i OPTIONS";
+ if (defined($repeat_tests{$i})) {
+ $repeat = $repeat_tests{$i};
+ doprint " ITERATE $repeat";
+ }
+ doprint "\n";
+ }
+
+ foreach my $option (sort keys %opt) {
+ if ($option =~ /\[(\d+)\]$/) {
+ next if ($i != $1);
+ } else {
+ next if ($i);
+ }
+
+ doprint "$option = $opt{$option}\n";
+ }
+}
+
$SIG{INT} = qw(cancel_test);
# The first thing we need to do is the builds
@@ -4323,15 +4324,15 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
# The first test may override the PRE_KTEST option
if ($i == 1) {
- if (defined($pre_ktest)) {
- doprint "\n";
- run_command $pre_ktest;
- }
- if ($email_when_started) {
+ if (defined($pre_ktest)) {
+ doprint "\n";
+ run_command $pre_ktest;
+ }
+ if ($email_when_started) {
my $name = get_test_name;
- send_email("KTEST: Your [$name] test was started",
- "Your test was started on $script_start_time");
- }
+ send_email("KTEST: Your [$name] test was started",
+ "Your test was started on $script_start_time");
+ }
}
# Any test can override the POST_KTEST option
@@ -4409,7 +4410,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
my $ret = run_command $pre_test;
if (!$ret && defined($pre_test_die) &&
$pre_test_die) {
- dodie "failed to pre_test\n";
+ dodie "failed to pre_test\n";
}
}
@@ -4503,12 +4504,11 @@ if ($opt{"POWEROFF_ON_SUCCESS"}) {
run_command $switch_to_good;
}
-
doprint "\n $successes of $opt{NUM_TESTS} tests were successful\n\n";
if ($email_when_finished) {
send_email("KTEST: Your test has finished!",
- "$successes of $opt{NUM_TESTS} tests started at $script_start_time were successful!");
+ "$successes of $opt{NUM_TESTS} tests started at $script_start_time were successful!");
}
if (defined($opt{"LOG_FILE"})) {
@@ -4517,3 +4517,12 @@ if (defined($opt{"LOG_FILE"})) {
}
exit 0;
+
+##
+# The following are here to standardize tabs/spaces/etc across the most likely editors
+###
+
+# Local Variables:
+# mode: perl
+# End:
+# vim: softtabstop=4
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index d5144fcb03ac..5da8fb3762f9 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -184,7 +184,9 @@ def add_common_opts(parser) -> None:
help='Run all KUnit tests through allyesconfig',
action='store_true')
parser.add_argument('--kunitconfig',
- help='Path to Kconfig fragment that enables KUnit tests',
+ help='Path to Kconfig fragment that enables KUnit tests.'
+ ' If given a directory (e.g. lib/kunit), "/.kunitconfig" '
+ 'will get automatically appended.',
metavar='kunitconfig')
def add_build_opts(parser) -> None:
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index f309a33256cd..89a7d4024e87 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -132,6 +132,8 @@ class LinuxSourceTree(object):
return
if kunitconfig_path:
+ if os.path.isdir(kunitconfig_path):
+ kunitconfig_path = os.path.join(kunitconfig_path, KUNITCONFIG_PATH)
if not os.path.exists(kunitconfig_path):
raise ConfigError(f'Specified kunitconfig ({kunitconfig_path}) does not exist')
else:
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 1ad3049e9069..2e809dd956a7 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -251,6 +251,12 @@ class LinuxSourceTreeTest(unittest.TestCase):
with tempfile.NamedTemporaryFile('wt') as kunitconfig:
tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=kunitconfig.name)
+ def test_dir_kunitconfig(self):
+ with tempfile.TemporaryDirectory('') as dir:
+ with open(os.path.join(dir, '.kunitconfig'), 'w') as f:
+ pass
+ tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
+
# TODO: add more test cases.
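
As a usage illustration (paths hypothetical), both invocations below are
expected to resolve to the same config fragment after this change:

    ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit/.kunitconfig
    ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit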
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 6c575cf34a71..bc3299a20338 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -25,6 +25,7 @@ TARGETS += ir
TARGETS += kcmp
TARGETS += kexec
TARGETS += kvm
+TARGETS += landlock
TARGETS += lib
TARGETS += livepatch
TARGETS += lkdtm
diff --git a/tools/testing/selftests/arm64/Makefile b/tools/testing/selftests/arm64/Makefile
index 2c9d012797a7..ced910fb4019 100644
--- a/tools/testing/selftests/arm64/Makefile
+++ b/tools/testing/selftests/arm64/Makefile
@@ -4,7 +4,7 @@
ARCH ?= $(shell uname -m 2>/dev/null || echo not)
ifneq (,$(filter $(ARCH),aarch64 arm64))
-ARM64_SUBTARGETS ?= tags signal pauth fp mte
+ARM64_SUBTARGETS ?= tags signal pauth fp mte bti
else
ARM64_SUBTARGETS :=
endif
diff --git a/tools/testing/selftests/arm64/bti/.gitignore b/tools/testing/selftests/arm64/bti/.gitignore
new file mode 100644
index 000000000000..73869fabada4
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/.gitignore
@@ -0,0 +1,2 @@
+btitest
+nobtitest
diff --git a/tools/testing/selftests/arm64/bti/Makefile b/tools/testing/selftests/arm64/bti/Makefile
new file mode 100644
index 000000000000..73e013c082a6
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/Makefile
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0
+
+TEST_GEN_PROGS := btitest nobtitest
+
+PROGS := $(patsubst %,gen/%,$(TEST_GEN_PROGS))
+
+# These tests are built as freestanding binaries since otherwise BTI
+# support in ld.so is required which is not currently widespread; when
+# it is available it will still be useful to test this separately as the
+# cases for statically linked and dynamically linked binaries are
+# slightly different.
+
+CFLAGS_NOBTI = -DBTI=0
+CFLAGS_BTI = -mbranch-protection=standard -DBTI=1
+
+CFLAGS_COMMON = -ffreestanding -Wall -Wextra $(CFLAGS)
+
+BTI_CC_COMMAND = $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -c -o $@ $<
+NOBTI_CC_COMMAND = $(CC) $(CFLAGS_NOBTI) $(CFLAGS_COMMON) -c -o $@ $<
+
+%-bti.o: %.c
+ $(BTI_CC_COMMAND)
+
+%-bti.o: %.S
+ $(BTI_CC_COMMAND)
+
+%-nobti.o: %.c
+ $(NOBTI_CC_COMMAND)
+
+%-nobti.o: %.S
+ $(NOBTI_CC_COMMAND)
+
+BTI_OBJS = \
+ test-bti.o \
+ signal-bti.o \
+ start-bti.o \
+ syscall-bti.o \
+ system-bti.o \
+ teststubs-bti.o \
+ trampoline-bti.o
+gen/btitest: $(BTI_OBJS)
+ $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -nostdlib -o $@ $^
+
+NOBTI_OBJS = \
+ test-nobti.o \
+ signal-nobti.o \
+ start-nobti.o \
+ syscall-nobti.o \
+ system-nobti.o \
+ teststubs-nobti.o \
+ trampoline-nobti.o
+gen/nobtitest: $(NOBTI_OBJS)
+	$(CC) $(CFLAGS_NOBTI) $(CFLAGS_COMMON) -nostdlib -o $@ $^
+
+# Including KSFT lib.mk here will also mangle the TEST_GEN_PROGS list
+# to account for any OUTPUT target-dirs optionally provided by
+# the toplevel makefile
+include ../../lib.mk
+
+$(TEST_GEN_PROGS): $(PROGS)
+ cp $(PROGS) $(OUTPUT)/
diff --git a/tools/testing/selftests/arm64/bti/assembler.h b/tools/testing/selftests/arm64/bti/assembler.h
new file mode 100644
index 000000000000..04e7b72880ef
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/assembler.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#ifndef ASSEMBLER_H
+#define ASSEMBLER_H
+
+#define NT_GNU_PROPERTY_TYPE_0 5
+#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
+
+/* Bits for GNU_PROPERTY_AARCH64_FEATURE_1_BTI */
+#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI (1U << 0)
+#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC (1U << 1)
+
+
+.macro startfn name:req
+ .globl \name
+\name:
+ .macro endfn
+ .size \name, . - \name
+ .type \name, @function
+ .purgem endfn
+ .endm
+.endm
+
+.macro emit_aarch64_feature_1_and
+ .pushsection .note.gnu.property, "a"
+ .align 3
+ .long 2f - 1f
+ .long 6f - 3f
+ .long NT_GNU_PROPERTY_TYPE_0
+1: .string "GNU"
+2:
+ .align 3
+3: .long GNU_PROPERTY_AARCH64_FEATURE_1_AND
+ .long 5f - 4f
+4:
+#if BTI
+ .long GNU_PROPERTY_AARCH64_FEATURE_1_PAC | \
+ GNU_PROPERTY_AARCH64_FEATURE_1_BTI
+#else
+ .long 0
+#endif
+5:
+ .align 3
+6:
+ .popsection
+.endm
+
+.macro paciasp
+ hint 0x19
+.endm
+
+.macro autiasp
+ hint 0x1d
+.endm
+
+.macro __bti_
+ hint 0x20
+.endm
+
+.macro __bti_c
+ hint 0x22
+.endm
+
+.macro __bti_j
+ hint 0x24
+.endm
+
+.macro __bti_jc
+ hint 0x26
+.endm
+
+.macro bti what=
+ __bti_\what
+.endm
+
+#endif /* ! ASSEMBLER_H */
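
The emit_aarch64_feature_1_and macro above lays out an ELF
NT_GNU_PROPERTY_TYPE_0 note that the loader inspects to decide whether to
enforce BTI for the binary. A rough C view of the emitted bytes, as a sketch
only (the assembler computes the sizes from the numeric labels):

    struct gnu_property_note {
            unsigned int n_namesz;   /* 4: "GNU" plus the NUL terminator */
            unsigned int n_descsz;   /* 16: one property, padded to 8 bytes */
            unsigned int n_type;     /* NT_GNU_PROPERTY_TYPE_0 (5) */
            char n_name[4];          /* "GNU" */
            unsigned int pr_type;    /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */
            unsigned int pr_datasz;  /* 4 */
            unsigned int pr_data;    /* BTI|PAC feature bits, or 0 when !BTI */
            unsigned int pr_pad;     /* aligns the descriptor to 8 bytes */
    };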
diff --git a/tools/testing/selftests/arm64/bti/btitest.h b/tools/testing/selftests/arm64/bti/btitest.h
new file mode 100644
index 000000000000..2aff9b10336e
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/btitest.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#ifndef BTITEST_H
+#define BTITEST_H
+
+/* Trampolines for calling the test stubs: */
+void call_using_br_x0(void (*)(void));
+void call_using_br_x16(void (*)(void));
+void call_using_blr(void (*)(void));
+
+/* Test stubs: */
+void nohint_func(void);
+void bti_none_func(void);
+void bti_c_func(void);
+void bti_j_func(void);
+void bti_jc_func(void);
+void paciasp_func(void);
+
+#endif /* !BTITEST_H */
diff --git a/tools/testing/selftests/arm64/bti/compiler.h b/tools/testing/selftests/arm64/bti/compiler.h
new file mode 100644
index 000000000000..ebb6204f447a
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/compiler.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#ifndef COMPILER_H
+#define COMPILER_H
+
+#define __always_unused __attribute__((__unused__))
+#define __noreturn __attribute__((__noreturn__))
+#define __unreachable() __builtin_unreachable()
+
+/* curse(e) has value e, but the compiler cannot assume so */
+#define curse(e) ({ \
+ __typeof__(e) __curse_e = (e); \
+ asm ("" : "+r" (__curse_e)); \
+ __curse_e; \
+})
+
+#endif /* ! COMPILER_H */
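
A hedged usage sketch of curse(): the empty asm with a "+r" constraint makes
the compiler treat the value as potentially modified, so it cannot
constant-fold or dead-code-eliminate expressions involving it. The function
below is invented for illustration and is not part of the test suite:

    #include "compiler.h"

    /* Hypothetical example: forces a runtime comparison. */
    int is_forty_two(int x)
    {
            /*
             * Without curse(), a call such as is_forty_two(42) could be
             * folded to a constant at compile time; with it, the comparison
             * is performed on a live register value.
             */
            return curse(x) == 42;
    }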
diff --git a/tools/testing/selftests/arm64/bti/gen/.gitignore b/tools/testing/selftests/arm64/bti/gen/.gitignore
new file mode 100644
index 000000000000..73869fabada4
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/gen/.gitignore
@@ -0,0 +1,2 @@
+btitest
+nobtitest
diff --git a/tools/testing/selftests/arm64/bti/signal.c b/tools/testing/selftests/arm64/bti/signal.c
new file mode 100644
index 000000000000..f3fd29b91141
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/signal.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "system.h"
+#include "signal.h"
+
+int sigemptyset(sigset_t *s)
+{
+ unsigned int i;
+
+ for (i = 0; i < _NSIG_WORDS; ++i)
+ s->sig[i] = 0;
+
+ return 0;
+}
+
+int sigaddset(sigset_t *s, int n)
+{
+ if (n < 1 || n > _NSIG)
+ return -EINVAL;
+
+ s->sig[(n - 1) / _NSIG_BPW] |= 1UL << (n - 1) % _NSIG_BPW;
+ return 0;
+}
+
+int sigaction(int n, struct sigaction *sa, const struct sigaction *old)
+{
+ return syscall(__NR_rt_sigaction, n, sa, old, sizeof(sa->sa_mask));
+}
+
+int sigprocmask(int how, const sigset_t *mask, sigset_t *old)
+{
+ return syscall(__NR_rt_sigprocmask, how, mask, old, sizeof(*mask));
+}
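
As a worked example of the sigaddset() bit arithmetic above, assuming 64-bit
longs so _NSIG_BPW is 64: for SIGILL (n = 4) the bit lands in word
(4 - 1) / 64 = 0 at position (4 - 1) % 64 = 3, i.e. s->sig[0] |= 1UL << 3.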
diff --git a/tools/testing/selftests/arm64/bti/signal.h b/tools/testing/selftests/arm64/bti/signal.h
new file mode 100644
index 000000000000..103457dc880e
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/signal.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#ifndef SIGNAL_H
+#define SIGNAL_H
+
+#include <linux/signal.h>
+
+#include "system.h"
+
+typedef __sighandler_t sighandler_t;
+
+int sigemptyset(sigset_t *s);
+int sigaddset(sigset_t *s, int n);
+int sigaction(int n, struct sigaction *sa, const struct sigaction *old);
+int sigprocmask(int how, const sigset_t *mask, sigset_t *old);
+
+#endif /* ! SIGNAL_H */
diff --git a/tools/testing/selftests/arm64/bti/start.S b/tools/testing/selftests/arm64/bti/start.S
new file mode 100644
index 000000000000..831f952e0572
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/start.S
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "assembler.h"
+
+startfn _start
+ mov x0, sp
+ b start
+endfn
+
+emit_aarch64_feature_1_and
diff --git a/tools/testing/selftests/arm64/bti/syscall.S b/tools/testing/selftests/arm64/bti/syscall.S
new file mode 100644
index 000000000000..8dde8b6f3db1
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/syscall.S
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "assembler.h"
+
+startfn syscall
+ bti c
+ mov w8, w0
+ mov x0, x1
+ mov x1, x2
+ mov x2, x3
+ mov x3, x4
+ mov x4, x5
+ mov x5, x6
+ mov x6, x7
+ svc #0
+ ret
+endfn
+
+emit_aarch64_feature_1_and
diff --git a/tools/testing/selftests/arm64/bti/system.c b/tools/testing/selftests/arm64/bti/system.c
new file mode 100644
index 000000000000..6385d8d4973b
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/system.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "system.h"
+
+#include <asm/unistd.h>
+
+#include "compiler.h"
+
+void __noreturn exit(int n)
+{
+ syscall(__NR_exit, n);
+ __unreachable();
+}
+
+ssize_t write(int fd, const void *buf, size_t size)
+{
+ return syscall(__NR_write, fd, buf, size);
+}
diff --git a/tools/testing/selftests/arm64/bti/system.h b/tools/testing/selftests/arm64/bti/system.h
new file mode 100644
index 000000000000..aca118589705
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/system.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#ifndef SYSTEM_H
+#define SYSTEM_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+
+typedef __kernel_size_t size_t;
+typedef __kernel_ssize_t ssize_t;
+
+#include <linux/errno.h>
+#include <asm/hwcap.h>
+#include <asm/ptrace.h>
+#include <asm/unistd.h>
+
+#include "compiler.h"
+
+long syscall(int nr, ...);
+
+void __noreturn exit(int n);
+ssize_t write(int fd, const void *buf, size_t size);
+
+#endif /* ! SYSTEM_H */
diff --git a/tools/testing/selftests/arm64/bti/test.c b/tools/testing/selftests/arm64/bti/test.c
new file mode 100644
index 000000000000..656b04976ccc
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/test.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019,2021 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "system.h"
+
+#include <linux/errno.h>
+#include <linux/auxvec.h>
+#include <linux/signal.h>
+#include <asm/sigcontext.h>
+#include <asm/ucontext.h>
+
+typedef struct ucontext ucontext_t;
+
+#include "btitest.h"
+#include "compiler.h"
+#include "signal.h"
+
+#define EXPECTED_TESTS 18
+
+static volatile unsigned int test_num = 1;
+static unsigned int test_passed;
+static unsigned int test_failed;
+static unsigned int test_skipped;
+
+static void fdputs(int fd, const char *str)
+{
+ size_t len = 0;
+ const char *p = str;
+
+ while (*p++)
+ ++len;
+
+ write(fd, str, len);
+}
+
+static void putstr(const char *str)
+{
+ fdputs(1, str);
+}
+
+static void putnum(unsigned int num)
+{
+ char c;
+
+ if (num / 10)
+ putnum(num / 10);
+
+ c = '0' + (num % 10);
+ write(1, &c, 1);
+}
+
+#define puttestname(test_name, trampoline_name) do { \
+ putstr(test_name); \
+ putstr("/"); \
+ putstr(trampoline_name); \
+} while (0)
+
+void print_summary(void)
+{
+ putstr("# Totals: pass:");
+ putnum(test_passed);
+ putstr(" fail:");
+ putnum(test_failed);
+ putstr(" xfail:0 xpass:0 skip:");
+ putnum(test_skipped);
+ putstr(" error:0\n");
+}
+
+static const char *volatile current_test_name;
+static const char *volatile current_trampoline_name;
+static volatile int sigill_expected, sigill_received;
+
+static void handler(int n, siginfo_t *si __always_unused,
+ void *uc_ __always_unused)
+{
+ ucontext_t *uc = uc_;
+
+ putstr("# \t[SIGILL in ");
+ puttestname(current_test_name, current_trampoline_name);
+ putstr(", BTYPE=");
+ write(1, &"00011011"[((uc->uc_mcontext.pstate & PSR_BTYPE_MASK)
+ >> PSR_BTYPE_SHIFT) * 2], 2);
+ if (!sigill_expected) {
+ putstr("]\n");
+ putstr("not ok ");
+ putnum(test_num);
+ putstr(" ");
+ puttestname(current_test_name, current_trampoline_name);
+ putstr("(unexpected SIGILL)\n");
+ print_summary();
+ exit(128 + n);
+ }
+
+ putstr(" (expected)]\n");
+ sigill_received = 1;
+ /* zap BTYPE so that resuming the faulting code will work */
+ uc->uc_mcontext.pstate &= ~PSR_BTYPE_MASK;
+}
+
+static int skip_all;
+
+static void __do_test(void (*trampoline)(void (*)(void)),
+ void (*fn)(void),
+ const char *trampoline_name,
+ const char *name,
+ int expect_sigill)
+{
+ if (skip_all) {
+ test_skipped++;
+ putstr("ok ");
+ putnum(test_num);
+ putstr(" ");
+ puttestname(name, trampoline_name);
+ putstr(" # SKIP\n");
+
+ return;
+ }
+
+ /* Branch Target exceptions should only happen in BTI binaries: */
+ if (!BTI)
+ expect_sigill = 0;
+
+ sigill_expected = expect_sigill;
+ sigill_received = 0;
+ current_test_name = name;
+ current_trampoline_name = trampoline_name;
+
+ trampoline(fn);
+
+ if (expect_sigill && !sigill_received) {
+ putstr("not ok ");
+ test_failed++;
+ } else {
+ putstr("ok ");
+ test_passed++;
+ }
+ putnum(test_num++);
+ putstr(" ");
+ puttestname(name, trampoline_name);
+ putstr("\n");
+}
+
+#define do_test(expect_sigill_br_x0, \
+ expect_sigill_br_x16, \
+ expect_sigill_blr, \
+ name) \
+do { \
+ __do_test(call_using_br_x0, name, "call_using_br_x0", #name, \
+ expect_sigill_br_x0); \
+ __do_test(call_using_br_x16, name, "call_using_br_x16", #name, \
+ expect_sigill_br_x16); \
+ __do_test(call_using_blr, name, "call_using_blr", #name, \
+ expect_sigill_blr); \
+} while (0)
+
+void start(int *argcp)
+{
+ struct sigaction sa;
+ void *const *p;
+ const struct auxv_entry {
+ unsigned long type;
+ unsigned long val;
+ } *auxv;
+ unsigned long hwcap = 0, hwcap2 = 0;
+
+ putstr("TAP version 13\n");
+ putstr("1..");
+ putnum(EXPECTED_TESTS);
+ putstr("\n");
+
+ /* Gross hack for finding AT_HWCAP2 from the initial process stack: */
+ p = (void *const *)argcp + 1 + *argcp + 1; /* start of environment */
+ /* step over environment */
+ while (*p++)
+ ;
+ for (auxv = (const struct auxv_entry *)p; auxv->type != AT_NULL; ++auxv) {
+ switch (auxv->type) {
+ case AT_HWCAP:
+ hwcap = auxv->val;
+ break;
+ case AT_HWCAP2:
+ hwcap2 = auxv->val;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (hwcap & HWCAP_PACA)
+ putstr("# HWCAP_PACA present\n");
+ else
+ putstr("# HWCAP_PACA not present\n");
+
+ if (hwcap2 & HWCAP2_BTI) {
+ putstr("# HWCAP2_BTI present\n");
+ if (!(hwcap & HWCAP_PACA))
+ putstr("# Bad hardware? Expect problems.\n");
+ } else {
+ putstr("# HWCAP2_BTI not present\n");
+ skip_all = 1;
+ }
+
+ putstr("# Test binary");
+ if (!BTI)
+ putstr(" not");
+ putstr(" built for BTI\n");
+
+ sa.sa_handler = (sighandler_t)(void *)handler;
+ sa.sa_flags = SA_SIGINFO;
+ sigemptyset(&sa.sa_mask);
+ sigaction(SIGILL, &sa, NULL);
+ sigaddset(&sa.sa_mask, SIGILL);
+ sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);
+
+ do_test(1, 1, 1, nohint_func);
+ do_test(1, 1, 1, bti_none_func);
+ do_test(1, 0, 0, bti_c_func);
+ do_test(0, 0, 1, bti_j_func);
+ do_test(0, 0, 0, bti_jc_func);
+ do_test(1, 0, 0, paciasp_func);
+
+ print_summary();
+
+ if (test_num - 1 != EXPECTED_TESTS)
+ putstr("# WARNING - EXPECTED TEST COUNT WRONG\n");
+
+ if (test_failed)
+ exit(1);
+ else
+ exit(0);
+}
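
For reference, a passing run of the 18 tests (6 stubs times 3 trampolines)
should emit TAP output shaped roughly like this sketch:

    TAP version 13
    1..18
    ok 1 nohint_func/call_using_br_x0
    ok 2 nohint_func/call_using_br_x16
    ...
    # Totals: pass:18 fail:0 xfail:0 xpass:0 skip:0 error:0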
diff --git a/tools/testing/selftests/arm64/bti/teststubs.S b/tools/testing/selftests/arm64/bti/teststubs.S
new file mode 100644
index 000000000000..b62c8c35f67e
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/teststubs.S
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "assembler.h"
+
+startfn bti_none_func
+ bti
+ ret
+endfn
+
+startfn bti_c_func
+ bti c
+ ret
+endfn
+
+startfn bti_j_func
+ bti j
+ ret
+endfn
+
+startfn bti_jc_func
+ bti jc
+ ret
+endfn
+
+startfn paciasp_func
+ paciasp
+ autiasp
+ ret
+endfn
+
+startfn nohint_func
+ ret
+endfn
+
+emit_aarch64_feature_1_and
diff --git a/tools/testing/selftests/arm64/bti/trampoline.S b/tools/testing/selftests/arm64/bti/trampoline.S
new file mode 100644
index 000000000000..09beb3f361f1
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/trampoline.S
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "assembler.h"
+
+startfn call_using_br_x0
+ bti c
+ br x0
+endfn
+
+startfn call_using_br_x16
+ bti c
+ mov x16, x0
+ br x16
+endfn
+
+startfn call_using_blr
+ paciasp
+ stp x29, x30, [sp, #-16]!
+ blr x0
+ ldp x29, x30, [sp], #16
+ autiasp
+ ret
+endfn
+
+emit_aarch64_feature_1_and
diff --git a/tools/testing/selftests/arm64/mte/Makefile b/tools/testing/selftests/arm64/mte/Makefile
index 0b3af552632a..409e3e53d00a 100644
--- a/tools/testing/selftests/arm64/mte/Makefile
+++ b/tools/testing/selftests/arm64/mte/Makefile
@@ -1,14 +1,18 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2020 ARM Limited
-CFLAGS += -std=gnu99 -I. -lpthread
+# preserve CC value from top level Makefile
+ifeq ($(CC),cc)
+CC := $(CROSS_COMPILE)gcc
+endif
+
+CFLAGS += -std=gnu99 -I. -pthread
+LDFLAGS += -pthread
SRCS := $(filter-out mte_common_util.c,$(wildcard *.c))
PROGS := $(patsubst %.c,%,$(SRCS))
#Add mte compiler option
-ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep gcc),)
CFLAGS += -march=armv8.5-a+memtag
-endif
#check if the compiler works well
mte_cc_support := $(shell if ($(CC) $(CFLAGS) -E -x c /dev/null -o /dev/null 2>&1) then echo "1"; fi)
@@ -19,11 +23,14 @@ TEST_GEN_PROGS := $(PROGS)
# Get Kernel headers installed and use them.
KSFT_KHDR_INSTALL := 1
+else
+ $(warning compiler "$(CC)" does not support the ARMv8.5 MTE extension.)
+ $(warning test program "mte" will not be created.)
endif
# Include KSFT lib.mk.
include ../../lib.mk
ifeq ($(mte_cc_support),1)
-$(TEST_GEN_PROGS): mte_common_util.c mte_common_util.h mte_helper.S
+$(TEST_GEN_PROGS): mte_common_util.c mte_helper.S
endif
diff --git a/tools/testing/selftests/arm64/mte/check_ksm_options.c b/tools/testing/selftests/arm64/mte/check_ksm_options.c
index 3b23c4d61d38..88c74bc46d4f 100644
--- a/tools/testing/selftests/arm64/mte/check_ksm_options.c
+++ b/tools/testing/selftests/arm64/mte/check_ksm_options.c
@@ -33,7 +33,10 @@ static unsigned long read_sysfs(char *str)
ksft_print_msg("ERR: missing %s\n", str);
return 0;
}
- fscanf(f, "%lu", &val);
+ if (fscanf(f, "%lu", &val) != 1) {
+ ksft_print_msg("ERR: parsing %s\n", str);
+ val = 0;
+ }
fclose(f);
return val;
}
diff --git a/tools/testing/selftests/arm64/mte/check_user_mem.c b/tools/testing/selftests/arm64/mte/check_user_mem.c
index 4bfa80f2a8c3..1de7a0abd0ae 100644
--- a/tools/testing/selftests/arm64/mte/check_user_mem.c
+++ b/tools/testing/selftests/arm64/mte/check_user_mem.c
@@ -33,7 +33,8 @@ static int check_usermem_access_fault(int mem_type, int mode, int mapping)
if (fd == -1)
return KSFT_FAIL;
for (i = 0; i < len; i++)
- write(fd, &val, sizeof(val));
+ if (write(fd, &val, sizeof(val)) != sizeof(val))
+ return KSFT_FAIL;
lseek(fd, 0, 0);
ptr = mte_allocate_memory(len, mem_type, mapping, true);
if (check_allocated_memory(ptr, len, mem_type, true) != KSFT_PASS) {
diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
index 39f8908988ea..f50ac31920d1 100644
--- a/tools/testing/selftests/arm64/mte/mte_common_util.c
+++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
@@ -181,10 +181,17 @@ void *mte_allocate_file_memory(size_t size, int mem_type, int mapping, bool tags
}
/* Initialize the file for mappable size */
lseek(fd, 0, SEEK_SET);
- for (index = INIT_BUFFER_SIZE; index < size; index += INIT_BUFFER_SIZE)
- write(fd, buffer, INIT_BUFFER_SIZE);
+ for (index = INIT_BUFFER_SIZE; index < size; index += INIT_BUFFER_SIZE) {
+ if (write(fd, buffer, INIT_BUFFER_SIZE) != INIT_BUFFER_SIZE) {
+ perror("initialising buffer");
+ return NULL;
+ }
+ }
index -= INIT_BUFFER_SIZE;
- write(fd, buffer, size - index);
+ if (write(fd, buffer, size - index) != size - index) {
+ perror("initialising buffer");
+ return NULL;
+ }
return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, fd);
}
@@ -202,9 +209,15 @@ void *mte_allocate_file_memory_tag_range(size_t size, int mem_type, int mapping,
/* Initialize the file for mappable size */
lseek(fd, 0, SEEK_SET);
for (index = INIT_BUFFER_SIZE; index < map_size; index += INIT_BUFFER_SIZE)
- write(fd, buffer, INIT_BUFFER_SIZE);
+ if (write(fd, buffer, INIT_BUFFER_SIZE) != INIT_BUFFER_SIZE) {
+ perror("initialising buffer");
+ return NULL;
+ }
index -= INIT_BUFFER_SIZE;
- write(fd, buffer, map_size - index);
+ if (write(fd, buffer, map_size - index) != map_size - index) {
+ perror("initialising buffer");
+ return NULL;
+ }
return __mte_allocate_memory_range(size, mem_type, mapping, range_before,
range_after, true, fd);
}
@@ -271,29 +284,20 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask)
en |= (incl_mask << PR_MTE_TAG_SHIFT);
/* Enable address tagging ABI, mte error reporting mode and tag inclusion mask. */
- if (!prctl(PR_SET_TAGGED_ADDR_CTRL, en, 0, 0, 0) == 0) {
+ if (prctl(PR_SET_TAGGED_ADDR_CTRL, en, 0, 0, 0) != 0) {
ksft_print_msg("FAIL:prctl PR_SET_TAGGED_ADDR_CTRL for mte mode\n");
return -EINVAL;
}
return 0;
}
-#define ID_AA64PFR1_MTE_SHIFT 8
-#define ID_AA64PFR1_MTE 2
-
int mte_default_setup(void)
{
- unsigned long hwcaps = getauxval(AT_HWCAP);
+ unsigned long hwcaps2 = getauxval(AT_HWCAP2);
unsigned long en = 0;
int ret;
- if (!(hwcaps & HWCAP_CPUID)) {
- ksft_print_msg("FAIL: CPUID registers unavailable\n");
- return KSFT_FAIL;
- }
- /* Read ID_AA64PFR1_EL1 register */
- asm volatile("mrs %0, id_aa64pfr1_el1" : "=r"(hwcaps) : : "memory");
- if (((hwcaps >> ID_AA64PFR1_MTE_SHIFT) & MT_TAG_MASK) != ID_AA64PFR1_MTE) {
+ if (!(hwcaps2 & HWCAP2_MTE)) {
ksft_print_msg("FAIL: MTE features unavailable\n");
return KSFT_SKIP;
}
@@ -333,6 +337,7 @@ int create_temp_file(void)
/* Create a file in the tmpfs filesystem */
fd = mkstemp(&filename[0]);
if (fd == -1) {
+ perror(filename);
ksft_print_msg("FAIL: Unable to open temporary file\n");
return 0;
}
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index c0c48fdb9ac1..4866f6a21901 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+bpf-helpers*
+bpf-syscall*
test_verifier
test_maps
test_lru_map
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 044bfdcf5b74..511259c2c6c5 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-include ../../../../scripts/Kbuild.include
+include ../../../build/Build.include
include ../../../scripts/Makefile.arch
include ../../../scripts/Makefile.include
@@ -21,13 +21,18 @@ endif
BPF_GCC ?= $(shell command -v bpf-gcc;)
SAN_CFLAGS ?=
-CFLAGS += -g -rdynamic -Wall -O2 $(GENFLAGS) $(SAN_CFLAGS) \
+CFLAGS += -g -O0 -rdynamic -Wall $(GENFLAGS) $(SAN_CFLAGS) \
-I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
-I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT) \
-Dbpf_prog_load=bpf_prog_test_load \
-Dbpf_load_program=bpf_test_load_program
LDLIBS += -lcap -lelf -lz -lrt -lpthread
+# Silence some warnings when compiled with clang
+ifneq ($(LLVM),)
+CFLAGS += -Wno-unused-command-line-argument
+endif
+
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_verifier_log test_dev_cgroup \
@@ -68,6 +73,7 @@ TEST_PROGS := test_kmod.sh \
test_bpftool_build.sh \
test_bpftool.sh \
test_bpftool_metadata.sh \
+ test_doc_build.sh \
test_xsk.sh
TEST_PROGS_EXTENDED := with_addr.sh \
@@ -103,6 +109,7 @@ override define CLEAN
$(call msg,CLEAN)
$(Q)$(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
$(Q)$(MAKE) -C bpf_testmod clean
+ $(Q)$(MAKE) docs-clean
endef
include ../lib.mk
@@ -198,18 +205,27 @@ $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
$(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
CC=$(HOSTCC) LD=$(HOSTLD) \
+ EXTRA_CFLAGS='-g -O0' \
OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
prefix= DESTDIR=$(HOST_SCRATCH_DIR)/ install
- $(Q)mkdir -p $(BUILD_DIR)/bpftool/Documentation
- $(Q)RST2MAN_OPTS="--exit-status=1" $(MAKE) $(submake_extras) \
- -C $(BPFTOOLDIR)/Documentation \
- OUTPUT=$(BUILD_DIR)/bpftool/Documentation/ \
- prefix= DESTDIR=$(SCRATCH_DIR)/ install
+
+all: docs
+
+docs:
+ $(Q)RST2MAN_OPTS="--exit-status=1" $(MAKE) $(submake_extras) \
+ -f Makefile.docs \
+ prefix= OUTPUT=$(OUTPUT)/ DESTDIR=$(OUTPUT)/ $@
+
+docs-clean:
+ $(Q)$(MAKE) $(submake_extras) \
+ -f Makefile.docs \
+ prefix= OUTPUT=$(OUTPUT)/ DESTDIR=$(OUTPUT)/ $@
$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
../../../include/uapi/linux/bpf.h \
| $(INCLUDE_DIR) $(BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \
+ EXTRA_CFLAGS='-g -O0' \
DESTDIR=$(SCRATCH_DIR) prefix= all install_headers
ifneq ($(BPFOBJ),$(HOST_BPFOBJ))
@@ -217,11 +233,12 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
../../../include/uapi/linux/bpf.h \
| $(INCLUDE_DIR) $(HOST_BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \
- OUTPUT=$(HOST_BUILD_DIR)/libbpf/ CC=$(HOSTCC) LD=$(HOSTLD) \
+ EXTRA_CFLAGS='-g -O0' \
+ OUTPUT=$(HOST_BUILD_DIR)/libbpf/ CC=$(HOSTCC) LD=$(HOSTLD) \
DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
endif
-$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
+$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
ifeq ($(VMLINUX_H),)
$(call msg,GEN,,$@)
$(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
@@ -292,6 +309,16 @@ endef
SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
+LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
+ linked_vars.skel.h linked_maps.skel.h
+
+test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o
+linked_funcs.skel.h-deps := linked_funcs1.o linked_funcs2.o
+linked_vars.skel.h-deps := linked_vars1.o linked_vars2.o
+linked_maps.skel.h-deps := linked_maps1.o linked_maps2.o
+
+LINKED_BPF_SRCS := $(patsubst %.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps)))
+
# Set up extra TRUNNER_XXX "temporary" variables in the environment (relies on
# $eval()) and pass control to DEFINE_TEST_RUNNER_RULES.
# Parameters:
@@ -310,8 +337,9 @@ TRUNNER_TESTS_HDR := $(TRUNNER_TESTS_DIR)/tests.h
TRUNNER_BPF_SRCS := $$(notdir $$(wildcard $(TRUNNER_BPF_PROGS_DIR)/*.c))
TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, $$(TRUNNER_BPF_SRCS))
TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h, \
- $$(filter-out $(SKEL_BLACKLIST), \
+ $$(filter-out $(SKEL_BLACKLIST) $(LINKED_BPF_SRCS),\
$$(TRUNNER_BPF_SRCS)))
+TRUNNER_BPF_SKELS_LINKED := $$(addprefix $$(TRUNNER_OUTPUT)/,$(LINKED_SKELS))
TEST_GEN_FILES += $$(TRUNNER_BPF_OBJS)
# Evaluate rules now with extra TRUNNER_XXX variables above already defined
@@ -344,11 +372,22 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
$(TRUNNER_BPF_CFLAGS))
-$(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h: \
- $(TRUNNER_OUTPUT)/%.o \
- | $(BPFTOOL) $(TRUNNER_OUTPUT)
+$(TRUNNER_BPF_SKELS): %.skel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
- $(Q)$$(BPFTOOL) gen skeleton $$< > $$@
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked1.o) $$<
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked2.o) $$(<:.o=.linked1.o)
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked3.o) $$(<:.o=.linked2.o)
+ $(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
+ $(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@
+
+$(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
+ $$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.o))
+ $(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked1.o) $$(addprefix $(TRUNNER_OUTPUT)/,$$($$(@F)-deps))
+ $(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked1.o)
+ $(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked3.o) $$(@:.skel.h=.linked2.o)
+ $(Q)diff $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked3.o)
+ $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
+ $(Q)$$(BPFTOOL) gen skeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$@
endif
# ensure we set up tests.h header generation rule just once
@@ -370,6 +409,7 @@ $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \
$(TRUNNER_EXTRA_HDRS) \
$(TRUNNER_BPF_OBJS) \
$(TRUNNER_BPF_SKELS) \
+ $(TRUNNER_BPF_SKELS_LINKED) \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
$$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@)
$(Q)cd $$(@D) && $$(CC) -I. $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
@@ -382,11 +422,12 @@ $(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$$(call msg,EXT-OBJ,$(TRUNNER_BINARY),$$@)
$(Q)$$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@
-# only copy extra resources if in flavored build
+# non-flavored in-srctree builds receive special treatment; in particular, we
+# do not need to copy extra resources (see e.g. test_btf_dump_case())
$(TRUNNER_BINARY)-extras: $(TRUNNER_EXTRA_FILES) | $(TRUNNER_OUTPUT)
-ifneq ($2,)
+ifneq ($2:$(OUTPUT),:$(shell pwd))
$$(call msg,EXT-COPY,$(TRUNNER_BINARY),$(TRUNNER_EXTRA_FILES))
- $(Q)cp -a $$^ $(TRUNNER_OUTPUT)/
+ $(Q)rsync -aq $$^ $(TRUNNER_OUTPUT)/
endif
$(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
@@ -452,7 +493,7 @@ $(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT)
# Make sure we are able to include and link libbpf against c++.
$(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
$(call msg,CXX,,$@)
- $(Q)$(CXX) $(CFLAGS) $^ $(LDLIBS) -o $@
+ $(Q)$(CXX) $(CFLAGS) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@
# Benchmark runner
$(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h
@@ -476,3 +517,5 @@ EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature \
$(addprefix $(OUTPUT)/,*.o *.skel.h no_alu32 bpf_gcc bpf_testmod.ko)
+
+.PHONY: docs docs-clean
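
With the hooks above in place, the selftests documentation can be driven on
its own, e.g. (run from tools/testing/selftests/bpf):

    make docs
    make docs-clean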
diff --git a/tools/testing/selftests/bpf/Makefile.docs b/tools/testing/selftests/bpf/Makefile.docs
new file mode 100644
index 000000000000..ccf260021e83
--- /dev/null
+++ b/tools/testing/selftests/bpf/Makefile.docs
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+include ../../../scripts/Makefile.include
+include ../../../scripts/utilities.mak
+
+INSTALL ?= install
+RM ?= rm -f
+RMDIR ?= rmdir --ignore-fail-on-non-empty
+
+ifeq ($(V),1)
+ Q =
+else
+ Q = @
+endif
+
+prefix ?= /usr/local
+mandir ?= $(prefix)/man
+man2dir = $(mandir)/man2
+man7dir = $(mandir)/man7
+
+SYSCALL_RST = bpf-syscall.rst
+MAN2_RST = $(SYSCALL_RST)
+
+HELPERS_RST = bpf-helpers.rst
+MAN7_RST = $(HELPERS_RST)
+
+_DOC_MAN2 = $(patsubst %.rst,%.2,$(MAN2_RST))
+DOC_MAN2 = $(addprefix $(OUTPUT),$(_DOC_MAN2))
+
+_DOC_MAN7 = $(patsubst %.rst,%.7,$(MAN7_RST))
+DOC_MAN7 = $(addprefix $(OUTPUT),$(_DOC_MAN7))
+
+DOCTARGETS := helpers syscall
+
+docs: $(DOCTARGETS)
+syscall: man2
+helpers: man7
+man2: $(DOC_MAN2)
+man7: $(DOC_MAN7)
+
+RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
+
+# Configure make rules for the man page bpf-$1.$2.
+# $1 - target for scripts/bpf_doc.py
+# $2 - man page section to generate the troff file
+define DOCS_RULES =
+$(OUTPUT)bpf-$1.rst: ../../../../include/uapi/linux/bpf.h
+ $$(QUIET_GEN)../../../../scripts/bpf_doc.py $1 \
+ --filename $$< > $$@
+
+$(OUTPUT)%.$2: $(OUTPUT)%.rst
+ifndef RST2MAN_DEP
+ $$(error "rst2man not found, but required to generate man pages")
+endif
+ $$(QUIET_GEN)rst2man $$< > $$@
+
+docs-clean-$1:
+ $$(call QUIET_CLEAN, eBPF_$1-manpage)
+ $(Q)$(RM) $$(DOC_MAN$2) $(OUTPUT)bpf-$1.rst
+
+docs-install-$1: docs
+ $$(call QUIET_INSTALL, eBPF_$1-manpage)
+ $(Q)$(INSTALL) -d -m 755 $(DESTDIR)$$(man$2dir)
+ $(Q)$(INSTALL) -m 644 $$(DOC_MAN$2) $(DESTDIR)$$(man$2dir)
+
+docs-uninstall-$1:
+ $$(call QUIET_UNINST, eBPF_$1-manpage)
+ $(Q)$(RM) $$(addprefix $(DESTDIR)$$(man$2dir)/,$$(_DOC_MAN$2))
+ $(Q)$(RMDIR) $(DESTDIR)$$(man$2dir)
+
+.PHONY: $1 docs-clean-$1 docs-install-$1 docs-uninstall-$1
+endef
+
+# Create the make targets to generate manual pages by name and section
+$(eval $(call DOCS_RULES,helpers,7))
+$(eval $(call DOCS_RULES,syscall,2))
+
+docs-clean: $(foreach doctarget,$(DOCTARGETS), docs-clean-$(doctarget))
+docs-install: $(foreach doctarget,$(DOCTARGETS), docs-install-$(doctarget))
+docs-uninstall: $(foreach doctarget,$(DOCTARGETS), docs-uninstall-$(doctarget))
+
+.PHONY: docs docs-clean docs-install docs-uninstall man2 man7
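
For instance, the $(eval $(call DOCS_RULES,helpers,7)) instantiation above
expands to rules roughly like the following sketch (QUIET_* wrappers and the
rst2man presence check omitted):

    $(OUTPUT)bpf-helpers.rst: ../../../../include/uapi/linux/bpf.h
    	../../../../scripts/bpf_doc.py helpers --filename $< > $@

    $(OUTPUT)%.7: $(OUTPUT)%.rst
    	rst2man $< > $@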
diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst
index fd148b8410fa..3353778c30f8 100644
--- a/tools/testing/selftests/bpf/README.rst
+++ b/tools/testing/selftests/bpf/README.rst
@@ -111,6 +111,45 @@ available in 10.0.1. The patch is available in llvm 11.0.0 trunk.
__ https://reviews.llvm.org/D78466
+bpf_verif_scale/loop6.o test failure with Clang 12
+==================================================
+
+With Clang 12, the following bpf_verif_scale test failed:
+ * ``bpf_verif_scale/loop6.o``
+
+The verifier output looks like:
+
+.. code-block:: c
+
+ R1 type=ctx expected=fp
+ The sequence of 8193 jumps is too complex.
+
+The reason is that the compiler generates the following code:
+
+.. code-block:: c
+
+ ; for (i = 0; (i < VIRTIO_MAX_SGS) && (i < num); i++) {
+ 14: 16 05 40 00 00 00 00 00 if w5 == 0 goto +64 <LBB0_6>
+ 15: bc 51 00 00 00 00 00 00 w1 = w5
+ 16: 04 01 00 00 ff ff ff ff w1 += -1
+ 17: 67 05 00 00 20 00 00 00 r5 <<= 32
+ 18: 77 05 00 00 20 00 00 00 r5 >>= 32
+ 19: a6 01 01 00 05 00 00 00 if w1 < 5 goto +1 <LBB0_4>
+ 20: b7 05 00 00 06 00 00 00 r5 = 6
+ 00000000000000a8 <LBB0_4>:
+ 21: b7 02 00 00 00 00 00 00 r2 = 0
+ 22: b7 01 00 00 00 00 00 00 r1 = 0
+ ; for (i = 0; (i < VIRTIO_MAX_SGS) && (i < num); i++) {
+ 23: 7b 1a e0 ff 00 00 00 00 *(u64 *)(r10 - 32) = r1
+ 24: 7b 5a c0 ff 00 00 00 00 *(u64 *)(r10 - 64) = r5
+
+Note that insn #15 has w1 = w5 and w1 is refined later, but
+r5 (w5) is eventually saved on the stack at insn #24 for later use.
+This causes a later verifier failure. The bug has been `fixed`__ in
+Clang 13.
+
+__ https://reviews.llvm.org/D97479
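+
+For reference, the loop in question has roughly the following shape
+(a sketch; everything except the loop condition is illustrative):
+
+.. code-block:: c
+
+	for (i = 0; (i < VIRTIO_MAX_SGS) && (i < num); i++)
+		total += sgs[i].len;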
+
BPF CO-RE-based tests and Clang version
=======================================
@@ -131,3 +170,35 @@ failures:
.. _2: https://reviews.llvm.org/D85174
.. _3: https://reviews.llvm.org/D83878
.. _4: https://reviews.llvm.org/D83242
+
+Floating-point tests and Clang version
+======================================
+
+Certain selftests, e.g. core_reloc, require support for floating-point
+types, which was introduced in `Clang 13`__. Older Clang versions will
+either crash when compiling these tests or generate incorrect BTF.
+
+__ https://reviews.llvm.org/D83289
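+
+For example, a structure with members like the following can only be
+described with BTF_KIND_FLOAT encoding (a sketch; the actual test
+structures differ):
+
+.. code-block:: c
+
+	struct with_floats {
+		float f;	/* BTF_KIND_FLOAT, size 4 */
+		double d;	/* BTF_KIND_FLOAT, size 8 */
+	};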
+
+Kernel function call test and Clang version
+===========================================
+
+Some selftests (e.g. kfunc_call and bpf_tcp_ca) require LLVM support
+for generating extern functions in BTF, which was introduced in `Clang 13`__.
+
+Without it, compiling the BPF selftests fails with an error like:
+
+.. code-block:: console
+
+ libbpf: failed to find BTF for extern 'tcp_slow_start' [25] section: -2
+
+__ https://reviews.llvm.org/D93563
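+
+With that support, a kernel function can be declared as a BTF-described
+extern and called directly from BPF code; this mirrors the declaration
+used in bpf_tcp_helpers.h:
+
+.. code-block:: c
+
+	extern __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked) __ksym;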
+
+Clang dependencies for static linking tests
+===========================================
+
+The linked_vars, linked_maps, and linked_funcs tests depend on a `Clang fix`__
+to generate valid BTF information for weak variables. Please make sure you
+use a Clang version that contains the fix.
+
+__ https://reviews.llvm.org/D100362
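+
+The affected pattern is a weak definition appearing in more than one
+linked object file, e.g. (a sketch):
+
+.. code-block:: c
+
+	int input_data_weak __weak = 10;	/* overridable at link time */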
diff --git a/tools/testing/selftests/bpf/bpf_tcp_helpers.h b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
index 91f0fac632f4..029589c008c9 100644
--- a/tools/testing/selftests/bpf/bpf_tcp_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
@@ -187,16 +187,6 @@ struct tcp_congestion_ops {
typeof(y) __y = (y); \
__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
-static __always_inline __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked)
-{
- __u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);
-
- acked -= cwnd - tp->snd_cwnd;
- tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
-
- return acked;
-}
-
static __always_inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
return tp->snd_cwnd < tp->snd_ssthresh;
@@ -213,22 +203,7 @@ static __always_inline bool tcp_is_cwnd_limited(const struct sock *sk)
return !!BPF_CORE_READ_BITFIELD(tp, is_cwnd_limited);
}
-static __always_inline void tcp_cong_avoid_ai(struct tcp_sock *tp, __u32 w, __u32 acked)
-{
- /* If credits accumulated at a higher w, apply them gently now. */
- if (tp->snd_cwnd_cnt >= w) {
- tp->snd_cwnd_cnt = 0;
- tp->snd_cwnd++;
- }
-
- tp->snd_cwnd_cnt += acked;
- if (tp->snd_cwnd_cnt >= w) {
- __u32 delta = tp->snd_cwnd_cnt / w;
-
- tp->snd_cwnd_cnt -= delta * w;
- tp->snd_cwnd += delta;
- }
- tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
-}
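+/* call the kernel's own implementations as BTF-described kfuncs; this
+ * requires Clang support for extern functions in BTF (see README.rst)
+ */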
+extern __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked) __ksym;
+extern void tcp_cong_avoid_ai(struct tcp_sock *tp, __u32 w, __u32 acked) __ksym;
#endif
diff --git a/tools/testing/selftests/bpf/btf_helpers.c b/tools/testing/selftests/bpf/btf_helpers.c
index 48f90490f922..b692e6ead9b5 100644
--- a/tools/testing/selftests/bpf/btf_helpers.c
+++ b/tools/testing/selftests/bpf/btf_helpers.c
@@ -23,6 +23,7 @@ static const char * const btf_kind_str_mapping[] = {
[BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
[BTF_KIND_VAR] = "VAR",
[BTF_KIND_DATASEC] = "DATASEC",
+ [BTF_KIND_FLOAT] = "FLOAT",
};
static const char *btf_kind_str(__u16 kind)
@@ -173,6 +174,9 @@ int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id)
}
break;
}
+ case BTF_KIND_FLOAT:
+ fprintf(out, " size=%u", t->size);
+ break;
default:
break;
}
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 37e1f303fc11..5192305159ec 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -44,3 +44,5 @@ CONFIG_SECURITYFS=y
CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_READ_POLICY=y
CONFIG_BLK_DEV_LOOP=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_DYNAMIC_FTRACE=y
diff --git a/tools/testing/selftests/bpf/get_cgroup_id_user.c b/tools/testing/selftests/bpf/get_cgroup_id_user.c
index b8d6aef99db4..99628e1a1e58 100644
--- a/tools/testing/selftests/bpf/get_cgroup_id_user.c
+++ b/tools/testing/selftests/bpf/get_cgroup_id_user.c
@@ -57,6 +57,10 @@ int main(int argc, char **argv)
__u32 key = 0, pid;
int exit_code = 1;
char buf[256];
+ const struct timespec req = {
+ .tv_sec = 1,
+ .tv_nsec = 0,
+ };
cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno))
@@ -115,7 +119,7 @@ int main(int argc, char **argv)
goto close_pmu;
/* trigger some syscalls */
- sleep(1);
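+	/* call nanosleep() via syscall() so the nanosleep tracepoint this
+	 * test attaches to reliably fires; libc sleep() may be implemented
+	 * with a different syscall such as clock_nanosleep()
+	 */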
+ syscall(__NR_nanosleep, &req, NULL);
err = bpf_map_lookup_elem(cgidmap_fd, &key, &kcgid);
if (CHECK(err, "bpf_map_lookup_elem", "err %d errno %d\n", err, errno))
diff --git a/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c
index f0a64d8ac59a..f4d870da7684 100644
--- a/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c
+++ b/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c
@@ -9,10 +9,13 @@
#include <test_maps.h>
+static int nr_cpus;
+
static void map_batch_update(int map_fd, __u32 max_entries, int *keys,
- int *values)
+ __s64 *values, bool is_pcpu)
{
- int i, err;
+ int i, j, err;
+ int cpu_offset = 0;
DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
.elem_flags = 0,
.flags = 0,
@@ -20,22 +23,41 @@ static void map_batch_update(int map_fd, __u32 max_entries, int *keys,
for (i = 0; i < max_entries; i++) {
keys[i] = i;
- values[i] = i + 1;
+ if (is_pcpu) {
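+			/* per-CPU maps store one value per possible CPU
+			 * for each key, laid out back to back in the
+			 * values buffer
+			 */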
+ cpu_offset = i * nr_cpus;
+ for (j = 0; j < nr_cpus; j++)
+ (values + cpu_offset)[j] = i + 1 + j;
+ } else {
+ values[i] = i + 1;
+ }
}
err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);
CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno));
}
-static void map_batch_verify(int *visited, __u32 max_entries,
- int *keys, int *values)
+static void map_batch_verify(int *visited, __u32 max_entries, int *keys,
+ __s64 *values, bool is_pcpu)
{
- int i;
+ int i, j;
+ int cpu_offset = 0;
memset(visited, 0, max_entries * sizeof(*visited));
for (i = 0; i < max_entries; i++) {
- CHECK(keys[i] + 1 != values[i], "key/value checking",
- "error: i %d key %d value %d\n", i, keys[i], values[i]);
+ if (is_pcpu) {
+ cpu_offset = i * nr_cpus;
+ for (j = 0; j < nr_cpus; j++) {
+ __s64 value = (values + cpu_offset)[j];
+ CHECK(keys[i] + j + 1 != value,
+ "key/value checking",
+ "error: i %d j %d key %d value %lld\n", i,
+ j, keys[i], value);
+ }
+ } else {
+ CHECK(keys[i] + 1 != values[i], "key/value checking",
+ "error: i %d key %d value %lld\n", i, keys[i],
+ values[i]);
+ }
visited[i] = 1;
}
for (i = 0; i < max_entries; i++) {
@@ -44,20 +66,21 @@ static void map_batch_verify(int *visited, __u32 max_entries,
}
}
-void test_array_map_batch_ops(void)
+static void __test_map_lookup_and_update_batch(bool is_pcpu)
{
struct bpf_create_map_attr xattr = {
.name = "array_map",
- .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_type = is_pcpu ? BPF_MAP_TYPE_PERCPU_ARRAY :
+ BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
- .value_size = sizeof(int),
+ .value_size = sizeof(__s64),
};
- int map_fd, *keys, *values, *visited;
+ int map_fd, *keys, *visited;
__u32 count, total, total_success;
const __u32 max_entries = 10;
- bool nospace_err;
__u64 batch = 0;
- int err, step;
+ int err, step, value_size;
+ void *values;
DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
.elem_flags = 0,
.flags = 0,
@@ -68,35 +91,35 @@ void test_array_map_batch_ops(void)
CHECK(map_fd == -1,
"bpf_create_map_xattr()", "error:%s\n", strerror(errno));
- keys = malloc(max_entries * sizeof(int));
- values = malloc(max_entries * sizeof(int));
- visited = malloc(max_entries * sizeof(int));
+ value_size = sizeof(__s64);
+ if (is_pcpu)
+ value_size *= nr_cpus;
+
+ keys = calloc(max_entries, sizeof(*keys));
+ values = calloc(max_entries, value_size);
+ visited = calloc(max_entries, sizeof(*visited));
CHECK(!keys || !values || !visited, "malloc()", "error:%s\n",
strerror(errno));
- /* populate elements to the map */
- map_batch_update(map_fd, max_entries, keys, values);
-
/* test 1: lookup in a loop with various steps. */
total_success = 0;
for (step = 1; step < max_entries; step++) {
- map_batch_update(map_fd, max_entries, keys, values);
- map_batch_verify(visited, max_entries, keys, values);
+ map_batch_update(map_fd, max_entries, keys, values, is_pcpu);
+ map_batch_verify(visited, max_entries, keys, values, is_pcpu);
memset(keys, 0, max_entries * sizeof(*keys));
- memset(values, 0, max_entries * sizeof(*values));
+ memset(values, 0, max_entries * value_size);
batch = 0;
total = 0;
/* iteratively lookup/delete elements with 'step'
* elements each.
*/
count = step;
- nospace_err = false;
while (true) {
err = bpf_map_lookup_batch(map_fd,
- total ? &batch : NULL, &batch,
- keys + total,
- values + total,
- &count, &opts);
+ total ? &batch : NULL,
+ &batch, keys + total,
+ values + total * value_size,
+ &count, &opts);
CHECK((err && errno != ENOENT), "lookup with steps",
"error: %s\n", strerror(errno));
@@ -107,13 +130,10 @@ void test_array_map_batch_ops(void)
}
- if (nospace_err == true)
- continue;
-
CHECK(total != max_entries, "lookup with steps",
"total = %u, max_entries = %u\n", total, max_entries);
- map_batch_verify(visited, max_entries, keys, values);
+ map_batch_verify(visited, max_entries, keys, values, is_pcpu);
total_success++;
}
@@ -121,9 +141,30 @@ void test_array_map_batch_ops(void)
CHECK(total_success == 0, "check total_success",
"unexpected failure\n");
- printf("%s:PASS\n", __func__);
-
free(keys);
free(values);
free(visited);
}
+
+static void array_map_batch_ops(void)
+{
+ __test_map_lookup_and_update_batch(false);
+ printf("test_%s:PASS\n", __func__);
+}
+
+static void array_percpu_map_batch_ops(void)
+{
+ __test_map_lookup_and_update_batch(true);
+ printf("test_%s:PASS\n", __func__);
+}
+
+void test_array_map_batch_ops(void)
+{
+ nr_cpus = libbpf_num_possible_cpus();
+
+	CHECK(nr_cpus < 0, "nr_cpus checking",
+	      "error: get possible cpus failed\n");
+
+ array_map_batch_ops();
+ array_percpu_map_batch_ops();
+}
diff --git a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c
new file mode 100644
index 000000000000..2e986e5e4cac
--- /dev/null
+++ b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <arpa/inet.h>
+#include <linux/bpf.h>
+#include <netinet/in.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include <test_maps.h>
+
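+/* the key layout matches struct bpf_lpm_trie_key: a 32-bit prefix length
+ * followed by the address data, here a single IPv4 address
+ */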
+struct test_lpm_key {
+ __u32 prefix;
+ struct in_addr ipv4;
+};
+
+static void map_batch_update(int map_fd, __u32 max_entries,
+ struct test_lpm_key *keys, int *values)
+{
+ __u32 i;
+ int err;
+ char buff[16] = { 0 };
+ DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
+ .elem_flags = 0,
+ .flags = 0,
+ );
+
+ for (i = 0; i < max_entries; i++) {
+ keys[i].prefix = 32;
+ snprintf(buff, 16, "192.168.1.%d", i + 1);
+ inet_pton(AF_INET, buff, &keys[i].ipv4);
+ values[i] = i + 1;
+ }
+
+ err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);
+ CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno));
+}
+
+static void map_batch_verify(int *visited, __u32 max_entries,
+ struct test_lpm_key *keys, int *values)
+{
+ char buff[16] = { 0 };
+ int lower_byte = 0;
+ __u32 i;
+
+ memset(visited, 0, max_entries * sizeof(*visited));
+ for (i = 0; i < max_entries; i++) {
+		inet_ntop(AF_INET, &keys[i].ipv4, buff, sizeof(buff));
+		CHECK(sscanf(buff, "192.168.1.%d", &lower_byte) != 1,
+		      "sscanf()", "error: i %d\n", i);
+ CHECK(lower_byte != values[i], "key/value checking",
+ "error: i %d key %s value %d\n", i, buff, values[i]);
+ visited[i] = 1;
+ }
+ for (i = 0; i < max_entries; i++) {
+ CHECK(visited[i] != 1, "visited checking",
+ "error: keys array at index %d missing\n", i);
+ }
+}
+
+void test_lpm_trie_map_batch_ops(void)
+{
+ struct bpf_create_map_attr xattr = {
+ .name = "lpm_trie_map",
+ .map_type = BPF_MAP_TYPE_LPM_TRIE,
+ .key_size = sizeof(struct test_lpm_key),
+ .value_size = sizeof(int),
+ .map_flags = BPF_F_NO_PREALLOC,
+ };
+ struct test_lpm_key *keys, key;
+ int map_fd, *values, *visited;
+ __u32 step, count, total, total_success;
+ const __u32 max_entries = 10;
+ __u64 batch = 0;
+ int err;
+ DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
+ .elem_flags = 0,
+ .flags = 0,
+ );
+
+ xattr.max_entries = max_entries;
+ map_fd = bpf_create_map_xattr(&xattr);
+ CHECK(map_fd == -1, "bpf_create_map_xattr()", "error:%s\n",
+ strerror(errno));
+
+ keys = malloc(max_entries * sizeof(struct test_lpm_key));
+ values = malloc(max_entries * sizeof(int));
+ visited = malloc(max_entries * sizeof(int));
+ CHECK(!keys || !values || !visited, "malloc()", "error:%s\n",
+ strerror(errno));
+
+ total_success = 0;
+ for (step = 1; step < max_entries; step++) {
+ map_batch_update(map_fd, max_entries, keys, values);
+ map_batch_verify(visited, max_entries, keys, values);
+ memset(keys, 0, max_entries * sizeof(*keys));
+ memset(values, 0, max_entries * sizeof(*values));
+ batch = 0;
+ total = 0;
+ /* iteratively lookup/delete elements with 'step'
+ * elements each.
+ */
+ count = step;
+ while (true) {
+ err = bpf_map_lookup_batch(map_fd,
+ total ? &batch : NULL, &batch,
+ keys + total, values + total, &count, &opts);
+
+ CHECK((err && errno != ENOENT), "lookup with steps",
+ "error: %s\n", strerror(errno));
+
+ total += count;
+ if (err)
+ break;
+ }
+
+ CHECK(total != max_entries, "lookup with steps",
+ "total = %u, max_entries = %u\n", total, max_entries);
+
+ map_batch_verify(visited, max_entries, keys, values);
+
+ total = 0;
+ count = step;
+ while (total < max_entries) {
+ if (max_entries - total < step)
+ count = max_entries - total;
+ err = bpf_map_delete_batch(map_fd, keys + total, &count,
+ &opts);
+ CHECK((err && errno != ENOENT), "delete batch",
+ "error: %s\n", strerror(errno));
+ total += count;
+ if (err)
+ break;
+ }
+ CHECK(total != max_entries, "delete with steps",
+ "total = %u, max_entries = %u\n", total, max_entries);
+
+	/* check map is empty, errno == ENOENT */
+ err = bpf_map_get_next_key(map_fd, NULL, &key);
+ CHECK(!err || errno != ENOENT, "bpf_map_get_next_key()",
+ "error: %s\n", strerror(errno));
+
+ total_success++;
+ }
+
+ CHECK(total_success == 0, "check total_success",
+ "unexpected failure\n");
+
+ printf("%s:PASS\n", __func__);
+
+ free(keys);
+ free(values);
+ free(visited);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index a0ee87c8e1ea..9dc4e3dfbcf3 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -2,6 +2,44 @@
#include <test_progs.h>
#include "test_attach_probe.skel.h"
+#if defined(__powerpc64__) && defined(_CALL_ELF) && _CALL_ELF == 2
+
+#define OP_RT_RA_MASK 0xffff0000UL
+#define LIS_R2 0x3c400000UL
+#define ADDIS_R2_R12 0x3c4c0000UL
+#define ADDI_R2_R2 0x38420000UL
+
+static ssize_t get_offset(ssize_t addr, ssize_t base)
+{
+ u32 *insn = (u32 *) addr;
+
+ /*
+ * A PPC64 ABIv2 function may have a local and a global entry
+ * point. We need to use the local entry point when patching
+ * functions, so identify and step over the global entry point
+ * sequence.
+ *
+ * The global entry point sequence is always of the form:
+ *
+ * addis r2,r12,XXXX
+ * addi r2,r2,XXXX
+ *
+ * A linker optimisation may convert the addis to lis:
+ *
+ * lis r2,XXXX
+ * addi r2,r2,XXXX
+ */
+ if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
+ ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
+ ((*(insn + 1) & OP_RT_RA_MASK) == ADDI_R2_R2))
+ return (ssize_t)(insn + 2) - base;
+ else
+ return addr - base;
+}
+#else
+#define get_offset(addr, base) (addr - base)
+#endif
+
ssize_t get_base_addr() {
size_t start, offset;
char buf[256];
@@ -36,7 +74,7 @@ void test_attach_probe(void)
if (CHECK(base_addr < 0, "get_base_addr",
"failed to find base addr: %zd", base_addr))
return;
- uprobe_offset = (size_t)&get_base_addr - base_addr;
+ uprobe_offset = get_offset((size_t)&get_base_addr, base_addr);
skel = test_attach_probe__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 74c45d557a2b..2d3590cfb5e1 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -147,6 +147,7 @@ static void test_task_stack(void)
return;
do_dummy_read(skel->progs.dump_task_stack);
+ do_dummy_read(skel->progs.get_task_user_stacks);
bpf_iter_task_stack__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
index e698ee6bb6c2..3d002c245d2b 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
@@ -76,6 +76,7 @@ void test_bpf_verif_scale(void)
{ "loop2.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
{ "loop4.o", BPF_PROG_TYPE_SCHED_CLS },
{ "loop5.o", BPF_PROG_TYPE_SCHED_CLS },
+ { "loop6.o", BPF_PROG_TYPE_KPROBE },
/* partial unroll. 19k insn in a loop.
* Total program size 20.8k insn.
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index 6a7ee7420701..0457ae32b270 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -1903,7 +1903,7 @@ static struct btf_raw_test raw_tests[] = {
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
- BTF_TYPE_ENC(0, 0x10000000, 4),
+ BTF_TYPE_ENC(0, 0x20000000, 4),
BTF_END_RAW,
},
.str_sec = "",
@@ -3531,6 +3531,136 @@ static struct btf_raw_test raw_tests[] = {
.max_entries = 1,
},
+{
+ .descr = "float test #1, well-formed",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
+ /* [1] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [2] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 4), /* [3] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 8), /* [4] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 12), /* [5] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 16), /* [6] */
+ BTF_STRUCT_ENC(NAME_TBD, 5, 48), /* [7] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+ BTF_MEMBER_ENC(NAME_TBD, 3, 32),
+ BTF_MEMBER_ENC(NAME_TBD, 4, 64),
+ BTF_MEMBER_ENC(NAME_TBD, 5, 128),
+ BTF_MEMBER_ENC(NAME_TBD, 6, 256),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0_Float16\0float\0double\0_Float80\0long_double"
+ "\0floats\0a\0b\0c\0d\0e"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "float_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 48,
+ .key_type_id = 1,
+ .value_type_id = 7,
+ .max_entries = 1,
+},
+{
+ .descr = "float test #2, invalid vlen",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
+ /* [1] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 1), 4),
+ /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0float"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "float_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 2,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "vlen != 0",
+},
+{
+ .descr = "float test #3, invalid kind_flag",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
+ /* [1] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FLOAT, 1, 0), 4),
+ /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0float"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "float_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 2,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid btf_info kind_flag",
+},
+{
+ .descr = "float test #4, member does not fit",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
+ /* [1] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 4), /* [2] */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 2), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0float\0floats\0x"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "float_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 3,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Member exceeds struct_size",
+},
+{
+ .descr = "float test #5, member is not properly aligned",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
+ /* [1] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 4), /* [2] */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 8),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0float\0floats\0x"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "float_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 3,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Member is not properly aligned",
+},
+{
+ .descr = "float test #6, invalid size",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
+ /* [1] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 6), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0float"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "float_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 6,
+ .key_type_id = 1,
+ .value_type_id = 2,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid type_size",
+},
+
}; /* struct btf_raw_test raw_tests[] */
static const char *get_next_str(const char *start, const char *end)
@@ -6281,11 +6411,12 @@ const struct btf_dedup_test dedup_tests[] = {
/* int[16] */
BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */
/* struct s { */
- BTF_STRUCT_ENC(NAME_NTH(2), 4, 84), /* [3] */
+ BTF_STRUCT_ENC(NAME_NTH(2), 5, 88), /* [3] */
BTF_MEMBER_ENC(NAME_NTH(3), 4, 0), /* struct s *next; */
BTF_MEMBER_ENC(NAME_NTH(4), 5, 64), /* const int *a; */
BTF_MEMBER_ENC(NAME_NTH(5), 2, 128), /* int b[16]; */
BTF_MEMBER_ENC(NAME_NTH(6), 1, 640), /* int c; */
+ BTF_MEMBER_ENC(NAME_NTH(8), 13, 672), /* float d; */
/* ptr -> [3] struct s */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> [6] const int */
@@ -6296,39 +6427,43 @@ const struct btf_dedup_test dedup_tests[] = {
/* full copy of the above */
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [7] */
BTF_TYPE_ARRAY_ENC(7, 7, 16), /* [8] */
- BTF_STRUCT_ENC(NAME_NTH(2), 4, 84), /* [9] */
+ BTF_STRUCT_ENC(NAME_NTH(2), 5, 88), /* [9] */
BTF_MEMBER_ENC(NAME_NTH(3), 10, 0),
BTF_MEMBER_ENC(NAME_NTH(4), 11, 64),
BTF_MEMBER_ENC(NAME_NTH(5), 8, 128),
BTF_MEMBER_ENC(NAME_NTH(6), 7, 640),
+ BTF_MEMBER_ENC(NAME_NTH(8), 13, 672),
BTF_PTR_ENC(9), /* [10] */
BTF_PTR_ENC(12), /* [11] */
BTF_CONST_ENC(7), /* [12] */
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [13] */
BTF_END_RAW,
},
- BTF_STR_SEC("\0int\0s\0next\0a\0b\0c\0"),
+ BTF_STR_SEC("\0int\0s\0next\0a\0b\0c\0float\0d"),
},
.expect = {
.raw_types = {
/* int */
- BTF_TYPE_INT_ENC(NAME_NTH(4), BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* int[16] */
BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */
/* struct s { */
- BTF_STRUCT_ENC(NAME_NTH(6), 4, 84), /* [3] */
- BTF_MEMBER_ENC(NAME_NTH(5), 4, 0), /* struct s *next; */
+ BTF_STRUCT_ENC(NAME_NTH(8), 5, 88), /* [3] */
+ BTF_MEMBER_ENC(NAME_NTH(7), 4, 0), /* struct s *next; */
BTF_MEMBER_ENC(NAME_NTH(1), 5, 64), /* const int *a; */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 128), /* int b[16]; */
BTF_MEMBER_ENC(NAME_NTH(3), 1, 640), /* int c; */
+ BTF_MEMBER_ENC(NAME_NTH(4), 7, 672), /* float d; */
/* ptr -> [3] struct s */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> [6] const int */
BTF_PTR_ENC(6), /* [5] */
/* const -> [1] int */
BTF_CONST_ENC(1), /* [6] */
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [7] */
BTF_END_RAW,
},
- BTF_STR_SEC("\0a\0b\0c\0int\0next\0s"),
+ BTF_STR_SEC("\0a\0b\0c\0d\0int\0float\0next\0s"),
},
.opts = {
.dont_resolve_fwds = false,
@@ -6449,9 +6584,10 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */
BTF_END_RAW,
},
- BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M"),
+ BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N"),
},
.expect = {
.raw_types = {
@@ -6474,16 +6610,17 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */
BTF_END_RAW,
},
- BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M"),
+ BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N"),
},
.opts = {
.dont_resolve_fwds = false,
},
},
{
- .descr = "dedup: no int duplicates",
+ .descr = "dedup: no int/float duplicates",
.input = {
.raw_types = {
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8),
@@ -6498,9 +6635,15 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8),
/* different byte size */
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ /* all allowed sizes */
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 2),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 4),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 8),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 12),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 16),
BTF_END_RAW,
},
- BTF_STR_SEC("\0int\0some other int"),
+ BTF_STR_SEC("\0int\0some other int\0float"),
},
.expect = {
.raw_types = {
@@ -6516,9 +6659,15 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8),
/* different byte size */
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ /* all allowed sizes */
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 2),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 4),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 8),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 12),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 16),
BTF_END_RAW,
},
- BTF_STR_SEC("\0int\0some other int"),
+ BTF_STR_SEC("\0int\0some other int\0float"),
},
.opts = {
.dont_resolve_fwds = false,
@@ -6630,6 +6779,7 @@ static int btf_type_size(const struct btf_type *t)
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
+ case BTF_KIND_FLOAT:
return base_size;
case BTF_KIND_INT:
return base_size + sizeof(__u32);
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index c60091ee8a21..5e129dc2073c 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -77,7 +77,7 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file);
fd = mkstemp(out_file);
- if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) {
+ if (!ASSERT_GE(fd, 0, "create_tmp")) {
err = fd;
goto done;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_endian.c b/tools/testing/selftests/bpf/prog_tests/btf_endian.c
index 8c52d72c876e..8ab5d3e358dd 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_endian.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_endian.c
@@ -6,8 +6,6 @@
#include <test_progs.h>
#include <bpf/btf.h>
-static int duration = 0;
-
void test_btf_endian() {
#if __BYTE_ORDER == __LITTLE_ENDIAN
enum btf_endianness endian = BTF_LITTLE_ENDIAN;
@@ -71,7 +69,7 @@ void test_btf_endian() {
/* now modify original BTF */
var_id = btf__add_var(btf, "some_var", BTF_VAR_GLOBAL_ALLOCATED, 1);
- CHECK(var_id <= 0, "var_id", "failed %d\n", var_id);
+ ASSERT_GT(var_id, 0, "var_id");
btf__free(swap_btf);
swap_btf = NULL;
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c
index 4d9b514b3fd9..736796e56ed1 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c
@@ -54,7 +54,7 @@ void test_cgroup_link(void)
for (i = 0; i < cg_nr; i++) {
cgs[i].fd = create_and_get_cgroup(cgs[i].path);
- if (CHECK(cgs[i].fd < 0, "cg_create", "fail: %d\n", cgs[i].fd))
+ if (!ASSERT_GE(cgs[i].fd, 0, "cg_create"))
goto cleanup;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index 06eb956ff7bb..607710826dca 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -210,11 +210,6 @@ static int duration = 0;
.bpf_obj_file = "test_core_reloc_existence.o", \
.btf_src_file = "btf__core_reloc_" #name ".o" \
-#define FIELD_EXISTS_ERR_CASE(name) { \
- FIELD_EXISTS_CASE_COMMON(name), \
- .fails = true, \
-}
-
#define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name) \
.case_name = test_name_prefix#name, \
.bpf_obj_file = objfile, \
@@ -222,7 +217,7 @@ static int duration = 0;
#define BITFIELDS_CASE(name, ...) { \
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
- "direct:", name), \
+ "probed:", name), \
.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
.input_len = sizeof(struct core_reloc_##name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
@@ -230,7 +225,7 @@ static int duration = 0;
.output_len = sizeof(struct core_reloc_bitfields_output), \
}, { \
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
- "probed:", name), \
+ "direct:", name), \
.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
.input_len = sizeof(struct core_reloc_##name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
@@ -266,6 +261,7 @@ static int duration = 0;
.arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \
.ptr_sz = 8, /* always 8-byte pointer for BPF */ \
.enum_sz = sizeof(((type *)0)->enum_field), \
+ .float_sz = sizeof(((type *)0)->float_field), \
}
#define SIZE_CASE(name) { \
@@ -550,8 +546,7 @@ static struct core_reloc_test_case test_cases[] = {
ARRAYS_ERR_CASE(arrays___err_too_small),
ARRAYS_ERR_CASE(arrays___err_too_shallow),
ARRAYS_ERR_CASE(arrays___err_non_array),
- ARRAYS_ERR_CASE(arrays___err_wrong_val_type1),
- ARRAYS_ERR_CASE(arrays___err_wrong_val_type2),
+ ARRAYS_ERR_CASE(arrays___err_wrong_val_type),
ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr),
/* enum/ptr/int handling scenarios */
@@ -642,13 +637,25 @@ static struct core_reloc_test_case test_cases[] = {
},
.output_len = sizeof(struct core_reloc_existence_output),
},
-
- FIELD_EXISTS_ERR_CASE(existence__err_int_sz),
- FIELD_EXISTS_ERR_CASE(existence__err_int_type),
- FIELD_EXISTS_ERR_CASE(existence__err_int_kind),
- FIELD_EXISTS_ERR_CASE(existence__err_arr_kind),
- FIELD_EXISTS_ERR_CASE(existence__err_arr_value_type),
- FIELD_EXISTS_ERR_CASE(existence__err_struct_type),
+ {
+ FIELD_EXISTS_CASE_COMMON(existence___wrong_field_defs),
+ .input = STRUCT_TO_CHAR_PTR(core_reloc_existence___wrong_field_defs) {
+ },
+ .input_len = sizeof(struct core_reloc_existence___wrong_field_defs),
+ .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
+ .a_exists = 0,
+ .b_exists = 0,
+ .c_exists = 0,
+ .arr_exists = 0,
+ .s_exists = 0,
+ .a_value = 0xff000001u,
+ .b_value = 0xff000002u,
+ .c_value = 0xff000003u,
+ .arr_value = 0xff000004u,
+ .s_value = 0xff000005u,
+ },
+ .output_len = sizeof(struct core_reloc_existence_output),
+ },
/* bitfield relocation checks */
BITFIELDS_CASE(bitfields, {
@@ -857,13 +864,20 @@ void test_core_reloc(void)
"prog '%s' not found\n", probe_name))
goto cleanup;
+
+ if (test_case->btf_src_file) {
+ err = access(test_case->btf_src_file, R_OK);
+ if (!ASSERT_OK(err, "btf_src_file"))
+ goto cleanup;
+ }
+
load_attr.obj = obj;
load_attr.log_level = 0;
load_attr.target_btf_path = test_case->btf_src_file;
err = bpf_object__load_xattr(&load_attr);
if (err) {
if (!test_case->fails)
- CHECK(false, "obj_load", "failed to load prog '%s': %d\n", probe_name, err);
+ ASSERT_OK(err, "obj_load");
goto cleanup;
}
@@ -902,10 +916,8 @@ void test_core_reloc(void)
goto cleanup;
}
- if (test_case->fails) {
- CHECK(false, "obj_load_fail", "should fail to load prog '%s'\n", probe_name);
+ if (!ASSERT_FALSE(test_case->fails, "obj_load_should_fail"))
goto cleanup;
- }
equal = memcmp(data->out, test_case->output,
test_case->output_len) == 0;
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
index 04ebbf1cb390..7cb111b11995 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
@@ -3,35 +3,57 @@
#include <test_progs.h>
#include "fentry_test.skel.h"
-void test_fentry_test(void)
+static int fentry_test(struct fentry_test *fentry_skel)
{
- struct fentry_test *fentry_skel = NULL;
int err, prog_fd, i;
__u32 duration = 0, retval;
+ struct bpf_link *link;
__u64 *result;
- fentry_skel = fentry_test__open_and_load();
- if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n"))
- goto cleanup;
-
err = fentry_test__attach(fentry_skel);
- if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err))
- goto cleanup;
+ if (!ASSERT_OK(err, "fentry_attach"))
+ return err;
+
+ /* Check that already linked program can't be attached again. */
+ link = bpf_program__attach(fentry_skel->progs.test1);
+ if (!ASSERT_ERR_PTR(link, "fentry_attach_link"))
+ return -1;
prog_fd = bpf_program__fd(fentry_skel->progs.test1);
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
- CHECK(err || retval, "test_run",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(retval, 0, "test_run");
result = (__u64 *)fentry_skel->bss;
- for (i = 0; i < 6; i++) {
- if (CHECK(result[i] != 1, "result",
- "fentry_test%d failed err %lld\n", i + 1, result[i]))
- goto cleanup;
+ for (i = 0; i < sizeof(*fentry_skel->bss) / sizeof(__u64); i++) {
+ if (!ASSERT_EQ(result[i], 1, "fentry_result"))
+ return -1;
}
+ fentry_test__detach(fentry_skel);
+
+ /* zero results for re-attach test */
+ memset(fentry_skel->bss, 0, sizeof(*fentry_skel->bss));
+ return 0;
+}
+
+void test_fentry_test(void)
+{
+ struct fentry_test *fentry_skel = NULL;
+ int err;
+
+ fentry_skel = fentry_test__open_and_load();
+ if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load"))
+ goto cleanup;
+
+ err = fentry_test(fentry_skel);
+ if (!ASSERT_OK(err, "fentry_first_attach"))
+ goto cleanup;
+
+ err = fentry_test(fentry_skel);
+ ASSERT_OK(err, "fentry_second_attach");
+
cleanup:
fentry_test__destroy(fentry_skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
index 5c0448910426..63990842d20f 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -58,42 +58,73 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
test_cb cb)
{
struct bpf_object *obj = NULL, *tgt_obj;
+ __u32 retval, tgt_prog_id, info_len;
+ struct bpf_prog_info prog_info = {};
struct bpf_program **prog = NULL;
struct bpf_link **link = NULL;
- __u32 duration = 0, retval;
int err, tgt_fd, i;
+ struct btf *btf;
err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
&tgt_obj, &tgt_fd);
- if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
- target_obj_file, err, errno))
+ if (!ASSERT_OK(err, "tgt_prog_load"))
return;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
.attach_prog_fd = tgt_fd,
);
+ info_len = sizeof(prog_info);
+ err = bpf_obj_get_info_by_fd(tgt_fd, &prog_info, &info_len);
+ if (!ASSERT_OK(err, "tgt_fd_get_info"))
+ goto close_prog;
+
+ tgt_prog_id = prog_info.id;
+ btf = bpf_object__btf(tgt_obj);
+
link = calloc(sizeof(struct bpf_link *), prog_cnt);
+ if (!ASSERT_OK_PTR(link, "link_ptr"))
+ goto close_prog;
+
prog = calloc(sizeof(struct bpf_program *), prog_cnt);
- if (CHECK(!link || !prog, "alloc_memory", "failed to alloc memory"))
+ if (!ASSERT_OK_PTR(prog, "prog_ptr"))
goto close_prog;
obj = bpf_object__open_file(obj_file, &opts);
- if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
- "failed to open %s: %ld\n", obj_file,
- PTR_ERR(obj)))
+ if (!ASSERT_OK_PTR(obj, "obj_open"))
goto close_prog;
err = bpf_object__load(obj);
- if (CHECK(err, "obj_load", "err %d\n", err))
+ if (!ASSERT_OK(err, "obj_load"))
goto close_prog;
for (i = 0; i < prog_cnt; i++) {
+ struct bpf_link_info link_info;
+ char *tgt_name;
+ __s32 btf_id;
+
+ tgt_name = strstr(prog_name[i], "/");
+ if (!ASSERT_OK_PTR(tgt_name, "tgt_name"))
+ goto close_prog;
+ btf_id = btf__find_by_name_kind(btf, tgt_name + 1, BTF_KIND_FUNC);
+
prog[i] = bpf_object__find_program_by_title(obj, prog_name[i]);
- if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name[i]))
+ if (!ASSERT_OK_PTR(prog[i], prog_name[i]))
goto close_prog;
+
link[i] = bpf_program__attach_trace(prog[i]);
- if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
+ if (!ASSERT_OK_PTR(link[i], "attach_trace"))
goto close_prog;
+
+ info_len = sizeof(link_info);
+ memset(&link_info, 0, sizeof(link_info));
+ err = bpf_obj_get_info_by_fd(bpf_link__fd(link[i]),
+ &link_info, &info_len);
+ ASSERT_OK(err, "link_fd_get_info");
+ ASSERT_EQ(link_info.tracing.attach_type,
+ bpf_program__get_expected_attach_type(prog[i]),
+ "link_attach_type");
+ ASSERT_EQ(link_info.tracing.target_obj_id, tgt_prog_id, "link_tgt_obj_id");
+ ASSERT_EQ(link_info.tracing.target_btf_id, btf_id, "link_tgt_btf_id");
}
if (cb) {
@@ -106,10 +137,9 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
goto close_prog;
err = bpf_prog_test_run(tgt_fd, 1, &pkt_v6, sizeof(pkt_v6),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv6",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
+ NULL, NULL, &retval, NULL);
+ ASSERT_OK(err, "prog_run");
+ ASSERT_EQ(retval, 0, "prog_run_ret");
if (check_data_map(obj, prog_cnt, false))
goto close_prog;
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
index 6c4d42a2386f..ccc7e8a34ab6 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
@@ -39,7 +39,7 @@ void test_fexit_sleep(void)
goto cleanup;
cpid = clone(do_sleep, child_stack + STACK_SIZE, CLONE_FILES | SIGCHLD, fexit_skel);
- if (CHECK(cpid == -1, "clone", strerror(errno)))
+ if (CHECK(cpid == -1, "clone", "%s\n", strerror(errno)))
goto cleanup;
/* wait until first sys_nanosleep ends and second sys_nanosleep starts */
@@ -65,7 +65,7 @@ void test_fexit_sleep(void)
/* kill the thread to unwind sys_nanosleep stack through the trampoline */
kill(cpid, 9);
- if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", strerror(errno)))
+ if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", "%s\n", strerror(errno)))
goto cleanup;
if (CHECK(WEXITSTATUS(wstatus) != 0, "exitstatus", "failed"))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
index 78d7a2765c27..6792e41f7f69 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
@@ -3,35 +3,57 @@
#include <test_progs.h>
#include "fexit_test.skel.h"
-void test_fexit_test(void)
+static int fexit_test(struct fexit_test *fexit_skel)
{
- struct fexit_test *fexit_skel = NULL;
int err, prog_fd, i;
__u32 duration = 0, retval;
+ struct bpf_link *link;
__u64 *result;
- fexit_skel = fexit_test__open_and_load();
- if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n"))
- goto cleanup;
-
err = fexit_test__attach(fexit_skel);
- if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
- goto cleanup;
+ if (!ASSERT_OK(err, "fexit_attach"))
+ return err;
+
+ /* Check that already linked program can't be attached again. */
+ link = bpf_program__attach(fexit_skel->progs.test1);
+ if (!ASSERT_ERR_PTR(link, "fexit_attach_link"))
+ return -1;
prog_fd = bpf_program__fd(fexit_skel->progs.test1);
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
- CHECK(err || retval, "test_run",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(retval, 0, "test_run");
result = (__u64 *)fexit_skel->bss;
- for (i = 0; i < 6; i++) {
- if (CHECK(result[i] != 1, "result",
- "fexit_test%d failed err %lld\n", i + 1, result[i]))
- goto cleanup;
+ for (i = 0; i < sizeof(*fexit_skel->bss) / sizeof(__u64); i++) {
+ if (!ASSERT_EQ(result[i], 1, "fexit_result"))
+ return -1;
}
+ fexit_test__detach(fexit_skel);
+
+ /* zero results for re-attach test */
+ memset(fexit_skel->bss, 0, sizeof(*fexit_skel->bss));
+ return 0;
+}
+
+void test_fexit_test(void)
+{
+ struct fexit_test *fexit_skel = NULL;
+ int err;
+
+ fexit_skel = fexit_test__open_and_load();
+ if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load"))
+ goto cleanup;
+
+ err = fexit_test(fexit_skel);
+ if (!ASSERT_OK(err, "fexit_first_attach"))
+ goto cleanup;
+
+ err = fexit_test(fexit_skel);
+ ASSERT_OK(err, "fexit_second_attach");
+
cleanup:
fexit_test__destroy(fexit_skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/for_each.c b/tools/testing/selftests/bpf/prog_tests/for_each.c
new file mode 100644
index 000000000000..68eb12a287d4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/for_each.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "for_each_hash_map_elem.skel.h"
+#include "for_each_array_map_elem.skel.h"
+
+static unsigned int duration;
+
+static void test_hash_map(void)
+{
+ int i, err, hashmap_fd, max_entries, percpu_map_fd;
+ struct for_each_hash_map_elem *skel;
+ __u64 *percpu_valbuf = NULL;
+ __u32 key, num_cpus, retval;
+ __u64 val;
+
+ skel = for_each_hash_map_elem__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load"))
+ return;
+
+ hashmap_fd = bpf_map__fd(skel->maps.hashmap);
+ max_entries = bpf_map__max_entries(skel->maps.hashmap);
+ for (i = 0; i < max_entries; i++) {
+ key = i;
+ val = i + 1;
+ err = bpf_map_update_elem(hashmap_fd, &key, &val, BPF_ANY);
+ if (!ASSERT_OK(err, "map_update"))
+ goto out;
+ }
+
+ num_cpus = bpf_num_possible_cpus();
+ percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
+ percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
+ if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
+ goto out;
+
+ key = 1;
+ for (i = 0; i < num_cpus; i++)
+ percpu_valbuf[i] = i + 1;
+ err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
+ if (!ASSERT_OK(err, "percpu_map_update"))
+ goto out;
+
+ err = bpf_prog_test_run(bpf_program__fd(skel->progs.test_pkt_access),
+ 1, &pkt_v4, sizeof(pkt_v4), NULL, NULL,
+ &retval, &duration);
+ if (CHECK(err || retval, "ipv4", "err %d errno %d retval %d\n",
+ err, errno, retval))
+ goto out;
+
+ ASSERT_EQ(skel->bss->hashmap_output, 4, "hashmap_output");
+ ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");
+
+ key = 1;
+ err = bpf_map_lookup_elem(hashmap_fd, &key, &val);
+ ASSERT_ERR(err, "hashmap_lookup");
+
+ ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
+ ASSERT_LT(skel->bss->cpu, num_cpus, "num_cpus");
+ ASSERT_EQ(skel->bss->percpu_map_elems, 1, "percpu_map_elems");
+ ASSERT_EQ(skel->bss->percpu_key, 1, "percpu_key");
+ ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val");
+ ASSERT_EQ(skel->bss->percpu_output, 100, "percpu_output");
+out:
+ free(percpu_valbuf);
+ for_each_hash_map_elem__destroy(skel);
+}
+
+static void test_array_map(void)
+{
+ __u32 key, num_cpus, max_entries, retval;
+ int i, arraymap_fd, percpu_map_fd, err;
+ struct for_each_array_map_elem *skel;
+ __u64 *percpu_valbuf = NULL;
+ __u64 val, expected_total;
+
+ skel = for_each_array_map_elem__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load"))
+ return;
+
+ arraymap_fd = bpf_map__fd(skel->maps.arraymap);
+ expected_total = 0;
+ max_entries = bpf_map__max_entries(skel->maps.arraymap);
+ for (i = 0; i < max_entries; i++) {
+ key = i;
+ val = i + 1;
+ /* skip the last iteration for expected total */
+ if (i != max_entries - 1)
+ expected_total += val;
+ err = bpf_map_update_elem(arraymap_fd, &key, &val, BPF_ANY);
+ if (!ASSERT_OK(err, "map_update"))
+ goto out;
+ }
+
+ num_cpus = bpf_num_possible_cpus();
+ percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
+ percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
+ if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
+ goto out;
+
+ key = 0;
+ for (i = 0; i < num_cpus; i++)
+ percpu_valbuf[i] = i + 1;
+ err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
+ if (!ASSERT_OK(err, "percpu_map_update"))
+ goto out;
+
+ err = bpf_prog_test_run(bpf_program__fd(skel->progs.test_pkt_access),
+ 1, &pkt_v4, sizeof(pkt_v4), NULL, NULL,
+ &retval, &duration);
+ if (CHECK(err || retval, "ipv4", "err %d errno %d retval %d\n",
+ err, errno, retval))
+ goto out;
+
+ ASSERT_EQ(skel->bss->arraymap_output, expected_total, "array_output");
+ ASSERT_EQ(skel->bss->cpu + 1, skel->bss->percpu_val, "percpu_val");
+
+out:
+ free(percpu_valbuf);
+ for_each_array_map_elem__destroy(skel);
+}
+
+void test_for_each(void)
+{
+ if (test__start_subtest("hash_map"))
+ test_hash_map();
+ if (test__start_subtest("array_map"))
+ test_array_map();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
index 42c3a3103c26..d65107919998 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
@@ -134,7 +134,7 @@ void test_kfree_skb(void)
/* make sure kfree_skb program was triggered
* and it sent expected skb into ring buffer
*/
- CHECK_FAIL(!passed);
+ ASSERT_TRUE(passed, "passed");
err = bpf_map_lookup_elem(bpf_map__fd(global_data), &zero, test_ok);
if (CHECK(err, "get_result",
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
new file mode 100644
index 000000000000..7fc0951ee75f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "kfunc_call_test.skel.h"
+#include "kfunc_call_test_subprog.skel.h"
+
+static void test_main(void)
+{
+ struct kfunc_call_test *skel;
+ int prog_fd, retval, err;
+
+ skel = kfunc_call_test__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.kfunc_call_test1);
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, (__u32 *)&retval, NULL);
+ ASSERT_OK(err, "bpf_prog_test_run(test1)");
+ ASSERT_EQ(retval, 12, "test1-retval");
+
+ prog_fd = bpf_program__fd(skel->progs.kfunc_call_test2);
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, (__u32 *)&retval, NULL);
+ ASSERT_OK(err, "bpf_prog_test_run(test2)");
+ ASSERT_EQ(retval, 3, "test2-retval");
+
+ kfunc_call_test__destroy(skel);
+}
+
+static void test_subprog(void)
+{
+ struct kfunc_call_test_subprog *skel;
+ int prog_fd, retval, err;
+
+ skel = kfunc_call_test_subprog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.kfunc_call_test1);
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, (__u32 *)&retval, NULL);
+ ASSERT_OK(err, "bpf_prog_test_run(test1)");
+ ASSERT_EQ(retval, 10, "test1-retval");
+ ASSERT_NEQ(skel->data->active_res, -1, "active_res");
+ ASSERT_EQ(skel->data->sk_state, BPF_TCP_CLOSE, "sk_state");
+
+ kfunc_call_test_subprog__destroy(skel);
+}
+
+void test_kfunc_call(void)
+{
+ if (test__start_subtest("main"))
+ test_main();
+
+ if (test__start_subtest("subprog"))
+ test_subprog();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
new file mode 100644
index 000000000000..e9916f2817ec
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+#include <sys/syscall.h>
+#include "linked_funcs.skel.h"
+
+void test_linked_funcs(void)
+{
+ int err;
+ struct linked_funcs *skel;
+
+ skel = linked_funcs__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->rodata->my_tid = syscall(SYS_gettid);
+ skel->bss->syscall_id = SYS_getpgid;
+
+ err = linked_funcs__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ err = linked_funcs__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger */
+ syscall(SYS_getpgid);
+
+ ASSERT_EQ(skel->bss->output_val1, 2000 + 2000, "output_val1");
+ ASSERT_EQ(skel->bss->output_ctx1, SYS_getpgid, "output_ctx1");
+ ASSERT_EQ(skel->bss->output_weak1, 42, "output_weak1");
+
+ ASSERT_EQ(skel->bss->output_val2, 2 * 1000 + 2 * (2 * 1000), "output_val2");
+ ASSERT_EQ(skel->bss->output_ctx2, SYS_getpgid, "output_ctx2");
+ /* output_weak2 should never be updated */
+ ASSERT_EQ(skel->bss->output_weak2, 0, "output_weak2");
+
+cleanup:
+ linked_funcs__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_maps.c b/tools/testing/selftests/bpf/prog_tests/linked_maps.c
new file mode 100644
index 000000000000..85dcaaaf2775
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/linked_maps.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+#include <sys/syscall.h>
+#include "linked_maps.skel.h"
+
+void test_linked_maps(void)
+{
+ int err;
+ struct linked_maps *skel;
+
+ skel = linked_maps__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ err = linked_maps__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger */
+ syscall(SYS_getpgid);
+
+ ASSERT_EQ(skel->bss->output_first1, 2000, "output_first1");
+ ASSERT_EQ(skel->bss->output_second1, 2, "output_second1");
+ ASSERT_EQ(skel->bss->output_weak1, 2, "output_weak1");
+
+cleanup:
+ linked_maps__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_vars.c b/tools/testing/selftests/bpf/prog_tests/linked_vars.c
new file mode 100644
index 000000000000..267166abe4c1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/linked_vars.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+#include <sys/syscall.h>
+#include "linked_vars.skel.h"
+
+void test_linked_vars(void)
+{
+ int err;
+ struct linked_vars *skel;
+
+ skel = linked_vars__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->bss->input_bss1 = 1000;
+ skel->bss->input_bss2 = 2000;
+ skel->bss->input_bss_weak = 3000;
+
+ err = linked_vars__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ err = linked_vars__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger */
+ syscall(SYS_getpgid);
+
+ ASSERT_EQ(skel->bss->output_bss1, 1000 + 2000 + 3000, "output_bss1");
+ ASSERT_EQ(skel->bss->output_bss2, 1000 + 2000 + 3000, "output_bss2");
+ /* 10 comes from "winner" input_data_weak in first obj file */
+	ASSERT_EQ(skel->bss->output_data1, 1 + 2 + 10, "output_data1");
+	ASSERT_EQ(skel->bss->output_data2, 1 + 2 + 10, "output_data2");
+ /* 100 comes from "winner" input_rodata_weak in first obj file */
+	ASSERT_EQ(skel->bss->output_rodata1, 11 + 22 + 100, "output_rodata1");
+	ASSERT_EQ(skel->bss->output_rodata2, 11 + 22 + 100, "output_rodata2");
+
+cleanup:
+ linked_vars__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/map_ptr.c b/tools/testing/selftests/bpf/prog_tests/map_ptr.c
index c230a573c373..4972f92205c7 100644
--- a/tools/testing/selftests/bpf/prog_tests/map_ptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/map_ptr.c
@@ -12,11 +12,22 @@ void test_map_ptr(void)
__u32 duration = 0, retval;
char buf[128];
int err;
+ int page_size = getpagesize();
- skel = map_ptr_kern__open_and_load();
- if (CHECK(!skel, "skel_open_load", "open_load failed\n"))
+ skel = map_ptr_kern__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
+ err = bpf_map__set_max_entries(skel->maps.m_ringbuf, page_size);
+ if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
+ goto cleanup;
+
+ err = map_ptr_kern__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ skel->bss->page_size = page_size;
+
err = bpf_prog_test_run(bpf_program__fd(skel->progs.cg_skb), 1, &pkt_v4,
sizeof(pkt_v4), buf, NULL, &retval, NULL);
diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c
index 9c3c5c0f068f..37b002ca1167 100644
--- a/tools/testing/selftests/bpf/prog_tests/mmap.c
+++ b/tools/testing/selftests/bpf/prog_tests/mmap.c
@@ -29,22 +29,36 @@ void test_mmap(void)
struct test_mmap *skel;
__u64 val = 0;
- skel = test_mmap__open_and_load();
- if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
+ skel = test_mmap__open();
+ if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
+ err = bpf_map__set_max_entries(skel->maps.rdonly_map, page_size);
+ if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+ goto cleanup;
+
+ /* at least 4 pages of data */
+ err = bpf_map__set_max_entries(skel->maps.data_map,
+ 4 * (page_size / sizeof(u64)));
+ if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+ goto cleanup;
+
+ err = test_mmap__load(skel);
+ if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+ goto cleanup;
+
bss_map = skel->maps.bss;
data_map = skel->maps.data_map;
data_map_fd = bpf_map__fd(data_map);
rdmap_fd = bpf_map__fd(skel->maps.rdonly_map);
- tmp1 = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
+ tmp1 = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) {
- munmap(tmp1, 4096);
+ munmap(tmp1, page_size);
goto cleanup;
}
/* now double-check if it's mmap()'able at all */
- tmp1 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, rdmap_fd, 0);
+ tmp1 = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rdmap_fd, 0);
if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/module_attach.c b/tools/testing/selftests/bpf/prog_tests/module_attach.c
index 5bc53d53d86e..d85a69b7ce44 100644
--- a/tools/testing/selftests/bpf/prog_tests/module_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/module_attach.c
@@ -45,12 +45,18 @@ static int trigger_module_test_write(int write_sz)
return 0;
}
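+
+/* libc provides no delete_module() wrapper, so invoke the raw syscall */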
+static int delete_module(const char *name, int flags)
+{
+ return syscall(__NR_delete_module, name, flags);
+}
+
void test_module_attach(void)
{
const int READ_SZ = 456;
const int WRITE_SZ = 457;
struct test_module_attach* skel;
struct test_module_attach__bss *bss;
+ struct bpf_link *link;
int err;
skel = test_module_attach__open();
@@ -84,6 +90,23 @@ void test_module_attach(void)
ASSERT_EQ(bss->fexit_ret, -EIO, "fexit_tet");
ASSERT_EQ(bss->fmod_ret_read_sz, READ_SZ, "fmod_ret");
+ test_module_attach__detach(skel);
+
+	/* attach fentry/fexit and make sure it gets a module reference */
+ link = bpf_program__attach(skel->progs.handle_fentry);
+ if (!ASSERT_OK_PTR(link, "attach_fentry"))
+ goto cleanup;
+
+ ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module");
+ bpf_link__destroy(link);
+
+ link = bpf_program__attach(skel->progs.handle_fexit);
+ if (!ASSERT_OK_PTR(link, "attach_fexit"))
+ goto cleanup;
+
+ ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module");
+ bpf_link__destroy(link);
+
cleanup:
test_module_attach__destroy(skel);
}
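
For reference, the two ASSERT_ERR(delete_module(...)) calls above rely on delete_module(2) failing with EBUSY while an attached fentry/fexit link holds the module reference. A small sketch that makes the check explicit (assumes the selftests' bpf_testmod module is loaded):

    #include <errno.h>
    #include <stdbool.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* true while a BPF link (or any other user) pins the module */
    static bool module_pinned(const char *name)
    {
        return syscall(__NR_delete_module, name, 0) == -1 && errno == EBUSY;
    }
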
diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
index 31a3114906e2..2535788e135f 100644
--- a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
+++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
@@ -68,10 +68,10 @@ static void test_ns_current_pid_tgid_new_ns(void)
cpid = clone(test_current_pid_tgid, child_stack + STACK_SIZE,
CLONE_NEWPID | SIGCHLD, NULL);
- if (CHECK(cpid == -1, "clone", strerror(errno)))
+ if (CHECK(cpid == -1, "clone", "%s\n", strerror(errno)))
return;
- if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", strerror(errno)))
+ if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", "%s\n", strerror(errno)))
return;
if (CHECK(WEXITSTATUS(wstatus) != 0, "newns_pidtgid", "failed"))
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
index 935a294f049a..131d7f7eeb42 100644
--- a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
+++ b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
@@ -2,12 +2,31 @@
#include <test_progs.h>
#include <network_helpers.h>
-void test_prog_run_xattr(void)
+#include "test_pkt_access.skel.h"
+
+static const __u32 duration;
+
+static void check_run_cnt(int prog_fd, __u64 run_cnt)
{
- const char *file = "./test_pkt_access.o";
- struct bpf_object *obj;
- char buf[10];
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
int err;
+
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (CHECK(err, "get_prog_info", "failed to get bpf_prog_info for fd %d\n", prog_fd))
+ return;
+
+ CHECK(run_cnt != info.run_cnt, "run_cnt",
+ "incorrect number of repetitions, want %llu have %llu\n", run_cnt, info.run_cnt);
+}
+
+void test_prog_run_xattr(void)
+{
+ struct test_pkt_access *skel;
+ int err, stats_fd = -1;
+ char buf[10] = {};
+ __u64 run_cnt = 0;
+
struct bpf_prog_test_run_attr tattr = {
.repeat = 1,
.data_in = &pkt_v4,
@@ -16,12 +35,15 @@ void test_prog_run_xattr(void)
.data_size_out = 5,
};
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj,
- &tattr.prog_fd);
- if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
+ stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);
+ if (CHECK_ATTR(stats_fd < 0, "enable_stats", "failed %d\n", errno))
return;
- memset(buf, 0, sizeof(buf));
+ skel = test_pkt_access__open_and_load();
+ if (CHECK_ATTR(!skel, "open_and_load", "failed\n"))
+ goto cleanup;
+
+ tattr.prog_fd = bpf_program__fd(skel->progs.test_pkt_access);
err = bpf_prog_test_run_xattr(&tattr);
CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run",
@@ -34,8 +56,12 @@ void test_prog_run_xattr(void)
CHECK_ATTR(buf[5] != 0, "overflow",
"BPF_PROG_TEST_RUN ignored size hint\n");
+ run_cnt += tattr.repeat;
+ check_run_cnt(tattr.prog_fd, run_cnt);
+
tattr.data_out = NULL;
tattr.data_size_out = 0;
+ tattr.repeat = 2;
errno = 0;
err = bpf_prog_test_run_xattr(&tattr);
@@ -46,5 +72,12 @@ void test_prog_run_xattr(void)
err = bpf_prog_test_run_xattr(&tattr);
CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err);
- bpf_object__close(obj);
+ run_cnt += tattr.repeat;
+ check_run_cnt(tattr.prog_fd, run_cnt);
+
+cleanup:
+ if (skel)
+ test_pkt_access__destroy(skel);
+ if (stats_fd != -1)
+ close(stats_fd);
}
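
Note that check_run_cnt() only sees nonzero counters while run-time stats are enabled; the bpf_enable_stats(BPF_STATS_RUN_TIME) fd opened at the top of the test switches accounting on, and closing it switches it back off. A self-contained sketch of the read side:

    #include <linux/bpf.h>
    #include <bpf/bpf.h>

    static __u64 prog_run_cnt(int prog_fd)
    {
        struct bpf_prog_info info = {};
        __u32 info_len = sizeof(info);

        if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len))
            return 0;    /* treat errors as "no runs observed" */
        return info.run_cnt;
    }
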
diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
index 6ace5e9efec1..d3c2de2c24d1 100644
--- a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
+++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
@@ -160,11 +160,8 @@ int test_resolve_btfids(void)
break;
if (i > 0) {
- ret = CHECK(test_set.ids[i - 1] > test_set.ids[i],
- "sort_check",
- "test_set is not sorted\n");
- if (ret)
- break;
+ if (!ASSERT_LE(test_set.ids[i - 1], test_set.ids[i], "sort_check"))
+ return -1;
}
}
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
index fddbc5db5d6a..de78617f6550 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
@@ -87,11 +87,20 @@ void test_ringbuf(void)
pthread_t thread;
long bg_ret = -1;
int err, cnt;
+ int page_size = getpagesize();
- skel = test_ringbuf__open_and_load();
- if (CHECK(!skel, "skel_open_load", "skeleton open&load failed\n"))
+ skel = test_ringbuf__open();
+ if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
+ err = bpf_map__set_max_entries(skel->maps.ringbuf, page_size);
+ if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+ goto cleanup;
+
+ err = test_ringbuf__load(skel);
+ if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+ goto cleanup;
+
/* only trigger BPF program for current process */
skel->bss->pid = getpid();
@@ -110,9 +119,9 @@ void test_ringbuf(void)
CHECK(skel->bss->avail_data != 3 * rec_sz,
"err_avail_size", "exp %ld, got %ld\n",
3L * rec_sz, skel->bss->avail_data);
- CHECK(skel->bss->ring_size != 4096,
+ CHECK(skel->bss->ring_size != page_size,
"err_ring_size", "exp %ld, got %ld\n",
- 4096L, skel->bss->ring_size);
+ (long)page_size, skel->bss->ring_size);
CHECK(skel->bss->cons_pos != 0,
"err_cons_pos", "exp %ld, got %ld\n",
0L, skel->bss->cons_pos);
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
index d37161e59bb2..cef63e703924 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
@@ -41,13 +41,42 @@ static int process_sample(void *ctx, void *data, size_t len)
void test_ringbuf_multi(void)
{
struct test_ringbuf_multi *skel;
- struct ring_buffer *ringbuf;
+ struct ring_buffer *ringbuf = NULL;
int err;
+ int page_size = getpagesize();
+ int proto_fd = -1;
- skel = test_ringbuf_multi__open_and_load();
- if (CHECK(!skel, "skel_open_load", "skeleton open&load failed\n"))
+ skel = test_ringbuf_multi__open();
+ if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
+ err = bpf_map__set_max_entries(skel->maps.ringbuf1, page_size);
+ if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+ goto cleanup;
+
+ err = bpf_map__set_max_entries(skel->maps.ringbuf2, page_size);
+ if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+ goto cleanup;
+
+ err = bpf_map__set_max_entries(bpf_map__inner_map(skel->maps.ringbuf_arr), page_size);
+ if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+ goto cleanup;
+
+ proto_fd = bpf_create_map(BPF_MAP_TYPE_RINGBUF, 0, 0, page_size, 0);
+ if (CHECK(proto_fd == -1, "bpf_create_map", "bpf_create_map failed\n"))
+ goto cleanup;
+
+ err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd);
+ if (CHECK(err != 0, "bpf_map__set_inner_map_fd", "bpf_map__set_inner_map_fd failed\n"))
+ goto cleanup;
+
+ err = test_ringbuf_multi__load(skel);
+ if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+ goto cleanup;
+
+ close(proto_fd);
+ proto_fd = -1;
+
/* only trigger BPF program for current process */
skel->bss->pid = getpid();
@@ -97,6 +126,8 @@ void test_ringbuf_multi(void)
2L, skel->bss->total);
cleanup:
+ if (proto_fd >= 0)
+ close(proto_fd);
ring_buffer__free(ringbuf);
test_ringbuf_multi__destroy(skel);
}
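
The ordering above matters: a map-in-map whose inner maps are resized at runtime needs an inner-map prototype fd set before load, and the prototype is only consulted during load, which is why proto_fd can be closed as soon as loading succeeds. Condensed (bpf_create_map() being the low-level API of this era):

    /* the prototype only conveys the inner map's shape */
    int proto_fd = bpf_create_map(BPF_MAP_TYPE_RINGBUF, 0, 0, getpagesize(), 0);
    int err = -1;

    if (proto_fd >= 0) {
        err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd);
        if (!err)
            err = test_ringbuf_multi__load(skel);
        close(proto_fd);    /* no longer needed once loaded */
    }
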
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
index 9ff0412e1fd3..45c82db3c58c 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
@@ -241,6 +241,48 @@ fail:
return -1;
}
+static __u64 socket_cookie(int fd)
+{
+ __u64 cookie;
+ socklen_t cookie_len = sizeof(cookie);
+
+ if (CHECK(getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &cookie_len) < 0,
+ "getsockopt(SO_COOKIE)", "%s\n", strerror(errno)))
+ return 0;
+ return cookie;
+}
+
+static int fill_sk_lookup_ctx(struct bpf_sk_lookup *ctx, const char *local_ip, __u16 local_port,
+ const char *remote_ip, __u16 remote_port)
+{
+ void *local, *remote;
+ int err;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->local_port = local_port;
+ ctx->remote_port = htons(remote_port);
+
+ if (is_ipv6(local_ip)) {
+ ctx->family = AF_INET6;
+ local = &ctx->local_ip6[0];
+ remote = &ctx->remote_ip6[0];
+ } else {
+ ctx->family = AF_INET;
+ local = &ctx->local_ip4;
+ remote = &ctx->remote_ip4;
+ }
+
+ err = inet_pton(ctx->family, local_ip, local);
+ if (CHECK(err != 1, "inet_pton", "local_ip failed\n"))
+ return 1;
+
+ err = inet_pton(ctx->family, remote_ip, remote);
+ if (CHECK(err != 1, "inet_pton", "remote_ip failed\n"))
+ return 1;
+
+ return 0;
+}
+
static int send_byte(int fd)
{
ssize_t n;
@@ -1009,18 +1051,27 @@ static void test_drop_on_reuseport(struct test_sk_lookup *skel)
static void run_sk_assign(struct test_sk_lookup *skel,
struct bpf_program *lookup_prog,
- const char *listen_ip, const char *connect_ip)
+ const char *remote_ip, const char *local_ip)
{
- int client_fd, peer_fd, server_fds[MAX_SERVERS] = { -1 };
- struct bpf_link *lookup_link;
+ int server_fds[MAX_SERVERS] = { -1 };
+ struct bpf_sk_lookup ctx;
+ __u64 server_cookie;
int i, err;
- lookup_link = attach_lookup_prog(lookup_prog);
- if (!lookup_link)
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .ctx_in = &ctx,
+ .ctx_size_in = sizeof(ctx),
+ .ctx_out = &ctx,
+ .ctx_size_out = sizeof(ctx),
+ );
+
+ if (fill_sk_lookup_ctx(&ctx, local_ip, EXT_PORT, remote_ip, INT_PORT))
return;
+ ctx.protocol = IPPROTO_TCP;
+
for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
- server_fds[i] = make_server(SOCK_STREAM, listen_ip, 0, NULL);
+ server_fds[i] = make_server(SOCK_STREAM, local_ip, 0, NULL);
if (server_fds[i] < 0)
goto close_servers;
@@ -1030,23 +1081,25 @@ static void run_sk_assign(struct test_sk_lookup *skel,
goto close_servers;
}
- client_fd = make_client(SOCK_STREAM, connect_ip, EXT_PORT);
- if (client_fd < 0)
+ server_cookie = socket_cookie(server_fds[SERVER_B]);
+ if (!server_cookie)
+ return;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(lookup_prog), &opts);
+ if (CHECK(err, "test_run", "failed with error %d\n", errno))
+ goto close_servers;
+
+ if (CHECK(ctx.cookie == 0, "ctx.cookie", "no socket selected\n"))
goto close_servers;
- peer_fd = accept(server_fds[SERVER_B], NULL, NULL);
- if (CHECK(peer_fd < 0, "accept", "failed\n"))
- goto close_client;
+ CHECK(ctx.cookie != server_cookie, "ctx.cookie",
+ "selected sk %llu instead of %llu\n", ctx.cookie, server_cookie);
- close(peer_fd);
-close_client:
- close(client_fd);
close_servers:
for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
if (server_fds[i] != -1)
close(server_fds[i]);
}
- bpf_link__destroy(lookup_link);
}
static void run_sk_assign_v4(struct test_sk_lookup *skel,
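
run_sk_assign() now drives the lookup program entirely through BPF_PROG_TEST_RUN instead of making real connections: the bpf_sk_lookup context goes in via ctx_in, and the program's socket selection comes back as a cookie in ctx_out. The general shape, with a hypothetical prog_fd:

    struct bpf_sk_lookup ctx = {
        .family   = AF_INET,
        .protocol = IPPROTO_TCP,
    };
    DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
        .ctx_in = &ctx, .ctx_size_in = sizeof(ctx),
        .ctx_out = &ctx, .ctx_size_out = sizeof(ctx),
    );

    int err = bpf_prog_test_run_opts(prog_fd, &opts);
    /* on success, ctx.cookie identifies the socket the program selected */
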
diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c
new file mode 100644
index 000000000000..a958c22aec75
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google LLC. */
+
+#include <test_progs.h>
+#include "test_snprintf.skel.h"
+#include "test_snprintf_single.skel.h"
+
+#define EXP_NUM_OUT "-8 9 96 -424242 1337 DABBAD00"
+#define EXP_NUM_RET sizeof(EXP_NUM_OUT)
+
+#define EXP_IP_OUT "127.000.000.001 0000:0000:0000:0000:0000:0000:0000:0001"
+#define EXP_IP_RET sizeof(EXP_IP_OUT)
+
+/* The third specifier, %pB, depends on compiler inlining, so don't check it */
+#define EXP_SYM_OUT "schedule schedule+0x0/"
+#define MIN_SYM_RET sizeof(EXP_SYM_OUT)
+
+/* The third specifier, %p, is a hashed pointer, which changes on every reboot */
+#define EXP_ADDR_OUT "0000000000000000 ffff00000add4e55 "
+#define EXP_ADDR_RET sizeof(EXP_ADDR_OUT "unknownhashedptr")
+
+#define EXP_STR_OUT "str1 longstr"
+#define EXP_STR_RET sizeof(EXP_STR_OUT)
+
+#define EXP_OVER_OUT "%over"
+#define EXP_OVER_RET 10
+
+#define EXP_PAD_OUT " 4 000"
+#define EXP_PAD_RET 900007
+
+#define EXP_NO_ARG_OUT "simple case"
+#define EXP_NO_ARG_RET 12
+
+#define EXP_NO_BUF_RET 29
+
+void test_snprintf_positive(void)
+{
+ char exp_addr_out[] = EXP_ADDR_OUT;
+ char exp_sym_out[] = EXP_SYM_OUT;
+ struct test_snprintf *skel;
+
+ skel = test_snprintf__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ if (!ASSERT_OK(test_snprintf__attach(skel), "skel_attach"))
+ goto cleanup;
+
+ /* trigger tracepoint */
+ usleep(1);
+
+ ASSERT_STREQ(skel->bss->num_out, EXP_NUM_OUT, "num_out");
+ ASSERT_EQ(skel->bss->num_ret, EXP_NUM_RET, "num_ret");
+
+ ASSERT_STREQ(skel->bss->ip_out, EXP_IP_OUT, "ip_out");
+ ASSERT_EQ(skel->bss->ip_ret, EXP_IP_RET, "ip_ret");
+
+ ASSERT_OK(memcmp(skel->bss->sym_out, exp_sym_out,
+ sizeof(exp_sym_out) - 1), "sym_out");
+ ASSERT_LT(MIN_SYM_RET, skel->bss->sym_ret, "sym_ret");
+
+ ASSERT_OK(memcmp(skel->bss->addr_out, exp_addr_out,
+ sizeof(exp_addr_out) - 1), "addr_out");
+ ASSERT_EQ(skel->bss->addr_ret, EXP_ADDR_RET, "addr_ret");
+
+ ASSERT_STREQ(skel->bss->str_out, EXP_STR_OUT, "str_out");
+ ASSERT_EQ(skel->bss->str_ret, EXP_STR_RET, "str_ret");
+
+ ASSERT_STREQ(skel->bss->over_out, EXP_OVER_OUT, "over_out");
+ ASSERT_EQ(skel->bss->over_ret, EXP_OVER_RET, "over_ret");
+
+ ASSERT_STREQ(skel->bss->pad_out, EXP_PAD_OUT, "pad_out");
+ ASSERT_EQ(skel->bss->pad_ret, EXP_PAD_RET, "pad_ret");
+
+ ASSERT_STREQ(skel->bss->noarg_out, EXP_NO_ARG_OUT, "no_arg_out");
+ ASSERT_EQ(skel->bss->noarg_ret, EXP_NO_ARG_RET, "no_arg_ret");
+
+ ASSERT_EQ(skel->bss->nobuf_ret, EXP_NO_BUF_RET, "no_buf_ret");
+
+cleanup:
+ test_snprintf__destroy(skel);
+}
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+/* Loads an eBPF object calling bpf_snprintf with up to 10 characters of fmt */
+static int load_single_snprintf(char *fmt)
+{
+ struct test_snprintf_single *skel;
+ int ret;
+
+ skel = test_snprintf_single__open();
+ if (!skel)
+ return -EINVAL;
+
+ memcpy(skel->rodata->fmt, fmt, min(strlen(fmt) + 1, 10));
+
+ ret = test_snprintf_single__load(skel);
+ test_snprintf_single__destroy(skel);
+
+ return ret;
+}
+
+void test_snprintf_negative(void)
+{
+ ASSERT_OK(load_single_snprintf("valid %d"), "valid usage");
+
+ ASSERT_ERR(load_single_snprintf("0123456789"), "no terminating zero");
+ ASSERT_ERR(load_single_snprintf("%d %d"), "too many specifiers");
+ ASSERT_ERR(load_single_snprintf("%pi5"), "invalid specifier 1");
+ ASSERT_ERR(load_single_snprintf("%a"), "invalid specifier 2");
+ ASSERT_ERR(load_single_snprintf("%"), "invalid specifier 3");
+ ASSERT_ERR(load_single_snprintf("%12345678"), "invalid specifier 4");
+ ASSERT_ERR(load_single_snprintf("%--------"), "invalid specifier 5");
+ ASSERT_ERR(load_single_snprintf("\x80"), "non ascii character");
+ ASSERT_ERR(load_single_snprintf("\x1"), "non printable character");
+}
+
+void test_snprintf(void)
+{
+ if (test__start_subtest("snprintf_positive"))
+ test_snprintf_positive();
+ if (test__start_subtest("snprintf_negative"))
+ test_snprintf_negative();
+}
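
The BPF side of these tests (not shown in this hunk) exercises the new bpf_snprintf() helper, which packs its arguments into a u64 array and requires the format string to live in read-only data. A minimal BPF-side sketch with hypothetical names:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    char out[64];
    long ret;

    static const char fmt[] = "%d %u";    /* must be in read-only data */

    SEC("tp/syscalls/sys_enter_nanosleep")
    int handler(void *ctx)
    {
        __u64 args[] = { -8, 9 };         /* one u64 slot per specifier */

        ret = bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
        return 0;
    }

    char _license[] SEC("license") = "GPL";
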
diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c b/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c
index 686b40f11a45..76e1f5fe18fa 100644
--- a/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c
@@ -42,9 +42,7 @@ void test_snprintf_btf(void)
* and it set expected return values from bpf_trace_printk()s
* and all tests ran.
*/
- if (CHECK(bss->ret <= 0,
- "bpf_snprintf_btf: got return value",
- "ret <= 0 %ld test %d\n", bss->ret, bss->ran_subtests))
+ if (!ASSERT_GT(bss->ret, 0, "bpf_snprintf_ret"))
goto cleanup;
if (CHECK(bss->ran_subtests == 0, "check if subtests ran",
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
index b8b48cac2ac3..ab77596b64e3 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -7,6 +7,7 @@
#include "test_skmsg_load_helpers.skel.h"
#include "test_sockmap_update.skel.h"
#include "test_sockmap_invalid_update.skel.h"
+#include "test_sockmap_skb_verdict_attach.skel.h"
#include "bpf_iter_sockmap.skel.h"
#define TCP_REPAIR 19 /* TCP sock is under repair right now */
@@ -281,6 +282,39 @@ out:
bpf_iter_sockmap__destroy(skel);
}
+static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first,
+ enum bpf_attach_type second)
+{
+ struct test_sockmap_skb_verdict_attach *skel;
+ int err, map, verdict;
+
+ skel = test_sockmap_skb_verdict_attach__open_and_load();
+ if (CHECK_FAIL(!skel)) {
+ perror("test_sockmap_skb_verdict_attach__open_and_load");
+ return;
+ }
+
+ verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+ map = bpf_map__fd(skel->maps.sock_map);
+
+ err = bpf_prog_attach(verdict, map, first, 0);
+ if (CHECK_FAIL(err)) {
+ perror("bpf_prog_attach");
+ goto out;
+ }
+
+ err = bpf_prog_attach(verdict, map, second, 0);
+ assert(err == -1 && errno == EBUSY);
+
+ err = bpf_prog_detach2(verdict, map, first);
+ if (CHECK_FAIL(err)) {
+ perror("bpf_prog_detach2");
+ goto out;
+ }
+out:
+ test_sockmap_skb_verdict_attach__destroy(skel);
+}
+
void test_sockmap_basic(void)
{
if (test__start_subtest("sockmap create_update_free"))
@@ -301,4 +335,10 @@ void test_sockmap_basic(void)
test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP);
if (test__start_subtest("sockhash copy"))
test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH);
+ if (test__start_subtest("sockmap skb_verdict attach")) {
+ test_sockmap_skb_verdict_attach(BPF_SK_SKB_VERDICT,
+ BPF_SK_SKB_STREAM_VERDICT);
+ test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT,
+ BPF_SK_SKB_VERDICT);
+ }
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
index d7d65a700799..648d9ae898d2 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
@@ -1014,8 +1014,8 @@ static void test_skb_redir_to_connected(struct test_sockmap_listen *skel,
struct bpf_map *inner_map, int family,
int sotype)
{
- int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
- int parser = bpf_program__fd(skel->progs.prog_skb_parser);
+ int verdict = bpf_program__fd(skel->progs.prog_stream_verdict);
+ int parser = bpf_program__fd(skel->progs.prog_stream_parser);
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
int sock_map = bpf_map__fd(inner_map);
int err;
@@ -1125,8 +1125,8 @@ static void test_skb_redir_to_listening(struct test_sockmap_listen *skel,
struct bpf_map *inner_map, int family,
int sotype)
{
- int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
- int parser = bpf_program__fd(skel->progs.prog_skb_parser);
+ int verdict = bpf_program__fd(skel->progs.prog_stream_verdict);
+ int parser = bpf_program__fd(skel->progs.prog_stream_parser);
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
int sock_map = bpf_map__fd(inner_map);
int err;
@@ -1603,6 +1603,141 @@ static void test_reuseport(struct test_sockmap_listen *skel,
}
}
+static void udp_redir_to_connected(int family, int sotype, int sock_mapfd,
+ int verd_mapfd, enum redir_mode mode)
+{
+ const char *log_prefix = redir_mode_str(mode);
+ struct sockaddr_storage addr;
+ int c0, c1, p0, p1;
+ unsigned int pass;
+ socklen_t len;
+ int err, n;
+ u64 value;
+ u32 key;
+ char b;
+
+ zero_verdict_count(verd_mapfd);
+
+ p0 = socket_loopback(family, sotype | SOCK_NONBLOCK);
+ if (p0 < 0)
+ return;
+ len = sizeof(addr);
+ err = xgetsockname(p0, sockaddr(&addr), &len);
+ if (err)
+ goto close_peer0;
+
+ c0 = xsocket(family, sotype | SOCK_NONBLOCK, 0);
+ if (c0 < 0)
+ goto close_peer0;
+ err = xconnect(c0, sockaddr(&addr), len);
+ if (err)
+ goto close_cli0;
+ err = xgetsockname(c0, sockaddr(&addr), &len);
+ if (err)
+ goto close_cli0;
+ err = xconnect(p0, sockaddr(&addr), len);
+ if (err)
+ goto close_cli0;
+
+ p1 = socket_loopback(family, sotype | SOCK_NONBLOCK);
+ if (p1 < 0)
+ goto close_cli0;
+ err = xgetsockname(p1, sockaddr(&addr), &len);
+ if (err)
+ goto close_cli0;
+
+ c1 = xsocket(family, sotype | SOCK_NONBLOCK, 0);
+ if (c1 < 0)
+ goto close_peer1;
+ err = xconnect(c1, sockaddr(&addr), len);
+ if (err)
+ goto close_cli1;
+ err = xgetsockname(c1, sockaddr(&addr), &len);
+ if (err)
+ goto close_cli1;
+ err = xconnect(p1, sockaddr(&addr), len);
+ if (err)
+ goto close_cli1;
+
+ key = 0;
+ value = p0;
+ err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_cli1;
+
+ key = 1;
+ value = p1;
+ err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_cli1;
+
+ n = write(c1, "a", 1);
+ if (n < 0)
+ FAIL_ERRNO("%s: write", log_prefix);
+ if (n == 0)
+ FAIL("%s: incomplete write", log_prefix);
+ if (n < 1)
+ goto close_cli1;
+
+ key = SK_PASS;
+ err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
+ if (err)
+ goto close_cli1;
+ if (pass != 1)
+ FAIL("%s: want pass count 1, have %d", log_prefix, pass);
+
+ n = read(mode == REDIR_INGRESS ? p0 : c0, &b, 1);
+ if (n < 0)
+ FAIL_ERRNO("%s: read", log_prefix);
+ if (n == 0)
+ FAIL("%s: incomplete read", log_prefix);
+
+close_cli1:
+ xclose(c1);
+close_peer1:
+ xclose(p1);
+close_cli0:
+ xclose(c0);
+close_peer0:
+ xclose(p0);
+}
+
+static void udp_skb_redir_to_connected(struct test_sockmap_listen *skel,
+ struct bpf_map *inner_map, int family)
+{
+ int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+ int verdict_map = bpf_map__fd(skel->maps.verdict_map);
+ int sock_map = bpf_map__fd(inner_map);
+ int err;
+
+ err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0);
+ if (err)
+ return;
+
+ skel->bss->test_ingress = false;
+ udp_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map,
+ REDIR_EGRESS);
+ skel->bss->test_ingress = true;
+ udp_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map,
+ REDIR_INGRESS);
+
+ xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT);
+}
+
+static void test_udp_redir(struct test_sockmap_listen *skel, struct bpf_map *map,
+ int family)
+{
+ const char *family_name, *map_name;
+ char s[MAX_TEST_NAME];
+
+ family_name = family_str(family);
+ map_name = map_type_str(map);
+ snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__);
+ if (!test__start_subtest(s))
+ return;
+ udp_skb_redir_to_connected(skel, map, family);
+}
+
static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map,
int family)
{
@@ -1611,6 +1746,7 @@ static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map,
test_redir(skel, map, family, SOCK_STREAM);
test_reuseport(skel, map, family, SOCK_STREAM);
test_reuseport(skel, map, family, SOCK_DGRAM);
+ test_udp_redir(skel, map, family);
}
void test_sockmap_listen(void)
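
For context, a BPF_SK_SKB_VERDICT program like the prog_skb_verdict attached above follows the usual sockmap verdict shape; a sketch, assuming sock_map and the test_ingress global are declared in the skeleton as the helpers above suggest:

    SEC("sk_skb")
    int prog_skb_verdict(struct __sk_buff *skb)
    {
        __u32 key = 0;    /* index of the destination socket in sock_map */

        return bpf_sk_redirect_map(skb, &sock_map, key,
                                   test_ingress ? BPF_F_INGRESS : 0);
    }
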
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
index d5b44b135c00..4b937e5dbaca 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
@@ -3,6 +3,7 @@
#include "cgroup_helpers.h"
#include <linux/tcp.h>
+#include "sockopt_sk.skel.h"
#ifndef SOL_TCP
#define SOL_TCP IPPROTO_TCP
@@ -191,60 +192,30 @@ err:
return -1;
}
-static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title)
+static void run_test(int cgroup_fd)
{
- enum bpf_attach_type attach_type;
- enum bpf_prog_type prog_type;
- struct bpf_program *prog;
- int err;
+ struct sockopt_sk *skel;
- err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
- if (err) {
- log_err("Failed to deduct types for %s BPF program", title);
- return -1;
- }
+ skel = sockopt_sk__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
- prog = bpf_object__find_program_by_title(obj, title);
- if (!prog) {
- log_err("Failed to find %s BPF program", title);
- return -1;
- }
-
- err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
- attach_type, 0);
- if (err) {
- log_err("Failed to attach %s BPF program", title);
- return -1;
- }
-
- return 0;
-}
-
-static void run_test(int cgroup_fd)
-{
- struct bpf_prog_load_attr attr = {
- .file = "./sockopt_sk.o",
- };
- struct bpf_object *obj;
- int ignored;
- int err;
-
- err = bpf_prog_load_xattr(&attr, &obj, &ignored);
- if (CHECK_FAIL(err))
- return;
+ skel->bss->page_size = getpagesize();
- err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt");
- if (CHECK_FAIL(err))
- goto close_bpf_object;
+ skel->links._setsockopt =
+ bpf_program__attach_cgroup(skel->progs._setsockopt, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links._setsockopt, "setsockopt_link"))
+ goto cleanup;
- err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt");
- if (CHECK_FAIL(err))
- goto close_bpf_object;
+ skel->links._getsockopt =
+ bpf_program__attach_cgroup(skel->progs._getsockopt, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links._getsockopt, "getsockopt_link"))
+ goto cleanup;
- CHECK_FAIL(getsetsockopt());
+ ASSERT_OK(getsetsockopt(), "getsetsockopt");
-close_bpf_object:
- bpf_object__close(obj);
+cleanup:
+ sockopt_sk__destroy(skel);
}
void test_sockopt_sk(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/static_linked.c b/tools/testing/selftests/bpf/prog_tests/static_linked.c
new file mode 100644
index 000000000000..46556976dccc
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/static_linked.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <test_progs.h>
+#include "test_static_linked.skel.h"
+
+void test_static_linked(void)
+{
+ int err;
+ struct test_static_linked* skel;
+
+ skel = test_static_linked__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->rodata->rovar1 = 1;
+ skel->bss->static_var1 = 2;
+ skel->bss->static_var11 = 3;
+
+ skel->rodata->rovar2 = 4;
+ skel->bss->static_var2 = 5;
+ skel->bss->static_var22 = 6;
+
+ err = test_static_linked__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ err = test_static_linked__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger */
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->var1, 1 * 2 + 2 + 3, "var1");
+ ASSERT_EQ(skel->bss->var2, 4 * 3 + 5 + 6, "var2");
+
+cleanup:
+ test_static_linked__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
new file mode 100644
index 000000000000..035c263aab1b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#define _GNU_SOURCE /* See feature_test_macros(7) */
+#include <unistd.h>
+#include <sys/syscall.h> /* For SYS_xxx definitions */
+#include <sys/types.h>
+#include <test_progs.h>
+#include "task_local_storage.skel.h"
+#include "task_local_storage_exit_creds.skel.h"
+#include "task_ls_recursion.skel.h"
+
+static void test_sys_enter_exit(void)
+{
+ struct task_local_storage *skel;
+ int err;
+
+ skel = task_local_storage__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ skel->bss->target_pid = syscall(SYS_gettid);
+
+ err = task_local_storage__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ syscall(SYS_gettid);
+ syscall(SYS_gettid);
+
+ /* 3x syscalls: 1x attach and 2x gettid */
+ ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt");
+ ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt");
+ ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt");
+out:
+ task_local_storage__destroy(skel);
+}
+
+static void test_exit_creds(void)
+{
+ struct task_local_storage_exit_creds *skel;
+ int err;
+
+ skel = task_local_storage_exit_creds__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ err = task_local_storage_exit_creds__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ /* trigger at least one exit_creds() */
+ if (CHECK_FAIL(system("ls > /dev/null")))
+ goto out;
+
+ /* sync rcu to make sure exit_creds() is called for "ls" */
+ kern_sync_rcu();
+ ASSERT_EQ(skel->bss->valid_ptr_count, 0, "valid_ptr_count");
+ ASSERT_NEQ(skel->bss->null_ptr_count, 0, "null_ptr_count");
+out:
+ task_local_storage_exit_creds__destroy(skel);
+}
+
+static void test_recursion(void)
+{
+ struct task_ls_recursion *skel;
+ int err;
+
+ skel = task_ls_recursion__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ err = task_ls_recursion__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+	/* trigger sys_enter and make sure it does not cause a deadlock */
+ syscall(SYS_gettid);
+
+out:
+ task_ls_recursion__destroy(skel);
+}
+
+void test_task_local_storage(void)
+{
+ if (test__start_subtest("sys_enter_exit"))
+ test_sys_enter_exit();
+ if (test__start_subtest("exit_creds"))
+ test_exit_creds();
+ if (test__start_subtest("recursion"))
+ test_recursion();
+}
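
The BPF side (not part of this diff) pairs a BPF_MAP_TYPE_TASK_STORAGE map with sys_enter/sys_exit programs; a minimal sketch with hypothetical map and counter names:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    struct {
        __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);    /* required for task storage */
        __type(key, int);
        __type(value, long);
    } enter_cnt_map SEC(".maps");

    SEC("tp_btf/sys_enter")
    int BPF_PROG(on_enter, struct pt_regs *regs, long id)
    {
        long *cnt;

        cnt = bpf_task_storage_get(&enter_cnt_map,
                                   bpf_get_current_task_btf(), 0,
                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (cnt)
            *cnt += 1;
        return 0;
    }

    char _license[] SEC("license") = "GPL";
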
diff --git a/tools/testing/selftests/bpf/prog_tests/test_ima.c b/tools/testing/selftests/bpf/prog_tests/test_ima.c
index b54bc0c351b7..0252f61d611a 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_ima.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_ima.c
@@ -68,7 +68,8 @@ void test_test_ima(void)
goto close_prog;
snprintf(cmd, sizeof(cmd), "./ima_setup.sh setup %s", measured_dir);
- if (CHECK_FAIL(system(cmd)))
+ err = system(cmd);
+ if (CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno))
goto close_clean;
err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
@@ -81,7 +82,8 @@ void test_test_ima(void)
close_clean:
snprintf(cmd, sizeof(cmd), "./ima_setup.sh cleanup %s", measured_dir);
- CHECK_FAIL(system(cmd));
+ err = system(cmd);
+ CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno);
close_prog:
ima__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_lsm.c b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
index 2755e4f81499..244c01125126 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_lsm.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
@@ -51,43 +51,64 @@ int exec_cmd(int *monitored_pid)
return -EINVAL;
}
-void test_test_lsm(void)
+static int test_lsm(struct lsm *skel)
{
- struct lsm *skel = NULL;
- int err, duration = 0;
+ struct bpf_link *link;
int buf = 1234;
-
- skel = lsm__open_and_load();
- if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
- goto close_prog;
+ int err;
err = lsm__attach(skel);
- if (CHECK(err, "attach", "lsm attach failed: %d\n", err))
- goto close_prog;
+ if (!ASSERT_OK(err, "attach"))
+ return err;
+
+	/* Check that an already linked program can't be attached again. */
+ link = bpf_program__attach(skel->progs.test_int_hook);
+ if (!ASSERT_ERR_PTR(link, "attach_link"))
+ return -1;
err = exec_cmd(&skel->bss->monitored_pid);
- if (CHECK(err < 0, "exec_cmd", "err %d errno %d\n", err, errno))
- goto close_prog;
+ if (!ASSERT_OK(err, "exec_cmd"))
+ return err;
- CHECK(skel->bss->bprm_count != 1, "bprm_count", "bprm_count = %d\n",
- skel->bss->bprm_count);
+ ASSERT_EQ(skel->bss->bprm_count, 1, "bprm_count");
skel->bss->monitored_pid = getpid();
err = stack_mprotect();
- if (CHECK(errno != EPERM, "stack_mprotect", "want err=EPERM, got %d\n",
- errno))
- goto close_prog;
+ if (!ASSERT_EQ(errno, EPERM, "stack_mprotect"))
+ return err;
- CHECK(skel->bss->mprotect_count != 1, "mprotect_count",
- "mprotect_count = %d\n", skel->bss->mprotect_count);
+ ASSERT_EQ(skel->bss->mprotect_count, 1, "mprotect_count");
syscall(__NR_setdomainname, &buf, -2L);
syscall(__NR_setdomainname, 0, -3L);
syscall(__NR_setdomainname, ~0L, -4L);
- CHECK(skel->bss->copy_test != 3, "copy_test",
- "copy_test = %d\n", skel->bss->copy_test);
+ ASSERT_EQ(skel->bss->copy_test, 3, "copy_test");
+
+ lsm__detach(skel);
+
+ skel->bss->copy_test = 0;
+ skel->bss->bprm_count = 0;
+ skel->bss->mprotect_count = 0;
+ return 0;
+}
+
+void test_test_lsm(void)
+{
+ struct lsm *skel = NULL;
+ int err;
+
+ skel = lsm__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "lsm_skel_load"))
+ goto close_prog;
+
+ err = test_lsm(skel);
+ if (!ASSERT_OK(err, "test_lsm_first_attach"))
+ goto close_prog;
+
+ err = test_lsm(skel);
+ ASSERT_OK(err, "test_lsm_second_attach");
close_prog:
lsm__destroy(skel);
diff --git a/tools/testing/selftests/bpf/progs/bind4_prog.c b/tools/testing/selftests/bpf/progs/bind4_prog.c
index 115a3b0ad984..474c6a62078a 100644
--- a/tools/testing/selftests/bpf/progs/bind4_prog.c
+++ b/tools/testing/selftests/bpf/progs/bind4_prog.c
@@ -57,6 +57,27 @@ static __inline int bind_to_device(struct bpf_sock_addr *ctx)
return 0;
}
+static __inline int bind_reuseport(struct bpf_sock_addr *ctx)
+{
+ int val = 1;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)) || !val)
+ return 1;
+ val = 0;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)) || val)
+ return 1;
+
+ return 0;
+}
+
static __inline int misc_opts(struct bpf_sock_addr *ctx, int opt)
{
int old, tmp, new = 0xeb9f;
@@ -127,6 +148,10 @@ int bind_v4_prog(struct bpf_sock_addr *ctx)
if (misc_opts(ctx, SO_MARK) || misc_opts(ctx, SO_PRIORITY))
return 0;
+	/* Set SO_REUSEPORT, then unset it again */
+ if (bind_reuseport(ctx))
+ return 0;
+
ctx->user_ip4 = bpf_htonl(SERV4_REWRITE_IP);
ctx->user_port = bpf_htons(SERV4_REWRITE_PORT);
diff --git a/tools/testing/selftests/bpf/progs/bind6_prog.c b/tools/testing/selftests/bpf/progs/bind6_prog.c
index 4c0d348034b9..c19cfa869f30 100644
--- a/tools/testing/selftests/bpf/progs/bind6_prog.c
+++ b/tools/testing/selftests/bpf/progs/bind6_prog.c
@@ -63,6 +63,27 @@ static __inline int bind_to_device(struct bpf_sock_addr *ctx)
return 0;
}
+static __inline int bind_reuseport(struct bpf_sock_addr *ctx)
+{
+ int val = 1;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)) || !val)
+ return 1;
+ val = 0;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)) || val)
+ return 1;
+
+ return 0;
+}
+
static __inline int misc_opts(struct bpf_sock_addr *ctx, int opt)
{
int old, tmp, new = 0xeb9f;
@@ -141,6 +162,10 @@ int bind_v6_prog(struct bpf_sock_addr *ctx)
if (misc_opts(ctx, SO_MARK) || misc_opts(ctx, SO_PRIORITY))
return 0;
+	/* Set SO_REUSEPORT, then unset it again */
+ if (bind_reuseport(ctx))
+ return 0;
+
ctx->user_ip6[0] = bpf_htonl(SERV6_REWRITE_IP_0);
ctx->user_ip6[1] = bpf_htonl(SERV6_REWRITE_IP_1);
ctx->user_ip6[2] = bpf_htonl(SERV6_REWRITE_IP_2);
diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c
index 6939bfd8690f..f62df4d023f9 100644
--- a/tools/testing/selftests/bpf/progs/bpf_cubic.c
+++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c
@@ -174,8 +174,8 @@ static __always_inline void bictcp_hystart_reset(struct sock *sk)
* as long as it is used in one of the func ptr
* under SEC(".struct_ops").
*/
-SEC("struct_ops/bictcp_init")
-void BPF_PROG(bictcp_init, struct sock *sk)
+SEC("struct_ops/bpf_cubic_init")
+void BPF_PROG(bpf_cubic_init, struct sock *sk)
{
struct bictcp *ca = inet_csk_ca(sk);
@@ -192,7 +192,7 @@ void BPF_PROG(bictcp_init, struct sock *sk)
* The remaining tcp-cubic functions have an easier way.
*/
SEC("no-sec-prefix-bictcp_cwnd_event")
-void BPF_PROG(bictcp_cwnd_event, struct sock *sk, enum tcp_ca_event event)
+void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
{
if (event == CA_EVENT_TX_START) {
struct bictcp *ca = inet_csk_ca(sk);
@@ -384,7 +384,7 @@ tcp_friendliness:
}
/* Or simply use BPF_STRUCT_OPS to avoid the SEC() boilerplate. */
-void BPF_STRUCT_OPS(bictcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
+void BPF_STRUCT_OPS(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
@@ -403,7 +403,7 @@ void BPF_STRUCT_OPS(bictcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
tcp_cong_avoid_ai(tp, ca->cnt, acked);
}
-__u32 BPF_STRUCT_OPS(bictcp_recalc_ssthresh, struct sock *sk)
+__u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
@@ -420,7 +420,7 @@ __u32 BPF_STRUCT_OPS(bictcp_recalc_ssthresh, struct sock *sk)
return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}
-void BPF_STRUCT_OPS(bictcp_state, struct sock *sk, __u8 new_state)
+void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state)
{
if (new_state == TCP_CA_Loss) {
bictcp_reset(inet_csk_ca(sk));
@@ -496,7 +496,7 @@ static __always_inline void hystart_update(struct sock *sk, __u32 delay)
}
}
-void BPF_STRUCT_OPS(bictcp_acked, struct sock *sk,
+void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
const struct ack_sample *sample)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -525,21 +525,21 @@ void BPF_STRUCT_OPS(bictcp_acked, struct sock *sk,
hystart_update(sk, delay);
}
-__u32 BPF_STRUCT_OPS(tcp_reno_undo_cwnd, struct sock *sk)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
+extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
- return max(tp->snd_cwnd, tp->prior_cwnd);
+__u32 BPF_STRUCT_OPS(bpf_cubic_undo_cwnd, struct sock *sk)
+{
+ return tcp_reno_undo_cwnd(sk);
}
SEC(".struct_ops")
struct tcp_congestion_ops cubic = {
- .init = (void *)bictcp_init,
- .ssthresh = (void *)bictcp_recalc_ssthresh,
- .cong_avoid = (void *)bictcp_cong_avoid,
- .set_state = (void *)bictcp_state,
- .undo_cwnd = (void *)tcp_reno_undo_cwnd,
- .cwnd_event = (void *)bictcp_cwnd_event,
- .pkts_acked = (void *)bictcp_acked,
+ .init = (void *)bpf_cubic_init,
+ .ssthresh = (void *)bpf_cubic_recalc_ssthresh,
+ .cong_avoid = (void *)bpf_cubic_cong_avoid,
+ .set_state = (void *)bpf_cubic_state,
+ .undo_cwnd = (void *)bpf_cubic_undo_cwnd,
+ .cwnd_event = (void *)bpf_cubic_cwnd_event,
+ .pkts_acked = (void *)bpf_cubic_acked,
.name = "bpf_cubic",
};
diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
index 4dc1a967776a..fd42247da8b4 100644
--- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c
+++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
@@ -194,22 +194,12 @@ __u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}
-SEC("struct_ops/tcp_reno_cong_avoid")
-void BPF_PROG(tcp_reno_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- if (!tcp_is_cwnd_limited(sk))
- return;
+extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
- /* In "safe" area, increase. */
- if (tcp_in_slow_start(tp)) {
- acked = tcp_slow_start(tp, acked);
- if (!acked)
- return;
- }
- /* In dangerous area, increase slowly. */
- tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
+SEC("struct_ops/dctcp_reno_cong_avoid")
+void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
+{
+ tcp_reno_cong_avoid(sk, ack, acked);
}
SEC(".struct_ops")
@@ -226,7 +216,7 @@ struct tcp_congestion_ops dctcp = {
.in_ack_event = (void *)dctcp_update_alpha,
.cwnd_event = (void *)dctcp_cwnd_event,
.ssthresh = (void *)dctcp_ssthresh,
- .cong_avoid = (void *)tcp_reno_cong_avoid,
+ .cong_avoid = (void *)dctcp_cong_avoid,
.undo_cwnd = (void *)dctcp_cwnd_undo,
.set_state = (void *)dctcp_state,
.flags = TCP_CONG_NEEDS_ECN,
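
bpf_cubic and bpf_dctcp now share the same trick: rather than reimplementing a tcp_reno_* function in BPF, the kernel's own symbol is declared with __ksym and called directly. The pattern in isolation (my_cong_avoid is a hypothetical name):

    /* declare a kernel function so BPF code can call it directly */
    extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;

    SEC("struct_ops/my_cong_avoid")
    void BPF_PROG(my_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
    {
        tcp_reno_cong_avoid(sk, ack, acked);    /* thin wrapper */
    }
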
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
index 50e59a2e142e..43c36f5f7649 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
@@ -35,3 +35,30 @@ int dump_task_stack(struct bpf_iter__task *ctx)
return 0;
}
+
+SEC("iter/task")
+int get_task_user_stacks(struct bpf_iter__task *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct task_struct *task = ctx->task;
+ uint64_t buf_sz = 0;
+ int64_t res;
+
+ if (task == (void *)0)
+ return 0;
+
+ res = bpf_get_task_stack(task, entries,
+ MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, BPF_F_USER_STACK);
+ if (res <= 0)
+ return 0;
+
+ buf_sz += res;
+
+	/* If the verifier doesn't refine the result of bpf_get_task_stack and
+	 * instead assumes res is entirely unknown, this program will fail to
+	 * load, as the verifier will believe the max buf_sz value allows
+	 * reading past the end of entries in the bpf_seq_write call.
+	 */
+ bpf_seq_write(seq, &entries, buf_sz);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
deleted file mode 100644
index dd0ffa518f36..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_arr_kind x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
deleted file mode 100644
index bc83372088ad..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_arr_value_type x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
deleted file mode 100644
index 917bec41be08..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_int_kind x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
deleted file mode 100644
index 6ec7e6ec1c91..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_int_sz x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
deleted file mode 100644
index 7bbcacf2b0d1..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_int_type x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
deleted file mode 100644
index f384dd38ec70..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_struct_type x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
new file mode 100644
index 000000000000..d14b496190c3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_existence___wrong_field_defs x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
index 3ac0c9afc35a..8aaa24a00322 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
@@ -213,6 +213,12 @@ struct struct_with_embedded_stuff {
struct_in_array_t *v;
};
+struct float_struct {
+ float f;
+ const double *d;
+ volatile long double *ld;
+};
+
struct root_struct {
enum e1 _1;
enum e2 _2;
@@ -227,6 +233,7 @@ struct root_struct {
union_fwd_t *_12;
union_fwd_ptr_t _13;
struct struct_with_embedded_stuff _14;
+ struct float_struct _15;
};
/* ------ END-EXPECTED-OUTPUT ------ */
diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
index 9a2850850121..c95c0cabe951 100644
--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
+++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
@@ -700,27 +700,11 @@ struct core_reloc_existence___minimal {
int a;
};
-struct core_reloc_existence___err_wrong_int_sz {
- short a;
-};
-
-struct core_reloc_existence___err_wrong_int_type {
+struct core_reloc_existence___wrong_field_defs {
+ void *a;
int b[1];
-};
-
-struct core_reloc_existence___err_wrong_int_kind {
struct{ int x; } c;
-};
-
-struct core_reloc_existence___err_wrong_arr_kind {
int arr;
-};
-
-struct core_reloc_existence___err_wrong_arr_value_type {
- short arr[1];
-};
-
-struct core_reloc_existence___err_wrong_struct_type {
int s;
};
@@ -807,6 +791,7 @@ struct core_reloc_size_output {
int arr_elem_sz;
int ptr_sz;
int enum_sz;
+ int float_sz;
};
struct core_reloc_size {
@@ -816,6 +801,7 @@ struct core_reloc_size {
int arr_field[4];
void *ptr_field;
enum { VALUE = 123 } enum_field;
+ float float_field;
};
struct core_reloc_size___diff_sz {
@@ -825,6 +811,7 @@ struct core_reloc_size___diff_sz {
char arr_field[10];
void *ptr_field;
enum { OTHER_VALUE = 0xFFFFFFFFFFFFFFFF } enum_field;
+ double float_field;
};
/* Error case of two candidates with the fields (int_field) at the same
@@ -839,6 +826,7 @@ struct core_reloc_size___err_ambiguous1 {
int arr_field[4];
void *ptr_field;
enum { VALUE___1 = 123 } enum_field;
+ float float_field;
};
struct core_reloc_size___err_ambiguous2 {
@@ -850,6 +838,7 @@ struct core_reloc_size___err_ambiguous2 {
int arr_field[4];
void *ptr_field;
enum { VALUE___2 = 123 } enum_field;
+ float float_field;
};
/*
diff --git a/tools/testing/selftests/bpf/progs/fentry_test.c b/tools/testing/selftests/bpf/progs/fentry_test.c
index 5f645fdaba6f..52a550d281d9 100644
--- a/tools/testing/selftests/bpf/progs/fentry_test.c
+++ b/tools/testing/selftests/bpf/progs/fentry_test.c
@@ -64,7 +64,7 @@ __u64 test7_result = 0;
SEC("fentry/bpf_fentry_test7")
int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
{
- if (arg == 0)
+ if (!arg)
test7_result = 1;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/fexit_test.c b/tools/testing/selftests/bpf/progs/fexit_test.c
index 0952affb22a6..8f1ccb7302e1 100644
--- a/tools/testing/selftests/bpf/progs/fexit_test.c
+++ b/tools/testing/selftests/bpf/progs/fexit_test.c
@@ -65,7 +65,7 @@ __u64 test7_result = 0;
SEC("fexit/bpf_fentry_test7")
int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
{
- if (arg == 0)
+ if (!arg)
test7_result = 1;
return 0;
}
@@ -74,7 +74,7 @@ __u64 test8_result = 0;
SEC("fexit/bpf_fentry_test8")
int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
{
- if (arg->a == 0)
+ if (!arg->a)
test8_result = 1;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
new file mode 100644
index 000000000000..75e8e1069fe7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 3);
+ __type(key, __u32);
+ __type(value, __u64);
+} arraymap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} percpu_map SEC(".maps");
+
+struct callback_ctx {
+ int output;
+};
+
+static __u64
+check_array_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *data)
+{
+ data->output += *val;
+ if (*key == 1)
+ return 1; /* stop the iteration */
+ return 0;
+}
+
+__u32 cpu = 0;
+__u64 percpu_val = 0;
+
+static __u64
+check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *data)
+{
+ cpu = bpf_get_smp_processor_id();
+ percpu_val = *val;
+ return 0;
+}
+
+u32 arraymap_output = 0;
+
+SEC("classifier")
+int test_pkt_access(struct __sk_buff *skb)
+{
+ struct callback_ctx data;
+
+ data.output = 0;
+ bpf_for_each_map_elem(&arraymap, check_array_elem, &data, 0);
+ arraymap_output = data.output;
+
+ bpf_for_each_map_elem(&percpu_map, check_percpu_elem, (void *)0, 0);
+ return 0;
+}
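
The callback contract both of these new programs rely on: bpf_for_each_map_elem() invokes the callback once per element, a return of 0 continues the walk and 1 stops it early, and the helper itself returns how many elements were visited (or a negative error). In miniature:

    static __u64 stop_at_key_one(struct bpf_map *map, __u32 *key, __u64 *val,
                                 void *ctx)
    {
        return *key == 1 ? 1 : 0;    /* 1 ends the iteration early */
    }

    /* inside a program: */
    long n = bpf_for_each_map_elem(&arraymap, stop_at_key_one, (void *)0, 0);
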
diff --git a/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
new file mode 100644
index 000000000000..913dd91aafff
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 3);
+ __type(key, __u32);
+ __type(value, __u64);
+} hashmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} percpu_map SEC(".maps");
+
+struct callback_ctx {
+ struct __sk_buff *ctx;
+ int input;
+ int output;
+};
+
+static __u64
+check_hash_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *data)
+{
+ struct __sk_buff *skb = data->ctx;
+ __u32 k;
+ __u64 v;
+
+ if (skb) {
+ k = *key;
+ v = *val;
+ if (skb->len == 10000 && k == 10 && v == 10)
+ data->output = 3; /* impossible path */
+ else
+ data->output = 4;
+ } else {
+ data->output = data->input;
+ bpf_map_delete_elem(map, key);
+ }
+
+ return 0;
+}
+
+__u32 cpu = 0;
+__u32 percpu_called = 0;
+__u32 percpu_key = 0;
+__u64 percpu_val = 0;
+int percpu_output = 0;
+
+static __u64
+check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *unused)
+{
+ struct callback_ctx data;
+
+ percpu_called++;
+ cpu = bpf_get_smp_processor_id();
+ percpu_key = *key;
+ percpu_val = *val;
+
+ data.ctx = 0;
+ data.input = 100;
+ data.output = 0;
+ bpf_for_each_map_elem(&hashmap, check_hash_elem, &data, 0);
+ percpu_output = data.output;
+
+ return 0;
+}
+
+int hashmap_output = 0;
+int hashmap_elems = 0;
+int percpu_map_elems = 0;
+
+SEC("classifier")
+int test_pkt_access(struct __sk_buff *skb)
+{
+ struct callback_ctx data;
+
+ data.ctx = skb;
+ data.input = 10;
+ data.output = 0;
+ hashmap_elems = bpf_for_each_map_elem(&hashmap, check_hash_elem, &data, 0);
+ hashmap_output = data.output;
+
+ percpu_map_elems = bpf_for_each_map_elem(&percpu_map, check_percpu_elem,
+ (void *)0, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
new file mode 100644
index 000000000000..470f8723e463
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_tcp_helpers.h"
+
+extern int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym;
+extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b,
+ __u32 c, __u64 d) __ksym;
+
+SEC("classifier")
+int kfunc_call_test2(struct __sk_buff *skb)
+{
+ struct bpf_sock *sk = skb->sk;
+
+ if (!sk)
+ return -1;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk)
+ return -1;
+
+ return bpf_kfunc_call_test2((struct sock *)sk, 1, 2);
+}
+
+SEC("classifier")
+int kfunc_call_test1(struct __sk_buff *skb)
+{
+ struct bpf_sock *sk = skb->sk;
+ __u64 a = 1ULL << 32;
+ __u32 ret;
+
+ if (!sk)
+ return -1;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk)
+ return -1;
+
+ a = bpf_kfunc_call_test1((struct sock *)sk, 1, a | 2, 3, a | 4);
+ ret = a >> 32; /* ret should be 2 */
+ ret += (__u32)a; /* ret should be 12 */
+
+ return ret;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
new file mode 100644
index 000000000000..b2dcb7d9cb03
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_tcp_helpers.h"
+
+extern const int bpf_prog_active __ksym;
+extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b,
+ __u32 c, __u64 d) __ksym;
+extern struct sock *bpf_kfunc_call_test3(struct sock *sk) __ksym;
+int active_res = -1;
+int sk_state = -1;
+
+int __noinline f1(struct __sk_buff *skb)
+{
+ struct bpf_sock *sk = skb->sk;
+ int *active;
+
+ if (!sk)
+ return -1;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk)
+ return -1;
+
+ active = (int *)bpf_per_cpu_ptr(&bpf_prog_active,
+ bpf_get_smp_processor_id());
+ if (active)
+ active_res = *active;
+
+ sk_state = bpf_kfunc_call_test3((struct sock *)sk)->__sk_common.skc_state;
+
+ return (__u32)bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4);
+}
+
+SEC("classifier")
+int kfunc_call_test1(struct __sk_buff *skb)
+{
+ return f1(skb);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs1.c b/tools/testing/selftests/bpf/progs/linked_funcs1.c
new file mode 100644
index 000000000000..b964ec1390c2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_funcs1.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* weak and shared between two files */
+const volatile int my_tid __weak;
+long syscall_id __weak;
+
+int output_val1;
+int output_ctx1;
+int output_weak1;
+
+/* same "subprog" name in all files, but it's ok because they all are static */
+static __noinline int subprog(int x)
+{
+ /* but different formula */
+ return x * 1;
+}
+
+/* Global functions can't be void */
+int set_output_val1(int x)
+{
+ output_val1 = x + subprog(x);
+ return x;
+}
+
+/* This function can't be verified as global, as it assumes raw_tp/sys_enter
+ * context and accesses the syscall id (second argument). So we mark it as
+ * __hidden, so that libbpf will mark it as static in the final object file,
+ * right before verifying it in the kernel.
+ *
+ * But we don't mark it as __hidden here, rather at the extern site. __hidden
+ * visibility is "contaminating": it gets propagated from either the extern or
+ * the actual definition (including from the losing __weak definition).
+ */
+void set_output_ctx1(__u64 *ctx)
+{
+ output_ctx1 = ctx[1]; /* long id, same as in BPF_PROG below */
+}
+
+/* this weak instance should win because it's the first one */
+__weak int set_output_weak(int x)
+{
+ output_weak1 = x;
+ return x;
+}
+
+extern int set_output_val2(int x);
+
+/* here we'll force set_output_ctx2() to be __hidden in the final obj file */
+__hidden extern void set_output_ctx2(__u64 *ctx);
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handler1, struct pt_regs *regs, long id)
+{
+ if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
+ return 0;
+
+ set_output_val2(1000);
+ set_output_ctx2(ctx); /* ctx definition is hidden in BPF_PROG macro */
+
+ /* keep input value the same across both files to avoid dependency on
+ * handler call order; differentiate by output_weak1 vs output_weak2.
+ */
+ set_output_weak(42);
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs2.c b/tools/testing/selftests/bpf/progs/linked_funcs2.c
new file mode 100644
index 000000000000..575e958e60b7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_funcs2.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* weak and shared between both files */
+const volatile int my_tid __weak;
+long syscall_id __weak;
+
+int output_val2;
+int output_ctx2;
+int output_weak2; /* should stay zero */
+
+/* same "subprog" name in all files, but it's ok because they all are static */
+static __noinline int subprog(int x)
+{
+ /* but different formula */
+ return x * 2;
+}
+
+/* Global functions can't be void */
+int set_output_val2(int x)
+{
+ output_val2 = 2 * x + 2 * subprog(x);
+ return 2 * x;
+}
+
+/* This function can't be verified as global, as it assumes raw_tp/sys_enter
+ * context and accesses syscall id (second argument). So we mark it as
+ * __hidden, so that libbpf will mark it as static in the final object file,
+ * right before verifying it in the kernel.
+ *
+ * But we don't mark it as __hidden here, rather at the extern site. __hidden is
+ * "contaminating" visibility, so it will get propagated from either extern or
+ * actual definition (including from the losing __weak definition).
+ */
+void set_output_ctx2(__u64 *ctx)
+{
+ output_ctx2 = ctx[1]; /* long id, same as in BPF_PROG below */
+}
+
+/* this weak instance should lose, because it will be processed second */
+__weak int set_output_weak(int x)
+{
+ output_weak2 = x;
+ return 2 * x;
+}
+
+extern int set_output_val1(int x);
+
+/* here we'll force set_output_ctx1() to be __hidden in the final obj file */
+__hidden extern void set_output_ctx1(__u64 *ctx);
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handler2, struct pt_regs *regs, long id)
+{
+ if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
+ return 0;
+
+ set_output_val1(2000);
+ set_output_ctx1(ctx); /* ctx definition is hidden in BPF_PROG macro */
+
+ /* keep input value the same across both files to avoid dependency on
+ * handler call order; differentiate by output_weak1 vs output_weak2.
+ */
+ set_output_weak(42);
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
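The two linked_funcs files are meant to be statically linked into one BPF
object before loading, so the extern and weak symbols above resolve
against each other. A hedged userspace sketch using libbpf's bpf_linker
API (object file names are illustrative; the two-argument add_file
signature is the one introduced alongside this linker):

        #include <bpf/libbpf.h>

        /* Sketch: link the objects built from linked_funcs1.c and
         * linked_funcs2.c into a single loadable object. Error handling
         * is abbreviated.
         */
        static int link_funcs(void)
        {
                struct bpf_linker *linker;

                linker = bpf_linker__new("linked_funcs.linked.o", NULL);
                if (!linker)
                        return -1;

                if (bpf_linker__add_file(linker, "linked_funcs1.o") ||
                    bpf_linker__add_file(linker, "linked_funcs2.o") ||
                    bpf_linker__finalize(linker)) {
                        bpf_linker__free(linker);
                        return -1;
                }

                bpf_linker__free(linker);
                return 0;
        }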
diff --git a/tools/testing/selftests/bpf/progs/linked_maps1.c b/tools/testing/selftests/bpf/progs/linked_maps1.c
new file mode 100644
index 000000000000..52291515cc72
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_maps1.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct my_key { long x; };
+struct my_value { long x; };
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct my_key);
+ __type(value, struct my_value);
+ __uint(max_entries, 16);
+} map1 SEC(".maps");
+
+/* Matches map2 definition in linked_maps2.c. Order of the attributes doesn't
+ * matter.
+ */
+typedef struct {
+ __uint(max_entries, 8);
+ __type(key, int);
+ __type(value, int);
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+} map2_t;
+
+extern map2_t map2 SEC(".maps");
+
+/* This should be the winning map definition, but we have no way of verifying,
+ * so we just make sure that it links and works without errors.
+ */
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 16);
+} map_weak __weak SEC(".maps");
+
+int output_first1;
+int output_second1;
+int output_weak1;
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handler_enter1)
+{
+ /* update values with key = 1 */
+ int key = 1, val = 1;
+ struct my_key key_struct = { .x = 1 };
+ struct my_value val_struct = { .x = 1000 };
+
+ bpf_map_update_elem(&map1, &key_struct, &val_struct, 0);
+ bpf_map_update_elem(&map2, &key, &val, 0);
+ bpf_map_update_elem(&map_weak, &key, &val, 0);
+
+ return 0;
+}
+
+SEC("raw_tp/sys_exit")
+int BPF_PROG(handler_exit1)
+{
+ /* lookup values with key = 2, set in another file */
+ int key = 2, *val;
+ struct my_key key_struct = { .x = 2 };
+ struct my_value *value_struct;
+
+ value_struct = bpf_map_lookup_elem(&map1, &key_struct);
+ if (value_struct)
+ output_first1 = value_struct->x;
+
+ val = bpf_map_lookup_elem(&map2, &key);
+ if (val)
+ output_second1 = *val;
+
+ val = bpf_map_lookup_elem(&map_weak, &key);
+ if (val)
+ output_weak1 = *val;
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_maps2.c b/tools/testing/selftests/bpf/progs/linked_maps2.c
new file mode 100644
index 000000000000..0693687474ed
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_maps2.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* modifiers and typedefs are ignored when comparing key/value types */
+typedef struct my_key { long x; } key_type;
+typedef struct my_value { long x; } value_type;
+
+extern struct {
+ __uint(max_entries, 16);
+ __type(key, key_type);
+ __type(value, value_type);
+ __uint(type, BPF_MAP_TYPE_HASH);
+} map1 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 8);
+} map2 SEC(".maps");
+
+/* this definition will lose, but it has to exactly match the winner */
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 16);
+} map_weak __weak SEC(".maps");
+
+int output_first2;
+int output_second2;
+int output_weak2;
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handler_enter2)
+{
+ /* update values with key = 2 */
+ int key = 2, val = 2;
+ key_type key_struct = { .x = 2 };
+ value_type val_struct = { .x = 2000 };
+
+ bpf_map_update_elem(&map1, &key_struct, &val_struct, 0);
+ bpf_map_update_elem(&map2, &key, &val, 0);
+ bpf_map_update_elem(&map_weak, &key, &val, 0);
+
+ return 0;
+}
+
+SEC("raw_tp/sys_exit")
+int BPF_PROG(handler_exit2)
+{
+ /* lookup values with key = 1, set in another file */
+ int key = 1, *val;
+ key_type key_struct = { .x = 1 };
+ value_type *value_struct;
+
+ value_struct = bpf_map_lookup_elem(&map1, &key_struct);
+ if (value_struct)
+ output_first2 = value_struct->x;
+
+ val = bpf_map_lookup_elem(&map2, &key);
+ if (val)
+ output_second2 = *val;
+
+ val = bpf_map_lookup_elem(&map_weak, &key);
+ if (val)
+ output_weak2 = *val;
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
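After linking, each extern/weak map pair above should collapse into a
single map. A sketch of how userspace could verify the winning
definition; the map name comes from the files above, but the check
itself is an assumption about the test's intent:

        #include <bpf/libbpf.h>

        /* Sketch: confirm map_weak deduplicated to the 16-entry
         * definition from linked_maps1.c
         */
        static int check_map_weak(struct bpf_object *obj)
        {
                struct bpf_map *map;

                map = bpf_object__find_map_by_name(obj, "map_weak");
                if (!map)
                        return -1;
                return bpf_map__max_entries(map) == 16 ? 0 : -1;
        }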
diff --git a/tools/testing/selftests/bpf/progs/linked_vars1.c b/tools/testing/selftests/bpf/progs/linked_vars1.c
new file mode 100644
index 000000000000..ef9e9d0bb0ca
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_vars1.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+extern int LINUX_KERNEL_VERSION __kconfig;
+/* this weak extern will be strict due to the other file's strong extern */
+extern bool CONFIG_BPF_SYSCALL __kconfig __weak;
+extern const void bpf_link_fops __ksym __weak;
+
+int input_bss1;
+int input_data1 = 1;
+const volatile int input_rodata1 = 11;
+
+int input_bss_weak __weak;
+/* these two definitions should win */
+int input_data_weak __weak = 10;
+const volatile int input_rodata_weak __weak = 100;
+
+extern int input_bss2;
+extern int input_data2;
+extern const int input_rodata2;
+
+int output_bss1;
+int output_data1;
+int output_rodata1;
+
+long output_sink1;
+
+static __noinline int get_bss_res(void)
+{
+ /* just make sure all the relocations work against .text as well */
+ return input_bss1 + input_bss2 + input_bss_weak;
+}
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handler1)
+{
+ output_bss1 = get_bss_res();
+ output_data1 = input_data1 + input_data2 + input_data_weak;
+ output_rodata1 = input_rodata1 + input_rodata2 + input_rodata_weak;
+
+ /* make sure we actually use above special externs, otherwise compiler
+ * will optimize them out
+ */
+ output_sink1 = LINUX_KERNEL_VERSION
+ + CONFIG_BPF_SYSCALL
+ + (long)&bpf_link_fops;
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_vars2.c b/tools/testing/selftests/bpf/progs/linked_vars2.c
new file mode 100644
index 000000000000..e4f5bd388a3c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_vars2.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+extern int LINUX_KERNEL_VERSION __kconfig;
+/* when an extern is defined as both strong and weak, the resulting symbol will be strong */
+extern bool CONFIG_BPF_SYSCALL __kconfig;
+extern const void __start_BTF __ksym;
+
+int input_bss2;
+int input_data2 = 2;
+const volatile int input_rodata2 = 22;
+
+int input_bss_weak __weak;
+/* these two weak variables should lose */
+int input_data_weak __weak = 20;
+const volatile int input_rodata_weak __weak = 200;
+
+extern int input_bss1;
+extern int input_data1;
+extern const int input_rodata1;
+
+int output_bss2;
+int output_data2;
+int output_rodata2;
+
+int output_sink2;
+
+static __noinline int get_data_res(void)
+{
+ /* just make sure all the relocations work against .text as well */
+ return input_data1 + input_data2 + input_data_weak;
+}
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handler2)
+{
+ output_bss2 = input_bss1 + input_bss2 + input_bss_weak;
+ output_data2 = get_data_res();
+ output_rodata2 = input_rodata1 + input_rodata2 + input_rodata_weak;
+
+ /* make sure we actually use above special externs, otherwise compiler
+ * will optimize them out
+ */
+ output_sink2 = LINUX_KERNEL_VERSION
+ + CONFIG_BPF_SYSCALL
+ + (long)&__start_BTF;
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
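Because the __weak definitions from linked_vars1.c win, the handler
outputs are fully determined by the initializers above. A small sketch
of the expected arithmetic, written as a hypothetical userspace-side
check under that assumption:

        /* Sketch: expected sums after either handler runs once */
        static int check_linked_vars(int output_data, int output_rodata)
        {
                /* input_data1 + input_data2 + winning input_data_weak */
                if (output_data != 1 + 2 + 10)
                        return -1;
                /* input_rodata1 + input_rodata2 + winning input_rodata_weak */
                return output_rodata == 11 + 22 + 100 ? 0 : -1;
        }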
diff --git a/tools/testing/selftests/bpf/progs/loop6.c b/tools/testing/selftests/bpf/progs/loop6.c
new file mode 100644
index 000000000000..38de0331e6b4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/loop6.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ptrace.h>
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* typically virtio scsi has max SGs of 6 */
+#define VIRTIO_MAX_SGS 6
+
+/* The verifier will fail with SG_MAX = 128. The failure can be
+ * worked around with a smaller SG_MAX, e.g. 10.
+ */
+#define WORKAROUND
+#ifdef WORKAROUND
+#define SG_MAX 10
+#else
+/* typically virtio blk has max SEG of 128 */
+#define SG_MAX 128
+#endif
+
+#define SG_CHAIN 0x01UL
+#define SG_END 0x02UL
+
+struct scatterlist {
+ unsigned long page_link;
+ unsigned int offset;
+ unsigned int length;
+};
+
+#define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN)
+#define sg_is_last(sg) ((sg)->page_link & SG_END)
+#define sg_chain_ptr(sg) \
+ ((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END)))
+
+static inline struct scatterlist *__sg_next(struct scatterlist *sgp)
+{
+ struct scatterlist sg;
+
+ bpf_probe_read_kernel(&sg, sizeof(sg), sgp);
+ if (sg_is_last(&sg))
+ return NULL;
+
+ sgp++;
+
+ bpf_probe_read_kernel(&sg, sizeof(sg), sgp);
+ if (sg_is_chain(&sg))
+ sgp = sg_chain_ptr(&sg);
+
+ return sgp;
+}
+
+static inline struct scatterlist *get_sgp(struct scatterlist **sgs, int i)
+{
+ struct scatterlist *sgp;
+
+ bpf_probe_read_kernel(&sgp, sizeof(sgp), sgs + i);
+ return sgp;
+}
+
+int config = 0;
+int result = 0;
+
+SEC("kprobe/virtqueue_add_sgs")
+int BPF_KPROBE(trace_virtqueue_add_sgs, void *unused, struct scatterlist **sgs,
+ unsigned int out_sgs, unsigned int in_sgs)
+{
+ struct scatterlist *sgp = NULL;
+ __u64 length1 = 0, length2 = 0;
+ unsigned int i, n, len;
+
+ if (config != 0)
+ return 0;
+
+ for (i = 0; (i < VIRTIO_MAX_SGS) && (i < out_sgs); i++) {
+ for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX);
+ sgp = __sg_next(sgp)) {
+ bpf_probe_read_kernel(&len, sizeof(len), &sgp->length);
+ length1 += len;
+ n++;
+ }
+ }
+
+ for (i = 0; (i < VIRTIO_MAX_SGS) && (i < in_sgs); i++) {
+ for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX);
+ sgp = __sg_next(sgp)) {
+ bpf_probe_read_kernel(&len, sizeof(len), &sgp->length);
+ length2 += len;
+ n++;
+ }
+ }
+
+ config = 1;
+ result = length2 - length1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
index d8850bc6a9f1..d1d304c980f0 100644
--- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c
+++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
@@ -12,6 +12,7 @@ _Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");
enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;
+int page_size = 0; /* userspace should set it */
#define VERIFY_TYPE(type, func) ({ \
g_map_type = type; \
@@ -635,7 +636,6 @@ struct bpf_ringbuf_map {
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
- __uint(max_entries, 1 << 12);
} m_ringbuf SEC(".maps");
static inline int check_ringbuf(void)
@@ -643,7 +643,7 @@ static inline int check_ringbuf(void)
struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
struct bpf_map *map = (struct bpf_map *)&m_ringbuf;
- VERIFY(check(&ringbuf->map, map, 0, 0, 1 << 12));
+ VERIFY(check(&ringbuf->map, map, 0, 0, page_size));
return 1;
}
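The ring buffer's hard-coded 4096 size gives way to a page_size global
that userspace must fill in before the check runs. A minimal sketch of
the driver side, assuming a generated skeleton (the skeleton type and
field names are hypothetical):

        #include <unistd.h>

        /* Sketch: .bss globals are writable through the skeleton between
         * open and load, so fill page_size there; the ringbuf map size
         * would be set to match
         */
        static void set_page_size(struct map_ptr_kern *skel)
        {
                long page_size = sysconf(_SC_PAGE_SIZE);

                skel->bss->page_size = page_size;
                bpf_map__set_max_entries(skel->maps.m_ringbuf, page_size);
        }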
diff --git a/tools/testing/selftests/bpf/progs/skb_pkt_end.c b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
index cf6823f42e80..7f2eaa2f89f8 100644
--- a/tools/testing/selftests/bpf/progs/skb_pkt_end.c
+++ b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
@@ -4,7 +4,6 @@
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
-#define NULL 0
#define INLINE __always_inline
#define skb_shorter(skb, len) ((void *)(long)(skb)->data + (len) > (void *)(long)skb->data_end)
diff --git a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
index fdb4bf4408fa..eeaf6e75c9a2 100644
--- a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
@@ -8,18 +8,6 @@ int _version SEC("version") = 1;
SEC("sk_msg1")
int bpf_prog1(struct sk_msg_md *msg)
{
- void *data_end = (void *)(long) msg->data_end;
- void *data = (void *)(long) msg->data;
-
- char *d;
-
- if (data + 8 > data_end)
- return SK_DROP;
-
- bpf_printk("data length %i\n", (__u64)msg->data_end - (__u64)msg->data);
- d = (char *)data;
- bpf_printk("hello sendmsg hook %i %i\n", d[0], d[1]);
-
return SK_PASS;
}
diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c
index d3597f81e6e9..8acdb99b5959 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_sk.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c
@@ -6,11 +6,8 @@
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1;
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
+int page_size = 0; /* userspace should set it */
#ifndef SOL_TCP
#define SOL_TCP IPPROTO_TCP
@@ -90,7 +87,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
* program can only see the first PAGE_SIZE
* bytes of data.
*/
- if (optval_end - optval != PAGE_SIZE)
+ if (optval_end - optval != page_size)
return 0; /* EPERM, unexpected data size */
return 1;
@@ -161,7 +158,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
/* Original optlen is larger than PAGE_SIZE. */
- if (ctx->optlen != PAGE_SIZE * 2)
+ if (ctx->optlen != page_size * 2)
return 0; /* EPERM, unexpected data size */
if (optval + 1 > optval_end)
@@ -175,7 +172,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
* program can only see the first PAGE_SIZE
* bytes of data.
*/
- if (optval_end - optval != PAGE_SIZE)
+ if (optval_end - optval != page_size)
return 0; /* EPERM, unexpected data size */
return 1;
diff --git a/tools/testing/selftests/bpf/progs/task_local_storage.c b/tools/testing/selftests/bpf/progs/task_local_storage.c
new file mode 100644
index 000000000000..80a0a20db88d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_local_storage.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} enter_id SEC(".maps");
+
+#define MAGIC_VALUE 0xabcd1234
+
+pid_t target_pid = 0;
+int mismatch_cnt = 0;
+int enter_cnt = 0;
+int exit_cnt = 0;
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ ptr = bpf_task_storage_get(&enter_id, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return 0;
+
+ __sync_fetch_and_add(&enter_cnt, 1);
+ *ptr = MAGIC_VALUE + enter_cnt;
+
+ return 0;
+}
+
+SEC("tp_btf/sys_exit")
+int BPF_PROG(on_exit, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ ptr = bpf_task_storage_get(&enter_id, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return 0;
+
+ __sync_fetch_and_add(&exit_cnt, 1);
+ if (*ptr != MAGIC_VALUE + exit_cnt)
+ __sync_fetch_and_add(&mismatch_cnt, 1);
+ return 0;
+}
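A hedged sketch of the userspace driver for this test: record the target
thread id, then issue syscalls so that sys_enter/sys_exit fire for the
task (the skeleton type is hypothetical):

        #include <unistd.h>
        #include <sys/syscall.h>

        /* Sketch: task->pid in the programs above is the thread id, so
         * match against gettid, then trigger a syscall pair
         */
        static void drive_task_storage(struct task_local_storage *skel)
        {
                skel->bss->target_pid = syscall(SYS_gettid);
                syscall(SYS_getpid); /* bumps enter_cnt and exit_cnt */
        }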
diff --git a/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c b/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c
new file mode 100644
index 000000000000..81758c0aef99
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, __u64);
+} task_storage SEC(".maps");
+
+int valid_ptr_count = 0;
+int null_ptr_count = 0;
+
+SEC("fentry/exit_creds")
+int BPF_PROG(trace_exit_creds, struct task_struct *task)
+{
+ __u64 *ptr;
+
+ ptr = bpf_task_storage_get(&task_storage, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ __sync_fetch_and_add(&valid_ptr_count, 1);
+ else
+ __sync_fetch_and_add(&null_ptr_count, 1);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_ls_recursion.c b/tools/testing/selftests/bpf/progs/task_ls_recursion.c
new file mode 100644
index 000000000000..564583dca7c8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_ls_recursion.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_b SEC(".maps");
+
+SEC("fentry/bpf_local_storage_lookup")
+int BPF_PROG(on_lookup)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+
+ bpf_task_storage_delete(&map_a, task);
+ bpf_task_storage_delete(&map_b, task);
+ return 0;
+}
+
+SEC("fentry/bpf_local_storage_update")
+int BPF_PROG(on_update)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ long *ptr;
+
+ ptr = bpf_task_storage_get(&map_a, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr += 1;
+
+ ptr = bpf_task_storage_get(&map_b, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr += 1;
+
+ return 0;
+}
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ ptr = bpf_task_storage_get(&map_a, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr = 200;
+
+ ptr = bpf_task_storage_get(&map_b, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr = 100;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_size.c b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
index d7fb6cfc7891..7b2d576aeea1 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
@@ -21,6 +21,7 @@ struct core_reloc_size_output {
int arr_elem_sz;
int ptr_sz;
int enum_sz;
+ int float_sz;
};
struct core_reloc_size {
@@ -30,6 +31,7 @@ struct core_reloc_size {
int arr_field[4];
void *ptr_field;
enum { VALUE = 123 } enum_field;
+ float float_field;
};
SEC("raw_tracepoint/sys_enter")
@@ -45,6 +47,7 @@ int test_core_size(void *ctx)
out->arr_elem_sz = bpf_core_field_size(in->arr_field[0]);
out->ptr_sz = bpf_core_field_size(in->ptr_field);
out->enum_sz = bpf_core_field_size(in->enum_field);
+ out->float_sz = bpf_core_field_size(in->float_field);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func10.c b/tools/testing/selftests/bpf/progs/test_global_func10.c
index 61c2ae92ce41..97b7031d0e22 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func10.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func10.c
@@ -14,7 +14,7 @@ struct Big {
__noinline int foo(const struct Big *big)
{
- if (big == 0)
+ if (!big)
return 0;
return bpf_get_prandom_u32() < big->y;
diff --git a/tools/testing/selftests/bpf/progs/test_mmap.c b/tools/testing/selftests/bpf/progs/test_mmap.c
index 4eb42cff5fe9..5a5cc19a15bf 100644
--- a/tools/testing/selftests/bpf/progs/test_mmap.c
+++ b/tools/testing/selftests/bpf/progs/test_mmap.c
@@ -9,7 +9,6 @@ char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 4096);
__uint(map_flags, BPF_F_MMAPABLE | BPF_F_RDONLY_PROG);
__type(key, __u32);
__type(value, char);
@@ -17,7 +16,6 @@ struct {
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 512 * 4); /* at least 4 pages of data */
__uint(map_flags, BPF_F_MMAPABLE);
__type(key, __u32);
__type(value, __u64);
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf.c b/tools/testing/selftests/bpf/progs/test_ringbuf.c
index 8ba9959b036b..6b3f288b7c63 100644
--- a/tools/testing/selftests/bpf/progs/test_ringbuf.c
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf.c
@@ -15,7 +15,6 @@ struct sample {
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
- __uint(max_entries, 1 << 12);
} ringbuf SEC(".maps");
/* inputs */
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c b/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
index edf3b6953533..197b86546dca 100644
--- a/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
@@ -15,7 +15,6 @@ struct sample {
struct ringbuf_map {
__uint(type, BPF_MAP_TYPE_RINGBUF);
- __uint(max_entries, 1 << 12);
} ringbuf1 SEC(".maps"),
ringbuf2 SEC(".maps");
@@ -31,6 +30,17 @@ struct {
},
};
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __array(values, struct ringbuf_map);
+} ringbuf_hash SEC(".maps") = {
+ .values = {
+ [0] = &ringbuf1,
+ },
+};
+
/* inputs */
int pid = 0;
int target_ring = 0;
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
index 1032b292af5b..ac6f7f205e25 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
@@ -64,6 +64,10 @@ static const int PROG_DONE = 1;
static const __u32 KEY_SERVER_A = SERVER_A;
static const __u32 KEY_SERVER_B = SERVER_B;
+static const __u16 SRC_PORT = bpf_htons(8008);
+static const __u32 SRC_IP4 = IP4(127, 0, 0, 2);
+static const __u32 SRC_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000002);
+
static const __u16 DST_PORT = 7007; /* Host byte order */
static const __u32 DST_IP4 = IP4(127, 0, 0, 1);
static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001);
@@ -398,11 +402,12 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
if (LSW(ctx->protocol, 0) != IPPROTO_TCP)
return SK_DROP;
- /* Narrow loads from remote_port field. Expect non-0 value. */
- if (LSB(ctx->remote_port, 0) == 0 && LSB(ctx->remote_port, 1) == 0 &&
- LSB(ctx->remote_port, 2) == 0 && LSB(ctx->remote_port, 3) == 0)
+ /* Narrow loads from remote_port field. Expect SRC_PORT. */
+ if (LSB(ctx->remote_port, 0) != ((SRC_PORT >> 0) & 0xff) ||
+ LSB(ctx->remote_port, 1) != ((SRC_PORT >> 8) & 0xff) ||
+ LSB(ctx->remote_port, 2) != 0 || LSB(ctx->remote_port, 3) != 0)
return SK_DROP;
- if (LSW(ctx->remote_port, 0) == 0)
+ if (LSW(ctx->remote_port, 0) != SRC_PORT)
return SK_DROP;
/* Narrow loads from local_port field. Expect DST_PORT. */
@@ -415,11 +420,14 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
/* Narrow loads from IPv4 fields */
if (v4) {
- /* Expect non-0.0.0.0 in remote_ip4 */
- if (LSB(ctx->remote_ip4, 0) == 0 && LSB(ctx->remote_ip4, 1) == 0 &&
- LSB(ctx->remote_ip4, 2) == 0 && LSB(ctx->remote_ip4, 3) == 0)
+ /* Expect SRC_IP4 in remote_ip4 */
+ if (LSB(ctx->remote_ip4, 0) != ((SRC_IP4 >> 0) & 0xff) ||
+ LSB(ctx->remote_ip4, 1) != ((SRC_IP4 >> 8) & 0xff) ||
+ LSB(ctx->remote_ip4, 2) != ((SRC_IP4 >> 16) & 0xff) ||
+ LSB(ctx->remote_ip4, 3) != ((SRC_IP4 >> 24) & 0xff))
return SK_DROP;
- if (LSW(ctx->remote_ip4, 0) == 0 && LSW(ctx->remote_ip4, 1) == 0)
+ if (LSW(ctx->remote_ip4, 0) != ((SRC_IP4 >> 0) & 0xffff) ||
+ LSW(ctx->remote_ip4, 1) != ((SRC_IP4 >> 16) & 0xffff))
return SK_DROP;
/* Expect DST_IP4 in local_ip4 */
@@ -448,20 +456,32 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
/* Narrow loads from IPv6 fields */
if (!v4) {
- /* Expect non-:: IP in remote_ip6 */
- if (LSB(ctx->remote_ip6[0], 0) == 0 && LSB(ctx->remote_ip6[0], 1) == 0 &&
- LSB(ctx->remote_ip6[0], 2) == 0 && LSB(ctx->remote_ip6[0], 3) == 0 &&
- LSB(ctx->remote_ip6[1], 0) == 0 && LSB(ctx->remote_ip6[1], 1) == 0 &&
- LSB(ctx->remote_ip6[1], 2) == 0 && LSB(ctx->remote_ip6[1], 3) == 0 &&
- LSB(ctx->remote_ip6[2], 0) == 0 && LSB(ctx->remote_ip6[2], 1) == 0 &&
- LSB(ctx->remote_ip6[2], 2) == 0 && LSB(ctx->remote_ip6[2], 3) == 0 &&
- LSB(ctx->remote_ip6[3], 0) == 0 && LSB(ctx->remote_ip6[3], 1) == 0 &&
- LSB(ctx->remote_ip6[3], 2) == 0 && LSB(ctx->remote_ip6[3], 3) == 0)
+ /* Expect SRC_IP6 in remote_ip6 */
+ if (LSB(ctx->remote_ip6[0], 0) != ((SRC_IP6[0] >> 0) & 0xff) ||
+ LSB(ctx->remote_ip6[0], 1) != ((SRC_IP6[0] >> 8) & 0xff) ||
+ LSB(ctx->remote_ip6[0], 2) != ((SRC_IP6[0] >> 16) & 0xff) ||
+ LSB(ctx->remote_ip6[0], 3) != ((SRC_IP6[0] >> 24) & 0xff) ||
+ LSB(ctx->remote_ip6[1], 0) != ((SRC_IP6[1] >> 0) & 0xff) ||
+ LSB(ctx->remote_ip6[1], 1) != ((SRC_IP6[1] >> 8) & 0xff) ||
+ LSB(ctx->remote_ip6[1], 2) != ((SRC_IP6[1] >> 16) & 0xff) ||
+ LSB(ctx->remote_ip6[1], 3) != ((SRC_IP6[1] >> 24) & 0xff) ||
+ LSB(ctx->remote_ip6[2], 0) != ((SRC_IP6[2] >> 0) & 0xff) ||
+ LSB(ctx->remote_ip6[2], 1) != ((SRC_IP6[2] >> 8) & 0xff) ||
+ LSB(ctx->remote_ip6[2], 2) != ((SRC_IP6[2] >> 16) & 0xff) ||
+ LSB(ctx->remote_ip6[2], 3) != ((SRC_IP6[2] >> 24) & 0xff) ||
+ LSB(ctx->remote_ip6[3], 0) != ((SRC_IP6[3] >> 0) & 0xff) ||
+ LSB(ctx->remote_ip6[3], 1) != ((SRC_IP6[3] >> 8) & 0xff) ||
+ LSB(ctx->remote_ip6[3], 2) != ((SRC_IP6[3] >> 16) & 0xff) ||
+ LSB(ctx->remote_ip6[3], 3) != ((SRC_IP6[3] >> 24) & 0xff))
return SK_DROP;
- if (LSW(ctx->remote_ip6[0], 0) == 0 && LSW(ctx->remote_ip6[0], 1) == 0 &&
- LSW(ctx->remote_ip6[1], 0) == 0 && LSW(ctx->remote_ip6[1], 1) == 0 &&
- LSW(ctx->remote_ip6[2], 0) == 0 && LSW(ctx->remote_ip6[2], 1) == 0 &&
- LSW(ctx->remote_ip6[3], 0) == 0 && LSW(ctx->remote_ip6[3], 1) == 0)
+ if (LSW(ctx->remote_ip6[0], 0) != ((SRC_IP6[0] >> 0) & 0xffff) ||
+ LSW(ctx->remote_ip6[0], 1) != ((SRC_IP6[0] >> 16) & 0xffff) ||
+ LSW(ctx->remote_ip6[1], 0) != ((SRC_IP6[1] >> 0) & 0xffff) ||
+ LSW(ctx->remote_ip6[1], 1) != ((SRC_IP6[1] >> 16) & 0xffff) ||
+ LSW(ctx->remote_ip6[2], 0) != ((SRC_IP6[2] >> 0) & 0xffff) ||
+ LSW(ctx->remote_ip6[2], 1) != ((SRC_IP6[2] >> 16) & 0xffff) ||
+ LSW(ctx->remote_ip6[3], 0) != ((SRC_IP6[3] >> 0) & 0xffff) ||
+ LSW(ctx->remote_ip6[3], 1) != ((SRC_IP6[3] >> 16) & 0xffff))
return SK_DROP;
/* Expect DST_IP6 in local_ip6 */
if (LSB(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xff) ||
diff --git a/tools/testing/selftests/bpf/progs/test_snprintf.c b/tools/testing/selftests/bpf/progs/test_snprintf.c
new file mode 100644
index 000000000000..951a0301c553
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_snprintf.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google LLC. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char num_out[64] = {};
+long num_ret = 0;
+
+char ip_out[64] = {};
+long ip_ret = 0;
+
+char sym_out[64] = {};
+long sym_ret = 0;
+
+char addr_out[64] = {};
+long addr_ret = 0;
+
+char str_out[64] = {};
+long str_ret = 0;
+
+char over_out[6] = {};
+long over_ret = 0;
+
+char pad_out[10] = {};
+long pad_ret = 0;
+
+char noarg_out[64] = {};
+long noarg_ret = 0;
+
+long nobuf_ret = 0;
+
+extern const void schedule __ksym;
+
+SEC("raw_tp/sys_enter")
+int handler(const void *ctx)
+{
+ /* Convenient values to pretty-print */
+ const __u8 ex_ipv4[] = {127, 0, 0, 1};
+ const __u8 ex_ipv6[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1};
+ static const char str1[] = "str1";
+ static const char longstr[] = "longstr";
+
+ /* Integer types */
+ num_ret = BPF_SNPRINTF(num_out, sizeof(num_out),
+ "%d %u %x %li %llu %lX",
+ -8, 9, 150, -424242, 1337, 0xDABBAD00);
+ /* IP addresses */
+ ip_ret = BPF_SNPRINTF(ip_out, sizeof(ip_out), "%pi4 %pI6",
+ &ex_ipv4, &ex_ipv6);
+ /* Symbol lookup formatting */
+ sym_ret = BPF_SNPRINTF(sym_out, sizeof(sym_out), "%ps %pS %pB",
+ &schedule, &schedule, &schedule);
+ /* Kernel pointers */
+ addr_ret = BPF_SNPRINTF(addr_out, sizeof(addr_out), "%pK %px %p",
+ 0, 0xFFFF00000ADD4E55, 0xFFFF00000ADD4E55);
+ /* Strings embedding */
+ str_ret = BPF_SNPRINTF(str_out, sizeof(str_out), "%s %+05s",
+ str1, longstr);
+ /* Overflow */
+ over_ret = BPF_SNPRINTF(over_out, sizeof(over_out), "%%overflow");
+ /* Padding of fixed width numbers */
+ pad_ret = BPF_SNPRINTF(pad_out, sizeof(pad_out), "%5d %0900000X", 4, 4);
+ /* No args */
+ noarg_ret = BPF_SNPRINTF(noarg_out, sizeof(noarg_out), "simple case");
+ /* No buffer */
+ nobuf_ret = BPF_SNPRINTF(NULL, 0, "only interested in length %d", 60);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
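Userspace would trigger the tracepoint and then compare buffers and
return codes. A spot-check sketch; the expected string is derived from
the "%d %u %x %li %llu %lX" format above, and counting the terminating
NUL in the return value is an assumption about BPF_SNPRINTF's contract:

        #include <string.h>

        /* Sketch: validate the integer-formatting case */
        static int check_num(const char *num_out, long num_ret)
        {
                if (strcmp(num_out, "-8 9 96 -424242 1337 DABBAD00"))
                        return -1;
                return num_ret == (long)strlen(num_out) + 1 ? 0 : -1;
        }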
diff --git a/tools/testing/selftests/bpf/progs/test_snprintf_single.c b/tools/testing/selftests/bpf/progs/test_snprintf_single.c
new file mode 100644
index 000000000000..402adaf344f9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_snprintf_single.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google LLC. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* The format string is filled from userspace so that loading fails */
+static const char fmt[10];
+
+SEC("raw_tp/sys_enter")
+int handler(const void *ctx)
+{
+ unsigned long long arg = 42;
+
+ bpf_snprintf(NULL, 0, fmt, &arg, sizeof(arg));
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
index a3a366c57ce1..a39eba9f5201 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
@@ -29,15 +29,16 @@ struct {
} verdict_map SEC(".maps");
static volatile bool test_sockmap; /* toggled by user-space */
+static volatile bool test_ingress; /* toggled by user-space */
SEC("sk_skb/stream_parser")
-int prog_skb_parser(struct __sk_buff *skb)
+int prog_stream_parser(struct __sk_buff *skb)
{
return skb->len;
}
SEC("sk_skb/stream_verdict")
-int prog_skb_verdict(struct __sk_buff *skb)
+int prog_stream_verdict(struct __sk_buff *skb)
{
unsigned int *count;
__u32 zero = 0;
@@ -55,6 +56,27 @@ int prog_skb_verdict(struct __sk_buff *skb)
return verdict;
}
+SEC("sk_skb/skb_verdict")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ unsigned int *count;
+ __u32 zero = 0;
+ int verdict;
+
+ if (test_sockmap)
+ verdict = bpf_sk_redirect_map(skb, &sock_map, zero,
+ test_ingress ? BPF_F_INGRESS : 0);
+ else
+ verdict = bpf_sk_redirect_hash(skb, &sock_hash, &zero,
+ test_ingress ? BPF_F_INGRESS : 0);
+
+ count = bpf_map_lookup_elem(&verdict_map, &verdict);
+ if (count)
+ (*count)++;
+
+ return verdict;
+}
+
SEC("sk_msg")
int prog_msg_verdict(struct sk_msg_md *msg)
{
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c b/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c
new file mode 100644
index 000000000000..2d31f66e4f23
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u64);
+} sock_map SEC(".maps");
+
+SEC("sk_skb/skb_verdict")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ return SK_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_static_linked1.c b/tools/testing/selftests/bpf/progs/test_static_linked1.c
new file mode 100644
index 000000000000..ea1a6c4c7172
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_static_linked1.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* 8-byte aligned .bss */
+static volatile long static_var1;
+static volatile int static_var11;
+int var1 = 0;
+/* 4-byte aligned .rodata */
+const volatile int rovar1;
+
+/* same "subprog" name in both files */
+static __noinline int subprog(int x)
+{
+ /* but different formula */
+ return x * 2;
+}
+
+SEC("raw_tp/sys_enter")
+int handler1(const void *ctx)
+{
+ var1 = subprog(rovar1) + static_var1 + static_var11;
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
+int VERSION SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_static_linked2.c b/tools/testing/selftests/bpf/progs/test_static_linked2.c
new file mode 100644
index 000000000000..54d8d1ab577c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_static_linked2.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* 4-byte aligned .bss */
+static volatile int static_var2;
+static volatile int static_var22;
+int var2 = 0;
+/* 8-byte aligned .rodata */
+const volatile long rovar2;
+
+/* same "subprog" name in both files */
+static __noinline int subprog(int x)
+{
+ /* but different formula */
+ return x * 3;
+}
+
+SEC("raw_tp/sys_enter")
+int handler2(const void *ctx)
+{
+ var2 = subprog(rovar2) + static_var2 + static_var22;
+
+ return 0;
+}
+
+/* different name and/or type of the variable doesn't matter */
+char _license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
index 37bce7a7c394..84cd63259554 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
@@ -24,14 +24,29 @@ static const int cfg_port = 8000;
static const int cfg_udp_src = 20000;
+#define L2_PAD_SZ (sizeof(struct vxlanhdr) + ETH_HLEN)
+
#define UDP_PORT 5555
#define MPLS_OVER_UDP_PORT 6635
#define ETH_OVER_UDP_PORT 7777
+#define VXLAN_UDP_PORT 8472
+
+#define EXTPROTO_VXLAN 0x1
+
+#define VXLAN_N_VID (1u << 24)
+#define VXLAN_VNI_MASK bpf_htonl((VXLAN_N_VID - 1) << 8)
+#define VXLAN_FLAGS 0x8
+#define VXLAN_VNI 1
/* MPLS label 1000 with S bit (last label) set and ttl of 255. */
static const __u32 mpls_label = __bpf_constant_htonl(1000 << 12 |
MPLS_LS_S_MASK | 0xff);
+struct vxlanhdr {
+ __be32 vx_flags;
+ __be32 vx_vni;
+} __attribute__((packed));
+
struct gre_hdr {
__be16 flags;
__be16 protocol;
@@ -45,13 +60,13 @@ union l4hdr {
struct v4hdr {
struct iphdr ip;
union l4hdr l4hdr;
- __u8 pad[16]; /* enough space for L2 header */
+ __u8 pad[L2_PAD_SZ]; /* space for L2 header / vxlan header ... */
} __attribute__((packed));
struct v6hdr {
struct ipv6hdr ip;
union l4hdr l4hdr;
- __u8 pad[16]; /* enough space for L2 header */
+ __u8 pad[L2_PAD_SZ]; /* space for L2 header / vxlan header ... */
} __attribute__((packed));
static __always_inline void set_ipv4_csum(struct iphdr *iph)
@@ -69,14 +84,15 @@ static __always_inline void set_ipv4_csum(struct iphdr *iph)
iph->check = ~((csum & 0xffff) + (csum >> 16));
}
-static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
- __u16 l2_proto)
+static __always_inline int __encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
+ __u16 l2_proto, __u16 ext_proto)
{
__u16 udp_dst = UDP_PORT;
struct iphdr iph_inner;
struct v4hdr h_outer;
struct tcphdr tcph;
int olen, l2_len;
+ __u8 *l2_hdr = NULL;
int tcp_off;
__u64 flags;
@@ -141,7 +157,11 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
break;
case ETH_P_TEB:
l2_len = ETH_HLEN;
- udp_dst = ETH_OVER_UDP_PORT;
+ if (ext_proto & EXTPROTO_VXLAN) {
+ udp_dst = VXLAN_UDP_PORT;
+ l2_len += sizeof(struct vxlanhdr);
+ } else
+ udp_dst = ETH_OVER_UDP_PORT;
break;
}
flags |= BPF_F_ADJ_ROOM_ENCAP_L2(l2_len);
@@ -171,14 +191,26 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
}
/* add L2 encap (if specified) */
+ l2_hdr = (__u8 *)&h_outer + olen;
switch (l2_proto) {
case ETH_P_MPLS_UC:
- *((__u32 *)((__u8 *)&h_outer + olen)) = mpls_label;
+ *(__u32 *)l2_hdr = mpls_label;
break;
case ETH_P_TEB:
- if (bpf_skb_load_bytes(skb, 0, (__u8 *)&h_outer + olen,
- ETH_HLEN))
+ flags |= BPF_F_ADJ_ROOM_ENCAP_L2_ETH;
+
+ if (ext_proto & EXTPROTO_VXLAN) {
+ struct vxlanhdr *vxlan_hdr = (struct vxlanhdr *)l2_hdr;
+
+ vxlan_hdr->vx_flags = VXLAN_FLAGS;
+ vxlan_hdr->vx_vni = bpf_htonl((VXLAN_VNI & VXLAN_VNI_MASK) << 8);
+
+ l2_hdr += sizeof(struct vxlanhdr);
+ }
+
+ if (bpf_skb_load_bytes(skb, 0, l2_hdr, ETH_HLEN))
return TC_ACT_SHOT;
+
break;
}
olen += l2_len;
@@ -214,14 +246,21 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
return TC_ACT_OK;
}
-static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
+static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
__u16 l2_proto)
{
+ return __encap_ipv4(skb, encap_proto, l2_proto, 0);
+}
+
+static __always_inline int __encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
+ __u16 l2_proto, __u16 ext_proto)
+{
__u16 udp_dst = UDP_PORT;
struct ipv6hdr iph_inner;
struct v6hdr h_outer;
struct tcphdr tcph;
int olen, l2_len;
+ __u8 *l2_hdr = NULL;
__u16 tot_len;
__u64 flags;
@@ -249,7 +288,11 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
break;
case ETH_P_TEB:
l2_len = ETH_HLEN;
- udp_dst = ETH_OVER_UDP_PORT;
+ if (ext_proto & EXTPROTO_VXLAN) {
+ udp_dst = VXLAN_UDP_PORT;
+ l2_len += sizeof(struct vxlanhdr);
+ } else
+ udp_dst = ETH_OVER_UDP_PORT;
break;
}
flags |= BPF_F_ADJ_ROOM_ENCAP_L2(l2_len);
@@ -267,7 +310,7 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src);
h_outer.l4hdr.udp.dest = bpf_htons(udp_dst);
tot_len = bpf_ntohs(iph_inner.payload_len) + sizeof(iph_inner) +
- sizeof(h_outer.l4hdr.udp);
+ sizeof(h_outer.l4hdr.udp) + l2_len;
h_outer.l4hdr.udp.check = 0;
h_outer.l4hdr.udp.len = bpf_htons(tot_len);
break;
@@ -278,13 +321,24 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
}
/* add L2 encap (if specified) */
+ l2_hdr = (__u8 *)&h_outer + olen;
switch (l2_proto) {
case ETH_P_MPLS_UC:
- *((__u32 *)((__u8 *)&h_outer + olen)) = mpls_label;
+ *(__u32 *)l2_hdr = mpls_label;
break;
case ETH_P_TEB:
- if (bpf_skb_load_bytes(skb, 0, (__u8 *)&h_outer + olen,
- ETH_HLEN))
+ flags |= BPF_F_ADJ_ROOM_ENCAP_L2_ETH;
+
+ if (ext_proto & EXTPROTO_VXLAN) {
+ struct vxlanhdr *vxlan_hdr = (struct vxlanhdr *)l2_hdr;
+
+ vxlan_hdr->vx_flags = VXLAN_FLAGS;
+ vxlan_hdr->vx_vni = bpf_htonl((VXLAN_VNI & VXLAN_VNI_MASK) << 8);
+
+ l2_hdr += sizeof(struct vxlanhdr);
+ }
+
+ if (bpf_skb_load_bytes(skb, 0, l2_hdr, ETH_HLEN))
return TC_ACT_SHOT;
break;
}
@@ -309,6 +363,12 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
return TC_ACT_OK;
}
+static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
+ __u16 l2_proto)
+{
+ return __encap_ipv6(skb, encap_proto, l2_proto, 0);
+}
+
SEC("encap_ipip_none")
int __encap_ipip_none(struct __sk_buff *skb)
{
@@ -372,6 +432,17 @@ int __encap_udp_eth(struct __sk_buff *skb)
return TC_ACT_OK;
}
+SEC("encap_vxlan_eth")
+int __encap_vxlan_eth(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+ return __encap_ipv4(skb, IPPROTO_UDP,
+ ETH_P_TEB,
+ EXTPROTO_VXLAN);
+ else
+ return TC_ACT_OK;
+}
+
SEC("encap_sit_none")
int __encap_sit_none(struct __sk_buff *skb)
{
@@ -444,6 +515,17 @@ int __encap_ip6udp_eth(struct __sk_buff *skb)
return TC_ACT_OK;
}
+SEC("encap_ip6vxlan_eth")
+int __encap_ip6vxlan_eth(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+ return __encap_ipv6(skb, IPPROTO_UDP,
+ ETH_P_TEB,
+ EXTPROTO_VXLAN);
+ else
+ return TC_ACT_OK;
+}
+
static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
{
char buf[sizeof(struct v6hdr)];
@@ -479,6 +561,9 @@ static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
case ETH_OVER_UDP_PORT:
olen += ETH_HLEN;
break;
+ case VXLAN_UDP_PORT:
+ olen += ETH_HLEN + sizeof(struct vxlanhdr);
+ break;
}
break;
default:
diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
index ba6eadfec565..e7b673117436 100644
--- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
@@ -396,7 +396,7 @@ int _ip6vxlan_get_tunnel(struct __sk_buff *skb)
SEC("geneve_set_tunnel")
int _geneve_set_tunnel(struct __sk_buff *skb)
{
- int ret, ret2;
+ int ret;
struct bpf_tunnel_key key;
struct geneve_opt gopt;
diff --git a/tools/testing/selftests/bpf/test_bpftool_build.sh b/tools/testing/selftests/bpf/test_bpftool_build.sh
index 2db3c60e1e61..ac349a5cea7e 100755
--- a/tools/testing/selftests/bpf/test_bpftool_build.sh
+++ b/tools/testing/selftests/bpf/test_bpftool_build.sh
@@ -85,23 +85,6 @@ make_with_tmpdir() {
echo
}
-make_doc_and_clean() {
- echo -e "\$PWD: $PWD"
- echo -e "command: make -s $* doc >/dev/null"
- RST2MAN_OPTS="--exit-status=1" make $J -s $* doc
- if [ $? -ne 0 ] ; then
- ERROR=1
- printf "FAILURE: Errors or warnings when building documentation\n"
- fi
- (
- if [ $# -ge 1 ] ; then
- cd ${@: -1}
- fi
- make -s doc-clean
- )
- echo
-}
-
echo "Trying to build bpftool"
echo -e "... through kbuild\n"
@@ -162,7 +145,3 @@ make_and_clean
make_with_tmpdir OUTPUT
make_with_tmpdir O
-
-echo -e "Checking documentation build\n"
-# From tools/bpf/bpftool
-make_doc_and_clean
diff --git a/tools/testing/selftests/bpf/test_btf.h b/tools/testing/selftests/bpf/test_btf.h
index 2023725f1962..e2394eea4b7f 100644
--- a/tools/testing/selftests/bpf/test_btf.h
+++ b/tools/testing/selftests/bpf/test_btf.h
@@ -66,4 +66,7 @@
#define BTF_FUNC_ENC(name, func_proto) \
BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), func_proto)
+#define BTF_TYPE_FLOAT_ENC(name, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz)
+
#endif /* _TEST_BTF_H */
diff --git a/tools/testing/selftests/bpf/test_doc_build.sh b/tools/testing/selftests/bpf/test_doc_build.sh
new file mode 100755
index 000000000000..7eb940a7b2eb
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_doc_build.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+# Assume script is located under tools/testing/selftests/bpf/. We want to start
+# build attempts from the top of the kernel repository.
+SCRIPT_REL_PATH=$(realpath --relative-to=$PWD $0)
+SCRIPT_REL_DIR=$(dirname $SCRIPT_REL_PATH)
+KDIR_ROOT_DIR=$(realpath $PWD/$SCRIPT_REL_DIR/../../../../)
+cd $KDIR_ROOT_DIR
+
+for tgt in docs docs-clean; do
+ make -s -C $PWD/$SCRIPT_REL_DIR $tgt;
+done
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index f7c2fd89d01a..dda52cb649dc 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -130,6 +130,20 @@ extern int test__join_cgroup(const char *path);
#define CHECK_ATTR(condition, tag, format...) \
_CHECK(condition, tag, tattr.duration, format)
+#define ASSERT_TRUE(actual, name) ({ \
+ static int duration = 0; \
+ bool ___ok = (actual); \
+ CHECK(!___ok, (name), "unexpected %s: got FALSE\n", (name)); \
+ ___ok; \
+})
+
+#define ASSERT_FALSE(actual, name) ({ \
+ static int duration = 0; \
+ bool ___ok = !(actual); \
+ CHECK(!___ok, (name), "unexpected %s: got TRUE\n", (name)); \
+ ___ok; \
+})
+
#define ASSERT_EQ(actual, expected, name) ({ \
static int duration = 0; \
typeof(actual) ___act = (actual); \
@@ -152,6 +166,50 @@ extern int test__join_cgroup(const char *path);
___ok; \
})
+#define ASSERT_LT(actual, expected, name) ({ \
+ static int duration = 0; \
+ typeof(actual) ___act = (actual); \
+ typeof(expected) ___exp = (expected); \
+ bool ___ok = ___act < ___exp; \
+ CHECK(!___ok, (name), \
+ "unexpected %s: actual %lld >= expected %lld\n", \
+ (name), (long long)(___act), (long long)(___exp)); \
+ ___ok; \
+})
+
+#define ASSERT_LE(actual, expected, name) ({ \
+ static int duration = 0; \
+ typeof(actual) ___act = (actual); \
+ typeof(expected) ___exp = (expected); \
+ bool ___ok = ___act <= ___exp; \
+ CHECK(!___ok, (name), \
+ "unexpected %s: actual %lld > expected %lld\n", \
+ (name), (long long)(___act), (long long)(___exp)); \
+ ___ok; \
+})
+
+#define ASSERT_GT(actual, expected, name) ({ \
+ static int duration = 0; \
+ typeof(actual) ___act = (actual); \
+ typeof(expected) ___exp = (expected); \
+ bool ___ok = ___act > ___exp; \
+ CHECK(!___ok, (name), \
+ "unexpected %s: actual %lld <= expected %lld\n", \
+ (name), (long long)(___act), (long long)(___exp)); \
+ ___ok; \
+})
+
+#define ASSERT_GE(actual, expected, name) ({ \
+ static int duration = 0; \
+ typeof(actual) ___act = (actual); \
+ typeof(expected) ___exp = (expected); \
+ bool ___ok = ___act >= ___exp; \
+ CHECK(!___ok, (name), \
+ "unexpected %s: actual %lld < expected %lld\n", \
+ (name), (long long)(___act), (long long)(___exp)); \
+ ___ok; \
+})
+
#define ASSERT_STREQ(actual, expected, name) ({ \
static int duration = 0; \
const char *___act = actual; \
@@ -167,7 +225,8 @@ extern int test__join_cgroup(const char *path);
static int duration = 0; \
long long ___res = (res); \
bool ___ok = ___res == 0; \
- CHECK(!___ok, (name), "unexpected error: %lld\n", ___res); \
+ CHECK(!___ok, (name), "unexpected error: %lld (errno %d)\n", \
+ ___res, errno); \
___ok; \
})
@@ -199,7 +258,7 @@ extern int test__join_cgroup(const char *path);
#define ASSERT_ERR_PTR(ptr, name) ({ \
static int duration = 0; \
const void *___res = (ptr); \
- bool ___ok = IS_ERR(___res) \
+ bool ___ok = IS_ERR(___res); \
CHECK(!___ok, (name), "unexpected pointer: %p\n", ___res); \
___ok; \
})
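Like ASSERT_EQ, the new comparison macros evaluate to the boolean
result, so tests can branch on failure. A small usage sketch with a
hypothetical test body:

        /* Sketch: typical use inside a prog_tests/*.c function */
        static void sketch_asserts(int map_fd, long long runs, long long max_runs)
        {
                if (!ASSERT_GE(map_fd, 0, "map_fd"))
                        return;
                ASSERT_LT(runs, max_runs, "runs");
        }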
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 427ca00a3217..eefd445b96fc 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -732,7 +732,7 @@ static int sendmsg_test(struct sockmap_options *opt)
* socket is not a valid test. So in this case lets not
* enable kTLS but still run the test.
*/
- if (!txmsg_redir || (txmsg_redir && txmsg_ingress)) {
+ if (!txmsg_redir || txmsg_ingress) {
err = sockmap_init_ktls(opt->verbose, rx_fd);
if (err)
return err;
diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
index 7c76b841b17b..c9dde9b9d987 100755
--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
@@ -44,8 +44,8 @@ setup() {
# clamp route to reserve room for tunnel headers
ip -netns "${ns1}" -4 route flush table main
ip -netns "${ns1}" -6 route flush table main
- ip -netns "${ns1}" -4 route add "${ns2_v4}" mtu 1458 dev veth1
- ip -netns "${ns1}" -6 route add "${ns2_v6}" mtu 1438 dev veth1
+ ip -netns "${ns1}" -4 route add "${ns2_v4}" mtu 1450 dev veth1
+ ip -netns "${ns1}" -6 route add "${ns2_v6}" mtu 1430 dev veth1
sleep 1
@@ -105,6 +105,12 @@ if [[ "$#" -eq "0" ]]; then
echo "sit"
$0 ipv6 sit none 100
+ echo "ip4 vxlan"
+ $0 ipv4 vxlan eth 2000
+
+ echo "ip6 vxlan"
+ $0 ipv6 ip6vxlan eth 2000
+
for mac in none mpls eth ; do
echo "ip gre $mac"
$0 ipv4 gre $mac 100
@@ -214,6 +220,9 @@ if [[ "$tuntype" =~ "udp" ]]; then
targs="encap fou encap-sport auto encap-dport $dport"
elif [[ "$tuntype" =~ "gre" && "$mac" == "eth" ]]; then
ttype=$gretaptype
+elif [[ "$tuntype" =~ "vxlan" && "$mac" == "eth" ]]; then
+ ttype="vxlan"
+ targs="id 1 dstport 8472 udp6zerocsumrx"
else
ttype=$tuntype
targs=""
@@ -242,7 +251,7 @@ if [[ "$tuntype" == "ip6udp" && "$mac" == "mpls" ]]; then
elif [[ "$tuntype" =~ "udp" && "$mac" == "eth" ]]; then
# No support for TEB fou tunnel; expect failure.
expect_tun_fail=1
-elif [[ "$tuntype" =~ "gre" && "$mac" == "eth" ]]; then
+elif [[ "$tuntype" =~ (gre|vxlan) && "$mac" == "eth" ]]; then
# Share ethernet address between tunnel/veth2 so L2 decap works.
ethaddr=$(ip netns exec "${ns2}" ip link show veth2 | \
awk '/ether/ { print $2 }')
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 58b5a349d3ba..1512092e1e68 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -105,7 +105,7 @@ struct bpf_test {
enum bpf_prog_type prog_type;
uint8_t flags;
void (*fill_helper)(struct bpf_test *self);
- uint8_t runs;
+ int runs;
#define bpf_testdata_struct_t \
struct { \
uint32_t retval, retval_unpriv; \
@@ -1165,7 +1165,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
run_errs = 0;
run_successes = 0;
- if (!alignment_prevented_execution && fd_prog >= 0) {
+ if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) {
uint32_t expected_val;
int i;
diff --git a/tools/testing/selftests/bpf/test_xsk.sh b/tools/testing/selftests/bpf/test_xsk.sh
index 88a7483eaae4..46633a3bfb0b 100755
--- a/tools/testing/selftests/bpf/test_xsk.sh
+++ b/tools/testing/selftests/bpf/test_xsk.sh
@@ -71,13 +71,21 @@
#
# Run (full output without color-coding):
# sudo ./test_xsk.sh
+#
+# Run with verbose output:
+# sudo ./test_xsk.sh -v
+#
+# Run and dump packet contents:
+# sudo ./test_xsk.sh -D
. xsk_prereqs.sh
-while getopts c flag
+while getopts "cvD" flag
do
case "${flag}" in
c) colorconsole=1;;
+ v) verbose=1;;
+ D) dump_pkts=1;;
esac
done
@@ -95,17 +103,22 @@ NS1=af_xdp${VETH1_POSTFIX}
MTU=1500
setup_vethPairs() {
- echo "setting up ${VETH0}: namespace: ${NS0}"
+ if [[ $verbose -eq 1 ]]; then
+ echo "setting up ${VETH0}: namespace: ${NS0}"
+ fi
ip netns add ${NS1}
- ip link add ${VETH0} type veth peer name ${VETH1}
+ ip link add ${VETH0} numtxqueues 4 numrxqueues 4 type veth peer name ${VETH1} numtxqueues 4 numrxqueues 4
if [ -f /proc/net/if_inet6 ]; then
echo 1 > /proc/sys/net/ipv6/conf/${VETH0}/disable_ipv6
fi
- echo "setting up ${VETH1}: namespace: ${NS1}"
+ if [[ $verbose -eq 1 ]]; then
+ echo "setting up ${VETH1}: namespace: ${NS1}"
+ fi
ip link set ${VETH1} netns ${NS1}
ip netns exec ${NS1} ip link set ${VETH1} mtu ${MTU}
ip link set ${VETH0} mtu ${MTU}
ip netns exec ${NS1} ip link set ${VETH1} up
+ ip netns exec ${NS1} ip link set dev lo up
ip link set ${VETH0} up
}
@@ -125,121 +138,24 @@ echo "${VETH0}:${VETH1},${NS1}" > ${SPECFILE}
validate_veth_spec_file
-echo "Spec file created: ${SPECFILE}"
-
-test_status $retval "${TEST_NAME}"
-
-## START TESTS
-
-statusList=()
-
-### TEST 1
-TEST_NAME="XSK KSELFTEST FRAMEWORK"
-
-echo "Switching interfaces [${VETH0}, ${VETH1}] to XDP Generic mode"
-vethXDPgeneric ${VETH0} ${VETH1} ${NS1}
-
-retval=$?
-if [ $retval -eq 0 ]; then
- echo "Switching interfaces [${VETH0}, ${VETH1}] to XDP Native mode"
- vethXDPnative ${VETH0} ${VETH1} ${NS1}
+if [[ $verbose -eq 1 ]]; then
+ echo "Spec file created: ${SPECFILE}"
+ VERBOSE_ARG="-v"
fi
-retval=$?
-test_status $retval "${TEST_NAME}"
-statusList+=($retval)
-
-### TEST 2
-TEST_NAME="SKB NOPOLL"
-
-vethXDPgeneric ${VETH0} ${VETH1} ${NS1}
-
-params=("-S")
-execxdpxceiver params
-
-retval=$?
-test_status $retval "${TEST_NAME}"
-statusList+=($retval)
-
-### TEST 3
-TEST_NAME="SKB POLL"
-
-vethXDPgeneric ${VETH0} ${VETH1} ${NS1}
-
-params=("-S" "-p")
-execxdpxceiver params
-
-retval=$?
-test_status $retval "${TEST_NAME}"
-statusList+=($retval)
-
-### TEST 4
-TEST_NAME="DRV NOPOLL"
-
-vethXDPnative ${VETH0} ${VETH1} ${NS1}
-
-params=("-N")
-execxdpxceiver params
-
-retval=$?
-test_status $retval "${TEST_NAME}"
-statusList+=($retval)
-
-### TEST 5
-TEST_NAME="DRV POLL"
-
-vethXDPnative ${VETH0} ${VETH1} ${NS1}
-
-params=("-N" "-p")
-execxdpxceiver params
-
-retval=$?
-test_status $retval "${TEST_NAME}"
-statusList+=($retval)
-
-### TEST 6
-TEST_NAME="SKB SOCKET TEARDOWN"
-
-vethXDPgeneric ${VETH0} ${VETH1} ${NS1}
-
-params=("-S" "-T")
-execxdpxceiver params
-
-retval=$?
-test_status $retval "${TEST_NAME}"
-statusList+=($retval)
-
-### TEST 7
-TEST_NAME="DRV SOCKET TEARDOWN"
-
-vethXDPnative ${VETH0} ${VETH1} ${NS1}
-
-params=("-N" "-T")
-execxdpxceiver params
+if [[ $dump_pkts -eq 1 ]]; then
+ DUMP_PKTS_ARG="-D"
+fi
-retval=$?
test_status $retval "${TEST_NAME}"
-statusList+=($retval)
-### TEST 8
-TEST_NAME="SKB BIDIRECTIONAL SOCKETS"
-
-vethXDPgeneric ${VETH0} ${VETH1} ${NS1}
-
-params=("-S" "-B")
-execxdpxceiver params
-
-retval=$?
-test_status $retval "${TEST_NAME}"
-statusList+=($retval)
+## START TESTS
-### TEST 9
-TEST_NAME="DRV BIDIRECTIONAL SOCKETS"
+statusList=()
-vethXDPnative ${VETH0} ${VETH1} ${NS1}
+TEST_NAME="XSK KSELFTESTS"
-params=("-N" "-B")
-execxdpxceiver params
+execxdpxceiver
retval=$?
test_status $retval "${TEST_NAME}"
diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c
index 1b138cd2b187..1b1c798e9248 100644
--- a/tools/testing/selftests/bpf/verifier/array_access.c
+++ b/tools/testing/selftests/bpf/verifier/array_access.c
@@ -186,7 +186,7 @@
},
.fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
- .errstr = "invalid access to map value, value_size=48 off=44 size=8",
+ .errstr = "R0 unbounded memory access",
.result_unpriv = REJECT,
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
diff --git a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
index 69b048cf46d9..3e024c891178 100644
--- a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
+++ b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
@@ -42,3 +42,46 @@
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
+{
+ "bpf_get_task_stack return R0 range is refined",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_6, 0), // ctx->meta->seq
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, 8), // ctx->task
+ BPF_LD_MAP_FD(BPF_REG_1, 0), // fixup_map_array_48b
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), // keep buf for seq_write
+ BPF_MOV64_IMM(BPF_REG_3, 48),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_EMIT_CALL(BPF_FUNC_get_task_stack),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_seq_write),
+
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACING,
+ .expected_attach_type = BPF_TRACE_ITER,
+ .kfunc = "task",
+ .runs = -1, // Don't run, just load
+ .fixup_map_array_48b = { 3 },
+},
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index eb888c8479c3..336a749673d1 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -19,7 +19,7 @@
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "function calls to other bpf functions are allowed for",
+ .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 1,
@@ -136,7 +136,7 @@
{
"calls: wrong src reg",
.insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 3, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
@@ -397,7 +397,7 @@
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "function calls to other bpf functions are allowed for",
+ .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.fixup_map_hash_48b = { 3 },
.result_unpriv = REJECT,
.result = ACCEPT,
@@ -1977,7 +1977,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "function calls to other bpf functions are allowed for",
+ .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
},
@@ -2003,7 +2003,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "function calls to other bpf functions are allowed for",
+ .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.errstr = "!read_ok",
.result = REJECT,
},
@@ -2028,7 +2028,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "function calls to other bpf functions are allowed for",
+ .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.errstr = "!read_ok",
.result = REJECT,
},
diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c
index fb13ca2d5606..d78627be060f 100644
--- a/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c
+++ b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c
@@ -239,6 +239,7 @@
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
+ .runs = -1,
},
/* invalid 8-byte reads from 4-byte fields in bpf_sk_lookup */
{
diff --git a/tools/testing/selftests/bpf/verifier/dead_code.c b/tools/testing/selftests/bpf/verifier/dead_code.c
index 5cf361d8eb1c..17fe33a75034 100644
--- a/tools/testing/selftests/bpf/verifier/dead_code.c
+++ b/tools/testing/selftests/bpf/verifier/dead_code.c
@@ -85,7 +85,7 @@
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "function calls to other bpf functions are allowed for",
+ .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
@@ -103,7 +103,7 @@
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "function calls to other bpf functions are allowed for",
+ .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
@@ -121,7 +121,7 @@
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "function calls to other bpf functions are allowed for",
+ .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
@@ -137,7 +137,7 @@
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "function calls to other bpf functions are allowed for",
+ .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
@@ -152,7 +152,7 @@
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "function calls to other bpf functions are allowed for",
+ .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh
index 26ae8d0b6ce3..8889b3f55236 100755
--- a/tools/testing/selftests/bpf/vmtest.sh
+++ b/tools/testing/selftests/bpf/vmtest.sh
@@ -17,19 +17,22 @@ KCONFIG_URL="https://raw.githubusercontent.com/libbpf/libbpf/master/travis-ci/vm
KCONFIG_API_URL="https://api.github.com/repos/libbpf/libbpf/contents/travis-ci/vmtest/configs/latest.config"
INDEX_URL="https://raw.githubusercontent.com/libbpf/libbpf/master/travis-ci/vmtest/configs/INDEX"
NUM_COMPILE_JOBS="$(nproc)"
+LOG_FILE_BASE="$(date +"bpf_selftests.%Y-%m-%d_%H-%M-%S")"
+LOG_FILE="${LOG_FILE_BASE}.log"
+EXIT_STATUS_FILE="${LOG_FILE_BASE}.exit_status"
usage()
{
cat <<EOF
-Usage: $0 [-i] [-d <output_dir>] -- [<command>]
+Usage: $0 [-i] [-s] [-d <output_dir>] -- [<command>]
<command> is the command you would normally run when you are in
tools/testing/selftests/bpf. e.g:
$0 -- ./test_progs -t test_lsm
-If no command is specified, "${DEFAULT_COMMAND}" will be run by
-default.
+If no command is specified and a debug shell (-s) is not requested,
+"${DEFAULT_COMMAND}" will be run by default.
If you build your kernel using KBUILD_OUTPUT= or O= options, these
can be passed as environment variables to the script:
@@ -46,6 +49,9 @@ Options:
-d) Update the output directory (default: ${OUTPUT_DIR})
-j) Number of jobs for compilation, similar to -j in make
(default: ${NUM_COMPILE_JOBS})
+ -s) Instead of powering off the VM, start an interactive
+ shell. If <command> is specified, the shell runs after
+ the command finishes executing
EOF
}
@@ -146,7 +152,7 @@ update_init_script()
local init_script_dir="${OUTPUT_DIR}/${MOUNT_DIR}/etc/rcS.d"
local init_script="${init_script_dir}/S50-startup"
local command="$1"
- local log_file="$2"
+ local exit_command="$2"
mount_image
@@ -160,17 +166,26 @@ EOF
fi
- sudo bash -c "cat >${init_script}" <<EOF
-#!/bin/bash
+ sudo bash -c "echo '#!/bin/bash' > ${init_script}"
+
+ if [[ "${command}" != "" ]]; then
+ sudo bash -c "cat >>${init_script}" <<EOF
+# Have a default value in the exit status file
+# incase the VM is forcefully stopped.
+echo "130" > "/root/${EXIT_STATUS_FILE}"
{
cd /root/bpf
echo ${command}
stdbuf -oL -eL ${command}
-} 2>&1 | tee /root/${log_file}
-poweroff -f
+ echo "\$?" > "/root/${EXIT_STATUS_FILE}"
+} 2>&1 | tee "/root/${LOG_FILE}"
+# Ensure that the logs are written to disk
+sync
EOF
+ fi
+ sudo bash -c "echo ${exit_command} >> ${init_script}"
sudo chmod a+x "${init_script}"
unmount_image
}
@@ -221,10 +236,12 @@ EOF
copy_logs()
{
local mount_dir="${OUTPUT_DIR}/${MOUNT_DIR}"
- local log_file="${mount_dir}/root/$1"
+ local log_file="${mount_dir}/root/${LOG_FILE}"
+ local exit_status_file="${mount_dir}/root/${EXIT_STATUS_FILE}"
mount_image
sudo cp ${log_file} "${OUTPUT_DIR}"
+ sudo cp ${exit_status_file} "${OUTPUT_DIR}"
sudo rm -f ${log_file}
unmount_image
}
@@ -263,14 +280,15 @@ main()
{
local script_dir="$(cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P)"
local kernel_checkout=$(realpath "${script_dir}"/../../../../)
- local log_file="$(date +"bpf_selftests.%Y-%m-%d_%H-%M-%S.log")"
# By default the script searches for the kernel in the checkout directory but
# it also obeys environment variables O= and KBUILD_OUTPUT=
local kernel_bzimage="${kernel_checkout}/${X86_BZIMAGE}"
local command="${DEFAULT_COMMAND}"
local update_image="no"
+ local exit_command="poweroff -f"
+ local debug_shell="no"
- while getopts 'hkid:j:' opt; do
+ while getopts 'hskid:j:' opt; do
case ${opt} in
i)
update_image="yes"
@@ -281,6 +299,11 @@ main()
j)
NUM_COMPILE_JOBS="$OPTARG"
;;
+ s)
+ command=""
+ debug_shell="yes"
+ exit_command="bash"
+ ;;
h)
usage
exit 0
@@ -299,7 +322,7 @@ main()
done
shift $((OPTIND -1))
- if [[ $# -eq 0 ]]; then
+ if [[ $# -eq 0 && "${debug_shell}" == "no" ]]; then
echo "No command specified, will run ${DEFAULT_COMMAND} in the vm"
else
command="$@"
@@ -347,19 +370,25 @@ main()
fi
update_selftests "${kernel_checkout}" "${make_command}"
- update_init_script "${command}" "${log_file}"
+ update_init_script "${command}" "${exit_command}"
run_vm "${kernel_bzimage}"
- copy_logs "${log_file}"
- echo "Logs saved in ${OUTPUT_DIR}/${log_file}"
+ if [[ "${command}" != "" ]]; then
+ copy_logs
+ echo "Logs saved in ${OUTPUT_DIR}/${LOG_FILE}"
+ fi
}
catch()
{
local exit_code=$1
+ local exit_status_file="${OUTPUT_DIR}/${EXIT_STATUS_FILE}"
# This is just a cleanup and the directory may
# have already been unmounted. So, don't let this
# clobber the error code we intend to return.
unmount_image || true
+ if [[ -f "${exit_status_file}" ]]; then
+ exit_code="$(cat ${exit_status_file})"
+ fi
exit ${exit_code}
}
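Since `poweroff -f` discards the exit code of whatever ran inside the VM, the status now travels through a file that catch() reads back on the host. A hypothetical C equivalent of that host-side logic (sketch; the helper name is illustrative, not part of the patch):

#include <stdio.h>

/* Prefer the exit status the guest recorded over the local one. */
static int final_exit_code(const char *exit_status_file, int local_code)
{
	FILE *f = fopen(exit_status_file, "r");
	int code = local_code;

	if (!f)
		return local_code;	/* VM never wrote one, keep ours */
	if (fscanf(f, "%d", &code) != 1)
		code = local_code;
	fclose(f);
	return code;
}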
diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xdpxceiver.c
index f4a96d5ff524..1135fb980814 100644
--- a/tools/testing/selftests/bpf/xdpxceiver.c
+++ b/tools/testing/selftests/bpf/xdpxceiver.c
@@ -18,12 +18,7 @@
* These selftests test AF_XDP SKB and Native/DRV modes using veth
* Virtual Ethernet interfaces.
*
- * The following tests are run:
- *
- * 1. AF_XDP SKB mode
- * Generic mode XDP is driver independent, used when the driver does
- * not have support for XDP. Works on any netdevice using sockets and
- * generic XDP path. XDP hook from netif_receive_skb().
+ * For each mode, the following tests are run:
* a. nopoll - soft-irq processing
* b. poll - using poll() syscall
* c. Socket Teardown
@@ -33,19 +28,25 @@
* Configure sockets as bi-directional tx/rx sockets, set up fill and
* completion rings on each socket, and tx/rx in both directions. Only
* nopoll mode is used
+ * e. Statistics
+ * Trigger some error conditions and ensure that the appropriate statistics
+ * are incremented. Within this test, the following statistics are tested:
+ * i. rx dropped
+ * Increase the UMEM frame headroom to a value which results in
+ * insufficient space in the rx buffer for both the packet and the headroom.
+ * ii. tx invalid
+ * Set the 'len' field of tx descriptors to an invalid value (umem frame
+ * size + 1).
+ * iii. rx ring full
+ * Reduce the size of the RX ring to a fraction of the fill ring size.
+ * iv. fill queue empty
+ *    Do not populate the fill queue and then try to receive packets.
+ * f. bpf_link resource persistence
+ *    Configure sockets at indexes 0 and 1, run traffic on queue id 0,
+ *    then remove the xsk sockets from queue 0 on both veth interfaces and
+ *    finally run traffic on queue id 1
*
- * 2. AF_XDP DRV/Native mode
- * Works on any netdevice with XDP_REDIRECT support, driver dependent. Processes
- * packets before SKB allocation. Provides better performance than SKB. Driver
- * hook available just after DMA of buffer descriptor.
- * a. nopoll
- * b. poll
- * c. Socket Teardown
- * d. Bi-directional sockets
- * - Only copy mode is supported because veth does not currently support
- * zero-copy mode
- *
- * Total tests: 8
+ * Total tests: 12
*
* Flow:
* -----
@@ -58,7 +59,7 @@
* - Rx thread verifies that all 10k packets were received and delivered in-order,
* and have the right content
*
- * Enable/disable debug mode:
+ * Enable/disable packet dump mode:
* --------------------------------
* To enable L2 - L4 headers and payload dump of each packet on STDOUT, add
* parameter -D to params array in test_xsk.sh, i.e. params=("-S" "-D")
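As a hedged illustration of how the statistics tests sketched above can observe those counters (the patch's full version appears later in worker_stats_validate()), AF_XDP exports them through a getsockopt() on the socket:

#include <linux/if_xdp.h>
#include <sys/socket.h>

/* Sketch, assuming a kernel/uapi that provides XDP_STATISTICS. */
static int read_xdp_stats(int xsk_fd, struct xdp_statistics *stats)
{
	socklen_t optlen = sizeof(*stats);

	if (getsockopt(xsk_fd, SOL_XDP, XDP_STATISTICS, stats, &optlen))
		return -1;
	/* stats->rx_dropped, stats->tx_invalid_descs, stats->rx_ring_full
	 * and stats->rx_fill_ring_empty_descs back tests i.-iv. above. */
	return 0;
}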
@@ -96,35 +97,34 @@ typedef __u16 __sum16;
#include "xdpxceiver.h"
#include "../kselftest.h"
+static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
+static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
+static const char *IP1 = "192.168.100.162";
+static const char *IP2 = "192.168.100.161";
+static const u16 UDP_PORT1 = 2020;
+static const u16 UDP_PORT2 = 2121;
+
static void __exit_with_error(int error, const char *file, const char *func, int line)
{
- ksft_test_result_fail
- ("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error, strerror(error));
- ksft_exit_xfail();
+ if (configured_mode == TEST_MODE_UNCONFIGURED) {
+ ksft_exit_fail_msg
+ ("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error, strerror(error));
+ } else {
+ ksft_test_result_fail
+ ("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error, strerror(error));
+ ksft_exit_xfail();
+ }
}
#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__)
#define print_ksft_result(void)\
- (ksft_test_result_pass("PASS: %s %s %s%s\n", uut ? "DRV" : "SKB", opt_poll ? "POLL" :\
- "NOPOLL", opt_teardown ? "Socket Teardown" : "",\
- opt_bidi ? "Bi-directional Sockets" : ""))
-
-static void pthread_init_mutex(void)
-{
- pthread_mutex_init(&sync_mutex, NULL);
- pthread_mutex_init(&sync_mutex_tx, NULL);
- pthread_cond_init(&signal_rx_condition, NULL);
- pthread_cond_init(&signal_tx_condition, NULL);
-}
-
-static void pthread_destroy_mutex(void)
-{
- pthread_mutex_destroy(&sync_mutex);
- pthread_mutex_destroy(&sync_mutex_tx);
- pthread_cond_destroy(&signal_rx_condition);
- pthread_cond_destroy(&signal_tx_condition);
-}
+ (ksft_test_result_pass("PASS: %s %s %s%s%s%s\n", configured_mode ? "DRV" : "SKB",\
+ test_type == TEST_TYPE_POLL ? "POLL" : "NOPOLL",\
+ test_type == TEST_TYPE_TEARDOWN ? "Socket Teardown" : "",\
+ test_type == TEST_TYPE_BIDI ? "Bi-directional Sockets" : "",\
+ test_type == TEST_TYPE_STATS ? "Stats" : "",\
+ test_type == TEST_TYPE_BPF_RES ? "BPF RES" : ""))
static void *memset32_htonl(void *dest, u32 val, u32 size)
{
@@ -143,24 +143,11 @@ static void *memset32_htonl(void *dest, u32 val, u32 size)
}
/*
- * This function code has been taken from
- * Linux kernel lib/checksum.c
- */
-static inline unsigned short from32to16(unsigned int x)
-{
- /* add up 16-bit and 16-bit for 16+c bit */
- x = (x & 0xffff) + (x >> 16);
- /* add up carry.. */
- x = (x & 0xffff) + (x >> 16);
- return x;
-}
-
-/*
* Fold a partial checksum
* This function code has been taken from
* Linux kernel include/asm-generic/checksum.h
*/
-static inline __u16 csum_fold(__u32 csum)
+static __u16 csum_fold(__u32 csum)
{
u32 sum = (__force u32)csum;
@@ -173,7 +160,7 @@ static inline __u16 csum_fold(__u32 csum)
* This function code has been taken from
* Linux kernel lib/checksum.c
*/
-static inline u32 from64to32(u64 x)
+static u32 from64to32(u64 x)
{
/* add up 32-bit and 32-bit for 32+c bit */
x = (x & 0xffffffff) + (x >> 32);
@@ -182,13 +169,11 @@ static inline u32 from64to32(u64 x)
return (u32)x;
}
-__u32 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum);
-
/*
* This function code has been taken from
* Linux kernel lib/checksum.c
*/
-__u32 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum)
+static __u32 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum)
{
unsigned long long s = (__force u32)sum;
@@ -206,13 +191,12 @@ __u32 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u3
* This function has been taken from
* Linux kernel include/asm-generic/checksum.h
*/
-static inline __u16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum)
+static __u16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
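A short usage sketch of these helpers (hedged; it mirrors what the test's gen_udp_csum() does with the udp_csum() helper below): the checksum field must be zeroed before summing, since it is part of the region being checksummed.

/* Sketch: fill in the UDP checksum for a header + payload buffer.
 * Assumes the udp_csum() below and a host-order udp_len. */
static void fill_udp_csum(struct iphdr *ip_hdr, struct udphdr *udp_hdr,
			  u32 udp_len)
{
	udp_hdr->check = 0;
	udp_hdr->check = udp_csum(ip_hdr->saddr, ip_hdr->daddr,
				  udp_len, IPPROTO_UDP, (u16 *)udp_hdr);
}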
-static inline u16 udp_csum(u32 saddr, u32 daddr, u32 len, u8 proto, u16 *udp_pkt)
+static u16 udp_csum(u32 saddr, u32 daddr, u32 len, u8 proto, u16 *udp_pkt)
{
u32 csum = 0;
u32 cnt = 0;
@@ -267,26 +251,37 @@ static void gen_eth_frame(struct xsk_umem_info *umem, u64 addr)
memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data, PKT_SIZE);
}
-static void xsk_configure_umem(struct ifobject *data, void *buffer, u64 size)
+static void xsk_configure_umem(struct ifobject *data, void *buffer, int idx)
{
+ struct xsk_umem_config cfg = {
+ .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
+ .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
+ .frame_headroom = frame_headroom,
+ .flags = XSK_UMEM__DEFAULT_FLAGS
+ };
+ int size = num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE;
+ struct xsk_umem_info *umem;
int ret;
- data->umem = calloc(1, sizeof(struct xsk_umem_info));
- if (!data->umem)
+ umem = calloc(1, sizeof(struct xsk_umem_info));
+ if (!umem)
exit_with_error(errno);
- ret = xsk_umem__create(&data->umem->umem, buffer, size,
- &data->umem->fq, &data->umem->cq, NULL);
+ ret = xsk_umem__create(&umem->umem, buffer, size,
+ &umem->fq, &umem->cq, &cfg);
if (ret)
exit_with_error(ret);
- data->umem->buffer = buffer;
+ umem->buffer = buffer;
+
+ data->umem_arr[idx] = umem;
}
static void xsk_populate_fill_ring(struct xsk_umem_info *umem)
{
int ret, i;
- u32 idx;
+ u32 idx = 0;
ret = xsk_ring_prod__reserve(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS, &idx);
if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS)
@@ -296,51 +291,48 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem)
xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS);
}
-static int xsk_configure_socket(struct ifobject *ifobject)
+static int xsk_configure_socket(struct ifobject *ifobject, int idx)
{
struct xsk_socket_config cfg;
+ struct xsk_socket_info *xsk;
struct xsk_ring_cons *rxr;
struct xsk_ring_prod *txr;
int ret;
- ifobject->xsk = calloc(1, sizeof(struct xsk_socket_info));
- if (!ifobject->xsk)
+ xsk = calloc(1, sizeof(struct xsk_socket_info));
+ if (!xsk)
exit_with_error(errno);
- ifobject->xsk->umem = ifobject->umem;
- cfg.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ xsk->umem = ifobject->umem;
+ cfg.rx_size = rxqsize;
cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
cfg.libbpf_flags = 0;
- cfg.xdp_flags = opt_xdp_flags;
- cfg.bind_flags = opt_xdp_bind_flags;
+ cfg.xdp_flags = xdp_flags;
+ cfg.bind_flags = xdp_bind_flags;
- if (!opt_bidi) {
- rxr = (ifobject->fv.vector == rx) ? &ifobject->xsk->rx : NULL;
- txr = (ifobject->fv.vector == tx) ? &ifobject->xsk->tx : NULL;
+ if (test_type != TEST_TYPE_BIDI) {
+ rxr = (ifobject->fv.vector == rx) ? &xsk->rx : NULL;
+ txr = (ifobject->fv.vector == tx) ? &xsk->tx : NULL;
} else {
- rxr = &ifobject->xsk->rx;
- txr = &ifobject->xsk->tx;
+ rxr = &xsk->rx;
+ txr = &xsk->tx;
}
- ret = xsk_socket__create(&ifobject->xsk->xsk, ifobject->ifname,
- opt_queue, ifobject->umem->umem, rxr, txr, &cfg);
-
+ ret = xsk_socket__create(&xsk->xsk, ifobject->ifname, idx,
+ ifobject->umem->umem, rxr, txr, &cfg);
if (ret)
return 1;
+ ifobject->xsk_arr[idx] = xsk;
+
return 0;
}
static struct option long_options[] = {
{"interface", required_argument, 0, 'i'},
{"queue", optional_argument, 0, 'q'},
- {"poll", no_argument, 0, 'p'},
- {"xdp-skb", no_argument, 0, 'S'},
- {"xdp-native", no_argument, 0, 'N'},
- {"copy", no_argument, 0, 'c'},
- {"tear-down", no_argument, 0, 'T'},
- {"bidi", optional_argument, 0, 'B'},
- {"debug", optional_argument, 0, 'D'},
+ {"dump-pkts", optional_argument, 0, 'D'},
+ {"verbose", no_argument, 0, 'v'},
{"tx-pkt-count", optional_argument, 0, 'C'},
{0, 0, 0, 0}
};
@@ -352,23 +344,21 @@ static void usage(const char *prog)
" Options:\n"
" -i, --interface Use interface\n"
" -q, --queue=n Use queue n (default 0)\n"
- " -p, --poll Use poll syscall\n"
- " -S, --xdp-skb=n Use XDP SKB mode\n"
- " -N, --xdp-native=n Enforce XDP DRV (native) mode\n"
- " -c, --copy Force copy mode\n"
- " -T, --tear-down Tear down sockets by repeatedly recreating them\n"
- " -B, --bidi Bi-directional sockets test\n"
- " -D, --debug Debug mode - dump packets L2 - L5\n"
+ " -D, --dump-pkts Dump packets L2 - L5\n"
+ " -v, --verbose Verbose output\n"
" -C, --tx-pkt-count=n Number of packets to send\n";
ksft_print_msg(str, prog);
}
-static bool switch_namespace(int idx)
+static int switch_namespace(const char *nsname)
{
char fqns[26] = "/var/run/netns/";
int nsfd;
- strncat(fqns, ifdict[idx]->nsname, sizeof(fqns) - strlen(fqns) - 1);
+ if (!nsname || strlen(nsname) == 0)
+ return -1;
+
+ strncat(fqns, nsname, sizeof(fqns) - strlen(fqns) - 1);
nsfd = open(fqns, O_RDONLY);
if (nsfd == -1)
@@ -377,26 +367,9 @@ static bool switch_namespace(int idx)
if (setns(nsfd, 0) == -1)
exit_with_error(errno);
- return true;
-}
+ print_verbose("NS switched: %s\n", nsname);
-static void *nsswitchthread(void *args)
-{
- struct targs *targs = args;
-
- targs->retptr = false;
-
- if (switch_namespace(targs->idx)) {
- ifdict[targs->idx]->ifindex = if_nametoindex(ifdict[targs->idx]->ifname);
- if (!ifdict[targs->idx]->ifindex) {
- ksft_test_result_fail("ERROR: [%s] interface \"%s\" does not exist\n",
- __func__, ifdict[targs->idx]->ifname);
- } else {
- ksft_print_msg("Interface found: %s\n", ifdict[targs->idx]->ifname);
- targs->retptr = true;
- }
- }
- pthread_exit(NULL);
+ return nsfd;
}
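Returning the namespace fd instead of the old bool lets the caller keep the handle and close it during cleanup (see main() below). A hedged sketch of the underlying mechanics, including how a saved fd would allow switching back:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <unistd.h>

/* Sketch; assumes iproute2-style named namespaces under /var/run/netns. */
static int enter_netns(const char *path, int *saved_fd)
{
	int nsfd = open(path, O_RDONLY);

	if (nsfd < 0)
		return -1;
	/* Keep a handle on the current netns so we could setns() back. */
	*saved_fd = open("/proc/self/ns/net", O_RDONLY);
	if (setns(nsfd, CLONE_NEWNET)) {
		close(nsfd);
		return -1;
	}
	return nsfd;	/* caller is responsible for closing it */
}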
static int validate_interfaces(void)
@@ -408,33 +381,6 @@ static int validate_interfaces(void)
ret = false;
ksft_test_result_fail("ERROR: interfaces: -i <int>,<ns> -i <int>,<ns>.");
}
- if (strcmp(ifdict[i]->nsname, "")) {
- struct targs *targs;
-
- targs = malloc(sizeof(*targs));
- if (!targs)
- exit_with_error(errno);
-
- targs->idx = i;
- if (pthread_create(&ns_thread, NULL, nsswitchthread, targs))
- exit_with_error(errno);
-
- pthread_join(ns_thread, NULL);
-
- if (targs->retptr)
- ksft_print_msg("NS switched: %s\n", ifdict[i]->nsname);
-
- free(targs);
- } else {
- ifdict[i]->ifindex = if_nametoindex(ifdict[i]->ifname);
- if (!ifdict[i]->ifindex) {
- ksft_test_result_fail
- ("ERROR: interface \"%s\" does not exist\n", ifdict[i]->ifname);
- ret = false;
- } else {
- ksft_print_msg("Interface found: %s\n", ifdict[i]->ifname);
- }
- }
}
return ret;
}
@@ -446,7 +392,7 @@ static void parse_command_line(int argc, char **argv)
opterr = 0;
for (;;) {
- c = getopt_long(argc, argv, "i:q:pSNcTBDC:", long_options, &option_index);
+ c = getopt_long(argc, argv, "i:DC:v", long_options, &option_index);
if (c == -1)
break;
@@ -466,43 +412,26 @@ static void parse_command_line(int argc, char **argv)
MAX_INTERFACES_NAMESPACE_CHARS);
interface_index++;
break;
- case 'q':
- opt_queue = atoi(optarg);
- break;
- case 'p':
- opt_poll = 1;
- break;
- case 'S':
- opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
- opt_xdp_bind_flags |= XDP_COPY;
- uut = ORDER_CONTENT_VALIDATE_XDP_SKB;
- break;
- case 'N':
- opt_xdp_flags |= XDP_FLAGS_DRV_MODE;
- opt_xdp_bind_flags |= XDP_COPY;
- uut = ORDER_CONTENT_VALIDATE_XDP_DRV;
- break;
- case 'c':
- opt_xdp_bind_flags |= XDP_COPY;
- break;
- case 'T':
- opt_teardown = 1;
- break;
- case 'B':
- opt_bidi = 1;
- break;
case 'D':
debug_pkt_dump = 1;
break;
case 'C':
opt_pkt_count = atoi(optarg);
break;
+ case 'v':
+ opt_verbose = 1;
+ break;
default:
usage(basename(argv[0]));
ksft_exit_xfail();
}
}
+ if (!opt_pkt_count) {
+ print_verbose("No tx-pkt-count specified, using default %u\n", DEFAULT_PKT_CNT);
+ opt_pkt_count = DEFAULT_PKT_CNT;
+ }
+
if (!validate_interfaces()) {
usage(basename(argv[0]));
ksft_exit_xfail();
@@ -519,7 +448,7 @@ static void kick_tx(struct xsk_socket_info *xsk)
exit_with_error(errno);
}
-static inline void complete_tx_only(struct xsk_socket_info *xsk, int batch_size)
+static void complete_tx_only(struct xsk_socket_info *xsk, int batch_size)
{
unsigned int rcvd;
u32 idx;
@@ -527,7 +456,7 @@ static inline void complete_tx_only(struct xsk_socket_info *xsk, int batch_size)
if (!xsk->outstanding_tx)
return;
- if (!NEED_WAKEUP || xsk_ring_prod__needs_wakeup(&xsk->tx))
+ if (xsk_ring_prod__needs_wakeup(&xsk->tx))
kick_tx(xsk);
rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
@@ -597,8 +526,10 @@ static void rx_pkt(struct xsk_socket_info *xsk, struct pollfd *fds)
static void tx_only(struct xsk_socket_info *xsk, u32 *frameptr, int batch_size)
{
- u32 idx;
+ u32 idx = 0;
unsigned int i;
+ bool tx_invalid_test = stat_test_type == STAT_TEST_TX_INVALID;
+ u32 len = tx_invalid_test ? XSK_UMEM__DEFAULT_FRAME_SIZE + 1 : PKT_SIZE;
while (xsk_ring_prod__reserve(&xsk->tx, batch_size, &idx) < batch_size)
complete_tx_only(xsk, batch_size);
@@ -607,17 +538,21 @@ static void tx_only(struct xsk_socket_info *xsk, u32 *frameptr, int batch_size)
struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
tx_desc->addr = (*frameptr + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
- tx_desc->len = PKT_SIZE;
+ tx_desc->len = len;
}
xsk_ring_prod__submit(&xsk->tx, batch_size);
- xsk->outstanding_tx += batch_size;
+ if (!tx_invalid_test) {
+ xsk->outstanding_tx += batch_size;
+ } else if (xsk_ring_prod__needs_wakeup(&xsk->tx)) {
+ kick_tx(xsk);
+ }
*frameptr += batch_size;
*frameptr %= num_frames;
complete_tx_only(xsk, batch_size);
}
-static inline int get_batch_size(int pkt_cnt)
+static int get_batch_size(int pkt_cnt)
{
if (!opt_pkt_count)
return BATCH_SIZE;
@@ -654,7 +589,7 @@ static void tx_only_all(struct ifobject *ifobject)
while ((opt_pkt_count && pkt_cnt < opt_pkt_count) || !opt_pkt_count) {
int batch_size = get_batch_size(pkt_cnt);
- if (opt_poll) {
+ if (test_type == TEST_TYPE_POLL) {
ret = poll(fds, 1, POLL_TMOUT);
if (ret <= 0)
continue;
@@ -673,48 +608,43 @@ static void tx_only_all(struct ifobject *ifobject)
static void worker_pkt_dump(void)
{
- struct in_addr ipaddr;
+ struct ethhdr *ethhdr;
+ struct iphdr *iphdr;
+ struct udphdr *udphdr;
+ char s[128];
+ int payload;
+ void *ptr;
fprintf(stdout, "---------------------------------------\n");
for (int iter = 0; iter < num_frames - 1; iter++) {
+ ptr = pkt_buf[iter]->payload;
+ ethhdr = ptr;
+ iphdr = ptr + sizeof(*ethhdr);
+ udphdr = ptr + sizeof(*ethhdr) + sizeof(*iphdr);
+
/*extract L2 frame */
fprintf(stdout, "DEBUG>> L2: dst mac: ");
for (int i = 0; i < ETH_ALEN; i++)
- fprintf(stdout, "%02X", ((struct ethhdr *)
- pkt_buf[iter]->payload)->h_dest[i]);
+ fprintf(stdout, "%02X", ethhdr->h_dest[i]);
fprintf(stdout, "\nDEBUG>> L2: src mac: ");
for (int i = 0; i < ETH_ALEN; i++)
- fprintf(stdout, "%02X", ((struct ethhdr *)
- pkt_buf[iter]->payload)->h_source[i]);
+ fprintf(stdout, "%02X", ethhdr->h_source[i]);
/*extract L3 frame */
- fprintf(stdout, "\nDEBUG>> L3: ip_hdr->ihl: %02X\n",
- ((struct iphdr *)(pkt_buf[iter]->payload + sizeof(struct ethhdr)))->ihl);
-
- ipaddr.s_addr =
- ((struct iphdr *)(pkt_buf[iter]->payload + sizeof(struct ethhdr)))->saddr;
- fprintf(stdout, "DEBUG>> L3: ip_hdr->saddr: %s\n", inet_ntoa(ipaddr));
-
- ipaddr.s_addr =
- ((struct iphdr *)(pkt_buf[iter]->payload + sizeof(struct ethhdr)))->daddr;
- fprintf(stdout, "DEBUG>> L3: ip_hdr->daddr: %s\n", inet_ntoa(ipaddr));
-
+ fprintf(stdout, "\nDEBUG>> L3: ip_hdr->ihl: %02X\n", iphdr->ihl);
+ fprintf(stdout, "DEBUG>> L3: ip_hdr->saddr: %s\n",
+ inet_ntop(AF_INET, &iphdr->saddr, s, sizeof(s)));
+ fprintf(stdout, "DEBUG>> L3: ip_hdr->daddr: %s\n",
+ inet_ntop(AF_INET, &iphdr->daddr, s, sizeof(s)));
/*extract L4 frame */
- fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n",
- ntohs(((struct udphdr *)(pkt_buf[iter]->payload +
- sizeof(struct ethhdr) +
- sizeof(struct iphdr)))->source));
-
- fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n",
- ntohs(((struct udphdr *)(pkt_buf[iter]->payload +
- sizeof(struct ethhdr) +
- sizeof(struct iphdr)))->dest));
+ fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source));
+ fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest));
/*extract L5 frame */
- int payload = *((uint32_t *)(pkt_buf[iter]->payload + PKT_HDR_SIZE));
+ payload = *((uint32_t *)(ptr + PKT_HDR_SIZE));
if (payload == EOT) {
- ksft_print_msg("End-of-transmission frame received\n");
+ print_verbose("End-of-transmission frame received\n");
fprintf(stdout, "---------------------------------------\n");
break;
}
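One motivation for the rewrite above: inet_ntoa() returns a pointer into a static buffer, so it is neither thread-safe nor usable twice in the same statement, whereas inet_ntop() writes into a caller-supplied buffer. A minimal contrast (sketch):

#include <arpa/inet.h>
#include <linux/ip.h>
#include <stdio.h>

static void print_addrs(const struct iphdr *iphdr)
{
	char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];

	/* Both conversions can coexist; with inet_ntoa() the second
	 * call would overwrite the result of the first. */
	inet_ntop(AF_INET, &iphdr->saddr, src, sizeof(src));
	inet_ntop(AF_INET, &iphdr->daddr, dst, sizeof(dst));
	printf("%s -> %s\n", src, dst);
}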
@@ -723,6 +653,48 @@ static void worker_pkt_dump(void)
}
}
+static void worker_stats_validate(struct ifobject *ifobject)
+{
+ struct xdp_statistics stats;
+ socklen_t optlen;
+ int err;
+ struct xsk_socket *xsk = stat_test_type == STAT_TEST_TX_INVALID ?
+ ifdict[!ifobject->ifdict_index]->xsk->xsk :
+ ifobject->xsk->xsk;
+ int fd = xsk_socket__fd(xsk);
+ unsigned long xsk_stat = 0, expected_stat = opt_pkt_count;
+
+ sigvar = 0;
+
+ optlen = sizeof(stats);
+ err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
+ if (err)
+ return;
+
+ if (optlen == sizeof(struct xdp_statistics)) {
+ switch (stat_test_type) {
+ case STAT_TEST_RX_DROPPED:
+ xsk_stat = stats.rx_dropped;
+ break;
+ case STAT_TEST_TX_INVALID:
+ xsk_stat = stats.tx_invalid_descs;
+ break;
+ case STAT_TEST_RX_FULL:
+ xsk_stat = stats.rx_ring_full;
+ expected_stat -= RX_FULL_RXQSIZE;
+ break;
+ case STAT_TEST_RX_FILL_EMPTY:
+ xsk_stat = stats.rx_fill_ring_empty_descs;
+ break;
+ default:
+ break;
+ }
+
+ if (xsk_stat == expected_stat)
+ sigvar = 1;
+ }
+}
+
static void worker_pkt_validate(void)
{
u32 payloadseqnum = -2;
@@ -746,7 +718,7 @@ static void worker_pkt_validate(void)
}
if (payloadseqnum == EOT) {
- ksft_print_msg("End-of-transmission frame received: PASS\n");
+ print_verbose("End-of-transmission frame received: PASS\n");
sigvar = 1;
break;
}
@@ -773,37 +745,69 @@ static void worker_pkt_validate(void)
}
}
-static void thread_common_ops(struct ifobject *ifobject, void *bufs, pthread_mutex_t *mutexptr,
- atomic_int *spinningptr)
+static void thread_common_ops(struct ifobject *ifobject, void *bufs)
{
+ int umem_sz = num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE;
int ctr = 0;
int ret;
- xsk_configure_umem(ifobject, bufs, num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE);
- ret = xsk_configure_socket(ifobject);
+ ifobject->ns_fd = switch_namespace(ifobject->nsname);
+
+ if (test_type == TEST_TYPE_BPF_RES)
+ umem_sz *= 2;
+
+ bufs = mmap(NULL, umem_sz,
+ PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (bufs == MAP_FAILED)
+ exit_with_error(errno);
+
+ xsk_configure_umem(ifobject, bufs, 0);
+ ifobject->umem = ifobject->umem_arr[0];
+ ret = xsk_configure_socket(ifobject, 0);
/* Retry creating the socket if it fails, as xsk_socket__create()
	 * is asynchronous
- *
- * Essential to lock Mutex here to prevent Tx thread from
- * entering before Rx and causing a deadlock
*/
- pthread_mutex_lock(mutexptr);
while (ret && ctr < SOCK_RECONF_CTR) {
- atomic_store(spinningptr, 1);
- xsk_configure_umem(ifobject, bufs, num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE);
- ret = xsk_configure_socket(ifobject);
+ xsk_configure_umem(ifobject, bufs, 0);
+ ifobject->umem = ifobject->umem_arr[0];
+ ret = xsk_configure_socket(ifobject, 0);
usleep(USLEEP_MAX);
ctr++;
}
- atomic_store(spinningptr, 0);
- pthread_mutex_unlock(mutexptr);
if (ctr >= SOCK_RECONF_CTR)
exit_with_error(ret);
+
+ ifobject->umem = ifobject->umem_arr[0];
+ ifobject->xsk = ifobject->xsk_arr[0];
+
+ if (test_type == TEST_TYPE_BPF_RES) {
+ xsk_configure_umem(ifobject, (u8 *)bufs + (umem_sz / 2), 1);
+ ifobject->umem = ifobject->umem_arr[1];
+ ret = xsk_configure_socket(ifobject, 1);
+ }
+
+ ifobject->umem = ifobject->umem_arr[0];
+ ifobject->xsk = ifobject->xsk_arr[0];
+ print_verbose("Interface [%s] vector [%s]\n",
+ ifobject->ifname, ifobject->fv.vector == tx ? "Tx" : "Rx");
+}
+
+static bool testapp_is_test_two_stepped(void)
+{
+ return (test_type != TEST_TYPE_BIDI && test_type != TEST_TYPE_BPF_RES) || second_step;
+}
+
+static void testapp_cleanup_xsk_res(struct ifobject *ifobj)
+{
+ if (testapp_is_test_two_stepped()) {
+ xsk_socket__delete(ifobj->xsk->xsk);
+ (void)xsk_umem__delete(ifobj->umem->umem);
+ }
}
-static void *worker_testapp_validate(void *arg)
+static void *worker_testapp_validate_tx(void *arg)
{
struct udphdr *udp_hdr =
(struct udphdr *)(pkt_data + sizeof(struct ethhdr) + sizeof(struct iphdr));
@@ -813,149 +817,97 @@ static void *worker_testapp_validate(void *arg)
struct generic_data data;
void *bufs = NULL;
- pthread_attr_setstacksize(&attr, THREAD_STACK);
-
- if (!bidi_pass) {
- bufs = mmap(NULL, num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE,
- PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (bufs == MAP_FAILED)
- exit_with_error(errno);
-
- if (strcmp(ifobject->nsname, ""))
- switch_namespace(ifobject->ifdict_index);
+ if (!second_step)
+ thread_common_ops(ifobject, bufs);
+
+ for (int i = 0; i < num_frames; i++) {
+		/* send EOT frame */
+ if (i == (num_frames - 1))
+ data.seqnum = -1;
+ else
+ data.seqnum = i;
+ gen_udp_hdr(&data, ifobject, udp_hdr);
+ gen_ip_hdr(ifobject, ip_hdr);
+ gen_udp_csum(udp_hdr, ip_hdr);
+ gen_eth_hdr(ifobject, eth_hdr);
+ gen_eth_frame(ifobject->umem, i * XSK_UMEM__DEFAULT_FRAME_SIZE);
}
- if (ifobject->fv.vector == tx) {
- int spinningrxctr = 0;
-
- if (!bidi_pass)
- thread_common_ops(ifobject, bufs, &sync_mutex_tx, &spinning_tx);
-
- while (atomic_load(&spinning_rx) && spinningrxctr < SOCK_RECONF_CTR) {
- spinningrxctr++;
- usleep(USLEEP_MAX);
- }
+ print_verbose("Sending %d packets on interface %s\n",
+ (opt_pkt_count - 1), ifobject->ifname);
+ tx_only_all(ifobject);
- ksft_print_msg("Interface [%s] vector [Tx]\n", ifobject->ifname);
- for (int i = 0; i < num_frames; i++) {
- /*send EOT frame */
- if (i == (num_frames - 1))
- data.seqnum = -1;
- else
- data.seqnum = i;
- gen_udp_hdr(&data, ifobject, udp_hdr);
- gen_ip_hdr(ifobject, ip_hdr);
- gen_udp_csum(udp_hdr, ip_hdr);
- gen_eth_hdr(ifobject, eth_hdr);
- gen_eth_frame(ifobject->umem, i * XSK_UMEM__DEFAULT_FRAME_SIZE);
- }
+ testapp_cleanup_xsk_res(ifobject);
+ pthread_exit(NULL);
+}
- ksft_print_msg("Sending %d packets on interface %s\n",
- (opt_pkt_count - 1), ifobject->ifname);
- tx_only_all(ifobject);
- } else if (ifobject->fv.vector == rx) {
- struct pollfd fds[MAX_SOCKS] = { };
- int ret;
+static void *worker_testapp_validate_rx(void *arg)
+{
+ struct ifobject *ifobject = (struct ifobject *)arg;
+ struct pollfd fds[MAX_SOCKS] = { };
+ void *bufs = NULL;
- if (!bidi_pass)
- thread_common_ops(ifobject, bufs, &sync_mutex_tx, &spinning_rx);
+ if (!second_step)
+ thread_common_ops(ifobject, bufs);
- ksft_print_msg("Interface [%s] vector [Rx]\n", ifobject->ifname);
+ if (stat_test_type != STAT_TEST_RX_FILL_EMPTY)
xsk_populate_fill_ring(ifobject->umem);
- TAILQ_INIT(&head);
- if (debug_pkt_dump) {
- pkt_buf = calloc(num_frames, sizeof(*pkt_buf));
- if (!pkt_buf)
- exit_with_error(errno);
- }
+ TAILQ_INIT(&head);
+ if (debug_pkt_dump) {
+ pkt_buf = calloc(num_frames, sizeof(*pkt_buf));
+ if (!pkt_buf)
+ exit_with_error(errno);
+ }
- fds[0].fd = xsk_socket__fd(ifobject->xsk->xsk);
- fds[0].events = POLLIN;
+ fds[0].fd = xsk_socket__fd(ifobject->xsk->xsk);
+ fds[0].events = POLLIN;
- pthread_mutex_lock(&sync_mutex);
- pthread_cond_signal(&signal_rx_condition);
- pthread_mutex_unlock(&sync_mutex);
+ pthread_barrier_wait(&barr);
- while (1) {
- if (opt_poll) {
- ret = poll(fds, 1, POLL_TMOUT);
- if (ret <= 0)
- continue;
- }
+ while (1) {
+ if (test_type != TEST_TYPE_STATS) {
rx_pkt(ifobject->xsk, fds);
worker_pkt_validate();
-
- if (sigvar)
- break;
+ } else {
+ worker_stats_validate(ifobject);
}
+ if (sigvar)
+ break;
+ }
- ksft_print_msg("Received %d packets on interface %s\n",
- pkt_counter, ifobject->ifname);
+ print_verbose("Received %d packets on interface %s\n",
+ pkt_counter, ifobject->ifname);
- if (opt_teardown)
- ksft_print_msg("Destroying socket\n");
- }
+ if (test_type == TEST_TYPE_TEARDOWN)
+ print_verbose("Destroying socket\n");
- if (!opt_bidi || bidi_pass) {
- xsk_socket__delete(ifobject->xsk->xsk);
- (void)xsk_umem__delete(ifobject->umem->umem);
- }
+ testapp_cleanup_xsk_res(ifobject);
pthread_exit(NULL);
}
static void testapp_validate(void)
{
- struct timespec max_wait = { 0, 0 };
+ bool bidi = test_type == TEST_TYPE_BIDI;
+ bool bpf = test_type == TEST_TYPE_BPF_RES;
- pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, THREAD_STACK);
-
- if (opt_bidi && bidi_pass) {
- pthread_init_mutex();
- if (!switching_notify) {
- ksft_print_msg("Switching Tx/Rx vectors\n");
- switching_notify++;
- }
- }
-
- pthread_mutex_lock(&sync_mutex);
+ if (pthread_barrier_init(&barr, NULL, 2))
+ exit_with_error(errno);
/*Spawn RX thread */
- if (!opt_bidi || !bidi_pass) {
- if (pthread_create(&t0, &attr, worker_testapp_validate, ifdict[1]))
- exit_with_error(errno);
- } else if (opt_bidi && bidi_pass) {
- /*switch Tx/Rx vectors */
- ifdict[0]->fv.vector = rx;
- if (pthread_create(&t0, &attr, worker_testapp_validate, ifdict[0]))
- exit_with_error(errno);
- }
+ pthread_create(&t0, NULL, ifdict_rx->func_ptr, ifdict_rx);
- if (clock_gettime(CLOCK_REALTIME, &max_wait))
+ pthread_barrier_wait(&barr);
+ if (pthread_barrier_destroy(&barr))
exit_with_error(errno);
- max_wait.tv_sec += TMOUT_SEC;
-
- if (pthread_cond_timedwait(&signal_rx_condition, &sync_mutex, &max_wait) == ETIMEDOUT)
- exit_with_error(errno);
-
- pthread_mutex_unlock(&sync_mutex);
/*Spawn TX thread */
- if (!opt_bidi || !bidi_pass) {
- if (pthread_create(&t1, &attr, worker_testapp_validate, ifdict[0]))
- exit_with_error(errno);
- } else if (opt_bidi && bidi_pass) {
- /*switch Tx/Rx vectors */
- ifdict[1]->fv.vector = tx;
- if (pthread_create(&t1, &attr, worker_testapp_validate, ifdict[1]))
- exit_with_error(errno);
- }
+ pthread_create(&t1, NULL, ifdict_tx->func_ptr, ifdict_tx);
pthread_join(t1, NULL);
pthread_join(t0, NULL);
- if (debug_pkt_dump) {
+ if (debug_pkt_dump && test_type != TEST_TYPE_STATS) {
worker_pkt_dump();
for (int iter = 0; iter < num_frames - 1; iter++) {
free(pkt_buf[iter]->payload);
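The mutex/condvar handshake removed above collapses into a single barrier: both the main thread and the Rx worker call pthread_barrier_wait(), and neither proceeds until the other has arrived, which is exactly the "Tx must not start before Rx is ready" guarantee the old code enforced. A hedged standalone sketch of the pattern:

#include <pthread.h>
#include <stddef.h>

static pthread_barrier_t barr;

static void *rx_worker(void *arg)
{
	/* ... create socket, populate fill ring ... */
	pthread_barrier_wait(&barr);	/* tell main: Rx is ready */
	/* ... receive loop ... */
	return NULL;
}

static void run_pair(void *(*rx)(void *), void *(*tx)(void *))
{
	pthread_t t0, t1;

	pthread_barrier_init(&barr, NULL, 2);	/* two participants */
	pthread_create(&t0, NULL, rx, NULL);
	pthread_barrier_wait(&barr);	/* returns once rx_worker arrived */
	pthread_barrier_destroy(&barr);	/* safe: both are past the barrier */
	pthread_create(&t1, NULL, tx, NULL);
	pthread_join(t1, NULL);
	pthread_join(t0, NULL);
}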
@@ -964,73 +916,217 @@ static void testapp_validate(void)
free(pkt_buf);
}
- if (!opt_teardown && !opt_bidi)
+	if (test_type != TEST_TYPE_TEARDOWN && !bidi && !bpf && test_type != TEST_TYPE_STATS)
print_ksft_result();
}
-static void testapp_sockets(void)
+static void testapp_teardown(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
+ pkt_counter = 0;
+ prev_pkt = -1;
+ sigvar = 0;
+ print_verbose("Creating socket\n");
+ testapp_validate();
+ }
+
+ print_ksft_result();
+}
+
+static void swap_vectors(struct ifobject *ifobj1, struct ifobject *ifobj2)
{
- for (int i = 0; i < (opt_teardown ? MAX_TEARDOWN_ITER : MAX_BIDI_ITER); i++) {
+ void *(*tmp_func_ptr)(void *) = ifobj1->func_ptr;
+ enum fvector tmp_vector = ifobj1->fv.vector;
+
+ ifobj1->func_ptr = ifobj2->func_ptr;
+ ifobj1->fv.vector = ifobj2->fv.vector;
+
+ ifobj2->func_ptr = tmp_func_ptr;
+ ifobj2->fv.vector = tmp_vector;
+
+ ifdict_tx = ifobj1;
+ ifdict_rx = ifobj2;
+}
+
+static void testapp_bidi(void)
+{
+ for (int i = 0; i < MAX_BIDI_ITER; i++) {
pkt_counter = 0;
prev_pkt = -1;
sigvar = 0;
- ksft_print_msg("Creating socket\n");
+ print_verbose("Creating socket\n");
testapp_validate();
- opt_bidi ? bidi_pass++ : bidi_pass;
+ if (!second_step) {
+ print_verbose("Switching Tx/Rx vectors\n");
+ swap_vectors(ifdict[1], ifdict[0]);
+ }
+ second_step = true;
}
+ swap_vectors(ifdict[0], ifdict[1]);
+
print_ksft_result();
}
-static void init_iface_config(struct ifaceconfigobj *ifaceconfig)
+static void swap_xsk_res(void)
{
- /*Init interface0 */
- ifdict[0]->fv.vector = tx;
- memcpy(ifdict[0]->dst_mac, ifaceconfig->dst_mac, ETH_ALEN);
- memcpy(ifdict[0]->src_mac, ifaceconfig->src_mac, ETH_ALEN);
- ifdict[0]->dst_ip = ifaceconfig->dst_ip.s_addr;
- ifdict[0]->src_ip = ifaceconfig->src_ip.s_addr;
- ifdict[0]->dst_port = ifaceconfig->dst_port;
- ifdict[0]->src_port = ifaceconfig->src_port;
-
- /*Init interface1 */
- ifdict[1]->fv.vector = rx;
- memcpy(ifdict[1]->dst_mac, ifaceconfig->src_mac, ETH_ALEN);
- memcpy(ifdict[1]->src_mac, ifaceconfig->dst_mac, ETH_ALEN);
- ifdict[1]->dst_ip = ifaceconfig->src_ip.s_addr;
- ifdict[1]->src_ip = ifaceconfig->dst_ip.s_addr;
- ifdict[1]->dst_port = ifaceconfig->src_port;
- ifdict[1]->src_port = ifaceconfig->dst_port;
+ xsk_socket__delete(ifdict_tx->xsk->xsk);
+ xsk_umem__delete(ifdict_tx->umem->umem);
+ xsk_socket__delete(ifdict_rx->xsk->xsk);
+ xsk_umem__delete(ifdict_rx->umem->umem);
+ ifdict_tx->umem = ifdict_tx->umem_arr[1];
+ ifdict_tx->xsk = ifdict_tx->xsk_arr[1];
+ ifdict_rx->umem = ifdict_rx->umem_arr[1];
+ ifdict_rx->xsk = ifdict_rx->xsk_arr[1];
+}
+
+static void testapp_bpf_res(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_BPF_ITER; i++) {
+ pkt_counter = 0;
+ prev_pkt = -1;
+ sigvar = 0;
+ print_verbose("Creating socket\n");
+ testapp_validate();
+ if (!second_step)
+ swap_xsk_res();
+ second_step = true;
+ }
+
+ print_ksft_result();
+}
+
+static void testapp_stats(void)
+{
+ for (int i = 0; i < STAT_TEST_TYPE_MAX; i++) {
+ stat_test_type = i;
+
+ /* reset defaults */
+ rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
+
+ switch (stat_test_type) {
+ case STAT_TEST_RX_DROPPED:
+ frame_headroom = XSK_UMEM__DEFAULT_FRAME_SIZE -
+ XDP_PACKET_HEADROOM - 1;
+ break;
+ case STAT_TEST_RX_FULL:
+ rxqsize = RX_FULL_RXQSIZE;
+ break;
+ default:
+ break;
+ }
+ testapp_validate();
+ }
+
+ print_ksft_result();
+}
+
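A worked example of the STAT_TEST_RX_DROPPED setting above, assuming the libbpf defaults of a 4096-byte UMEM frame and 256 bytes of XDP_PACKET_HEADROOM:

/* frame_headroom = 4096 - 256 - 1 = 3839 bytes, so the usable rx
 * space per frame is 4096 - 256 - 3839 = 1 byte -- too small for any
 * packet.  Every received frame is therefore dropped, and
 * stats.rx_dropped is expected to reach opt_pkt_count. */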
+static void init_iface(struct ifobject *ifobj, const char *dst_mac,
+ const char *src_mac, const char *dst_ip,
+ const char *src_ip, const u16 dst_port,
+ const u16 src_port, enum fvector vector)
+{
+ struct in_addr ip;
+
+ memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN);
+ memcpy(ifobj->src_mac, src_mac, ETH_ALEN);
+
+ inet_aton(dst_ip, &ip);
+ ifobj->dst_ip = ip.s_addr;
+
+ inet_aton(src_ip, &ip);
+ ifobj->src_ip = ip.s_addr;
+
+ ifobj->dst_port = dst_port;
+ ifobj->src_port = src_port;
+
+ if (vector == tx) {
+ ifobj->fv.vector = tx;
+ ifobj->func_ptr = worker_testapp_validate_tx;
+ ifdict_tx = ifobj;
+ } else {
+ ifobj->fv.vector = rx;
+ ifobj->func_ptr = worker_testapp_validate_rx;
+ ifdict_rx = ifobj;
+ }
+}
+
+static void run_pkt_test(int mode, int type)
+{
+ test_type = type;
+
+ /* reset defaults after potential previous test */
+ xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+ pkt_counter = 0;
+ second_step = 0;
+ prev_pkt = -1;
+ sigvar = 0;
+ stat_test_type = -1;
+ rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
+
+ configured_mode = mode;
+
+ switch (mode) {
+ case (TEST_MODE_SKB):
+ xdp_flags |= XDP_FLAGS_SKB_MODE;
+ break;
+ case (TEST_MODE_DRV):
+ xdp_flags |= XDP_FLAGS_DRV_MODE;
+ break;
+ default:
+ break;
+ }
+
+ switch (test_type) {
+ case TEST_TYPE_STATS:
+ testapp_stats();
+ break;
+ case TEST_TYPE_TEARDOWN:
+ testapp_teardown();
+ break;
+ case TEST_TYPE_BIDI:
+ testapp_bidi();
+ break;
+ case TEST_TYPE_BPF_RES:
+ testapp_bpf_res();
+ break;
+ default:
+ testapp_validate();
+ break;
+ }
}
int main(int argc, char **argv)
{
struct rlimit _rlim = { RLIM_INFINITY, RLIM_INFINITY };
+ bool failure = false;
+ int i, j;
if (setrlimit(RLIMIT_MEMLOCK, &_rlim))
exit_with_error(errno);
- const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
- const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
- const char *IP1 = "192.168.100.162";
- const char *IP2 = "192.168.100.161";
- u16 UDP_DST_PORT = 2020;
- u16 UDP_SRC_PORT = 2121;
-
- ifaceconfig = malloc(sizeof(struct ifaceconfigobj));
- memcpy(ifaceconfig->dst_mac, MAC1, ETH_ALEN);
- memcpy(ifaceconfig->src_mac, MAC2, ETH_ALEN);
- inet_aton(IP1, &ifaceconfig->dst_ip);
- inet_aton(IP2, &ifaceconfig->src_ip);
- ifaceconfig->dst_port = UDP_DST_PORT;
- ifaceconfig->src_port = UDP_SRC_PORT;
-
for (int i = 0; i < MAX_INTERFACES; i++) {
ifdict[i] = malloc(sizeof(struct ifobject));
if (!ifdict[i])
exit_with_error(errno);
ifdict[i]->ifdict_index = i;
+ ifdict[i]->xsk_arr = calloc(2, sizeof(struct xsk_socket_info *));
+ if (!ifdict[i]->xsk_arr) {
+ failure = true;
+ goto cleanup;
+ }
+ ifdict[i]->umem_arr = calloc(2, sizeof(struct xsk_umem_info *));
+ if (!ifdict[i]->umem_arr) {
+ failure = true;
+ goto cleanup;
+ }
}
setlocale(LC_ALL, "");
@@ -1039,25 +1135,27 @@ int main(int argc, char **argv)
num_frames = ++opt_pkt_count;
- init_iface_config(ifaceconfig);
-
- pthread_init_mutex();
+ init_iface(ifdict[0], MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2, tx);
+ init_iface(ifdict[1], MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1, rx);
- ksft_set_plan(1);
+ ksft_set_plan(TEST_MODE_MAX * TEST_TYPE_MAX);
- if (!opt_teardown && !opt_bidi) {
- testapp_validate();
- } else if (opt_teardown && opt_bidi) {
- ksft_test_result_fail("ERROR: parameters -T and -B cannot be used together\n");
- ksft_exit_xfail();
- } else {
- testapp_sockets();
+ for (i = 0; i < TEST_MODE_MAX; i++) {
+ for (j = 0; j < TEST_TYPE_MAX; j++)
+ run_pkt_test(i, j);
}
- for (int i = 0; i < MAX_INTERFACES; i++)
+cleanup:
+ for (int i = 0; i < MAX_INTERFACES; i++) {
+ if (ifdict[i]->ns_fd != -1)
+ close(ifdict[i]->ns_fd);
+ free(ifdict[i]->xsk_arr);
+ free(ifdict[i]->umem_arr);
free(ifdict[i]);
+ }
- pthread_destroy_mutex();
+ if (failure)
+ exit_with_error(errno);
ksft_exit_pass();
diff --git a/tools/testing/selftests/bpf/xdpxceiver.h b/tools/testing/selftests/bpf/xdpxceiver.h
index 0e9f9b7e61c2..6c428b276ab6 100644
--- a/tools/testing/selftests/bpf/xdpxceiver.h
+++ b/tools/testing/selftests/bpf/xdpxceiver.h
@@ -23,6 +23,7 @@
#define MAX_SOCKS 1
#define MAX_TEARDOWN_ITER 10
#define MAX_BIDI_ITER 2
+#define MAX_BPF_ITER 2
#define PKT_HDR_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
sizeof(struct udphdr))
#define MIN_PKT_SIZE 64
@@ -33,41 +34,63 @@
#define IP_PKT_TOS 0x9
#define UDP_PKT_SIZE (IP_PKT_SIZE - sizeof(struct iphdr))
#define UDP_PKT_DATA_SIZE (UDP_PKT_SIZE - sizeof(struct udphdr))
-#define TMOUT_SEC (3)
#define EOT (-1)
#define USLEEP_MAX 200000
-#define THREAD_STACK 60000000
#define SOCK_RECONF_CTR 10
#define BATCH_SIZE 64
#define POLL_TMOUT 1000
-#define NEED_WAKEUP true
+#define DEFAULT_PKT_CNT 10000
+#define RX_FULL_RXQSIZE 32
+
+#define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0)
typedef __u32 u32;
typedef __u16 u16;
typedef __u8 u8;
-enum TESTS {
- ORDER_CONTENT_VALIDATE_XDP_SKB = 0,
- ORDER_CONTENT_VALIDATE_XDP_DRV = 1,
+enum TEST_MODES {
+ TEST_MODE_UNCONFIGURED = -1,
+ TEST_MODE_SKB,
+ TEST_MODE_DRV,
+ TEST_MODE_MAX
+};
+
+enum TEST_TYPES {
+ TEST_TYPE_NOPOLL,
+ TEST_TYPE_POLL,
+ TEST_TYPE_TEARDOWN,
+ TEST_TYPE_BIDI,
+ TEST_TYPE_STATS,
+ TEST_TYPE_BPF_RES,
+ TEST_TYPE_MAX
+};
+
+enum STAT_TEST_TYPES {
+ STAT_TEST_RX_DROPPED,
+ STAT_TEST_TX_INVALID,
+ STAT_TEST_RX_FULL,
+ STAT_TEST_RX_FILL_EMPTY,
+ STAT_TEST_TYPE_MAX
};
-u8 uut;
-u8 debug_pkt_dump;
-u32 num_frames;
-u8 switching_notify;
-u8 bidi_pass;
+static int configured_mode = TEST_MODE_UNCONFIGURED;
+static u8 debug_pkt_dump;
+static u32 num_frames;
+static bool second_step;
+static int test_type;
-static u32 opt_xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
-static int opt_queue;
static int opt_pkt_count;
-static int opt_poll;
-static int opt_teardown;
-static int opt_bidi;
-static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
+static u8 opt_verbose;
+
+static u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+static u32 xdp_bind_flags = XDP_USE_NEED_WAKEUP | XDP_COPY;
static u8 pkt_data[XSK_UMEM__DEFAULT_FRAME_SIZE];
static u32 pkt_counter;
-static u32 prev_pkt = -1;
+static long prev_pkt = -1;
static int sigvar;
+static int stat_test_type;
+static u32 rxqsize;
+static u32 frame_headroom;
struct xsk_umem_info {
struct xsk_ring_prod fq;
@@ -99,47 +122,32 @@ struct generic_data {
u32 seqnum;
};
-struct ifaceconfigobj {
- u8 dst_mac[ETH_ALEN];
- u8 src_mac[ETH_ALEN];
- struct in_addr dst_ip;
- struct in_addr src_ip;
- u16 src_port;
- u16 dst_port;
-} *ifaceconfig;
-
struct ifobject {
- int ifindex;
- int ifdict_index;
char ifname[MAX_INTERFACE_NAME_CHARS];
char nsname[MAX_INTERFACES_NAMESPACE_CHARS];
- struct flow_vector fv;
struct xsk_socket_info *xsk;
+ struct xsk_socket_info **xsk_arr;
+ struct xsk_umem_info **umem_arr;
struct xsk_umem_info *umem;
- u8 dst_mac[ETH_ALEN];
- u8 src_mac[ETH_ALEN];
+ void *(*func_ptr)(void *arg);
+ struct flow_vector fv;
+ int ns_fd;
+ int ifdict_index;
u32 dst_ip;
u32 src_ip;
u16 src_port;
u16 dst_port;
+ u8 dst_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
};
static struct ifobject *ifdict[MAX_INTERFACES];
+static struct ifobject *ifdict_rx;
+static struct ifobject *ifdict_tx;
/*threads*/
-atomic_int spinning_tx;
-atomic_int spinning_rx;
-pthread_mutex_t sync_mutex;
-pthread_mutex_t sync_mutex_tx;
-pthread_cond_t signal_rx_condition;
-pthread_cond_t signal_tx_condition;
-pthread_t t0, t1, ns_thread;
-pthread_attr_t attr;
-
-struct targs {
- bool retptr;
- int idx;
-};
+pthread_barrier_t barr;
+pthread_t t0, t1;
TAILQ_HEAD(head_s, pkt) head = TAILQ_HEAD_INITIALIZER(head);
struct head_s *head_p;
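The func_ptr member added to struct ifobject above is what lets testapp_validate() spawn either direction generically. A minimal sketch of the dispatch (illustrative helper, not from the patch):

/* Each interface object carries its own worker, so the spawner never
 * needs to branch on a Tx/Rx flag. */
static void start_worker(struct ifobject *ifobj, pthread_t *t)
{
	if (pthread_create(t, NULL, ifobj->func_ptr, ifobj))
		exit_with_error(errno);
}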
diff --git a/tools/testing/selftests/bpf/xsk_prereqs.sh b/tools/testing/selftests/bpf/xsk_prereqs.sh
index 9d54c4645127..dac1c5f78752 100755
--- a/tools/testing/selftests/bpf/xsk_prereqs.sh
+++ b/tools/testing/selftests/bpf/xsk_prereqs.sh
@@ -82,24 +82,21 @@ clear_configs()
{
if [ $(ip netns show | grep $3 &>/dev/null; echo $?;) == 0 ]; then
[ $(ip netns exec $3 ip link show $2 &>/dev/null; echo $?;) == 0 ] &&
- { echo "removing link $1:$2"; ip netns exec $3 ip link del $2; }
- echo "removing ns $3"
+ { ip netns exec $3 ip link del $2; }
ip netns del $3
fi
#Once we delete a veth pair node, the entire veth pair is removed.
#This is just to be cautious: in case the NS does not exist, the
#veth node inside the NS won't get removed, so we explicitly remove it
[ $(ip link show $1 &>/dev/null; echo $?;) == 0 ] &&
- { echo "removing link $1"; ip link del $1; }
+ { ip link del $1; }
if [ -f ${SPECFILE} ]; then
- echo "removing spec file:" ${SPECFILE}
rm -f ${SPECFILE}
fi
}
cleanup_exit()
{
- echo "cleaning up..."
clear_configs $1 $2 $3
}
@@ -108,28 +105,7 @@ validate_ip_utility()
[ ! $(type -P ip) ] && { echo "'ip' not found. Skipping tests."; test_exit $ksft_skip 1; }
}
-vethXDPgeneric()
-{
- ip link set dev $1 xdpdrv off
- ip netns exec $3 ip link set dev $2 xdpdrv off
-}
-
-vethXDPnative()
-{
- ip link set dev $1 xdpgeneric off
- ip netns exec $3 ip link set dev $2 xdpgeneric off
-}
-
execxdpxceiver()
{
- local -a 'paramkeys=("${!'"$1"'[@]}")' copy
- paramkeysstr=${paramkeys[*]}
-
- for index in $paramkeysstr;
- do
- current=$1"[$index]"
- copy[$index]=${!current}
- done
-
- ./${XSKOBJ} -i ${VETH0} -i ${VETH1},${NS1} ${copy[*]} -C ${NUMPKTS}
+ ./${XSKOBJ} -i ${VETH0} -i ${VETH1},${NS1} -C ${NUMPKTS} ${VERBOSE_ARG} ${DUMP_PKTS_ARG}
}
diff --git a/tools/testing/selftests/cgroup/test_kmem.c b/tools/testing/selftests/cgroup/test_kmem.c
index 0941aa16157e..22b31ebb3513 100644
--- a/tools/testing/selftests/cgroup/test_kmem.c
+++ b/tools/testing/selftests/cgroup/test_kmem.c
@@ -19,12 +19,12 @@
/*
- * Memory cgroup charging and vmstat data aggregation is performed using
- * percpu batches 32 pages big (look at MEMCG_CHARGE_BATCH). So the maximum
- * discrepancy between charge and vmstat entries is number of cpus multiplied
- * by 32 pages multiplied by 2.
+ * Memory cgroup charging is performed using percpu batches of 32
+ * pages (see MEMCG_CHARGE_BATCH), whereas memory.stat is exact. So
+ * the maximum discrepancy between charge and vmstat entries is the
+ * number of cpus multiplied by 32 pages.
*/
-#define MAX_VMSTAT_ERROR (4096 * 32 * 2 * get_nprocs())
+#define MAX_VMSTAT_ERROR (4096 * 32 * get_nprocs())
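A hedged worked example of the new bound: with 4096-byte pages and, say, 8 CPUs, each CPU may hold up to MEMCG_CHARGE_BATCH = 32 pages of uncommitted charge, so:

/* max error = nr_cpus * MEMCG_CHARGE_BATCH * page_size
 *           = 8 * 32 * 4096 = 1048576 bytes (1 MiB) on an 8-CPU box.
 * The dropped "* 2" had covered vmstat batching as well, which the
 * now-exact memory.stat no longer requires. */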
static int alloc_dcache(const char *cgroup, void *arg)
@@ -162,7 +162,7 @@ static int cg_run_in_subcgroups(const char *parent,
*/
static int test_kmem_memcg_deletion(const char *root)
{
- long current, slab, anon, file, kernel_stack, sum;
+ long current, slab, anon, file, kernel_stack, pagetables, percpu, sock, sum;
int ret = KSFT_FAIL;
char *parent;
@@ -184,11 +184,14 @@ static int test_kmem_memcg_deletion(const char *root)
anon = cg_read_key_long(parent, "memory.stat", "anon ");
file = cg_read_key_long(parent, "memory.stat", "file ");
kernel_stack = cg_read_key_long(parent, "memory.stat", "kernel_stack ");
+ pagetables = cg_read_key_long(parent, "memory.stat", "pagetables ");
+ percpu = cg_read_key_long(parent, "memory.stat", "percpu ");
+ sock = cg_read_key_long(parent, "memory.stat", "sock ");
if (current < 0 || slab < 0 || anon < 0 || file < 0 ||
- kernel_stack < 0)
+ kernel_stack < 0 || pagetables < 0 || percpu < 0 || sock < 0)
goto cleanup;
- sum = slab + anon + file + kernel_stack;
+ sum = slab + anon + file + kernel_stack + pagetables + percpu + sock;
if (abs(sum - current) < MAX_VMSTAT_ERROR) {
ret = KSFT_PASS;
} else {
@@ -198,6 +201,9 @@ static int test_kmem_memcg_deletion(const char *root)
printf("anon = %ld\n", anon);
printf("file = %ld\n", file);
printf("kernel_stack = %ld\n", kernel_stack);
+ printf("pagetables = %ld\n", pagetables);
+ printf("percpu = %ld\n", percpu);
+ printf("sock = %ld\n", sock);
}
cleanup:
diff --git a/tools/testing/selftests/dma/dma_map_benchmark.c b/tools/testing/selftests/dma/dma_map_benchmark.c
index fb23ce9617ea..485dff51bad2 100644
--- a/tools/testing/selftests/dma/dma_map_benchmark.c
+++ b/tools/testing/selftests/dma/dma_map_benchmark.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020 Hisilicon Limited.
+ * Copyright (C) 2020 HiSilicon Limited.
*/
#include <fcntl.h>
@@ -40,7 +40,8 @@ struct map_benchmark {
__u32 dma_bits; /* DMA addressing capability */
__u32 dma_dir; /* DMA data direction */
__u32 dma_trans_ns; /* time for DMA transmission in ns */
- __u8 expansion[80]; /* For future use */
+	__u32 granule;	/* how many PAGE_SIZEs to map/unmap at a time */
+ __u8 expansion[76]; /* For future use */
};
int main(int argc, char **argv)
@@ -51,11 +52,13 @@ int main(int argc, char **argv)
int threads = 1, seconds = 20, node = -1;
/* default dma mask 32bit, bidirectional DMA */
int bits = 32, xdelay = 0, dir = DMA_MAP_BIDIRECTIONAL;
+	/* default granule: 1 PAGE_SIZE */
+ int granule = 1;
int cmd = DMA_MAP_BENCHMARK;
char *p;
- while ((opt = getopt(argc, argv, "t:s:n:b:d:x:")) != -1) {
+ while ((opt = getopt(argc, argv, "t:s:n:b:d:x:g:")) != -1) {
switch (opt) {
case 't':
threads = atoi(optarg);
@@ -75,6 +78,9 @@ int main(int argc, char **argv)
case 'x':
xdelay = atoi(optarg);
break;
+ case 'g':
+ granule = atoi(optarg);
+ break;
default:
return -1;
}
@@ -110,6 +116,11 @@ int main(int argc, char **argv)
exit(1);
}
+ if (granule < 1 || granule > 1024) {
+ fprintf(stderr, "invalid granule size\n");
+ exit(1);
+ }
+
fd = open("/sys/kernel/debug/dma_map_benchmark", O_RDWR);
if (fd == -1) {
perror("open");
@@ -123,14 +134,15 @@ int main(int argc, char **argv)
map.dma_bits = bits;
map.dma_dir = dir;
map.dma_trans_ns = xdelay;
+ map.granule = granule;
if (ioctl(fd, cmd, &map)) {
perror("ioctl");
exit(1);
}
- printf("dma mapping benchmark: threads:%d seconds:%d node:%d dir:%s\n",
- threads, seconds, node, dir[directions]);
+	printf("dma mapping benchmark: threads:%d seconds:%d node:%d dir:%s granule:%d\n",
+ threads, seconds, node, dir[directions], granule);
printf("average map latency(us):%.1f standard deviation:%.1f\n",
map.avg_map_100ns/10.0, map.map_stddev/10.0);
printf("average unmap latency(us):%.1f standard deviation:%.1f\n",
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
index 1fedfc9da434..42d44e27802c 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
@@ -446,6 +446,35 @@ __invalid_nexthop_test()
log_test "Unresolved neigh: nexthop does not exist: $desc"
}
+__invalid_nexthop_bucket_test()
+{
+ local desc=$1; shift
+ local dip=$1; shift
+ local via_add=$1; shift
+ local trap_name="unresolved_neigh"
+
+ RET=0
+
+	# Check that a route via nexthop buckets whose nexthop does not
+	# resolve triggers unresolved_neigh
+ ip nexthop add id 1 via $via_add dev $rp2
+ ip nexthop add id 10 group 1 type resilient buckets 32
+ ip route add $dip nhid 10
+
+ t0_packets=$(devlink_trap_rx_packets_get $trap_name)
+ ping_do $h1 $dip
+ t1_packets=$(devlink_trap_rx_packets_get $trap_name)
+
+ if [[ $t0_packets -eq $t1_packets ]]; then
+ check_err 1 "Trap counter did not increase"
+ fi
+
+ ip route del $dip nhid 10
+ ip nexthop del id 10
+ ip nexthop del id 1
+ log_test "Unresolved neigh: nexthop bucket does not exist: $desc"
+}
+
unresolved_neigh_test()
{
__host_miss_test "IPv4" 198.51.100.1
@@ -453,6 +482,8 @@ unresolved_neigh_test()
__invalid_nexthop_test "IPv4" 198.51.100.1 198.51.100.3 24 198.51.100.4
__invalid_nexthop_test "IPv6" 2001:db8:2::1 2001:db8:2::3 64 \
2001:db8:2::4
+ __invalid_nexthop_bucket_test "IPv4" 198.51.100.1 198.51.100.4
+ __invalid_nexthop_bucket_test "IPv6" 2001:db8:2::1 2001:db8:2::4
}
vrf_without_routes_create()
diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
index 6f3a70df63bc..e00435753008 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
@@ -120,12 +120,13 @@ __mirror_gre_test()
sleep 5
for ((i = 0; i < count; ++i)); do
+ local sip=$(mirror_gre_ipv6_addr 1 $i)::1
local dip=$(mirror_gre_ipv6_addr 1 $i)::2
local htun=h3-gt6-$i
local message
icmp6_capture_install $htun
- mirror_test v$h1 "" $dip $htun 100 10
+ mirror_test v$h1 $sip $dip $htun 100 10
icmp6_capture_uninstall $htun
done
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
index f813ffefc07e..65f43a7ce9c9 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
@@ -55,10 +55,6 @@ port_test()
| jq '.[][][] | select(.name=="physical_ports") |.["occ"]')
[[ $occ -eq $max_ports ]]
- if [[ $should_fail -eq 0 ]]; then
- check_err $? "Mismatch ports number: Expected $max_ports, got $occ."
- else
- check_err_fail $should_fail $? "Reached more ports than expected"
- fi
+ check_err_fail $should_fail $? "Attempt to create $max_ports ports (actual result $occ)"
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
index ed346da5d3cb..a217f9f6775b 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
@@ -33,6 +33,7 @@ ALL_TESTS="
nexthop_obj_invalid_test
nexthop_obj_offload_test
nexthop_obj_group_offload_test
+ nexthop_obj_bucket_offload_test
nexthop_obj_blackhole_offload_test
nexthop_obj_route_offload_test
devlink_reload_test
@@ -739,11 +740,28 @@ nexthop_obj_invalid_test()
ip nexthop add id 1 dev $swp1
ip nexthop add id 2 dev $swp1
+ ip nexthop add id 3 via 192.0.2.3 dev $swp1
ip nexthop add id 10 group 1/2
check_fail $? "managed to configure a nexthop group with device-only nexthops when should not"
+ ip nexthop add id 10 group 3 type resilient buckets 7
+ check_fail $? "managed to configure a too small resilient nexthop group when should not"
+
+ ip nexthop add id 10 group 3 type resilient buckets 129
+ check_fail $? "managed to configure a resilient nexthop group with invalid number of buckets when should not"
+
+ ip nexthop add id 10 group 1/2 type resilient buckets 32
+ check_fail $? "managed to configure a resilient nexthop group with device-only nexthops when should not"
+
+ ip nexthop add id 10 group 3 type resilient buckets 32
+ check_err $? "failed to configure a valid resilient nexthop group"
+ ip nexthop replace id 3 dev $swp1
+ check_fail $? "managed to populate a nexthop bucket with a device-only nexthop when should not"
+
log_test "nexthop objects - invalid configurations"
+ ip nexthop del id 10
+ ip nexthop del id 3
ip nexthop del id 2
ip nexthop del id 1
@@ -858,6 +876,70 @@ nexthop_obj_group_offload_test()
simple_if_fini $swp1 192.0.2.1/24 2001:db8:1::1/64
}
+nexthop_obj_bucket_offload_test()
+{
+ # Test offload indication of nexthop buckets
+ RET=0
+
+ simple_if_init $swp1 192.0.2.1/24 2001:db8:1::1/64
+ simple_if_init $swp2
+ setup_wait
+
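+	# idle_timer 0 on the group below keeps bucket migration immediate,
+	# so the offload and trap transitions are not delayed by activity
+	# tracking.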
+ ip nexthop add id 1 via 192.0.2.2 dev $swp1
+ ip nexthop add id 2 via 2001:db8:1::2 dev $swp1
+ ip nexthop add id 10 group 1/2 type resilient buckets 32 idle_timer 0
+ ip neigh replace 192.0.2.2 lladdr 00:11:22:33:44:55 nud reachable \
+ dev $swp1
+ ip neigh replace 192.0.2.3 lladdr 00:11:22:33:44:55 nud reachable \
+ dev $swp1
+ ip neigh replace 2001:db8:1::2 lladdr 00:11:22:33:44:55 nud reachable \
+ dev $swp1
+
+ busywait "$TIMEOUT" wait_for_offload \
+ ip nexthop bucket show nhid 1
+ check_err $? "IPv4 nexthop buckets not marked as offloaded when should"
+ busywait "$TIMEOUT" wait_for_offload \
+ ip nexthop bucket show nhid 2
+ check_err $? "IPv6 nexthop buckets not marked as offloaded when should"
+
+ # Invalidate nexthop id 1
+ ip neigh replace 192.0.2.2 nud failed dev $swp1
+ busywait "$TIMEOUT" wait_for_trap \
+ ip nexthop bucket show nhid 1
+ check_err $? "IPv4 nexthop buckets not marked with trap when should"
+
+ # Invalidate nexthop id 2
+ ip neigh replace 2001:db8:1::2 nud failed dev $swp1
+ busywait "$TIMEOUT" wait_for_trap \
+ ip nexthop bucket show nhid 2
+ check_err $? "IPv6 nexthop buckets not marked with trap when should"
+
+ # Revalidate nexthop id 1 by changing its configuration
+ ip nexthop replace id 1 via 192.0.2.3 dev $swp1
+ busywait "$TIMEOUT" wait_for_offload \
+ ip nexthop bucket show nhid 1
+ check_err $? "nexthop bucket not marked as offloaded after revalidating nexthop"
+
+ # Revalidate nexthop id 2 by changing its neighbour
+ ip neigh replace 2001:db8:1::2 lladdr 00:11:22:33:44:55 nud reachable \
+ dev $swp1
+ busywait "$TIMEOUT" wait_for_offload \
+ ip nexthop bucket show nhid 2
+ check_err $? "nexthop bucket not marked as offloaded after revalidating neighbour"
+
+ log_test "nexthop bucket offload indication"
+
+ ip neigh del 2001:db8:1::2 dev $swp1
+ ip neigh del 192.0.2.3 dev $swp1
+ ip neigh del 192.0.2.2 dev $swp1
+ ip nexthop del id 10
+ ip nexthop del id 2
+ ip nexthop del id 1
+
+ simple_if_fini $swp2
+ simple_if_fini $swp1 192.0.2.1/24 2001:db8:1::1/64
+}
+
nexthop_obj_blackhole_offload_test()
{
# Test offload indication of blackhole nexthop objects
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
index b0cb1aaffdda..33ddd01689be 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
@@ -507,8 +507,8 @@ do_red_test()
check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0."
local diff=$((limit - backlog))
pct=$((100 * diff / limit))
- ((0 <= pct && pct <= 5))
- check_err $? "backlog $backlog / $limit expected <= 5% distance"
+ ((0 <= pct && pct <= 10))
+ check_err $? "backlog $backlog / $limit expected <= 10% distance"
log_test "TC $((vlan - 10)): RED backlog > limit"
stop_traffic
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
index 3f007c5f8361..f3ef3274f9b3 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
@@ -67,6 +67,13 @@ red_test()
{
install_qdisc
+	# Make sure we observe a non-zero backlog value if one ever appears.
+ local cur=$(busywait 1100 until_counter_is "> 0" \
+ qdisc_stats_get $swp3 10: .backlog)
+ (( cur == 0 ))
+ check_err $? "backlog of $cur observed on non-busy qdisc"
+ log_test "$QDISC backlog properly cleaned"
+
do_red_test 10 $BACKLOG1
do_red_test 11 $BACKLOG2
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/q_in_vni_veto.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/q_in_vni_veto.sh
deleted file mode 100755
index 0231205a7147..000000000000
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/q_in_vni_veto.sh
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-lib_dir=$(dirname $0)/../../../../net/forwarding
-
-VXPORT=4789
-
-ALL_TESTS="
- create_dot1d_and_dot1ad_vxlans
-"
-NUM_NETIFS=2
-source $lib_dir/lib.sh
-
-setup_prepare()
-{
- swp1=${NETIFS[p1]}
- swp2=${NETIFS[p2]}
-
- ip link set dev $swp1 up
- ip link set dev $swp2 up
-}
-
-cleanup()
-{
- pre_cleanup
-
- ip link set dev $swp2 down
- ip link set dev $swp1 down
-}
-
-create_dot1d_and_dot1ad_vxlans()
-{
- RET=0
-
- ip link add dev br0 type bridge vlan_filtering 1 vlan_protocol 802.1ad \
- vlan_default_pvid 0 mcast_snooping 0
- ip link set dev br0 up
-
- ip link add name vx100 type vxlan id 1000 local 192.0.2.17 dstport \
- "$VXPORT" nolearning noudpcsum tos inherit ttl 100
- ip link set dev vx100 up
-
- ip link set dev $swp1 master br0
- ip link set dev vx100 master br0
- bridge vlan add vid 100 dev vx100 pvid untagged
-
- ip link add dev br1 type bridge vlan_filtering 0 mcast_snooping 0
- ip link set dev br1 up
-
- ip link add name vx200 type vxlan id 2000 local 192.0.2.17 dstport \
- "$VXPORT" nolearning noudpcsum tos inherit ttl 100
- ip link set dev vx200 up
-
- ip link set dev $swp2 master br1
- ip link set dev vx200 master br1 2>/dev/null
- check_fail $? "802.1d and 802.1ad VxLANs at the same time not rejected"
-
- ip link set dev vx200 master br1 2>&1 >/dev/null \
- | grep -q mlxsw_spectrum
- check_err $? "802.1d and 802.1ad VxLANs at the same time rejected without extack"
-
- log_test "create 802.1d and 802.1ad VxLANs"
-
- ip link del dev vx200
- ip link del dev br1
- ip link del dev vx100
- ip link del dev br0
-}
-
-trap cleanup EXIT
-
-setup_prepare
-setup_wait
-
-tests_run
-
-exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
index 4a1c9328555f..50654f8a8c37 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
@@ -30,6 +30,7 @@ trap cleanup EXIT
ALL_TESTS="router tc_flower mirror_gre tc_police port"
for current_test in ${TESTS:-$ALL_TESTS}; do
+ RET_FIN=0
source ${current_test}_scale.sh
num_netifs_var=${current_test^^}_NUM_NETIFS
@@ -48,8 +49,9 @@ for current_test in ${TESTS:-$ALL_TESTS}; do
else
log_test "'$current_test' overflow $target"
fi
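+		# Fold this target's result into RET_FIN so that a single
+		# failure fails the whole run.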
+ RET_FIN=$(( RET_FIN || RET ))
done
done
current_test=""
-exit "$RET"
+exit "$RET_FIN"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
index 087a884f66cd..685dfb3478b3 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
@@ -24,6 +24,7 @@ trap cleanup EXIT
ALL_TESTS="router tc_flower mirror_gre tc_police port"
for current_test in ${TESTS:-$ALL_TESTS}; do
+ RET_FIN=0
source ${current_test}_scale.sh
num_netifs_var=${current_test^^}_NUM_NETIFS
@@ -50,8 +51,9 @@ for current_test in ${TESTS:-$ALL_TESTS}; do
log_test "'$current_test' [$profile] overflow $target"
fi
done
+ RET_FIN=$(( RET_FIN || RET ))
done
done
current_test=""
-exit "$RET"
+exit "$RET_FIN"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
index cc0f07e72cf2..aa74be9f47c8 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
@@ -98,11 +98,7 @@ __tc_flower_test()
jq -r '[ .[] | select(.kind == "flower") |
.options | .in_hw ]' | jq .[] | wc -l)
[[ $((offload_count - 1)) -eq $count ]]
- if [[ $should_fail -eq 0 ]]; then
- check_err $? "Offload mismatch"
- else
- check_err_fail $should_fail $? "Offload more than expacted"
- fi
+ check_err_fail $should_fail $? "Attempt to offload $count rules (actual result $((offload_count - 1)))"
}
tc_flower_test()
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh
index 553cb9fad508..5ec3beb637c8 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh
@@ -11,6 +11,7 @@ ALL_TESTS="
matchall_mirror_behind_flower_ingress_test
matchall_sample_behind_flower_ingress_test
matchall_mirror_behind_flower_egress_test
+ matchall_proto_match_test
police_limits_test
multi_police_test
"
@@ -18,6 +19,7 @@ NUM_NETIFS=2
source $lib_dir/tc_common.sh
source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
switch_create()
{
@@ -166,7 +168,8 @@ matchall_sample_egress_test()
RET=0
# It is forbidden in mlxsw driver to have matchall with sample action
- # bound on egress
+	# bound on egress. This is a Spectrum-1 specific restriction.
+ [[ "$DEVLINK_VIDDID" != "15b3:cb84" ]] && return
tc qdisc add dev $swp1 clsact
@@ -289,6 +292,22 @@ matchall_mirror_behind_flower_egress_test()
matchall_behind_flower_egress_test "mirror" "mirred egress mirror dev $swp2"
}
+matchall_proto_match_test()
+{
+ RET=0
+
+ tc qdisc add dev $swp1 clsact
+
+ tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \
+ matchall skip_sw \
+ action sample group 1 rate 100
+ check_fail $? "Incorrect success to add matchall rule with protocol match"
+
+ tc qdisc del dev $swp1 clsact
+
+ log_test "matchall protocol match"
+}
+
police_limits_test()
{
RET=0
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh
new file mode 100755
index 000000000000..093bed088ad0
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh
@@ -0,0 +1,657 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test that packets are sampled when tc-sample is used and that reported
+# metadata is correct. Two sets of hosts (with and without LAG) are used, since
+# metadata extraction in mlxsw is a bit different when LAG is involved.
+#
+# +---------------------------------+ +---------------------------------+
+# | H1 (vrf) | | H3 (vrf) |
+# | + $h1 | | + $h3_lag |
+# | | 192.0.2.1/28 | | | 192.0.2.17/28 |
+# | | | | | |
+# | | default via 192.0.2.2 | | | default via 192.0.2.18 |
+# +----|----------------------------+ +----|----------------------------+
+# | |
+# +----|-----------------------------------------|----------------------------+
+# | | 192.0.2.2/28 | 192.0.2.18/28 |
+# | + $rp1 + $rp3_lag |
+# | |
+# | + $rp2 + $rp4_lag |
+# | | 198.51.100.2/28 | 198.51.100.18/28 |
+# +----|-----------------------------------------|----------------------------+
+# | |
+# +----|----------------------------+ +----|----------------------------+
+# | | default via 198.51.100.2 | | | default via 198.51.100.18 |
+# | | | | | |
+# | | 198.51.100.1/28 | | | 198.51.100.17/28 |
+# | + $h2 | | + $h4_lag |
+# | H2 (vrf) | | H4 (vrf) |
+# +---------------------------------+ +---------------------------------+
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ tc_sample_rate_test
+ tc_sample_max_rate_test
+ tc_sample_conflict_test
+ tc_sample_group_conflict_test
+ tc_sample_md_iif_test
+ tc_sample_md_lag_iif_test
+ tc_sample_md_oif_test
+ tc_sample_md_lag_oif_test
+ tc_sample_md_out_tc_test
+ tc_sample_md_out_tc_occ_test
+ tc_sample_md_latency_test
+ tc_sample_acl_group_conflict_test
+ tc_sample_acl_rate_test
+ tc_sample_acl_max_rate_test
+"
+NUM_NETIFS=8
+CAPTURE_FILE=$(mktemp)
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+
+# Available at https://github.com/Mellanox/libpsample
+require_command psample
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28
+
+ ip -4 route add default vrf v$h1 nexthop via 192.0.2.2
+}
+
+h1_destroy()
+{
+ ip -4 route del default vrf v$h1 nexthop via 192.0.2.2
+
+ simple_if_fini $h1 192.0.2.1/28
+}
+
+h2_create()
+{
+ simple_if_init $h2 198.51.100.1/28
+
+ ip -4 route add default vrf v$h2 nexthop via 198.51.100.2
+}
+
+h2_destroy()
+{
+ ip -4 route del default vrf v$h2 nexthop via 198.51.100.2
+
+ simple_if_fini $h2 198.51.100.1/28
+}
+
+h3_create()
+{
+ ip link set dev $h3 down
+ ip link add name ${h3}_bond type bond mode 802.3ad
+ ip link set dev $h3 master ${h3}_bond
+
+ simple_if_init ${h3}_bond 192.0.2.17/28
+
+ ip -4 route add default vrf v${h3}_bond nexthop via 192.0.2.18
+}
+
+h3_destroy()
+{
+ ip -4 route del default vrf v${h3}_bond nexthop via 192.0.2.18
+
+ simple_if_fini ${h3}_bond 192.0.2.17/28
+
+ ip link set dev $h3 nomaster
+ ip link del dev ${h3}_bond
+}
+
+h4_create()
+{
+ ip link set dev $h4 down
+ ip link add name ${h4}_bond type bond mode 802.3ad
+ ip link set dev $h4 master ${h4}_bond
+
+ simple_if_init ${h4}_bond 198.51.100.17/28
+
+ ip -4 route add default vrf v${h4}_bond nexthop via 198.51.100.18
+}
+
+h4_destroy()
+{
+ ip -4 route del default vrf v${h4}_bond nexthop via 198.51.100.18
+
+ simple_if_fini ${h4}_bond 198.51.100.17/28
+
+ ip link set dev $h4 nomaster
+ ip link del dev ${h4}_bond
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ __addr_add_del $rp1 add 192.0.2.2/28
+ tc qdisc add dev $rp1 clsact
+
+ ip link set dev $rp2 up
+ __addr_add_del $rp2 add 198.51.100.2/28
+ tc qdisc add dev $rp2 clsact
+
+ ip link add name ${rp3}_bond type bond mode 802.3ad
+ ip link set dev $rp3 master ${rp3}_bond
+ __addr_add_del ${rp3}_bond add 192.0.2.18/28
+ tc qdisc add dev $rp3 clsact
+ ip link set dev ${rp3}_bond up
+
+ ip link add name ${rp4}_bond type bond mode 802.3ad
+ ip link set dev $rp4 master ${rp4}_bond
+ __addr_add_del ${rp4}_bond add 198.51.100.18/28
+ tc qdisc add dev $rp4 clsact
+ ip link set dev ${rp4}_bond up
+}
+
+router_destroy()
+{
+ ip link set dev ${rp4}_bond down
+ tc qdisc del dev $rp4 clsact
+ __addr_add_del ${rp4}_bond del 198.51.100.18/28
+ ip link set dev $rp4 nomaster
+ ip link del dev ${rp4}_bond
+
+ ip link set dev ${rp3}_bond down
+ tc qdisc del dev $rp3 clsact
+ __addr_add_del ${rp3}_bond del 192.0.2.18/28
+ ip link set dev $rp3 nomaster
+ ip link del dev ${rp3}_bond
+
+ tc qdisc del dev $rp2 clsact
+ __addr_add_del $rp2 del 198.51.100.2/28
+ ip link set dev $rp2 down
+
+ tc qdisc del dev $rp1 clsact
+ __addr_add_del $rp1 del 192.0.2.2/28
+ ip link set dev $rp1 down
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+ h3=${NETIFS[p5]}
+ rp3=${NETIFS[p6]}
+ h4=${NETIFS[p7]}
+ rp4=${NETIFS[p8]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ h3_create
+ h4_create
+ router_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ rm -f $CAPTURE_FILE
+
+ router_destroy
+ h4_destroy
+ h3_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+psample_capture_start()
+{
+ rm -f $CAPTURE_FILE
+
+ psample &> $CAPTURE_FILE &
+
+ sleep 1
+}
+
+psample_capture_stop()
+{
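+	# Kill the most recently backgrounded job (the psample capture)
+	# and reap it.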
+ { kill %% && wait %%; } 2>/dev/null
+}
+
+__tc_sample_rate_test()
+{
+ local desc=$1; shift
+ local dip=$1; shift
+ local pkts pct
+
+ RET=0
+
+ tc filter add dev $rp1 ingress protocol all pref 1 handle 101 matchall \
+ skip_sw action sample rate 32 group 1
+ check_err $? "Failed to configure sampling rule"
+
+ psample_capture_start
+
+ ip vrf exec v$h1 $MZ $h1 -c 3200 -d 1msec -p 64 -A 192.0.2.1 \
+ -B $dip -t udp dp=52768,sp=42768 -q
+
+ psample_capture_stop
+
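+	# Sanity of the check below: 3200 packets sampled at rate 32 should
+	# yield about 3200 / 32 = 100 samples; pct is the deviation from
+	# that expectation in percent.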
+ pkts=$(grep -e "group 1 " $CAPTURE_FILE | wc -l)
+ pct=$((100 * (pkts - 100) / 100))
+	(( -25 <= pct && pct <= 25 ))
+ check_err $? "Expected 100 packets, got $pkts packets, which is $pct% off. Required accuracy is +-25%"
+
+ log_test "tc sample rate ($desc)"
+
+ tc filter del dev $rp1 ingress protocol all pref 1 handle 101 matchall
+}
+
+tc_sample_rate_test()
+{
+ __tc_sample_rate_test "forward" 198.51.100.1
+ __tc_sample_rate_test "local receive" 192.0.2.2
+}
+
+tc_sample_max_rate_test()
+{
+ RET=0
+
+ tc filter add dev $rp1 ingress protocol all pref 1 handle 101 matchall \
+ skip_sw action sample rate $((35 * 10 ** 8)) group 1
+ check_err $? "Failed to configure sampling rule with max rate"
+
+ tc filter del dev $rp1 ingress protocol all pref 1 handle 101 matchall
+
+ tc filter add dev $rp1 ingress protocol all pref 1 handle 101 matchall \
+ skip_sw action sample rate $((35 * 10 ** 8 + 1)) \
+ group 1 &> /dev/null
+ check_fail $? "Managed to configure sampling rate above maximum"
+
+ log_test "tc sample maximum rate"
+}
+
+tc_sample_conflict_test()
+{
+ RET=0
+
+ # Test that two sampling rules cannot be configured on the same port,
+ # even when they share the same parameters.
+
+ tc filter add dev $rp1 ingress protocol all pref 1 handle 101 matchall \
+ skip_sw action sample rate 1024 group 1
+ check_err $? "Failed to configure sampling rule"
+
+ tc filter add dev $rp1 ingress protocol all pref 2 handle 102 matchall \
+ skip_sw action sample rate 1024 group 1 &> /dev/null
+ check_fail $? "Managed to configure second sampling rule"
+
+ # Delete the first rule and make sure the second rule can now be
+ # configured.
+
+ tc filter del dev $rp1 ingress protocol all pref 1 handle 101 matchall
+
+ tc filter add dev $rp1 ingress protocol all pref 2 handle 102 matchall \
+ skip_sw action sample rate 1024 group 1
+ check_err $? "Failed to configure sampling rule after deletion"
+
+ log_test "tc sample conflict test"
+
+ tc filter del dev $rp1 ingress protocol all pref 2 handle 102 matchall
+}
+
+tc_sample_group_conflict_test()
+{
+ RET=0
+
+ # Test that two sampling rules cannot be configured on the same port
+ # with different groups.
+
+ tc filter add dev $rp1 ingress protocol all pref 1 handle 101 matchall \
+ skip_sw action sample rate 1024 group 1
+ check_err $? "Failed to configure sampling rule"
+
+ tc filter add dev $rp1 ingress protocol all pref 2 handle 102 matchall \
+ skip_sw action sample rate 1024 group 2 &> /dev/null
+ check_fail $? "Managed to configure sampling rule with conflicting group"
+
+ log_test "tc sample group conflict test"
+
+ tc filter del dev $rp1 ingress protocol all pref 1 handle 101 matchall
+}
+
+tc_sample_md_iif_test()
+{
+ local rp1_ifindex
+
+ RET=0
+
+ tc filter add dev $rp1 ingress protocol all pref 1 handle 101 matchall \
+ skip_sw action sample rate 5 group 1
+ check_err $? "Failed to configure sampling rule"
+
+ psample_capture_start
+
+ ip vrf exec v$h1 $MZ $h1 -c 3200 -d 1msec -p 64 -A 192.0.2.1 \
+ -B 198.51.100.1 -t udp dp=52768,sp=42768 -q
+
+ psample_capture_stop
+
+ rp1_ifindex=$(ip -j -p link show dev $rp1 | jq '.[]["ifindex"]')
+ grep -q -e "in-ifindex $rp1_ifindex " $CAPTURE_FILE
+ check_err $? "Sampled packets do not have expected in-ifindex"
+
+ log_test "tc sample iif"
+
+ tc filter del dev $rp1 ingress protocol all pref 1 handle 101 matchall
+}
+
+tc_sample_md_lag_iif_test()
+{
+ local rp3_ifindex
+
+ RET=0
+
+ tc filter add dev $rp3 ingress protocol all pref 1 handle 101 matchall \
+ skip_sw action sample rate 5 group 1
+ check_err $? "Failed to configure sampling rule"
+
+ psample_capture_start
+
+ ip vrf exec v${h3}_bond $MZ ${h3}_bond -c 3200 -d 1msec -p 64 \
+ -A 192.0.2.17 -B 198.51.100.17 -t udp dp=52768,sp=42768 -q
+
+ psample_capture_stop
+
+ rp3_ifindex=$(ip -j -p link show dev $rp3 | jq '.[]["ifindex"]')
+ grep -q -e "in-ifindex $rp3_ifindex " $CAPTURE_FILE
+ check_err $? "Sampled packets do not have expected in-ifindex"
+
+ log_test "tc sample lag iif"
+
+ tc filter del dev $rp3 ingress protocol all pref 1 handle 101 matchall
+}
+
+tc_sample_md_oif_test()
+{
+ local rp2_ifindex
+
+ RET=0
+
+ tc filter add dev $rp1 ingress protocol all pref 1 handle 101 matchall \
+ skip_sw action sample rate 5 group 1
+ check_err $? "Failed to configure sampling rule"
+
+ psample_capture_start
+
+ ip vrf exec v$h1 $MZ $h1 -c 3200 -d 1msec -p 64 -A 192.0.2.1 \
+ -B 198.51.100.1 -t udp dp=52768,sp=42768 -q
+
+ psample_capture_stop
+
+ rp2_ifindex=$(ip -j -p link show dev $rp2 | jq '.[]["ifindex"]')
+ grep -q -e "out-ifindex $rp2_ifindex " $CAPTURE_FILE
+ check_err $? "Sampled packets do not have expected out-ifindex"
+
+ log_test "tc sample oif"
+
+ tc filter del dev $rp1 ingress protocol all pref 1 handle 101 matchall
+}
+
+tc_sample_md_lag_oif_test()
+{
+ local rp4_ifindex
+
+ RET=0
+
+ tc filter add dev $rp3 ingress protocol all pref 1 handle 101 matchall \
+ skip_sw action sample rate 5 group 1
+ check_err $? "Failed to configure sampling rule"
+
+ psample_capture_start
+
+ ip vrf exec v${h3}_bond $MZ ${h3}_bond -c 3200 -d 1msec -p 64 \
+ -A 192.0.2.17 -B 198.51.100.17 -t udp dp=52768,sp=42768 -q
+
+ psample_capture_stop
+
+ rp4_ifindex=$(ip -j -p link show dev $rp4 | jq '.[]["ifindex"]')
+ grep -q -e "out-ifindex $rp4_ifindex " $CAPTURE_FILE
+ check_err $? "Sampled packets do not have expected out-ifindex"
+
+ log_test "tc sample lag oif"
+
+ tc filter del dev $rp3 ingress protocol all pref 1 handle 101 matchall
+}
+
+tc_sample_md_out_tc_test()
+{
+ RET=0
+
+ # Output traffic class is not supported on Spectrum-1.
+ [[ "$DEVLINK_VIDDID" == "15b3:cb84" ]] && return
+
+ tc filter add dev $rp1 ingress protocol all pref 1 handle 101 matchall \
+ skip_sw action sample rate 5 group 1
+ check_err $? "Failed to configure sampling rule"
+
+ # By default, all the packets should go to the same traffic class (0).
+
+ psample_capture_start
+
+ ip vrf exec v$h1 $MZ $h1 -c 3200 -d 1msec -p 64 -A 192.0.2.1 \
+ -B 198.51.100.1 -t udp dp=52768,sp=42768 -q
+
+ psample_capture_stop
+
+ grep -q -e "out-tc 0 " $CAPTURE_FILE
+ check_err $? "Sampled packets do not have expected out-tc (0)"
+
+ # Map all priorities to highest traffic class (7) and check reported
+ # out-tc.
+ tc qdisc replace dev $rp2 root handle 1: \
+ prio bands 3 priomap 0 0 0 0 0 0 0 0
+
+ psample_capture_start
+
+ ip vrf exec v$h1 $MZ $h1 -c 3200 -d 1msec -p 64 -A 192.0.2.1 \
+ -B 198.51.100.1 -t udp dp=52768,sp=42768 -q
+
+ psample_capture_stop
+
+ grep -q -e "out-tc 7 " $CAPTURE_FILE
+ check_err $? "Sampled packets do not have expected out-tc (7)"
+
+ log_test "tc sample out-tc"
+
+ tc qdisc del dev $rp2 root handle 1:
+ tc filter del dev $rp1 ingress protocol all pref 1 handle 101 matchall
+}
+
+tc_sample_md_out_tc_occ_test()
+{
+ local backlog pct occ
+
+ RET=0
+
+ # Output traffic class occupancy is not supported on Spectrum-1.
+ [[ "$DEVLINK_VIDDID" == "15b3:cb84" ]] && return
+
+ tc filter add dev $rp1 ingress protocol all pref 1 handle 101 matchall \
+ skip_sw action sample rate 1024 group 1
+ check_err $? "Failed to configure sampling rule"
+
+ # Configure a shaper on egress to create congestion.
+ tc qdisc replace dev $rp2 root handle 1: \
+ tbf rate 1Mbit burst 256k limit 1M
+
+ psample_capture_start
+
+ ip vrf exec v$h1 $MZ $h1 -c 0 -d 1usec -p 1400 -A 192.0.2.1 \
+ -B 198.51.100.1 -t udp dp=52768,sp=42768 -q &
+
+ # Allow congestion to reach steady state.
+ sleep 10
+
+ backlog=$(tc -j -p -s qdisc show dev $rp2 | jq '.[0]["backlog"]')
+
+ # Kill mausezahn.
+ { kill %% && wait %%; } 2>/dev/null
+
+ psample_capture_stop
+
+ # Record last congestion sample.
+ occ=$(grep -e "out-tc-occ " $CAPTURE_FILE | tail -n 1 | \
+ cut -d ' ' -f 16)
+
+ pct=$((100 * (occ - backlog) / backlog))
+	(( -1 <= pct && pct <= 1 ))
+	check_err $? "Recorded a congestion of $backlog bytes, but sampled congestion is $occ bytes, which is $pct% off. Required accuracy is +-1%"
+
+ log_test "tc sample out-tc-occ"
+
+ tc qdisc del dev $rp2 root handle 1:
+ tc filter del dev $rp1 ingress protocol all pref 1 handle 101 matchall
+}
+
+tc_sample_md_latency_test()
+{
+ RET=0
+
+ # Egress sampling not supported on Spectrum-1.
+ [[ "$DEVLINK_VIDDID" == "15b3:cb84" ]] && return
+
+ tc filter add dev $rp2 egress protocol all pref 1 handle 101 matchall \
+ skip_sw action sample rate 5 group 1
+ check_err $? "Failed to configure sampling rule"
+
+ psample_capture_start
+
+ ip vrf exec v$h1 $MZ $h1 -c 3200 -d 1msec -p 64 -A 192.0.2.1 \
+ -B 198.51.100.1 -t udp dp=52768,sp=42768 -q
+
+ psample_capture_stop
+
+ grep -q -e "latency " $CAPTURE_FILE
+ check_err $? "Sampled packets do not have latency attribute"
+
+ log_test "tc sample latency"
+
+ tc filter del dev $rp2 egress protocol all pref 1 handle 101 matchall
+}
+
+tc_sample_acl_group_conflict_test()
+{
+ RET=0
+
+ # Test that two flower sampling rules cannot be configured on the same
+ # port with different groups.
+
+ # Policy-based sampling is not supported on Spectrum-1.
+ [[ "$DEVLINK_VIDDID" == "15b3:cb84" ]] && return
+
+ tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \
+ skip_sw action sample rate 1024 group 1
+ check_err $? "Failed to configure sampling rule"
+
+ tc filter add dev $rp1 ingress protocol ip pref 2 handle 102 flower \
+ skip_sw action sample rate 1024 group 1
+ check_err $? "Failed to configure sampling rule with same group"
+
+ tc filter add dev $rp1 ingress protocol ip pref 3 handle 103 flower \
+ skip_sw action sample rate 1024 group 2 &> /dev/null
+ check_fail $? "Managed to configure sampling rule with conflicting group"
+
+ log_test "tc sample (w/ flower) group conflict test"
+
+ tc filter del dev $rp1 ingress protocol ip pref 2 handle 102 flower
+ tc filter del dev $rp1 ingress protocol ip pref 1 handle 101 flower
+}
+
+__tc_sample_acl_rate_test()
+{
+ local bind=$1; shift
+ local port=$1; shift
+ local pkts pct
+
+ RET=0
+
+ # Policy-based sampling is not supported on Spectrum-1.
+ [[ "$DEVLINK_VIDDID" == "15b3:cb84" ]] && return
+
+ tc filter add dev $port $bind protocol ip pref 1 handle 101 flower \
+ skip_sw dst_ip 198.51.100.1 action sample rate 32 group 1
+ check_err $? "Failed to configure sampling rule"
+
+ psample_capture_start
+
+ ip vrf exec v$h1 $MZ $h1 -c 3200 -d 1msec -p 64 -A 192.0.2.1 \
+ -B 198.51.100.1 -t udp dp=52768,sp=42768 -q
+
+ psample_capture_stop
+
+ pkts=$(grep -e "group 1 " $CAPTURE_FILE | wc -l)
+ pct=$((100 * (pkts - 100) / 100))
+	(( -25 <= pct && pct <= 25 ))
+ check_err $? "Expected 100 packets, got $pkts packets, which is $pct% off. Required accuracy is +-25%"
+
+	# Set up a filter that should not match any packet and make sure packets
+ # are not sampled.
+ tc filter del dev $port $bind protocol ip pref 1 handle 101 flower
+
+ tc filter add dev $port $bind protocol ip pref 1 handle 101 flower \
+ skip_sw dst_ip 198.51.100.10 action sample rate 32 group 1
+ check_err $? "Failed to configure sampling rule"
+
+ psample_capture_start
+
+ ip vrf exec v$h1 $MZ $h1 -c 3200 -d 1msec -p 64 -A 192.0.2.1 \
+ -B 198.51.100.1 -t udp dp=52768,sp=42768 -q
+
+ psample_capture_stop
+
+ grep -q -e "group 1 " $CAPTURE_FILE
+ check_fail $? "Sampled packets when should not"
+
+ log_test "tc sample (w/ flower) rate ($bind)"
+
+ tc filter del dev $port $bind protocol ip pref 1 handle 101 flower
+}
+
+tc_sample_acl_rate_test()
+{
+ __tc_sample_acl_rate_test ingress $rp1
+ __tc_sample_acl_rate_test egress $rp2
+}
+
+tc_sample_acl_max_rate_test()
+{
+ RET=0
+
+ # Policy-based sampling is not supported on Spectrum-1.
+ [[ "$DEVLINK_VIDDID" == "15b3:cb84" ]] && return
+
+ tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \
+ skip_sw action sample rate $((2 ** 24 - 1)) group 1
+ check_err $? "Failed to configure sampling rule with max rate"
+
+ tc filter del dev $rp1 ingress protocol ip pref 1 handle 101 flower
+
+ tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \
+ skip_sw action sample rate $((2 ** 24)) \
+ group 1 &> /dev/null
+ check_fail $? "Managed to configure sampling rate above maximum"
+
+ log_test "tc sample (w/ flower) maximum rate"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh b/tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh
index 9f64d5c7107b..7ca1f030d209 100644
--- a/tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh
@@ -24,8 +24,11 @@ function check {
local code=$1
local str=$2
local exp_str=$3
+ local exp_fail=$4
- if [ $code -ne 0 ]; then
+ [ -z "$exp_fail" ] && cop="-ne" || cop="-eq"
+
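+	# With exp_fail set, a zero exit code (an unexpected success) is the
+	# error case.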
+ if [ $code $cop 0 ]; then
((num_errors++))
return
fi
diff --git a/tools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh b/tools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh
new file mode 100755
index 000000000000..0c56746e9ce0
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh
@@ -0,0 +1,110 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+
+source ethtool-common.sh
+
+NSIM_NETDEV=$(make_netdev)
+[ a$ETHTOOL == a ] && ETHTOOL=ethtool
+
+set -o pipefail
+
+# netdevsim starts out with None/None
+s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
+check $? "$s" "Configured FEC encodings: None
+Active FEC encoding: None"
+
+# Test Auto
+$ETHTOOL --set-fec $NSIM_NETDEV encoding auto
+check $?
+s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
+check $? "$s" "Configured FEC encodings: Auto
+Active FEC encoding: Off"
+
+# Test case insensitivity
+for o in off Off OFF; do
+ $ETHTOOL --set-fec $NSIM_NETDEV encoding $o
+ check $?
+ s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
+ check $? "$s" "Configured FEC encodings: Off
+Active FEC encoding: Off"
+done
+
+for o in BaseR baser BAser; do
+ $ETHTOOL --set-fec $NSIM_NETDEV encoding $o
+ check $?
+ s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
+ check $? "$s" "Configured FEC encodings: BaseR
+Active FEC encoding: BaseR"
+done
+
+for o in llrs rs; do
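+	# Bash's ${o^^} expansion upper-cases the encoding name to match the
+	# ethtool output.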
+ $ETHTOOL --set-fec $NSIM_NETDEV encoding $o
+ check $?
+ s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
+ check $? "$s" "Configured FEC encodings: ${o^^}
+Active FEC encoding: ${o^^}"
+done
+
+# Test multiple bits
+$ETHTOOL --set-fec $NSIM_NETDEV encoding rs llrs
+check $?
+s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
+check $? "$s" "Configured FEC encodings: RS LLRS
+Active FEC encoding: LLRS"
+
+$ETHTOOL --set-fec $NSIM_NETDEV encoding rs off auto
+check $?
+s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
+check $? "$s" "Configured FEC encodings: Auto Off RS
+Active FEC encoding: RS"
+
+# Make sure other link modes are rejected
+$ETHTOOL --set-fec $NSIM_NETDEV encoding FIBRE 2>/dev/null
+check $? '' '' 1
+
+$ETHTOOL --set-fec $NSIM_NETDEV encoding bla-bla-bla 2>/dev/null
+check $? '' '' 1
+
+# Try JSON
+$ETHTOOL --json --show-fec $NSIM_NETDEV | jq empty >>/dev/null 2>&1
+if [ $? -eq 0 ]; then
+ $ETHTOOL --set-fec $NSIM_NETDEV encoding auto
+ check $?
+
+ s=$($ETHTOOL --json --show-fec $NSIM_NETDEV | jq '.[].config[]')
+ check $? "$s" '"Auto"'
+ s=$($ETHTOOL --json --show-fec $NSIM_NETDEV | jq '.[].active[]')
+ check $? "$s" '"Off"'
+
+ $ETHTOOL --set-fec $NSIM_NETDEV encoding auto RS
+ check $?
+
+ s=$($ETHTOOL --json --show-fec $NSIM_NETDEV | jq '.[].config[]')
+ check $? "$s" '"Auto"
+"RS"'
+ s=$($ETHTOOL --json --show-fec $NSIM_NETDEV | jq '.[].active[]')
+ check $? "$s" '"RS"'
+fi
+
+# Test error injection
+echo 11 > $NSIM_DEV_DFS/ethtool/get_err
+
+$ETHTOOL --show-fec $NSIM_NETDEV >>/dev/null 2>&1
+check $? '' '' 1
+
+echo 0 > $NSIM_DEV_DFS/ethtool/get_err
+echo 11 > $NSIM_DEV_DFS/ethtool/set_err
+
+$ETHTOOL --show-fec $NSIM_NETDEV >>/dev/null 2>&1
+check $?
+
+$ETHTOOL --set-fec $NSIM_NETDEV encoding RS 2>/dev/null
+check $? '' '' 1
+
+if [ $num_errors -eq 0 ]; then
+ echo "PASSED all $((num_passes)) checks"
+ exit 0
+else
+ echo "FAILED $num_errors/$((num_errors+num_passes)) checks"
+ exit 1
+fi
diff --git a/tools/testing/selftests/drivers/net/netdevsim/nexthop.sh b/tools/testing/selftests/drivers/net/netdevsim/nexthop.sh
index be0c1b5ee6b8..ba75c81cda91 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/nexthop.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/nexthop.sh
@@ -11,14 +11,33 @@ ALL_TESTS="
nexthop_single_add_err_test
nexthop_group_add_test
nexthop_group_add_err_test
+ nexthop_res_group_add_test
+ nexthop_res_group_add_err_test
nexthop_group_replace_test
nexthop_group_replace_err_test
+ nexthop_res_group_replace_test
+ nexthop_res_group_replace_err_test
+ nexthop_res_group_idle_timer_test
+ nexthop_res_group_idle_timer_del_test
+ nexthop_res_group_increase_idle_timer_test
+ nexthop_res_group_decrease_idle_timer_test
+ nexthop_res_group_unbalanced_timer_test
+ nexthop_res_group_unbalanced_timer_del_test
+ nexthop_res_group_no_unbalanced_timer_test
+ nexthop_res_group_short_unbalanced_timer_test
+ nexthop_res_group_increase_unbalanced_timer_test
+ nexthop_res_group_decrease_unbalanced_timer_test
+ nexthop_res_group_force_migrate_busy_test
nexthop_single_replace_test
nexthop_single_replace_err_test
nexthop_single_in_group_replace_test
nexthop_single_in_group_replace_err_test
+ nexthop_single_in_res_group_replace_test
+ nexthop_single_in_res_group_replace_err_test
nexthop_single_in_group_delete_test
nexthop_single_in_group_delete_err_test
+ nexthop_single_in_res_group_delete_test
+ nexthop_single_in_res_group_delete_err_test
nexthop_replay_test
nexthop_replay_err_test
"
@@ -27,6 +46,7 @@ DEV_ADDR=1337
DEV=netdevsim${DEV_ADDR}
DEVLINK_DEV=netdevsim/${DEV}
SYSFS_NET_DIR=/sys/bus/netdevsim/devices/$DEV/net/
+DEBUGFS_NET_DIR=/sys/kernel/debug/netdevsim/$DEV/
NUM_NETIFS=0
source $lib_dir/lib.sh
source $lib_dir/devlink_lib.sh
@@ -44,6 +64,28 @@ nexthop_check()
return 0
}
+nexthop_bucket_nhid_count_check()
+{
+ local group_id=$1; shift
+ local expected
+ local count
+ local nhid
+ local ret
+
+ while (($# > 0)); do
+ nhid=$1; shift
+ expected=$1; shift
+
+ count=$($IP nexthop bucket show id $group_id nhid $nhid |
+ grep "trap" | wc -l)
+ if ((expected != count)); then
+ return 1
+ fi
+ done
+
+ return 0
+}
+
nexthop_resource_check()
{
local expected_occ=$1; shift
@@ -159,6 +201,71 @@ nexthop_group_add_err_test()
nexthop_resource_set 9999
}
+nexthop_res_group_add_test()
+{
+ RET=0
+
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+
+ $IP nexthop add id 10 group 1/2 type resilient buckets 4
+ nexthop_check "id 10" "id 10 group 1/2 type resilient buckets 4 idle_timer 120 unbalanced_timer 0 unbalanced_time 0 trap"
+ check_err $? "Unexpected nexthop group entry"
+
+ nexthop_bucket_nhid_count_check 10 1 2
+ check_err $? "Wrong nexthop buckets count"
+ nexthop_bucket_nhid_count_check 10 2 2
+ check_err $? "Wrong nexthop buckets count"
+
+ nexthop_resource_check 6
+ check_err $? "Wrong nexthop occupancy"
+
+ $IP nexthop del id 10
+ nexthop_resource_check 2
+ check_err $? "Wrong nexthop occupancy after delete"
+
+ $IP nexthop add id 10 group 1,3/2,2 type resilient buckets 5
+ nexthop_check "id 10" "id 10 group 1,3/2,2 type resilient buckets 5 idle_timer 120 unbalanced_timer 0 unbalanced_time 0 trap"
+ check_err $? "Unexpected weighted nexthop group entry"
+
+ nexthop_bucket_nhid_count_check 10 1 3
+ check_err $? "Wrong nexthop buckets count"
+ nexthop_bucket_nhid_count_check 10 2 2
+ check_err $? "Wrong nexthop buckets count"
+
+ nexthop_resource_check 7
+ check_err $? "Wrong weighted nexthop occupancy"
+
+ $IP nexthop del id 10
+ nexthop_resource_check 2
+ check_err $? "Wrong nexthop occupancy after delete"
+
+ log_test "Resilient nexthop group add and delete"
+
+ $IP nexthop flush &> /dev/null
+}
+
+nexthop_res_group_add_err_test()
+{
+ RET=0
+
+ nexthop_resource_set 2
+
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+
+ $IP nexthop add id 10 group 1/2 type resilient buckets 4 &> /dev/null
+ check_fail $? "Nexthop group addition succeeded when should fail"
+
+ nexthop_resource_check 2
+ check_err $? "Wrong nexthop occupancy"
+
+ log_test "Resilient nexthop group add failure"
+
+ $IP nexthop flush &> /dev/null
+ nexthop_resource_set 9999
+}
+
nexthop_group_replace_test()
{
RET=0
@@ -206,6 +313,411 @@ nexthop_group_replace_err_test()
nexthop_resource_set 9999
}
+nexthop_res_group_replace_test()
+{
+ RET=0
+
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+ $IP nexthop add id 3 via 192.0.2.4 dev dummy1
+ $IP nexthop add id 10 group 1/2 type resilient buckets 6
+
+ $IP nexthop replace id 10 group 1/2/3 type resilient
+ nexthop_check "id 10" "id 10 group 1/2/3 type resilient buckets 6 idle_timer 120 unbalanced_timer 0 unbalanced_time 0 trap"
+ check_err $? "Unexpected nexthop group entry"
+
+ nexthop_bucket_nhid_count_check 10 1 2
+ check_err $? "Wrong nexthop buckets count"
+ nexthop_bucket_nhid_count_check 10 2 2
+ check_err $? "Wrong nexthop buckets count"
+ nexthop_bucket_nhid_count_check 10 3 2
+ check_err $? "Wrong nexthop buckets count"
+
+ nexthop_resource_check 9
+ check_err $? "Wrong nexthop occupancy"
+
+ log_test "Resilient nexthop group replace"
+
+ $IP nexthop flush &> /dev/null
+}
+
+nexthop_res_group_replace_err_test()
+{
+ RET=0
+
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+ $IP nexthop add id 3 via 192.0.2.4 dev dummy1
+ $IP nexthop add id 10 group 1/2 type resilient buckets 6
+
+ ip netns exec testns1 \
+ echo 1 > $DEBUGFS_NET_DIR/fib/fail_res_nexthop_group_replace
+ $IP nexthop replace id 10 group 1/2/3 type resilient &> /dev/null
+ check_fail $? "Nexthop group replacement succeeded when should fail"
+
+ nexthop_check "id 10" "id 10 group 1/2 type resilient buckets 6 idle_timer 120 unbalanced_timer 0 unbalanced_time 0 trap"
+ check_err $? "Unexpected nexthop group entry after failure"
+
+ nexthop_bucket_nhid_count_check 10 1 3
+ check_err $? "Wrong nexthop buckets count"
+ nexthop_bucket_nhid_count_check 10 2 3
+ check_err $? "Wrong nexthop buckets count"
+
+ nexthop_resource_check 9
+ check_err $? "Wrong nexthop occupancy after failure"
+
+ log_test "Resilient nexthop group replace failure"
+
+ $IP nexthop flush &> /dev/null
+ ip netns exec testns1 \
+ echo 0 > $DEBUGFS_NET_DIR/fib/fail_res_nexthop_group_replace
+}
+
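+# Mark $count buckets (all of them when count is omitted) currently
+# assigned to $nhid in group $group_id as recently active by writing
+# their indices to the netdevsim nexthop_bucket_activity debugfs file.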
+nexthop_res_mark_buckets_busy()
+{
+ local group_id=$1; shift
+ local nhid=$1; shift
+ local count=$1; shift
+ local index
+
+ for index in $($IP -j nexthop bucket show id $group_id nhid $nhid |
+ jq '.[].bucket.index' | head -n ${count:--0})
+ do
+ echo $group_id $index \
+ > $DEBUGFS_NET_DIR/fib/nexthop_bucket_activity
+ done
+}
+
+nexthop_res_num_nhid_buckets()
+{
+ local group_id=$1; shift
+ local nhid=$1; shift
+
+ $IP -j nexthop bucket show id $group_id nhid $nhid | jq length
+}
+
+nexthop_res_group_idle_timer_test()
+{
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+
+ RET=0
+
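+	# The buckets of nhid 1 are marked busy below, so the group stays
+	# unbalanced after the weight change until the 4 second idle timer
+	# expires and the excess buckets can migrate to nhid 2.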
+ $IP nexthop add id 10 group 1/2 type resilient buckets 8 idle_timer 4
+ nexthop_res_mark_buckets_busy 10 1
+ $IP nexthop replace id 10 group 1/2,3 type resilient
+
+ nexthop_bucket_nhid_count_check 10 1 4 2 4
+ check_err $? "Group expected to be unbalanced"
+
+ sleep 6
+
+ nexthop_bucket_nhid_count_check 10 1 2 2 6
+ check_err $? "Group expected to be balanced"
+
+ log_test "Bucket migration after idle timer"
+
+ $IP nexthop flush &> /dev/null
+}
+
+nexthop_res_group_idle_timer_del_test()
+{
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+ $IP nexthop add id 3 via 192.0.2.3 dev dummy1
+
+ RET=0
+
+ $IP nexthop add id 10 group 1,50/2,50/3,1 \
+ type resilient buckets 8 idle_timer 6
+ nexthop_res_mark_buckets_busy 10 1
+ $IP nexthop replace id 10 group 1,50/2,150/3,1 type resilient
+
+ nexthop_bucket_nhid_count_check 10 1 4 2 4 3 0
+ check_err $? "Group expected to be unbalanced"
+
+ sleep 4
+
+ # Deletion prompts group replacement. Check that the bucket timers
+ # are kept.
+ $IP nexthop delete id 3
+
+ nexthop_bucket_nhid_count_check 10 1 4 2 4
+ check_err $? "Group expected to still be unbalanced"
+
+ sleep 4
+
+ nexthop_bucket_nhid_count_check 10 1 2 2 6
+ check_err $? "Group expected to be balanced"
+
+ log_test "Bucket migration after idle timer (with delete)"
+
+ $IP nexthop flush &> /dev/null
+}
+
+__nexthop_res_group_increase_timer_test()
+{
+ local timer=$1; shift
+
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+
+ RET=0
+
+ $IP nexthop add id 10 group 1/2 type resilient buckets 8 $timer 4
+ nexthop_res_mark_buckets_busy 10 1
+ $IP nexthop replace id 10 group 1/2,3 type resilient
+
+ nexthop_bucket_nhid_count_check 10 2 6
+ check_fail $? "Group expected to be unbalanced"
+
+ sleep 2
+ $IP nexthop replace id 10 group 1/2,3 type resilient $timer 8
+ sleep 4
+
+ # 6 seconds, past the original timer.
+ nexthop_bucket_nhid_count_check 10 2 6
+ check_fail $? "Group still expected to be unbalanced"
+
+ sleep 4
+
+ # 10 seconds, past the new timer.
+ nexthop_bucket_nhid_count_check 10 2 6
+ check_err $? "Group expected to be balanced"
+
+ log_test "Bucket migration after $timer increase"
+
+ $IP nexthop flush &> /dev/null
+}
+
+__nexthop_res_group_decrease_timer_test()
+{
+ local timer=$1; shift
+
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+
+ RET=0
+
+ $IP nexthop add id 10 group 1/2 type resilient buckets 8 $timer 8
+ nexthop_res_mark_buckets_busy 10 1
+ $IP nexthop replace id 10 group 1/2,3 type resilient
+
+ nexthop_bucket_nhid_count_check 10 2 6
+ check_fail $? "Group expected to be unbalanced"
+
+ sleep 2
+ $IP nexthop replace id 10 group 1/2,3 type resilient $timer 4
+ sleep 4
+
+ # 6 seconds, past the new timer, before the old timer.
+ nexthop_bucket_nhid_count_check 10 2 6
+ check_err $? "Group expected to be balanced"
+
+ log_test "Bucket migration after $timer decrease"
+
+ $IP nexthop flush &> /dev/null
+}
+
+__nexthop_res_group_increase_timer_del_test()
+{
+ local timer=$1; shift
+
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+ $IP nexthop add id 3 via 192.0.2.3 dev dummy1
+
+ RET=0
+
+ $IP nexthop add id 10 group 1,100/2,100/3,1 \
+ type resilient buckets 8 $timer 4
+ nexthop_res_mark_buckets_busy 10 1
+ $IP nexthop replace id 10 group 1,100/2,300/3,1 type resilient
+
+ nexthop_bucket_nhid_count_check 10 2 6
+ check_fail $? "Group expected to be unbalanced"
+
+ sleep 2
+ $IP nexthop replace id 10 group 1/2,3 type resilient $timer 8
+ sleep 4
+
+ # 6 seconds, past the original timer.
+ nexthop_bucket_nhid_count_check 10 2 6
+ check_fail $? "Group still expected to be unbalanced"
+
+ sleep 4
+
+ # 10 seconds, past the new timer.
+ nexthop_bucket_nhid_count_check 10 2 6
+ check_err $? "Group expected to be balanced"
+
+ log_test "Bucket migration after $timer increase"
+
+ $IP nexthop flush &> /dev/null
+}
+
+nexthop_res_group_increase_idle_timer_test()
+{
+ __nexthop_res_group_increase_timer_test idle_timer
+}
+
+nexthop_res_group_decrease_idle_timer_test()
+{
+ __nexthop_res_group_decrease_timer_test idle_timer
+}
+
+nexthop_res_group_unbalanced_timer_test()
+{
+ local i
+
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+
+ RET=0
+
+ $IP nexthop add id 10 group 1/2 type resilient \
+ buckets 8 idle_timer 6 unbalanced_timer 10
+ nexthop_res_mark_buckets_busy 10 1
+ $IP nexthop replace id 10 group 1/2,3 type resilient
+
+ for i in 1 2; do
+ sleep 4
+ nexthop_bucket_nhid_count_check 10 1 4 2 4
+ check_err $? "$i: Group expected to be unbalanced"
+ nexthop_res_mark_buckets_busy 10 1
+ done
+
+ # 3 x sleep 4 > unbalanced timer 10
+ sleep 4
+ nexthop_bucket_nhid_count_check 10 1 2 2 6
+ check_err $? "Group expected to be balanced"
+
+ log_test "Bucket migration after unbalanced timer"
+
+ $IP nexthop flush &> /dev/null
+}
+
+nexthop_res_group_unbalanced_timer_del_test()
+{
+ local i
+
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+ $IP nexthop add id 3 via 192.0.2.3 dev dummy1
+
+ RET=0
+
+ $IP nexthop add id 10 group 1,50/2,50/3,1 type resilient \
+ buckets 8 idle_timer 6 unbalanced_timer 10
+ nexthop_res_mark_buckets_busy 10 1
+ $IP nexthop replace id 10 group 1,50/2,150/3,1 type resilient
+
+ # Check that NH delete does not reset unbalanced time.
+ sleep 4
+ $IP nexthop delete id 3
+ nexthop_bucket_nhid_count_check 10 1 4 2 4
+ check_err $? "1: Group expected to be unbalanced"
+ nexthop_res_mark_buckets_busy 10 1
+
+ sleep 4
+ nexthop_bucket_nhid_count_check 10 1 4 2 4
+ check_err $? "2: Group expected to be unbalanced"
+ nexthop_res_mark_buckets_busy 10 1
+
+ # 3 x sleep 4 > unbalanced timer 10
+ sleep 4
+ nexthop_bucket_nhid_count_check 10 1 2 2 6
+ check_err $? "Group expected to be balanced"
+
+ log_test "Bucket migration after unbalanced timer (with delete)"
+
+ $IP nexthop flush &> /dev/null
+}
+
+nexthop_res_group_no_unbalanced_timer_test()
+{
+ local i
+
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+
+ RET=0
+
+ $IP nexthop add id 10 group 1/2 type resilient buckets 8
+ nexthop_res_mark_buckets_busy 10 1
+ $IP nexthop replace id 10 group 1/2,3 type resilient
+
+ for i in $(seq 3); do
+ sleep 60
+ nexthop_bucket_nhid_count_check 10 2 6
+ check_fail $? "$i: Group expected to be unbalanced"
+ nexthop_res_mark_buckets_busy 10 1
+ done
+
+ log_test "Buckets never force-migrated without unbalanced timer"
+
+ $IP nexthop flush &> /dev/null
+}
+
+nexthop_res_group_short_unbalanced_timer_test()
+{
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+
+ RET=0
+
+ $IP nexthop add id 10 group 1/2 type resilient \
+ buckets 8 idle_timer 120 unbalanced_timer 4
+ nexthop_res_mark_buckets_busy 10 1
+ $IP nexthop replace id 10 group 1/2,3 type resilient
+
+ nexthop_bucket_nhid_count_check 10 2 6
+ check_fail $? "Group expected to be unbalanced"
+
+ sleep 5
+
+ nexthop_bucket_nhid_count_check 10 2 6
+ check_err $? "Group expected to be balanced"
+
+ log_test "Bucket migration after unbalanced < idle timer"
+
+ $IP nexthop flush &> /dev/null
+}
+
+nexthop_res_group_increase_unbalanced_timer_test()
+{
+ __nexthop_res_group_increase_timer_test unbalanced_timer
+}
+
+nexthop_res_group_decrease_unbalanced_timer_test()
+{
+ __nexthop_res_group_decrease_timer_test unbalanced_timer
+}
+
+nexthop_res_group_force_migrate_busy_test()
+{
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+
+ RET=0
+
+ $IP nexthop add id 10 group 1/2 type resilient \
+ buckets 8 idle_timer 120
+ nexthop_res_mark_buckets_busy 10 1
+ $IP nexthop replace id 10 group 1/2,3 type resilient
+
+ nexthop_bucket_nhid_count_check 10 2 6
+ check_fail $? "Group expected to be unbalanced"
+
+ $IP nexthop replace id 10 group 2 type resilient
+ nexthop_bucket_nhid_count_check 10 2 8
+ check_err $? "All buckets expected to have migrated"
+
+ log_test "Busy buckets force-migrated when NH removed"
+
+ $IP nexthop flush &> /dev/null
+}
+
nexthop_single_replace_test()
{
RET=0
@@ -299,6 +811,63 @@ nexthop_single_in_group_replace_err_test()
nexthop_resource_set 9999
}
+nexthop_single_in_res_group_replace_test()
+{
+ RET=0
+
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+ $IP nexthop add id 10 group 1/2 type resilient buckets 4
+
+ $IP nexthop replace id 1 via 192.0.2.4 dev dummy1
+ check_err $? "Failed to replace nexthop when should not"
+
+ nexthop_check "id 10" "id 10 group 1/2 type resilient buckets 4 idle_timer 120 unbalanced_timer 0 unbalanced_time 0 trap"
+ check_err $? "Unexpected nexthop group entry"
+
+ nexthop_bucket_nhid_count_check 10 1 2 2 2
+ check_err $? "Wrong nexthop buckets count"
+
+ nexthop_resource_check 6
+ check_err $? "Wrong nexthop occupancy"
+
+ log_test "Single nexthop replace while in resilient group"
+
+ $IP nexthop flush &> /dev/null
+}
+
+nexthop_single_in_res_group_replace_err_test()
+{
+ RET=0
+
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+ $IP nexthop add id 10 group 1/2 type resilient buckets 4
+
+ ip netns exec testns1 \
+ echo 1 > $DEBUGFS_NET_DIR/fib/fail_nexthop_bucket_replace
+ $IP nexthop replace id 1 via 192.0.2.4 dev dummy1 &> /dev/null
+ check_fail $? "Nexthop replacement succeeded when should fail"
+
+ nexthop_check "id 1" "id 1 via 192.0.2.2 dev dummy1 scope link trap"
+ check_err $? "Unexpected nexthop entry after failure"
+
+ nexthop_check "id 10" "id 10 group 1/2 type resilient buckets 4 idle_timer 120 unbalanced_timer 0 unbalanced_time 0 trap"
+ check_err $? "Unexpected nexthop group entry after failure"
+
+ nexthop_bucket_nhid_count_check 10 1 2 2 2
+ check_err $? "Wrong nexthop buckets count"
+
+ nexthop_resource_check 6
+ check_err $? "Wrong nexthop occupancy"
+
+ log_test "Single nexthop replace while in resilient group failure"
+
+ $IP nexthop flush &> /dev/null
+ ip netns exec testns1 \
+ echo 0 > $DEBUGFS_NET_DIR/fib/fail_nexthop_bucket_replace
+}
+
nexthop_single_in_group_delete_test()
{
RET=0
@@ -346,6 +915,57 @@ nexthop_single_in_group_delete_err_test()
nexthop_resource_set 9999
}
+nexthop_single_in_res_group_delete_test()
+{
+ RET=0
+
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+ $IP nexthop add id 10 group 1/2 type resilient buckets 4
+
+ $IP nexthop del id 1
+ nexthop_check "id 10" "id 10 group 2 type resilient buckets 4 idle_timer 120 unbalanced_timer 0 unbalanced_time 0 trap"
+ check_err $? "Unexpected nexthop group entry"
+
+ nexthop_bucket_nhid_count_check 10 2 4
+ check_err $? "Wrong nexthop buckets count"
+
+ nexthop_resource_check 5
+ check_err $? "Wrong nexthop occupancy"
+
+ log_test "Single nexthop delete while in resilient group"
+
+ $IP nexthop flush &> /dev/null
+}
+
+nexthop_single_in_res_group_delete_err_test()
+{
+ RET=0
+
+ $IP nexthop add id 1 via 192.0.2.2 dev dummy1
+ $IP nexthop add id 2 via 192.0.2.3 dev dummy1
+ $IP nexthop add id 3 via 192.0.2.4 dev dummy1
+ $IP nexthop add id 10 group 1/2/3 type resilient buckets 6
+
+ ip netns exec testns1 \
+ echo 1 > $DEBUGFS_NET_DIR/fib/fail_nexthop_bucket_replace
+ $IP nexthop del id 1
+
+ # We failed to replace the two nexthop buckets that were originally
+ # assigned to nhid 1.
+ nexthop_bucket_nhid_count_check 10 2 2 3 2
+ check_err $? "Wrong nexthop buckets count"
+
+ nexthop_resource_check 8
+ check_err $? "Wrong nexthop occupancy"
+
+ log_test "Single nexthop delete while in resilient group failure"
+
+ $IP nexthop flush &> /dev/null
+ ip netns exec testns1 \
+ echo 0 > $DEBUGFS_NET_DIR/fib/fail_nexthop_bucket_replace
+}
+
nexthop_replay_test()
{
RET=0
diff --git a/tools/testing/selftests/drivers/net/netdevsim/psample.sh b/tools/testing/selftests/drivers/net/netdevsim/psample.sh
new file mode 100755
index 000000000000..ee10b1a8933c
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netdevsim/psample.sh
@@ -0,0 +1,181 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# This test is for checking the psample module. It makes use of netdevsim
+# which periodically generates "sampled" packets.
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ psample_enable_test
+ psample_group_num_test
+ psample_md_test
+"
+NETDEVSIM_PATH=/sys/bus/netdevsim/
+DEV_ADDR=1337
+DEV=netdevsim${DEV_ADDR}
+DEVLINK_DEV=netdevsim/${DEV}
+SYSFS_NET_DIR=/sys/bus/netdevsim/devices/$DEV/net/
+PSAMPLE_DIR=/sys/kernel/debug/netdevsim/$DEV/psample/
+CAPTURE_FILE=$(mktemp)
+NUM_NETIFS=0
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+
+# Available at https://github.com/Mellanox/libpsample
+require_command psample
+
+psample_capture()
+{
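+	# netdevsim emits samples periodically while enabled, so a 2 second
+	# capture window is enough to observe them.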
+ rm -f $CAPTURE_FILE
+
+ timeout 2 ip netns exec testns1 psample &> $CAPTURE_FILE
+}
+
+psample_enable_test()
+{
+ RET=0
+
+ echo 1 > $PSAMPLE_DIR/enable
+ check_err $? "Failed to enable sampling when should not"
+
+ echo 1 > $PSAMPLE_DIR/enable 2>/dev/null
+ check_fail $? "Sampling enablement succeeded when should fail"
+
+ psample_capture
+ if [ $(cat $CAPTURE_FILE | wc -l) -eq 0 ]; then
+ check_err 1 "Failed to capture sampled packets"
+ fi
+
+ echo 0 > $PSAMPLE_DIR/enable
+ check_err $? "Failed to disable sampling when should not"
+
+ echo 0 > $PSAMPLE_DIR/enable 2>/dev/null
+ check_fail $? "Sampling disablement succeeded when should fail"
+
+ psample_capture
+ if [ $(cat $CAPTURE_FILE | wc -l) -ne 0 ]; then
+ check_err 1 "Captured sampled packets when should not"
+ fi
+
+ log_test "psample enable / disable"
+}
+
+psample_group_num_test()
+{
+ RET=0
+
+ echo 1234 > $PSAMPLE_DIR/group_num
+ echo 1 > $PSAMPLE_DIR/enable
+
+ psample_capture
+ grep -q -e "group 1234" $CAPTURE_FILE
+ check_err $? "Sampled packets reported with wrong group number"
+
+ # New group number should only be used after disable / enable.
+ echo 4321 > $PSAMPLE_DIR/group_num
+
+ psample_capture
+ grep -q -e "group 4321" $CAPTURE_FILE
+ check_fail $? "Group number changed while sampling is active"
+
+ echo 0 > $PSAMPLE_DIR/enable && echo 1 > $PSAMPLE_DIR/enable
+
+ psample_capture
+ grep -q -e "group 4321" $CAPTURE_FILE
+ check_err $? "Group number did not change after restarting sampling"
+
+ log_test "psample group number"
+
+ echo 0 > $PSAMPLE_DIR/enable
+}
+
+psample_md_test()
+{
+ RET=0
+
+ echo 1 > $PSAMPLE_DIR/enable
+
+ echo 1234 > $PSAMPLE_DIR/in_ifindex
+ echo 4321 > $PSAMPLE_DIR/out_ifindex
+ psample_capture
+
+ grep -q -e "in-ifindex 1234" $CAPTURE_FILE
+ check_err $? "Sampled packets reported with wrong in-ifindex"
+
+ grep -q -e "out-ifindex 4321" $CAPTURE_FILE
+ check_err $? "Sampled packets reported with wrong out-ifindex"
+
+ echo 5 > $PSAMPLE_DIR/out_tc
+ psample_capture
+
+ grep -q -e "out-tc 5" $CAPTURE_FILE
+ check_err $? "Sampled packets reported with wrong out-tc"
+
+ echo $((2**16 - 1)) > $PSAMPLE_DIR/out_tc
+ psample_capture
+
+ grep -q -e "out-tc " $CAPTURE_FILE
+ check_fail $? "Sampled packets reported with out-tc when should not"
+
+ echo 1 > $PSAMPLE_DIR/out_tc
+ echo 10000 > $PSAMPLE_DIR/out_tc_occ_max
+ psample_capture
+
+ grep -q -e "out-tc-occ " $CAPTURE_FILE
+ check_err $? "Sampled packets not reported with out-tc-occ when should"
+
+ echo 0 > $PSAMPLE_DIR/out_tc_occ_max
+ psample_capture
+
+ grep -q -e "out-tc-occ " $CAPTURE_FILE
+ check_fail $? "Sampled packets reported with out-tc-occ when should not"
+
+ echo 10000 > $PSAMPLE_DIR/latency_max
+ psample_capture
+
+ grep -q -e "latency " $CAPTURE_FILE
+ check_err $? "Sampled packets not reported with latency when should"
+
+ echo 0 > $PSAMPLE_DIR/latency_max
+ psample_capture
+
+ grep -q -e "latency " $CAPTURE_FILE
+ check_fail $? "Sampled packets reported with latency when should not"
+
+ log_test "psample metadata"
+
+ echo 0 > $PSAMPLE_DIR/enable
+}
+
+setup_prepare()
+{
+ modprobe netdevsim &> /dev/null
+
+ echo "$DEV_ADDR 1" > ${NETDEVSIM_PATH}/new_device
+ while [ ! -d $SYSFS_NET_DIR ] ; do :; done
+
+ set -e
+
+ ip netns add testns1
+ devlink dev reload $DEVLINK_DEV netns testns1
+
+ set +e
+}
+
+cleanup()
+{
+ pre_cleanup
+ rm -f $CAPTURE_FILE
+ ip netns del testns1
+ echo "$DEV_ADDR" > ${NETDEVSIM_PATH}/del_device
+ modprobe -r netdevsim &> /dev/null
+}
+
+trap cleanup EXIT
+
+setup_prepare
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/firmware/fw_namespace.c b/tools/testing/selftests/firmware/fw_namespace.c
index 5ebc1aec7923..0e393cb5f42d 100644
--- a/tools/testing/selftests/firmware/fw_namespace.c
+++ b/tools/testing/selftests/firmware/fw_namespace.c
@@ -95,7 +95,7 @@ static bool test_fw_in_ns(const char *fw_name, const char *sys_path, bool block_
}
if (block_fw_in_parent_ns)
umount("/lib/firmware");
- return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false;
+ return WEXITSTATUS(status) == EXIT_SUCCESS;
}
if (unshare(CLONE_NEWNS) != 0) {
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 7bd7e776c266..bd83158e0e0b 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
/aarch64/get-reg-list
/aarch64/get-reg-list-sve
+/aarch64/vgic_init
/s390x/memop
/s390x/resets
/s390x/sync_regs_test
@@ -38,6 +39,7 @@
/dirty_log_perf_test
/hardware_disable_test
/kvm_create_max_vcpus
+/kvm_page_table_test
/memslot_modification_stress_test
/set_memory_region_test
/steal_time
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 67eebb53235f..e439d027939d 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-include ../../../../scripts/Kbuild.include
+include ../../../build/Build.include
all:
@@ -72,16 +72,19 @@ TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
TEST_GEN_PROGS_x86_64 += hardware_disable_test
TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
+TEST_GEN_PROGS_x86_64 += kvm_page_table_test
TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test
TEST_GEN_PROGS_x86_64 += set_memory_region_test
TEST_GEN_PROGS_x86_64 += steal_time
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list-sve
+TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
+TEST_GEN_PROGS_aarch64 += kvm_page_table_test
TEST_GEN_PROGS_aarch64 += set_memory_region_test
TEST_GEN_PROGS_aarch64 += steal_time
@@ -91,6 +94,7 @@ TEST_GEN_PROGS_s390x += s390x/sync_regs_test
TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
+TEST_GEN_PROGS_s390x += kvm_page_table_test
TEST_GEN_PROGS_s390x += set_memory_region_test
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c
new file mode 100644
index 000000000000..623f31a14326
--- /dev/null
+++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * vgic init sequence tests
+ *
+ * Copyright (C) 2020, Red Hat, Inc.
+ */
+#define _GNU_SOURCE
+#include <linux/kernel.h>
+#include <sys/syscall.h>
+#include <asm/kvm.h>
+#include <asm/kvm_para.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+#define NR_VCPUS 4
+
+#define REDIST_REGION_ATTR_ADDR(count, base, flags, index) (((uint64_t)(count) << 52) | \
+ ((uint64_t)((base) >> 16) << 16) | ((uint64_t)(flags) << 12) | (uint64_t)(index))
+#define REG_OFFSET(vcpu, offset) (((uint64_t)(vcpu) << 32) | (offset))
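+/*
+ * The REDIST_REGION attribute layout: bits [63:52] hold the redistributor
+ * count, bits [51:16] hold bits [51:16] of the region base address,
+ * bits [15:12] the flags and bits [11:0] the region index. E.g.
+ * REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 1) = (2ULL << 52) | 0x200000 | 1.
+ */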
+
+#define GICR_TYPER 0x8
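+/*
+ * In the GICR_TYPER word read below, Processor_Number lives in bits
+ * [23:8] and the Last bit (last redistributor in its region) is bit 4,
+ * so e.g. a value of 0x310 denotes the redistributor of vcpu #3, last
+ * in its region.
+ */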
+
+struct vm_gic {
+ struct kvm_vm *vm;
+ int gic_fd;
+};
+
+static int max_ipa_bits;
+
+/* helper to access a redistributor register */
+static int access_redist_reg(int gicv3_fd, int vcpu, int offset,
+ uint32_t *val, bool write)
+{
+ uint64_t attr = REG_OFFSET(vcpu, offset);
+
+ return _kvm_device_access(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
+ attr, val, write);
+}
+
+/* dummy guest code */
+static void guest_code(void)
+{
+ GUEST_SYNC(0);
+ GUEST_SYNC(1);
+ GUEST_SYNC(2);
+ GUEST_DONE();
+}
+
+/* We don't want to assert on run execution, hence this helper; it returns 0 or -errno. */
+static int run_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ ucall_init(vm, NULL);
+ int ret = _vcpu_ioctl(vm, vcpuid, KVM_RUN, NULL);
+ if (ret)
+ return -errno;
+ return 0;
+}
+
+static struct vm_gic vm_gic_create(void)
+{
+ struct vm_gic v;
+
+ v.vm = vm_create_default_with_vcpus(NR_VCPUS, 0, 0, guest_code, NULL);
+ v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+
+ return v;
+}
+
+static void vm_gic_destroy(struct vm_gic *v)
+{
+ close(v->gic_fd);
+ kvm_vm_free(v->vm);
+}
+
+/**
+ * Helper routine that performs KVM device tests in general, and
+ * especially ARM_VGIC_V3 ones. On return, the ARM_VGIC_V3 device
+ * has a legacy RDIST region set at 0x0 and a DIST region set at
+ * 0x60000; the two deliberately overlap.
+ */
+static void subtest_dist_rdist(struct vm_gic *v)
+{
+ int ret;
+ uint64_t addr;
+
+ /* Check existing group/attributes */
+ kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_DIST);
+
+ kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST);
+
+ /* check a non-existent attribute */
+ ret = _kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, 0);
+ TEST_ASSERT(ret && errno == ENXIO, "attribute not supported");
+
+ /* misaligned DIST and REDIST address settings */
+ addr = 0x1000;
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_DIST, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL, "GICv3 dist base not 64kB aligned");
+
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL, "GICv3 redist base not 64kB aligned");
+
+ /* out of range address */
+ if (max_ipa_bits) {
+ addr = 1ULL << max_ipa_bits;
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_DIST, &addr, true);
+ TEST_ASSERT(ret && errno == E2BIG, "dist address beyond IPA limit");
+
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
+ TEST_ASSERT(ret && errno == E2BIG, "redist address beyond IPA limit");
+ }
+
+ /* set REDIST base address @0x0 */
+ addr = 0x00000;
+ kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
+
+ /* Attempt to create a second legacy redistributor region */
+ addr = 0xE0000;
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
+ TEST_ASSERT(ret && errno == EEXIST, "GICv3 redist base set again");
+
+ /* Attempt to mix legacy and new redistributor regions */
+ addr = REDIST_REGION_ATTR_ADDR(NR_VCPUS, 0x100000, 0, 0);
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL, "attempt to mix GICv3 REDIST and REDIST_REGION");
+
+ /*
+ * Set a DIST base that overlaps the REDIST region: the legacy REDIST
+ * region at 0x0 spans 4 vcpus * 2 * 64kB = 0x80000, so a DIST base
+ * of 3 * 2 * 64kB (0x60000) collides with the last redistributor.
+ * This cannot be detected here; it will be detected on the first
+ * vcpu run instead.
+ */
+ addr = 3 * 2 * 0x10000;
+ kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_DIST,
+ &addr, true);
+}
+
+/* Test the new REDIST region API */
+static void subtest_redist_regions(struct vm_gic *v)
+{
+ uint64_t addr, expected_addr;
+ int ret;
+
+ ret = kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST);
+ TEST_ASSERT(!ret, "Multiple redist regions advertised");
+
+ addr = REDIST_REGION_ATTR_ADDR(NR_VCPUS, 0x100000, 2, 0);
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL, "redist region attr value with flags != 0");
+
+ addr = REDIST_REGION_ATTR_ADDR(0, 0x100000, 0, 0);
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL, "redist region attr value with count== 0");
+
+ addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 1);
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL,
+ "attempt to register the first rdist region with index != 0");
+
+ addr = REDIST_REGION_ATTR_ADDR(2, 0x201000, 0, 1);
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL, "rdist region with misaligned address");
+
+ addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 0);
+ kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+
+ addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 1);
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL, "register an rdist region with already used index");
+
+ addr = REDIST_REGION_ATTR_ADDR(1, 0x210000, 0, 2);
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL,
+ "register an rdist region overlapping with another one");
+
+ addr = REDIST_REGION_ATTR_ADDR(1, 0x240000, 0, 2);
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL, "register redist region with index not +1");
+
+ addr = REDIST_REGION_ATTR_ADDR(1, 0x240000, 0, 1);
+ kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+
+ addr = REDIST_REGION_ATTR_ADDR(1, 1ULL << max_ipa_bits, 0, 2);
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ TEST_ASSERT(ret && errno == E2BIG,
+ "register redist region with base address beyond IPA range");
+
+ addr = 0x260000;
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL,
+ "Mix KVM_VGIC_V3_ADDR_TYPE_REDIST and REDIST_REGION");
+
+ /*
+ * Now there are 2 redist regions:
+ * region 0 @ 0x200000 2 redists
+ * region 1 @ 0x240000 1 redist
+ * Attempt to read their characteristics
+ */
+
+ addr = REDIST_REGION_ATTR_ADDR(0, 0, 0, 0);
+ expected_addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 0);
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, false);
+ TEST_ASSERT(!ret && addr == expected_addr, "read characteristics of region #0");
+
+ addr = REDIST_REGION_ATTR_ADDR(0, 0, 0, 1);
+ expected_addr = REDIST_REGION_ATTR_ADDR(1, 0x240000, 0, 1);
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, false);
+ TEST_ASSERT(!ret && addr == expected_addr, "read characteristics of region #1");
+
+ addr = REDIST_REGION_ATTR_ADDR(0, 0, 0, 2);
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, false);
+ TEST_ASSERT(ret && errno == ENOENT, "read characteristics of non existing region");
+
+ addr = 0x260000;
+ kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_DIST, &addr, true);
+
+ addr = REDIST_REGION_ATTR_ADDR(1, 0x260000, 0, 2);
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL, "register redist region colliding with dist");
+}
+
+/*
+ * VGIC KVM device is created and initialized before the secondary CPUs
+ * get created
+ */
+static void test_vgic_then_vcpus(void)
+{
+ struct vm_gic v;
+ int ret, i;
+
+ v.vm = vm_create_default(0, 0, guest_code);
+ v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+
+ subtest_dist_rdist(&v);
+
+ /* Add the rest of the VCPUs */
+ for (i = 1; i < NR_VCPUS; ++i)
+ vm_vcpu_add_default(v.vm, i, guest_code);
+
+ ret = run_vcpu(v.vm, 3);
+ TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run");
+
+ vm_gic_destroy(&v);
+}
+
+/* All the VCPUs are created before the VGIC KVM device gets initialized */
+static void test_vcpus_then_vgic(void)
+{
+ struct vm_gic v;
+ int ret;
+
+ v = vm_gic_create();
+
+ subtest_dist_rdist(&v);
+
+ ret = run_vcpu(v.vm, 3);
+ TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run");
+
+ vm_gic_destroy(&v);
+}
+
+static void test_new_redist_regions(void)
+{
+ void *dummy = NULL;
+ struct vm_gic v;
+ uint64_t addr;
+ int ret;
+
+ v = vm_gic_create();
+ subtest_redist_regions(&v);
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+
+ ret = run_vcpu(v.vm, 3);
+ TEST_ASSERT(ret == -ENXIO, "running without sufficient number of rdists");
+ vm_gic_destroy(&v);
+
+ /* step 2 */
+
+ v = vm_gic_create();
+ subtest_redist_regions(&v);
+
+ addr = REDIST_REGION_ATTR_ADDR(1, 0x280000, 0, 2);
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+
+ ret = run_vcpu(v.vm, 3);
+ TEST_ASSERT(ret == -EBUSY, "running without vgic explicit init");
+
+ vm_gic_destroy(&v);
+
+ /* step 3 */
+
+ v = vm_gic_create();
+ subtest_redist_regions(&v);
+
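+ /* Passing a NULL attribute address must fail with EFAULT */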
+ ret = _kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, dummy, true);
+ TEST_ASSERT(ret && errno == EFAULT,
+ "register a third region allowing to cover the 4 vcpus");
+
+ addr = REDIST_REGION_ATTR_ADDR(1, 0x280000, 0, 2);
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+
+ ret = run_vcpu(v.vm, 3);
+ TEST_ASSERT(!ret, "vcpu run");
+
+ vm_gic_destroy(&v);
+}
+
+static void test_typer_accesses(void)
+{
+ struct vm_gic v;
+ uint64_t addr;
+ uint32_t val;
+ int ret, i;
+
+ v.vm = vm_create_default(0, 0, guest_code);
+
+ v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+
+ vm_vcpu_add_default(v.vm, 3, guest_code);
+
+ ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
+ TEST_ASSERT(ret && errno == EINVAL, "attempting to read GICR_TYPER of non created vcpu");
+
+ vm_vcpu_add_default(v.vm, 1, guest_code);
+
+ ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
+ TEST_ASSERT(ret && errno == EBUSY, "read GICR_TYPER before GIC initialized");
+
+ vm_vcpu_add_default(v.vm, 2, guest_code);
+
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+
+ for (i = 0; i < NR_VCPUS ; i++) {
+ ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && !val, "read GICR_TYPER before rdist region setting");
+ }
+
+ addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 0);
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+
+ /* The first 2 rdists should be put there (vcpus 0 and 3) */
+ ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && !val, "read typer of rdist #0");
+
+ ret = access_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x310, "read typer of rdist #1");
+
+ addr = REDIST_REGION_ATTR_ADDR(10, 0x100000, 0, 1);
+ ret = _kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL, "collision with previous rdist region");
+
+ ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x100,
+ "no redist region attached to vcpu #1 yet, last cannot be returned");
+
+ ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x200,
+ "no redist region attached to vcpu #2, last cannot be returned");
+
+ addr = REDIST_REGION_ATTR_ADDR(10, 0x20000, 0, 1);
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+
+ ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x100, "read typer of rdist #1");
+
+ ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x210,
+ "read typer of rdist #1, last properly returned");
+
+ vm_gic_destroy(&v);
+}
+
+/**
+ * Test GICR_TYPER last bit with new redist regions
+ * rdist regions #1 and #2 are contiguous
+ * rdist region #0 @0x100000 2 rdist capacity
+ * rdists: 0, 3 (Last)
+ * rdist region #1 @0x240000 2 rdist capacity
+ * rdists: 5, 4 (Last)
+ * rdist region #2 @0x200000 2 rdist capacity
+ * rdists: 1, 2
+ */
+static void test_last_bit_redist_regions(void)
+{
+ uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
+ struct vm_gic v;
+ uint64_t addr;
+ uint32_t val;
+ int ret;
+
+ v.vm = vm_create_default_with_vcpus(6, 0, 0, guest_code, vcpuids);
+
+ v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+
+ addr = REDIST_REGION_ATTR_ADDR(2, 0x100000, 0, 0);
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+
+ addr = REDIST_REGION_ATTR_ADDR(2, 0x240000, 0, 1);
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+
+ addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 2);
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+
+ ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x000, "read typer of rdist #0");
+
+ ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x100, "read typer of rdist #1");
+
+ ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x200, "read typer of rdist #2");
+
+ ret = access_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x310, "read typer of rdist #3");
+
+ ret = access_redist_reg(v.gic_fd, 5, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x500, "read typer of rdist #5");
+
+ ret = access_redist_reg(v.gic_fd, 4, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x410, "read typer of rdist #4");
+
+ vm_gic_destroy(&v);
+}
+
+/* Test last bit with legacy region */
+static void test_last_bit_single_rdist(void)
+{
+ uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
+ struct vm_gic v;
+ uint64_t addr;
+ uint32_t val;
+ int ret;
+
+ v.vm = vm_create_default_with_vcpus(6, 0, 0, guest_code, vcpuids);
+
+ v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+
+ addr = 0x10000;
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
+
+ ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x000, "read typer of rdist #0");
+
+ ret = access_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x300, "read typer of rdist #1");
+
+ ret = access_redist_reg(v.gic_fd, 5, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x500, "read typer of rdist #2");
+
+ ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x100, "read typer of rdist #3");
+
+ ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
+ TEST_ASSERT(!ret && val == 0x210, "read typer of rdist #3");
+
+ vm_gic_destroy(&v);
+}
+
+void test_kvm_device(void)
+{
+ struct vm_gic v;
+ int ret, fd;
+
+ v.vm = vm_create_default_with_vcpus(NR_VCPUS, 0, 0, guest_code, NULL);
+
+ /* try to create a non-existent KVM device */
+ ret = _kvm_create_device(v.vm, 0, true, &fd);
+ TEST_ASSERT(ret && errno == ENODEV, "unsupported device");
+
+ /* trial mode with VGIC_V3 device */
+ ret = _kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, true, &fd);
+ if (ret) {
+ print_skip("GICv3 not supported");
+ exit(KSFT_SKIP);
+ }
+ v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+
+ ret = _kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false, &fd);
+ TEST_ASSERT(ret && errno == EEXIST, "create GICv3 device twice");
+
+ kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, true);
+
+ if (!_kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V2, true, &fd)) {
+ ret = _kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V2, false, &fd);
+ TEST_ASSERT(ret && errno == EINVAL, "create GICv2 while v3 exists");
+ }
+
+ vm_gic_destroy(&v);
+}
+
+int main(int ac, char **av)
+{
+ max_ipa_bits = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
+
+ test_kvm_device();
+ test_vcpus_then_vgic();
+ test_vgic_then_vcpus();
+ test_new_redist_regions();
+ test_typer_accesses();
+ test_last_bit_redist_regions();
+ test_last_bit_single_rdist();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index bb2752d78fe3..81edbd23d371 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -17,6 +17,7 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <asm/barrier.h>
+#include <linux/atomic.h>
#include "kvm_util.h"
#include "test_util.h"
@@ -137,12 +138,20 @@ static uint64_t host_clear_count;
static uint64_t host_track_next_count;
/* Whether dirty ring reset is requested, or finished */
-static sem_t dirty_ring_vcpu_stop;
-static sem_t dirty_ring_vcpu_cont;
+static sem_t sem_vcpu_stop;
+static sem_t sem_vcpu_cont;
+/*
+ * This is only set by the main thread, and only cleared by the vcpu
+ * thread. It is used to request the vcpu thread to stop at the next
+ * GUEST_SYNC, since GUEST_SYNC is the only place where both the "dirty
+ * bit" and the "dirty data" are guaranteed to match. E.g., SIG_IPI
+ * provides no such guarantee if the vcpu is interrupted after setting
+ * the dirty bit but before the data is written.
+ */
+static atomic_t vcpu_sync_stop_requested;
/*
* This is updated by the vcpu thread to tell the host whether it's a
* ring-full event. It should only be read until a sem_wait() of
- * dirty_ring_vcpu_stop and before vcpu continues to run.
+ * sem_vcpu_stop and before vcpu continues to run.
*/
static bool dirty_ring_vcpu_ring_full;
/*
@@ -234,6 +243,17 @@ static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
}
+/* Should only be called after a GUEST_SYNC */
+static void vcpu_handle_sync_stop(void)
+{
+ if (atomic_read(&vcpu_sync_stop_requested)) {
+ /* The main thread is sleeping and waiting for us */
+ atomic_set(&vcpu_sync_stop_requested, false);
+ sem_post(&sem_vcpu_stop);
+ sem_wait_until(&sem_vcpu_cont);
+ }
+}
+
static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
{
struct kvm_run *run = vcpu_state(vm, VCPU_ID);
@@ -244,6 +264,8 @@ static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(run->exit_reason));
+
+ vcpu_handle_sync_stop();
}
static bool dirty_ring_supported(void)
@@ -301,13 +323,13 @@ static void dirty_ring_wait_vcpu(void)
{
/* This makes sure that the hardware PML cache is flushed */
vcpu_kick();
- sem_wait_until(&dirty_ring_vcpu_stop);
+ sem_wait_until(&sem_vcpu_stop);
}
static void dirty_ring_continue_vcpu(void)
{
pr_info("Notifying vcpu to continue\n");
- sem_post(&dirty_ring_vcpu_cont);
+ sem_post(&sem_vcpu_cont);
}
static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
@@ -361,11 +383,11 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
/* Update the flag first before pause */
WRITE_ONCE(dirty_ring_vcpu_ring_full,
run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
- sem_post(&dirty_ring_vcpu_stop);
+ sem_post(&sem_vcpu_stop);
pr_info("vcpu stops because %s...\n",
dirty_ring_vcpu_ring_full ?
"dirty ring is full" : "vcpu is kicked out");
- sem_wait_until(&dirty_ring_vcpu_cont);
+ sem_wait_until(&sem_vcpu_cont);
pr_info("vcpu continues now.\n");
} else {
TEST_ASSERT(false, "Invalid guest sync status: "
@@ -377,7 +399,7 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
static void dirty_ring_before_vcpu_join(void)
{
/* Kick another round of vcpu just to make sure it will quit */
- sem_post(&dirty_ring_vcpu_cont);
+ sem_post(&sem_vcpu_cont);
}
struct log_mode {
@@ -505,9 +527,8 @@ static void *vcpu_worker(void *data)
*/
sigmask->len = 8;
pthread_sigmask(0, NULL, sigset);
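+ /*
+ * Unblock SIG_IPI only for the duration of KVM_RUN, so that it can
+ * kick the vcpu out of guest mode; it stays blocked otherwise.
+ */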
+ sigdelset(sigset, SIG_IPI);
vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);
- sigaddset(sigset, SIG_IPI);
- pthread_sigmask(SIG_BLOCK, sigset, NULL);
sigemptyset(sigset);
sigaddset(sigset, SIG_IPI);
@@ -768,7 +789,25 @@ static void run_test(enum vm_guest_mode mode, void *arg)
usleep(p->interval * 1000);
log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
bmap, host_num_pages);
+
+ /*
+ * See the vcpu_sync_stop_requested definition for details on why
+ * we need to stop the vcpu while verifying the data.
+ */
+ atomic_set(&vcpu_sync_stop_requested, true);
+ sem_wait_until(&sem_vcpu_stop);
+ /*
+ * NOTE: for the dirty ring, it's possible that we didn't stop
+ * at GUEST_SYNC but instead stopped because the ring is full;
+ * that's okay too, because a full ring means we're only missing
+ * the flush of the last page, and since we handle the last
+ * page specially, verification will succeed anyway.
+ */
+ assert(host_log_mode == LOG_MODE_DIRTY_RING ||
+ atomic_read(&vcpu_sync_stop_requested) == false);
vm_dirty_log_verify(mode, bmap);
+ sem_post(&sem_vcpu_cont);
+
iteration++;
sync_global_to_guest(vm, iteration);
}
@@ -818,9 +857,10 @@ int main(int argc, char *argv[])
.interval = TEST_HOST_LOOP_INTERVAL,
};
int opt, i;
+ sigset_t sigset;
- sem_init(&dirty_ring_vcpu_stop, 0, 0);
- sem_init(&dirty_ring_vcpu_cont, 0, 0);
+ sem_init(&sem_vcpu_stop, 0, 0);
+ sem_init(&sem_vcpu_cont, 0, 0);
guest_modes_append_default();
@@ -876,6 +916,11 @@ int main(int argc, char *argv[])
srandom(time(0));
+ /* Ensure that vCPU threads start with SIG_IPI blocked. */
+ sigemptyset(&sigset);
+ sigaddset(&sigset, SIG_IPI);
+ pthread_sigmask(SIG_BLOCK, &sigset, NULL);
+
if (host_log_mode_option == LOG_MODE_ALL) {
/* Run each log mode */
for (i = 0; i < LOG_MODE_NUM; i++) {
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 0f4258eaa629..a8f022794ce3 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -69,9 +69,6 @@ enum vm_guest_mode {
#define MIN_PAGE_SIZE (1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE ptes_per_page(MIN_PAGE_SIZE)
-#define vm_guest_mode_string(m) vm_guest_mode_string[m]
-extern const char * const vm_guest_mode_string[];
-
struct vm_guest_mode_params {
unsigned int pa_bits;
unsigned int va_bits;
@@ -85,6 +82,7 @@ int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
struct kvm_enable_cap *cap);
void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
+const char *vm_guest_mode_string(uint32_t i);
struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
void kvm_vm_free(struct kvm_vm *vmp);
@@ -225,6 +223,15 @@ int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
#endif
void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid);
+int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr);
+int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr);
+int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd);
+int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test);
+int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
+ void *val, bool write);
+int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
+ void *val, bool write);
+
const char *exit_reason_str(unsigned int exit_reason);
void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot);
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index b7f41399f22c..fade3130eb01 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -71,13 +71,32 @@ enum vm_mem_backing_src_type {
VM_MEM_SRC_ANONYMOUS,
VM_MEM_SRC_ANONYMOUS_THP,
VM_MEM_SRC_ANONYMOUS_HUGETLB,
+ VM_MEM_SRC_ANONYMOUS_HUGETLB_16KB,
+ VM_MEM_SRC_ANONYMOUS_HUGETLB_64KB,
+ VM_MEM_SRC_ANONYMOUS_HUGETLB_512KB,
+ VM_MEM_SRC_ANONYMOUS_HUGETLB_1MB,
+ VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB,
+ VM_MEM_SRC_ANONYMOUS_HUGETLB_8MB,
+ VM_MEM_SRC_ANONYMOUS_HUGETLB_16MB,
+ VM_MEM_SRC_ANONYMOUS_HUGETLB_32MB,
+ VM_MEM_SRC_ANONYMOUS_HUGETLB_256MB,
+ VM_MEM_SRC_ANONYMOUS_HUGETLB_512MB,
+ VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB,
+ VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB,
+ VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB,
+ NUM_SRC_TYPES,
};
struct vm_mem_backing_src_alias {
const char *name;
- enum vm_mem_backing_src_type type;
+ uint32_t flag;
};
+bool thp_configured(void);
+size_t get_trans_hugepagesz(void);
+size_t get_def_hugetlb_pagesz(void);
+const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i);
+size_t get_backing_src_pagesz(uint32_t i);
void backing_src_help(void);
enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
new file mode 100644
index 000000000000..1c4753fff19e
--- /dev/null
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KVM page table test
+ *
+ * Copyright (C) 2021, Huawei, Inc.
+ *
+ * If you plan to use hugepages to back the guest memory for testing,
+ * make sure that THP is enabled or that enough HUGETLB pages of the
+ * required page size have been pre-allocated on your system.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_name */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <pthread.h>
+#include <semaphore.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "guest_modes.h"
+
+#define TEST_MEM_SLOT_INDEX 1
+
+/* Default size (1GB) of the memory for testing */
+#define DEFAULT_TEST_MEM_SIZE (1 << 30)
+
+/* Default guest test virtual memory offset */
+#define DEFAULT_GUEST_TEST_MEM 0xc0000000
+
+/* Different guest memory accessing stages */
+enum test_stage {
+ KVM_BEFORE_MAPPINGS,
+ KVM_CREATE_MAPPINGS,
+ KVM_UPDATE_MAPPINGS,
+ KVM_ADJUST_MAPPINGS,
+ NUM_TEST_STAGES,
+};
+
+static const char * const test_stage_string[] = {
+ "KVM_BEFORE_MAPPINGS",
+ "KVM_CREATE_MAPPINGS",
+ "KVM_UPDATE_MAPPINGS",
+ "KVM_ADJUST_MAPPINGS",
+};
+
+struct vcpu_args {
+ int vcpu_id;
+ bool vcpu_write;
+};
+
+struct test_args {
+ struct kvm_vm *vm;
+ uint64_t guest_test_virt_mem;
+ uint64_t host_page_size;
+ uint64_t host_num_pages;
+ uint64_t large_page_size;
+ uint64_t large_num_pages;
+ uint64_t host_pages_per_lpage;
+ enum vm_mem_backing_src_type src_type;
+ struct vcpu_args vcpu_args[KVM_MAX_VCPUS];
+};
+
+/*
+ * Guest variables. Use addr_gva2hva() if these variables need
+ * to be changed on the host.
+ */
+static enum test_stage guest_test_stage;
+
+/* Host variables */
+static uint32_t nr_vcpus = 1;
+static struct test_args test_args;
+static enum test_stage *current_stage;
+static bool host_quit;
+
+/* Whether the test stage is updated, or completed */
+static sem_t test_stage_updated;
+static sem_t test_stage_completed;
+
+/*
+ * Guest physical memory offset of the testing memory slot.
+ * This will be set to the topmost valid physical address minus
+ * the test memory size.
+ */
+static uint64_t guest_test_phys_mem;
+
+/*
+ * Guest virtual memory offset of the testing memory slot.
+ * Must not conflict with identity mapped test code.
+ */
+static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+
+static void guest_code(int vcpu_id)
+{
+ struct test_args *p = &test_args;
+ struct vcpu_args *vcpu_args = &p->vcpu_args[vcpu_id];
+ enum test_stage *current_stage = &guest_test_stage;
+ uint64_t addr;
+ int i, j;
+
+ /* Make sure vCPU args data structure is not corrupt */
+ GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id);
+
+ while (true) {
+ addr = p->guest_test_virt_mem;
+
+ switch (READ_ONCE(*current_stage)) {
+ /*
+ * All vCPU threads will be started in this stage,
+ * where guest code of each vCPU will do nothing.
+ */
+ case KVM_BEFORE_MAPPINGS:
+ break;
+
+ /*
+ * Before dirty logging, vCPUs concurrently access the first
+ * 8 bytes of each page (host page/large page) within the same
+ * memory region with different access types (read/write).
+ * Then KVM will create normal page mappings or huge block
+ * mappings for them.
+ */
+ case KVM_CREATE_MAPPINGS:
+ for (i = 0; i < p->large_num_pages; i++) {
+ if (vcpu_args->vcpu_write)
+ *(uint64_t *)addr = 0x0123456789ABCDEF;
+ else
+ READ_ONCE(*(uint64_t *)addr);
+
+ addr += p->large_page_size;
+ }
+ break;
+
+ /*
+ * During dirty logging, if the memory backing src type is
+ * anonymous, KVM will only update the attributes of the normal
+ * page mappings from RO to RW. If the backing src type is THP
+ * or HUGETLB, KVM will instead split the huge block mappings
+ * into normal page mappings.
+ */
+ case KVM_UPDATE_MAPPINGS:
+ if (p->src_type == VM_MEM_SRC_ANONYMOUS) {
+ for (i = 0; i < p->host_num_pages; i++) {
+ *(uint64_t *)addr = 0x0123456789ABCDEF;
+ addr += p->host_page_size;
+ }
+ break;
+ }
+
+ for (i = 0; i < p->large_num_pages; i++) {
+ /*
+ * Write to the first host page in each large
+ * page region, and trigger the split of the large pages.
+ */
+ *(uint64_t *)addr = 0x0123456789ABCDEF;
+
+ /*
+ * Access the middle host pages in each large
+ * page region. Since dirty logging is enabled,
+ * this will create new mappings at the smallest
+ * granularity.
+ */
+ addr += p->large_page_size / 2;
+ for (j = 0; j < p->host_pages_per_lpage / 2; j++) {
+ READ_ONCE(*(uint64_t *)addr);
+ addr += p->host_page_size;
+ }
+ }
+ break;
+
+ /*
+ * After dirty logging is stopped, vCPUs concurrently read
+ * from every single host page. Then KVM will coalesce the
+ * split page mappings back to block mappings. And a TLB
+ * conflict abort could occur here if TLB entries of the
+ * page mappings are not fully invalidated.
+ */
+ case KVM_ADJUST_MAPPINGS:
+ for (i = 0; i < p->host_num_pages; i++) {
+ READ_ONCE(*(uint64_t *)addr);
+ addr += p->host_page_size;
+ }
+ break;
+
+ default:
+ GUEST_ASSERT(0);
+ }
+
+ GUEST_SYNC(1);
+ }
+}
+
+static void *vcpu_worker(void *data)
+{
+ int ret;
+ struct vcpu_args *vcpu_args = data;
+ struct kvm_vm *vm = test_args.vm;
+ int vcpu_id = vcpu_args->vcpu_id;
+ struct kvm_run *run;
+ struct timespec start;
+ struct timespec ts_diff;
+ enum test_stage stage;
+
+ vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
+ run = vcpu_state(vm, vcpu_id);
+
+ while (!READ_ONCE(host_quit)) {
+ ret = sem_wait(&test_stage_updated);
+ TEST_ASSERT(ret == 0, "Error in sem_wait");
+
+ if (READ_ONCE(host_quit))
+ return NULL;
+
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);
+ ret = _vcpu_run(vm, vcpu_id);
+ ts_diff = timespec_elapsed(start);
+
+ TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
+ TEST_ASSERT(get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC,
+ "Invalid guest sync status: exit_reason=%s\n",
+ exit_reason_str(run->exit_reason));
+
+ pr_debug("Got sync event from vCPU %d\n", vcpu_id);
+ stage = READ_ONCE(*current_stage);
+
+ /*
+ * This reports the execution time of each single
+ * vcpu in the different test stages.
+ */
+ pr_debug("vCPU %d has completed stage %s\n"
+ "execution time is: %ld.%.9lds\n\n",
+ vcpu_id, test_stage_string[stage],
+ ts_diff.tv_sec, ts_diff.tv_nsec);
+
+ ret = sem_post(&test_stage_completed);
+ TEST_ASSERT(ret == 0, "Error in sem_post");
+ }
+
+ return NULL;
+}
+
+struct test_params {
+ uint64_t phys_offset;
+ uint64_t test_mem_size;
+ enum vm_mem_backing_src_type src_type;
+};
+
+static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
+{
+ int ret;
+ struct test_params *p = arg;
+ struct vcpu_args *vcpu_args;
+ enum vm_mem_backing_src_type src_type = p->src_type;
+ uint64_t large_page_size = get_backing_src_pagesz(src_type);
+ uint64_t guest_page_size = vm_guest_mode_params[mode].page_size;
+ uint64_t host_page_size = getpagesize();
+ uint64_t test_mem_size = p->test_mem_size;
+ uint64_t guest_num_pages;
+ uint64_t alignment;
+ void *host_test_mem;
+ struct kvm_vm *vm;
+ int vcpu_id;
+
+ /* Align up the test memory size */
+ alignment = max(large_page_size, guest_page_size);
+ test_mem_size = (test_mem_size + alignment - 1) & ~(alignment - 1);
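+ /*
+ * E.g. with a 2MB backing page size, a requested size of 1GB + 4KB
+ * is rounded up to 1GB + 2MB.
+ */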
+
+ /* Create a VM with enough guest pages */
+ guest_num_pages = test_mem_size / guest_page_size;
+ vm = vm_create_with_vcpus(mode, nr_vcpus,
+ guest_num_pages, 0, guest_code, NULL);
+
+ /* Align down GPA of the testing memslot */
+ if (!p->phys_offset)
+ guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
+ guest_page_size;
+ else
+ guest_test_phys_mem = p->phys_offset;
+#ifdef __s390x__
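+ /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */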
+ alignment = max(0x100000, alignment);
+#endif
+ guest_test_phys_mem &= ~(alignment - 1);
+
+ /* Set up the shared data structure test_args */
+ test_args.vm = vm;
+ test_args.guest_test_virt_mem = guest_test_virt_mem;
+ test_args.host_page_size = host_page_size;
+ test_args.host_num_pages = test_mem_size / host_page_size;
+ test_args.large_page_size = large_page_size;
+ test_args.large_num_pages = test_mem_size / large_page_size;
+ test_args.host_pages_per_lpage = large_page_size / host_page_size;
+ test_args.src_type = src_type;
+
+ for (vcpu_id = 0; vcpu_id < KVM_MAX_VCPUS; vcpu_id++) {
+ vcpu_args = &test_args.vcpu_args[vcpu_id];
+ vcpu_args->vcpu_id = vcpu_id;
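+ /* Even-numbered vcpus write, odd-numbered vcpus read */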
+ vcpu_args->vcpu_write = !(vcpu_id % 2);
+ }
+
+ /* Add an extra memory slot with specified backing src type */
+ vm_userspace_mem_region_add(vm, src_type, guest_test_phys_mem,
+ TEST_MEM_SLOT_INDEX, guest_num_pages, 0);
+
+ /* Do the mapping (GVA->GPA) for the testing memory slot */
+ virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+
+ /* Cache the HVA pointer of the region */
+ host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
+
+ /* Export shared structure test_args to guest */
+ ucall_init(vm, NULL);
+ sync_global_to_guest(vm, test_args);
+
+ ret = sem_init(&test_stage_updated, 0, 0);
+ TEST_ASSERT(ret == 0, "Error in sem_init");
+
+ ret = sem_init(&test_stage_completed, 0, 0);
+ TEST_ASSERT(ret == 0, "Error in sem_init");
+
+ current_stage = addr_gva2hva(vm, (vm_vaddr_t)(&guest_test_stage));
+ *current_stage = NUM_TEST_STAGES;
+
+ pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+ pr_info("Testing memory backing src type: %s\n",
+ vm_mem_backing_src_alias(src_type)->name);
+ pr_info("Testing memory backing src granularity: 0x%lx\n",
+ large_page_size);
+ pr_info("Testing memory size(aligned): 0x%lx\n", test_mem_size);
+ pr_info("Guest physical test memory offset: 0x%lx\n",
+ guest_test_phys_mem);
+ pr_info("Host virtual test memory offset: 0x%lx\n",
+ (uint64_t)host_test_mem);
+ pr_info("Number of testing vCPUs: %d\n", nr_vcpus);
+
+ return vm;
+}
+
+static void vcpus_complete_new_stage(enum test_stage stage)
+{
+ int ret;
+ int vcpus;
+
+ /* Wake up all the vcpus to run new test stage */
+ for (vcpus = 0; vcpus < nr_vcpus; vcpus++) {
+ ret = sem_post(&test_stage_updated);
+ TEST_ASSERT(ret == 0, "Error in sem_post");
+ }
+ pr_debug("All vcpus have been notified to continue\n");
+
+ /* Wait for all the vcpus to complete new test stage */
+ for (vcpus = 0; vcpus < nr_vcpus; vcpus++) {
+ ret = sem_wait(&test_stage_completed);
+ TEST_ASSERT(ret == 0, "Error in sem_wait");
+
+ pr_debug("%d vcpus have completed stage %s\n",
+ vcpus + 1, test_stage_string[stage]);
+ }
+
+ pr_debug("All vcpus have completed stage %s\n",
+ test_stage_string[stage]);
+}
+
+static void run_test(enum vm_guest_mode mode, void *arg)
+{
+ int ret;
+ pthread_t *vcpu_threads;
+ struct kvm_vm *vm;
+ int vcpu_id;
+ struct timespec start;
+ struct timespec ts_diff;
+
+ /* Create the VM with vCPUs and do some pre-initialization */
+ vm = pre_init_before_test(mode, arg);
+
+ vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
+ TEST_ASSERT(vcpu_threads, "Memory allocation failed");
+
+ host_quit = false;
+ *current_stage = KVM_BEFORE_MAPPINGS;
+
+ for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+ pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
+ &test_args.vcpu_args[vcpu_id]);
+ }
+
+ vcpus_complete_new_stage(*current_stage);
+ pr_info("Started all vCPUs successfully\n");
+
+ /* Test the stage of KVM creating mappings */
+ *current_stage = KVM_CREATE_MAPPINGS;
+
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);
+ vcpus_complete_new_stage(*current_stage);
+ ts_diff = timespec_elapsed(start);
+
+ pr_info("KVM_CREATE_MAPPINGS: total execution time: %ld.%.9lds\n\n",
+ ts_diff.tv_sec, ts_diff.tv_nsec);
+
+ /* Test the stage of KVM updating mappings */
+ vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX,
+ KVM_MEM_LOG_DIRTY_PAGES);
+
+ *current_stage = KVM_UPDATE_MAPPINGS;
+
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);
+ vcpus_complete_new_stage(*current_stage);
+ ts_diff = timespec_elapsed(start);
+
+ pr_info("KVM_UPDATE_MAPPINGS: total execution time: %ld.%.9lds\n\n",
+ ts_diff.tv_sec, ts_diff.tv_nsec);
+
+ /* Test the stage of KVM adjusting mappings */
+ vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX, 0);
+
+ *current_stage = KVM_ADJUST_MAPPINGS;
+
+ clock_gettime(CLOCK_MONOTONIC_RAW, &start);
+ vcpus_complete_new_stage(*current_stage);
+ ts_diff = timespec_elapsed(start);
+
+ pr_info("KVM_ADJUST_MAPPINGS: total execution time: %ld.%.9lds\n\n",
+ ts_diff.tv_sec, ts_diff.tv_nsec);
+
+ /* Tell the vcpu threads to quit */
+ host_quit = true;
+ for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+ ret = sem_post(&test_stage_updated);
+ TEST_ASSERT(ret == 0, "Error in sem_post");
+ }
+
+ for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
+ pthread_join(vcpu_threads[vcpu_id], NULL);
+
+ ret = sem_destroy(&test_stage_updated);
+ TEST_ASSERT(ret == 0, "Error in sem_destroy");
+
+ ret = sem_destroy(&test_stage_completed);
+ TEST_ASSERT(ret == 0, "Error in sem_destroy");
+
+ free(vcpu_threads);
+ ucall_uninit(vm);
+ kvm_vm_free(vm);
+}
+
+static void help(char *name)
+{
+ puts("");
+ printf("usage: %s [-h] [-p offset] [-m mode] "
+ "[-b mem-size] [-v vcpus] [-s mem-type]\n", name);
+ puts("");
+ printf(" -p: specify guest physical test memory offset\n"
+ " Warning: a low offset can conflict with the loaded test code.\n");
+ guest_modes_help();
+ printf(" -b: specify size of the memory region for testing. e.g. 10M or 3G.\n"
+ " (default: 1G)\n");
+ printf(" -v: specify the number of vCPUs to run\n"
+ " (default: 1)\n");
+ printf(" -s: specify the type of memory that should be used to\n"
+ " back the guest data region.\n"
+ " (default: anonymous)\n\n");
+ backing_src_help();
+ puts("");
+}
+
+int main(int argc, char *argv[])
+{
+ int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+ struct test_params p = {
+ .test_mem_size = DEFAULT_TEST_MEM_SIZE,
+ .src_type = VM_MEM_SRC_ANONYMOUS,
+ };
+ int opt;
+
+ guest_modes_append_default();
+
+ while ((opt = getopt(argc, argv, "hp:m:b:v:s:")) != -1) {
+ switch (opt) {
+ case 'p':
+ p.phys_offset = strtoull(optarg, NULL, 0);
+ break;
+ case 'm':
+ guest_modes_cmdline(optarg);
+ break;
+ case 'b':
+ p.test_mem_size = parse_size(optarg);
+ break;
+ case 'v':
+ nr_vcpus = atoi(optarg);
+ TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
+ "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
+ break;
+ case 's':
+ p.src_type = parse_backing_src_type(optarg);
+ break;
+ case 'h':
+ default:
+ help(argv[0]);
+ exit(0);
+ }
+ }
+
+ for_each_guest_mode(run_test, &p);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/lib/assert.c b/tools/testing/selftests/kvm/lib/assert.c
index 5ebbd0d6b472..71ade6100fd3 100644
--- a/tools/testing/selftests/kvm/lib/assert.c
+++ b/tools/testing/selftests/kvm/lib/assert.c
@@ -71,9 +71,9 @@ test_assert(bool exp, const char *exp_str,
fprintf(stderr, "==== Test Assertion Failure ====\n"
" %s:%u: %s\n"
- " pid=%d tid=%d - %s\n",
+ " pid=%d tid=%d errno=%d - %s\n",
file, line, exp_str, getpid(), _gettid(),
- strerror(errno));
+ errno, strerror(errno));
test_dump_stack();
if (fmt) {
fputs(" ", stderr);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index b8849a1aca79..fc83f6c5902d 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -18,7 +18,6 @@
#include <unistd.h>
#include <linux/kernel.h>
-#define KVM_UTIL_PGS_PER_HUGEPG 512
#define KVM_UTIL_MIN_PFN 2
static int vcpu_mmap_sz(void);
@@ -143,17 +142,24 @@ static void vm_open(struct kvm_vm *vm, int perm)
"rc: %i errno: %i", vm->fd, errno);
}
-const char * const vm_guest_mode_string[] = {
- "PA-bits:52, VA-bits:48, 4K pages",
- "PA-bits:52, VA-bits:48, 64K pages",
- "PA-bits:48, VA-bits:48, 4K pages",
- "PA-bits:48, VA-bits:48, 64K pages",
- "PA-bits:40, VA-bits:48, 4K pages",
- "PA-bits:40, VA-bits:48, 64K pages",
- "PA-bits:ANY, VA-bits:48, 4K pages",
-};
-_Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
- "Missing new mode strings?");
+const char *vm_guest_mode_string(uint32_t i)
+{
+ static const char * const strings[] = {
+ [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages",
+ [VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages",
+ [VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages",
+ [VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages",
+ [VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages",
+ [VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages",
+ [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages",
+ };
+ _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
+ "Missing new mode strings?");
+
+ TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i);
+
+ return strings[i];
+}
const struct vm_guest_mode_params vm_guest_mode_params[] = {
{ 52, 48, 0x1000, 12 },
@@ -514,7 +520,7 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu)
ret = munmap(vcpu->state, vcpu_mmap_sz());
TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i "
"errno: %i", ret, errno);
- close(vcpu->fd);
+ ret = close(vcpu->fd);
TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
"errno: %i", ret, errno);
@@ -534,7 +540,7 @@ void kvm_vm_release(struct kvm_vm *vmp)
TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
" vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);
- close(vmp->kvm_fd);
+ ret = close(vmp->kvm_fd);
TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n"
" vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
}
@@ -681,7 +687,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
{
int ret;
struct userspace_mem_region *region;
- size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
+ size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
size_t alignment;
TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
@@ -743,7 +749,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
#endif
if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
- alignment = max(huge_page_size, alignment);
+ alignment = max(backing_src_pagesz, alignment);
/* Add enough memory to align up if necessary */
if (alignment > 1)
@@ -752,7 +758,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
region->mmap_start = mmap(NULL, region->mmap_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS
- | (src_type == VM_MEM_SRC_ANONYMOUS_HUGETLB ? MAP_HUGETLB : 0),
+ | vm_mem_backing_src_alias(src_type)->flag,
-1, 0);
TEST_ASSERT(region->mmap_start != MAP_FAILED,
"test_malloc failed, mmap_start: %p errno: %i",
@@ -762,22 +768,13 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
region->host_mem = align(region->mmap_start, alignment);
/* As needed perform madvise */
- if (src_type == VM_MEM_SRC_ANONYMOUS || src_type == VM_MEM_SRC_ANONYMOUS_THP) {
- struct stat statbuf;
-
- ret = stat("/sys/kernel/mm/transparent_hugepage", &statbuf);
- TEST_ASSERT(ret == 0 || (ret == -1 && errno == ENOENT),
- "stat /sys/kernel/mm/transparent_hugepage");
-
- TEST_ASSERT(ret == 0 || src_type != VM_MEM_SRC_ANONYMOUS_THP,
- "VM_MEM_SRC_ANONYMOUS_THP requires THP to be configured in the host kernel");
-
- if (ret == 0) {
- ret = madvise(region->host_mem, npages * vm->page_size,
- src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
- TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %x",
- region->host_mem, npages * vm->page_size, src_type);
- }
+ if ((src_type == VM_MEM_SRC_ANONYMOUS ||
+ src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) {
+ ret = madvise(region->host_mem, npages * vm->page_size,
+ src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
+ TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s",
+ region->host_mem, npages * vm->page_size,
+ vm_mem_backing_src_alias(src_type)->name);
}
region->unused_phy_pages = sparsebit_alloc();
@@ -1734,6 +1731,81 @@ int _kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
}
/*
+ * Device Ioctl
+ */
+
+int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
+{
+ struct kvm_device_attr attribute = {
+ .group = group,
+ .attr = attr,
+ .flags = 0,
+ };
+
+ return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
+}
+
+int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
+{
+ int ret = _kvm_device_check_attr(dev_fd, group, attr);
+
+ TEST_ASSERT(ret >= 0, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
+ return ret;
+}
+
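+/*
+ * Create a KVM device. When @test is true, KVM_CREATE_DEVICE_TEST is
+ * passed so that KVM only checks whether the device type is supported,
+ * without actually creating the device.
+ */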
+int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd)
+{
+ struct kvm_create_device create_dev;
+ int ret;
+
+ create_dev.type = type;
+ create_dev.fd = -1;
+ create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
+ ret = ioctl(vm_get_fd(vm), KVM_CREATE_DEVICE, &create_dev);
+ *fd = create_dev.fd;
+ return ret;
+}
+
+int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test)
+{
+ int fd, ret;
+
+ ret = _kvm_create_device(vm, type, test, &fd);
+
+ if (!test) {
+ TEST_ASSERT(ret >= 0,
+ "KVM_CREATE_DEVICE IOCTL failed, rc: %i errno: %i", ret, errno);
+ return fd;
+ }
+ return ret;
+}
+
+int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
+ void *val, bool write)
+{
+ struct kvm_device_attr kvmattr = {
+ .group = group,
+ .attr = attr,
+ .flags = 0,
+ .addr = (uintptr_t)val,
+ };
+ int ret;
+
+ ret = ioctl(dev_fd, write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
+ &kvmattr);
+ return ret;
+}
+
+int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
+ void *val, bool write)
+{
+ int ret = _kvm_device_access(dev_fd, group, attr, val, write);
+
+ TEST_ASSERT(ret >= 0, "KVM_SET|GET_DEVICE_ATTR IOCTL failed, rc: %i errno: %i", ret, errno);
+ return ret;
+}
+
+/*
* VM Dump
*
* Input Args:
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 906c955384e2..63d2bc7d757b 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -10,6 +10,8 @@
#include <limits.h>
#include <stdlib.h>
#include <time.h>
+#include <sys/stat.h>
+#include <linux/mman.h>
#include "linux/kernel.h"
#include "test_util.h"
@@ -111,28 +113,169 @@ void print_skip(const char *fmt, ...)
puts(", skipping test");
}
-const struct vm_mem_backing_src_alias backing_src_aliases[] = {
- {"anonymous", VM_MEM_SRC_ANONYMOUS,},
- {"anonymous_thp", VM_MEM_SRC_ANONYMOUS_THP,},
- {"anonymous_hugetlb", VM_MEM_SRC_ANONYMOUS_HUGETLB,},
-};
+bool thp_configured(void)
+{
+ int ret;
+ struct stat statbuf;
+
+ ret = stat("/sys/kernel/mm/transparent_hugepage", &statbuf);
+ TEST_ASSERT(ret == 0 || (ret == -1 && errno == ENOENT),
+ "Error in stating /sys/kernel/mm/transparent_hugepage");
+
+ return ret == 0;
+}
+
+size_t get_trans_hugepagesz(void)
+{
+ size_t size;
+ FILE *f;
+
+ TEST_ASSERT(thp_configured(), "THP is not configured in host kernel");
+
+ f = fopen("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", "r");
+ TEST_ASSERT(f != NULL, "Error in opening transparent_hugepage/hpage_pmd_size");
+
+ if (fscanf(f, "%zu", &size) != 1)
+ TEST_FAIL("Error in reading transparent_hugepage/hpage_pmd_size");
+ fclose(f);
+
+ return size;
+}
+
+size_t get_def_hugetlb_pagesz(void)
+{
+ char buf[64];
+ const char *tag = "Hugepagesize:";
+ FILE *f;
+
+ f = fopen("/proc/meminfo", "r");
+ TEST_ASSERT(f != NULL, "Error in opening /proc/meminfo");
+
+ while (fgets(buf, sizeof(buf), f) != NULL) {
+ if (strstr(buf, tag) == buf) {
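+ /* "Hugepagesize:" is reported in kB; shift by 10 to get bytes */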
+ fclose(f);
+ return strtoull(buf + strlen(tag), NULL, 10) << 10;
+ }
+ }
+
+ if (feof(f))
+ TEST_FAIL("HUGETLB is not configured in host kernel");
+ else
+ TEST_FAIL("Error in reading /proc/meminfo");
+
+ fclose(f);
+ return 0;
+}
+
+const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i)
+{
+ static const struct vm_mem_backing_src_alias aliases[] = {
+ [VM_MEM_SRC_ANONYMOUS] = {
+ .name = "anonymous",
+ .flag = 0,
+ },
+ [VM_MEM_SRC_ANONYMOUS_THP] = {
+ .name = "anonymous_thp",
+ .flag = 0,
+ },
+ [VM_MEM_SRC_ANONYMOUS_HUGETLB] = {
+ .name = "anonymous_hugetlb",
+ .flag = MAP_HUGETLB,
+ },
+ [VM_MEM_SRC_ANONYMOUS_HUGETLB_16KB] = {
+ .name = "anonymous_hugetlb_16kb",
+ .flag = MAP_HUGETLB | MAP_HUGE_16KB,
+ },
+ [VM_MEM_SRC_ANONYMOUS_HUGETLB_64KB] = {
+ .name = "anonymous_hugetlb_64kb",
+ .flag = MAP_HUGETLB | MAP_HUGE_64KB,
+ },
+ [VM_MEM_SRC_ANONYMOUS_HUGETLB_512KB] = {
+ .name = "anonymous_hugetlb_512kb",
+ .flag = MAP_HUGETLB | MAP_HUGE_512KB,
+ },
+ [VM_MEM_SRC_ANONYMOUS_HUGETLB_1MB] = {
+ .name = "anonymous_hugetlb_1mb",
+ .flag = MAP_HUGETLB | MAP_HUGE_1MB,
+ },
+ [VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB] = {
+ .name = "anonymous_hugetlb_2mb",
+ .flag = MAP_HUGETLB | MAP_HUGE_2MB,
+ },
+ [VM_MEM_SRC_ANONYMOUS_HUGETLB_8MB] = {
+ .name = "anonymous_hugetlb_8mb",
+ .flag = MAP_HUGETLB | MAP_HUGE_8MB,
+ },
+ [VM_MEM_SRC_ANONYMOUS_HUGETLB_16MB] = {
+ .name = "anonymous_hugetlb_16mb",
+ .flag = MAP_HUGETLB | MAP_HUGE_16MB,
+ },
+ [VM_MEM_SRC_ANONYMOUS_HUGETLB_32MB] = {
+ .name = "anonymous_hugetlb_32mb",
+ .flag = MAP_HUGETLB | MAP_HUGE_32MB,
+ },
+ [VM_MEM_SRC_ANONYMOUS_HUGETLB_256MB] = {
+ .name = "anonymous_hugetlb_256mb",
+ .flag = MAP_HUGETLB | MAP_HUGE_256MB,
+ },
+ [VM_MEM_SRC_ANONYMOUS_HUGETLB_512MB] = {
+ .name = "anonymous_hugetlb_512mb",
+ .flag = MAP_HUGETLB | MAP_HUGE_512MB,
+ },
+ [VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB] = {
+ .name = "anonymous_hugetlb_1gb",
+ .flag = MAP_HUGETLB | MAP_HUGE_1GB,
+ },
+ [VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB] = {
+ .name = "anonymous_hugetlb_2gb",
+ .flag = MAP_HUGETLB | MAP_HUGE_2GB,
+ },
+ [VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB] = {
+ .name = "anonymous_hugetlb_16gb",
+ .flag = MAP_HUGETLB | MAP_HUGE_16GB,
+ },
+ };
+ _Static_assert(ARRAY_SIZE(aliases) == NUM_SRC_TYPES,
+ "Missing new backing src types?");
+
+ TEST_ASSERT(i < NUM_SRC_TYPES, "Backing src type ID %d too big", i);
+
+ return &aliases[i];
+}
+
+#define MAP_HUGE_PAGE_SIZE(x) (1ULL << (((x) >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK))
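+/*
+ * E.g. MAP_HUGE_2MB encodes log2(2MB) = 21 in the MAP_HUGE_SHIFT bit
+ * field, so MAP_HUGE_PAGE_SIZE(MAP_HUGE_2MB) = 1ULL << 21 = 2MB.
+ */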
+
+size_t get_backing_src_pagesz(uint32_t i)
+{
+ uint32_t flag = vm_mem_backing_src_alias(i)->flag;
+
+ switch (i) {
+ case VM_MEM_SRC_ANONYMOUS:
+ return getpagesize();
+ case VM_MEM_SRC_ANONYMOUS_THP:
+ return get_trans_hugepagesz();
+ case VM_MEM_SRC_ANONYMOUS_HUGETLB:
+ return get_def_hugetlb_pagesz();
+ default:
+ return MAP_HUGE_PAGE_SIZE(flag);
+ }
+}
void backing_src_help(void)
{
int i;
printf("Available backing src types:\n");
- for (i = 0; i < ARRAY_SIZE(backing_src_aliases); i++)
- printf("\t%s\n", backing_src_aliases[i].name);
+ for (i = 0; i < NUM_SRC_TYPES; i++)
+ printf("\t%s\n", vm_mem_backing_src_alias(i)->name);
}
enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name)
{
int i;
- for (i = 0; i < ARRAY_SIZE(backing_src_aliases); i++)
- if (!strcmp(type_name, backing_src_aliases[i].name))
- return backing_src_aliases[i].type;
+ for (i = 0; i < NUM_SRC_TYPES; i++)
+ if (!strcmp(type_name, vm_mem_backing_src_alias(i)->name))
+ return i;
backing_src_help();
TEST_FAIL("Unknown backing src type: %s", type_name);
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index f127ed31dba7..978f5b5f4dc0 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -329,6 +329,22 @@ static void test_zero_memory_regions(void)
}
#endif /* __x86_64__ */
+static int test_memory_region_add(struct kvm_vm *vm, void *mem, uint32_t slot,
+ uint32_t size, uint64_t guest_addr)
+{
+ struct kvm_userspace_memory_region region;
+ int ret;
+
+ region.slot = slot;
+ region.flags = 0;
+ region.guest_phys_addr = guest_addr;
+ region.memory_size = size;
+ region.userspace_addr = (uintptr_t) mem;
+ ret = ioctl(vm_get_fd(vm), KVM_SET_USER_MEMORY_REGION, &region);
+
+ return ret;
+}
+
/*
* Test that memory slots can be added up to KVM_CAP_NR_MEMSLOTS, and that
* any attempt to add further slots fails.
@@ -339,9 +355,15 @@ static void test_add_max_memory_regions(void)
struct kvm_vm *vm;
uint32_t max_mem_slots;
uint32_t slot;
- uint64_t guest_addr = 0x0;
- uint64_t mem_reg_npages;
- void *mem;
+ void *mem, *mem_aligned, *mem_extra;
+ size_t alignment;
+
+#ifdef __s390x__
+ /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
+ alignment = 0x100000;
+#else
+ alignment = 1;
+#endif
max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
TEST_ASSERT(max_mem_slots > 0,
@@ -350,30 +372,37 @@ static void test_add_max_memory_regions(void)
vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
- mem_reg_npages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, MEM_REGION_SIZE);
-
 /* Check that memory slots can be added up to the maximum allowed */
pr_info("Adding slots 0..%i, each memory region with %dK size\n",
(max_mem_slots - 1), MEM_REGION_SIZE >> 10);
+
+ mem = mmap(NULL, MEM_REGION_SIZE * max_mem_slots + alignment,
+ PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
+ mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));
+
for (slot = 0; slot < max_mem_slots; slot++) {
- vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
- guest_addr, slot, mem_reg_npages,
- 0);
- guest_addr += MEM_REGION_SIZE;
+ ret = test_memory_region_add(vm, mem_aligned +
+ ((uint64_t)slot * MEM_REGION_SIZE),
+ slot, MEM_REGION_SIZE,
+ (uint64_t)slot * MEM_REGION_SIZE);
+ TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
+ " rc: %i errno: %i slot: %i\n",
+ ret, errno, slot);
}
 /* Check that no memory slots can be added beyond the limit */
- mem = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
+ mem_extra = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ TEST_ASSERT(mem_extra != MAP_FAILED, "Failed to mmap() host");
- ret = ioctl(vm_get_fd(vm), KVM_SET_USER_MEMORY_REGION,
- &(struct kvm_userspace_memory_region) {slot, 0, guest_addr,
- MEM_REGION_SIZE, (uint64_t) mem});
+ ret = test_memory_region_add(vm, mem_extra, max_mem_slots, MEM_REGION_SIZE,
+ (uint64_t)max_mem_slots * MEM_REGION_SIZE);
TEST_ASSERT(ret == -1 && errno == EINVAL,
"Adding one more memory slot should fail with EINVAL");
- munmap(mem, MEM_REGION_SIZE);
+ munmap(mem, MEM_REGION_SIZE * max_mem_slots + alignment);
+ munmap(mem_extra, MEM_REGION_SIZE);
kvm_vm_free(vm);
}
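
The mem_aligned computation above is the usual power-of-two round-up idiom. A worked sketch, assuming the alignment is a power of two (which holds for the 1M s390x case):

	#include <assert.h>
	#include <stddef.h>

	/* Sketch: round p up to the next multiple of a power-of-two a. */
	static size_t align_up(size_t p, size_t a)
	{
		return (p + a - 1) & ~(a - 1);
	}

	int main(void)
	{
		assert(align_up(0x100001, 0x100000) == 0x200000);
		assert(align_up(0x200000, 0x100000) == 0x200000); /* Already aligned. */
		return 0;
	}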
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index 804ff5ff022d..1f4a0599683c 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -186,7 +186,7 @@ int main(int argc, char *argv[])
vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &st);
}
- struct vcpu_runstate_info *rs = addr_gpa2hva(vm, RUNSTATE_ADDR);;
+ struct vcpu_runstate_info *rs = addr_gpa2hva(vm, RUNSTATE_ADDR);
rs->state = 0x5a;
for (;;) {
diff --git a/tools/testing/selftests/landlock/.gitignore b/tools/testing/selftests/landlock/.gitignore
new file mode 100644
index 000000000000..470203a7cd73
--- /dev/null
+++ b/tools/testing/selftests/landlock/.gitignore
@@ -0,0 +1,2 @@
+/*_test
+/true
diff --git a/tools/testing/selftests/landlock/Makefile b/tools/testing/selftests/landlock/Makefile
new file mode 100644
index 000000000000..a99596ca9882
--- /dev/null
+++ b/tools/testing/selftests/landlock/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+
+CFLAGS += -Wall -O2
+
+src_test := $(wildcard *_test.c)
+
+TEST_GEN_PROGS := $(src_test:.c=)
+
+TEST_GEN_PROGS_EXTENDED := true
+
+KSFT_KHDR_INSTALL := 1
+OVERRIDE_TARGETS := 1
+include ../lib.mk
+
+khdr_dir = $(top_srcdir)/usr/include
+
+$(khdr_dir)/linux/landlock.h: khdr
+ @:
+
+$(OUTPUT)/true: true.c
+ $(LINK.c) $< $(LDLIBS) -o $@ -static
+
+$(OUTPUT)/%_test: %_test.c $(khdr_dir)/linux/landlock.h ../kselftest_harness.h common.h
+ $(LINK.c) $< $(LDLIBS) -o $@ -lcap -I$(khdr_dir)
diff --git a/tools/testing/selftests/landlock/base_test.c b/tools/testing/selftests/landlock/base_test.c
new file mode 100644
index 000000000000..ca40abe9daa8
--- /dev/null
+++ b/tools/testing/selftests/landlock/base_test.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Landlock tests - Common user space base
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2019-2020 ANSSI
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/landlock.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include "common.h"
+
+#ifndef O_PATH
+#define O_PATH 010000000
+#endif
+
+TEST(inconsistent_attr) {
+ const long page_size = sysconf(_SC_PAGESIZE);
+ char *const buf = malloc(page_size + 1);
+ struct landlock_ruleset_attr *const ruleset_attr = (void *)buf;
+
+ ASSERT_NE(NULL, buf);
+
+ /* Checks copy_from_user(). */
+ ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, 0, 0));
+ /* The size is less than sizeof(struct landlock_ruleset_attr). */
+ ASSERT_EQ(EINVAL, errno);
+ ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, 1, 0));
+ ASSERT_EQ(EINVAL, errno);
+
+ ASSERT_EQ(-1, landlock_create_ruleset(NULL, 1, 0));
+ /* A NULL attribute pointer must return EFAULT, whatever the size. */
+ ASSERT_EQ(EFAULT, errno);
+
+ ASSERT_EQ(-1, landlock_create_ruleset(NULL,
+ sizeof(struct landlock_ruleset_attr), 0));
+ ASSERT_EQ(EFAULT, errno);
+
+ ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, page_size + 1, 0));
+ ASSERT_EQ(E2BIG, errno);
+
+ ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr,
+ sizeof(struct landlock_ruleset_attr), 0));
+ ASSERT_EQ(ENOMSG, errno);
+ ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, page_size, 0));
+ ASSERT_EQ(ENOMSG, errno);
+
+ /* Checks that a non-zero trailing byte is rejected. */
+ buf[page_size - 2] = '.';
+ ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, page_size, 0));
+ ASSERT_EQ(E2BIG, errno);
+
+ ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, page_size + 1, 0));
+ ASSERT_EQ(E2BIG, errno);
+
+ free(buf);
+}
+
+TEST(abi_version) {
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE,
+ };
+ ASSERT_EQ(1, landlock_create_ruleset(NULL, 0,
+ LANDLOCK_CREATE_RULESET_VERSION));
+
+ ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0,
+ LANDLOCK_CREATE_RULESET_VERSION));
+ ASSERT_EQ(EINVAL, errno);
+
+ ASSERT_EQ(-1, landlock_create_ruleset(NULL, sizeof(ruleset_attr),
+ LANDLOCK_CREATE_RULESET_VERSION));
+ ASSERT_EQ(EINVAL, errno);
+
+ ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr),
+ LANDLOCK_CREATE_RULESET_VERSION));
+ ASSERT_EQ(EINVAL, errno);
+
+ ASSERT_EQ(-1, landlock_create_ruleset(NULL, 0,
+ LANDLOCK_CREATE_RULESET_VERSION | 1 << 31));
+ ASSERT_EQ(EINVAL, errno);
+}
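
Outside the harness, the same LANDLOCK_CREATE_RULESET_VERSION query is how an application would probe for Landlock at run time. A minimal fragment, assuming the landlock_create_ruleset() wrapper defined in common.h below:

	/* Sketch: run-time Landlock support probe. */
	int abi = landlock_create_ruleset(NULL, 0, LANDLOCK_CREATE_RULESET_VERSION);

	if (abi < 0)
		perror("Landlock is unsupported or disabled");
	else
		printf("Landlock ABI version: %d\n", abi);	/* 1 at this stage. */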
+
+TEST(inval_create_ruleset_flags) {
+ const int last_flag = LANDLOCK_CREATE_RULESET_VERSION;
+ const int invalid_flag = last_flag << 1;
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE,
+ };
+
+ ASSERT_EQ(-1, landlock_create_ruleset(NULL, 0, invalid_flag));
+ ASSERT_EQ(EINVAL, errno);
+
+ ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0, invalid_flag));
+ ASSERT_EQ(EINVAL, errno);
+
+ ASSERT_EQ(-1, landlock_create_ruleset(NULL, sizeof(ruleset_attr),
+ invalid_flag));
+ ASSERT_EQ(EINVAL, errno);
+
+ ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), invalid_flag));
+ ASSERT_EQ(EINVAL, errno);
+}
+
+TEST(empty_path_beneath_attr) {
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = LANDLOCK_ACCESS_FS_EXECUTE,
+ };
+ const int ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Similar to struct landlock_path_beneath_attr.parent_fd = 0 */
+ ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ NULL, 0));
+ ASSERT_EQ(EFAULT, errno);
+ ASSERT_EQ(0, close(ruleset_fd));
+}
+
+TEST(inval_fd_enforce) {
+ ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+
+ ASSERT_EQ(-1, landlock_restrict_self(-1, 0));
+ ASSERT_EQ(EBADF, errno);
+}
+
+TEST(unpriv_enforce_without_no_new_privs) {
+ int err;
+
+ drop_caps(_metadata);
+ err = landlock_restrict_self(-1, 0);
+ ASSERT_EQ(EPERM, errno);
+ ASSERT_EQ(err, -1);
+}
+
+TEST(ruleset_fd_io)
+{
+ struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE,
+ };
+ int ruleset_fd;
+ char buf;
+
+ drop_caps(_metadata);
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+
+ ASSERT_EQ(-1, write(ruleset_fd, ".", 1));
+ ASSERT_EQ(EINVAL, errno);
+ ASSERT_EQ(-1, read(ruleset_fd, &buf, 1));
+ ASSERT_EQ(EINVAL, errno);
+
+ ASSERT_EQ(0, close(ruleset_fd));
+}
+
+/* Tests enforcement of a ruleset FD transferred through a UNIX socket. */
+TEST(ruleset_fd_transfer)
+{
+ struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = LANDLOCK_ACCESS_FS_READ_DIR,
+ };
+ struct landlock_path_beneath_attr path_beneath_attr = {
+ .allowed_access = LANDLOCK_ACCESS_FS_READ_DIR,
+ };
+ int ruleset_fd_tx, dir_fd;
+ union {
+ /* Aligned ancillary data buffer. */
+ char buf[CMSG_SPACE(sizeof(ruleset_fd_tx))];
+ struct cmsghdr _align;
+ } cmsg_tx = {};
+ char data_tx = '.';
+ struct iovec io = {
+ .iov_base = &data_tx,
+ .iov_len = sizeof(data_tx),
+ };
+ struct msghdr msg = {
+ .msg_iov = &io,
+ .msg_iovlen = 1,
+ .msg_control = &cmsg_tx.buf,
+ .msg_controllen = sizeof(cmsg_tx.buf),
+ };
+ struct cmsghdr *cmsg;
+ int socket_fds[2];
+ pid_t child;
+ int status;
+
+ drop_caps(_metadata);
+
+ /* Creates a test ruleset with a simple rule. */
+ ruleset_fd_tx = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd_tx);
+ path_beneath_attr.parent_fd = open("/tmp", O_PATH | O_NOFOLLOW |
+ O_DIRECTORY | O_CLOEXEC);
+ ASSERT_LE(0, path_beneath_attr.parent_fd);
+ ASSERT_EQ(0, landlock_add_rule(ruleset_fd_tx, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath_attr, 0));
+ ASSERT_EQ(0, close(path_beneath_attr.parent_fd));
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ ASSERT_NE(NULL, cmsg);
+ cmsg->cmsg_len = CMSG_LEN(sizeof(ruleset_fd_tx));
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ memcpy(CMSG_DATA(cmsg), &ruleset_fd_tx, sizeof(ruleset_fd_tx));
+
+ /* Sends the ruleset FD over a socketpair and then closes it. */
+ ASSERT_EQ(0, socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, socket_fds));
+ ASSERT_EQ(sizeof(data_tx), sendmsg(socket_fds[0], &msg, 0));
+ ASSERT_EQ(0, close(socket_fds[0]));
+ ASSERT_EQ(0, close(ruleset_fd_tx));
+
+ child = fork();
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ int ruleset_fd_rx;
+
+ *(char *)msg.msg_iov->iov_base = '\0';
+ ASSERT_EQ(sizeof(data_tx), recvmsg(socket_fds[1], &msg, MSG_CMSG_CLOEXEC));
+ ASSERT_EQ('.', *(char *)msg.msg_iov->iov_base);
+ ASSERT_EQ(0, close(socket_fds[1]));
+ cmsg = CMSG_FIRSTHDR(&msg);
+ ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(ruleset_fd_tx)));
+ memcpy(&ruleset_fd_rx, CMSG_DATA(cmsg), sizeof(ruleset_fd_tx));
+
+ /* Enforces the received ruleset on the child. */
+ ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+ ASSERT_EQ(0, landlock_restrict_self(ruleset_fd_rx, 0));
+ ASSERT_EQ(0, close(ruleset_fd_rx));
+
+ /* Checks that the ruleset is enforced. */
+ ASSERT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC));
+ ASSERT_EQ(EACCES, errno);
+ dir_fd = open("/tmp", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
+ ASSERT_LE(0, dir_fd);
+ ASSERT_EQ(0, close(dir_fd));
+ _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ return;
+ }
+
+ ASSERT_EQ(0, close(socket_fds[1]));
+
+ /* Checks that the parent is unrestricted. */
+ dir_fd = open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
+ ASSERT_LE(0, dir_fd);
+ ASSERT_EQ(0, close(dir_fd));
+ dir_fd = open("/tmp", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
+ ASSERT_LE(0, dir_fd);
+ ASSERT_EQ(0, close(dir_fd));
+
+ ASSERT_EQ(child, waitpid(child, &status, 0));
+ ASSERT_EQ(1, WIFEXITED(status));
+ ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status));
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h
new file mode 100644
index 000000000000..20e2a9286d71
--- /dev/null
+++ b/tools/testing/selftests/landlock/common.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Landlock test helpers
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2019-2020 ANSSI
+ * Copyright © 2021 Microsoft Corporation
+ */
+
+#include <errno.h>
+#include <linux/landlock.h>
+#include <sys/capability.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "../kselftest_harness.h"
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+/*
+ * TEST_F_FORK() is useful when a test drops privileges but the corresponding
+ * FIXTURE_TEARDOWN() requires them (e.g. to remove files from a directory
+ * where write actions are denied). For convenience, FIXTURE_TEARDOWN() is
+ * also called when the test failed, but not when FIXTURE_SETUP() failed. For
+ * this to be possible, we must not call abort() but instead exit smoothly
+ * (hence the step print).
+ */
+#define TEST_F_FORK(fixture_name, test_name) \
+ static void fixture_name##_##test_name##_child( \
+ struct __test_metadata *_metadata, \
+ FIXTURE_DATA(fixture_name) *self, \
+ const FIXTURE_VARIANT(fixture_name) *variant); \
+ TEST_F(fixture_name, test_name) \
+ { \
+ int status; \
+ const pid_t child = fork(); \
+ if (child < 0) \
+ abort(); \
+ if (child == 0) { \
+ _metadata->no_print = 1; \
+ fixture_name##_##test_name##_child(_metadata, self, variant); \
+ if (_metadata->skip) \
+ _exit(255); \
+ if (_metadata->passed) \
+ _exit(0); \
+ _exit(_metadata->step); \
+ } \
+ if (child != waitpid(child, &status, 0)) \
+ abort(); \
+ if (WIFSIGNALED(status) || !WIFEXITED(status)) { \
+ _metadata->passed = 0; \
+ _metadata->step = 1; \
+ return; \
+ } \
+ switch (WEXITSTATUS(status)) { \
+ case 0: \
+ _metadata->passed = 1; \
+ break; \
+ case 255: \
+ _metadata->passed = 1; \
+ _metadata->skip = 1; \
+ break; \
+ default: \
+ _metadata->passed = 0; \
+ _metadata->step = WEXITSTATUS(status); \
+ break; \
+ } \
+ } \
+ static void fixture_name##_##test_name##_child( \
+ struct __test_metadata __attribute__((unused)) *_metadata, \
+ FIXTURE_DATA(fixture_name) __attribute__((unused)) *self, \
+ const FIXTURE_VARIANT(fixture_name) \
+ __attribute__((unused)) *variant)
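
A hypothetical use of the macro reads like a plain TEST_F(); only the body runs in the forked child, so a privilege drop cannot leak into the fixture teardown:

	/* Hypothetical example: the test name is illustrative, not from this patch. */
	TEST_F_FORK(layout1, example_drop_privs)
	{
		drop_caps(_metadata);	/* Only affects the forked child. */
		/* ... assertions that need the reduced privileges ... */
	}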
+
+#ifndef landlock_create_ruleset
+static inline int landlock_create_ruleset(
+ const struct landlock_ruleset_attr *const attr,
+ const size_t size, const __u32 flags)
+{
+ return syscall(__NR_landlock_create_ruleset, attr, size, flags);
+}
+#endif
+
+#ifndef landlock_add_rule
+static inline int landlock_add_rule(const int ruleset_fd,
+ const enum landlock_rule_type rule_type,
+ const void *const rule_attr, const __u32 flags)
+{
+ return syscall(__NR_landlock_add_rule, ruleset_fd, rule_type,
+ rule_attr, flags);
+}
+#endif
+
+#ifndef landlock_restrict_self
+static inline int landlock_restrict_self(const int ruleset_fd,
+ const __u32 flags)
+{
+ return syscall(__NR_landlock_restrict_self, ruleset_fd, flags);
+}
+#endif
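
Taken together, the three wrappers cover the whole Landlock life cycle. A minimal self-sandboxing fragment (error handling elided; "/usr" is a placeholder path):

	/* Sketch: create a ruleset, add one read-only rule, restrict self. */
	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE |
			LANDLOCK_ACCESS_FS_READ_DIR,
	};
	struct landlock_path_beneath_attr path_beneath = {
		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
			LANDLOCK_ACCESS_FS_READ_DIR,
	};
	int ruleset_fd = landlock_create_ruleset(&ruleset_attr,
			sizeof(ruleset_attr), 0);

	path_beneath.parent_fd = open("/usr", O_PATH | O_CLOEXEC);
	landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0);
	close(path_beneath.parent_fd);
	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);	/* Mandatory for unprivileged use. */
	landlock_restrict_self(ruleset_fd, 0);
	close(ruleset_fd);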
+
+static void _init_caps(struct __test_metadata *const _metadata, bool drop_all)
+{
+ cap_t cap_p;
+ /* Only these four capabilities are useful for the tests. */
+ const cap_value_t caps[] = {
+ CAP_DAC_OVERRIDE,
+ CAP_MKNOD,
+ CAP_SYS_ADMIN,
+ CAP_SYS_CHROOT,
+ };
+
+ cap_p = cap_get_proc();
+ EXPECT_NE(NULL, cap_p) {
+ TH_LOG("Failed to cap_get_proc: %s", strerror(errno));
+ }
+ EXPECT_NE(-1, cap_clear(cap_p)) {
+ TH_LOG("Failed to cap_clear: %s", strerror(errno));
+ }
+ if (!drop_all) {
+ EXPECT_NE(-1, cap_set_flag(cap_p, CAP_PERMITTED,
+ ARRAY_SIZE(caps), caps, CAP_SET)) {
+ TH_LOG("Failed to cap_set_flag: %s", strerror(errno));
+ }
+ }
+ EXPECT_NE(-1, cap_set_proc(cap_p)) {
+ TH_LOG("Failed to cap_set_proc: %s", strerror(errno));
+ }
+ EXPECT_NE(-1, cap_free(cap_p)) {
+ TH_LOG("Failed to cap_free: %s", strerror(errno));
+ }
+}
+
+/* We cannot put such helpers in a library because of kselftest_harness.h. */
+__attribute__((__unused__))
+static void disable_caps(struct __test_metadata *const _metadata)
+{
+ _init_caps(_metadata, false);
+}
+
+__attribute__((__unused__))
+static void drop_caps(struct __test_metadata *const _metadata)
+{
+ _init_caps(_metadata, true);
+}
+
+static void _effective_cap(struct __test_metadata *const _metadata,
+ const cap_value_t caps, const cap_flag_value_t value)
+{
+ cap_t cap_p;
+
+ cap_p = cap_get_proc();
+ EXPECT_NE(NULL, cap_p) {
+ TH_LOG("Failed to cap_get_proc: %s", strerror(errno));
+ }
+ EXPECT_NE(-1, cap_set_flag(cap_p, CAP_EFFECTIVE, 1, &caps, value)) {
+ TH_LOG("Failed to cap_set_flag: %s", strerror(errno));
+ }
+ EXPECT_NE(-1, cap_set_proc(cap_p)) {
+ TH_LOG("Failed to cap_set_proc: %s", strerror(errno));
+ }
+ EXPECT_NE(-1, cap_free(cap_p)) {
+ TH_LOG("Failed to cap_free: %s", strerror(errno));
+ }
+}
+
+__attribute__((__unused__))
+static void set_cap(struct __test_metadata *const _metadata,
+ const cap_value_t caps)
+{
+ _effective_cap(_metadata, caps, CAP_SET);
+}
+
+__attribute__((__unused__))
+static void clear_cap(struct __test_metadata *const _metadata,
+ const cap_value_t caps)
+{
+ _effective_cap(_metadata, caps, CAP_CLEAR);
+}
diff --git a/tools/testing/selftests/landlock/config b/tools/testing/selftests/landlock/config
new file mode 100644
index 000000000000..0f0a65287bac
--- /dev/null
+++ b/tools/testing/selftests/landlock/config
@@ -0,0 +1,7 @@
+CONFIG_OVERLAY_FS=y
+CONFIG_SECURITY_LANDLOCK=y
+CONFIG_SECURITY_PATH=y
+CONFIG_SECURITY=y
+CONFIG_SHMEM=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_TMPFS=y
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
new file mode 100644
index 000000000000..10c9a1e4ebd9
--- /dev/null
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -0,0 +1,2791 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Landlock tests - Filesystem
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2020 ANSSI
+ * Copyright © 2020-2021 Microsoft Corporation
+ */
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <linux/landlock.h>
+#include <sched.h>
+#include <string.h>
+#include <sys/capability.h>
+#include <sys/mount.h>
+#include <sys/prctl.h>
+#include <sys/sendfile.h>
+#include <sys/stat.h>
+#include <sys/sysmacros.h>
+#include <unistd.h>
+
+#include "common.h"
+
+#define TMP_DIR "tmp"
+#define BINARY_PATH "./true"
+
+/* Paths (sibling number and depth) */
+static const char dir_s1d1[] = TMP_DIR "/s1d1";
+static const char file1_s1d1[] = TMP_DIR "/s1d1/f1";
+static const char file2_s1d1[] = TMP_DIR "/s1d1/f2";
+static const char dir_s1d2[] = TMP_DIR "/s1d1/s1d2";
+static const char file1_s1d2[] = TMP_DIR "/s1d1/s1d2/f1";
+static const char file2_s1d2[] = TMP_DIR "/s1d1/s1d2/f2";
+static const char dir_s1d3[] = TMP_DIR "/s1d1/s1d2/s1d3";
+static const char file1_s1d3[] = TMP_DIR "/s1d1/s1d2/s1d3/f1";
+static const char file2_s1d3[] = TMP_DIR "/s1d1/s1d2/s1d3/f2";
+
+static const char dir_s2d1[] = TMP_DIR "/s2d1";
+static const char file1_s2d1[] = TMP_DIR "/s2d1/f1";
+static const char dir_s2d2[] = TMP_DIR "/s2d1/s2d2";
+static const char file1_s2d2[] = TMP_DIR "/s2d1/s2d2/f1";
+static const char dir_s2d3[] = TMP_DIR "/s2d1/s2d2/s2d3";
+static const char file1_s2d3[] = TMP_DIR "/s2d1/s2d2/s2d3/f1";
+static const char file2_s2d3[] = TMP_DIR "/s2d1/s2d2/s2d3/f2";
+
+static const char dir_s3d1[] = TMP_DIR "/s3d1";
+/* dir_s3d2 is a mount point. */
+static const char dir_s3d2[] = TMP_DIR "/s3d1/s3d2";
+static const char dir_s3d3[] = TMP_DIR "/s3d1/s3d2/s3d3";
+
+/*
+ * layout1 hierarchy:
+ *
+ * tmp
+ * ├── s1d1
+ * │   ├── f1
+ * │   ├── f2
+ * │   └── s1d2
+ * │   ├── f1
+ * │   ├── f2
+ * │   └── s1d3
+ * │   ├── f1
+ * │   └── f2
+ * ├── s2d1
+ * │   ├── f1
+ * │   └── s2d2
+ * │   ├── f1
+ * │   └── s2d3
+ * │   ├── f1
+ * │   └── f2
+ * └── s3d1
+ * └── s3d2
+ * └── s3d3
+ */
+
+static void mkdir_parents(struct __test_metadata *const _metadata,
+ const char *const path)
+{
+ char *walker;
+ const char *parent;
+ int i, err;
+
+ ASSERT_NE(path[0], '\0');
+ walker = strdup(path);
+ ASSERT_NE(NULL, walker);
+ parent = walker;
+ for (i = 1; walker[i]; i++) {
+ if (walker[i] != '/')
+ continue;
+ walker[i] = '\0';
+ err = mkdir(parent, 0700);
+ ASSERT_FALSE(err && errno != EEXIST) {
+ TH_LOG("Failed to create directory \"%s\": %s",
+ parent, strerror(errno));
+ }
+ walker[i] = '/';
+ }
+ free(walker);
+}
+
+static void create_directory(struct __test_metadata *const _metadata,
+ const char *const path)
+{
+ mkdir_parents(_metadata, path);
+ ASSERT_EQ(0, mkdir(path, 0700)) {
+ TH_LOG("Failed to create directory \"%s\": %s", path,
+ strerror(errno));
+ }
+}
+
+static void create_file(struct __test_metadata *const _metadata,
+ const char *const path)
+{
+ mkdir_parents(_metadata, path);
+ ASSERT_EQ(0, mknod(path, S_IFREG | 0700, 0)) {
+ TH_LOG("Failed to create file \"%s\": %s", path,
+ strerror(errno));
+ }
+}
+
+static int remove_path(const char *const path)
+{
+ char *walker;
+ int i, ret, err = 0;
+
+ walker = strdup(path);
+ if (!walker) {
+ err = ENOMEM;
+ goto out;
+ }
+ if (unlink(path) && rmdir(path)) {
+ if (errno != ENOENT)
+ err = errno;
+ goto out;
+ }
+ for (i = strlen(walker); i > 0; i--) {
+ if (walker[i] != '/')
+ continue;
+ walker[i] = '\0';
+ ret = rmdir(walker);
+ if (ret) {
+ if (errno != ENOTEMPTY && errno != EBUSY)
+ err = errno;
+ goto out;
+ }
+ if (strcmp(walker, TMP_DIR) == 0)
+ goto out;
+ }
+
+out:
+ free(walker);
+ return err;
+}
+
+static void prepare_layout(struct __test_metadata *const _metadata)
+{
+ disable_caps(_metadata);
+ umask(0077);
+ create_directory(_metadata, TMP_DIR);
+
+ /*
+ * To avoid polluting the rest of the system, create a private mount point
+ * for tests relying on pivot_root(2) and move_mount(2).
+ */
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(0, unshare(CLONE_NEWNS));
+ ASSERT_EQ(0, mount("tmp", TMP_DIR, "tmpfs", 0, "size=4m,mode=700"));
+ ASSERT_EQ(0, mount(NULL, TMP_DIR, NULL, MS_PRIVATE | MS_REC, NULL));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+}
+
+static void cleanup_layout(struct __test_metadata *const _metadata)
+{
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ EXPECT_EQ(0, umount(TMP_DIR));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+ EXPECT_EQ(0, remove_path(TMP_DIR));
+}
+
+static void create_layout1(struct __test_metadata *const _metadata)
+{
+ create_file(_metadata, file1_s1d1);
+ create_file(_metadata, file1_s1d2);
+ create_file(_metadata, file1_s1d3);
+ create_file(_metadata, file2_s1d1);
+ create_file(_metadata, file2_s1d2);
+ create_file(_metadata, file2_s1d3);
+
+ create_file(_metadata, file1_s2d1);
+ create_file(_metadata, file1_s2d2);
+ create_file(_metadata, file1_s2d3);
+ create_file(_metadata, file2_s2d3);
+
+ create_directory(_metadata, dir_s3d2);
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(0, mount("tmp", dir_s3d2, "tmpfs", 0, "size=4m,mode=700"));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+
+ ASSERT_EQ(0, mkdir(dir_s3d3, 0700));
+}
+
+static void remove_layout1(struct __test_metadata *const _metadata)
+{
+ EXPECT_EQ(0, remove_path(file2_s1d3));
+ EXPECT_EQ(0, remove_path(file2_s1d2));
+ EXPECT_EQ(0, remove_path(file2_s1d1));
+ EXPECT_EQ(0, remove_path(file1_s1d3));
+ EXPECT_EQ(0, remove_path(file1_s1d2));
+ EXPECT_EQ(0, remove_path(file1_s1d1));
+
+ EXPECT_EQ(0, remove_path(file2_s2d3));
+ EXPECT_EQ(0, remove_path(file1_s2d3));
+ EXPECT_EQ(0, remove_path(file1_s2d2));
+ EXPECT_EQ(0, remove_path(file1_s2d1));
+
+ EXPECT_EQ(0, remove_path(dir_s3d3));
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ umount(dir_s3d2);
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+ EXPECT_EQ(0, remove_path(dir_s3d2));
+}
+
+FIXTURE(layout1) {
+};
+
+FIXTURE_SETUP(layout1)
+{
+ prepare_layout(_metadata);
+
+ create_layout1(_metadata);
+}
+
+FIXTURE_TEARDOWN(layout1)
+{
+ remove_layout1(_metadata);
+
+ cleanup_layout(_metadata);
+}
+
+/*
+ * This helper makes it possible to use the ASSERT_* macros and prints the
+ * line number pointing to the test caller.
+ */
+static int test_open_rel(const int dirfd, const char *const path, const int flags)
+{
+ int fd;
+
+ /* Works with files and directories. */
+ fd = openat(dirfd, path, flags | O_CLOEXEC);
+ if (fd < 0)
+ return errno;
+ /*
+ * Mixing error codes from close(2) and open(2) should not lead to any
+ * (access type) confusion for this test.
+ */
+ if (close(fd) != 0)
+ return errno;
+ return 0;
+}
+
+static int test_open(const char *const path, const int flags)
+{
+ return test_open_rel(AT_FDCWD, path, flags);
+}
+
+TEST_F_FORK(layout1, no_restriction)
+{
+ ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY));
+ ASSERT_EQ(0, test_open(file2_s1d1, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(file2_s1d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+
+ ASSERT_EQ(0, test_open(dir_s2d1, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s2d1, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s2d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s2d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s2d3, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s2d3, O_RDONLY));
+
+ ASSERT_EQ(0, test_open(dir_s3d1, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s3d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s3d3, O_RDONLY));
+}
+
+TEST_F_FORK(layout1, inval)
+{
+ struct landlock_path_beneath_attr path_beneath = {
+ .allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE,
+ .parent_fd = -1,
+ };
+ struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE,
+ };
+ int ruleset_fd;
+
+ path_beneath.parent_fd = open(dir_s1d2, O_PATH | O_DIRECTORY |
+ O_CLOEXEC);
+ ASSERT_LE(0, path_beneath.parent_fd);
+
+ ruleset_fd = open(dir_s1d1, O_PATH | O_DIRECTORY | O_CLOEXEC);
+ ASSERT_LE(0, ruleset_fd);
+ ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0));
+ /* Returns EBADF because ruleset_fd is not a landlock-ruleset FD. */
+ ASSERT_EQ(EBADF, errno);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ ruleset_fd = open(dir_s1d1, O_DIRECTORY | O_CLOEXEC);
+ ASSERT_LE(0, ruleset_fd);
+ ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0));
+ /* Returns EBADFD because ruleset_fd is not a valid ruleset. */
+ ASSERT_EQ(EBADFD, errno);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Gets a real ruleset. */
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+ ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0));
+ ASSERT_EQ(0, close(path_beneath.parent_fd));
+
+ /* Tests without O_PATH. */
+ path_beneath.parent_fd = open(dir_s1d2, O_DIRECTORY | O_CLOEXEC);
+ ASSERT_LE(0, path_beneath.parent_fd);
+ ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0));
+ ASSERT_EQ(0, close(path_beneath.parent_fd));
+
+ /* Tests with a ruleset FD. */
+ path_beneath.parent_fd = ruleset_fd;
+ ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0));
+ ASSERT_EQ(EBADFD, errno);
+
+ /* Checks unhandled allowed_access. */
+ path_beneath.parent_fd = open(dir_s1d2, O_PATH | O_DIRECTORY |
+ O_CLOEXEC);
+ ASSERT_LE(0, path_beneath.parent_fd);
+
+ /* Tests with a legitimate, but unhandled, access value. */
+ path_beneath.allowed_access |= LANDLOCK_ACCESS_FS_EXECUTE;
+ ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0));
+ ASSERT_EQ(EINVAL, errno);
+ path_beneath.allowed_access &= ~LANDLOCK_ACCESS_FS_EXECUTE;
+
+ /* Tests with an unknown (64-bit) value. */
+ path_beneath.allowed_access |= (1ULL << 60);
+ ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0));
+ ASSERT_EQ(EINVAL, errno);
+ path_beneath.allowed_access &= ~(1ULL << 60);
+
+ /* Test with no access. */
+ path_beneath.allowed_access = 0;
+ ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0));
+ ASSERT_EQ(ENOMSG, errno);
+ path_beneath.allowed_access &= ~(1ULL << 60);
+
+ ASSERT_EQ(0, close(path_beneath.parent_fd));
+
+ /* Enforces the ruleset. */
+ ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+ ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0));
+
+ ASSERT_EQ(0, close(ruleset_fd));
+}
+
+#define ACCESS_FILE ( \
+ LANDLOCK_ACCESS_FS_EXECUTE | \
+ LANDLOCK_ACCESS_FS_WRITE_FILE | \
+ LANDLOCK_ACCESS_FS_READ_FILE)
+
+#define ACCESS_LAST LANDLOCK_ACCESS_FS_MAKE_SYM
+
+#define ACCESS_ALL ( \
+ ACCESS_FILE | \
+ LANDLOCK_ACCESS_FS_READ_DIR | \
+ LANDLOCK_ACCESS_FS_REMOVE_DIR | \
+ LANDLOCK_ACCESS_FS_REMOVE_FILE | \
+ LANDLOCK_ACCESS_FS_MAKE_CHAR | \
+ LANDLOCK_ACCESS_FS_MAKE_DIR | \
+ LANDLOCK_ACCESS_FS_MAKE_REG | \
+ LANDLOCK_ACCESS_FS_MAKE_SOCK | \
+ LANDLOCK_ACCESS_FS_MAKE_FIFO | \
+ LANDLOCK_ACCESS_FS_MAKE_BLOCK | \
+ ACCESS_LAST)
+
+TEST_F_FORK(layout1, file_access_rights)
+{
+ __u64 access;
+ int err;
+ struct landlock_path_beneath_attr path_beneath = {};
+ struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = ACCESS_ALL,
+ };
+ const int ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Tests access rights for files. */
+ path_beneath.parent_fd = open(file1_s1d2, O_PATH | O_CLOEXEC);
+ ASSERT_LE(0, path_beneath.parent_fd);
+ for (access = 1; access <= ACCESS_LAST; access <<= 1) {
+ path_beneath.allowed_access = access;
+ err = landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0);
+ if ((access | ACCESS_FILE) == ACCESS_FILE) {
+ ASSERT_EQ(0, err);
+ } else {
+ ASSERT_EQ(-1, err);
+ ASSERT_EQ(EINVAL, errno);
+ }
+ }
+ ASSERT_EQ(0, close(path_beneath.parent_fd));
+}
+
+static void add_path_beneath(struct __test_metadata *const _metadata,
+ const int ruleset_fd, const __u64 allowed_access,
+ const char *const path)
+{
+ struct landlock_path_beneath_attr path_beneath = {
+ .allowed_access = allowed_access,
+ };
+
+ path_beneath.parent_fd = open(path, O_PATH | O_CLOEXEC);
+ ASSERT_LE(0, path_beneath.parent_fd) {
+ TH_LOG("Failed to open directory \"%s\": %s", path,
+ strerror(errno));
+ }
+ ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0)) {
+ TH_LOG("Failed to update the ruleset with \"%s\": %s", path,
+ strerror(errno));
+ }
+ ASSERT_EQ(0, close(path_beneath.parent_fd));
+}
+
+struct rule {
+ const char *path;
+ __u64 access;
+};
+
+#define ACCESS_RO ( \
+ LANDLOCK_ACCESS_FS_READ_FILE | \
+ LANDLOCK_ACCESS_FS_READ_DIR)
+
+#define ACCESS_RW ( \
+ ACCESS_RO | \
+ LANDLOCK_ACCESS_FS_WRITE_FILE)
+
+static int create_ruleset(struct __test_metadata *const _metadata,
+ const __u64 handled_access_fs, const struct rule rules[])
+{
+ int ruleset_fd, i;
+ struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = handled_access_fs,
+ };
+
+ ASSERT_NE(NULL, rules) {
+ TH_LOG("No rule list");
+ }
+ ASSERT_NE(NULL, rules[0].path) {
+ TH_LOG("Empty rule list");
+ }
+
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd) {
+ TH_LOG("Failed to create a ruleset: %s", strerror(errno));
+ }
+
+ for (i = 0; rules[i].path; i++) {
+ add_path_beneath(_metadata, ruleset_fd, rules[i].access,
+ rules[i].path);
+ }
+ return ruleset_fd;
+}
+
+static void enforce_ruleset(struct __test_metadata *const _metadata,
+ const int ruleset_fd)
+{
+ ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+ ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0)) {
+ TH_LOG("Failed to enforce ruleset: %s", strerror(errno));
+ }
+}
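
With these two helpers, each test declares its policy as data: an array of struct rule terminated by an empty entry. A hypothetical test body would be:

	/* Hypothetical sketch: policy as data, then a single enforcement call. */
	const struct rule example_rules[] = {
		{
			.path = dir_s1d2,	/* Any path from the test layout. */
			.access = ACCESS_RO,
		},
		{}	/* The empty entry terminates the list. */
	};
	const int fd = create_ruleset(_metadata, ACCESS_RW, example_rules);

	enforce_ruleset(_metadata, fd);
	ASSERT_EQ(0, close(fd));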
+
+TEST_F_FORK(layout1, proc_nsfs)
+{
+ const struct rule rules[] = {
+ {
+ .path = "/dev/null",
+ .access = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE,
+ },
+ {}
+ };
+ struct landlock_path_beneath_attr path_beneath;
+ const int ruleset_fd = create_ruleset(_metadata, rules[0].access |
+ LANDLOCK_ACCESS_FS_READ_DIR, rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ ASSERT_EQ(0, test_open("/proc/self/ns/mnt", O_RDONLY));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+
+ ASSERT_EQ(EACCES, test_open("/", O_RDONLY));
+ ASSERT_EQ(EACCES, test_open("/dev", O_RDONLY));
+ ASSERT_EQ(0, test_open("/dev/null", O_RDONLY));
+ ASSERT_EQ(EACCES, test_open("/dev/full", O_RDONLY));
+
+ ASSERT_EQ(EACCES, test_open("/proc", O_RDONLY));
+ ASSERT_EQ(EACCES, test_open("/proc/self", O_RDONLY));
+ ASSERT_EQ(EACCES, test_open("/proc/self/ns", O_RDONLY));
+ /*
+ * Because nsfs is an internal filesystem, /proc/self/ns/mnt is a
+ * disconnected path. Such a path cannot be identified and must therefore be
+ * allowed.
+ */
+ ASSERT_EQ(0, test_open("/proc/self/ns/mnt", O_RDONLY));
+
+ /*
+ * Checks that it is not possible to add nsfs-like filesystem
+ * references to a ruleset.
+ */
+ path_beneath.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE,
+ path_beneath.parent_fd = open("/proc/self/ns/mnt", O_PATH | O_CLOEXEC);
+ ASSERT_LE(0, path_beneath.parent_fd);
+ ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0));
+ ASSERT_EQ(EBADFD, errno);
+ ASSERT_EQ(0, close(path_beneath.parent_fd));
+}
+
+TEST_F_FORK(layout1, unpriv) {
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d2,
+ .access = ACCESS_RO,
+ },
+ {}
+ };
+ int ruleset_fd;
+
+ drop_caps(_metadata);
+
+ ruleset_fd = create_ruleset(_metadata, ACCESS_RO, rules);
+ ASSERT_LE(0, ruleset_fd);
+ ASSERT_EQ(-1, landlock_restrict_self(ruleset_fd, 0));
+ ASSERT_EQ(EPERM, errno);
+
+ /* enforce_ruleset() calls prctl(no_new_privs). */
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+}
+
+TEST_F_FORK(layout1, effective_access)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d2,
+ .access = ACCESS_RO,
+ },
+ {
+ .path = file1_s2d2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+ char buf;
+ int reg_fd;
+
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Tests on a directory. */
+ ASSERT_EQ(EACCES, test_open("/", O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+
+ /* Tests on a file. */
+ ASSERT_EQ(EACCES, test_open(dir_s2d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s2d2, O_RDONLY));
+
+ /* Checks effective read and write actions. */
+ reg_fd = open(file1_s2d2, O_RDWR | O_CLOEXEC);
+ ASSERT_LE(0, reg_fd);
+ ASSERT_EQ(1, write(reg_fd, ".", 1));
+ ASSERT_LE(0, lseek(reg_fd, 0, SEEK_SET));
+ ASSERT_EQ(1, read(reg_fd, &buf, 1));
+ ASSERT_EQ('.', buf);
+ ASSERT_EQ(0, close(reg_fd));
+
+ /* Just in case, double-checks effective actions. */
+ reg_fd = open(file1_s2d2, O_RDONLY | O_CLOEXEC);
+ ASSERT_LE(0, reg_fd);
+ ASSERT_EQ(-1, write(reg_fd, &buf, 1));
+ ASSERT_EQ(EBADF, errno);
+ ASSERT_EQ(0, close(reg_fd));
+}
+
+TEST_F_FORK(layout1, unhandled_access)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d2,
+ .access = ACCESS_RO,
+ },
+ {}
+ };
+ /* Here, we only handle read accesses, not write accesses. */
+ const int ruleset_fd = create_ruleset(_metadata, ACCESS_RO, rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /*
+ * Because the policy does not handle LANDLOCK_ACCESS_FS_WRITE_FILE,
+ * opening for write-only should be allowed, but not read-write.
+ */
+ ASSERT_EQ(0, test_open(file1_s1d1, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR));
+
+ ASSERT_EQ(0, test_open(file1_s1d2, O_WRONLY));
+ ASSERT_EQ(0, test_open(file1_s1d2, O_RDWR));
+}
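
The rule this test encodes is that a layer can only deny what it handles. As a model (a sketch of the semantics, not kernel code), a per-layer check boils down to:

	#include <stdbool.h>
	#include <linux/types.h>	/* __u64 */

	/* Model sketch: deny only handled-but-ungranted bits. */
	static bool layer_allows(__u64 request, __u64 handled, __u64 granted)
	{
		/* Bits outside the handled set are never restricted here. */
		return !(request & handled & ~granted);
	}

For instance, with request = WRITE_FILE and handled = READ_FILE, the layer allows the request whatever was granted, which is exactly the O_WRONLY case above.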
+
+TEST_F_FORK(layout1, ruleset_overlap)
+{
+ const struct rule rules[] = {
+ /* These rules should be ORed with each other. */
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE,
+ },
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_READ_DIR,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks s1d1 hierarchy. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR));
+ ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
+
+ /* Checks s1d2 hierarchy. */
+ ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s1d2, O_WRONLY));
+ ASSERT_EQ(0, test_open(file1_s1d2, O_RDWR));
+ ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
+
+ /* Checks s1d3 hierarchy. */
+ ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s1d3, O_WRONLY));
+ ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR));
+ ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
+}
+
+TEST_F_FORK(layout1, non_overlapping_accesses)
+{
+ const struct rule layer1[] = {
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_MAKE_REG,
+ },
+ {}
+ };
+ const struct rule layer2[] = {
+ {
+ .path = dir_s1d3,
+ .access = LANDLOCK_ACCESS_FS_REMOVE_FILE,
+ },
+ {}
+ };
+ int ruleset_fd;
+
+ ASSERT_EQ(0, unlink(file1_s1d1));
+ ASSERT_EQ(0, unlink(file1_s1d2));
+
+ ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_MAKE_REG,
+ layer1);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ ASSERT_EQ(-1, mknod(file1_s1d1, S_IFREG | 0700, 0));
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(0, mknod(file1_s1d2, S_IFREG | 0700, 0));
+ ASSERT_EQ(0, unlink(file1_s1d2));
+
+ ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_REMOVE_FILE,
+ layer2);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Unchanged accesses for file creation. */
+ ASSERT_EQ(-1, mknod(file1_s1d1, S_IFREG | 0700, 0));
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(0, mknod(file1_s1d2, S_IFREG | 0700, 0));
+
+ /* Checks file removing. */
+ ASSERT_EQ(-1, unlink(file1_s1d2));
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(0, unlink(file1_s1d3));
+}
+
+TEST_F_FORK(layout1, interleaved_masked_accesses)
+{
+ /*
+ * Checks overly restrictive rules:
+ * layer 1: allows R s1d1/s1d2/s1d3/file1
+ * layer 2: allows RW s1d1/s1d2/s1d3
+ * allows W s1d1/s1d2
+ * denies R s1d1/s1d2
+ * layer 3: allows R s1d1
+ * layer 4: allows R s1d1/s1d2
+ * denies W s1d1/s1d2
+ * layer 5: allows R s1d1/s1d2
+ * layer 6: allows X ----
+ * layer 7: allows W s1d1/s1d2
+ * denies R s1d1/s1d2
+ */
+ const struct rule layer1_read[] = {
+ /* Allows read access to file1_s1d3 with the first layer. */
+ {
+ .path = file1_s1d3,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {}
+ };
+ /* First rule with write restrictions. */
+ const struct rule layer2_read_write[] = {
+ /* Start by granting read-write access via its parent directory... */
+ {
+ .path = dir_s1d3,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE,
+ },
+ /* ...but also denies read access via its grandparent directory. */
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_WRITE_FILE,
+ },
+ {}
+ };
+ const struct rule layer3_read[] = {
+ /* Allows read access via its great-grandparent directory. */
+ {
+ .path = dir_s1d1,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {}
+ };
+ const struct rule layer4_read_write[] = {
+ /*
+ * Try to confuse the access checks by denying write (but not
+ * read) access via its grandparent directory.
+ */
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {}
+ };
+ const struct rule layer5_read[] = {
+ /*
+ * Try to override layer2's deny read access by explicitly
+ * allowing read access via file1_s1d3's grandparent.
+ */
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {}
+ };
+ const struct rule layer6_execute[] = {
+ /*
+ * Restricts an unrelated file hierarchy with a new access
+ * (non-overlapping) type.
+ */
+ {
+ .path = dir_s2d1,
+ .access = LANDLOCK_ACCESS_FS_EXECUTE,
+ },
+ {}
+ };
+ const struct rule layer7_read_write[] = {
+ /*
+ * Finally, denies read access to file1_s1d3 via its
+ * grandparent.
+ */
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_WRITE_FILE,
+ },
+ {}
+ };
+ int ruleset_fd;
+
+ ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE,
+ layer1_read);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks that read access is granted for file1_s1d3 with layer 1. */
+ ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR));
+ ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY));
+ ASSERT_EQ(0, test_open(file2_s1d3, O_WRONLY));
+
+ ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE, layer2_read_write);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks that previous access rights are unchanged with layer 2. */
+ ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR));
+ ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY));
+ ASSERT_EQ(0, test_open(file2_s1d3, O_WRONLY));
+
+ ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE,
+ layer3_read);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks that previous access rights are unchanged with layer 3. */
+ ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR));
+ ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY));
+ ASSERT_EQ(0, test_open(file2_s1d3, O_WRONLY));
+
+ /* This time, denies write access for the file hierarchy. */
+ ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE, layer4_read_write);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /*
+ * Checks that the only change with layer 4 is that write access is
+ * denied.
+ */
+ ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file2_s1d3, O_WRONLY));
+
+ ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE,
+ layer5_read);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks that previous access rights are unchanged with layer 5. */
+ ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(file2_s1d3, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY));
+
+ ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_EXECUTE,
+ layer6_execute);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks that previous access rights are unchanged with layer 6. */
+ ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(file2_s1d3, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY));
+
+ ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE, layer7_read_write);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks that read access is now denied with layer 7. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d3, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(file2_s1d3, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY));
+}
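
In short, rules are ORed within a layer and the per-layer results are ANDed across layers. A worked example for file1_s1d3 after the first four layers, counting unhandled accesses as implicitly allowed (R/W stand for the read/write file accesses):

	/*
	 * layer 1: RW (R granted on file1_s1d3; W not handled, so it passes)
	 * layer 2: RW (union of the dir_s1d3 and dir_s1d2 rules)
	 * layer 3: RW (R granted via dir_s1d1; W not handled)
	 * layer 4: R  (W handled but not granted: denied here)
	 *
	 * effective = RW & RW & RW & R = R
	 */

hence the asserts after layer 4: O_RDONLY succeeds while O_WRONLY fails with EACCES.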
+
+TEST_F_FORK(layout1, inherit_subset)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_READ_DIR,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+
+ ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
+
+ /* Write access is forbidden. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
+ /* Readdir access is allowed. */
+ ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
+
+ /* Write access is forbidden. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
+ /* Readdir access is allowed. */
+ ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
+
+ /*
+ * Tests shared rule extension: the following rules should not grant
+ * any new access, only remove some. Once enforced, these rules are
+ * ANDed with the previous ones.
+ */
+ add_path_beneath(_metadata, ruleset_fd, LANDLOCK_ACCESS_FS_WRITE_FILE,
+ dir_s1d2);
+ /*
+ * According to ruleset_fd, dir_s1d2 should now have the
+ * LANDLOCK_ACCESS_FS_READ_FILE and LANDLOCK_ACCESS_FS_WRITE_FILE
+ * access rights (even if this directory is opened a second time).
+ * However, when enforcing this updated ruleset, the ruleset tied to
+ * the current process (i.e. its domain) will still only have the
+ * dir_s1d2 with LANDLOCK_ACCESS_FS_READ_FILE and
+ * LANDLOCK_ACCESS_FS_READ_DIR accesses, but
+ * LANDLOCK_ACCESS_FS_WRITE_FILE must not be allowed because it would
+ * be a privilege escalation.
+ */
+ enforce_ruleset(_metadata, ruleset_fd);
+
+ /* Same tests and results as above. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
+
+ /* It is still forbidden to write in file1_s1d2. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
+ /* Readdir access is still allowed. */
+ ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
+
+ /* It is still forbidden to write in file1_s1d3. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
+ /* Readdir access is still allowed. */
+ ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
+
+ /*
+ * Try to get more privileges by adding new access rights to the parent
+ * directory: dir_s1d1.
+ */
+ add_path_beneath(_metadata, ruleset_fd, ACCESS_RW, dir_s1d1);
+ enforce_ruleset(_metadata, ruleset_fd);
+
+ /* Same tests and results as above. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
+
+ /* It is still forbidden to write in file1_s1d2. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
+ /* Readdir access is still allowed. */
+ ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
+
+ /* It is still forbidden to write in file1_s1d3. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
+ /* Readdir access is still allowed. */
+ ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
+
+ /*
+ * Now, dir_s1d3 gets a new rule tied to it, only allowing
+ * LANDLOCK_ACCESS_FS_WRITE_FILE. The (kernel internal) difference is
+ * that there was no rule tied to it before.
+ */
+ add_path_beneath(_metadata, ruleset_fd, LANDLOCK_ACCESS_FS_WRITE_FILE,
+ dir_s1d3);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /*
+ * Same tests and results as above, except for open(dir_s1d3) which is
+ * now denied because the new rule masks the rule previously inherited
+ * from dir_s1d2.
+ */
+
+ /* Same tests and results as above. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
+
+ /* It is still forbidden to write in file1_s1d2. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
+ /* Readdir access is still allowed. */
+ ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
+
+ /* It is still forbidden to write in file1_s1d3. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
+ /*
+ * Readdir of dir_s1d3 is still allowed because of the OR policy inside
+ * the same layer.
+ */
+ ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
+}
+
+TEST_F_FORK(layout1, inherit_superset)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d3,
+ .access = ACCESS_RO,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+
+ /* Readdir access is denied for dir_s1d2. */
+ ASSERT_EQ(EACCES, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
+ /* Readdir access is allowed for dir_s1d3. */
+ ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
+ /* File access is allowed for file1_s1d3. */
+ ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+
+ /* Now dir_s1d2, parent of dir_s1d3, gets a new rule tied to it. */
+ add_path_beneath(_metadata, ruleset_fd, LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_READ_DIR, dir_s1d2);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Readdir access is still denied for dir_s1d2. */
+ ASSERT_EQ(EACCES, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
+ /* Readdir access is still allowed for dir_s1d3. */
+ ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
+ /* File access is still allowed for file1_s1d3. */
+ ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+}
+
+TEST_F_FORK(layout1, max_layers)
+{
+ int i, err;
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d2,
+ .access = ACCESS_RO,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ for (i = 0; i < 64; i++)
+ enforce_ruleset(_metadata, ruleset_fd);
+
+ for (i = 0; i < 2; i++) {
+ err = landlock_restrict_self(ruleset_fd, 0);
+ ASSERT_EQ(-1, err);
+ ASSERT_EQ(E2BIG, errno);
+ }
+ ASSERT_EQ(0, close(ruleset_fd));
+}
+
+TEST_F_FORK(layout1, empty_or_same_ruleset)
+{
+ struct landlock_ruleset_attr ruleset_attr = {};
+ int ruleset_fd;
+
+ /* Tests empty handled_access_fs. */
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(-1, ruleset_fd);
+ ASSERT_EQ(ENOMSG, errno);
+
+ /* Enforces a policy which denies read access to all files. */
+ ruleset_attr.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE;
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
+
+ /* Nests a policy which denies read access to all directories. */
+ ruleset_attr.handled_access_fs = LANDLOCK_ACCESS_FS_READ_DIR;
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY));
+
+ /* Enforces a second time with the same ruleset. */
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+}
+
+TEST_F_FORK(layout1, rule_on_mountpoint)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d1,
+ .access = ACCESS_RO,
+ },
+ {
+ /* dir_s3d2 is a mount point. */
+ .path = dir_s3d2,
+ .access = ACCESS_RO,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
+
+ ASSERT_EQ(EACCES, test_open(dir_s2d1, O_RDONLY));
+
+ ASSERT_EQ(EACCES, test_open(dir_s3d1, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s3d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s3d3, O_RDONLY));
+}
+
+TEST_F_FORK(layout1, rule_over_mountpoint)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d1,
+ .access = ACCESS_RO,
+ },
+ {
+ /* dir_s3d2 is a mount point. */
+ .path = dir_s3d1,
+ .access = ACCESS_RO,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
+
+ ASSERT_EQ(EACCES, test_open(dir_s2d1, O_RDONLY));
+
+ ASSERT_EQ(0, test_open(dir_s3d1, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s3d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s3d3, O_RDONLY));
+}
+
+/*
+ * This test verifies that we can apply a Landlock rule to the root directory
+ * (which might require special handling).
+ */
+TEST_F_FORK(layout1, rule_over_root_allow_then_deny)
+{
+ struct rule rules[] = {
+ {
+ .path = "/",
+ .access = ACCESS_RO,
+ },
+ {}
+ };
+ int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks allowed access. */
+ ASSERT_EQ(0, test_open("/", O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
+
+ rules[0].access = LANDLOCK_ACCESS_FS_READ_FILE;
+ ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks denied access (on a directory). */
+ ASSERT_EQ(EACCES, test_open("/", O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY));
+}
+
+TEST_F_FORK(layout1, rule_over_root_deny)
+{
+ const struct rule rules[] = {
+ {
+ .path = "/",
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks denied access (on a directory). */
+ ASSERT_EQ(EACCES, test_open("/", O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY));
+}
+
+TEST_F_FORK(layout1, rule_inside_mount_ns)
+{
+ const struct rule rules[] = {
+ {
+ .path = "s3d3",
+ .access = ACCESS_RO,
+ },
+ {}
+ };
+ int ruleset_fd;
+
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(0, syscall(SYS_pivot_root, dir_s3d2, dir_s3d3)) {
+ TH_LOG("Failed to pivot root: %s", strerror(errno));
+ };
+ ASSERT_EQ(0, chdir("/"));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+
+ ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ ASSERT_EQ(0, test_open("s3d3", O_RDONLY));
+ ASSERT_EQ(EACCES, test_open("/", O_RDONLY));
+}
+
+TEST_F_FORK(layout1, mount_and_pivot)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s3d2,
+ .access = ACCESS_RO,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(-1, mount(NULL, dir_s3d2, NULL, MS_RDONLY, NULL));
+ ASSERT_EQ(EPERM, errno);
+ ASSERT_EQ(-1, syscall(SYS_pivot_root, dir_s3d2, dir_s3d3));
+ ASSERT_EQ(EPERM, errno);
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+}
+
+TEST_F_FORK(layout1, move_mount)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s3d2,
+ .access = ACCESS_RO,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+
+ ASSERT_LE(0, ruleset_fd);
+
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(0, syscall(SYS_move_mount, AT_FDCWD, dir_s3d2, AT_FDCWD,
+ dir_s1d2, 0)) {
+ TH_LOG("Failed to move mount: %s", strerror(errno));
+ }
+
+ ASSERT_EQ(0, syscall(SYS_move_mount, AT_FDCWD, dir_s1d2, AT_FDCWD,
+ dir_s3d2, 0));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(-1, syscall(SYS_move_mount, AT_FDCWD, dir_s3d2, AT_FDCWD,
+ dir_s1d2, 0));
+ ASSERT_EQ(EPERM, errno);
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+}
+
+TEST_F_FORK(layout1, release_inodes)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d1,
+ .access = ACCESS_RO,
+ },
+ {
+ .path = dir_s3d2,
+ .access = ACCESS_RO,
+ },
+ {
+ .path = dir_s3d3,
+ .access = ACCESS_RO,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ /* Unmount a file hierarchy while it is being used by a ruleset. */
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(0, umount(dir_s3d2));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(dir_s3d2, O_RDONLY));
+ /* dir_s3d3 would not be allowed, and it no longer exists anyway. */
+ ASSERT_EQ(ENOENT, test_open(dir_s3d3, O_RDONLY));
+}
+
+enum relative_access {
+ REL_OPEN,
+ REL_CHDIR,
+ REL_CHROOT_ONLY,
+ REL_CHROOT_CHDIR,
+};
+
+static void test_relative_path(struct __test_metadata *const _metadata,
+ const enum relative_access rel)
+{
+ /*
+ * Common layer to check that chroot doesn't ignore it (i.e. a chroot
+ * is not a disconnected root directory).
+ */
+ const struct rule layer1_base[] = {
+ {
+ .path = TMP_DIR,
+ .access = ACCESS_RO,
+ },
+ {}
+ };
+ const struct rule layer2_subs[] = {
+ {
+ .path = dir_s1d2,
+ .access = ACCESS_RO,
+ },
+ {
+ .path = dir_s2d2,
+ .access = ACCESS_RO,
+ },
+ {}
+ };
+ int dirfd, ruleset_fd;
+
+ ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer1_base);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer2_subs);
+
+ ASSERT_LE(0, ruleset_fd);
+ switch (rel) {
+ case REL_OPEN:
+ case REL_CHDIR:
+ break;
+ case REL_CHROOT_ONLY:
+ ASSERT_EQ(0, chdir(dir_s2d2));
+ break;
+ case REL_CHROOT_CHDIR:
+ ASSERT_EQ(0, chdir(dir_s1d2));
+ break;
+ default:
+ ASSERT_TRUE(false);
+ return;
+ }
+
+ set_cap(_metadata, CAP_SYS_CHROOT);
+ enforce_ruleset(_metadata, ruleset_fd);
+
+ switch (rel) {
+ case REL_OPEN:
+ dirfd = open(dir_s1d2, O_DIRECTORY);
+ ASSERT_LE(0, dirfd);
+ break;
+ case REL_CHDIR:
+ ASSERT_EQ(0, chdir(dir_s1d2));
+ dirfd = AT_FDCWD;
+ break;
+ case REL_CHROOT_ONLY:
+ /* Does a chroot into dir_s1d2 (relative to dir_s2d2). */
+ ASSERT_EQ(0, chroot("../../s1d1/s1d2")) {
+ TH_LOG("Failed to chroot: %s", strerror(errno));
+ }
+ dirfd = AT_FDCWD;
+ break;
+ case REL_CHROOT_CHDIR:
+ /* Does a chroot into dir_s1d2. */
+ ASSERT_EQ(0, chroot(".")) {
+ TH_LOG("Failed to chroot: %s", strerror(errno));
+ }
+ dirfd = AT_FDCWD;
+ break;
+ }
+
+ ASSERT_EQ((rel == REL_CHROOT_CHDIR) ? 0 : EACCES,
+ test_open_rel(dirfd, "..", O_RDONLY));
+ ASSERT_EQ(0, test_open_rel(dirfd, ".", O_RDONLY));
+
+ if (rel == REL_CHROOT_ONLY) {
+ /* The current directory is dir_s2d2. */
+ ASSERT_EQ(0, test_open_rel(dirfd, "./s2d3", O_RDONLY));
+ } else {
+ /* The current directory is dir_s1d2. */
+ ASSERT_EQ(0, test_open_rel(dirfd, "./s1d3", O_RDONLY));
+ }
+
+ if (rel == REL_CHROOT_ONLY || rel == REL_CHROOT_CHDIR) {
+ /* Checks the root dir_s1d2. */
+ ASSERT_EQ(0, test_open_rel(dirfd, "/..", O_RDONLY));
+ ASSERT_EQ(0, test_open_rel(dirfd, "/", O_RDONLY));
+ ASSERT_EQ(0, test_open_rel(dirfd, "/f1", O_RDONLY));
+ ASSERT_EQ(0, test_open_rel(dirfd, "/s1d3", O_RDONLY));
+ }
+
+ if (rel != REL_CHROOT_CHDIR) {
+ ASSERT_EQ(EACCES, test_open_rel(dirfd, "../../s1d1", O_RDONLY));
+ ASSERT_EQ(0, test_open_rel(dirfd, "../../s1d1/s1d2", O_RDONLY));
+ ASSERT_EQ(0, test_open_rel(dirfd, "../../s1d1/s1d2/s1d3", O_RDONLY));
+
+ ASSERT_EQ(EACCES, test_open_rel(dirfd, "../../s2d1", O_RDONLY));
+ ASSERT_EQ(0, test_open_rel(dirfd, "../../s2d1/s2d2", O_RDONLY));
+ ASSERT_EQ(0, test_open_rel(dirfd, "../../s2d1/s2d2/s2d3", O_RDONLY));
+ }
+
+ if (rel == REL_OPEN)
+ ASSERT_EQ(0, close(dirfd));
+ ASSERT_EQ(0, close(ruleset_fd));
+}
+
+TEST_F_FORK(layout1, relative_open)
+{
+ test_relative_path(_metadata, REL_OPEN);
+}
+
+TEST_F_FORK(layout1, relative_chdir)
+{
+ test_relative_path(_metadata, REL_CHDIR);
+}
+
+TEST_F_FORK(layout1, relative_chroot_only)
+{
+ test_relative_path(_metadata, REL_CHROOT_ONLY);
+}
+
+TEST_F_FORK(layout1, relative_chroot_chdir)
+{
+ test_relative_path(_metadata, REL_CHROOT_CHDIR);
+}
+
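+/*
+ * Copies the binary at BINARY_PATH over an existing file so that the
+ * destination becomes an executable regular file.
+ */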
+static void copy_binary(struct __test_metadata *const _metadata,
+ const char *const dst_path)
+{
+ int dst_fd, src_fd;
+ struct stat statbuf;
+
+ dst_fd = open(dst_path, O_WRONLY | O_TRUNC | O_CLOEXEC);
+ ASSERT_LE(0, dst_fd) {
+ TH_LOG("Failed to open \"%s\": %s", dst_path,
+ strerror(errno));
+ }
+ src_fd = open(BINARY_PATH, O_RDONLY | O_CLOEXEC);
+ ASSERT_LE(0, src_fd) {
+ TH_LOG("Failed to open \"" BINARY_PATH "\": %s",
+ strerror(errno));
+ }
+ ASSERT_EQ(0, fstat(src_fd, &statbuf));
+ ASSERT_EQ(statbuf.st_size, sendfile(dst_fd, src_fd, 0,
+ statbuf.st_size));
+ ASSERT_EQ(0, close(src_fd));
+ ASSERT_EQ(0, close(dst_fd));
+}
+
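+/*
+ * Forks and executes the given path, checking that execve(2) either
+ * succeeds or fails with the expected error code.
+ */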
+static void test_execute(struct __test_metadata *const _metadata,
+ const int err, const char *const path)
+{
+ int status;
+ char *const argv[] = {(char *)path, NULL};
+ const pid_t child = fork();
+
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ ASSERT_EQ(err ? -1 : 0, execve(path, argv, NULL)) {
+ TH_LOG("Failed to execute \"%s\": %s", path,
+ strerror(errno));
+ };
+ ASSERT_EQ(err, errno);
+ _exit(_metadata->passed ? 2 : 1);
+ return;
+ }
+ ASSERT_EQ(child, waitpid(child, &status, 0));
+ ASSERT_EQ(1, WIFEXITED(status));
+ ASSERT_EQ(err ? 2 : 0, WEXITSTATUS(status)) {
+ TH_LOG("Unexpected return code for \"%s\": %s", path,
+ strerror(errno));
+ };
+}
+
+TEST_F_FORK(layout1, execute)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_EXECUTE,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, rules[0].access,
+ rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ copy_binary(_metadata, file1_s1d1);
+ copy_binary(_metadata, file1_s1d2);
+ copy_binary(_metadata, file1_s1d3);
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY));
+ test_execute(_metadata, EACCES, file1_s1d1);
+
+ ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
+ test_execute(_metadata, 0, file1_s1d2);
+
+ ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+ test_execute(_metadata, 0, file1_s1d3);
+}
+
+TEST_F_FORK(layout1, link)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_MAKE_REG,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, rules[0].access,
+ rules);
+
+ ASSERT_LE(0, ruleset_fd);
+
+ ASSERT_EQ(0, unlink(file1_s1d1));
+ ASSERT_EQ(0, unlink(file1_s1d2));
+ ASSERT_EQ(0, unlink(file1_s1d3));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ ASSERT_EQ(-1, link(file2_s1d1, file1_s1d1));
+ ASSERT_EQ(EACCES, errno);
+ /* Denies linking because of reparenting. */
+ ASSERT_EQ(-1, link(file1_s2d1, file1_s1d2));
+ ASSERT_EQ(EXDEV, errno);
+ ASSERT_EQ(-1, link(file2_s1d2, file1_s1d3));
+ ASSERT_EQ(EXDEV, errno);
+
+ ASSERT_EQ(0, link(file2_s1d2, file1_s1d2));
+ ASSERT_EQ(0, link(file2_s1d3, file1_s1d3));
+}
+
+TEST_F_FORK(layout1, rename_file)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d3,
+ .access = LANDLOCK_ACCESS_FS_REMOVE_FILE,
+ },
+ {
+ .path = dir_s2d2,
+ .access = LANDLOCK_ACCESS_FS_REMOVE_FILE,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, rules[0].access,
+ rules);
+
+ ASSERT_LE(0, ruleset_fd);
+
+ ASSERT_EQ(0, unlink(file1_s1d1));
+ ASSERT_EQ(0, unlink(file1_s1d2));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /*
+ * Tries to replace a file, moving it from a directory that allows file
+ * removal to a different directory (which also allows file removal).
+ */
+ ASSERT_EQ(-1, rename(file1_s2d3, file1_s1d3));
+ ASSERT_EQ(EXDEV, errno);
+ ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d3, AT_FDCWD, file1_s1d3,
+ RENAME_EXCHANGE));
+ ASSERT_EQ(EXDEV, errno);
+ ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d3, AT_FDCWD, dir_s1d3,
+ RENAME_EXCHANGE));
+ ASSERT_EQ(EXDEV, errno);
+
+ /*
+ * Tries to replace a file, moving it from a directory that denies file
+ * removal to a different directory (which allows file removal).
+ */
+ ASSERT_EQ(-1, rename(file1_s2d1, file1_s1d3));
+ ASSERT_EQ(EXDEV, errno);
+ ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d1, AT_FDCWD, file1_s1d3,
+ RENAME_EXCHANGE));
+ ASSERT_EQ(EXDEV, errno);
+ ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d2, AT_FDCWD, file1_s1d3,
+ RENAME_EXCHANGE));
+ ASSERT_EQ(EXDEV, errno);
+
+ /* Exchanges files and directories that partially allow removal. */
+ ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d2, AT_FDCWD, file1_s2d1,
+ RENAME_EXCHANGE));
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d1, AT_FDCWD, dir_s2d2,
+ RENAME_EXCHANGE));
+ ASSERT_EQ(EACCES, errno);
+
+ /* Renames files with different parents. */
+ ASSERT_EQ(-1, rename(file1_s2d2, file1_s1d2));
+ ASSERT_EQ(EXDEV, errno);
+ ASSERT_EQ(0, unlink(file1_s1d3));
+ ASSERT_EQ(-1, rename(file1_s2d1, file1_s1d3));
+ ASSERT_EQ(EXDEV, errno);
+
+ /* Exchanges and renames files with the same parent. */
+ ASSERT_EQ(0, renameat2(AT_FDCWD, file2_s2d3, AT_FDCWD, file1_s2d3,
+ RENAME_EXCHANGE));
+ ASSERT_EQ(0, rename(file2_s2d3, file1_s2d3));
+
+ /* Exchanges files and directories with the same parent, twice. */
+ ASSERT_EQ(0, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_s2d3,
+ RENAME_EXCHANGE));
+ ASSERT_EQ(0, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_s2d3,
+ RENAME_EXCHANGE));
+}
+
+TEST_F_FORK(layout1, rename_dir)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_REMOVE_DIR,
+ },
+ {
+ .path = dir_s2d1,
+ .access = LANDLOCK_ACCESS_FS_REMOVE_DIR,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, rules[0].access,
+ rules);
+
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Empties dir_s1d3 to allow renaming. */
+ ASSERT_EQ(0, unlink(file1_s1d3));
+ ASSERT_EQ(0, unlink(file2_s1d3));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Exchanges and renames a directory to a different parent. */
+ ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d3, AT_FDCWD, dir_s1d3,
+ RENAME_EXCHANGE));
+ ASSERT_EQ(EXDEV, errno);
+ ASSERT_EQ(-1, rename(dir_s2d3, dir_s1d3));
+ ASSERT_EQ(EXDEV, errno);
+ ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_s1d3,
+ RENAME_EXCHANGE));
+ ASSERT_EQ(EXDEV, errno);
+
+ /*
+ * Exchanges directories under the same parent, which doesn't allow
+ * directory removal.
+ */
+ ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s1d1, AT_FDCWD, dir_s2d1,
+ RENAME_EXCHANGE));
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s1d1, AT_FDCWD, dir_s1d2,
+ RENAME_EXCHANGE));
+ ASSERT_EQ(EACCES, errno);
+
+ /*
+ * Exchanges and renames directories under the same parent, which
+ * allows directory removal.
+ */
+ ASSERT_EQ(0, renameat2(AT_FDCWD, dir_s1d3, AT_FDCWD, file1_s1d2,
+ RENAME_EXCHANGE));
+ ASSERT_EQ(0, unlink(dir_s1d3));
+ ASSERT_EQ(0, mkdir(dir_s1d3, 0700));
+ ASSERT_EQ(0, rename(file1_s1d2, dir_s1d3));
+ ASSERT_EQ(0, rmdir(dir_s1d3));
+}
+
+TEST_F_FORK(layout1, remove_dir)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_REMOVE_DIR,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, rules[0].access,
+ rules);
+
+ ASSERT_LE(0, ruleset_fd);
+
+ ASSERT_EQ(0, unlink(file1_s1d1));
+ ASSERT_EQ(0, unlink(file1_s1d2));
+ ASSERT_EQ(0, unlink(file1_s1d3));
+ ASSERT_EQ(0, unlink(file2_s1d3));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ ASSERT_EQ(0, rmdir(dir_s1d3));
+ ASSERT_EQ(0, mkdir(dir_s1d3, 0700));
+ ASSERT_EQ(0, unlinkat(AT_FDCWD, dir_s1d3, AT_REMOVEDIR));
+
+ /* dir_s1d2 itself cannot be removed. */
+ ASSERT_EQ(-1, rmdir(dir_s1d2));
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(-1, unlinkat(AT_FDCWD, dir_s1d2, AT_REMOVEDIR));
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(-1, rmdir(dir_s1d1));
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(-1, unlinkat(AT_FDCWD, dir_s1d1, AT_REMOVEDIR));
+ ASSERT_EQ(EACCES, errno);
+}
+
+TEST_F_FORK(layout1, remove_file)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_REMOVE_FILE,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, rules[0].access,
+ rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ ASSERT_EQ(-1, unlink(file1_s1d1));
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(-1, unlinkat(AT_FDCWD, file1_s1d1, 0));
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(0, unlink(file1_s1d2));
+ ASSERT_EQ(0, unlinkat(AT_FDCWD, file1_s1d3, 0));
+}
+
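+/*
+ * Checks that mknod(2), and the related link(2) and rename(2) calls, are
+ * only allowed below dir_s1d2 for the given file type.
+ */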
+static void test_make_file(struct __test_metadata *const _metadata,
+ const __u64 access, const mode_t mode, const dev_t dev)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d2,
+ .access = access,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, access, rules);
+
+ ASSERT_LE(0, ruleset_fd);
+
+ ASSERT_EQ(0, unlink(file1_s1d1));
+ ASSERT_EQ(0, unlink(file2_s1d1));
+ ASSERT_EQ(0, mknod(file2_s1d1, mode | 0400, dev)) {
+ TH_LOG("Failed to make file \"%s\": %s",
+ file2_s1d1, strerror(errno));
+ };
+
+ ASSERT_EQ(0, unlink(file1_s1d2));
+ ASSERT_EQ(0, unlink(file2_s1d2));
+
+ ASSERT_EQ(0, unlink(file1_s1d3));
+ ASSERT_EQ(0, unlink(file2_s1d3));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ ASSERT_EQ(-1, mknod(file1_s1d1, mode | 0400, dev));
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(-1, link(file2_s1d1, file1_s1d1));
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(-1, rename(file2_s1d1, file1_s1d1));
+ ASSERT_EQ(EACCES, errno);
+
+ ASSERT_EQ(0, mknod(file1_s1d2, mode | 0400, dev)) {
+ TH_LOG("Failed to make file \"%s\": %s",
+ file1_s1d2, strerror(errno));
+ };
+ ASSERT_EQ(0, link(file1_s1d2, file2_s1d2));
+ ASSERT_EQ(0, unlink(file2_s1d2));
+ ASSERT_EQ(0, rename(file1_s1d2, file2_s1d2));
+
+ ASSERT_EQ(0, mknod(file1_s1d3, mode | 0400, dev));
+ ASSERT_EQ(0, link(file1_s1d3, file2_s1d3));
+ ASSERT_EQ(0, unlink(file2_s1d3));
+ ASSERT_EQ(0, rename(file1_s1d3, file2_s1d3));
+}
+
+TEST_F_FORK(layout1, make_char)
+{
+ /* Creates a character device with /dev/null's major:minor numbers. */
+ set_cap(_metadata, CAP_MKNOD);
+ test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_CHAR, S_IFCHR,
+ makedev(1, 3));
+}
+
+TEST_F_FORK(layout1, make_block)
+{
+ /* Creates a block device with /dev/loop0's major:minor numbers. */
+ set_cap(_metadata, CAP_MKNOD);
+ test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_BLOCK, S_IFBLK,
+ makedev(7, 0));
+}
+
+TEST_F_FORK(layout1, make_reg_1)
+{
+ test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_REG, S_IFREG, 0);
+}
+
+TEST_F_FORK(layout1, make_reg_2)
+{
+ test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_REG, 0, 0);
+}
+
+TEST_F_FORK(layout1, make_sock)
+{
+ test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_SOCK, S_IFSOCK, 0);
+}
+
+TEST_F_FORK(layout1, make_fifo)
+{
+ test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_FIFO, S_IFIFO, 0);
+}
+
+TEST_F_FORK(layout1, make_sym)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_MAKE_SYM,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, rules[0].access,
+ rules);
+
+ ASSERT_LE(0, ruleset_fd);
+
+ ASSERT_EQ(0, unlink(file1_s1d1));
+ ASSERT_EQ(0, unlink(file2_s1d1));
+ ASSERT_EQ(0, symlink("none", file2_s1d1));
+
+ ASSERT_EQ(0, unlink(file1_s1d2));
+ ASSERT_EQ(0, unlink(file2_s1d2));
+
+ ASSERT_EQ(0, unlink(file1_s1d3));
+ ASSERT_EQ(0, unlink(file2_s1d3));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ ASSERT_EQ(-1, symlink("none", file1_s1d1));
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(-1, link(file2_s1d1, file1_s1d1));
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(-1, rename(file2_s1d1, file1_s1d1));
+ ASSERT_EQ(EACCES, errno);
+
+ ASSERT_EQ(0, symlink("none", file1_s1d2));
+ ASSERT_EQ(0, link(file1_s1d2, file2_s1d2));
+ ASSERT_EQ(0, unlink(file2_s1d2));
+ ASSERT_EQ(0, rename(file1_s1d2, file2_s1d2));
+
+ ASSERT_EQ(0, symlink("none", file1_s1d3));
+ ASSERT_EQ(0, link(file1_s1d3, file2_s1d3));
+ ASSERT_EQ(0, unlink(file2_s1d3));
+ ASSERT_EQ(0, rename(file1_s1d3, file2_s1d3));
+}
+
+TEST_F_FORK(layout1, make_dir)
+{
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_MAKE_DIR,
+ },
+ {}
+ };
+ const int ruleset_fd = create_ruleset(_metadata, rules[0].access,
+ rules);
+
+ ASSERT_LE(0, ruleset_fd);
+
+ ASSERT_EQ(0, unlink(file1_s1d1));
+ ASSERT_EQ(0, unlink(file1_s1d2));
+ ASSERT_EQ(0, unlink(file1_s1d3));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Uses file_* as directory names. */
+ ASSERT_EQ(-1, mkdir(file1_s1d1, 0700));
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(0, mkdir(file1_s1d2, 0700));
+ ASSERT_EQ(0, mkdir(file1_s1d3, 0700));
+}
+
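+/* Opens a file through its magic link in /proc/self/fd . */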
+static int open_proc_fd(struct __test_metadata *const _metadata, const int fd,
+ const int open_flags)
+{
+ static const char path_template[] = "/proc/self/fd/%d";
+ char procfd_path[sizeof(path_template) + 10];
+ const int procfd_path_size = snprintf(procfd_path, sizeof(procfd_path),
+ path_template, fd);
+
+ ASSERT_LT(procfd_path_size, sizeof(procfd_path));
+ return open(procfd_path, open_flags);
+}
+
+TEST_F_FORK(layout1, proc_unlinked_file)
+{
+ const struct rule rules[] = {
+ {
+ .path = file1_s1d2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {}
+ };
+ int reg_fd, proc_fd;
+ const int ruleset_fd = create_ruleset(_metadata,
+ LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE, rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDWR));
+ ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
+ reg_fd = open(file1_s1d2, O_RDONLY | O_CLOEXEC);
+ ASSERT_LE(0, reg_fd);
+ ASSERT_EQ(0, unlink(file1_s1d2));
+
+ proc_fd = open_proc_fd(_metadata, reg_fd, O_RDONLY | O_CLOEXEC);
+ ASSERT_LE(0, proc_fd);
+ ASSERT_EQ(0, close(proc_fd));
+
+ proc_fd = open_proc_fd(_metadata, reg_fd, O_RDWR | O_CLOEXEC);
+ ASSERT_EQ(-1, proc_fd) {
+ TH_LOG("Successfully opened /proc/self/fd/%d: %s",
+ reg_fd, strerror(errno));
+ }
+ ASSERT_EQ(EACCES, errno);
+
+ ASSERT_EQ(0, close(reg_fd));
+}
+
+TEST_F_FORK(layout1, proc_pipe)
+{
+ int proc_fd;
+ int pipe_fds[2];
+ char buf = '\0';
+ const struct rule rules[] = {
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE,
+ },
+ {}
+ };
+ /* Limits read and write access to files tied to the filesystem. */
+ const int ruleset_fd = create_ruleset(_metadata, rules[0].access,
+ rules);
+
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks enforcement for normal files. */
+ ASSERT_EQ(0, test_open(file1_s1d2, O_RDWR));
+ ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR));
+
+ /* Checks access to pipes through FD. */
+ ASSERT_EQ(0, pipe2(pipe_fds, O_CLOEXEC));
+ ASSERT_EQ(1, write(pipe_fds[1], ".", 1)) {
+ TH_LOG("Failed to write in pipe: %s", strerror(errno));
+ }
+ ASSERT_EQ(1, read(pipe_fds[0], &buf, 1));
+ ASSERT_EQ('.', buf);
+
+ /* Checks write access to pipe through /proc/self/fd . */
+ proc_fd = open_proc_fd(_metadata, pipe_fds[1], O_WRONLY | O_CLOEXEC);
+ ASSERT_LE(0, proc_fd);
+ ASSERT_EQ(1, write(proc_fd, ".", 1)) {
+ TH_LOG("Failed to write through /proc/self/fd/%d: %s",
+ pipe_fds[1], strerror(errno));
+ }
+ ASSERT_EQ(0, close(proc_fd));
+
+ /* Checks read access to pipe through /proc/self/fd . */
+ proc_fd = open_proc_fd(_metadata, pipe_fds[0], O_RDONLY | O_CLOEXEC);
+ ASSERT_LE(0, proc_fd);
+ buf = '\0';
+ ASSERT_EQ(1, read(proc_fd, &buf, 1)) {
+ TH_LOG("Failed to read through /proc/self/fd/%d: %s",
+ pipe_fds[1], strerror(errno));
+ }
+ ASSERT_EQ(0, close(proc_fd));
+
+ ASSERT_EQ(0, close(pipe_fds[0]));
+ ASSERT_EQ(0, close(pipe_fds[1]));
+}
+
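+/* Variant of layout1 with dir_s1d2 bind mounted on top of dir_s2d2. */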
+FIXTURE(layout1_bind) {
+};
+
+FIXTURE_SETUP(layout1_bind)
+{
+ prepare_layout(_metadata);
+
+ create_layout1(_metadata);
+
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(0, mount(dir_s1d2, dir_s2d2, NULL, MS_BIND, NULL));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+}
+
+FIXTURE_TEARDOWN(layout1_bind)
+{
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ EXPECT_EQ(0, umount(dir_s2d2));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+
+ remove_layout1(_metadata);
+
+ cleanup_layout(_metadata);
+}
+
+static const char bind_dir_s1d3[] = TMP_DIR "/s2d1/s2d2/s1d3";
+static const char bind_file1_s1d3[] = TMP_DIR "/s2d1/s2d2/s1d3/f1";
+
+/*
+ * layout1_bind hierarchy:
+ *
+ * tmp
+ * ├── s1d1
+ * │   ├── f1
+ * │   ├── f2
+ * │   └── s1d2
+ * │   ├── f1
+ * │   ├── f2
+ * │   └── s1d3
+ * │   ├── f1
+ * │   └── f2
+ * ├── s2d1
+ * │   ├── f1
+ * │   └── s2d2
+ * │   ├── f1
+ * │   ├── f2
+ * │   └── s1d3
+ * │   ├── f1
+ * │   └── f2
+ * └── s3d1
+ * └── s3d2
+ * └── s3d3
+ */
+
+TEST_F_FORK(layout1_bind, no_restriction)
+{
+ ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+
+ ASSERT_EQ(0, test_open(dir_s2d1, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s2d1, O_RDONLY));
+ ASSERT_EQ(0, test_open(dir_s2d2, O_RDONLY));
+ ASSERT_EQ(0, test_open(file1_s2d2, O_RDONLY));
+ ASSERT_EQ(ENOENT, test_open(dir_s2d3, O_RDONLY));
+ ASSERT_EQ(ENOENT, test_open(file1_s2d3, O_RDONLY));
+
+ ASSERT_EQ(0, test_open(bind_dir_s1d3, O_RDONLY));
+ ASSERT_EQ(0, test_open(bind_file1_s1d3, O_RDONLY));
+
+ ASSERT_EQ(0, test_open(dir_s3d1, O_RDONLY));
+}
+
+TEST_F_FORK(layout1_bind, same_content_same_file)
+{
+ /*
+ * Sets access rights on the parent directories of both the source and
+ * destination mount points.
+ */
+ const struct rule layer1_parent[] = {
+ {
+ .path = dir_s1d1,
+ .access = ACCESS_RO,
+ },
+ {
+ .path = dir_s2d1,
+ .access = ACCESS_RW,
+ },
+ {}
+ };
+ /*
+ * Sets access rights on the same bind-mounted directories. The result
+ * should be ACCESS_RW for both directories, but not for their whole
+ * hierarchies, because of the first layer.
+ */
+ const struct rule layer2_mount_point[] = {
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = dir_s2d2,
+ .access = ACCESS_RW,
+ },
+ {}
+ };
+ /* Only allows read access to the s1d3 hierarchies. */
+ const struct rule layer3_source[] = {
+ {
+ .path = dir_s1d3,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {}
+ };
+ /* Removes all access rights. */
+ const struct rule layer4_destination[] = {
+ {
+ .path = bind_file1_s1d3,
+ .access = LANDLOCK_ACCESS_FS_WRITE_FILE,
+ },
+ {}
+ };
+ int ruleset_fd;
+
+ /* Sets rules for the parent directories. */
+ ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer1_parent);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks source hierarchy. */
+ ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
+ ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
+
+ ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
+ ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
+
+ /* Checks destination hierarchy. */
+ ASSERT_EQ(0, test_open(file1_s2d1, O_RDWR));
+ ASSERT_EQ(0, test_open(dir_s2d1, O_RDONLY | O_DIRECTORY));
+
+ ASSERT_EQ(0, test_open(file1_s2d2, O_RDWR));
+ ASSERT_EQ(0, test_open(dir_s2d2, O_RDONLY | O_DIRECTORY));
+
+ /* Sets rules for the mount points. */
+ ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer2_mount_point);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks source hierarchy. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
+
+ ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
+ ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
+
+ /* Checks destination hierarchy. */
+ ASSERT_EQ(EACCES, test_open(file1_s2d1, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s2d1, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(dir_s2d1, O_RDONLY | O_DIRECTORY));
+
+ ASSERT_EQ(0, test_open(file1_s2d2, O_RDWR));
+ ASSERT_EQ(0, test_open(dir_s2d2, O_RDONLY | O_DIRECTORY));
+ ASSERT_EQ(0, test_open(bind_dir_s1d3, O_RDONLY | O_DIRECTORY));
+
+ /* Sets a (shared) rule only on the source. */
+ ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer3_source);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks source hierarchy. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
+
+ ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
+
+ /* Checks destination hierarchy. */
+ ASSERT_EQ(EACCES, test_open(file1_s2d2, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s2d2, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(dir_s2d2, O_RDONLY | O_DIRECTORY));
+
+ ASSERT_EQ(0, test_open(bind_file1_s1d3, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(bind_file1_s1d3, O_WRONLY));
+ ASSERT_EQ(EACCES, test_open(bind_dir_s1d3, O_RDONLY | O_DIRECTORY));
+
+ /* Sets a (shared) rule only on the destination. */
+ ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer4_destination);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks source hierarchy. */
+ ASSERT_EQ(EACCES, test_open(file1_s1d3, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
+
+ /* Checks destination hierarchy. */
+ ASSERT_EQ(EACCES, test_open(bind_file1_s1d3, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(bind_file1_s1d3, O_WRONLY));
+}
+
+#define LOWER_BASE TMP_DIR "/lower"
+#define LOWER_DATA LOWER_BASE "/data"
+static const char lower_fl1[] = LOWER_DATA "/fl1";
+static const char lower_dl1[] = LOWER_DATA "/dl1";
+static const char lower_dl1_fl2[] = LOWER_DATA "/dl1/fl2";
+static const char lower_fo1[] = LOWER_DATA "/fo1";
+static const char lower_do1[] = LOWER_DATA "/do1";
+static const char lower_do1_fo2[] = LOWER_DATA "/do1/fo2";
+static const char lower_do1_fl3[] = LOWER_DATA "/do1/fl3";
+
+static const char (*lower_base_files[])[] = {
+ &lower_fl1,
+ &lower_fo1,
+ NULL
+};
+static const char (*lower_base_directories[])[] = {
+ &lower_dl1,
+ &lower_do1,
+ NULL
+};
+static const char (*lower_sub_files[])[] = {
+ &lower_dl1_fl2,
+ &lower_do1_fo2,
+ &lower_do1_fl3,
+ NULL
+};
+
+#define UPPER_BASE TMP_DIR "/upper"
+#define UPPER_DATA UPPER_BASE "/data"
+#define UPPER_WORK UPPER_BASE "/work"
+static const char upper_fu1[] = UPPER_DATA "/fu1";
+static const char upper_du1[] = UPPER_DATA "/du1";
+static const char upper_du1_fu2[] = UPPER_DATA "/du1/fu2";
+static const char upper_fo1[] = UPPER_DATA "/fo1";
+static const char upper_do1[] = UPPER_DATA "/do1";
+static const char upper_do1_fo2[] = UPPER_DATA "/do1/fo2";
+static const char upper_do1_fu3[] = UPPER_DATA "/do1/fu3";
+
+static const char (*upper_base_files[])[] = {
+ &upper_fu1,
+ &upper_fo1,
+ NULL
+};
+static const char (*upper_base_directories[])[] = {
+ &upper_du1,
+ &upper_do1,
+ NULL
+};
+static const char (*upper_sub_files[])[] = {
+ &upper_du1_fu2,
+ &upper_do1_fo2,
+ &upper_do1_fu3,
+ NULL
+};
+
+#define MERGE_BASE TMP_DIR "/merge"
+#define MERGE_DATA MERGE_BASE "/data"
+static const char merge_fl1[] = MERGE_DATA "/fl1";
+static const char merge_dl1[] = MERGE_DATA "/dl1";
+static const char merge_dl1_fl2[] = MERGE_DATA "/dl1/fl2";
+static const char merge_fu1[] = MERGE_DATA "/fu1";
+static const char merge_du1[] = MERGE_DATA "/du1";
+static const char merge_du1_fu2[] = MERGE_DATA "/du1/fu2";
+static const char merge_fo1[] = MERGE_DATA "/fo1";
+static const char merge_do1[] = MERGE_DATA "/do1";
+static const char merge_do1_fo2[] = MERGE_DATA "/do1/fo2";
+static const char merge_do1_fl3[] = MERGE_DATA "/do1/fl3";
+static const char merge_do1_fu3[] = MERGE_DATA "/do1/fu3";
+
+static const char (*merge_base_files[])[] = {
+ &merge_fl1,
+ &merge_fu1,
+ &merge_fo1,
+ NULL
+};
+static const char (*merge_base_directories[])[] = {
+ &merge_dl1,
+ &merge_du1,
+ &merge_do1,
+ NULL
+};
+static const char (*merge_sub_files[])[] = {
+ &merge_dl1_fl2,
+ &merge_du1_fu2,
+ &merge_do1_fo2,
+ &merge_do1_fl3,
+ &merge_do1_fu3,
+ NULL
+};
+
+/*
+ * layout2_overlay hierarchy:
+ *
+ * tmp
+ * ├── lower
+ * │   └── data
+ * │   ├── dl1
+ * │   │   └── fl2
+ * │   ├── do1
+ * │   │   ├── fl3
+ * │   │   └── fo2
+ * │   ├── fl1
+ * │   └── fo1
+ * ├── merge
+ * │   └── data
+ * │   ├── dl1
+ * │   │   └── fl2
+ * │   ├── do1
+ * │   │   ├── fl3
+ * │   │   ├── fo2
+ * │   │   └── fu3
+ * │   ├── du1
+ * │   │   └── fu2
+ * │   ├── fl1
+ * │   ├── fo1
+ * │   └── fu1
+ * └── upper
+ * ├── data
+ * │   ├── do1
+ * │   │   ├── fo2
+ * │   │   └── fu3
+ * │   ├── du1
+ * │   │   └── fu2
+ * │   ├── fo1
+ * │   └── fu1
+ * └── work
+ * └── work
+ */
+
+FIXTURE(layout2_overlay) {
+};
+
+FIXTURE_SETUP(layout2_overlay)
+{
+ prepare_layout(_metadata);
+
+ create_directory(_metadata, LOWER_BASE);
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ /* Creates tmpfs mount points to get deterministic overlayfs. */
+ ASSERT_EQ(0, mount("tmp", LOWER_BASE, "tmpfs", 0, "size=4m,mode=700"));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+ create_file(_metadata, lower_fl1);
+ create_file(_metadata, lower_dl1_fl2);
+ create_file(_metadata, lower_fo1);
+ create_file(_metadata, lower_do1_fo2);
+ create_file(_metadata, lower_do1_fl3);
+
+ create_directory(_metadata, UPPER_BASE);
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(0, mount("tmp", UPPER_BASE, "tmpfs", 0, "size=4m,mode=700"));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+ create_file(_metadata, upper_fu1);
+ create_file(_metadata, upper_du1_fu2);
+ create_file(_metadata, upper_fo1);
+ create_file(_metadata, upper_do1_fo2);
+ create_file(_metadata, upper_do1_fu3);
+ ASSERT_EQ(0, mkdir(UPPER_WORK, 0700));
+
+ create_directory(_metadata, MERGE_DATA);
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ set_cap(_metadata, CAP_DAC_OVERRIDE);
+ ASSERT_EQ(0, mount("overlay", MERGE_DATA, "overlay", 0,
+ "lowerdir=" LOWER_DATA
+ ",upperdir=" UPPER_DATA
+ ",workdir=" UPPER_WORK));
+ clear_cap(_metadata, CAP_DAC_OVERRIDE);
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+}
+
+FIXTURE_TEARDOWN(layout2_overlay)
+{
+ EXPECT_EQ(0, remove_path(lower_do1_fl3));
+ EXPECT_EQ(0, remove_path(lower_dl1_fl2));
+ EXPECT_EQ(0, remove_path(lower_fl1));
+ EXPECT_EQ(0, remove_path(lower_do1_fo2));
+ EXPECT_EQ(0, remove_path(lower_fo1));
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ EXPECT_EQ(0, umount(LOWER_BASE));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+ EXPECT_EQ(0, remove_path(LOWER_BASE));
+
+ EXPECT_EQ(0, remove_path(upper_do1_fu3));
+ EXPECT_EQ(0, remove_path(upper_du1_fu2));
+ EXPECT_EQ(0, remove_path(upper_fu1));
+ EXPECT_EQ(0, remove_path(upper_do1_fo2));
+ EXPECT_EQ(0, remove_path(upper_fo1));
+ EXPECT_EQ(0, remove_path(UPPER_WORK "/work"));
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ EXPECT_EQ(0, umount(UPPER_BASE));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+ EXPECT_EQ(0, remove_path(UPPER_BASE));
+
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ EXPECT_EQ(0, umount(MERGE_DATA));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+ EXPECT_EQ(0, remove_path(MERGE_DATA));
+
+ cleanup_layout(_metadata);
+}
+
+TEST_F_FORK(layout2_overlay, no_restriction)
+{
+ ASSERT_EQ(0, test_open(lower_fl1, O_RDONLY));
+ ASSERT_EQ(0, test_open(lower_dl1, O_RDONLY));
+ ASSERT_EQ(0, test_open(lower_dl1_fl2, O_RDONLY));
+ ASSERT_EQ(0, test_open(lower_fo1, O_RDONLY));
+ ASSERT_EQ(0, test_open(lower_do1, O_RDONLY));
+ ASSERT_EQ(0, test_open(lower_do1_fo2, O_RDONLY));
+ ASSERT_EQ(0, test_open(lower_do1_fl3, O_RDONLY));
+
+ ASSERT_EQ(0, test_open(upper_fu1, O_RDONLY));
+ ASSERT_EQ(0, test_open(upper_du1, O_RDONLY));
+ ASSERT_EQ(0, test_open(upper_du1_fu2, O_RDONLY));
+ ASSERT_EQ(0, test_open(upper_fo1, O_RDONLY));
+ ASSERT_EQ(0, test_open(upper_do1, O_RDONLY));
+ ASSERT_EQ(0, test_open(upper_do1_fo2, O_RDONLY));
+ ASSERT_EQ(0, test_open(upper_do1_fu3, O_RDONLY));
+
+ ASSERT_EQ(0, test_open(merge_fl1, O_RDONLY));
+ ASSERT_EQ(0, test_open(merge_dl1, O_RDONLY));
+ ASSERT_EQ(0, test_open(merge_dl1_fl2, O_RDONLY));
+ ASSERT_EQ(0, test_open(merge_fu1, O_RDONLY));
+ ASSERT_EQ(0, test_open(merge_du1, O_RDONLY));
+ ASSERT_EQ(0, test_open(merge_du1_fu2, O_RDONLY));
+ ASSERT_EQ(0, test_open(merge_fo1, O_RDONLY));
+ ASSERT_EQ(0, test_open(merge_do1, O_RDONLY));
+ ASSERT_EQ(0, test_open(merge_do1_fo2, O_RDONLY));
+ ASSERT_EQ(0, test_open(merge_do1_fl3, O_RDONLY));
+ ASSERT_EQ(0, test_open(merge_do1_fu3, O_RDONLY));
+}
+
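+/*
+ * Iterates over a NULL-terminated list of pointers to path strings, setting
+ * path_entry to each path in turn.
+ */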
+#define for_each_path(path_list, path_entry, i) \
+ for (i = 0, path_entry = *path_list[i]; path_list[i]; \
+ path_entry = *path_list[++i])
+
+TEST_F_FORK(layout2_overlay, same_content_different_file)
+{
+ /* Sets access rights on the parent directories of both layers. */
+ const struct rule layer1_base[] = {
+ {
+ .path = LOWER_BASE,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = UPPER_BASE,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = MERGE_BASE,
+ .access = ACCESS_RW,
+ },
+ {}
+ };
+ const struct rule layer2_data[] = {
+ {
+ .path = LOWER_DATA,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = UPPER_DATA,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = MERGE_DATA,
+ .access = ACCESS_RW,
+ },
+ {}
+ };
+ /* Sets access rights on directories inside both layers. */
+ const struct rule layer3_subdirs[] = {
+ {
+ .path = lower_dl1,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = lower_do1,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = upper_du1,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = upper_do1,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = merge_dl1,
+ .access = ACCESS_RW,
+ },
+ {
+ .path = merge_du1,
+ .access = ACCESS_RW,
+ },
+ {
+ .path = merge_do1,
+ .access = ACCESS_RW,
+ },
+ {}
+ };
+ /* Tightens access rights to the files. */
+ const struct rule layer4_files[] = {
+ {
+ .path = lower_dl1_fl2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = lower_do1_fo2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = lower_do1_fl3,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = upper_du1_fu2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = upper_do1_fo2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = upper_do1_fu3,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = merge_dl1_fl2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE,
+ },
+ {
+ .path = merge_du1_fu2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE,
+ },
+ {
+ .path = merge_do1_fo2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE,
+ },
+ {
+ .path = merge_do1_fl3,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE,
+ },
+ {
+ .path = merge_do1_fu3,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE,
+ },
+ {}
+ };
+ const struct rule layer5_merge_only[] = {
+ {
+ .path = MERGE_DATA,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_WRITE_FILE,
+ },
+ {}
+ };
+ int ruleset_fd;
+ size_t i;
+ const char *path_entry;
+
+ /* Sets rules on base directories (i.e. outside overlay scope). */
+ ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer1_base);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks lower layer. */
+ for_each_path(lower_base_files, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY));
+ }
+ for_each_path(lower_base_directories, path_entry, i) {
+ ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY | O_DIRECTORY));
+ }
+ for_each_path(lower_sub_files, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY));
+ }
+ /* Checks upper layer. */
+ for_each_path(upper_base_files, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY));
+ }
+ for_each_path(upper_base_directories, path_entry, i) {
+ ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY | O_DIRECTORY));
+ }
+ for_each_path(upper_sub_files, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY));
+ }
+ /*
+ * Checks that access rights are independent of the lower and upper
+ * layers: write access to upper files viewed through the merge point
+ * is still allowed, and so is write access to lower files viewed (and
+ * copied up) through the merge point.
+ */
+ for_each_path(merge_base_files, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDWR));
+ }
+ for_each_path(merge_base_directories, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDONLY | O_DIRECTORY));
+ }
+ for_each_path(merge_sub_files, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDWR));
+ }
+
+ /* Sets rules on data directories (i.e. inside overlay scope). */
+ ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer2_data);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks merge. */
+ for_each_path(merge_base_files, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDWR));
+ }
+ for_each_path(merge_base_directories, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDONLY | O_DIRECTORY));
+ }
+ for_each_path(merge_sub_files, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDWR));
+ }
+
+ /* Same checks with tighter rules. */
+ ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer3_subdirs);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks changes for lower layer. */
+ for_each_path(lower_base_files, path_entry, i) {
+ ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY));
+ }
+ /* Checks changes for upper layer. */
+ for_each_path(upper_base_files, path_entry, i) {
+ ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY));
+ }
+ /* Checks all merge accesses. */
+ for_each_path(merge_base_files, path_entry, i) {
+ ASSERT_EQ(EACCES, test_open(path_entry, O_RDWR));
+ }
+ for_each_path(merge_base_directories, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDONLY | O_DIRECTORY));
+ }
+ for_each_path(merge_sub_files, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDWR));
+ }
+
+ /* Sets rules directly on overlayed files. */
+ ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer4_files);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks unchanged accesses on lower layer. */
+ for_each_path(lower_sub_files, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY));
+ }
+ /* Checks unchanged accesses on upper layer. */
+ for_each_path(upper_sub_files, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDONLY));
+ ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY));
+ }
+ /* Checks all merge accesses. */
+ for_each_path(merge_base_files, path_entry, i) {
+ ASSERT_EQ(EACCES, test_open(path_entry, O_RDWR));
+ }
+ for_each_path(merge_base_directories, path_entry, i) {
+ ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY | O_DIRECTORY));
+ }
+ for_each_path(merge_sub_files, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDWR));
+ }
+
+ /* Only allows access to the merge hierarchy. */
+ ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer5_merge_only);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks new accesses on lower layer. */
+ for_each_path(lower_sub_files, path_entry, i) {
+ ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY));
+ }
+ /* Checks new accesses on upper layer. */
+ for_each_path(upper_sub_files, path_entry, i) {
+ ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY));
+ }
+ /* Checks all merge accesses. */
+ for_each_path(merge_base_files, path_entry, i) {
+ ASSERT_EQ(EACCES, test_open(path_entry, O_RDWR));
+ }
+ for_each_path(merge_base_directories, path_entry, i) {
+ ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY | O_DIRECTORY));
+ }
+ for_each_path(merge_sub_files, path_entry, i) {
+ ASSERT_EQ(0, test_open(path_entry, O_RDWR));
+ }
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/landlock/ptrace_test.c b/tools/testing/selftests/landlock/ptrace_test.c
new file mode 100644
index 000000000000..15fbef9cc849
--- /dev/null
+++ b/tools/testing/selftests/landlock/ptrace_test.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Landlock tests - Ptrace
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2019-2020 ANSSI
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/landlock.h>
+#include <signal.h>
+#include <sys/prctl.h>
+#include <sys/ptrace.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "common.h"
+
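+/* Creates and enforces a minimal Landlock domain for the calling thread. */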
+static void create_domain(struct __test_metadata *const _metadata)
+{
+ int ruleset_fd;
+ struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = LANDLOCK_ACCESS_FS_MAKE_BLOCK,
+ };
+
+ ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0);
+ EXPECT_LE(0, ruleset_fd) {
+ TH_LOG("Failed to create a ruleset: %s", strerror(errno));
+ }
+ EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+ EXPECT_EQ(0, landlock_restrict_self(ruleset_fd, 0));
+ EXPECT_EQ(0, close(ruleset_fd));
+}
+
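+/*
+ * Returns 0 if the target's /proc/<pid>/environ can be opened (i.e. the
+ * ptrace-mode-read check passes), or the open(2) error code otherwise.
+ */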
+static int test_ptrace_read(const pid_t pid)
+{
+ static const char path_template[] = "/proc/%d/environ";
+ char procenv_path[sizeof(path_template) + 10];
+ int procenv_path_size, fd;
+
+ procenv_path_size = snprintf(procenv_path, sizeof(procenv_path),
+ path_template, pid);
+ if (procenv_path_size >= sizeof(procenv_path))
+ return E2BIG;
+
+ fd = open(procenv_path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0)
+ return errno;
+ /*
+ * Mixing error codes from close(2) and open(2) should not lead to any
+ * (access type) confusion for this test.
+ */
+ if (close(fd) != 0)
+ return errno;
+ return 0;
+}
+
+FIXTURE(hierarchy) { };
+
+FIXTURE_VARIANT(hierarchy) {
+ const bool domain_both;
+ const bool domain_parent;
+ const bool domain_child;
+};
+
+/*
+ * Tests multiple tracing combinations between a parent process P1 and a child
+ * process P2.
+ *
+ * Yama's scoped ptrace is presumed disabled. If enabled, this optional
+ * restriction is enforced in addition to any Landlock check, which means that
+ * all P2 requests to trace P1 would be denied.
+ */
+
+/*
+ * No domain
+ *
+ * P1-. P1 -> P2 : allow
+ * \ P2 -> P1 : allow
+ * 'P2
+ */
+FIXTURE_VARIANT_ADD(hierarchy, allow_without_domain) {
+ .domain_both = false,
+ .domain_parent = false,
+ .domain_child = false,
+};
+
+/*
+ * Child domain
+ *
+ * P1--. P1 -> P2 : allow
+ * \ P2 -> P1 : deny
+ * .'-----.
+ * | P2 |
+ * '------'
+ */
+FIXTURE_VARIANT_ADD(hierarchy, allow_with_one_domain) {
+ .domain_both = false,
+ .domain_parent = false,
+ .domain_child = true,
+};
+
+/*
+ * Parent domain
+ * .------.
+ * | P1 --. P1 -> P2 : deny
+ * '------' \ P2 -> P1 : allow
+ * '
+ * P2
+ */
+FIXTURE_VARIANT_ADD(hierarchy, deny_with_parent_domain) {
+ .domain_both = false,
+ .domain_parent = true,
+ .domain_child = false,
+};
+
+/*
+ * Parent + child domain (siblings)
+ * .------.
+ * | P1 ---. P1 -> P2 : deny
+ * '------' \ P2 -> P1 : deny
+ * .---'--.
+ * | P2 |
+ * '------'
+ */
+FIXTURE_VARIANT_ADD(hierarchy, deny_with_sibling_domain) {
+ .domain_both = false,
+ .domain_parent = true,
+ .domain_child = true,
+};
+
+/*
+ * Same domain (inherited)
+ * .-------------.
+ * | P1----. | P1 -> P2 : allow
+ * | \ | P2 -> P1 : allow
+ * | ' |
+ * | P2 |
+ * '-------------'
+ */
+FIXTURE_VARIANT_ADD(hierarchy, allow_sibling_domain) {
+ .domain_both = true,
+ .domain_parent = false,
+ .domain_child = false,
+};
+
+/*
+ * Inherited + child domain
+ * .-----------------.
+ * | P1----. | P1 -> P2 : allow
+ * | \ | P2 -> P1 : deny
+ * | .-'----. |
+ * | | P2 | |
+ * | '------' |
+ * '-----------------'
+ */
+FIXTURE_VARIANT_ADD(hierarchy, allow_with_nested_domain) {
+ .domain_both = true,
+ .domain_parent = false,
+ .domain_child = true,
+};
+
+/*
+ * Inherited + parent domain
+ * .-----------------.
+ * |.------. | P1 -> P2 : deny
+ * || P1 ----. | P2 -> P1 : allow
+ * |'------' \ |
+ * | ' |
+ * | P2 |
+ * '-----------------'
+ */
+FIXTURE_VARIANT_ADD(hierarchy, deny_with_nested_and_parent_domain) {
+ .domain_both = true,
+ .domain_parent = true,
+ .domain_child = false,
+};
+
+/*
+ * Inherited + parent and child domain (siblings)
+ * .-----------------.
+ * | .------. | P1 -> P2 : deny
+ * | | P1 . | P2 -> P1 : deny
+ * | '------'\ |
+ * | \ |
+ * | .--'---. |
+ * | | P2 | |
+ * | '------' |
+ * '-----------------'
+ */
+FIXTURE_VARIANT_ADD(hierarchy, deny_with_forked_domain) {
+ .domain_both = true,
+ .domain_parent = true,
+ .domain_child = true,
+};
+
+FIXTURE_SETUP(hierarchy)
+{ }
+
+FIXTURE_TEARDOWN(hierarchy)
+{ }
+
+/* Tests PTRACE_TRACEME and PTRACE_ATTACH for the parent and child. */
+TEST_F(hierarchy, trace)
+{
+ pid_t child, parent;
+ int status, err_proc_read;
+ int pipe_child[2], pipe_parent[2];
+ char buf_parent;
+ long ret;
+
+ /*
+ * Removes all effective and permitted capabilities to not interfere
+ * with cap_ptrace_access_check() in case of PTRACE_MODE_FSCREDS.
+ */
+ drop_caps(_metadata);
+
+ parent = getpid();
+ ASSERT_EQ(0, pipe2(pipe_child, O_CLOEXEC));
+ ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
+ if (variant->domain_both) {
+ create_domain(_metadata);
+ if (!_metadata->passed)
+ /* Aborts before forking. */
+ return;
+ }
+
+ child = fork();
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ char buf_child;
+
+ ASSERT_EQ(0, close(pipe_parent[1]));
+ ASSERT_EQ(0, close(pipe_child[0]));
+ if (variant->domain_child)
+ create_domain(_metadata);
+
+ /* Waits for the parent to be in a domain, if any. */
+ ASSERT_EQ(1, read(pipe_parent[0], &buf_child, 1));
+
+ /* Tests PTRACE_ATTACH and PTRACE_MODE_READ on the parent. */
+ err_proc_read = test_ptrace_read(parent);
+ ret = ptrace(PTRACE_ATTACH, parent, NULL, 0);
+ if (variant->domain_child) {
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EPERM, errno);
+ EXPECT_EQ(EACCES, err_proc_read);
+ } else {
+ EXPECT_EQ(0, ret);
+ EXPECT_EQ(0, err_proc_read);
+ }
+ if (ret == 0) {
+ ASSERT_EQ(parent, waitpid(parent, &status, 0));
+ ASSERT_EQ(1, WIFSTOPPED(status));
+ ASSERT_EQ(0, ptrace(PTRACE_DETACH, parent, NULL, 0));
+ }
+
+ /* Tests child PTRACE_TRACEME. */
+ ret = ptrace(PTRACE_TRACEME);
+ if (variant->domain_parent) {
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EPERM, errno);
+ } else {
+ EXPECT_EQ(0, ret);
+ }
+
+ /*
+ * Signals that the PTRACE_ATTACH test is done and the
+ * PTRACE_TRACEME test is ongoing.
+ */
+ ASSERT_EQ(1, write(pipe_child[1], ".", 1));
+
+ if (!variant->domain_parent) {
+ ASSERT_EQ(0, raise(SIGSTOP));
+ }
+
+ /* Waits for the parent PTRACE_ATTACH test. */
+ ASSERT_EQ(1, read(pipe_parent[0], &buf_child, 1));
+ _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ return;
+ }
+
+ ASSERT_EQ(0, close(pipe_child[1]));
+ ASSERT_EQ(0, close(pipe_parent[0]));
+ if (variant->domain_parent)
+ create_domain(_metadata);
+
+ /* Signals that the parent is in a domain, if any. */
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+
+ /*
+ * Waits for the child to test PTRACE_ATTACH on the parent and start
+ * testing PTRACE_TRACEME.
+ */
+ ASSERT_EQ(1, read(pipe_child[0], &buf_parent, 1));
+
+ /* Tests child PTRACE_TRACEME. */
+ if (!variant->domain_parent) {
+ ASSERT_EQ(child, waitpid(child, &status, 0));
+ ASSERT_EQ(1, WIFSTOPPED(status));
+ ASSERT_EQ(0, ptrace(PTRACE_DETACH, child, NULL, 0));
+ } else {
+ /* The child should not be traced by the parent. */
+ EXPECT_EQ(-1, ptrace(PTRACE_DETACH, child, NULL, 0));
+ EXPECT_EQ(ESRCH, errno);
+ }
+
+ /* Tests PTRACE_ATTACH and PTRACE_MODE_READ on the child. */
+ err_proc_read = test_ptrace_read(child);
+ ret = ptrace(PTRACE_ATTACH, child, NULL, 0);
+ if (variant->domain_parent) {
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EPERM, errno);
+ EXPECT_EQ(EACCES, err_proc_read);
+ } else {
+ EXPECT_EQ(0, ret);
+ EXPECT_EQ(0, err_proc_read);
+ }
+ if (ret == 0) {
+ ASSERT_EQ(child, waitpid(child, &status, 0));
+ ASSERT_EQ(1, WIFSTOPPED(status));
+ ASSERT_EQ(0, ptrace(PTRACE_DETACH, child, NULL, 0));
+ }
+
+ /* Signals that the parent PTRACE_ATTACH test is done. */
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+ ASSERT_EQ(child, waitpid(child, &status, 0));
+ if (WIFSIGNALED(status) || !WIFEXITED(status) ||
+ WEXITSTATUS(status) != EXIT_SUCCESS)
+ _metadata->passed = 0;
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/landlock/true.c b/tools/testing/selftests/landlock/true.c
new file mode 100644
index 000000000000..3f9ccbf52783
--- /dev/null
+++ b/tools/testing/selftests/landlock/true.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+int main(void)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index a5ce26d548e4..0af84ad48aa7 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -1,6 +1,10 @@
# This mimics the top-level Makefile. We do it explicitly here so that this
# Makefile can operate with or without the kbuild infrastructure.
+ifneq ($(LLVM),)
+CC := clang
+else
CC := $(CROSS_COMPILE)gcc
+endif
ifeq (0,$(MAKELEVEL))
ifeq ($(OUTPUT),)
@@ -74,7 +78,8 @@ ifdef building_out_of_srctree
rsync -aq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
fi
@if [ "X$(TEST_PROGS)" != "X" ]; then \
- $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(OUTPUT)/$(TEST_PROGS)) ; \
+ $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
+ $(addprefix $(OUTPUT)/,$(TEST_PROGS))) ; \
else \
$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS)); \
fi
diff --git a/tools/testing/selftests/lkdtm/.gitignore b/tools/testing/selftests/lkdtm/.gitignore
index f26212605b6b..d4b0be857deb 100644
--- a/tools/testing/selftests/lkdtm/.gitignore
+++ b/tools/testing/selftests/lkdtm/.gitignore
@@ -1,2 +1,3 @@
*.sh
!run.sh
+!stack-entropy.sh
diff --git a/tools/testing/selftests/lkdtm/Makefile b/tools/testing/selftests/lkdtm/Makefile
index 1bcc9ee990eb..c71109ceeb2d 100644
--- a/tools/testing/selftests/lkdtm/Makefile
+++ b/tools/testing/selftests/lkdtm/Makefile
@@ -5,6 +5,7 @@ include ../lib.mk
# NOTE: $(OUTPUT) won't get default value if used before lib.mk
TEST_FILES := tests.txt
+TEST_PROGS := stack-entropy.sh
TEST_GEN_PROGS = $(patsubst %,$(OUTPUT)/%.sh,$(shell awk '{print $$1}' tests.txt | sed -e 's/\#//'))
all: $(TEST_GEN_PROGS)
diff --git a/tools/testing/selftests/lkdtm/stack-entropy.sh b/tools/testing/selftests/lkdtm/stack-entropy.sh
new file mode 100755
index 000000000000..b1b8a5097cbb
--- /dev/null
+++ b/tools/testing/selftests/lkdtm/stack-entropy.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Measure kernel stack entropy by sampling via LKDTM's REPORT_STACK test.
+set -e
+samples="${1:-1000}"
+
+# Capture dmesg continuously since it may fill up depending on sample size.
+log=$(mktemp -t stack-entropy-XXXXXX)
+dmesg --follow >"$log" & pid=$!
+report=-1
+for i in $(seq 1 $samples); do
+ echo "REPORT_STACK" >/sys/kernel/debug/provoke-crash/DIRECT
+ if [ -t 1 ]; then
+ percent=$(( 100 * $i / $samples ))
+ if [ "$percent" -ne "$report" ]; then
+ /bin/echo -en "$percent%\r"
+ report="$percent"
+ fi
+ fi
+done
+kill "$pid"
+
+# Count unique offsets since last run.
+seen=$(tac "$log" | grep -m1 -B"$samples"0 'Starting stack offset' | \
+ grep 'Stack offset' | awk '{print $NF}' | sort | uniq -c | wc -l)
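+# The number of base-2 digits in the count approximates log2 of the number
+# of unique offsets seen.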
+bits=$(echo "obase=2; $seen" | bc | wc -L)
+echo "Bits of stack entropy: $bits"
+rm -f "$log"
+
+# We would expect any functional stack randomization to be at least 5 bits.
+if [ "$bits" -lt 5 ]; then
+ exit 1
+else
+ exit 0
+fi
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 25f198bec0b2..3915bb7bfc39 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -23,6 +23,8 @@ TEST_PROGS += drop_monitor_tests.sh
TEST_PROGS += vrf_route_leaking.sh
TEST_PROGS += bareudp.sh
TEST_PROGS += unicast_extensions.sh
+TEST_PROGS += udpgro_fwd.sh
+TEST_PROGS += veth.sh
TEST_PROGS_EXTENDED := in_netns.sh
TEST_GEN_FILES = socket nettest
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
@@ -37,6 +39,8 @@ TEST_GEN_FILES += ipsec
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
+TEST_FILES := settings
+
KSFT_KHDR_INSTALL := 1
include ../lib.mk
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index d98fb85e201c..49774a8a7736 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -19,10 +19,39 @@ ret=0
ksft_skip=4
# all tests in this script. Can be overridden with -t option
-IPV4_TESTS="ipv4_fcnal ipv4_grp_fcnal ipv4_withv6_fcnal ipv4_fcnal_runtime ipv4_large_grp ipv4_compat_mode ipv4_fdb_grp_fcnal ipv4_torture"
-IPV6_TESTS="ipv6_fcnal ipv6_grp_fcnal ipv6_fcnal_runtime ipv6_large_grp ipv6_compat_mode ipv6_fdb_grp_fcnal ipv6_torture"
-
-ALL_TESTS="basic ${IPV4_TESTS} ${IPV6_TESTS}"
+IPV4_TESTS="
+ ipv4_fcnal
+ ipv4_grp_fcnal
+ ipv4_res_grp_fcnal
+ ipv4_withv6_fcnal
+ ipv4_fcnal_runtime
+ ipv4_large_grp
+ ipv4_large_res_grp
+ ipv4_compat_mode
+ ipv4_fdb_grp_fcnal
+ ipv4_torture
+ ipv4_res_torture
+"
+
+IPV6_TESTS="
+ ipv6_fcnal
+ ipv6_grp_fcnal
+ ipv6_res_grp_fcnal
+ ipv6_fcnal_runtime
+ ipv6_large_grp
+ ipv6_large_res_grp
+ ipv6_compat_mode
+ ipv6_fdb_grp_fcnal
+ ipv6_torture
+ ipv6_res_torture
+"
+
+ALL_TESTS="
+ basic
+ basic_res
+ ${IPV4_TESTS}
+ ${IPV6_TESTS}
+"
TESTS="${ALL_TESTS}"
VERBOSE=0
PAUSE_ON_FAIL=no
@@ -232,6 +261,19 @@ check_nexthop()
check_output "${out}" "${expected}"
}
+check_nexthop_bucket()
+{
+ local nharg="$1"
+ local expected="$2"
+ local out
+
+ # remove the idle time since we cannot match it
+ out=$($IP nexthop bucket ${nharg} \
+ | sed s/idle_time\ [0-9.]*\ // 2>/dev/null)
+
+ check_output "${out}" "${expected}"
+}
+
check_route()
{
local pfx="$1"
@@ -308,6 +350,25 @@ check_large_grp()
log_test $? 0 "Dump large (x$ecmp) ecmp groups"
}
+check_large_res_grp()
+{
+ local ipv=$1
+ local buckets=$2
+ local ipstr=""
+
+ if [ $ipv -eq 4 ]; then
+ ipstr="172.16.1.2"
+ else
+ ipstr="2001:db8:91::2"
+ fi
+
+ # create a resilient group with $buckets buckets and dump them
+ run_cmd "$IP nexthop add id 100 via $ipstr dev veth1"
+ run_cmd "$IP nexthop add id 1000 group 100 type resilient buckets $buckets"
+ run_cmd "$IP nexthop bucket list"
+ log_test $? 0 "Dump large (x$buckets) nexthop buckets"
+}
+
start_ip_monitor()
{
local mtype=$1
@@ -344,6 +405,15 @@ check_nexthop_fdb_support()
fi
}
+check_nexthop_res_support()
+{
+ $IP nexthop help 2>&1 | grep -q resilient
+ if [ $? -ne 0 ]; then
+ echo "SKIP: iproute2 too old, missing resilient nexthop group support"
+ return $ksft_skip
+ fi
+}
+
ipv6_fdb_grp_fcnal()
{
local rc
@@ -666,6 +736,70 @@ ipv6_grp_fcnal()
log_test $? 2 "Nexthop group can not have a blackhole and another nexthop"
}
+ipv6_res_grp_fcnal()
+{
+ local rc
+
+ echo
+ echo "IPv6 resilient groups functional"
+ echo "--------------------------------"
+
+ check_nexthop_res_support
+ if [ $? -eq $ksft_skip ]; then
+ return $ksft_skip
+ fi
+
+ #
+ # migration of nexthop buckets - equal weights
+ #
+ run_cmd "$IP nexthop add id 62 via 2001:db8:91::2 dev veth1"
+ run_cmd "$IP nexthop add id 63 via 2001:db8:91::3 dev veth1"
+ run_cmd "$IP nexthop add id 102 group 62/63 type resilient buckets 2 idle_timer 0"
+
+ run_cmd "$IP nexthop del id 63"
+ check_nexthop "id 102" \
+ "id 102 group 62 type resilient buckets 2 idle_timer 0 unbalanced_timer 0 unbalanced_time 0"
+ log_test $? 0 "Nexthop group updated when entry is deleted"
+ check_nexthop_bucket "list id 102" \
+ "id 102 index 0 nhid 62 id 102 index 1 nhid 62"
+ log_test $? 0 "Nexthop buckets updated when entry is deleted"
+
+ run_cmd "$IP nexthop add id 63 via 2001:db8:91::3 dev veth1"
+ run_cmd "$IP nexthop replace id 102 group 62/63 type resilient buckets 2 idle_timer 0"
+ check_nexthop "id 102" \
+ "id 102 group 62/63 type resilient buckets 2 idle_timer 0 unbalanced_timer 0 unbalanced_time 0"
+ log_test $? 0 "Nexthop group updated after replace"
+ check_nexthop_bucket "list id 102" \
+ "id 102 index 0 nhid 63 id 102 index 1 nhid 62"
+ log_test $? 0 "Nexthop buckets updated after replace"
+
+ $IP nexthop flush >/dev/null 2>&1
+
+ #
+ # migration of nexthop buckets - unequal weights
+ #
+ run_cmd "$IP nexthop add id 62 via 2001:db8:91::2 dev veth1"
+ run_cmd "$IP nexthop add id 63 via 2001:db8:91::3 dev veth1"
+ run_cmd "$IP nexthop add id 102 group 62,3/63,1 type resilient buckets 4 idle_timer 0"
+
+ run_cmd "$IP nexthop del id 63"
+ check_nexthop "id 102" \
+ "id 102 group 62,3 type resilient buckets 4 idle_timer 0 unbalanced_timer 0 unbalanced_time 0"
+ log_test $? 0 "Nexthop group updated when entry is deleted - nECMP"
+ check_nexthop_bucket "list id 102" \
+ "id 102 index 0 nhid 62 id 102 index 1 nhid 62 id 102 index 2 nhid 62 id 102 index 3 nhid 62"
+ log_test $? 0 "Nexthop buckets updated when entry is deleted - nECMP"
+
+ run_cmd "$IP nexthop add id 63 via 2001:db8:91::3 dev veth1"
+ run_cmd "$IP nexthop replace id 102 group 62,3/63,1 type resilient buckets 4 idle_timer 0"
+ check_nexthop "id 102" \
+ "id 102 group 62,3/63 type resilient buckets 4 idle_timer 0 unbalanced_timer 0 unbalanced_time 0"
+ log_test $? 0 "Nexthop group updated after replace - nECMP"
+ check_nexthop_bucket "list id 102" \
+ "id 102 index 0 nhid 63 id 102 index 1 nhid 62 id 102 index 2 nhid 62 id 102 index 3 nhid 62"
+ log_test $? 0 "Nexthop buckets updated after replace - nECMP"
+}
+
ipv6_fcnal_runtime()
{
local rc
@@ -824,6 +958,22 @@ ipv6_large_grp()
$IP nexthop flush >/dev/null 2>&1
}
+ipv6_large_res_grp()
+{
+ echo
+ echo "IPv6 large resilient group (128k buckets)"
+ echo "-----------------------------------------"
+
+ check_nexthop_res_support
+ if [ $? -eq $ksft_skip ]; then
+ return $ksft_skip
+ fi
+
+ check_large_res_grp 6 $((128 * 1024))
+
+ $IP nexthop flush >/dev/null 2>&1
+}
+
ipv6_del_add_loop1()
{
while :; do
@@ -874,11 +1024,67 @@ ipv6_torture()
sleep 300
kill -9 $pid1 $pid2 $pid3 $pid4 $pid5
+ wait $pid1 $pid2 $pid3 $pid4 $pid5 2>/dev/null
# if we did not crash, success
log_test 0 0 "IPv6 torture test"
}
+ipv6_res_grp_replace_loop()
+{
+ while :; do
+ $IP nexthop replace id 102 group 100/101 type resilient
+ done >/dev/null 2>&1
+}
+
+ipv6_res_torture()
+{
+ local pid1
+ local pid2
+ local pid3
+ local pid4
+ local pid5
+
+ echo
+ echo "IPv6 runtime resilient nexthop group torture"
+ echo "--------------------------------------------"
+
+ check_nexthop_res_support
+ if [ $? -eq $ksft_skip ]; then
+ return $ksft_skip
+ fi
+
+ if [ ! -x "$(command -v mausezahn)" ]; then
+ echo "SKIP: Could not run test; need mausezahn tool"
+ return
+ fi
+
+ run_cmd "$IP nexthop add id 100 via 2001:db8:91::2 dev veth1"
+ run_cmd "$IP nexthop add id 101 via 2001:db8:92::2 dev veth3"
+ run_cmd "$IP nexthop add id 102 group 100/101 type resilient buckets 512 idle_timer 0"
+ run_cmd "$IP route add 2001:db8:101::1 nhid 102"
+ run_cmd "$IP route add 2001:db8:101::2 nhid 102"
+
+ ipv6_del_add_loop1 &
+ pid1=$!
+ ipv6_res_grp_replace_loop &
+ pid2=$!
+ ip netns exec me ping -f 2001:db8:101::1 >/dev/null 2>&1 &
+ pid3=$!
+ ip netns exec me ping -f 2001:db8:101::2 >/dev/null 2>&1 &
+ pid4=$!
+ ip netns exec me mausezahn -6 veth1 \
+ -B 2001:db8:101::2 -A 2001:db8:91::1 -c 0 \
+ -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1 &
+ pid5=$!
+
+ sleep 300
+ kill -9 $pid1 $pid2 $pid3 $pid4 $pid5
+ wait $pid1 $pid2 $pid3 $pid4 $pid5 2>/dev/null
+
+ # if we did not crash, success
+ log_test 0 0 "IPv6 resilient nexthop group torture test"
+}
ipv4_fcnal()
{
@@ -1038,6 +1244,70 @@ ipv4_grp_fcnal()
log_test $? 2 "Nexthop group can not have a blackhole and another nexthop"
}
+ipv4_res_grp_fcnal()
+{
+ local rc
+
+ echo
+ echo "IPv4 resilient groups functional"
+ echo "--------------------------------"
+
+ check_nexthop_res_support
+ if [ $? -eq $ksft_skip ]; then
+ return $ksft_skip
+ fi
+
+ #
+ # migration of nexthop buckets - equal weights
+ #
+ run_cmd "$IP nexthop add id 12 via 172.16.1.2 dev veth1"
+ run_cmd "$IP nexthop add id 13 via 172.16.1.3 dev veth1"
+ run_cmd "$IP nexthop add id 102 group 12/13 type resilient buckets 2 idle_timer 0"
+
+ run_cmd "$IP nexthop del id 13"
+ check_nexthop "id 102" \
+ "id 102 group 12 type resilient buckets 2 idle_timer 0 unbalanced_timer 0 unbalanced_time 0"
+ log_test $? 0 "Nexthop group updated when entry is deleted"
+ check_nexthop_bucket "list id 102" \
+ "id 102 index 0 nhid 12 id 102 index 1 nhid 12"
+ log_test $? 0 "Nexthop buckets updated when entry is deleted"
+
+ run_cmd "$IP nexthop add id 13 via 172.16.1.3 dev veth1"
+ run_cmd "$IP nexthop replace id 102 group 12/13 type resilient buckets 2 idle_timer 0"
+ check_nexthop "id 102" \
+ "id 102 group 12/13 type resilient buckets 2 idle_timer 0 unbalanced_timer 0 unbalanced_time 0"
+ log_test $? 0 "Nexthop group updated after replace"
+ check_nexthop_bucket "list id 102" \
+ "id 102 index 0 nhid 13 id 102 index 1 nhid 12"
+ log_test $? 0 "Nexthop buckets updated after replace"
+
+ $IP nexthop flush >/dev/null 2>&1
+
+ #
+ # migration of nexthop buckets - unequal weights
+ #
+ run_cmd "$IP nexthop add id 12 via 172.16.1.2 dev veth1"
+ run_cmd "$IP nexthop add id 13 via 172.16.1.3 dev veth1"
+ run_cmd "$IP nexthop add id 102 group 12,3/13,1 type resilient buckets 4 idle_timer 0"
+
+ run_cmd "$IP nexthop del id 13"
+ check_nexthop "id 102" \
+ "id 102 group 12,3 type resilient buckets 4 idle_timer 0 unbalanced_timer 0 unbalanced_time 0"
+ log_test $? 0 "Nexthop group updated when entry is deleted - nECMP"
+ check_nexthop_bucket "list id 102" \
+ "id 102 index 0 nhid 12 id 102 index 1 nhid 12 id 102 index 2 nhid 12 id 102 index 3 nhid 12"
+ log_test $? 0 "Nexthop buckets updated when entry is deleted - nECMP"
+
+ run_cmd "$IP nexthop add id 13 via 172.16.1.3 dev veth1"
+ run_cmd "$IP nexthop replace id 102 group 12,3/13,1 type resilient buckets 4 idle_timer 0"
+ check_nexthop "id 102" \
+ "id 102 group 12,3/13 type resilient buckets 4 idle_timer 0 unbalanced_timer 0 unbalanced_time 0"
+ log_test $? 0 "Nexthop group updated after replace - nECMP"
+ check_nexthop_bucket "list id 102" \
+ "id 102 index 0 nhid 13 id 102 index 1 nhid 12 id 102 index 2 nhid 12 id 102 index 3 nhid 12"
+ log_test $? 0 "Nexthop buckets updated after replace - nECMP"
+}
+
ipv4_withv6_fcnal()
{
local lladdr
@@ -1259,6 +1529,22 @@ ipv4_large_grp()
$IP nexthop flush >/dev/null 2>&1
}
+ipv4_large_res_grp()
+{
+ echo
+ echo "IPv4 large resilient group (128k buckets)"
+ echo "-----------------------------------------"
+
+ check_nexthop_res_support
+ if [ $? -eq $ksft_skip ]; then
+ return $ksft_skip
+ fi
+
+ check_large_res_grp 4 $((128 * 1024))
+
+ $IP nexthop flush >/dev/null 2>&1
+}
+
sysctl_nexthop_compat_mode_check()
{
local sysctlname="net.ipv4.nexthop_compat_mode"
@@ -1476,11 +1762,68 @@ ipv4_torture()
sleep 300
kill -9 $pid1 $pid2 $pid3 $pid4 $pid5
+ wait $pid1 $pid2 $pid3 $pid4 $pid5 2>/dev/null
# if we did not crash, success
log_test 0 0 "IPv4 torture test"
}
+ipv4_res_grp_replace_loop()
+{
+ while :; do
+ $IP nexthop replace id 102 group 100/101 type resilient
+ done >/dev/null 2>&1
+}
+
+ipv4_res_torture()
+{
+ local pid1
+ local pid2
+ local pid3
+ local pid4
+ local pid5
+
+ echo
+ echo "IPv4 runtime resilient nexthop group torture"
+ echo "--------------------------------------------"
+
+ check_nexthop_res_support
+ if [ $? -eq $ksft_skip ]; then
+ return $ksft_skip
+ fi
+
+ if [ ! -x "$(command -v mausezahn)" ]; then
+ echo "SKIP: Could not run test; need mausezahn tool"
+ return
+ fi
+
+ run_cmd "$IP nexthop add id 100 via 172.16.1.2 dev veth1"
+ run_cmd "$IP nexthop add id 101 via 172.16.2.2 dev veth3"
+ run_cmd "$IP nexthop add id 102 group 100/101 type resilient buckets 512 idle_timer 0"
+ run_cmd "$IP route add 172.16.101.1 nhid 102"
+ run_cmd "$IP route add 172.16.101.2 nhid 102"
+
+ ipv4_del_add_loop1 &
+ pid1=$!
+ ipv4_res_grp_replace_loop &
+ pid2=$!
+ ip netns exec me ping -f 172.16.101.1 >/dev/null 2>&1 &
+ pid3=$!
+ ip netns exec me ping -f 172.16.101.2 >/dev/null 2>&1 &
+ pid4=$!
+ ip netns exec me mausezahn veth1 \
+ -B 172.16.101.2 -A 172.16.1.1 -c 0 \
+ -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1 &
+ pid5=$!
+
+ sleep 300
+ kill -9 $pid1 $pid2 $pid3 $pid4 $pid5
+ wait $pid1 $pid2 $pid3 $pid4 $pid5 2>/dev/null
+
+ # if we did not crash, success
+ log_test 0 0 "IPv4 resilient nexthop group torture test"
+}
+
basic()
{
echo
@@ -1590,6 +1933,219 @@ basic()
log_test $? 2 "Nexthop group and blackhole"
$IP nexthop flush >/dev/null 2>&1
+
+ # Test to ensure that flushing with a multi-part nexthop dump works as
+ # expected.
+ local batch_file=$(mktemp)
+
+ for i in $(seq 1 $((64 * 1024))); do
+ echo "nexthop add id $i blackhole" >> $batch_file
+ done
+
+ $IP -b $batch_file
+ $IP nexthop flush >/dev/null 2>&1
+ [[ $($IP nexthop | wc -l) -eq 0 ]]
+ log_test $? 0 "Large scale nexthop flushing"
+
+ rm $batch_file
+}
+
+check_nexthop_buckets_balance()
+{
+ local nharg=$1; shift
+ local ret
+
+ while (($# > 0)); do
+ local selector=$1; shift
+ local condition=$1; shift
+ local count
+
+ count=$($IP -j nexthop bucket ${nharg} ${selector} | jq length)
+ (( $count $condition ))
+ ret=$?
+ if ((ret != 0)); then
+ return $ret
+ fi
+ done
+
+ return 0
+}
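+
+# A hypothetical invocation, verifying that group 101 keeps a 4:6 bucket
+# split between its two member nexthops:
+#
+#	check_nexthop_buckets_balance "list id 101" \
+#		"nhid 1" "== 4" \
+#		"nhid 2" "== 6"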
+
+basic_res()
+{
+ echo
+ echo "Basic resilient nexthop group functional tests"
+ echo "----------------------------------------------"
+
+ check_nexthop_res_support
+ if [ $? -eq $ksft_skip ]; then
+ return $ksft_skip
+ fi
+
+ run_cmd "$IP nexthop add id 1 dev veth1"
+
+ #
+ # resilient nexthop group addition
+ #
+
+ run_cmd "$IP nexthop add id 101 group 1 type resilient buckets 8"
+ log_test $? 0 "Add a nexthop group with default parameters"
+
+ run_cmd "$IP nexthop get id 101"
+ check_nexthop "id 101" \
+ "id 101 group 1 type resilient buckets 8 idle_timer 120 unbalanced_timer 0 unbalanced_time 0"
+ log_test $? 0 "Get a nexthop group with default parameters"
+
+ run_cmd "$IP nexthop add id 102 group 1 type resilient
+ buckets 4 idle_timer 100 unbalanced_timer 5"
+ run_cmd "$IP nexthop get id 102"
+ check_nexthop "id 102" \
+ "id 102 group 1 type resilient buckets 4 idle_timer 100 unbalanced_timer 5 unbalanced_time 0"
+ log_test $? 0 "Get a nexthop group with non-default parameters"
+
+ run_cmd "$IP nexthop add id 103 group 1 type resilient buckets 0"
+ log_test $? 2 "Add a nexthop group with 0 buckets"
+
+ #
+ # resilient nexthop group replacement
+ #
+
+ run_cmd "$IP nexthop replace id 101 group 1 type resilient
+ buckets 8 idle_timer 240 unbalanced_timer 80"
+ log_test $? 0 "Replace nexthop group parameters"
+ check_nexthop "id 101" \
+ "id 101 group 1 type resilient buckets 8 idle_timer 240 unbalanced_timer 80 unbalanced_time 0"
+ log_test $? 0 "Get a nexthop group after replacing parameters"
+
+ run_cmd "$IP nexthop replace id 101 group 1 type resilient idle_timer 512"
+ log_test $? 0 "Replace idle timer"
+ check_nexthop "id 101" \
+ "id 101 group 1 type resilient buckets 8 idle_timer 512 unbalanced_timer 80 unbalanced_time 0"
+ log_test $? 0 "Get a nexthop group after replacing idle timer"
+
+ run_cmd "$IP nexthop replace id 101 group 1 type resilient unbalanced_timer 256"
+ log_test $? 0 "Replace unbalanced timer"
+ check_nexthop "id 101" \
+ "id 101 group 1 type resilient buckets 8 idle_timer 512 unbalanced_timer 256 unbalanced_time 0"
+ log_test $? 0 "Get a nexthop group after replacing unbalanced timer"
+
+ run_cmd "$IP nexthop replace id 101 group 1 type resilient"
+ log_test $? 0 "Replace with no parameters"
+ check_nexthop "id 101" \
+ "id 101 group 1 type resilient buckets 8 idle_timer 512 unbalanced_timer 256 unbalanced_time 0"
+ log_test $? 0 "Get a nexthop group after replacing no parameters"
+
+ run_cmd "$IP nexthop replace id 101 group 1"
+ log_test $? 2 "Replace nexthop group type - implicit"
+
+ run_cmd "$IP nexthop replace id 101 group 1 type mpath"
+ log_test $? 2 "Replace nexthop group type - explicit"
+
+ run_cmd "$IP nexthop replace id 101 group 1 type resilient buckets 1024"
+ log_test $? 2 "Replace number of nexthop buckets"
+
+ check_nexthop "id 101" \
+ "id 101 group 1 type resilient buckets 8 idle_timer 512 unbalanced_timer 256 unbalanced_time 0"
+ log_test $? 0 "Get a nexthop group after replacing with invalid parameters"
+
+ #
+ # resilient nexthop buckets dump
+ #
+
+ $IP nexthop flush >/dev/null 2>&1
+ run_cmd "$IP nexthop add id 1 dev veth1"
+ run_cmd "$IP nexthop add id 2 dev veth3"
+ run_cmd "$IP nexthop add id 101 group 1/2 type resilient buckets 4"
+ run_cmd "$IP nexthop add id 201 group 1/2"
+
+ check_nexthop_bucket "" \
+ "id 101 index 0 nhid 2 id 101 index 1 nhid 2 id 101 index 2 nhid 1 id 101 index 3 nhid 1"
+ log_test $? 0 "Dump all nexthop buckets"
+
+ check_nexthop_bucket "list id 101" \
+ "id 101 index 0 nhid 2 id 101 index 1 nhid 2 id 101 index 2 nhid 1 id 101 index 3 nhid 1"
+ log_test $? 0 "Dump all nexthop buckets in a group"
+
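+ # The buckets were only just allocated, so each one is expected to
+ # report a positive idle time of less than two seconds.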
+ (( $($IP -j nexthop bucket list id 101 |
+ jq '[.[] | select(.bucket.idle_time > 0 and
+ .bucket.idle_time < 2)] | length') == 4 ))
+ log_test $? 0 "All nexthop buckets report a positive near-zero idle time"
+
+ check_nexthop_bucket "list dev veth1" \
+ "id 101 index 2 nhid 1 id 101 index 3 nhid 1"
+ log_test $? 0 "Dump all nexthop buckets with a specific nexthop device"
+
+ check_nexthop_bucket "list nhid 2" \
+ "id 101 index 0 nhid 2 id 101 index 1 nhid 2"
+ log_test $? 0 "Dump all nexthop buckets with a specific nexthop identifier"
+
+ run_cmd "$IP nexthop bucket list id 111"
+ log_test $? 2 "Dump all nexthop buckets in a non-existent group"
+
+ run_cmd "$IP nexthop bucket list id 201"
+ log_test $? 2 "Dump all nexthop buckets in a non-resilient group"
+
+ run_cmd "$IP nexthop bucket list dev bla"
+ log_test $? 255 "Dump all nexthop buckets using a non-existent device"
+
+ run_cmd "$IP nexthop bucket list groups"
+ log_test $? 255 "Dump all nexthop buckets with invalid 'groups' keyword"
+
+ run_cmd "$IP nexthop bucket list fdb"
+ log_test $? 255 "Dump all nexthop buckets with invalid 'fdb' keyword"
+
+ #
+ # resilient nexthop buckets get requests
+ #
+
+ check_nexthop_bucket "get id 101 index 0" "id 101 index 0 nhid 2"
+ log_test $? 0 "Get a valid nexthop bucket"
+
+ run_cmd "$IP nexthop bucket get id 101 index 999"
+ log_test $? 2 "Get a nexthop bucket with valid group, but invalid index"
+
+ run_cmd "$IP nexthop bucket get id 201 index 0"
+ log_test $? 2 "Get a nexthop bucket from a non-resilient group"
+
+ run_cmd "$IP nexthop bucket get id 999 index 0"
+ log_test $? 2 "Get a nexthop bucket from a non-existent group"
+
+ #
+ # tests for bucket migration
+ #
+
+ $IP nexthop flush >/dev/null 2>&1
+
+ run_cmd "$IP nexthop add id 1 dev veth1"
+ run_cmd "$IP nexthop add id 2 dev veth3"
+ run_cmd "$IP nexthop add id 101
+ group 1/2 type resilient buckets 10
+ idle_timer 1 unbalanced_timer 20"
+
+ check_nexthop_buckets_balance "list id 101" \
+ "nhid 1" "== 5" \
+ "nhid 2" "== 5"
+ log_test $? 0 "Initial bucket allocation"
+
+ run_cmd "$IP nexthop replace id 101
+ group 1,2/2,3 type resilient"
+ check_nexthop_buckets_balance "list id 101" \
+ "nhid 1" "== 4" \
+ "nhid 2" "== 6"
+ log_test $? 0 "Bucket allocation after replace"
+
+ # Check that an increase in the idle timer does not make buckets appear busy.
+ run_cmd "$IP nexthop replace id 101
+ group 1,2/2,3 type resilient
+ idle_timer 10"
+ run_cmd "$IP nexthop replace id 101
+ group 1/2 type resilient"
+ check_nexthop_buckets_balance "list id 101" \
+ "nhid 1" "== 5" \
+ "nhid 2" "== 5"
+ log_test $? 0 "Buckets migrated after idle timer change"
+
+ $IP nexthop flush >/dev/null 2>&1
}
################################################################################
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 2b5707738609..76d9487fb03c 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -9,7 +9,7 @@ ret=0
ksft_skip=4
# all tests in this script. Can be overridden with -t option
-TESTS="unregister down carrier nexthop suppress ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw rp_filter ipv4_del_addr"
+TESTS="unregister down carrier nexthop suppress ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw rp_filter ipv4_del_addr ipv4_mangle ipv6_mangle"
VERBOSE=0
PAUSE_ON_FAIL=no
@@ -1653,6 +1653,154 @@ ipv4_route_v6_gw_test()
route_cleanup
}
+socat_check()
+{
+ if [ ! -x "$(command -v socat)" ]; then
+ echo "socat command not found. Skipping test"
+ return 1
+ fi
+
+ return 0
+}
+
+iptables_check()
+{
+ iptables -t mangle -L OUTPUT &> /dev/null
+ if [ $? -ne 0 ]; then
+ echo "iptables configuration not supported. Skipping test"
+ return 1
+ fi
+
+ return 0
+}
+
+ip6tables_check()
+{
+ ip6tables -t mangle -L OUTPUT &> /dev/null
+ if [ $? -ne 0 ]; then
+ echo "ip6tables configuration not supported. Skipping test"
+ return 1
+ fi
+
+ return 0
+}
+
+ipv4_mangle_test()
+{
+ local rc
+
+ echo
+ echo "IPv4 mangling tests"
+
+ socat_check || return 1
+ iptables_check || return 1
+
+ route_setup
+ sleep 2
+
+ local tmp_file=$(mktemp)
+ ip netns exec ns2 socat UDP4-LISTEN:54321,fork $tmp_file &
+
+ # Add a FIB rule and a route that will direct our connection to the
+ # listening server.
+ $IP rule add pref 100 ipproto udp sport 12345 dport 54321 table 123
+ $IP route add table 123 172.16.101.0/24 dev veth1
+
+ # Add an unreachable route to the main table that will block our
+ # connection in case the FIB rule is not hit.
+ $IP route add unreachable 172.16.101.2/32
+
+ run_cmd "echo a | $NS_EXEC socat STDIN UDP4:172.16.101.2:54321,sourceport=12345"
+ log_test $? 0 " Connection with correct parameters"
+
+ run_cmd "echo a | $NS_EXEC socat STDIN UDP4:172.16.101.2:54321,sourceport=11111"
+ log_test $? 1 " Connection with incorrect parameters"
+
+ # Add a mangling rule and make sure connection is still successful.
+ $NS_EXEC iptables -t mangle -A OUTPUT -j MARK --set-mark 1
+
+ run_cmd "echo a | $NS_EXEC socat STDIN UDP4:172.16.101.2:54321,sourceport=12345"
+ log_test $? 0 " Connection with correct parameters - mangling"
+
+ # Delete the mangling rule and make sure connection is still
+ # successful.
+ $NS_EXEC iptables -t mangle -D OUTPUT -j MARK --set-mark 1
+
+ run_cmd "echo a | $NS_EXEC socat STDIN UDP4:172.16.101.2:54321,sourceport=12345"
+ log_test $? 0 " Connection with correct parameters - no mangling"
+
+ # Verify connections were indeed successful on server side.
+ [[ $(cat $tmp_file | wc -l) -eq 3 ]]
+ log_test $? 0 " Connection check - server side"
+
+ $IP route del unreachable 172.16.101.2/32
+ $IP route del table 123 172.16.101.0/24 dev veth1
+ $IP rule del pref 100
+
+ { kill %% && wait %%; } 2>/dev/null
+ rm $tmp_file
+
+ route_cleanup
+}
+
+ipv6_mangle_test()
+{
+ local rc
+
+ echo
+ echo "IPv6 mangling tests"
+
+ socat_check || return 1
+ ip6tables_check || return 1
+
+ route_setup
+ sleep 2
+
+ local tmp_file=$(mktemp)
+ ip netns exec ns2 socat UDP6-LISTEN:54321,fork $tmp_file &
+
+ # Add a FIB rule and a route that will direct our connection to the
+ # listening server.
+ $IP -6 rule add pref 100 ipproto udp sport 12345 dport 54321 table 123
+ $IP -6 route add table 123 2001:db8:101::/64 dev veth1
+
+ # Add an unreachable route to the main table that will block our
+ # connection in case the FIB rule is not hit.
+ $IP -6 route add unreachable 2001:db8:101::2/128
+
+ run_cmd "echo a | $NS_EXEC socat STDIN UDP6:[2001:db8:101::2]:54321,sourceport=12345"
+ log_test $? 0 " Connection with correct parameters"
+
+ run_cmd "echo a | $NS_EXEC socat STDIN UDP6:[2001:db8:101::2]:54321,sourceport=11111"
+ log_test $? 1 " Connection with incorrect parameters"
+
+ # Add a mangling rule and make sure connection is still successful.
+ $NS_EXEC ip6tables -t mangle -A OUTPUT -j MARK --set-mark 1
+
+ run_cmd "echo a | $NS_EXEC socat STDIN UDP6:[2001:db8:101::2]:54321,sourceport=12345"
+ log_test $? 0 " Connection with correct parameters - mangling"
+
+ # Delete the mangling rule and make sure connection is still
+ # successful.
+ $NS_EXEC ip6tables -t mangle -D OUTPUT -j MARK --set-mark 1
+
+ run_cmd "echo a | $NS_EXEC socat STDIN UDP6:[2001:db8:101::2]:54321,sourceport=12345"
+ log_test $? 0 " Connection with correct parameters - no mangling"
+
+ # Verify connections were indeed successful on server side.
+ [[ $(cat $tmp_file | wc -l) -eq 3 ]]
+ log_test $? 0 " Connection check - server side"
+
+ $IP -6 route del unreachable 2001:db8:101::2/128
+ $IP -6 route del table 123 2001:db8:101::/64 dev veth1
+ $IP -6 rule del pref 100
+
+ { kill %% && wait %%; } 2>/dev/null
+ rm $tmp_file
+
+ route_cleanup
+}
+
################################################################################
# usage
@@ -1725,6 +1873,8 @@ do
ipv6_route_metrics) ipv6_route_metrics_test;;
ipv4_route_metrics) ipv4_route_metrics_test;;
ipv4_route_v6_gw) ipv4_route_v6_gw_test;;
+ ipv4_mangle) ipv4_mangle_test;;
+ ipv6_mangle) ipv6_mangle_test;;
help) echo "Test names: $TESTS"; exit 0;;
esac
diff --git a/tools/testing/selftests/net/forwarding/dual_vxlan_bridge.sh b/tools/testing/selftests/net/forwarding/dual_vxlan_bridge.sh
new file mode 100755
index 000000000000..5148d97a5df8
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/dual_vxlan_bridge.sh
@@ -0,0 +1,366 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +--------------------+ +----------------------+
+# | H1 (vrf) | | H2 (vrf) |
+# | + h1.10 | | + h2.20 |
+# | | 192.0.2.1/28 | | | 192.0.2.2/28 |
+# | | | | | |
+# | + $h1 | | + $h2 |
+# | | | | | |
+# +----|---------------+ +--|-------------------+
+# | |
+# +----|--------------------------------------------------|--------------------+
+# | SW | | |
+# | +--|-------------------------------+ +----------------|------------------+ |
+# | | + $swp1 BR1 (802.1ad) | | BR2 (802.1d) + $swp2 | |
+# | | vid 100 pvid untagged | | | | |
+# | | | | + $swp2.20 | |
+# | | | | | |
+# | | + vx100 (vxlan) | | + vx200 (vxlan) | |
+# | | local 192.0.2.17 | | local 192.0.2.17 | |
+# | | remote 192.0.2.34 | | remote 192.0.2.50 | |
+# | | id 1000 dstport $VXPORT | | id 2000 dstport $VXPORT | |
+# | | vid 100 pvid untagged | | | |
+# | +--------------------------------- + +-----------------------------------+ |
+# | |
+# | 192.0.2.32/28 via 192.0.2.18 |
+# | 192.0.2.48/28 via 192.0.2.18 |
+# | |
+# | + $rp1 |
+# | | 192.0.2.17/28 |
+# +----|-----------------------------------------------------------------------+
+# |
+# +----|--------------------------------------------------------+
+# | | VRP2 (vrf) |
+# | + $rp2 |
+# | 192.0.2.18/28 |
+# | | (maybe) HW
+# =============================================================================
+# | | (likely) SW
+# | + v1 (veth) + v3 (veth) |
+# | | 192.0.2.33/28 | 192.0.2.49/28 |
+# +----|---------------------------------------|----------------+
+# | |
+# +----|------------------------------+ +----|------------------------------+
+# | + v2 (veth) NS1 (netns) | | + v4 (veth) NS2 (netns) |
+# | 192.0.2.34/28 | | 192.0.2.50/28 |
+# | | | |
+# | 192.0.2.16/28 via 192.0.2.33 | | 192.0.2.16/28 via 192.0.2.49 |
+# | 192.0.2.50/32 via 192.0.2.33 | | 192.0.2.34/32 via 192.0.2.49 |
+# | | | |
+# | +-------------------------------+ | | +-------------------------------+ |
+# | | BR3 (802.1ad) | | | | BR3 (802.1d) | |
+# | | + vx100 (vxlan) | | | | + vx200 (vxlan) | |
+# | | local 192.0.2.34 | | | | local 192.0.2.50 | |
+# | | remote 192.0.2.17 | | | | remote 192.0.2.17 | |
+# | | remote 192.0.2.50 | | | | remote 192.0.2.34 | |
+# | | id 1000 dstport $VXPORT | | | | id 2000 dstport $VXPORT | |
+# | | vid 100 pvid untagged | | | | | |
+# | | | | | | + w1.20 | |
+# | | | | | | | | |
+# | | + w1 (veth) | | | | + w1 (veth) | |
+# | | | vid 100 pvid untagged | | | | | | |
+# | +--|----------------------------+ | | +--|----------------------------+ |
+# | | | | | |
+# | +--|----------------------------+ | | +--|----------------------------+ |
+# | | | VW2 (vrf) | | | | | VW2 (vrf) | |
+# | | + w2 (veth) | | | | + w2 (veth) | |
+# | | | | | | | | | |
+# | | | | | | | | | |
+# | | + w2.10 | | | | + w2.20 | |
+# | | 192.0.2.3/28 | | | | 192.0.2.4/28 | |
+# | +-------------------------------+ | | +-------------------------------+ |
+# +-----------------------------------+ +-----------------------------------+
+
+: ${VXPORT:=4789}
+export VXPORT
+
+: ${ALL_TESTS:="
+ ping_ipv4
+ "}
+
+NUM_NETIFS=6
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1
+ tc qdisc add dev $h1 clsact
+ vlan_create $h1 10 v$h1 192.0.2.1/28
+}
+
+h1_destroy()
+{
+ vlan_destroy $h1 10
+ tc qdisc del dev $h1 clsact
+ simple_if_fini $h1
+}
+
+h2_create()
+{
+ simple_if_init $h2
+ tc qdisc add dev $h2 clsact
+ vlan_create $h2 20 v$h2 192.0.2.2/28
+}
+
+h2_destroy()
+{
+ vlan_destroy $h2 20
+ tc qdisc del dev $h2 clsact
+ simple_if_fini $h2
+}
+
+rp1_set_addr()
+{
+ ip address add dev $rp1 192.0.2.17/28
+
+ ip route add 192.0.2.32/28 nexthop via 192.0.2.18
+ ip route add 192.0.2.48/28 nexthop via 192.0.2.18
+}
+
+rp1_unset_addr()
+{
+ ip route del 192.0.2.48/28 nexthop via 192.0.2.18
+ ip route del 192.0.2.32/28 nexthop via 192.0.2.18
+
+ ip address del dev $rp1 192.0.2.17/28
+}
+
+switch_create()
+{
+ #### BR1 ####
+ ip link add name br1 type bridge vlan_filtering 1 \
+ vlan_protocol 802.1ad vlan_default_pvid 0 mcast_snooping 0
+ # Make sure the bridge uses the MAC address of the local port and not
+ # that of the VxLAN device.
+ ip link set dev br1 address $(mac_get $swp1)
+ ip link set dev br1 up
+
+ #### BR2 ####
+ ip link add name br2 type bridge vlan_filtering 0 mcast_snooping 0
+ # Make sure the bridge uses the MAC address of the local port and not
+ # that of the VxLAN device.
+ ip link set dev br2 address $(mac_get $swp2)
+ ip link set dev br2 up
+
+ ip link set dev $rp1 up
+ rp1_set_addr
+
+ #### VX100 ####
+ ip link add name vx100 type vxlan id 1000 local 192.0.2.17 \
+ dstport "$VXPORT" nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx100 up
+
+ ip link set dev vx100 master br1
+ bridge vlan add vid 100 dev vx100 pvid untagged
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ bridge vlan add vid 100 dev $swp1 pvid untagged
+
+ #### VX200 ####
+ ip link add name vx200 type vxlan id 2000 local 192.0.2.17 \
+ dstport "$VXPORT" nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx200 up
+
+ ip link set dev vx200 master br2
+
+ ip link set dev $swp2 up
+ ip link add name $swp2.20 link $swp2 type vlan id 20
+ ip link set dev $swp2.20 master br2
+ ip link set dev $swp2.20 up
+
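+ # All-zero FDB entries make each VxLAN device flood BUM traffic
+ # (broadcast, unknown unicast, multicast) to its remote VTEP.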
+ bridge fdb append dev vx100 00:00:00:00:00:00 dst 192.0.2.34 self
+ bridge fdb append dev vx200 00:00:00:00:00:00 dst 192.0.2.50 self
+}
+
+switch_destroy()
+{
+ bridge fdb del dev vx200 00:00:00:00:00:00 dst 192.0.2.50 self
+ bridge fdb del dev vx100 00:00:00:00:00:00 dst 192.0.2.34 self
+
+ ip link set dev vx200 nomaster
+ ip link set dev vx200 down
+ ip link del dev vx200
+
+ ip link del dev $swp2.20
+ ip link set dev $swp2 down
+ ip link set dev $swp2 nomaster
+
+ bridge vlan del vid 100 dev $swp1
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ ip link set dev vx100 nomaster
+ ip link set dev vx100 down
+ ip link del dev vx100
+
+ rp1_unset_addr
+ ip link set dev $rp1 down
+
+ ip link set dev br2 down
+ ip link del dev br2
+
+ ip link set dev br1 down
+ ip link del dev br1
+}
+
+vrp2_create()
+{
+ simple_if_init $rp2 192.0.2.18/28
+ __simple_if_init v1 v$rp2 192.0.2.33/28
+ __simple_if_init v3 v$rp2 192.0.2.49/28
+ tc qdisc add dev v1 clsact
+}
+
+vrp2_destroy()
+{
+ tc qdisc del dev v1 clsact
+ __simple_if_fini v3 192.0.2.49/28
+ __simple_if_fini v1 192.0.2.33/28
+ simple_if_fini $rp2 192.0.2.18/28
+}
+
+ns_init_common()
+{
+ local in_if=$1; shift
+ local in_addr=$1; shift
+ local other_in_addr=$1; shift
+ local vxlan_name=$1; shift
+ local vxlan_id=$1; shift
+ local vlan_id=$1; shift
+ local host_addr=$1; shift
+ local nh_addr=$1; shift
+
+ ip link set dev $in_if up
+ ip address add dev $in_if $in_addr/28
+ tc qdisc add dev $in_if clsact
+
+ ip link add name br3 type bridge vlan_filtering 0
+ ip link set dev br3 up
+
+ ip link add name w1 type veth peer name w2
+
+ ip link set dev w1 master br3
+ ip link set dev w1 up
+
+ ip link add name $vxlan_name type vxlan id $vxlan_id local $in_addr \
+ dstport "$VXPORT"
+ ip link set dev $vxlan_name up
+ bridge fdb append dev $vxlan_name 00:00:00:00:00:00 dst 192.0.2.17 self
+ bridge fdb append dev $vxlan_name 00:00:00:00:00:00 dst $other_in_addr self
+
+ ip link set dev $vxlan_name master br3
+ tc qdisc add dev $vxlan_name clsact
+
+ simple_if_init w2
+ vlan_create w2 $vlan_id vw2 $host_addr/28
+
+ ip route add 192.0.2.16/28 nexthop via $nh_addr
+ ip route add $other_in_addr/32 nexthop via $nh_addr
+}
+export -f ns_init_common
+
+ns1_create()
+{
+ ip netns add ns1
+ ip link set dev v2 netns ns1
+ in_ns ns1 \
+ ns_init_common v2 192.0.2.34 192.0.2.50 vx100 1000 10 192.0.2.3 \
+ 192.0.2.33
+
+ in_ns ns1 bridge vlan add vid 100 dev vx100 pvid untagged
+}
+
+ns1_destroy()
+{
+ ip netns exec ns1 ip link set dev v2 netns 1
+ ip netns del ns1
+}
+
+ns2_create()
+{
+ ip netns add ns2
+ ip link set dev v4 netns ns2
+ in_ns ns2 \
+ ns_init_common v4 192.0.2.50 192.0.2.34 vx200 2000 20 192.0.2.4 \
+ 192.0.2.49
+
+ in_ns ns2 ip link add name w1.20 link w1 type vlan id 20
+ in_ns ns2 ip link set dev w1.20 master br3
+ in_ns ns2 ip link set dev w1.20 up
+}
+
+ns2_destroy()
+{
+ ip netns exec ns2 ip link set dev v4 netns 1
+ ip netns del ns2
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp1=${NETIFS[p5]}
+ rp2=${NETIFS[p6]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+ switch_create
+
+ ip link add name v1 type veth peer name v2
+ ip link add name v3 type veth peer name v4
+ vrp2_create
+ ns1_create
+ ns2_create
+
+ r1_mac=$(in_ns ns1 mac_get w2)
+ r2_mac=$(in_ns ns2 mac_get w2)
+ h2_mac=$(mac_get $h2)
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ns2_destroy
+ ns1_destroy
+ vrp2_destroy
+ ip link del dev v3
+ ip link del dev v1
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.3 ": local->remote 1 through VxLAN with an 802.1ad bridge"
+ ping_test $h2 192.0.2.4 ": local->remote 2 through VxLAN with an 802.1d bridge"
+}
+
+test_all()
+{
+ echo "Running tests with UDP port $VXPORT"
+ tests_run
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+test_all
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/fib_offload_lib.sh b/tools/testing/selftests/net/forwarding/fib_offload_lib.sh
index 66496659bea7..e134a5f529c9 100644
--- a/tools/testing/selftests/net/forwarding/fib_offload_lib.sh
+++ b/tools/testing/selftests/net/forwarding/fib_offload_lib.sh
@@ -224,7 +224,7 @@ fib_ipv4_plen_test()
ip -n $ns link set dev dummy1 up
# Add two routes with the same key and different prefix length and
- # make sure both are in hardware. It can be verfied that both are
+ # make sure both are in hardware. It can be verified that both are
# sharing the same leaf by checking the /proc/net/fib_trie
ip -n $ns route add 192.0.2.0/24 dev dummy1
ip -n $ns route add 192.0.2.0/25 dev dummy1
diff --git a/tools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh b/tools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh
new file mode 100755
index 000000000000..088b65e64d66
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh
@@ -0,0 +1,361 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test traffic distribution when a wECMP route forwards traffic to two GRE
+# tunnels.
+#
+# +-------------------------+
+# | H1 |
+# | $h1 + |
+# | 192.0.2.1/28 | |
+# | 2001:db8:1::1/64 | |
+# +-------------------|-----+
+# |
+# +-------------------|------------------------+
+# | SW1 | |
+# | $ol1 + |
+# | 192.0.2.2/28 |
+# | 2001:db8:1::2/64 |
+# | |
+# | + g1a (gre) + g1b (gre) |
+# | loc=192.0.2.65 loc=192.0.2.81 |
+# | rem=192.0.2.66 --. rem=192.0.2.82 --. |
+# | tos=inherit | tos=inherit | |
+# | .------------------' | |
+# | | .------------------' |
+# | v v |
+# | + $ul1.111 (vlan) + $ul1.222 (vlan) |
+# | | 192.0.2.129/28 | 192.0.2.145/28 |
+# | \ / |
+# | \________________/ |
+# | | |
+# | + $ul1 |
+# +------------|-------------------------------+
+# |
+# +------------|-------------------------------+
+# | SW2 + $ul2 |
+# | _______|________ |
+# | / \ |
+# | / \ |
+# | + $ul2.111 (vlan) + $ul2.222 (vlan) |
+# | ^ 192.0.2.130/28 ^ 192.0.2.146/28 |
+# | | | |
+# | | '------------------. |
+# | '------------------. | |
+# | + g2a (gre) | + g2b (gre) | |
+# | loc=192.0.2.66 | loc=192.0.2.82 | |
+# | rem=192.0.2.65 --' rem=192.0.2.81 --' |
+# | tos=inherit tos=inherit |
+# | |
+# | $ol2 + |
+# | 192.0.2.17/28 | |
+# | 2001:db8:2::1/64 | |
+# +-------------------|------------------------+
+# |
+# +-------------------|-----+
+# | H2 | |
+# | $h2 + |
+# | 192.0.2.18/28 |
+# | 2001:db8:2::2/64 |
+# +-------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ multipath_ipv4
+ multipath_ipv6
+ multipath_ipv6_l4
+"
+
+NUM_NETIFS=6
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+ ip route add vrf v$h1 192.0.2.16/28 via 192.0.2.2
+ ip route add vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+ ip route del vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2
+ ip route del vrf v$h1 192.0.2.16/28 via 192.0.2.2
+ simple_if_fini $h1 192.0.2.1/28
+}
+
+sw1_create()
+{
+ simple_if_init $ol1 192.0.2.2/28 2001:db8:1::2/64
+ __simple_if_init $ul1 v$ol1
+ vlan_create $ul1 111 v$ol1 192.0.2.129/28
+ vlan_create $ul1 222 v$ol1 192.0.2.145/28
+
+ tunnel_create g1a gre 192.0.2.65 192.0.2.66 tos inherit dev v$ol1
+ __simple_if_init g1a v$ol1 192.0.2.65/32
+ ip route add vrf v$ol1 192.0.2.66/32 via 192.0.2.130
+
+ tunnel_create g1b gre 192.0.2.81 192.0.2.82 tos inherit dev v$ol1
+ __simple_if_init g1b v$ol1 192.0.2.81/32
+ ip route add vrf v$ol1 192.0.2.82/32 via 192.0.2.146
+
+ ip -6 nexthop add id 101 dev g1a
+ ip -6 nexthop add id 102 dev g1b
+ ip nexthop add id 103 group 101/102 type resilient buckets 512 \
+ idle_timer 0
+
+ ip route add vrf v$ol1 192.0.2.16/28 nhid 103
+ ip route add vrf v$ol1 2001:db8:2::/64 nhid 103
+}
+
+sw1_destroy()
+{
+ ip route del vrf v$ol1 2001:db8:2::/64
+ ip route del vrf v$ol1 192.0.2.16/28
+
+ ip nexthop del id 103
+ ip -6 nexthop del id 102
+ ip -6 nexthop del id 101
+
+ ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146
+ __simple_if_fini g1b 192.0.2.81/32
+ tunnel_destroy g1b
+
+ ip route del vrf v$ol1 192.0.2.66/32 via 192.0.2.130
+ __simple_if_fini g1a 192.0.2.65/32
+ tunnel_destroy g1a
+
+ vlan_destroy $ul1 222
+ vlan_destroy $ul1 111
+ __simple_if_fini $ul1
+ simple_if_fini $ol1 192.0.2.2/28 2001:db8:1::2/64
+}
+
+sw2_create()
+{
+ simple_if_init $ol2 192.0.2.17/28 2001:db8:2::1/64
+ __simple_if_init $ul2 v$ol2
+ vlan_create $ul2 111 v$ol2 192.0.2.130/28
+ vlan_create $ul2 222 v$ol2 192.0.2.146/28
+
+ tunnel_create g2a gre 192.0.2.66 192.0.2.65 tos inherit dev v$ol2
+ __simple_if_init g2a v$ol2 192.0.2.66/32
+ ip route add vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+
+ tunnel_create g2b gre 192.0.2.82 192.0.2.81 tos inherit dev v$ol2
+ __simple_if_init g2b v$ol2 192.0.2.82/32
+ ip route add vrf v$ol2 192.0.2.81/32 via 192.0.2.145
+
+ ip -6 nexthop add id 201 dev g2a
+ ip -6 nexthop add id 202 dev g2b
+ ip nexthop add id 203 group 201/202 type resilient buckets 512 \
+ idle_timer 0
+
+ ip route add vrf v$ol2 192.0.2.0/28 nhid 203
+ ip route add vrf v$ol2 2001:db8:1::/64 nhid 203
+
+ tc qdisc add dev $ul2 clsact
+ tc filter add dev $ul2 ingress pref 111 prot 802.1Q \
+ flower vlan_id 111 action pass
+ tc filter add dev $ul2 ingress pref 222 prot 802.1Q \
+ flower vlan_id 222 action pass
+}
+
+sw2_destroy()
+{
+ tc qdisc del dev $ul2 clsact
+
+ ip route del vrf v$ol2 2001:db8:1::/64
+ ip route del vrf v$ol2 192.0.2.0/28
+
+ ip nexthop del id 203
+ ip -6 nexthop del id 202
+ ip -6 nexthop del id 201
+
+ ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145
+ __simple_if_fini g2b 192.0.2.82/32
+ tunnel_destroy g2b
+
+ ip route del vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+ __simple_if_fini g2a 192.0.2.66/32
+ tunnel_destroy g2a
+
+ vlan_destroy $ul2 222
+ vlan_destroy $ul2 111
+ __simple_if_fini $ul2
+ simple_if_fini $ol2 192.0.2.17/28 2001:db8:2::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.18/28 2001:db8:2::2/64
+ ip route add vrf v$h2 192.0.2.0/28 via 192.0.2.17
+ ip route add vrf v$h2 2001:db8:1::/64 via 2001:db8:2::1
+}
+
+h2_destroy()
+{
+ ip route del vrf v$h2 2001:db8:1::/64 via 2001:db8:2::1
+ ip route del vrf v$h2 192.0.2.0/28 via 192.0.2.17
+ simple_if_fini $h2 192.0.2.18/28 2001:db8:2::2/64
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ vrf_prepare
+ h1_create
+ sw1_create
+ sw2_create
+ h2_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ h2_destroy
+ sw2_destroy
+ sw1_destroy
+ h1_destroy
+ vrf_cleanup
+}
+
+multipath4_test()
+{
+ local what=$1; shift
+ local weight1=$1; shift
+ local weight2=$1; shift
+
+ sysctl_set net.ipv4.fib_multipath_hash_policy 1
+ ip nexthop replace id 103 group 101,$weight1/102,$weight2 \
+ type resilient
+
+ local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
+ local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
+
+ ip vrf exec v$h1 \
+ $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
+ -d 1msec -t udp "sp=1024,dp=0-32768"
+
+ local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
+ local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
+
+ local d111=$((t1_111 - t0_111))
+ local d222=$((t1_222 - t0_222))
+ multipath_eval "$what" $weight1 $weight2 $d111 $d222
+
+ ip nexthop replace id 103 group 101/102 type resilient
+ sysctl_restore net.ipv4.fib_multipath_hash_policy
+}
+
+multipath6_test()
+{
+ local what=$1; shift
+ local weight1=$1; shift
+ local weight2=$1; shift
+
+ sysctl_set net.ipv6.fib_multipath_hash_policy 0
+ ip nexthop replace id 103 group 101,$weight1/102,$weight2 \
+ type resilient
+
+ local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
+ local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
+
+ # Generate 16384 echo requests, each with a random flow label.
+ for ((i=0; i < 16384; ++i)); do
+ ip vrf exec v$h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q &> /dev/null
+ done
+
+ local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
+ local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
+
+ local d111=$((t1_111 - t0_111))
+ local d222=$((t1_222 - t0_222))
+ multipath_eval "$what" $weight1 $weight2 $d111 $d222
+
+ ip nexthop replace id 103 group 101/102 type resilient
+ sysctl_restore net.ipv6.fib_multipath_hash_policy
+}
+
+multipath6_l4_test()
+{
+ local what=$1; shift
+ local weight1=$1; shift
+ local weight2=$1; shift
+
+ sysctl_set net.ipv6.fib_multipath_hash_policy 1
+ ip nexthop replace id 103 group 101,$weight1/102,$weight2 \
+ type resilient
+
+ local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
+ local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
+
+ ip vrf exec v$h1 \
+ $MZ $h1 -6 -q -p 64 -A 2001:db8:1::1 -B 2001:db8:2::2 \
+ -d 1msec -t udp "sp=1024,dp=0-32768"
+
+ local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
+ local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
+
+ local d111=$((t1_111 - t0_111))
+ local d222=$((t1_222 - t0_222))
+ multipath_eval "$what" $weight1 $weight2 $d111 $d222
+
+ ip nexthop replace id 103 group 101/102 type resilient
+ sysctl_restore net.ipv6.fib_multipath_hash_policy
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.18
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:2::2
+}
+
+multipath_ipv4()
+{
+ log_info "Running IPv4 multipath tests"
+ multipath4_test "ECMP" 1 1
+ multipath4_test "Weighted MP 2:1" 2 1
+ multipath4_test "Weighted MP 11:45" 11 45
+}
+
+multipath_ipv6()
+{
+ log_info "Running IPv6 multipath tests"
+ multipath6_test "ECMP" 1 1
+ multipath6_test "Weighted MP 2:1" 2 1
+ multipath6_test "Weighted MP 11:45" 11 45
+}
+
+multipath_ipv6_l4()
+{
+ log_info "Running IPv6 L4 hash multipath tests"
+ multipath6_l4_test "ECMP" 1 1
+ multipath6_l4_test "Weighted MP 2:1" 2 1
+ multipath6_l4_test "Weighted MP 11:45" 11 45
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index be71012b8fc5..42e28c983d41 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -353,6 +353,11 @@ wait_for_offload()
"$@" | grep -q offload
}
+wait_for_trap()
+{
+ "$@" | grep -q trap
+}
+
until_counter_is()
{
local expr=$1; shift
@@ -767,6 +772,15 @@ rate()
echo $((8 * (t1 - t0) / interval))
}
+packets_rate()
+{
+ local t0=$1; shift
+ local t1=$1; shift
+ local interval=$1; shift
+
+ echo $(((t1 - t0) / interval))
+}
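+
+# For example, counters of 1000 and then 21000 packets over a 10 second
+# interval: packets_rate 1000 21000 10 prints 2000 (packets per second).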
+
mac_get()
{
local if_name=$1
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
index c02291e9841e..880e3ab9d088 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
@@ -271,7 +271,7 @@ test_span_gre_fdb_roaming()
while ((RET == 0)); do
bridge fdb del dev $swp3 $h3mac vlan 555 master 2>/dev/null
- bridge fdb add dev $swp2 $h3mac vlan 555 master
+ bridge fdb add dev $swp2 $h3mac vlan 555 master static
sleep 1
fail_test_span_gre_dir $tundev ingress
diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
index 13db1cb50e57..6406cd76a19d 100644
--- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
@@ -20,6 +20,13 @@ mirror_uninstall()
tc filter del dev $swp1 $direction pref 1000
}
+is_ipv6()
+{
+ local addr=$1; shift
+
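+ # Deleting every hex digit and colon must leave an empty string for
+ # the address to qualify as IPv6.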
+ [[ -z ${addr//[0-9a-fA-F:]/} ]]
+}
+
mirror_test()
{
local vrf_name=$1; shift
@@ -29,9 +36,17 @@ mirror_test()
local pref=$1; shift
local expect=$1; shift
+ if is_ipv6 $dip; then
+ local proto=-6
+ local type="icmp6 type=128" # Echo request.
+ else
+ local proto=
+ local type="icmp echoreq"
+ fi
+
local t0=$(tc_rule_stats_get $dev $pref)
- $MZ $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
- -c 10 -d 100msec -t icmp type=8
+ $MZ $proto $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
+ -c 10 -d 100msec -t $type
sleep 0.5
local t1=$(tc_rule_stats_get $dev $pref)
local delta=$((t1 - t0))
diff --git a/tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh b/tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh
new file mode 100755
index 000000000000..4898dd4118f1
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh
@@ -0,0 +1,400 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ multipath_test
+"
+NUM_NETIFS=8
+source lib.sh
+
+h1_create()
+{
+ vrf_create "vrf-h1"
+ ip link set dev $h1 master vrf-h1
+
+ ip link set dev vrf-h1 up
+ ip link set dev $h1 up
+
+ ip address add 192.0.2.2/24 dev $h1
+ ip address add 2001:db8:1::2/64 dev $h1
+
+ ip route add 198.51.100.0/24 vrf vrf-h1 nexthop via 192.0.2.1
+ ip route add 2001:db8:2::/64 vrf vrf-h1 nexthop via 2001:db8:1::1
+}
+
+h1_destroy()
+{
+ ip route del 2001:db8:2::/64 vrf vrf-h1
+ ip route del 198.51.100.0/24 vrf vrf-h1
+
+ ip address del 2001:db8:1::2/64 dev $h1
+ ip address del 192.0.2.2/24 dev $h1
+
+ ip link set dev $h1 down
+ vrf_destroy "vrf-h1"
+}
+
+h2_create()
+{
+ vrf_create "vrf-h2"
+ ip link set dev $h2 master vrf-h2
+
+ ip link set dev vrf-h2 up
+ ip link set dev $h2 up
+
+ ip address add 198.51.100.2/24 dev $h2
+ ip address add 2001:db8:2::2/64 dev $h2
+
+ ip route add 192.0.2.0/24 vrf vrf-h2 nexthop via 198.51.100.1
+ ip route add 2001:db8:1::/64 vrf vrf-h2 nexthop via 2001:db8:2::1
+}
+
+h2_destroy()
+{
+ ip route del 2001:db8:1::/64 vrf vrf-h2
+ ip route del 192.0.2.0/24 vrf vrf-h2
+
+ ip address del 2001:db8:2::2/64 dev $h2
+ ip address del 198.51.100.2/24 dev $h2
+
+ ip link set dev $h2 down
+ vrf_destroy "vrf-h2"
+}
+
+router1_create()
+{
+ vrf_create "vrf-r1"
+ ip link set dev $rp11 master vrf-r1
+ ip link set dev $rp12 master vrf-r1
+ ip link set dev $rp13 master vrf-r1
+
+ ip link set dev vrf-r1 up
+ ip link set dev $rp11 up
+ ip link set dev $rp12 up
+ ip link set dev $rp13 up
+
+ ip address add 192.0.2.1/24 dev $rp11
+ ip address add 2001:db8:1::1/64 dev $rp11
+
+ ip address add 169.254.2.12/24 dev $rp12
+ ip address add fe80:2::12/64 dev $rp12
+
+ ip address add 169.254.3.13/24 dev $rp13
+ ip address add fe80:3::13/64 dev $rp13
+}
+
+router1_destroy()
+{
+ ip route del 2001:db8:2::/64 vrf vrf-r1
+ ip route del 198.51.100.0/24 vrf vrf-r1
+
+ ip address del fe80:3::13/64 dev $rp13
+ ip address del 169.254.3.13/24 dev $rp13
+
+ ip address del fe80:2::12/64 dev $rp12
+ ip address del 169.254.2.12/24 dev $rp12
+
+ ip address del 2001:db8:1::1/64 dev $rp11
+ ip address del 192.0.2.1/24 dev $rp11
+
+ ip nexthop del id 103
+ ip nexthop del id 101
+ ip nexthop del id 102
+ ip nexthop del id 106
+ ip nexthop del id 104
+ ip nexthop del id 105
+
+ ip link set dev $rp13 down
+ ip link set dev $rp12 down
+ ip link set dev $rp11 down
+
+ vrf_destroy "vrf-r1"
+}
+
+router2_create()
+{
+ vrf_create "vrf-r2"
+ ip link set dev $rp21 master vrf-r2
+ ip link set dev $rp22 master vrf-r2
+ ip link set dev $rp23 master vrf-r2
+
+ ip link set dev vrf-r2 up
+ ip link set dev $rp21 up
+ ip link set dev $rp22 up
+ ip link set dev $rp23 up
+
+ ip address add 198.51.100.1/24 dev $rp21
+ ip address add 2001:db8:2::1/64 dev $rp21
+
+ ip address add 169.254.2.22/24 dev $rp22
+ ip address add fe80:2::22/64 dev $rp22
+
+ ip address add 169.254.3.23/24 dev $rp23
+ ip address add fe80:3::23/64 dev $rp23
+}
+
+router2_destroy()
+{
+ ip route del 2001:db8:1::/64 vrf vrf-r2
+ ip route del 192.0.2.0/24 vrf vrf-r2
+
+ ip address del fe80:3::23/64 dev $rp23
+ ip address del 169.254.3.23/24 dev $rp23
+
+ ip address del fe80:2::22/64 dev $rp22
+ ip address del 169.254.2.22/24 dev $rp22
+
+ ip address del 2001:db8:2::1/64 dev $rp21
+ ip address del 198.51.100.1/24 dev $rp21
+
+ ip nexthop del id 201
+ ip nexthop del id 202
+ ip nexthop del id 204
+ ip nexthop del id 205
+
+ ip link set dev $rp23 down
+ ip link set dev $rp22 down
+ ip link set dev $rp21 down
+
+ vrf_destroy "vrf-r2"
+}
+
+routing_nh_obj()
+{
+ ip nexthop add id 101 via 169.254.2.22 dev $rp12
+ ip nexthop add id 102 via 169.254.3.23 dev $rp13
+ ip nexthop add id 103 group 101/102 type resilient buckets 512 \
+ idle_timer 0
+ ip route add 198.51.100.0/24 vrf vrf-r1 nhid 103
+
+ ip nexthop add id 104 via fe80:2::22 dev $rp12
+ ip nexthop add id 105 via fe80:3::23 dev $rp13
+ ip nexthop add id 106 group 104/105 type resilient buckets 512 \
+ idle_timer 0
+ ip route add 2001:db8:2::/64 vrf vrf-r1 nhid 106
+
+ ip nexthop add id 201 via 169.254.2.12 dev $rp22
+ ip nexthop add id 202 via 169.254.3.13 dev $rp23
+ ip nexthop add id 203 group 201/202 type resilient buckets 512 \
+ idle_timer 0
+ ip route add 192.0.2.0/24 vrf vrf-r2 nhid 203
+
+ ip nexthop add id 204 via fe80:2::12 dev $rp22
+ ip nexthop add id 205 via fe80:3::13 dev $rp23
+ ip nexthop add id 206 group 204/205 type resilient buckets 512 \
+ idle_timer 0
+ ip route add 2001:db8:1::/64 vrf vrf-r2 nhid 206
+}
+
+multipath4_test()
+{
+ local desc="$1"
+ local weight_rp12=$2
+ local weight_rp13=$3
+ local t0_rp12 t0_rp13 t1_rp12 t1_rp13
+ local packets_rp12 packets_rp13
+
+ # Transmit multiple flows from h1 to h2 and make sure they are
+ # distributed between both multipath links (rp12 and rp13)
+ # according to the provided weights.
+ sysctl_set net.ipv4.fib_multipath_hash_policy 1
+
+ t0_rp12=$(link_stats_tx_packets_get $rp12)
+ t0_rp13=$(link_stats_tx_packets_get $rp13)
+
+ ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
+ -d 1msec -t udp "sp=1024,dp=0-32768"
+
+ t1_rp12=$(link_stats_tx_packets_get $rp12)
+ t1_rp13=$(link_stats_tx_packets_get $rp13)
+
+ let "packets_rp12 = $t1_rp12 - $t0_rp12"
+ let "packets_rp13 = $t1_rp13 - $t0_rp13"
+ multipath_eval "$desc" $weight_rp12 $weight_rp13 $packets_rp12 $packets_rp13
+
+ # Restore settings.
+ sysctl_restore net.ipv4.fib_multipath_hash_policy
+}
+
+multipath6_l4_test()
+{
+ local desc="$1"
+ local weight_rp12=$2
+ local weight_rp13=$3
+ local t0_rp12 t0_rp13 t1_rp12 t1_rp13
+ local packets_rp12 packets_rp13
+
+ # Transmit multiple flows from h1 to h2 and make sure they are
+ # distributed between both multipath links (rp12 and rp13)
+ # according to the provided weights.
+ sysctl_set net.ipv6.fib_multipath_hash_policy 1
+
+ t0_rp12=$(link_stats_tx_packets_get $rp12)
+ t0_rp13=$(link_stats_tx_packets_get $rp13)
+
+ $MZ $h1 -6 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ -d 1msec -t udp "sp=1024,dp=0-32768"
+
+ t1_rp12=$(link_stats_tx_packets_get $rp12)
+ t1_rp13=$(link_stats_tx_packets_get $rp13)
+
+ let "packets_rp12 = $t1_rp12 - $t0_rp12"
+ let "packets_rp13 = $t1_rp13 - $t0_rp13"
+ multipath_eval "$desc" $weight_rp12 $weight_rp13 $packets_rp12 $packets_rp13
+
+ sysctl_restore net.ipv6.fib_multipath_hash_policy
+}
+
+multipath_test()
+{
+ # Without an idle timer, weight replacement should happen immediately.
+ log_info "Running multipath tests without an idle timer"
+ ip nexthop replace id 103 group 101/102 type resilient idle_timer 0
+ ip nexthop replace id 106 group 104/105 type resilient idle_timer 0
+
+ log_info "Running IPv4 multipath tests"
+ ip nexthop replace id 103 group 101,1/102,1 type resilient
+ multipath4_test "ECMP" 1 1
+ ip nexthop replace id 103 group 101,2/102,1 type resilient
+ multipath4_test "Weighted MP 2:1" 2 1
+ ip nexthop replace id 103 group 101,11/102,45 type resilient
+ multipath4_test "Weighted MP 11:45" 11 45
+
+ ip nexthop replace id 103 group 101,1/102,1 type resilient
+
+ log_info "Running IPv6 L4 hash multipath tests"
+ ip nexthop replace id 106 group 104,1/105,1 type resilient
+ multipath6_l4_test "ECMP" 1 1
+ ip nexthop replace id 106 group 104,2/105,1 type resilient
+ multipath6_l4_test "Weighted MP 2:1" 2 1
+ ip nexthop replace id 106 group 104,11/105,45 type resilient
+ multipath6_l4_test "Weighted MP 11:45" 11 45
+
+ ip nexthop replace id 106 group 104,1/105,1 type resilient
+
+ # With an idle timer, weight replacement should not happen, so the
+ # expected ratio should always be the initial one (1:1).
+ log_info "Running multipath tests with an idle timer of 120 seconds"
+ ip nexthop replace id 103 group 101/102 type resilient idle_timer 120
+ ip nexthop replace id 106 group 104/105 type resilient idle_timer 120
+
+ log_info "Running IPv4 multipath tests"
+ ip nexthop replace id 103 group 101,1/102,1 type resilient
+ multipath4_test "ECMP" 1 1
+ ip nexthop replace id 103 group 101,2/102,1 type resilient
+ multipath4_test "Weighted MP 2:1" 1 1
+ ip nexthop replace id 103 group 101,11/102,45 type resilient
+ multipath4_test "Weighted MP 11:45" 1 1
+
+ ip nexthop replace id 103 group 101,1/102,1 type resilient
+
+ log_info "Running IPv6 L4 hash multipath tests"
+ ip nexthop replace id 106 group 104,1/105,1 type resilient
+ multipath6_l4_test "ECMP" 1 1
+ ip nexthop replace id 106 group 104,2/105,1 type resilient
+ multipath6_l4_test "Weighted MP 2:1" 1 1
+ ip nexthop replace id 106 group 104,11/105,45 type resilient
+ multipath6_l4_test "Weighted MP 11:45" 1 1
+
+ ip nexthop replace id 106 group 104,1/105,1 type resilient
+
+ # With a short idle timer and enough idle time, weight replacement
+ # should happen.
+ log_info "Running multipath tests with an idle timer of 5 seconds"
+ ip nexthop replace id 103 group 101/102 type resilient idle_timer 5
+ ip nexthop replace id 106 group 104/105 type resilient idle_timer 5
+
+ log_info "Running IPv4 multipath tests"
+ sleep 10
+ ip nexthop replace id 103 group 101,1/102,1 type resilient
+ multipath4_test "ECMP" 1 1
+ sleep 10
+ ip nexthop replace id 103 group 101,2/102,1 type resilient
+ multipath4_test "Weighted MP 2:1" 2 1
+ sleep 10
+ ip nexthop replace id 103 group 101,11/102,45 type resilient
+ multipath4_test "Weighted MP 11:45" 11 45
+
+ ip nexthop replace id 103 group 101,1/102,1 type resilient
+
+ log_info "Running IPv6 L4 hash multipath tests"
+ sleep 10
+ ip nexthop replace id 106 group 104,1/105,1 type resilient
+ multipath6_l4_test "ECMP" 1 1
+ sleep 10
+ ip nexthop replace id 106 group 104,2/105,1 type resilient
+ multipath6_l4_test "Weighted MP 2:1" 2 1
+ sleep 10
+ ip nexthop replace id 106 group 104,11/105,45 type resilient
+ multipath6_l4_test "Weighted MP 11:45" 11 45
+
+ ip nexthop replace id 106 group 104,1/105,1 type resilient
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp11=${NETIFS[p2]}
+
+ rp12=${NETIFS[p3]}
+ rp22=${NETIFS[p4]}
+
+ rp13=${NETIFS[p5]}
+ rp23=${NETIFS[p6]}
+
+ rp21=${NETIFS[p7]}
+ h2=${NETIFS[p8]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+
+ router1_create
+ router2_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ router2_destroy
+ router1_destroy
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 198.51.100.2
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:2::2
+}
+
+ip nexthop ls >/dev/null 2>&1
+if [ $? -ne 0 ]; then
+ echo "Nexthop objects not supported; skipping tests"
+ exit 0
+fi
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+routing_nh_obj
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/tc_police.sh b/tools/testing/selftests/net/forwarding/tc_police.sh
index 160f9cccdfb7..4f9f17cb45d6 100755
--- a/tools/testing/selftests/net/forwarding/tc_police.sh
+++ b/tools/testing/selftests/net/forwarding/tc_police.sh
@@ -35,6 +35,8 @@ ALL_TESTS="
police_shared_test
police_rx_mirror_test
police_tx_mirror_test
+ police_pps_rx_test
+ police_pps_tx_test
"
NUM_NETIFS=6
source tc_common.sh
@@ -290,6 +292,60 @@ police_tx_mirror_test()
police_mirror_common_test $rp2 egress "police tx and mirror"
}
+police_pps_common_test()
+{
+ local test_name=$1; shift
+
+ RET=0
+
+ # Rule to measure bandwidth on ingress of $h2
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+ dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
+ action drop
+
+ mausezahn $h1 -a own -b $(mac_get $rp1) -A 192.0.2.1 -B 198.51.100.1 \
+ -t udp sp=12345,dp=54321 -p 1000 -c 0 -q &
+
+ local t0=$(tc_rule_stats_get $h2 1 ingress .packets)
+ sleep 10
+ local t1=$(tc_rule_stats_get $h2 1 ingress .packets)
+
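+ # The callers install policers configured for 2000 packets per second;
+ # accept a measured rate within +-10% of that.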
+ local er=$((2000))
+ local nr=$(packets_rate $t0 $t1 10)
+ local nr_pct=$((100 * (nr - er) / er))
+ ((-10 <= nr_pct && nr_pct <= 10))
+ check_err $? "Expected rate $(humanize $er), got $(humanize $nr), which is $nr_pct% off. Required accuracy is +-10%."
+
+ log_test "$test_name"
+
+ { kill %% && wait %%; } 2>/dev/null
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+}
+
+police_pps_rx_test()
+{
+ # Rule to police traffic destined to $h2 on ingress of $rp1
+ tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \
+ dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
+ action police pkts_rate 2000 pkts_burst 400 conform-exceed drop/ok
+
+ police_pps_common_test "police pps on rx"
+
+ tc filter del dev $rp1 ingress protocol ip pref 1 handle 101 flower
+}
+
+police_pps_tx_test()
+{
+ # Rule to police traffic destined to $h2 on egress of $rp2
+ tc filter add dev $rp2 egress protocol ip pref 1 handle 101 flower \
+ dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
+ action police pkts_rate 2000 pkts_burst 400 conform-exceed drop/ok
+
+ police_pps_common_test "police pps on tx"
+
+ tc filter del dev $rp2 egress protocol ip pref 1 handle 101 flower
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
index 00bb158b4a5d..f1464f09b080 100644
--- a/tools/testing/selftests/net/mptcp/Makefile
+++ b/tools/testing/selftests/net/mptcp/Makefile
@@ -6,7 +6,7 @@ KSFT_KHDR_INSTALL := 1
CFLAGS = -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include
TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \
- simult_flows.sh
+ simult_flows.sh mptcp_sockopt.sh
TEST_GEN_FILES = mptcp_connect pm_nl_ctl
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index 39edce4f541c..2674ba20d524 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -5,8 +5,9 @@ rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
ns="ns1-$rndh"
ksft_skip=4
test_cnt=1
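+# Allow each test roughly twice the poll timeout, plus a second of slack.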
+timeout_poll=100
+timeout_test=$((timeout_poll * 2 + 1))
ret=0
-pids=()
flush_pids()
{
@@ -14,18 +15,14 @@ flush_pids()
# give it some time
sleep 1.1
- for pid in ${pids[@]}; do
- [ -d /proc/$pid ] && kill -SIGUSR1 $pid >/dev/null 2>&1
- done
- pids=()
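+ # Signal every process in the test namespace; no PID bookkeeping needed.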
+ ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGUSR1 &>/dev/null
}
cleanup()
{
+ ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGKILL &>/dev/null
+
ip netns del $ns
- for pid in ${pids[@]}; do
- [ -d /proc/$pid ] && kill -9 $pid >/dev/null 2>&1
- done
}
ip -Version > /dev/null 2>&1
@@ -79,39 +76,57 @@ trap cleanup EXIT
ip netns add $ns
ip -n $ns link set dev lo up
-echo "a" | ip netns exec $ns ./mptcp_connect -p 10000 -l 0.0.0.0 -t 100 >/dev/null &
+echo "a" | \
+ timeout ${timeout_test} \
+ ip netns exec $ns \
+ ./mptcp_connect -p 10000 -l -t ${timeout_poll} \
+ 0.0.0.0 >/dev/null &
sleep 0.1
-pids[0]=$!
chk_msk_nr 0 "no msk on netns creation"
-echo "b" | ip netns exec $ns ./mptcp_connect -p 10000 127.0.0.1 -j -t 100 >/dev/null &
+echo "b" | \
+ timeout ${timeout_test} \
+ ip netns exec $ns \
+ ./mptcp_connect -p 10000 -j -t ${timeout_poll} \
+ 127.0.0.1 >/dev/null &
sleep 0.1
-pids[1]=$!
chk_msk_nr 2 "after MPC handshake "
chk_msk_remote_key_nr 2 "....chk remote_key"
chk_msk_fallback_nr 0 "....chk no fallback"
flush_pids
-echo "a" | ip netns exec $ns ./mptcp_connect -p 10001 -s TCP -l 0.0.0.0 -t 100 >/dev/null &
-pids[0]=$!
+echo "a" | \
+ timeout ${timeout_test} \
+ ip netns exec $ns \
+ ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \
+ 0.0.0.0 >/dev/null &
sleep 0.1
-echo "b" | ip netns exec $ns ./mptcp_connect -p 10001 127.0.0.1 -j -t 100 >/dev/null &
-pids[1]=$!
+echo "b" | \
+ timeout ${timeout_test} \
+ ip netns exec $ns \
+ ./mptcp_connect -p 10001 -j -t ${timeout_poll} \
+ 127.0.0.1 >/dev/null &
sleep 0.1
chk_msk_fallback_nr 1 "check fallback"
flush_pids
NR_CLIENTS=100
for I in `seq 1 $NR_CLIENTS`; do
- echo "a" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) -l 0.0.0.0 -t 100 -w 10 >/dev/null &
- pids[$((I*2))]=$!
+ echo "a" | \
+ timeout ${timeout_test} \
+ ip netns exec $ns \
+ ./mptcp_connect -p $((I+10001)) -l -w 10 \
+ -t ${timeout_poll} 0.0.0.0 >/dev/null &
done
sleep 0.1
for I in `seq 1 $NR_CLIENTS`; do
- echo "b" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) 127.0.0.1 -t 100 -w 10 >/dev/null &
- pids[$((I*2 + 1))]=$!
+ echo "b" | \
+ timeout ${timeout_test} \
+ ip netns exec $ns \
+ ./mptcp_connect -p $((I+10001)) -w 10 \
+ -t ${timeout_poll} 127.0.0.1 >/dev/null &
done
sleep 1.5
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index 77bb62feb872..d88e1fdfb147 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -45,7 +45,14 @@ enum cfg_mode {
CFG_MODE_SENDFILE,
};
+enum cfg_peek {
+ CFG_NONE_PEEK,
+ CFG_WITH_PEEK,
+ CFG_AFTER_PEEK,
+};
+
static enum cfg_mode cfg_mode = CFG_MODE_POLL;
+static enum cfg_peek cfg_peek = CFG_NONE_PEEK;
static const char *cfg_host;
static const char *cfg_port = "12000";
static int cfg_sock_proto = IPPROTO_MPTCP;
@@ -55,7 +62,9 @@ static int cfg_sndbuf;
static int cfg_rcvbuf;
static bool cfg_join;
static bool cfg_remove;
+static unsigned int cfg_do_w;
static int cfg_wait;
+static uint32_t cfg_mark;
static void die_usage(void)
{
@@ -68,8 +77,11 @@ static void die_usage(void)
fprintf(stderr, "\t-p num -- use port num\n");
fprintf(stderr, "\t-s [MPTCP|TCP] -- use mptcp(default) or tcp sockets\n");
fprintf(stderr, "\t-m [poll|mmap|sendfile] -- use poll(default)/mmap+write/sendfile\n");
+ fprintf(stderr, "\t-M mark -- set socket packet mark\n");
fprintf(stderr, "\t-u -- check mptcp ulp\n");
fprintf(stderr, "\t-w num -- wait num sec before closing the socket\n");
+ fprintf(stderr,
+ "\t-P [saveWithPeek|saveAfterPeek] -- save data with/after MSG_PEEK form tcp socket\n");
exit(1);
}
@@ -139,6 +151,17 @@ static void set_sndbuf(int fd, unsigned int size)
}
}
+static void set_mark(int fd, uint32_t mark)
+{
+ int err;
+
+ err = setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
+ if (err) {
+ perror("set SO_MARK");
+ exit(1);
+ }
+}
+
static int sock_listen_mptcp(const char * const listenaddr,
const char * const port)
{
@@ -247,6 +270,9 @@ static int sock_connect_mptcp(const char * const remoteaddr,
continue;
}
+ if (cfg_mark)
+ set_mark(sock, cfg_mark);
+
if (connect(sock, a->ai_addr, a->ai_addrlen) == 0)
break; /* success */
@@ -272,8 +298,8 @@ static size_t do_rnd_write(const int fd, char *buf, const size_t len)
if (cfg_join && first && do_w > 100)
do_w = 100;
- if (cfg_remove && do_w > 50)
- do_w = 50;
+ if (cfg_remove && do_w > cfg_do_w)
+ do_w = cfg_do_w;
bw = write(fd, buf, do_w);
if (bw < 0)
@@ -314,6 +340,8 @@ static size_t do_write(const int fd, char *buf, const size_t len)
static ssize_t do_rnd_read(const int fd, char *buf, const size_t len)
{
+ int ret = 0;
+ char tmp[16384];
size_t cap = rand();
cap &= 0xffff;
@@ -323,7 +351,17 @@ static ssize_t do_rnd_read(const int fd, char *buf, const size_t len)
else if (cap > len)
cap = len;
- return read(fd, buf, cap);
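+ /* saveWithPeek: peek into buf, then drain the same bytes into a
+ * scratch buffer so buf keeps the peeked copy;
+ * saveAfterPeek: peek first, then do a regular read into buf
+ */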
+ if (cfg_peek == CFG_WITH_PEEK) {
+ ret = recv(fd, buf, cap, MSG_PEEK);
+ ret = (ret < 0) ? ret : read(fd, tmp, ret);
+ } else if (cfg_peek == CFG_AFTER_PEEK) {
+ ret = recv(fd, buf, cap, MSG_PEEK);
+ ret = (ret < 0) ? ret : read(fd, buf, cap);
+ } else {
+ ret = read(fd, buf, cap);
+ }
+
+ return ret;
}
static void set_nonblock(int fd)
@@ -802,6 +840,26 @@ int parse_mode(const char *mode)
return 0;
}
+int parse_peek(const char *mode)
+{
+ if (!strcasecmp(mode, "saveWithPeek"))
+ return CFG_WITH_PEEK;
+ if (!strcasecmp(mode, "saveAfterPeek"))
+ return CFG_AFTER_PEEK;
+
+ fprintf(stderr, "Unknown: %s\n", mode);
+ fprintf(stderr, "Supported MSG_PEEK mode are:\n");
+ fprintf(stderr,
+ "\t\t\"saveWithPeek\" - recv data with flags 'MSG_PEEK' and save the peek data into file\n");
+ fprintf(stderr,
+ "\t\t\"saveAfterPeek\" - read and save data into file after recv with flags 'MSG_PEEK'\n");
+
+ die_usage();
+
+ /* silence compiler warning */
+ return 0;
+}
+
static int parse_int(const char *size)
{
unsigned long s;
@@ -829,7 +887,7 @@ static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "6jrlp:s:hut:m:S:R:w:")) != -1) {
+ while ((c = getopt(argc, argv, "6jr:lp:s:hut:m:S:R:w:M:P:")) != -1) {
switch (c) {
case 'j':
cfg_join = true;
@@ -840,6 +898,9 @@ static void parse_opts(int argc, char **argv)
cfg_remove = true;
cfg_mode = CFG_MODE_POLL;
cfg_wait = 400000;
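+ /* -r takes the write-chunk size; non-positive input falls back to 50 bytes */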
+ cfg_do_w = atoi(optarg);
+ if (cfg_do_w <= 0)
+ cfg_do_w = 50;
break;
case 'l':
listen_mode = true;
@@ -876,6 +937,12 @@ static void parse_opts(int argc, char **argv)
case 'w':
cfg_wait = atoi(optarg)*1000000;
break;
+ case 'M':
+ cfg_mark = strtol(optarg, NULL, 0);
+ break;
+ case 'P':
+ cfg_peek = parse_peek(optarg);
+ break;
}
}
@@ -907,6 +974,8 @@ int main(int argc, char *argv[])
set_rcvbuf(fd, cfg_rcvbuf);
if (cfg_sndbuf)
set_sndbuf(fd, cfg_sndbuf);
+ if (cfg_mark)
+ set_mark(fd, cfg_mark);
return main_loop_s(fd);
}
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 10a030b53b23..9236609731b1 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -11,7 +11,8 @@ cin=""
cout=""
ksft_skip=4
capture=false
-timeout=30
+timeout_poll=30
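+# overall watchdog: twice the poll timeout, plus a second of slack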
+timeout_test=$((timeout_poll * 2 + 1))
ipv6=true
ethtool_random_on=true
tc_delay="$((RANDOM%50))"
@@ -273,7 +274,7 @@ check_mptcp_disabled()
ip netns exec ${disabled_ns} sysctl -q net.mptcp.enabled=0
local err=0
- LANG=C ip netns exec ${disabled_ns} ./mptcp_connect -t $timeout -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
+ LANG=C ip netns exec ${disabled_ns} ./mptcp_connect -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
grep -q "^socket: Protocol not available$" && err=1
ip netns delete ${disabled_ns}
@@ -374,7 +375,7 @@ do_transfer()
local srv_proto="$4"
local connect_addr="$5"
local local_addr="$6"
- local extra_args=""
+ local extra_args="$7"
local port
port=$((10000+$TEST_COUNT))
@@ -393,9 +394,9 @@ do_transfer()
fi
if [ -n "$extra_args" ] && $options_log; then
- options_log=false
echo "INFO: extra options: $extra_args"
fi
+ options_log=false
:> "$cout"
:> "$sout"
@@ -425,19 +426,32 @@ do_transfer()
sleep 1
fi
+ NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
+ nstat -n
+ if [ ${listener_ns} != ${connector_ns} ]; then
+ NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
+ nstat -n
+ fi
+
local stat_synrx_last_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
local stat_ackrx_last_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
local stat_cookietx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
local stat_cookierx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
- ip netns exec ${listener_ns} ./mptcp_connect -t $timeout -l -p $port -s ${srv_proto} $extra_args $local_addr < "$sin" > "$sout" &
+ timeout ${timeout_test} \
+ ip netns exec ${listener_ns} \
+ ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
+ $extra_args $local_addr < "$sin" > "$sout" &
local spid=$!
wait_local_port_listen "${listener_ns}" "${port}"
local start
start=$(date +%s%3N)
- ip netns exec ${connector_ns} ./mptcp_connect -t $timeout -p $port -s ${cl_proto} $extra_args $connect_addr < "$cin" > "$cout" &
+ timeout ${timeout_test} \
+ ip netns exec ${connector_ns} \
+ ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
+ $extra_args $connect_addr < "$cin" > "$cout" &
local cpid=$!
wait $cpid
@@ -575,6 +589,7 @@ run_tests_lo()
local connector_ns="$2"
local connect_addr="$3"
local loopback="$4"
+ local extra_args="$5"
local lret=0
# skip if test programs are running inside same netns for subsequent runs.
@@ -594,7 +609,8 @@ run_tests_lo()
local_addr="0.0.0.0"
fi
- do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr} ${local_addr}
+ do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP \
+ ${connect_addr} ${local_addr} "${extra_args}"
lret=$?
if [ $lret -ne 0 ]; then
ret=$lret
@@ -608,14 +624,16 @@ run_tests_lo()
fi
fi
- do_transfer ${listener_ns} ${connector_ns} MPTCP TCP ${connect_addr} ${local_addr}
+ do_transfer ${listener_ns} ${connector_ns} MPTCP TCP \
+ ${connect_addr} ${local_addr} "${extra_args}"
lret=$?
if [ $lret -ne 0 ]; then
ret=$lret
return 1
fi
- do_transfer ${listener_ns} ${connector_ns} TCP MPTCP ${connect_addr} ${local_addr}
+ do_transfer ${listener_ns} ${connector_ns} TCP MPTCP \
+ ${connect_addr} ${local_addr} "${extra_args}"
lret=$?
if [ $lret -ne 0 ]; then
ret=$lret
@@ -623,7 +641,8 @@ run_tests_lo()
fi
if [ $do_tcp -gt 1 ] ;then
- do_transfer ${listener_ns} ${connector_ns} TCP TCP ${connect_addr} ${local_addr}
+ do_transfer ${listener_ns} ${connector_ns} TCP TCP \
+ ${connect_addr} ${local_addr} "${extra_args}"
lret=$?
if [ $lret -ne 0 ]; then
ret=$lret
@@ -639,6 +658,15 @@ run_tests()
run_tests_lo $1 $2 $3 0
}
+run_tests_peekmode()
+{
+ local peekmode="$1"
+
+ echo "INFO: with peek mode: ${peekmode}"
+ run_tests_lo "$ns1" "$ns1" 10.0.1.1 1 "-P ${peekmode}"
+ run_tests_lo "$ns1" "$ns1" dead:beef:1::1 1 "-P ${peekmode}"
+}
+
make_file "$cin" "client"
make_file "$sin" "server"
@@ -718,6 +746,9 @@ for sender in $ns1 $ns2 $ns3 $ns4;do
run_tests "$ns4" $sender dead:beef:3::1
done
+run_tests_peekmode "saveWithPeek"
+run_tests_peekmode "saveAfterPeek"
+
time_end=$(date +%s)
time_run=$((time_end-time_start))
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index ad32240fbfda..fd99485cf2a4 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -8,7 +8,8 @@ cin=""
cinsent=""
cout=""
ksft_skip=4
-timeout=30
+timeout_poll=30
+timeout_test=$((timeout_poll * 2 + 1))
mptcp_connect=""
capture=0
do_all_tests=1
@@ -77,6 +78,7 @@ cleanup_partial()
for netns in "$ns1" "$ns2"; do
ip netns del $netns
+ rm -f /tmp/$netns.{nstat,out}
done
}
@@ -232,10 +234,17 @@ do_transfer()
sleep 1
fi
+ NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
+ nstat -n
+ NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
+ nstat -n
+
if [ $speed = "fast" ]; then
mptcp_connect="./mptcp_connect -j"
- else
- mptcp_connect="./mptcp_connect -r"
+ elif [ $speed = "slow" ]; then
+ mptcp_connect="./mptcp_connect -r 50"
+ elif [ $speed = "least" ]; then
+ mptcp_connect="./mptcp_connect -r 10"
fi
local local_addr
@@ -245,17 +254,26 @@ do_transfer()
local_addr="0.0.0.0"
fi
- ip netns exec ${listener_ns} $mptcp_connect -t $timeout -l -p $port \
- -s ${srv_proto} ${local_addr} < "$sin" > "$sout" &
+ timeout ${timeout_test} \
+ ip netns exec ${listener_ns} \
+ $mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
+ ${local_addr} < "$sin" > "$sout" &
spid=$!
sleep 1
if [ "$test_link_fail" -eq 0 ];then
- ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr < "$cin" > "$cout" &
+ timeout ${timeout_test} \
+ ip netns exec ${connector_ns} \
+ $mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
+ $connect_addr < "$cin" > "$cout" &
else
- ( cat "$cin" ; sleep 2; link_failure $listener_ns ; cat "$cin" ) | tee "$cinsent" | \
- ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr > "$cout" &
+ ( cat "$cin" ; sleep 2; link_failure $listener_ns ; cat "$cin" ) | \
+ tee "$cinsent" | \
+ timeout ${timeout_test} \
+ ip netns exec ${connector_ns} \
+ $mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
+ $connect_addr > "$cout" &
fi
cpid=$!
@@ -279,17 +297,25 @@ do_transfer()
let rm_nr_ns1=-addr_nr_ns1
if [ $rm_nr_ns1 -lt 8 ]; then
counter=1
- sleep 1
-
- while [ $counter -le $rm_nr_ns1 ]
- do
- ip netns exec ${listener_ns} ./pm_nl_ctl del $counter
+ dump=(`ip netns exec ${listener_ns} ./pm_nl_ctl dump`)
+ if [ ${#dump[@]} -gt 0 ]; then
+ id=${dump[1]}
sleep 1
- let counter+=1
- done
- else
+
+ while [ $counter -le $rm_nr_ns1 ]
+ do
+ ip netns exec ${listener_ns} ./pm_nl_ctl del $id
+ sleep 1
+ let counter+=1
+ let id+=1
+ done
+ fi
+ elif [ $rm_nr_ns1 -eq 8 ]; then
sleep 1
ip netns exec ${listener_ns} ./pm_nl_ctl flush
+ elif [ $rm_nr_ns1 -eq 9 ]; then
+ sleep 1
+ ip netns exec ${listener_ns} ./pm_nl_ctl del 0 ${connect_addr}
fi
fi
@@ -313,17 +339,31 @@ do_transfer()
let rm_nr_ns2=-addr_nr_ns2
if [ $rm_nr_ns2 -lt 8 ]; then
counter=1
- sleep 1
-
- while [ $counter -le $rm_nr_ns2 ]
- do
- ip netns exec ${connector_ns} ./pm_nl_ctl del $counter
+ dump=(`ip netns exec ${connector_ns} ./pm_nl_ctl dump`)
+ if [ ${#dump[@]} -gt 0 ]; then
+ id=${dump[1]}
sleep 1
- let counter+=1
- done
- else
+
+ while [ $counter -le $rm_nr_ns2 ]
+ do
+ ip netns exec ${connector_ns} ./pm_nl_ctl del $id
+ sleep 1
+ let counter+=1
+ let id+=1
+ done
+ fi
+ elif [ $rm_nr_ns2 -eq 8 ]; then
sleep 1
ip netns exec ${connector_ns} ./pm_nl_ctl flush
+ elif [ $rm_nr_ns2 -eq 9 ]; then
+ local addr
+ if is_v6 "${connect_addr}"; then
+ addr="dead:beef:1::2"
+ else
+ addr="10.0.1.2"
+ fi
+ sleep 1
+ ip netns exec ${connector_ns} ./pm_nl_ctl del 0 $addr
fi
fi
@@ -349,12 +389,19 @@ do_transfer()
kill $cappid
fi
+ NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
+ nstat | grep Tcp > /tmp/${listener_ns}.out
+ NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
+ nstat | grep Tcp > /tmp/${connector_ns}.out
+
if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
echo " client exit code $retc, server $rets" 1>&2
echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
- ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port"
+ ip netns exec ${listener_ns} ss -Menita 1>&2 -o "sport = :$port"
+ cat /tmp/${listener_ns}.out
echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2
- ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port"
+ ip netns exec ${connector_ns} ss -Menita 1>&2 -o "dport = :$port"
+ cat /tmp/${connector_ns}.out
cat "$capout"
ret=1
@@ -605,11 +652,22 @@ chk_rm_nr()
{
local rm_addr_nr=$1
local rm_subflow_nr=$2
+ local invert=${3:-""}
local count
local dump_stats
+ local addr_ns
+ local subflow_ns
+
+ if [ -z $invert ]; then
+ addr_ns=$ns1
+ subflow_ns=$ns2
+ elif [ $invert = "invert" ]; then
+ addr_ns=$ns2
+ subflow_ns=$ns1
+ fi
printf "%-39s %s" " " "rm "
- count=`ip netns exec $ns1 nstat -as | grep MPTcpExtRmAddr | awk '{print $2}'`
+ count=`ip netns exec $addr_ns nstat -as | grep MPTcpExtRmAddr | awk '{print $2}'`
[ -z "$count" ] && count=0
if [ "$count" != "$rm_addr_nr" ]; then
echo "[fail] got $count RM_ADDR[s] expected $rm_addr_nr"
@@ -620,7 +678,7 @@ chk_rm_nr()
fi
echo -n " - sf "
- count=`ip netns exec $ns2 nstat -as | grep MPTcpExtRmSubflow | awk '{print $2}'`
+ count=`ip netns exec $subflow_ns nstat -as | grep MPTcpExtRmSubflow | awk '{print $2}'`
[ -z "$count" ] && count=0
if [ "$count" != "$rm_subflow_nr" ]; then
echo "[fail] got $count RM_SUBFLOW[s] expected $rm_subflow_nr"
@@ -719,6 +777,14 @@ subflows_tests()
ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "multiple subflows, limited by server" 2 2 1
+
+ # single subflow, dev
+ reset
+ ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ ip netns exec $ns2 ./pm_nl_ctl limits 0 1
+ ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow dev ns2eth3
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr "single subflow, dev" 1 1 1
}
signal_address_tests()
@@ -762,6 +828,28 @@ signal_address_tests()
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "multiple subflows and signal" 3 3 3
chk_add_nr 1 1
+
+ # signal addresses
+ reset
+ ip netns exec $ns1 ./pm_nl_ctl limits 3 3
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.3.1 flags signal
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.4.1 flags signal
+ ip netns exec $ns2 ./pm_nl_ctl limits 3 3
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr "signal addresses" 3 3 3
+ chk_add_nr 3 3
+
+ # signal invalid addresses
+ reset
+ ip netns exec $ns1 ./pm_nl_ctl limits 3 3
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.12.1 flags signal
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.3.1 flags signal
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.14.1 flags signal
+ ip netns exec $ns2 ./pm_nl_ctl limits 3 3
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr "signal invalid addresses" 1 1 1
+ chk_add_nr 3 3
}
link_failure_tests()
@@ -797,6 +885,26 @@ add_addr_timeout_tests()
run_tests $ns1 $ns2 dead:beef:1::1 0 0 0 slow
chk_join_nr "signal address, ADD_ADDR6 timeout" 1 1 1
chk_add_nr 4 0
+
+ # signal addresses timeout
+ reset_with_add_addr_timeout
+ ip netns exec $ns1 ./pm_nl_ctl limits 2 2
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.3.1 flags signal
+ ip netns exec $ns2 ./pm_nl_ctl limits 2 2
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 least
+ chk_join_nr "signal addresses, ADD_ADDR timeout" 2 2 2
+ chk_add_nr 8 0
+
+ # signal invalid addresses timeout
+ reset_with_add_addr_timeout
+ ip netns exec $ns1 ./pm_nl_ctl limits 2 2
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.12.1 flags signal
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.3.1 flags signal
+ ip netns exec $ns2 ./pm_nl_ctl limits 2 2
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 least
+ chk_join_nr "invalid address, ADD_ADDR timeout" 1 1 1
+ chk_add_nr 8 0
}
remove_tests()
@@ -828,7 +936,7 @@ remove_tests()
run_tests $ns1 $ns2 10.0.1.1 0 -1 0 slow
chk_join_nr "remove single address" 1 1 1
chk_add_nr 1 1
- chk_rm_nr 0 0
+ chk_rm_nr 1 1 invert
# subflow and signal, remove
reset
@@ -853,6 +961,30 @@ remove_tests()
chk_add_nr 1 1
chk_rm_nr 2 2
+ # addresses remove
+ reset
+ ip netns exec $ns1 ./pm_nl_ctl limits 3 3
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal id 250
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.3.1 flags signal
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.4.1 flags signal
+ ip netns exec $ns2 ./pm_nl_ctl limits 3 3
+ run_tests $ns1 $ns2 10.0.1.1 0 -3 0 slow
+ chk_join_nr "remove addresses" 3 3 3
+ chk_add_nr 3 3
+ chk_rm_nr 3 3 invert
+
+ # invalid addresses remove
+ reset
+ ip netns exec $ns1 ./pm_nl_ctl limits 3 3
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.12.1 flags signal
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.3.1 flags signal
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.14.1 flags signal
+ ip netns exec $ns2 ./pm_nl_ctl limits 3 3
+ run_tests $ns1 $ns2 10.0.1.1 0 -3 0 slow
+ chk_join_nr "remove invalid addresses" 1 1 1
+ chk_add_nr 3 3
+ chk_rm_nr 3 1 invert
+
# subflows and signal, flush
reset
ip netns exec $ns1 ./pm_nl_ctl limits 0 3
@@ -864,6 +996,60 @@ remove_tests()
chk_join_nr "flush subflows and signal" 3 3 3
chk_add_nr 1 1
chk_rm_nr 2 2
+
+ # subflows flush
+ reset
+ ip netns exec $ns1 ./pm_nl_ctl limits 3 3
+ ip netns exec $ns2 ./pm_nl_ctl limits 3 3
+ ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow id 150
+ ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+ ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1 0 -8 -8 slow
+ chk_join_nr "flush subflows" 3 3 3
+ chk_rm_nr 3 3
+
+ # addresses flush
+ reset
+ ip netns exec $ns1 ./pm_nl_ctl limits 3 3
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal id 250
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.3.1 flags signal
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.4.1 flags signal
+ ip netns exec $ns2 ./pm_nl_ctl limits 3 3
+ run_tests $ns1 $ns2 10.0.1.1 0 -8 -8 slow
+ chk_join_nr "flush addresses" 3 3 3
+ chk_add_nr 3 3
+ chk_rm_nr 3 3 invert
+
+ # invalid addresses flush
+ reset
+ ip netns exec $ns1 ./pm_nl_ctl limits 3 3
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.12.1 flags signal
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.3.1 flags signal
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.14.1 flags signal
+ ip netns exec $ns2 ./pm_nl_ctl limits 3 3
+ run_tests $ns1 $ns2 10.0.1.1 0 -8 0 slow
+ chk_join_nr "flush invalid addresses" 1 1 1
+ chk_add_nr 3 3
+ chk_rm_nr 3 1 invert
+
+ # remove id 0 subflow
+ reset
+ ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ ip netns exec $ns2 ./pm_nl_ctl limits 0 1
+ ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1 0 0 -9 slow
+ chk_join_nr "remove id 0 subflow" 1 1 1
+ chk_rm_nr 1 1
+
+ # remove id 0 address
+ reset
+ ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+ ip netns exec $ns2 ./pm_nl_ctl limits 1 1
+ run_tests $ns1 $ns2 10.0.1.1 0 -9 0 slow
+ chk_join_nr "remove id 0 address" 1 1 1
+ chk_add_nr 1 1
+ chk_rm_nr 1 1 invert
}
add_tests()
@@ -940,7 +1126,7 @@ ipv6_tests()
run_tests $ns1 $ns2 dead:beef:1::1 0 -1 0 slow
chk_join_nr "remove single address IPv6" 1 1 1
chk_add_nr 1 1
- chk_rm_nr 0 0
+ chk_rm_nr 1 1 invert
# subflow and signal IPv6, remove
reset
@@ -1083,7 +1269,7 @@ add_addr_ports_tests()
run_tests $ns1 $ns2 10.0.1.1 0 -1 0 slow
chk_join_nr "remove single address with port" 1 1 1
chk_add_nr 1 1 1
- chk_rm_nr 0 0
+ chk_rm_nr 1 1 invert
# subflow and signal with port, remove
reset
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
new file mode 100755
index 000000000000..2fa13946ac04
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
@@ -0,0 +1,276 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ret=0
+sin=""
+sout=""
+cin=""
+cout=""
+ksft_skip=4
+timeout_poll=30
+timeout_test=$((timeout_poll * 2 + 1))
+mptcp_connect=""
+do_all_tests=1
+
+add_mark_rules()
+{
+ local ns=$1
+ local m=$2
+
+ for t in iptables ip6tables; do
+ # just for debugging: check that we get multiple subflow connection requests
+ ip netns exec $ns $t -A OUTPUT -p tcp --syn -m mark --mark $m -j ACCEPT
+
+ # RST packets might be handled by an internal dummy socket
+ ip netns exec $ns $t -A OUTPUT -p tcp --tcp-flags RST RST -m mark --mark 0 -j ACCEPT
+
+ ip netns exec $ns $t -A OUTPUT -p tcp -m mark --mark $m -j ACCEPT
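+ # any remaining unmarked packet means SO_MARK was not applied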
+ ip netns exec $ns $t -A OUTPUT -p tcp -m mark --mark 0 -j DROP
+ done
+}
+
+init()
+{
+ rndh=$(printf %x $(date +%s))-$(mktemp -u XXXXXX)
+
+ ns1="ns1-$rndh"
+ ns2="ns2-$rndh"
+
+ for netns in "$ns1" "$ns2";do
+ ip netns add $netns || exit $ksft_skip
+ ip -net $netns link set lo up
+ ip netns exec $netns sysctl -q net.mptcp.enabled=1
+ ip netns exec $netns sysctl -q net.ipv4.conf.all.rp_filter=0
+ ip netns exec $netns sysctl -q net.ipv4.conf.default.rp_filter=0
+ done
+
+ for i in `seq 1 4`; do
+ ip link add ns1eth$i netns "$ns1" type veth peer name ns2eth$i netns "$ns2"
+ ip -net "$ns1" addr add 10.0.$i.1/24 dev ns1eth$i
+ ip -net "$ns1" addr add dead:beef:$i::1/64 dev ns1eth$i nodad
+ ip -net "$ns1" link set ns1eth$i up
+
+ ip -net "$ns2" addr add 10.0.$i.2/24 dev ns2eth$i
+ ip -net "$ns2" addr add dead:beef:$i::2/64 dev ns2eth$i nodad
+ ip -net "$ns2" link set ns2eth$i up
+
+ # let $ns2 reach any $ns1 address from any interface
+ ip -net "$ns2" route add default via 10.0.$i.1 dev ns2eth$i metric 10$i
+
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.$i.1 flags signal
+ ip netns exec $ns1 ./pm_nl_ctl add dead:beef:$i::1 flags signal
+
+ ip netns exec $ns2 ./pm_nl_ctl add 10.0.$i.2 flags signal
+ ip netns exec $ns2 ./pm_nl_ctl add dead:beef:$i::2 flags signal
+ done
+
+ ip netns exec $ns1 ./pm_nl_ctl limits 8 8
+ ip netns exec $ns2 ./pm_nl_ctl limits 8 8
+
+ add_mark_rules $ns1 1
+ add_mark_rules $ns2 2
+}
+
+cleanup()
+{
+ for netns in "$ns1" "$ns2"; do
+ ip netns del $netns
+ done
+ rm -f "$cin" "$cout"
+ rm -f "$sin" "$sout"
+}
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+iptables -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run all tests without iptables tool"
+ exit $ksft_skip
+fi
+
+ip6tables -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run all tests without ip6tables tool"
+ exit $ksft_skip
+fi
+
+check_mark()
+{
+ local ns=$1
+ local af=$2
+
+ tables=iptables
+
+ if [ $af -eq 6 ];then
+ tables=ip6tables
+ fi
+
+ counters=$(ip netns exec $ns $tables -v -L OUTPUT | grep DROP)
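+ # keep only the pkts/bytes counters that precede the DROP target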
+ values=${counters%DROP*}
+
+ for v in $values; do
+ if [ $v -ne 0 ]; then
+ echo "FAIL: got $tables $values in ns $ns , not 0 - not all expected packets marked" 1>&2
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+print_file_err()
+{
+ ls -l "$1" 1>&2
+ echo "Trailing bytes are: "
+ tail -c 27 "$1"
+}
+
+check_transfer()
+{
+ in=$1
+ out=$2
+ what=$3
+
+ cmp "$in" "$out" > /dev/null 2>&1
+ if [ $? -ne 0 ] ;then
+ echo "[ FAIL ] $what does not match (in, out):"
+ print_file_err "$in"
+ print_file_err "$out"
+ ret=1
+
+ return 1
+ fi
+
+ return 0
+}
+
+# $1: IP address
+is_v6()
+{
+ [ -z "${1##*:*}" ]
+}
+
+do_transfer()
+{
+ listener_ns="$1"
+ connector_ns="$2"
+ cl_proto="$3"
+ srv_proto="$4"
+ connect_addr="$5"
+
+ port=12001
+
+ :> "$cout"
+ :> "$sout"
+
+ mptcp_connect="./mptcp_connect -r 20"
+
+ local local_addr
+ if is_v6 "${connect_addr}"; then
+ local_addr="::"
+ else
+ local_addr="0.0.0.0"
+ fi
+
+ timeout ${timeout_test} \
+ ip netns exec ${listener_ns} \
+ $mptcp_connect -t ${timeout_poll} -l -M 1 -p $port -s ${srv_proto} \
+ ${local_addr} < "$sin" > "$sout" &
+ spid=$!
+
+ sleep 1
+
+ timeout ${timeout_test} \
+ ip netns exec ${connector_ns} \
+ $mptcp_connect -t ${timeout_poll} -M 2 -p $port -s ${cl_proto} \
+ $connect_addr < "$cin" > "$cout" &
+
+ cpid=$!
+
+ wait $cpid
+ retc=$?
+ wait $spid
+ rets=$?
+
+ if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
+ echo " client exit code $retc, server $rets" 1>&2
+ echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
+ ip netns exec ${listener_ns} ss -Menita 1>&2 -o "sport = :$port"
+
+ echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2
+ ip netns exec ${connector_ns} ss -Menita 1>&2 -o "dport = :$port"
+
+ ret=1
+ return 1
+ fi
+
+ if [ $local_addr = "::" ];then
+ check_mark $listener_ns 6
+ check_mark $connector_ns 6
+ else
+ check_mark $listener_ns 4
+ check_mark $connector_ns 4
+ fi
+
+ check_transfer $cin $sout "file received by server"
+
+ rets=$?
+
+ if [ $retc -eq 0 ] && [ $rets -eq 0 ];then
+ return 0
+ fi
+
+ return 1
+}
+
+make_file()
+{
+ name=$1
+ who=$2
+ size=$3
+
+ dd if=/dev/urandom of="$name" bs=1024 count=$size 2> /dev/null
+ echo -e "\nMPTCP_TEST_FILE_END_MARKER" >> "$name"
+
+ echo "Created $name (size $size KB) containing data sent by $who"
+}
+
+run_tests()
+{
+ listener_ns="$1"
+ connector_ns="$2"
+ connect_addr="$3"
+ lret=0
+
+ do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr}
+
+ lret=$?
+
+ if [ $lret -ne 0 ]; then
+ ret=$lret
+ return
+ fi
+}
+
+sin=$(mktemp)
+sout=$(mktemp)
+cin=$(mktemp)
+cout=$(mktemp)
+init
+make_file "$cin" "client" 1
+make_file "$sin" "server" 1
+trap cleanup EXIT
+
+run_tests $ns1 $ns2 10.0.1.1
+run_tests $ns1 $ns2 dead:beef:1::1
+
+if [ $ret -eq 0 ];then
+ echo "PASS: all packets had packet mark set"
+fi
+
+exit $ret
diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh
index a617e293734c..3c741abe034e 100755
--- a/tools/testing/selftests/net/mptcp/pm_netlink.sh
+++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh
@@ -100,12 +100,12 @@ done
check "ip netns exec $ns1 ./pm_nl_ctl get 9" "id 9 flags signal 10.0.1.9" "hard addr limit"
check "ip netns exec $ns1 ./pm_nl_ctl get 10" "" "above hard addr limit"
-for i in `seq 9 256`; do
+ip netns exec $ns1 ./pm_nl_ctl del 9
+for i in `seq 10 255`; do
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.0.9 id $i
ip netns exec $ns1 ./pm_nl_ctl del $i
- ip netns exec $ns1 ./pm_nl_ctl add 10.0.0.9 id $((i+1))
done
check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags 10.0.1.1
-id 2 flags 10.0.0.9
id 3 flags signal,backup 10.0.1.3
id 4 flags signal 10.0.1.4
id 5 flags signal 10.0.1.5
diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
index 7b4167f3f9a2..115decfdc1ef 100644
--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
@@ -26,7 +26,7 @@ static void syntax(char *argv[])
{
fprintf(stderr, "%s add|get|set|del|flush|dump|accept [<args>]\n", argv[0]);
fprintf(stderr, "\tadd [flags signal|subflow|backup] [id <nr>] [dev <name>] <ip>\n");
- fprintf(stderr, "\tdel <id>\n");
+ fprintf(stderr, "\tdel <id> [<ip>]\n");
fprintf(stderr, "\tget <id>\n");
fprintf(stderr, "\tset <ip> [flags backup|nobackup]\n");
fprintf(stderr, "\tflush\n");
@@ -301,6 +301,7 @@ int del_addr(int fd, int pm_family, int argc, char *argv[])
1024];
struct rtattr *rta, *nest;
struct nlmsghdr *nh;
+ u_int16_t family;
int nest_start;
u_int8_t id;
int off = 0;
@@ -310,11 +311,14 @@ int del_addr(int fd, int pm_family, int argc, char *argv[])
off = init_genl_req(data, pm_family, MPTCP_PM_CMD_DEL_ADDR,
MPTCP_PM_VER);
- /* the only argument is the address id */
- if (argc != 3)
+ /* the argument is the address id; an id of 0 also needs the IP address */
+ if (argc != 3 && argc != 4)
syntax(argv);
id = atoi(argv[2]);
+ /* a zero id must be paired with an IP address */
+ if (!id && argc != 4)
+ syntax(argv);
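+ /* e.g. "pm_nl_ctl del 0 10.0.1.2", as used by the mptcp_join.sh tests */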
nest_start = off;
nest = (void *)(data + off);
@@ -328,6 +332,30 @@ int del_addr(int fd, int pm_family, int argc, char *argv[])
rta->rta_len = RTA_LENGTH(1);
memcpy(RTA_DATA(rta), &id, 1);
off += NLMSG_ALIGN(rta->rta_len);
+
+ if (!id) {
+ /* addr data */
+ rta = (void *)(data + off);
+ if (inet_pton(AF_INET, argv[3], RTA_DATA(rta))) {
+ family = AF_INET;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
+ rta->rta_len = RTA_LENGTH(4);
+ } else if (inet_pton(AF_INET6, argv[3], RTA_DATA(rta))) {
+ family = AF_INET6;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
+ rta->rta_len = RTA_LENGTH(16);
+ } else {
+ error(1, errno, "can't parse ip %s", argv[3]);
+ }
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ /* family */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &family, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+ }
nest->rta_len = off - nest_start;
do_nl_req(fd, nh, off, 0);
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index f039ee57eb3c..3aeef3bcb101 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -7,7 +7,8 @@ ns2="ns2-$rndh"
ns3="ns3-$rndh"
capture=false
ksft_skip=4
-timeout=30
+timeout_poll=30
+timeout_test=$((timeout_poll * 2 + 1))
test_cnt=1
ret=0
bail=0
@@ -157,14 +158,20 @@ do_transfer()
sleep 1
fi
- ip netns exec ${ns3} ./mptcp_connect -jt $timeout -l -p $port 0.0.0.0 < "$sin" > "$sout" &
+ timeout ${timeout_test} \
+ ip netns exec ${ns3} \
+ ./mptcp_connect -jt ${timeout_poll} -l -p $port \
+ 0.0.0.0 < "$sin" > "$sout" &
local spid=$!
wait_local_port_listen "${ns3}" "${port}"
local start
start=$(date +%s%3N)
- ip netns exec ${ns1} ./mptcp_connect -jt $timeout -p $port 10.0.3.3 < "$cin" > "$cout" &
+ timeout ${timeout_test} \
+ ip netns exec ${ns1} \
+ ./mptcp_connect -jt ${timeout_poll} -p $port \
+ 10.0.3.3 < "$cin" > "$cout" &
local cpid=$!
wait $cpid
diff --git a/tools/testing/selftests/net/settings b/tools/testing/selftests/net/settings
new file mode 100644
index 000000000000..694d70710ff0
--- /dev/null
+++ b/tools/testing/selftests/net/settings
@@ -0,0 +1 @@
+timeout=300
diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
index b4cca382d125..59067f64b775 100644
--- a/tools/testing/selftests/net/so_txtime.c
+++ b/tools/testing/selftests/net/so_txtime.c
@@ -2,9 +2,12 @@
/*
* Test the SO_TXTIME API
*
- * Takes two streams of { payload, delivery time }[], one input and one output.
- * Sends the input stream and verifies arrival matches the output stream.
- * The two streams can differ due to out-of-order delivery and drops.
+ * Takes a stream of { payload, delivery time }[], to be sent across two
+ * processes. Start this program in two separate network namespaces or on
+ * connected hosts, one instance in transmit mode and the other in receive
+ * mode using the '-r' option. The receiver compares arrival timestamps to
+ * the expected stream. The sender reads transmit timestamps from the error
+ * queue. The streams can differ due to out-of-order delivery and drops.
*/
#define _GNU_SOURCE
@@ -28,14 +31,17 @@
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
+#include <poll.h>
static int cfg_clockid = CLOCK_TAI;
-static bool cfg_do_ipv4;
-static bool cfg_do_ipv6;
static uint16_t cfg_port = 8000;
static int cfg_variance_us = 4000;
+static uint64_t cfg_start_time_ns;
+static int cfg_mark;
+static bool cfg_rx;
static uint64_t glob_tstart;
+static uint64_t tdeliver_max;
/* encode one timed transmission (of a 1B payload) */
struct timed_send {
@@ -44,18 +50,21 @@ struct timed_send {
};
#define MAX_NUM_PKT 8
-static struct timed_send cfg_in[MAX_NUM_PKT];
-static struct timed_send cfg_out[MAX_NUM_PKT];
+static struct timed_send cfg_buf[MAX_NUM_PKT];
static int cfg_num_pkt;
static int cfg_errq_level;
static int cfg_errq_type;
-static uint64_t gettime_ns(void)
+static struct sockaddr_storage cfg_dst_addr;
+static struct sockaddr_storage cfg_src_addr;
+static socklen_t cfg_alen;
+
+static uint64_t gettime_ns(clockid_t clock)
{
struct timespec ts;
- if (clock_gettime(cfg_clockid, &ts))
+ if (clock_gettime(clock, &ts))
error(1, errno, "gettime");
return ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
@@ -75,6 +84,8 @@ static void do_send_one(int fdt, struct timed_send *ts)
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
+ msg.msg_name = (struct sockaddr *)&cfg_dst_addr;
+ msg.msg_namelen = cfg_alen;
if (ts->delay_us >= 0) {
memset(control, 0, sizeof(control));
@@ -82,6 +93,8 @@ static void do_send_one(int fdt, struct timed_send *ts)
msg.msg_controllen = sizeof(control);
tdeliver = glob_tstart + ts->delay_us * 1000;
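+ /* remember the latest scheduled delivery time; the sender
+ * polls the error queue for timestamps until it passes
+ */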
+ tdeliver_max = tdeliver_max > tdeliver ?
+ tdeliver_max : tdeliver;
cm = CMSG_FIRSTHDR(&msg);
cm->cmsg_level = SOL_SOCKET;
@@ -98,7 +111,7 @@ static void do_send_one(int fdt, struct timed_send *ts)
}
-static bool do_recv_one(int fdr, struct timed_send *ts)
+static void do_recv_one(int fdr, struct timed_send *ts)
{
int64_t tstop, texpect;
char rbuf[2];
@@ -106,13 +119,13 @@ static bool do_recv_one(int fdr, struct timed_send *ts)
ret = recv(fdr, rbuf, sizeof(rbuf), 0);
if (ret == -1 && errno == EAGAIN)
- return true;
+ error(1, EAGAIN, "recv: timeout");
if (ret == -1)
error(1, errno, "read");
if (ret != 1)
error(1, 0, "read: %dB", ret);
- tstop = (gettime_ns() - glob_tstart) / 1000;
+ tstop = (gettime_ns(cfg_clockid) - glob_tstart) / 1000;
texpect = ts->delay_us >= 0 ? ts->delay_us : 0;
fprintf(stderr, "payload:%c delay:%lld expected:%lld (us)\n",
@@ -123,8 +136,6 @@ static bool do_recv_one(int fdr, struct timed_send *ts)
if (llabs(tstop - texpect) > cfg_variance_us)
error(1, 0, "exceeds variance (%d us)", cfg_variance_us);
-
- return false;
}
static void do_recv_verify_empty(int fdr)
@@ -137,18 +148,18 @@ static void do_recv_verify_empty(int fdr)
error(1, 0, "recv: not empty as expected (%d, %d)", ret, errno);
}
-static void do_recv_errqueue_timeout(int fdt)
+static int do_recv_errqueue_timeout(int fdt)
{
char control[CMSG_SPACE(sizeof(struct sock_extended_err)) +
CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0};
char data[sizeof(struct ethhdr) + sizeof(struct ipv6hdr) +
sizeof(struct udphdr) + 1];
struct sock_extended_err *err;
+ int ret, num_tstamp = 0;
struct msghdr msg = {0};
struct iovec iov = {0};
struct cmsghdr *cm;
int64_t tstamp = 0;
- int ret;
iov.iov_base = data;
iov.iov_len = sizeof(data);
@@ -206,9 +217,47 @@ static void do_recv_errqueue_timeout(int fdt)
msg.msg_flags = 0;
msg.msg_controllen = sizeof(control);
+ num_tstamp++;
}
- error(1, 0, "recv: timeout");
+ return num_tstamp;
+}
+
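+/* poll the error queue until one tx timestamp per packet has arrived,
+ * or the latest scheduled delivery time has passed
+ */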
+static void recv_errqueue_msgs(int fdt)
+{
+ struct pollfd pfd = { .fd = fdt, .events = POLLERR };
+ const int timeout_ms = 10;
+ int ret, num_tstamp = 0;
+
+ do {
+ ret = poll(&pfd, 1, timeout_ms);
+ if (ret == -1)
+ error(1, errno, "poll");
+
+ if (ret && (pfd.revents & POLLERR))
+ num_tstamp += do_recv_errqueue_timeout(fdt);
+
+ if (num_tstamp == cfg_num_pkt)
+ break;
+
+ } while (gettime_ns(cfg_clockid) < tdeliver_max);
+}
+
+static void start_time_wait(void)
+{
+ uint64_t now;
+ int err;
+
+ if (!cfg_start_time_ns)
+ return;
+
+ now = gettime_ns(CLOCK_REALTIME);
+ if (cfg_start_time_ns < now)
+ return;
+
+ err = usleep((cfg_start_time_ns - now) / 1000);
+ if (err)
+ error(1, errno, "usleep");
}
static void setsockopt_txtime(int fd)
@@ -245,6 +294,10 @@ static int setup_tx(struct sockaddr *addr, socklen_t alen)
setsockopt_txtime(fd);
+ if (cfg_mark &&
+ setsockopt(fd, SOL_SOCKET, SO_MARK, &cfg_mark, sizeof(cfg_mark)))
+ error(1, errno, "setsockopt mark");
+
return fd;
}
@@ -266,31 +319,70 @@ static int setup_rx(struct sockaddr *addr, socklen_t alen)
return fd;
}
-static void do_test(struct sockaddr *addr, socklen_t alen)
+static void do_test_tx(struct sockaddr *addr, socklen_t alen)
{
- int fdt, fdr, i;
+ int fdt, i;
fprintf(stderr, "\nSO_TXTIME ipv%c clock %s\n",
addr->sa_family == PF_INET ? '4' : '6',
cfg_clockid == CLOCK_TAI ? "tai" : "monotonic");
fdt = setup_tx(addr, alen);
- fdr = setup_rx(addr, alen);
- glob_tstart = gettime_ns();
+ start_time_wait();
+ glob_tstart = gettime_ns(cfg_clockid);
for (i = 0; i < cfg_num_pkt; i++)
- do_send_one(fdt, &cfg_in[i]);
+ do_send_one(fdt, &cfg_buf[i]);
+
+ recv_errqueue_msgs(fdt);
+
+ if (close(fdt))
+ error(1, errno, "close t");
+}
+
+static void do_test_rx(struct sockaddr *addr, socklen_t alen)
+{
+ int fdr, i;
+
+ fdr = setup_rx(addr, alen);
+
+ start_time_wait();
+ glob_tstart = gettime_ns(cfg_clockid);
+
for (i = 0; i < cfg_num_pkt; i++)
- if (do_recv_one(fdr, &cfg_out[i]))
- do_recv_errqueue_timeout(fdt);
+ do_recv_one(fdr, &cfg_buf[i]);
do_recv_verify_empty(fdr);
if (close(fdr))
error(1, errno, "close r");
- if (close(fdt))
- error(1, errno, "close t");
+}
+
+static void setup_sockaddr(int domain, const char *str_addr,
+ struct sockaddr_storage *sockaddr)
+{
+ struct sockaddr_in6 *addr6 = (void *) sockaddr;
+ struct sockaddr_in *addr4 = (void *) sockaddr;
+
+ switch (domain) {
+ case PF_INET:
+ memset(addr4, 0, sizeof(*addr4));
+ addr4->sin_family = AF_INET;
+ addr4->sin_port = htons(cfg_port);
+ if (str_addr &&
+ inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
+ error(1, 0, "ipv4 parse error: %s", str_addr);
+ break;
+ case PF_INET6:
+ memset(addr6, 0, sizeof(*addr6));
+ addr6->sin6_family = AF_INET6;
+ addr6->sin6_port = htons(cfg_port);
+ if (str_addr &&
+ inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
+ error(1, 0, "ipv6 parse error: %s", str_addr);
+ break;
+ }
}
static int parse_io(const char *optarg, struct timed_send *array)
@@ -323,17 +415,46 @@ static int parse_io(const char *optarg, struct timed_send *array)
return aoff / 2;
}
+static void usage(const char *progname)
+{
+ fprintf(stderr, "\nUsage: %s [options] <payload>\n"
+ "Options:\n"
+ " -4 only IPv4\n"
+ " -6 only IPv6\n"
+ " -c <clock> monotonic (default) or tai\n"
+ " -D <addr> destination IP address (server)\n"
+ " -S <addr> source IP address (client)\n"
+ " -r run rx mode\n"
+ " -t <nsec> start time (UTC nanoseconds)\n"
+ " -m <mark> socket mark\n"
+ "\n",
+ progname);
+ exit(1);
+}
+
static void parse_opts(int argc, char **argv)
{
- int c, ilen, olen;
+ char *daddr = NULL, *saddr = NULL;
+ int domain = PF_UNSPEC;
+ int c;
- while ((c = getopt(argc, argv, "46c:")) != -1) {
+ while ((c = getopt(argc, argv, "46c:S:D:rt:m:")) != -1) {
switch (c) {
case '4':
- cfg_do_ipv4 = true;
+ if (domain != PF_UNSPEC)
+ error(1, 0, "Pass one of -4 or -6");
+ domain = PF_INET;
+ cfg_alen = sizeof(struct sockaddr_in);
+ cfg_errq_level = SOL_IP;
+ cfg_errq_type = IP_RECVERR;
break;
case '6':
- cfg_do_ipv6 = true;
+ if (domain != PF_UNSPEC)
+ error(1, 0, "Pass one of -4 or -6");
+ domain = PF_INET6;
+ cfg_alen = sizeof(struct sockaddr_in6);
+ cfg_errq_level = SOL_IPV6;
+ cfg_errq_type = IPV6_RECVERR;
break;
case 'c':
if (!strcmp(optarg, "tai"))
@@ -344,50 +465,50 @@ static void parse_opts(int argc, char **argv)
else
error(1, 0, "unknown clock id %s", optarg);
break;
+ case 'S':
+ saddr = optarg;
+ break;
+ case 'D':
+ daddr = optarg;
+ break;
+ case 'r':
+ cfg_rx = true;
+ break;
+ case 't':
+ cfg_start_time_ns = strtol(optarg, NULL, 0);
+ break;
+ case 'm':
+ cfg_mark = strtol(optarg, NULL, 0);
+ break;
default:
- error(1, 0, "parse error at %d", optind);
+ usage(argv[0]);
}
}
- if (argc - optind != 2)
- error(1, 0, "Usage: %s [-46] -c <clock> <in> <out>", argv[0]);
+ if (argc - optind != 1)
+ usage(argv[0]);
+
+ if (domain == PF_UNSPEC)
+ error(1, 0, "Pass one of -4 or -6");
+ if (!daddr)
+ error(1, 0, "-D <server addr> required\n");
+ if (!cfg_rx && !saddr)
+ error(1, 0, "-S <client addr> required\n");
- ilen = parse_io(argv[optind], cfg_in);
- olen = parse_io(argv[optind + 1], cfg_out);
- if (ilen != olen)
- error(1, 0, "i/o streams len mismatch (%d, %d)\n", ilen, olen);
- cfg_num_pkt = ilen;
+ setup_sockaddr(domain, daddr, &cfg_dst_addr);
+ setup_sockaddr(domain, saddr, &cfg_src_addr);
+
+ cfg_num_pkt = parse_io(argv[optind], cfg_buf);
}
int main(int argc, char **argv)
{
parse_opts(argc, argv);
- if (cfg_do_ipv6) {
- struct sockaddr_in6 addr6 = {0};
-
- addr6.sin6_family = AF_INET6;
- addr6.sin6_port = htons(cfg_port);
- addr6.sin6_addr = in6addr_loopback;
-
- cfg_errq_level = SOL_IPV6;
- cfg_errq_type = IPV6_RECVERR;
-
- do_test((void *)&addr6, sizeof(addr6));
- }
-
- if (cfg_do_ipv4) {
- struct sockaddr_in addr4 = {0};
-
- addr4.sin_family = AF_INET;
- addr4.sin_port = htons(cfg_port);
- addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
-
- cfg_errq_level = SOL_IP;
- cfg_errq_type = IP_RECVERR;
-
- do_test((void *)&addr4, sizeof(addr4));
- }
+ if (cfg_rx)
+ do_test_rx((void *)&cfg_dst_addr, cfg_alen);
+ else
+ do_test_tx((void *)&cfg_src_addr, cfg_alen);
return 0;
}
diff --git a/tools/testing/selftests/net/so_txtime.sh b/tools/testing/selftests/net/so_txtime.sh
index 3f7800eaecb1..3f06f4d286a9 100755
--- a/tools/testing/selftests/net/so_txtime.sh
+++ b/tools/testing/selftests/net/so_txtime.sh
@@ -3,32 +3,85 @@
#
# Regression tests for the SO_TXTIME interface
-# Run in network namespace
-if [[ $# -eq 0 ]]; then
- if ! ./in_netns.sh $0 __subprocess; then
- # test is time sensitive, can be flaky
- echo "test failed: retry once"
- ./in_netns.sh $0 __subprocess
+set -e
+
+readonly DEV="veth0"
+readonly BIN="./so_txtime"
+
+readonly RAND="$(mktemp -u XXXXXX)"
+readonly NSPREFIX="ns-${RAND}"
+readonly NS1="${NSPREFIX}1"
+readonly NS2="${NSPREFIX}2"
+
+readonly SADDR4='192.168.1.1'
+readonly DADDR4='192.168.1.2'
+readonly SADDR6='fd::1'
+readonly DADDR6='fd::2'
+
+cleanup() {
+ ip netns del "${NS2}"
+ ip netns del "${NS1}"
+}
+
+trap cleanup EXIT
+
+# Create virtual ethernet pair between network namespaces
+ip netns add "${NS1}"
+ip netns add "${NS2}"
+
+ip link add "${DEV}" netns "${NS1}" type veth \
+ peer name "${DEV}" netns "${NS2}"
+
+# Bring the devices up
+ip -netns "${NS1}" link set "${DEV}" up
+ip -netns "${NS2}" link set "${DEV}" up
+
+# Set fixed MAC addresses on the devices
+ip -netns "${NS1}" link set dev "${DEV}" address 02:02:02:02:02:02
+ip -netns "${NS2}" link set dev "${DEV}" address 06:06:06:06:06:06
+
+# Add fixed IP addresses to the devices
+ip -netns "${NS1}" addr add 192.168.1.1/24 dev "${DEV}"
+ip -netns "${NS2}" addr add 192.168.1.2/24 dev "${DEV}"
+ip -netns "${NS1}" addr add fd::1/64 dev "${DEV}" nodad
+ip -netns "${NS2}" addr add fd::2/64 dev "${DEV}" nodad
+
+do_test() {
+ local readonly IP="$1"
+ local readonly CLOCK="$2"
+ local readonly TXARGS="$3"
+ local readonly RXARGS="$4"
+
+ if [[ "${IP}" == "4" ]]; then
+ local readonly SADDR="${SADDR4}"
+ local readonly DADDR="${DADDR4}"
+ elif [[ "${IP}" == "6" ]]; then
+ local readonly SADDR="${SADDR6}"
+ local readonly DADDR="${DADDR6}"
+ else
+ echo "Invalid IP version ${IP}"
+ exit 1
fi
- exit $?
-fi
+ local readonly START="$(date +%s%N --date="+ 0.1 seconds")"
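+ # both peers sleep until the same absolute start time so tx and rx
+ # delay measurements share a common reference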
+ ip netns exec "${NS2}" "${BIN}" -"${IP}" -c "${CLOCK}" -t "${START}" -S "${SADDR}" -D "${DADDR}" "${RXARGS}" -r &
+ ip netns exec "${NS1}" "${BIN}" -"${IP}" -c "${CLOCK}" -t "${START}" -S "${SADDR}" -D "${DADDR}" "${TXARGS}"
+ wait "$!"
+}
-set -e
+ip netns exec "${NS1}" tc qdisc add dev "${DEV}" root fq
+do_test 4 mono a,-1 a,-1
+do_test 6 mono a,0 a,0
+do_test 6 mono a,10 a,10
+do_test 4 mono a,10,b,20 a,10,b,20
+do_test 6 mono a,20,b,10 b,20,a,20
-tc qdisc add dev lo root fq
-./so_txtime -4 -6 -c mono a,-1 a,-1
-./so_txtime -4 -6 -c mono a,0 a,0
-./so_txtime -4 -6 -c mono a,10 a,10
-./so_txtime -4 -6 -c mono a,10,b,20 a,10,b,20
-./so_txtime -4 -6 -c mono a,20,b,10 b,20,a,20
-
-if tc qdisc replace dev lo root etf clockid CLOCK_TAI delta 400000; then
- ! ./so_txtime -4 -6 -c tai a,-1 a,-1
- ! ./so_txtime -4 -6 -c tai a,0 a,0
- ./so_txtime -4 -6 -c tai a,10 a,10
- ./so_txtime -4 -6 -c tai a,10,b,20 a,10,b,20
- ./so_txtime -4 -6 -c tai a,20,b,10 b,10,a,20
+if ip netns exec "${NS1}" tc qdisc replace dev "${DEV}" root etf clockid CLOCK_TAI delta 400000; then
+ ! do_test 4 tai a,-1 a,-1
+ ! do_test 6 tai a,0 a,0
+ do_test 6 tai a,10 a,10
+ do_test 4 tai a,10,b,20 a,10,b,20
+ do_test 6 tai a,20,b,10 b,10,a,20
else
echo "tc ($(tc -V)) does not support qdisc etf. skipping"
fi
diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
new file mode 100755
index 000000000000..a8fa64136282
--- /dev/null
+++ b/tools/testing/selftests/net/udpgro_fwd.sh
@@ -0,0 +1,251 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+readonly BASE="ns-$(mktemp -u XXXXXX)"
+readonly SRC=2
+readonly DST=1
+readonly DST_NAT=100
+readonly NS_SRC=$BASE$SRC
+readonly NS_DST=$BASE$DST
+
+# "baremetal" network used for raw UDP traffic
+readonly BM_NET_V4=192.168.1.
+readonly BM_NET_V6=2001:db8::
+
+# "overlay" network used for UDP over UDP tunnel traffic
+readonly OL_NET_V4=172.16.1.
+readonly OL_NET_V6=2001:db8:1::
+readonly NPROCS=`nproc`
+
+cleanup() {
+ local ns
+ local -r jobs="$(jobs -p)"
+ [ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null
+
+ for ns in $NS_SRC $NS_DST; do
+ ip netns del $ns 2>/dev/null
+ done
+}
+
+trap cleanup EXIT
+
+create_ns() {
+ local net
+ local ns
+
+ for ns in $NS_SRC $NS_DST; do
+ ip netns add $ns
+ ip -n $ns link set dev lo up
+ done
+
+ ip link add name veth$SRC type veth peer name veth$DST
+
+ for ns in $SRC $DST; do
+ ip link set dev veth$ns netns $BASE$ns
+ ip -n $BASE$ns link set dev veth$ns up
+ ip -n $BASE$ns addr add dev veth$ns $BM_NET_V4$ns/24
+ ip -n $BASE$ns addr add dev veth$ns $BM_NET_V6$ns/64 nodad
+ done
+ ip -n $NS_DST link set veth$DST xdp object ../bpf/xdp_dummy.o section xdp_dummy 2>/dev/null
+}
+
+create_vxlan_endpoint() {
+ local -r netns=$1
+ local -r bm_dev=$2
+ local -r bm_rem_addr=$3
+ local -r vxlan_dev=$4
+ local -r vxlan_id=$5
+ local -r vxlan_port=4789
+
+ ip -n $netns link set dev $bm_dev up
+ ip -n $netns link add dev $vxlan_dev type vxlan id $vxlan_id \
+ dstport $vxlan_port remote $bm_rem_addr
+ ip -n $netns link set dev $vxlan_dev up
+}
+
+create_vxlan_pair() {
+ local ns
+
+ create_ns
+
+ for ns in $SRC $DST; do
+ # note that 3 - $SRC == $DST and 3 - $DST == $SRC
+ create_vxlan_endpoint $BASE$ns veth$ns $BM_NET_V4$((3 - $ns)) vxlan$ns 4
+ ip -n $BASE$ns addr add dev vxlan$ns $OL_NET_V4$ns/24
+ done
+ for ns in $SRC $DST; do
+ create_vxlan_endpoint $BASE$ns veth$ns $BM_NET_V6$((3 - $ns)) vxlan6$ns 6
+ ip -n $BASE$ns addr add dev vxlan6$ns $OL_NET_V6$ns/24 nodad
+ done
+}
+
+is_ipv6() {
+ if [[ $1 =~ .*:.* ]]; then
+ return 0
+ fi
+ return 1
+}
+
+run_test() {
+ local -r msg=$1
+ local -r dst=$2
+ local -r pkts=$3
+ local -r vxpkts=$4
+ local bind=$5
+ local rx_args=""
+ local rx_family="-4"
+ local family=-4
+ local filter=IpInReceives
+ local ipt=iptables
+
+ printf "%-40s" "$msg"
+
+ if is_ipv6 $dst; then
+ # rx program does not support '-6' and implies ipv6 usage by default
+ rx_family=""
+ family=-6
+ filter=Ip6InReceives
+ ipt=ip6tables
+ fi
+
+ rx_args="$rx_family"
+ [ -n "$bind" ] && rx_args="$rx_args -b $bind"
+
+ # send a single GSO packet, segmented into 10 UDP frames.
+ # Always expect 10 UDP frames on the RX side, as the rx socket
+ # does not enable GRO
+ ip netns exec $NS_DST $ipt -A INPUT -p udp --dport 4789
+ ip netns exec $NS_DST $ipt -A INPUT -p udp --dport 8000
+ ip netns exec $NS_DST ./udpgso_bench_rx -C 1000 -R 10 -n 10 -l 1300 $rx_args &
+ local spid=$!
+ sleep 0.1
+ ip netns exec $NS_SRC ./udpgso_bench_tx $family -M 1 -s 13000 -S 1300 -D $dst
+ local retc=$?
+ wait $spid
+ local rets=$?
+ if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
+ echo " fail client exit code $retc, server $rets"
+ ret=1
+ return
+ fi
+
+ local rcv=`ip netns exec $NS_DST $ipt"-save" -c | grep 'dport 8000' | \
+ sed -e 's/\[//' -e 's/:.*//'`
+ if [ $rcv != $pkts ]; then
+ echo " fail - received $rvs packets, expected $pkts"
+ ret=1
+ return
+ fi
+
+ local vxrcv=`ip netns exec $NS_DST $ipt"-save" -c | grep 'dport 4789' | \
+ sed -e 's/\[//' -e 's/:.*//'`
+
+ # upper net can generate a little noise, allow some tolerance
+ if [ $vxrcv -lt $vxpkts -o $vxrcv -gt $((vxpkts + 3)) ]; then
+ echo " fail - received $vxrcv vxlan packets, expected $vxpkts"
+ ret=1
+ return
+ fi
+ echo " ok"
+}
+
+run_bench() {
+ local -r msg=$1
+ local -r dst=$2
+ local family=-4
+
+ printf "%-40s" "$msg"
+ if [ $NPROCS -lt 2 ]; then
+ echo " skip - needed 2 CPUs found $NPROCS"
+ return
+ fi
+
+ is_ipv6 $dst && family=-6
+
+ # bind the sender and the receiver to different CPUs to try
+ # to get reproducible results
+ ip netns exec $NS_DST bash -c "echo 2 > /sys/class/net/veth$DST/queues/rx-0/rps_cpus"
+ ip netns exec $NS_DST taskset 0x2 ./udpgso_bench_rx -C 1000 -R 10 &
+ local spid=$!
+ sleep 0.1
+ ip netns exec $NS_SRC taskset 0x1 ./udpgso_bench_tx $family -l 3 -S 1300 -D $dst
+ local retc=$?
+ wait $spid
+ local rets=$?
+ if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
+ echo " fail client exit code $retc, server $rets"
+ ret=1
+ return
+ fi
+}
+
+for family in 4 6; do
+ BM_NET=$BM_NET_V4
+ OL_NET=$OL_NET_V4
+ IPT=iptables
+ SUFFIX=24
+ VXDEV=vxlan
+
+ if [ $family = 6 ]; then
+ BM_NET=$BM_NET_V6
+ OL_NET=$OL_NET_V6
+ SUFFIX="64 nodad"
+ VXDEV=vxlan6
+ IPT=ip6tables
+ fi
+
+ echo "IPv$family"
+
+ create_ns
+ run_test "No GRO" $BM_NET$DST 10 0
+ cleanup
+
+ create_ns
+ ip netns exec $NS_DST ethtool -K veth$DST rx-gro-list on
+ run_test "GRO frag list" $BM_NET$DST 1 0
+ cleanup
+
+ # UDP GRO fwd skips aggregation when it finds a UDP socket with the GRO
+ # option; if there is a UDP tunnel in the running system, such a lookup
+ # takes place.
+ # use NAT to circumvent GRO FWD check
+ create_ns
+ ip -n $NS_DST addr add dev veth$DST $BM_NET$DST_NAT/$SUFFIX
+ ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on
+ ip netns exec $NS_DST $IPT -t nat -I PREROUTING -d $BM_NET$DST_NAT \
+ -j DNAT --to-destination $BM_NET$DST
+ run_test "GRO fwd" $BM_NET$DST_NAT 1 0 $BM_NET$DST
+ cleanup
+
+ create_ns
+ run_bench "UDP fwd perf" $BM_NET$DST
+ ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on
+ run_bench "UDP GRO fwd perf" $BM_NET$DST
+ cleanup
+
+ create_vxlan_pair
+ ip netns exec $NS_DST ethtool -K veth$DST rx-gro-list on
+ run_test "GRO frag list over UDP tunnel" $OL_NET$DST 1 1
+ cleanup
+
+ # use NAT to circumvent GRO FWD check
+ create_vxlan_pair
+ ip -n $NS_DST addr add dev $VXDEV$DST $OL_NET$DST_NAT/$SUFFIX
+ ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on
+ ip netns exec $NS_DST $IPT -t nat -I PREROUTING -d $OL_NET$DST_NAT \
+ -j DNAT --to-destination $OL_NET$DST
+
+ # load arp cache before running the test to reduce the amount of
+ # stray traffic on top of the UDP tunnel
+ ip netns exec $NS_SRC ping -q -c 1 $OL_NET$DST_NAT >/dev/null
+ run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 1 1 $OL_NET$DST
+ cleanup
+
+ create_vxlan_pair
+ run_bench "UDP tunnel fwd perf" $OL_NET$DST
+ ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on
+ run_bench "UDP tunnel GRO fwd perf" $OL_NET$DST
+ cleanup
+done
+
+exit $ret
diff --git a/tools/testing/selftests/net/veth.sh b/tools/testing/selftests/net/veth.sh
new file mode 100755
index 000000000000..2fedc0781ce8
--- /dev/null
+++ b/tools/testing/selftests/net/veth.sh
@@ -0,0 +1,177 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+readonly STATS="$(mktemp -p /tmp ns-XXXXXX)"
+readonly BASE=`basename $STATS`
+readonly SRC=2
+readonly DST=1
+readonly DST_NAT=100
+readonly NS_SRC=$BASE$SRC
+readonly NS_DST=$BASE$DST
+
+# "baremetal" network used for raw UDP traffic
+readonly BM_NET_V4=192.168.1.
+readonly BM_NET_V6=2001:db8::
+
+readonly NPROCS=`nproc`
+ret=0
+
+cleanup() {
+ local ns
+ local -r jobs="$(jobs -p)"
+ [ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null
+ rm -f $STATS
+
+ for ns in $NS_SRC $NS_DST; do
+ ip netns del $ns 2>/dev/null
+ done
+}
+
+trap cleanup EXIT
+
+create_ns() {
+ local ns
+
+ for ns in $NS_SRC $NS_DST; do
+ ip netns add $ns
+ ip -n $ns link set dev lo up
+ done
+
+ ip link add name veth$SRC type veth peer name veth$DST
+
+ for ns in $SRC $DST; do
+ ip link set dev veth$ns netns $BASE$ns up
+ ip -n $BASE$ns addr add dev veth$ns $BM_NET_V4$ns/24
+ ip -n $BASE$ns addr add dev veth$ns $BM_NET_V6$ns/64 nodad
+ done
+ echo "#kernel" > $BASE
+ chmod go-rw $BASE
+}
+
+__chk_flag() {
+ local msg="$1"
+ local target=$2
+ local expected=$3
+ local flagname=$4
+
+ local flag=`ip netns exec $BASE$target ethtool -k veth$target |\
+ grep $flagname | awk '{print $2}'`
+
+ printf "%-60s" "$msg"
+ if [ "$flag" = "$expected" ]; then
+ echo " ok "
+ else
+ echo " fail - expected $expected found $flag"
+ ret=1
+ fi
+}
+
+chk_gro_flag() {
+ __chk_flag "$1" $2 $3 generic-receive-offload
+}
+
+chk_tso_flag() {
+ __chk_flag "$1" $2 $3 tcp-segmentation-offload
+}
+
+chk_gro() {
+ local msg="$1"
+ local expected=$2
+
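+ # a single 13000B GSO send is segmented into 10 1300B frames;
+ # with GRO on, the receiver counts one aggregated datagram instead of 10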
+ ip netns exec $BASE$SRC ping -qc 1 $BM_NET_V4$DST >/dev/null
+ NSTAT_HISTORY=$STATS ip netns exec $NS_DST nstat -n
+
+ printf "%-60s" "$msg"
+ ip netns exec $BASE$DST ./udpgso_bench_rx -C 1000 -R 10 &
+ local spid=$!
+ sleep 0.1
+
+ ip netns exec $NS_SRC ./udpgso_bench_tx -4 -s 13000 -S 1300 -M 1 -D $BM_NET_V4$DST
+ local retc=$?
+ wait $spid
+ local rets=$?
+ if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
+ echo " fail client exit code $retc, server $rets"
+ ret=1
+ return
+ fi
+
+ local pkts=`NSTAT_HISTORY=$STATS ip netns exec $NS_DST nstat IpInReceives | \
+ awk '{print $2}' | tail -n 1`
+ if [ "$pkts" = "$expected" ]; then
+ echo " ok "
+ else
+ echo " fail - got $pkts packets, expected $expected "
+ ret=1
+ fi
+}
+
+if [ ! -f ../bpf/xdp_dummy.o ]; then
+ echo "Missing xdp_dummy helper. Build bpf selftest first"
+ exit -1
+fi
+
+create_ns
+chk_gro_flag "default - gro flag" $SRC off
+chk_gro_flag " - peer gro flag" $DST off
+chk_tso_flag " - tso flag" $SRC on
+chk_tso_flag " - peer tso flag" $DST on
+chk_gro " - aggregation" 1
+ip netns exec $NS_SRC ethtool -K veth$SRC tx-udp-segmentation off
+chk_gro " - aggregation with TSO off" 10
+cleanup
+
+create_ns
+ip netns exec $NS_DST ethtool -K veth$DST gro on
+chk_gro_flag "with gro on - gro flag" $DST on
+chk_gro_flag " - peer gro flag" $SRC off
+chk_tso_flag " - tso flag" $SRC on
+chk_tso_flag " - peer tso flag" $DST on
+ip netns exec $NS_SRC ethtool -K veth$SRC tx-udp-segmentation off
+ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on
+chk_gro " - aggregation with TSO off" 1
+cleanup
+
+create_ns
+ip -n $NS_DST link set dev veth$DST down
+ip netns exec $NS_DST ethtool -K veth$DST gro on
+chk_gro_flag "with gro enabled on link down - gro flag" $DST on
+chk_gro_flag " - peer gro flag" $SRC off
+chk_tso_flag " - tso flag" $SRC on
+chk_tso_flag " - peer tso flag" $DST on
+ip -n $NS_DST link set dev veth$DST up
+ip netns exec $NS_SRC ethtool -K veth$SRC tx-udp-segmentation off
+ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on
+chk_gro " - aggregation with TSO off" 1
+cleanup
+
+create_ns
+ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o section xdp_dummy 2>/dev/null
+chk_gro_flag "with xdp attached - gro flag" $DST on
+chk_gro_flag " - peer gro flag" $SRC off
+chk_tso_flag " - tso flag" $SRC off
+chk_tso_flag " - peer tso flag" $DST on
+ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on
+chk_gro " - aggregation" 1
+
+
+ip -n $NS_DST link set dev veth$DST down
+ip -n $NS_SRC link set dev veth$SRC down
+chk_gro_flag " - after dev off, flag" $DST on
+chk_gro_flag " - peer flag" $SRC off
+
+ip netns exec $NS_DST ethtool -K veth$DST gro on
+ip -n $NS_DST link set dev veth$DST xdp off
+chk_gro_flag " - after gro on xdp off, gro flag" $DST on
+chk_gro_flag " - peer gro flag" $SRC off
+chk_tso_flag " - tso flag" $SRC on
+chk_tso_flag " - peer tso flag" $DST on
+ip -n $NS_DST link set dev veth$DST up
+ip -n $NS_SRC link set dev veth$SRC up
+chk_gro " - aggregation" 1
+
+ip netns exec $NS_DST ethtool -K veth$DST gro off
+ip netns exec $NS_SRC ethtool -K veth$SRC tx-udp-segmentation off
+chk_gro "aggregation again with default and TSO off" 10
+
+exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_flowtable.sh b/tools/testing/selftests/netfilter/nft_flowtable.sh
index 431296c0f91c..427d94816f2d 100755
--- a/tools/testing/selftests/netfilter/nft_flowtable.sh
+++ b/tools/testing/selftests/netfilter/nft_flowtable.sh
@@ -371,6 +371,88 @@ else
ip netns exec nsr1 nft list ruleset
fi
+# Another test:
+# Add bridge interface br0 to Router1, with NAT enabled.
+ip -net nsr1 link add name br0 type bridge
+ip -net nsr1 addr flush dev veth0
+ip -net nsr1 link set up dev veth0
+ip -net nsr1 link set veth0 master br0
+ip -net nsr1 addr add 10.0.1.1/24 dev br0
+ip -net nsr1 addr add dead:1::1/64 dev br0
+ip -net nsr1 link set up dev br0
+
+ip netns exec nsr1 sysctl net.ipv4.conf.br0.forwarding=1 > /dev/null
+
+# br0 with NAT enabled.
+ip netns exec nsr1 nft -f - <<EOF
+flush table ip nat
+table ip nat {
+ chain prerouting {
+ type nat hook prerouting priority 0; policy accept;
+ meta iif "br0" ip daddr 10.6.6.6 tcp dport 1666 counter dnat ip to 10.0.2.99:12345
+ }
+
+ chain postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ meta oifname "veth1" counter masquerade
+ }
+}
+EOF
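+# The prerouting rule DNATs 10.6.6.6:1666 arriving on br0 to ns2's
+# 10.0.2.99:12345; postrouting masquerades everything leaving via veth1.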
+
+if test_tcp_forwarding_nat ns1 ns2; then
+ echo "PASS: flow offloaded for ns1/ns2 with bridge NAT"
+else
+ echo "FAIL: flow offload for ns1/ns2 with bridge NAT" 1>&2
+ ip netns exec nsr1 nft list ruleset
+ ret=1
+fi
+
+# Another test:
+# Add bridge interface br0 to Router1, with NAT and VLAN.
+ip -net nsr1 link set veth0 nomaster
+ip -net nsr1 link set down dev veth0
+ip -net nsr1 link add link veth0 name veth0.10 type vlan id 10
+ip -net nsr1 link set up dev veth0
+ip -net nsr1 link set up dev veth0.10
+ip -net nsr1 link set veth0.10 master br0
+
+ip -net ns1 addr flush dev eth0
+ip -net ns1 link add link eth0 name eth0.10 type vlan id 10
+ip -net ns1 link set eth0 up
+ip -net ns1 link set eth0.10 up
+ip -net ns1 addr add 10.0.1.99/24 dev eth0.10
+ip -net ns1 route add default via 10.0.1.1
+ip -net ns1 addr add dead:1::99/64 dev eth0.10
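+# ns1 now addresses eth0.10, so traffic towards the router is VLAN 10 tagged
+# and enters br0 via veth0.10.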
+
+if test_tcp_forwarding_nat ns1 ns2; then
+ echo "PASS: flow offloaded for ns1/ns2 with bridge NAT and VLAN"
+else
+ echo "FAIL: flow offload for ns1/ns2 with bridge NAT and VLAN" 1>&2
+ ip netns exec nsr1 nft list ruleset
+ ret=1
+fi
+
+# restore test topology (remove bridge and VLAN)
+ip -net nsr1 link set veth0 nomaster
+ip -net nsr1 link set veth0 down
+ip -net nsr1 link set veth0.10 down
+ip -net nsr1 link delete veth0.10 type vlan
+ip -net nsr1 link delete br0 type bridge
+ip -net ns1 addr flush dev eth0.10
+ip -net ns1 link set eth0.10 down
+ip -net ns1 link set eth0 down
+ip -net ns1 link delete eth0.10 type vlan
+
+# restore address in ns1 and nsr1
+ip -net ns1 link set eth0 up
+ip -net ns1 addr add 10.0.1.99/24 dev eth0
+ip -net ns1 route add default via 10.0.1.1
+ip -net ns1 addr add dead:1::99/64 dev eth0
+ip -net ns1 route add default via dead:1::1
+ip -net nsr1 addr add 10.0.1.1/24 dev veth0
+ip -net nsr1 addr add dead:1::1/64 dev veth0
+ip -net nsr1 link set up dev veth0
+
KEY_SHA="0x"$(ps -xaf | sha1sum | cut -d " " -f 1)
KEY_AES="0x"$(ps -xaf | md5sum | cut -d " " -f 1)
SPI1=$RANDOM
diff --git a/tools/testing/selftests/perf_events/.gitignore b/tools/testing/selftests/perf_events/.gitignore
new file mode 100644
index 000000000000..790c47001e77
--- /dev/null
+++ b/tools/testing/selftests/perf_events/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+sigtrap_threads
+remove_on_exec
diff --git a/tools/testing/selftests/perf_events/Makefile b/tools/testing/selftests/perf_events/Makefile
new file mode 100644
index 000000000000..fcafa5f0d34c
--- /dev/null
+++ b/tools/testing/selftests/perf_events/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS += -Wl,-no-as-needed -Wall -I../../../../usr/include
+LDFLAGS += -lpthread
+
+TEST_GEN_PROGS := sigtrap_threads remove_on_exec
+include ../lib.mk
diff --git a/tools/testing/selftests/perf_events/config b/tools/testing/selftests/perf_events/config
new file mode 100644
index 000000000000..ba58ff2203e4
--- /dev/null
+++ b/tools/testing/selftests/perf_events/config
@@ -0,0 +1 @@
+CONFIG_PERF_EVENTS=y
diff --git a/tools/testing/selftests/perf_events/remove_on_exec.c b/tools/testing/selftests/perf_events/remove_on_exec.c
new file mode 100644
index 000000000000..5814611a1dc7
--- /dev/null
+++ b/tools/testing/selftests/perf_events/remove_on_exec.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test for remove_on_exec.
+ *
+ * Copyright (C) 2021, Google LLC.
+ */
+
+#define _GNU_SOURCE
+
+/* We need the latest siginfo from the kernel repo. */
+#include <sys/types.h>
+#include <asm/siginfo.h>
+#define __have_siginfo_t 1
+#define __have_sigval_t 1
+#define __have_sigevent_t 1
+#define __siginfo_t_defined
+#define __sigval_t_defined
+#define __sigevent_t_defined
+#define _BITS_SIGINFO_CONSTS_H 1
+#define _BITS_SIGEVENT_CONSTS_H 1
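+/*
+ * The guard macros above keep glibc's <bits/siginfo-consts.h> and friends
+ * from re-defining siginfo_t, sigval_t and sigevent_t, so the definitions
+ * from the kernel's <asm/siginfo.h> (with the new perf fields) are used.
+ */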
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <linux/perf_event.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "../kselftest_harness.h"
+
+static volatile int signal_count;
+
+static struct perf_event_attr make_event_attr(void)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .size = sizeof(attr),
+ .config = PERF_COUNT_HW_INSTRUCTIONS,
+ .sample_period = 1000,
+ .exclude_kernel = 1,
+ .exclude_hv = 1,
+ .disabled = 1,
+ .inherit = 1,
+ /*
+ * Children normally retain their inherited event on exec; with
+ * remove_on_exec, we'll remove their event, but the parent and
+ * any other non-exec'd children will keep their events.
+ */
+ .remove_on_exec = 1,
+ .sigtrap = 1,
+ };
+ return attr;
+}
+
+static void sigtrap_handler(int signum, siginfo_t *info, void *ucontext)
+{
+ if (info->si_code != TRAP_PERF) {
+ fprintf(stderr, "%s: unexpected si_code %d\n", __func__, info->si_code);
+ return;
+ }
+
+ signal_count++;
+}
+
+FIXTURE(remove_on_exec)
+{
+ struct sigaction oldact;
+ int fd;
+};
+
+FIXTURE_SETUP(remove_on_exec)
+{
+ struct perf_event_attr attr = make_event_attr();
+ struct sigaction action = {};
+
+ signal_count = 0;
+
+ /* Initialize sigtrap handler. */
+ action.sa_flags = SA_SIGINFO | SA_NODEFER;
+ action.sa_sigaction = sigtrap_handler;
+ sigemptyset(&action.sa_mask);
+ ASSERT_EQ(sigaction(SIGTRAP, &action, &self->oldact), 0);
+
+ /* Initialize perf event. */
+ self->fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
+ ASSERT_NE(self->fd, -1);
+}
+
+FIXTURE_TEARDOWN(remove_on_exec)
+{
+ close(self->fd);
+ sigaction(SIGTRAP, &self->oldact, NULL);
+}
+
+/* Verify event propagates to fork'd child. */
+TEST_F(remove_on_exec, fork_only)
+{
+ int status;
+ pid_t pid = fork();
+
+ if (pid == 0) {
+ ASSERT_EQ(signal_count, 0);
+ ASSERT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
+ while (!signal_count);
+ _exit(42);
+ }
+
+ while (!signal_count); /* Child enables event. */
+ EXPECT_EQ(waitpid(pid, &status, 0), pid);
+ EXPECT_EQ(WEXITSTATUS(status), 42);
+}
+
+/*
+ * Verify that event does _not_ propagate to fork+exec'd child; event enabled
+ * after fork+exec.
+ */
+TEST_F(remove_on_exec, fork_exec_then_enable)
+{
+ pid_t pid_exec, pid_only_fork;
+ int pipefd[2];
+ int tmp;
+
+ /*
+ * Non-exec child, to ensure exec does not affect inherited events of
+ * other children.
+ */
+ pid_only_fork = fork();
+ if (pid_only_fork == 0) {
+ /* Block until parent enables event. */
+ while (!signal_count);
+ _exit(42);
+ }
+
+ ASSERT_NE(pipe(pipefd), -1);
+ pid_exec = fork();
+ if (pid_exec == 0) {
+ ASSERT_NE(dup2(pipefd[1], STDOUT_FILENO), -1);
+ close(pipefd[0]);
+ execl("/proc/self/exe", "exec_child", NULL);
+ _exit((perror("exec failed"), 1));
+ }
+ close(pipefd[1]);
+
+ ASSERT_EQ(waitpid(pid_exec, &tmp, WNOHANG), 0); /* Child is running. */
+ /* Wait for exec'd child to start spinning. */
+ EXPECT_EQ(read(pipefd[0], &tmp, sizeof(int)), sizeof(int));
+ EXPECT_EQ(tmp, 42);
+ close(pipefd[0]);
+ /* Now we can enable the event, knowing the child is doing work. */
+ EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
+ /* If the event propagated to the exec'd child, it will exit normally... */
+ usleep(100000); /* ... give time for event to trigger (in case of bug). */
+ EXPECT_EQ(waitpid(pid_exec, &tmp, WNOHANG), 0); /* Should still be running. */
+ EXPECT_EQ(kill(pid_exec, SIGKILL), 0);
+
+ /* Verify removal from child did not affect this task's event. */
+ tmp = signal_count;
+ while (signal_count == tmp); /* Should not hang! */
+ /* Nor should it have affected the first child. */
+ EXPECT_EQ(waitpid(pid_only_fork, &tmp, 0), pid_only_fork);
+ EXPECT_EQ(WEXITSTATUS(tmp), 42);
+}
+
+/*
+ * Verify that event does _not_ propagate to fork+exec'd child; event enabled
+ * before fork+exec.
+ */
+TEST_F(remove_on_exec, enable_then_fork_exec)
+{
+ pid_t pid_exec;
+ int tmp;
+
+ EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
+
+ pid_exec = fork();
+ if (pid_exec == 0) {
+ execl("/proc/self/exe", "exec_child", NULL);
+ _exit((perror("exec failed"), 1));
+ }
+
+ /*
+ * The child may exit abnormally at any time if the event propagated and
+ * a SIGTRAP is sent before the handler was set up.
+ */
+ usleep(100000); /* ... give time for event to trigger (in case of bug). */
+ EXPECT_EQ(waitpid(pid_exec, &tmp, WNOHANG), 0); /* Should still be running. */
+ EXPECT_EQ(kill(pid_exec, SIGKILL), 0);
+
+ /* Verify removal from child did not affect this task's event. */
+ tmp = signal_count;
+ while (signal_count == tmp); /* Should not hang! */
+}
+
+TEST_F(remove_on_exec, exec_stress)
+{
+ pid_t pids[30];
+ int i, tmp;
+
+ for (i = 0; i < sizeof(pids) / sizeof(pids[0]); i++) {
+ pids[i] = fork();
+ if (pids[i] == 0) {
+ execl("/proc/self/exe", "exec_child", NULL);
+ _exit((perror("exec failed"), 1));
+ }
+
+ /* Some are forked with the event disabled, the rest with it enabled. */
+ if (i > 10)
+ EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
+ }
+
+ usleep(100000); /* ... give time for event to trigger (in case of bug). */
+
+ for (i = 0; i < sizeof(pids) / sizeof(pids[0]); i++) {
+ /* All children should still be running. */
+ EXPECT_EQ(waitpid(pids[i], &tmp, WNOHANG), 0);
+ EXPECT_EQ(kill(pids[i], SIGKILL), 0);
+ }
+
+ /* Verify event is still alive. */
+ tmp = signal_count;
+ while (signal_count == tmp);
+}
+
+/* For exec'd child. */
+static void exec_child(void)
+{
+ struct sigaction action = {};
+ const int val = 42;
+
+ /* Set up sigtrap handler in case we erroneously receive a trap. */
+ action.sa_flags = SA_SIGINFO | SA_NODEFER;
+ action.sa_sigaction = sigtrap_handler;
+ sigemptyset(&action.sa_mask);
+ if (sigaction(SIGTRAP, &action, NULL))
+ _exit((perror("sigaction failed"), 1));
+
+ /* Signal parent that we're starting to spin. */
+ if (write(STDOUT_FILENO, &val, sizeof(int)) == -1)
+ _exit((perror("write failed"), 1));
+
+ /* Should hang here until killed. */
+ while (!signal_count);
+}
+
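+/*
+ * Wrap the harness entry point: the execl() calls above re-exec this binary
+ * with argv[0] == "exec_child", in which case we spin in exec_child()
+ * instead of running the test suite.
+ */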
+#define main test_main
+TEST_HARNESS_MAIN
+#undef main
+int main(int argc, char *argv[])
+{
+ if (!strcmp(argv[0], "exec_child")) {
+ exec_child();
+ return 1;
+ }
+
+ return test_main(argc, argv);
+}
diff --git a/tools/testing/selftests/perf_events/settings b/tools/testing/selftests/perf_events/settings
new file mode 100644
index 000000000000..6091b45d226b
--- /dev/null
+++ b/tools/testing/selftests/perf_events/settings
@@ -0,0 +1 @@
+timeout=120
diff --git a/tools/testing/selftests/perf_events/sigtrap_threads.c b/tools/testing/selftests/perf_events/sigtrap_threads.c
new file mode 100644
index 000000000000..78ddf5e11625
--- /dev/null
+++ b/tools/testing/selftests/perf_events/sigtrap_threads.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test for perf events with SIGTRAP across all threads.
+ *
+ * Copyright (C) 2021, Google LLC.
+ */
+
+#define _GNU_SOURCE
+
+/* We need the latest siginfo from the kernel repo. */
+#include <sys/types.h>
+#include <asm/siginfo.h>
+#define __have_siginfo_t 1
+#define __have_sigval_t 1
+#define __have_sigevent_t 1
+#define __siginfo_t_defined
+#define __sigval_t_defined
+#define __sigevent_t_defined
+#define _BITS_SIGINFO_CONSTS_H 1
+#define _BITS_SIGEVENT_CONSTS_H 1
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/perf_event.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include "../kselftest_harness.h"
+
+#define NUM_THREADS 5
+
+/* Data shared between test body, threads, and signal handler. */
+static struct {
+ int tids_want_signal; /* Which threads still want a signal. */
+ int signal_count; /* Sanity check number of signals received. */
+ volatile int iterate_on; /* Variable to set breakpoint on. */
+ siginfo_t first_siginfo; /* First observed siginfo_t. */
+} ctx;
+
+/* Unique value to check si_perf is correctly set from perf_event_attr::sig_data. */
+#define TEST_SIG_DATA(addr) (~(unsigned long)(addr))
+
+static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_BREAKPOINT,
+ .size = sizeof(attr),
+ .sample_period = 1,
+ .disabled = !enabled,
+ .bp_addr = (unsigned long)addr,
+ .bp_type = HW_BREAKPOINT_RW,
+ .bp_len = HW_BREAKPOINT_LEN_1,
+ .inherit = 1, /* Children inherit events ... */
+ .inherit_thread = 1, /* ... but only cloned with CLONE_THREAD. */
+ .remove_on_exec = 1, /* Required by sigtrap. */
+ .sigtrap = 1, /* Request synchronous SIGTRAP on event. */
+ .sig_data = TEST_SIG_DATA(addr),
+ };
+ return attr;
+}
+
+static void sigtrap_handler(int signum, siginfo_t *info, void *ucontext)
+{
+ if (info->si_code != TRAP_PERF) {
+ fprintf(stderr, "%s: unexpected si_code %d\n", __func__, info->si_code);
+ return;
+ }
+
+ /*
+ * The data in siginfo_t we're interested in should all be the same
+ * across threads.
+ */
+ if (!__atomic_fetch_add(&ctx.signal_count, 1, __ATOMIC_RELAXED))
+ ctx.first_siginfo = *info;
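+ /*
+ * test_thread() adds its tid once per expected hit; subtracting it here
+ * drives tids_want_signal back to zero only if every thread receives
+ * exactly the signals it asked for.
+ */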
+ __atomic_fetch_sub(&ctx.tids_want_signal, syscall(__NR_gettid), __ATOMIC_RELAXED);
+}
+
+static void *test_thread(void *arg)
+{
+ pthread_barrier_t *barrier = (pthread_barrier_t *)arg;
+ pid_t tid = syscall(__NR_gettid);
+ int iter;
+ int i;
+
+ pthread_barrier_wait(barrier);
+
+ __atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED);
+ iter = ctx.iterate_on; /* read */
+ for (i = 0; i < iter - 1; i++) {
+ __atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED);
+ ctx.iterate_on = iter; /* idempotent write */
+ }
+
+ return NULL;
+}
+
+FIXTURE(sigtrap_threads)
+{
+ struct sigaction oldact;
+ pthread_t threads[NUM_THREADS];
+ pthread_barrier_t barrier;
+ int fd;
+};
+
+FIXTURE_SETUP(sigtrap_threads)
+{
+ struct perf_event_attr attr = make_event_attr(false, &ctx.iterate_on);
+ struct sigaction action = {};
+ int i;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ /* Initialize sigtrap handler. */
+ action.sa_flags = SA_SIGINFO | SA_NODEFER;
+ action.sa_sigaction = sigtrap_handler;
+ sigemptyset(&action.sa_mask);
+ ASSERT_EQ(sigaction(SIGTRAP, &action, &self->oldact), 0);
+
+ /* Initialize perf event. */
+ self->fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
+ ASSERT_NE(self->fd, -1);
+
+ /* Spawn threads inheriting perf event. */
+ pthread_barrier_init(&self->barrier, NULL, NUM_THREADS + 1);
+ for (i = 0; i < NUM_THREADS; i++)
+ ASSERT_EQ(pthread_create(&self->threads[i], NULL, test_thread, &self->barrier), 0);
+}
+
+FIXTURE_TEARDOWN(sigtrap_threads)
+{
+ pthread_barrier_destroy(&self->barrier);
+ close(self->fd);
+ sigaction(SIGTRAP, &self->oldact, NULL);
+}
+
+static void run_test_threads(struct __test_metadata *_metadata,
+ FIXTURE_DATA(sigtrap_threads) *self)
+{
+ int i;
+
+ pthread_barrier_wait(&self->barrier);
+ for (i = 0; i < NUM_THREADS; i++)
+ ASSERT_EQ(pthread_join(self->threads[i], NULL), 0);
+}
+
+TEST_F(sigtrap_threads, remain_disabled)
+{
+ run_test_threads(_metadata, self);
+ EXPECT_EQ(ctx.signal_count, 0);
+ EXPECT_NE(ctx.tids_want_signal, 0);
+}
+
+TEST_F(sigtrap_threads, enable_event)
+{
+ EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
+ run_test_threads(_metadata, self);
+
+ EXPECT_EQ(ctx.signal_count, NUM_THREADS);
+ EXPECT_EQ(ctx.tids_want_signal, 0);
+ EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
+ EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT);
+ EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on));
+
+ /* Check enabled for parent. */
+ ctx.iterate_on = 0;
+ EXPECT_EQ(ctx.signal_count, NUM_THREADS + 1);
+}
+
+/* Test that modification propagates to all inherited events. */
+TEST_F(sigtrap_threads, modify_and_enable_event)
+{
+ struct perf_event_attr new_attr = make_event_attr(true, &ctx.iterate_on);
+
+ EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &new_attr), 0);
+ run_test_threads(_metadata, self);
+
+ EXPECT_EQ(ctx.signal_count, NUM_THREADS);
+ EXPECT_EQ(ctx.tids_want_signal, 0);
+ EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
+ EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT);
+ EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on));
+
+ /* Check enabled for parent. */
+ ctx.iterate_on = 0;
+ EXPECT_EQ(ctx.signal_count, NUM_THREADS + 1);
+}
+
+/* Stress test event + signal handling. */
+TEST_F(sigtrap_threads, signal_stress)
+{
+ ctx.iterate_on = 3000;
+
+ EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
+ run_test_threads(_metadata, self);
+ EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_DISABLE, 0), 0);
+
+ EXPECT_EQ(ctx.signal_count, NUM_THREADS * ctx.iterate_on);
+ EXPECT_EQ(ctx.tids_want_signal, 0);
+ EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
+ EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT);
+ EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on));
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
index c25cf7cd45e9..33ee34fc0828 100644
--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c
+++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
@@ -10,16 +10,7 @@
*
* We create two sets of source and destination buffers, one in regular memory,
* the other cache-inhibited (by default we use /dev/fb0 for this, but an
- * alterative path for cache-inhibited memory may be provided).
- *
- * One way to get cache-inhibited memory is to use the "mem" kernel parameter
- * to limit the kernel to less memory than actually exists. Addresses above
- * the limit may still be accessed but will be treated as cache-inhibited. For
- * example, if there is actually 4GB of memory and the parameter "mem=3GB" is
- * used, memory from address 0xC0000000 onwards is treated as cache-inhibited.
- * To access this region /dev/mem is used. The kernel should be configured
- * without CONFIG_STRICT_DEVMEM. In this case use:
- * ./alignment_handler /dev/mem 0xc0000000
+ * alternative path for cache-inhibited memory may be provided, e.g. memtrace).
*
* We initialise the source buffers, then use whichever set of load/store
* instructions is under test to copy bytes from the source buffers to the
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index defe488d6bf1..40253abc6208 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -5,6 +5,7 @@ noarg:
TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \
large_vm_fork_separation bad_accesses pkey_exec_prot \
pkey_siginfo stack_expansion_signal stack_expansion_ldst
+TEST_PROGS := stress_code_patching.sh
TEST_GEN_PROGS_EXTENDED := tlbie_test
TEST_GEN_FILES := tempfile
diff --git a/tools/testing/selftests/powerpc/mm/stress_code_patching.sh b/tools/testing/selftests/powerpc/mm/stress_code_patching.sh
new file mode 100755
index 000000000000..e454509659f6
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/stress_code_patching.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+TIMEOUT=30
+
+DEBUGFS_DIR=$(awk '/debugfs/ {print $2; exit}' /proc/mounts)
+if [ ! -e "$DEBUGFS_DIR" ]
+then
+ echo "debugfs not found, skipping" 1>&2
+ exit 4
+fi
+
+if [ ! -e "$DEBUFS_DIR/tracing/current_tracer" ]
+then
+ echo "Tracing files not found, skipping" 1>&2
+ exit 4
+fi
+
+
+echo "Testing for spurious faults when mapping kernel memory..."
+
+if grep -q "FUNCTION TRACING IS CORRUPTED" "$DEBUFS_DIR/tracing/trace"
+then
+ echo "FAILED: Ftrace already dead. Probably due to a spurious fault" 1>&2
+ exit 1
+fi
+
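+# Switching current_tracer between "function" and "nop" makes ftrace patch the
+# call sites of every traced kernel function on each pass, stressing kernel
+# text mapping; an "ftrace bug" line in dmesg means a patch went wrong.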
+dmesg -C
+START_TIME=`date +%s`
+END_TIME=`expr $START_TIME + $TIMEOUT`
+while [ `date +%s` -lt $END_TIME ]
+do
+ echo function > $DEBUGFS_DIR/tracing/current_tracer
+ echo nop > $DEBUGFS_DIR/tracing/current_tracer
+ if dmesg | grep -q 'ftrace bug'
+ then
+ break
+ fi
+done
+
+echo nop > $DEBUGFS_DIR/tracing/current_tracer
+if dmesg | grep -q 'ftrace bug'
+then
+ echo "FAILED: Mapping kernel memory causes spurious faults" 1>&2
+ exit 1
+else
+ echo "OK: Mapping kernel memory does not cause spurious faults"
+ exit 0
+fi
diff --git a/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c b/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c
index 02dffb65de48..b099753b50e4 100644
--- a/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c
+++ b/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c
@@ -324,7 +324,7 @@ int compress_file(int argc, char **argv, void *handle)
fprintf(stderr, "error: cannot progress; ");
fprintf(stderr, "too many faults\n");
exit(-1);
- };
+ }
}
fault_tries = NX_MAX_FAULTS; /* Reset for the next chunk */
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index af3df79d8163..c5ecb4634094 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-include ../../../../../../scripts/Kbuild.include
+include ../../../../../build/Build.include
noarg:
$(MAKE) -C ../../
diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore
index 0e96150b7c7e..eb75e5360e31 100644
--- a/tools/testing/selftests/powerpc/ptrace/.gitignore
+++ b/tools/testing/selftests/powerpc/ptrace/.gitignore
@@ -14,3 +14,4 @@ perf-hwbreak
core-pkey
ptrace-pkey
ptrace-syscall
+ptrace-perf-hwbreak
diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index 8d3f006c98cc..a500639da97a 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -2,7 +2,7 @@
TEST_GEN_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
- perf-hwbreak ptrace-syscall
+ perf-hwbreak ptrace-syscall ptrace-perf-hwbreak
top_srcdir = ../../../../..
include ../../lib.mk
diff --git a/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c b/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
index c1f324afdbf3..ecde2c199f3b 100644
--- a/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
+++ b/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
@@ -21,8 +21,13 @@
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
+#include <signal.h>
#include <string.h>
#include <sys/ioctl.h>
+#include <sys/wait.h>
+#include <sys/ptrace.h>
+#include <sys/sysinfo.h>
+#include <asm/ptrace.h>
#include <elf.h>
#include <pthread.h>
#include <sys/syscall.h>
@@ -30,32 +35,130 @@
#include <linux/hw_breakpoint.h>
#include "utils.h"
+#ifndef PPC_DEBUG_FEATURE_DATA_BP_ARCH_31
+#define PPC_DEBUG_FEATURE_DATA_BP_ARCH_31 0x20
+#endif
+
#define MAX_LOOPS 10000
#define DAWR_LENGTH_MAX ((0x3f + 1) * 8)
-static inline int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
- int cpu, int group_fd,
- unsigned long flags)
+int nprocs;
+
+static volatile int a = 10;
+static volatile int b = 10;
+static volatile char c[512 + 8] __attribute__((aligned(512)));
+
+static void perf_event_attr_set(struct perf_event_attr *attr,
+ __u32 type, __u64 addr, __u64 len,
+ bool exclude_user)
{
- attr->size = sizeof(*attr);
- return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
+ memset(attr, 0, sizeof(struct perf_event_attr));
+ attr->type = PERF_TYPE_BREAKPOINT;
+ attr->size = sizeof(struct perf_event_attr);
+ attr->bp_type = type;
+ attr->bp_addr = addr;
+ attr->bp_len = len;
+ attr->exclude_kernel = 1;
+ attr->exclude_hv = 1;
+ attr->exclude_guest = 1;
+ attr->exclude_user = exclude_user;
+ attr->disabled = 1;
}
-static inline bool breakpoint_test(int len)
+static int
+perf_process_event_open_exclude_user(__u32 type, __u64 addr, __u64 len, bool exclude_user)
+{
+ struct perf_event_attr attr;
+
+ perf_event_attr_set(&attr, type, addr, len, exclude_user);
+ return syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0);
+}
+
+static int perf_process_event_open(__u32 type, __u64 addr, __u64 len)
+{
+ struct perf_event_attr attr;
+
+ perf_event_attr_set(&attr, type, addr, len, 0);
+ return syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0);
+}
+
+static int perf_cpu_event_open(long cpu, __u32 type, __u64 addr, __u64 len)
{
struct perf_event_attr attr;
+
+ perf_event_attr_set(&attr, type, addr, len, 0);
+ return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
+}
+
+static void close_fds(int *fd, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ close(fd[i]);
+}
+
+static unsigned long long read_fds(int *fd, int n)
+{
+ int i;
+ unsigned long long val = 0;
+ unsigned long long count = 0;
+ size_t res;
+
+ for (i = 0; i < n; i++) {
+ /* perf counter values are always 64-bit */
+ res = read(fd[i], &val, sizeof(val));
+ assert(res == sizeof(unsigned long long));
+ count += val;
+ }
+ return count;
+}
+
+static void reset_fds(int *fd, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ ioctl(fd[i], PERF_EVENT_IOC_RESET);
+}
+
+static void enable_fds(int *fd, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ ioctl(fd[i], PERF_EVENT_IOC_ENABLE);
+}
+
+static void disable_fds(int *fd, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ ioctl(fd[i], PERF_EVENT_IOC_DISABLE);
+}
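+/*
+ * The helpers above implement the usual counting-breakpoint lifecycle used
+ * by the tests below: open -> IOC_RESET -> IOC_ENABLE -> run the workload ->
+ * IOC_DISABLE -> read the count -> close.
+ */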
+
+static int perf_systemwide_event_open(int *fd, __u32 type, __u64 addr, __u64 len)
+{
+ int i = 0;
+
+ /* Assume online processors are 0 to nprocs-1, for simplicity */
+ for (i = 0; i < nprocs; i++) {
+ fd[i] = perf_cpu_event_open(i, type, addr, len);
+ if (fd[i] < 0) {
+ close_fds(fd, i);
+ return fd[i];
+ }
+ }
+ return 0;
+}
+
+static inline bool breakpoint_test(int len)
+{
int fd;
- /* setup counters */
- memset(&attr, 0, sizeof(attr));
- attr.disabled = 1;
- attr.type = PERF_TYPE_BREAKPOINT;
- attr.bp_type = HW_BREAKPOINT_R;
/* bp_addr can point anywhere but needs to be aligned */
- attr.bp_addr = (__u64)(&attr) & 0xfffffffffffff800;
- attr.bp_len = len;
- fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+ fd = perf_process_event_open(HW_BREAKPOINT_R, (__u64)(&fd) & 0xfffffffffffff800, len);
if (fd < 0)
return false;
close(fd);
@@ -75,7 +178,6 @@ static inline bool dawr_supported(void)
static int runtestsingle(int readwriteflag, int exclude_user, int arraytest)
{
int i,j;
- struct perf_event_attr attr;
size_t res;
unsigned long long breaks, needed;
int readint;
@@ -85,6 +187,7 @@ static int runtestsingle(int readwriteflag, int exclude_user, int arraytest)
int break_fd;
int loop_num = MAX_LOOPS - (rand() % 100); /* provide some variability */
volatile int *k;
+ __u64 len;
/* align to 0x400 boundary as required by DAWR */
readintalign = (int *)(((unsigned long)readintarraybig + 0x7ff) &
@@ -94,19 +197,11 @@ static int runtestsingle(int readwriteflag, int exclude_user, int arraytest)
if (arraytest)
ptr = &readintalign[0];
- /* setup counters */
- memset(&attr, 0, sizeof(attr));
- attr.disabled = 1;
- attr.type = PERF_TYPE_BREAKPOINT;
- attr.bp_type = readwriteflag;
- attr.bp_addr = (__u64)ptr;
- attr.bp_len = sizeof(int);
- if (arraytest)
- attr.bp_len = DAWR_LENGTH_MAX;
- attr.exclude_user = exclude_user;
- break_fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+ len = arraytest ? DAWR_LENGTH_MAX : sizeof(int);
+ break_fd = perf_process_event_open_exclude_user(readwriteflag, (__u64)ptr,
+ len, exclude_user);
if (break_fd < 0) {
- perror("sys_perf_event_open");
+ perror("perf_process_event_open_exclude_user");
exit(1);
}
@@ -153,7 +248,6 @@ static int runtest_dar_outside(void)
void *target;
volatile __u16 temp16;
volatile __u64 temp64;
- struct perf_event_attr attr;
int break_fd;
unsigned long long breaks;
int fail = 0;
@@ -165,21 +259,11 @@ static int runtest_dar_outside(void)
exit(EXIT_FAILURE);
}
- /* setup counters */
- memset(&attr, 0, sizeof(attr));
- attr.disabled = 1;
- attr.type = PERF_TYPE_BREAKPOINT;
- attr.exclude_kernel = 1;
- attr.exclude_hv = 1;
- attr.exclude_guest = 1;
- attr.bp_type = HW_BREAKPOINT_RW;
/* watch middle half of target array */
- attr.bp_addr = (__u64)(target + 2);
- attr.bp_len = 4;
- break_fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+ break_fd = perf_process_event_open(HW_BREAKPOINT_RW, (__u64)(target + 2), 4);
if (break_fd < 0) {
free(target);
- perror("sys_perf_event_open");
+ perror("perf_process_event_open");
exit(EXIT_FAILURE);
}
@@ -263,11 +347,467 @@ static int runtest_dar_outside(void)
return fail;
}
+static void multi_dawr_workload(void)
+{
+ a += 10;
+ b += 10;
+ c[512 + 1] += 'a';
+}
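+/*
+ * Each "+=" above is one load plus one store, so an RW breakpoint on that
+ * location counts 2 hits per workload run while an R-only or W-only one
+ * counts 1; the expected values in the tests below follow from this.
+ */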
+
+static int test_process_multi_diff_addr(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int fd1, fd2;
+ char *desc = "Process specific, Two events, diff addr";
+ size_t res;
+
+ fd1 = perf_process_event_open(HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
+ if (fd1 < 0) {
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ fd2 = perf_process_event_open(HW_BREAKPOINT_RW, (__u64)&b, (__u64)sizeof(b));
+ if (fd2 < 0) {
+ close(fd1);
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ioctl(fd1, PERF_EVENT_IOC_RESET);
+ ioctl(fd2, PERF_EVENT_IOC_RESET);
+ ioctl(fd1, PERF_EVENT_IOC_ENABLE);
+ ioctl(fd2, PERF_EVENT_IOC_ENABLE);
+ multi_dawr_workload();
+ ioctl(fd1, PERF_EVENT_IOC_DISABLE);
+ ioctl(fd2, PERF_EVENT_IOC_DISABLE);
+
+ res = read(fd1, &breaks1, sizeof(breaks1));
+ assert(res == sizeof(unsigned long long));
+ res = read(fd2, &breaks2, sizeof(breaks2));
+ assert(res == sizeof(unsigned long long));
+
+ close(fd1);
+ close(fd2);
+
+ if (breaks1 != 2 || breaks2 != 2) {
+ printf("FAILED: %s: %lld != 2 || %lld != 2\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int test_process_multi_same_addr(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int fd1, fd2;
+ char *desc = "Process specific, Two events, same addr";
+ size_t res;
+
+ fd1 = perf_process_event_open(HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
+ if (fd1 < 0) {
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ fd2 = perf_process_event_open(HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
+ if (fd2 < 0) {
+ close(fd1);
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ioctl(fd1, PERF_EVENT_IOC_RESET);
+ ioctl(fd2, PERF_EVENT_IOC_RESET);
+ ioctl(fd1, PERF_EVENT_IOC_ENABLE);
+ ioctl(fd2, PERF_EVENT_IOC_ENABLE);
+ multi_dawr_workload();
+ ioctl(fd1, PERF_EVENT_IOC_DISABLE);
+ ioctl(fd2, PERF_EVENT_IOC_DISABLE);
+
+ res = read(fd1, &breaks1, sizeof(breaks1));
+ assert(res == sizeof(unsigned long long));
+ res = read(fd2, &breaks2, sizeof(breaks2));
+ assert(res == sizeof(unsigned long long));
+
+ close(fd1);
+ close(fd2);
+
+ if (breaks1 != 2 || breaks2 != 2) {
+ printf("FAILED: %s: %lld != 2 || %lld != 2\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int test_process_multi_diff_addr_ro_wo(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int fd1, fd2;
+ char *desc = "Process specific, Two events, diff addr, one is RO, other is WO";
+ size_t res;
+
+ fd1 = perf_process_event_open(HW_BREAKPOINT_W, (__u64)&a, (__u64)sizeof(a));
+ if (fd1 < 0) {
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ fd2 = perf_process_event_open(HW_BREAKPOINT_R, (__u64)&b, (__u64)sizeof(b));
+ if (fd2 < 0) {
+ close(fd1);
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ioctl(fd1, PERF_EVENT_IOC_RESET);
+ ioctl(fd2, PERF_EVENT_IOC_RESET);
+ ioctl(fd1, PERF_EVENT_IOC_ENABLE);
+ ioctl(fd2, PERF_EVENT_IOC_ENABLE);
+ multi_dawr_workload();
+ ioctl(fd1, PERF_EVENT_IOC_DISABLE);
+ ioctl(fd2, PERF_EVENT_IOC_DISABLE);
+
+ res = read(fd1, &breaks1, sizeof(breaks1));
+ assert(res == sizeof(unsigned long long));
+ res = read(fd2, &breaks2, sizeof(breaks2));
+ assert(res == sizeof(unsigned long long));
+
+ close(fd1);
+ close(fd2);
+
+ if (breaks1 != 1 || breaks2 != 1) {
+ printf("FAILED: %s: %lld != 1 || %lld != 1\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int test_process_multi_same_addr_ro_wo(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int fd1, fd2;
+ char *desc = "Process specific, Two events, same addr, one is RO, other is WO";
+ size_t res;
+
+ fd1 = perf_process_event_open(HW_BREAKPOINT_R, (__u64)&a, (__u64)sizeof(a));
+ if (fd1 < 0) {
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ fd2 = perf_process_event_open(HW_BREAKPOINT_W, (__u64)&a, (__u64)sizeof(a));
+ if (fd2 < 0) {
+ close(fd1);
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ioctl(fd1, PERF_EVENT_IOC_RESET);
+ ioctl(fd2, PERF_EVENT_IOC_RESET);
+ ioctl(fd1, PERF_EVENT_IOC_ENABLE);
+ ioctl(fd2, PERF_EVENT_IOC_ENABLE);
+ multi_dawr_workload();
+ ioctl(fd1, PERF_EVENT_IOC_DISABLE);
+ ioctl(fd2, PERF_EVENT_IOC_DISABLE);
+
+ res = read(fd1, &breaks1, sizeof(breaks1));
+ assert(res == sizeof(unsigned long long));
+ res = read(fd2, &breaks2, sizeof(breaks2));
+ assert(res == sizeof(unsigned long long));
+
+ close(fd1);
+ close(fd2);
+
+ if (breaks1 != 1 || breaks2 != 1) {
+ printf("FAILED: %s: %lld != 1 || %lld != 1\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int test_syswide_multi_diff_addr(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int *fd1 = malloc(nprocs * sizeof(int));
+ int *fd2 = malloc(nprocs * sizeof(int));
+ char *desc = "Systemwide, Two events, diff addr";
+ int ret;
+
+ ret = perf_systemwide_event_open(fd1, HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
+ if (ret) {
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ret = perf_systemwide_event_open(fd2, HW_BREAKPOINT_RW, (__u64)&b, (__u64)sizeof(b));
+ if (ret) {
+ close_fds(fd1, nprocs);
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ reset_fds(fd1, nprocs);
+ reset_fds(fd2, nprocs);
+ enable_fds(fd1, nprocs);
+ enable_fds(fd2, nprocs);
+ multi_dawr_workload();
+ disable_fds(fd1, nprocs);
+ disable_fds(fd2, nprocs);
+
+ breaks1 = read_fds(fd1, nprocs);
+ breaks2 = read_fds(fd2, nprocs);
+
+ close_fds(fd1, nprocs);
+ close_fds(fd2, nprocs);
+
+ free(fd1);
+ free(fd2);
+
+ if (breaks1 != 2 || breaks2 != 2) {
+ printf("FAILED: %s: %lld != 2 || %lld != 2\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int test_syswide_multi_same_addr(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int *fd1 = malloc(nprocs * sizeof(int));
+ int *fd2 = malloc(nprocs * sizeof(int));
+ char *desc = "Systemwide, Two events, same addr";
+ int ret;
+
+ ret = perf_systemwide_event_open(fd1, HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
+ if (ret) {
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ret = perf_systemwide_event_open(fd2, HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
+ if (ret) {
+ close_fds(fd1, nprocs);
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ reset_fds(fd1, nprocs);
+ reset_fds(fd2, nprocs);
+ enable_fds(fd1, nprocs);
+ enable_fds(fd2, nprocs);
+ multi_dawr_workload();
+ disable_fds(fd1, nprocs);
+ disable_fds(fd2, nprocs);
+
+ breaks1 = read_fds(fd1, nprocs);
+ breaks2 = read_fds(fd2, nprocs);
+
+ close_fds(fd1, nprocs);
+ close_fds(fd2, nprocs);
+
+ free(fd1);
+ free(fd2);
+
+ if (breaks1 != 2 || breaks2 != 2) {
+ printf("FAILED: %s: %lld != 2 || %lld != 2\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int test_syswide_multi_diff_addr_ro_wo(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int *fd1 = malloc(nprocs * sizeof(int));
+ int *fd2 = malloc(nprocs * sizeof(int));
+ char *desc = "Systemwide, Two events, diff addr, one is RO, other is WO";
+ int ret;
+
+ ret = perf_systemwide_event_open(fd1, HW_BREAKPOINT_W, (__u64)&a, (__u64)sizeof(a));
+ if (ret) {
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ret = perf_systemwide_event_open(fd2, HW_BREAKPOINT_R, (__u64)&b, (__u64)sizeof(b));
+ if (ret) {
+ close_fds(fd1, nprocs);
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ reset_fds(fd1, nprocs);
+ reset_fds(fd2, nprocs);
+ enable_fds(fd1, nprocs);
+ enable_fds(fd2, nprocs);
+ multi_dawr_workload();
+ disable_fds(fd1, nprocs);
+ disable_fds(fd2, nprocs);
+
+ breaks1 = read_fds(fd1, nprocs);
+ breaks2 = read_fds(fd2, nprocs);
+
+ close_fds(fd1, nprocs);
+ close_fds(fd2, nprocs);
+
+ free(fd1);
+ free(fd2);
+
+ if (breaks1 != 1 || breaks2 != 1) {
+ printf("FAILED: %s: %lld != 1 || %lld != 1\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int test_syswide_multi_same_addr_ro_wo(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int *fd1 = malloc(nprocs * sizeof(int));
+ int *fd2 = malloc(nprocs * sizeof(int));
+ char *desc = "Systemwide, Two events, same addr, one is RO, other is WO";
+ int ret;
+
+ ret = perf_systemwide_event_open(fd1, HW_BREAKPOINT_W, (__u64)&a, (__u64)sizeof(a));
+ if (ret) {
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ret = perf_systemwide_event_open(fd2, HW_BREAKPOINT_R, (__u64)&a, (__u64)sizeof(a));
+ if (ret) {
+ close_fds(fd1, nprocs);
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ reset_fds(fd1, nprocs);
+ reset_fds(fd2, nprocs);
+ enable_fds(fd1, nprocs);
+ enable_fds(fd2, nprocs);
+ multi_dawr_workload();
+ disable_fds(fd1, nprocs);
+ disable_fds(fd2, nprocs);
+
+ breaks1 = read_fds(fd1, nprocs);
+ breaks2 = read_fds(fd2, nprocs);
+
+ close_fds(fd1, nprocs);
+ close_fds(fd2, nprocs);
+
+ free(fd1);
+ free(fd2);
+
+ if (breaks1 != 1 || breaks2 != 1) {
+ printf("FAILED: %s: %lld != 1 || %lld != 1\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int runtest_multi_dawr(void)
+{
+ int ret = 0;
+
+ ret |= test_process_multi_diff_addr();
+ ret |= test_process_multi_same_addr();
+ ret |= test_process_multi_diff_addr_ro_wo();
+ ret |= test_process_multi_same_addr_ro_wo();
+ ret |= test_syswide_multi_diff_addr();
+ ret |= test_syswide_multi_same_addr();
+ ret |= test_syswide_multi_diff_addr_ro_wo();
+ ret |= test_syswide_multi_same_addr_ro_wo();
+
+ return ret;
+}
+
+static int runtest_unaligned_512bytes(void)
+{
+ unsigned long long breaks = 0;
+ int fd;
+ char *desc = "Process specific, 512 bytes, unaligned";
+ __u64 addr = (__u64)&c + 8;
+ size_t res;
+
+ fd = perf_process_event_open(HW_BREAKPOINT_RW, addr, 512);
+ if (fd < 0) {
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ioctl(fd, PERF_EVENT_IOC_RESET);
+ ioctl(fd, PERF_EVENT_IOC_ENABLE);
+ multi_dawr_workload();
+ ioctl(fd, PERF_EVENT_IOC_DISABLE);
+
+ res = read(fd, &breaks, sizeof(breaks));
+ assert(res == sizeof(unsigned long long));
+
+ close(fd);
+
+ if (breaks != 2) {
+ printf("FAILED: %s: %lld != 2\n", desc, breaks);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+/* There is no perf API to find the number of available watchpoints, so use ptrace. */
+static int get_nr_wps(bool *arch_31)
+{
+ struct ppc_debug_info dbginfo;
+ int child_pid;
+
+ child_pid = fork();
+ if (!child_pid) {
+ int ret = ptrace(PTRACE_TRACEME, 0, NULL, 0);
+ if (ret) {
+ perror("PTRACE_TRACEME failed\n");
+ exit(EXIT_FAILURE);
+ }
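+ /* Stop under the tracer (signal-stop) so the parent can query us. */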
+ kill(getpid(), SIGUSR1);
+
+ sleep(1);
+ exit(EXIT_SUCCESS);
+ }
+
+ wait(NULL);
+ if (ptrace(PPC_PTRACE_GETHWDBGINFO, child_pid, NULL, &dbginfo)) {
+ perror("Can't get breakpoint info");
+ exit(EXIT_FAILURE);
+ }
+
+ *arch_31 = !!(dbginfo.features & PPC_DEBUG_FEATURE_DATA_BP_ARCH_31);
+ return dbginfo.num_data_bps;
+}
+
static int runtest(void)
{
int rwflag;
int exclude_user;
int ret;
+ bool dawr = dawr_supported();
+ bool arch_31 = false;
+ int nr_wps = get_nr_wps(&arch_31);
/*
* perf defines rwflag as two bits read and write and at least
@@ -280,7 +820,7 @@ static int runtest(void)
return ret;
/* if we have the dawr, we can do an array test */
- if (!dawr_supported())
+ if (!dawr)
continue;
ret = runtestsingle(rwflag, exclude_user, 1);
if (ret)
@@ -289,6 +829,19 @@ static int runtest(void)
}
ret = runtest_dar_outside();
+ if (ret)
+ return ret;
+
+ if (dawr && nr_wps > 1) {
+ nprocs = get_nprocs();
+ ret = runtest_multi_dawr();
+ if (ret)
+ return ret;
+ }
+
+ if (dawr && arch_31)
+ ret = runtest_unaligned_512bytes();
+
return ret;
}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c b/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
index 2e0d86e0687e..a0635a3819aa 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
@@ -194,6 +194,18 @@ static void test_workload(void)
big_var[rand() % DAWR_MAX_LEN] = 'a';
else
cvar = big_var[rand() % DAWR_MAX_LEN];
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW ALIGNED, WO test */
+ gstruct.a[rand() % A_LEN] = 'a';
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW UNALIGNED, RO test */
+ cvar = gstruct.b[rand() % B_LEN];
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DAWR Overlap, WO test */
+ gstruct.a[rand() % A_LEN] = 'a';
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DAWR Overlap, RO test */
+ cvar = gstruct.a[rand() % A_LEN];
}
static void check_success(pid_t child_pid, const char *name, const char *type,
@@ -417,6 +429,69 @@ static void test_sethwdebug_range_aligned(pid_t child_pid)
ptrace_delhwdebug(child_pid, wh);
}
+static void test_multi_sethwdebug_range(pid_t child_pid)
+{
+ struct ppc_hw_breakpoint info1, info2;
+ unsigned long wp_addr1, wp_addr2;
+ char *name1 = "PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW ALIGNED";
+ char *name2 = "PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW UNALIGNED";
+ int len1, len2;
+ int wh1, wh2;
+
+ wp_addr1 = (unsigned long)&gstruct.a;
+ wp_addr2 = (unsigned long)&gstruct.b;
+ len1 = A_LEN;
+ len2 = B_LEN;
+ get_ppc_hw_breakpoint(&info1, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr1, len1);
+ get_ppc_hw_breakpoint(&info2, PPC_BREAKPOINT_TRIGGER_READ, wp_addr2, len2);
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW ALIGNED, WO test */
+ wh1 = ptrace_sethwdebug(child_pid, &info1);
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW UNALIGNED, RO test */
+ wh2 = ptrace_sethwdebug(child_pid, &info2);
+
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name1, "WO", wp_addr1, len1);
+
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name2, "RO", wp_addr2, len2);
+
+ ptrace_delhwdebug(child_pid, wh1);
+ ptrace_delhwdebug(child_pid, wh2);
+}
+
+static void test_multi_sethwdebug_range_dawr_overlap(pid_t child_pid)
+{
+ struct ppc_hw_breakpoint info1, info2;
+ unsigned long wp_addr1, wp_addr2;
+ char *name = "PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DAWR Overlap";
+ int len1, len2;
+ int wh1, wh2;
+
+ wp_addr1 = (unsigned long)&gstruct.a;
+ wp_addr2 = (unsigned long)&gstruct.a;
+ len1 = A_LEN;
+ len2 = A_LEN;
+ get_ppc_hw_breakpoint(&info1, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr1, len1);
+ get_ppc_hw_breakpoint(&info2, PPC_BREAKPOINT_TRIGGER_READ, wp_addr2, len2);
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DAWR Overlap, WO test */
+ wh1 = ptrace_sethwdebug(child_pid, &info1);
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DAWR Overlap, RO test */
+ wh2 = ptrace_sethwdebug(child_pid, &info2);
+
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "WO", wp_addr1, len1);
+
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "RO", wp_addr2, len2);
+
+ ptrace_delhwdebug(child_pid, wh1);
+ ptrace_delhwdebug(child_pid, wh2);
+}
+
static void test_sethwdebug_range_unaligned(pid_t child_pid)
{
struct ppc_hw_breakpoint info;
@@ -504,6 +579,10 @@ run_tests(pid_t child_pid, struct ppc_debug_info *dbginfo, bool dawr)
test_sethwdebug_range_unaligned(child_pid);
test_sethwdebug_range_unaligned_dar(child_pid);
test_sethwdebug_dawr_max_range(child_pid);
+ if (dbginfo->num_data_bps > 1) {
+ test_multi_sethwdebug_range(child_pid);
+ test_multi_sethwdebug_range_dawr_overlap(child_pid);
+ }
}
}
}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-perf-hwbreak.c b/tools/testing/selftests/powerpc/ptrace/ptrace-perf-hwbreak.c
new file mode 100644
index 000000000000..3344e74a97b4
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-perf-hwbreak.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <stdio.h>
+#include <string.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/perf_event.h>
+#include <asm/unistd.h>
+#include <sys/ptrace.h>
+#include <sys/wait.h>
+#include "ptrace.h"
+
+char data[16];
+
+/* Overlapping address range */
+volatile __u64 *ptrace_data1 = (__u64 *)&data[0];
+volatile __u64 *perf_data1 = (__u64 *)&data[4];
+
+/* Non-overlapping address range */
+volatile __u64 *ptrace_data2 = (__u64 *)&data[0];
+volatile __u64 *perf_data2 = (__u64 *)&data[8];
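+
+/*
+ * ptrace_data1 spans data[0..7] and perf_data1 data[4..11], so those two
+ * ranges overlap; ptrace_data2 (data[0..7]) and perf_data2 (data[8..15])
+ * are disjoint.
+ */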
+
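+/*
+ * Find the kernel address of the pid_max variable via /proc/kallsyms; it is
+ * used as the target for the kernel-space breakpoints opened below.
+ */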
+static unsigned long pid_max_addr(void)
+{
+ FILE *fp;
+ char *line = NULL, *c;
+ char addr[100];
+ size_t len = 0;
+
+ fp = fopen("/proc/kallsyms", "r");
+ if (!fp) {
+ printf("Failed to read /proc/kallsyms. Exiting..\n");
+ exit(EXIT_FAILURE);
+ }
+
+ while (getline(&line, &len, fp) != -1) {
+ if (!strstr(line, "pid_max") || strstr(line, "pid_max_max") ||
+ strstr(line, "pid_max_min"))
+ continue;
+
+ strncpy(addr, line, sizeof(addr) - 1);
+ addr[sizeof(addr) - 1] = '\0';
+ c = strchr(addr, ' ');
+ *c = '\0';
+ return strtoul(addr, &c, 16);
+ }
+ fclose(fp);
+ printf("Could not find pix_max. Exiting..\n");
+ exit(EXIT_FAILURE);
+ return -1;
+}
+
+static void perf_user_event_attr_set(struct perf_event_attr *attr, __u64 addr, __u64 len)
+{
+ memset(attr, 0, sizeof(struct perf_event_attr));
+ attr->type = PERF_TYPE_BREAKPOINT;
+ attr->size = sizeof(struct perf_event_attr);
+ attr->bp_type = HW_BREAKPOINT_R;
+ attr->bp_addr = addr;
+ attr->bp_len = len;
+ attr->exclude_kernel = 1;
+ attr->exclude_hv = 1;
+}
+
+static void perf_kernel_event_attr_set(struct perf_event_attr *attr)
+{
+ memset(attr, 0, sizeof(struct perf_event_attr));
+ attr->type = PERF_TYPE_BREAKPOINT;
+ attr->size = sizeof(struct perf_event_attr);
+ attr->bp_type = HW_BREAKPOINT_R;
+ attr->bp_addr = pid_max_addr();
+ attr->bp_len = sizeof(unsigned long);
+ attr->exclude_user = 1;
+ attr->exclude_hv = 1;
+}
+
+static int perf_cpu_event_open(int cpu, __u64 addr, __u64 len)
+{
+ struct perf_event_attr attr;
+
+ perf_user_event_attr_set(&attr, addr, len);
+ return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
+}
+
+static int perf_thread_event_open(pid_t child_pid, __u64 addr, __u64 len)
+{
+ struct perf_event_attr attr;
+
+ perf_user_event_attr_set(&attr, addr, len);
+ return syscall(__NR_perf_event_open, &attr, child_pid, -1, -1, 0);
+}
+
+static int perf_thread_cpu_event_open(pid_t child_pid, int cpu, __u64 addr, __u64 len)
+{
+ struct perf_event_attr attr;
+
+ perf_user_event_attr_set(&attr, addr, len);
+ return syscall(__NR_perf_event_open, &attr, child_pid, cpu, -1, 0);
+}
+
+static int perf_thread_kernel_event_open(pid_t child_pid)
+{
+ struct perf_event_attr attr;
+
+ perf_kernel_event_attr_set(&attr);
+ return syscall(__NR_perf_event_open, &attr, child_pid, -1, -1, 0);
+}
+
+static int perf_cpu_kernel_event_open(int cpu)
+{
+ struct perf_event_attr attr;
+
+ perf_kernel_event_attr_set(&attr);
+ return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
+}
+
+static int child(void)
+{
+ int ret;
+
+ ret = ptrace(PTRACE_TRACEME, 0, NULL, 0);
+ if (ret) {
+ printf("Error: PTRACE_TRACEME failed\n");
+ return 0;
+ }
+ kill(getpid(), SIGUSR1); /* --> parent (SIGUSR1) */
+
+ return 0;
+}
+
+static void ptrace_ppc_hw_breakpoint(struct ppc_hw_breakpoint *info, int type,
+ __u64 addr, int len)
+{
+ info->version = 1;
+ info->trigger_type = type;
+ info->condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
+ info->addr = addr;
+ info->addr2 = addr + len;
+ info->condition_value = 0;
+ if (!len)
+ info->addr_mode = PPC_BREAKPOINT_MODE_EXACT;
+ else
+ info->addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
+}
+
+static int ptrace_open(pid_t child_pid, __u64 wp_addr, int len)
+{
+ struct ppc_hw_breakpoint info;
+
+ ptrace_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_RW, wp_addr, len);
+ return ptrace(PPC_PTRACE_SETHWDEBUG, child_pid, 0, &info);
+}
+
+static int test1(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by ptrace)
+ * if (existing cpu event by perf)
+ * if (addr range overlaps)
+ * fail;
+ */
+
+ perf_fd = perf_cpu_event_open(0, (__u64)perf_data1, sizeof(*perf_data1));
+ if (perf_fd < 0)
+ return -1;
+
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd > 0 || errno != ENOSPC)
+ ret = -1;
+
+ close(perf_fd);
+ return ret;
+}
+
+static int test2(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by ptrace)
+ * if (existing cpu event by perf)
+ * if (addr range does not overlap)
+ * allow;
+ */
+
+ perf_fd = perf_cpu_event_open(0, (__u64)perf_data2, sizeof(*perf_data2));
+ if (perf_fd < 0)
+ return -1;
+
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data2, sizeof(*ptrace_data2));
+ if (ptrace_fd < 0) {
+ ret = -1;
+ goto perf_close;
+ }
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+
+perf_close:
+ close(perf_fd);
+ return ret;
+}
+
+static int test3(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by ptrace)
+ * if (existing thread event by perf on the same thread)
+ * if (addr range overlaps)
+ * fail;
+ */
+ perf_fd = perf_thread_event_open(child_pid, (__u64)perf_data1,
+ sizeof(*perf_data1));
+ if (perf_fd < 0)
+ return -1;
+
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd > 0 || errno != ENOSPC)
+ ret = -1;
+
+ close(perf_fd);
+ return ret;
+}
+
+static int test4(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by ptrace)
+ * if (existing thread event by perf on the same thread)
+ * if (addr range does not overlap)
+ * allow;
+ */
+ perf_fd = perf_thread_event_open(child_pid, (__u64)perf_data2,
+ sizeof(*perf_data2));
+ if (perf_fd < 0)
+ return -1;
+
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data2, sizeof(*ptrace_data2));
+ if (ptrace_fd < 0) {
+ ret = -1;
+ goto perf_close;
+ }
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+
+perf_close:
+ close(perf_fd);
+ return ret;
+}
+
+static int test5(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int cpid;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by ptrace)
+ * if (existing thread event by perf on the different thread)
+ * allow;
+ */
+ cpid = fork();
+ if (!cpid) {
+ /* Temporary Child */
+ pause();
+ exit(EXIT_SUCCESS);
+ }
+
+ perf_fd = perf_thread_event_open(cpid, (__u64)perf_data1, sizeof(*perf_data1));
+ if (perf_fd < 0) {
+ ret = -1;
+ goto kill_child;
+ }
+
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd < 0) {
+ ret = -1;
+ goto perf_close;
+ }
+
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+perf_close:
+ close(perf_fd);
+kill_child:
+ kill(cpid, SIGINT);
+ return ret;
+}
+
+static int test6(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread kernel event by perf)
+ * if (existing thread event by ptrace on the same thread)
+ * allow;
+ * -- OR --
+ * if (new per cpu kernel event by perf)
+ * if (existing thread event by ptrace)
+ * allow;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd < 0)
+ return -1;
+
+ perf_fd = perf_thread_kernel_event_open(child_pid);
+ if (perf_fd < 0) {
+ ret = -1;
+ goto ptrace_close;
+ }
+ close(perf_fd);
+
+ perf_fd = perf_cpu_kernel_event_open(0);
+ if (perf_fd < 0) {
+ ret = -1;
+ goto ptrace_close;
+ }
+ close(perf_fd);
+
+ptrace_close:
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test7(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by perf)
+ * if (existing thread event by ptrace on the same thread)
+ * if (addr range overlaps)
+ * fail;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd < 0)
+ return -1;
+
+ perf_fd = perf_thread_event_open(child_pid, (__u64)perf_data1,
+ sizeof(*perf_data1));
+ if (perf_fd > 0 || errno != ENOSPC)
+ ret = -1;
+
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test8(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by perf)
+ * if (existing thread event by ptrace on the same thread)
+ * if (addr range does not overlap)
+ * allow;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data2, sizeof(*ptrace_data2));
+ if (ptrace_fd < 0)
+ return -1;
+
+ perf_fd = perf_thread_event_open(child_pid, (__u64)perf_data2,
+ sizeof(*perf_data2));
+ if (perf_fd < 0) {
+ ret = -1;
+ goto ptrace_close;
+ }
+ close(perf_fd);
+
+ptrace_close:
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test9(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int cpid;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by perf)
+ * if (existing thread event by ptrace on the other thread)
+ * allow;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd < 0)
+ return -1;
+
+ cpid = fork();
+ if (!cpid) {
+ /* Temporary Child */
+ pause();
+ exit(EXIT_SUCCESS);
+ }
+
+ perf_fd = perf_thread_event_open(cpid, (__u64)perf_data1, sizeof(*perf_data1));
+ if (perf_fd < 0) {
+ ret = -1;
+ goto kill_child;
+ }
+ close(perf_fd);
+
+kill_child:
+ kill(cpid, SIGINT);
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test10(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per cpu event by perf)
+ * if (existing thread event by ptrace on the same thread)
+ * if (addr range overlaps)
+ * fail;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd < 0)
+ return -1;
+
+ perf_fd = perf_cpu_event_open(0, (__u64)perf_data1, sizeof(*perf_data1));
+ if (perf_fd > 0 || errno != ENOSPC)
+ ret = -1;
+
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test11(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per cpu event by perf)
+ * if (existing thread event by ptrace on the same thread)
+ * if (addr range does not overlap)
+ * allow;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data2, sizeof(*ptrace_data2));
+ if (ptrace_fd < 0)
+ return -1;
+
+ perf_fd = perf_cpu_event_open(0, (__u64)perf_data2, sizeof(*perf_data2));
+ if (perf_fd < 0) {
+ ret = -1;
+ goto ptrace_close;
+ }
+ close(perf_fd);
+
+ptrace_close:
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test12(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread and per cpu event by perf)
+ * if (existing thread event by ptrace on the same thread)
+ * if (addr range overlaps)
+ * fail;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd < 0)
+ return -1;
+
+ perf_fd = perf_thread_cpu_event_open(child_pid, 0, (__u64)perf_data1, sizeof(*perf_data1));
+ if (perf_fd > 0 || errno != ENOSPC)
+ ret = -1;
+
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test13(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread and per cpu event by perf)
+ * if (existing thread event by ptrace on the same thread)
+ * if (addr range does not overlap)
+ * allow;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data2, sizeof(*ptrace_data2));
+ if (ptrace_fd < 0)
+ return -1;
+
+ perf_fd = perf_thread_cpu_event_open(child_pid, 0, (__u64)perf_data2, sizeof(*perf_data2));
+ if (perf_fd < 0) {
+ ret = -1;
+ goto ptrace_close;
+ }
+ close(perf_fd);
+
+ptrace_close:
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test14(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int cpid;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread and per cpu event by perf)
+ * if (existing thread event by ptrace on the other thread)
+ * allow;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd < 0)
+ return -1;
+
+ cpid = fork();
+ if (!cpid) {
+ /* Temporary Child */
+ pause();
+ exit(EXIT_SUCCESS);
+ }
+
+ perf_fd = perf_thread_cpu_event_open(cpid, 0, (__u64)perf_data1,
+ sizeof(*perf_data1));
+ if (perf_fd < 0) {
+ ret = -1;
+ goto kill_child;
+ }
+ close(perf_fd);
+
+kill_child:
+ kill(cpid, SIGINT);
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int do_test(const char *msg, int (*fun)(pid_t arg), pid_t arg)
+{
+ int ret;
+
+ ret = fun(arg);
+ if (ret)
+ printf("%s: Error\n", msg);
+ else
+ printf("%s: Ok\n", msg);
+ return ret;
+}
+
+char *desc[14] = {
+ "perf cpu event -> ptrace thread event (Overlapping)",
+ "perf cpu event -> ptrace thread event (Non-overlapping)",
+ "perf thread event -> ptrace same thread event (Overlapping)",
+ "perf thread event -> ptrace same thread event (Non-overlapping)",
+ "perf thread event -> ptrace other thread event",
+ "ptrace thread event -> perf kernel event",
+ "ptrace thread event -> perf same thread event (Overlapping)",
+ "ptrace thread event -> perf same thread event (Non-overlapping)",
+ "ptrace thread event -> perf other thread event",
+ "ptrace thread event -> perf cpu event (Overlapping)",
+ "ptrace thread event -> perf cpu event (Non-overlapping)",
+ "ptrace thread event -> perf same thread & cpu event (Overlapping)",
+ "ptrace thread event -> perf same thread & cpu event (Non-overlapping)",
+ "ptrace thread event -> perf other thread & cpu event",
+};
+
+static int test(pid_t child_pid)
+{
+ int ret = TEST_PASS;
+
+ ret |= do_test(desc[0], test1, child_pid);
+ ret |= do_test(desc[1], test2, child_pid);
+ ret |= do_test(desc[2], test3, child_pid);
+ ret |= do_test(desc[3], test4, child_pid);
+ ret |= do_test(desc[4], test5, child_pid);
+ ret |= do_test(desc[5], test6, child_pid);
+ ret |= do_test(desc[6], test7, child_pid);
+ ret |= do_test(desc[7], test8, child_pid);
+ ret |= do_test(desc[8], test9, child_pid);
+ ret |= do_test(desc[9], test10, child_pid);
+ ret |= do_test(desc[10], test11, child_pid);
+ ret |= do_test(desc[11], test12, child_pid);
+ ret |= do_test(desc[12], test13, child_pid);
+ ret |= do_test(desc[13], test14, child_pid);
+
+ return ret;
+}
+
+static void get_dbginfo(pid_t child_pid, struct ppc_debug_info *dbginfo)
+{
+ if (ptrace(PPC_PTRACE_GETHWDBGINFO, child_pid, NULL, dbginfo)) {
+ perror("Can't get breakpoint info");
+ exit(-1);
+ }
+}
+
+static int ptrace_perf_hwbreak(void)
+{
+ int ret;
+ pid_t child_pid;
+ struct ppc_debug_info dbginfo;
+
+ child_pid = fork();
+ if (!child_pid)
+ return child();
+
+ /* parent */
+	wait(NULL); /* wait for child to stop itself (SIGUSR1) */
+
+ get_dbginfo(child_pid, &dbginfo);
+ SKIP_IF(dbginfo.num_data_bps <= 1);
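+	/* The tests pair a ptrace watchpoint with a perf event, so at
+	 * least two hardware data-breakpoint slots are required. */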
+
+ ret = perf_cpu_event_open(0, (__u64)perf_data1, sizeof(*perf_data1));
+ SKIP_IF(ret < 0);
+ close(ret);
+
+ ret = test(child_pid);
+
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ return ret;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(ptrace_perf_hwbreak, "ptrace-perf-hwbreak");
+}
diff --git a/tools/testing/selftests/powerpc/security/Makefile b/tools/testing/selftests/powerpc/security/Makefile
index f25e854fe370..844d18cd5f93 100644
--- a/tools/testing/selftests/powerpc/security/Makefile
+++ b/tools/testing/selftests/powerpc/security/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0+
-TEST_GEN_PROGS := rfi_flush entry_flush spectre_v2
+TEST_GEN_PROGS := rfi_flush entry_flush uaccess_flush spectre_v2
top_srcdir = ../../../../..
CFLAGS += -I../../../../../usr/include
@@ -13,3 +13,4 @@ $(OUTPUT)/spectre_v2: CFLAGS += -m64
$(OUTPUT)/spectre_v2: ../pmu/event.c branch_loops.S
$(OUTPUT)/rfi_flush: flush_utils.c
$(OUTPUT)/entry_flush: flush_utils.c
+$(OUTPUT)/uaccess_flush: flush_utils.c
diff --git a/tools/testing/selftests/powerpc/security/entry_flush.c b/tools/testing/selftests/powerpc/security/entry_flush.c
index 78cf914fa321..68ce377b205e 100644
--- a/tools/testing/selftests/powerpc/security/entry_flush.c
+++ b/tools/testing/selftests/powerpc/security/entry_flush.c
@@ -53,7 +53,7 @@ int entry_flush_test(void)
entry_flush = entry_flush_orig;
- fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
+ fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
FAIL_IF(fd < 0);
p = (char *)memalign(zero_size, CACHELINE_SIZE);
diff --git a/tools/testing/selftests/powerpc/security/flush_utils.c b/tools/testing/selftests/powerpc/security/flush_utils.c
index 0c3c4c40c7fb..4d95965cb751 100644
--- a/tools/testing/selftests/powerpc/security/flush_utils.c
+++ b/tools/testing/selftests/powerpc/security/flush_utils.c
@@ -13,6 +13,7 @@
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
+#include <sys/utsname.h>
#include "utils.h"
#include "flush_utils.h"
@@ -35,6 +36,18 @@ void syscall_loop(char *p, unsigned long iterations,
}
}
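+/*
+ * Like syscall_loop(), but each pass also calls uname(), whose
+ * copy_to_user() of struct utsname exercises the kernel's
+ * user-access exit path that the uaccess L1D flush hooks into.
+ */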
+void syscall_loop_uaccess(char *p, unsigned long iterations,
+ unsigned long zero_size)
+{
+ struct utsname utsname;
+
+ for (unsigned long i = 0; i < iterations; i++) {
+ for (unsigned long j = 0; j < zero_size; j += CACHELINE_SIZE)
+ load(p + j);
+ uname(&utsname);
+ }
+}
+
static void sigill_handler(int signr, siginfo_t *info, void *unused)
{
static int warned;
diff --git a/tools/testing/selftests/powerpc/security/flush_utils.h b/tools/testing/selftests/powerpc/security/flush_utils.h
index 07a5eb301466..e1e68281f7ac 100644
--- a/tools/testing/selftests/powerpc/security/flush_utils.h
+++ b/tools/testing/selftests/powerpc/security/flush_utils.h
@@ -9,9 +9,16 @@
#define CACHELINE_SIZE 128
+#define PERF_L1D_READ_MISS_CONFIG ((PERF_COUNT_HW_CACHE_L1D) | \
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) | \
+ (PERF_COUNT_HW_CACHE_RESULT_MISS << 16))
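+/*
+ * perf_event_open(2) composes a PERF_TYPE_HW_CACHE config value as
+ * (cache id) | (op << 8) | (result << 16), so the value above selects
+ * L1D read misses via the generic hardware cache event.
+ */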
+
void syscall_loop(char *p, unsigned long iterations,
unsigned long zero_size);
+void syscall_loop_uaccess(char *p, unsigned long iterations,
+ unsigned long zero_size);
+
void set_dscr(unsigned long val);
#endif /* _SELFTESTS_POWERPC_SECURITY_FLUSH_UTILS_H */
diff --git a/tools/testing/selftests/powerpc/security/rfi_flush.c b/tools/testing/selftests/powerpc/security/rfi_flush.c
index 7565fd786640..f73484a6470f 100644
--- a/tools/testing/selftests/powerpc/security/rfi_flush.c
+++ b/tools/testing/selftests/powerpc/security/rfi_flush.c
@@ -54,7 +54,7 @@ int rfi_flush_test(void)
rfi_flush = rfi_flush_orig;
- fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
+ fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
FAIL_IF(fd < 0);
p = (char *)memalign(zero_size, CACHELINE_SIZE);
diff --git a/tools/testing/selftests/powerpc/security/uaccess_flush.c b/tools/testing/selftests/powerpc/security/uaccess_flush.c
new file mode 100644
index 000000000000..cf80f960e38a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/security/uaccess_flush.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2018 IBM Corporation.
+ * Copyright 2020 Canonical Ltd.
+ */
+
+#define __SANE_USERSPACE_TYPES__
+
+#include <sys/types.h>
+#include <stdint.h>
+#include <malloc.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include "utils.h"
+#include "flush_utils.h"
+
+int uaccess_flush_test(void)
+{
+ char *p;
+ int repetitions = 10;
+ int fd, passes = 0, iter, rc = 0;
+ struct perf_event_read v;
+ __u64 l1d_misses_total = 0;
+ unsigned long iterations = 100000, zero_size = 24 * 1024;
+ unsigned long l1d_misses_expected;
+ int rfi_flush_orig;
+ int entry_flush_orig;
+ int uaccess_flush, uaccess_flush_orig;
+
+ SKIP_IF(geteuid() != 0);
+
+ // The PMU event we use only works on Power7 or later
+ SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));
+
+ if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_orig) < 0) {
+ perror("Unable to read powerpc/rfi_flush debugfs file");
+ SKIP_IF(1);
+ }
+
+ if (read_debugfs_file("powerpc/entry_flush", &entry_flush_orig) < 0) {
+ perror("Unable to read powerpc/entry_flush debugfs file");
+ SKIP_IF(1);
+ }
+
+ if (read_debugfs_file("powerpc/uaccess_flush", &uaccess_flush_orig) < 0) {
+ perror("Unable to read powerpc/entry_flush debugfs file");
+ SKIP_IF(1);
+ }
+
+ if (rfi_flush_orig != 0) {
+ if (write_debugfs_file("powerpc/rfi_flush", 0) < 0) {
+ perror("error writing to powerpc/rfi_flush debugfs file");
+ FAIL_IF(1);
+ }
+ }
+
+ if (entry_flush_orig != 0) {
+ if (write_debugfs_file("powerpc/entry_flush", 0) < 0) {
+ perror("error writing to powerpc/entry_flush debugfs file");
+ FAIL_IF(1);
+ }
+ }
+
+ uaccess_flush = uaccess_flush_orig;
+
+ fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
+ FAIL_IF(fd < 0);
+
+ p = (char *)memalign(zero_size, CACHELINE_SIZE);
+
+ FAIL_IF(perf_event_enable(fd));
+
+ // disable L1 prefetching
+ set_dscr(1);
+
+ iter = repetitions;
+
+ /*
+	 * We expect to see an L1D miss for each cacheline access when
+	 * uaccess_flush is set. Allow a small variation on this.
+ */
+ l1d_misses_expected = iterations * (zero_size / CACHELINE_SIZE - 2);
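+	/* e.g. 24KB span / 128B lines = 192 accesses, less 2 of slack,
+	 * times 100000 iterations = 19 million expected misses. */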
+
+again:
+ FAIL_IF(perf_event_reset(fd));
+
+ syscall_loop_uaccess(p, iterations, zero_size);
+
+ FAIL_IF(read(fd, &v, sizeof(v)) != sizeof(v));
+
+ if (uaccess_flush && v.l1d_misses >= l1d_misses_expected)
+ passes++;
+ else if (!uaccess_flush && v.l1d_misses < (l1d_misses_expected / 2))
+ passes++;
+
+ l1d_misses_total += v.l1d_misses;
+
+ while (--iter)
+ goto again;
+
+ if (passes < repetitions) {
+ printf("FAIL (L1D misses with uaccess_flush=%d: %llu %c %lu) [%d/%d failures]\n",
+ uaccess_flush, l1d_misses_total, uaccess_flush ? '<' : '>',
+ uaccess_flush ? repetitions * l1d_misses_expected :
+ repetitions * l1d_misses_expected / 2,
+ repetitions - passes, repetitions);
+ rc = 1;
+ } else {
+ printf("PASS (L1D misses with uaccess_flush=%d: %llu %c %lu) [%d/%d pass]\n",
+ uaccess_flush, l1d_misses_total, uaccess_flush ? '>' : '<',
+ uaccess_flush ? repetitions * l1d_misses_expected :
+ repetitions * l1d_misses_expected / 2,
+ passes, repetitions);
+ }
+
+ if (uaccess_flush == uaccess_flush_orig) {
+ uaccess_flush = !uaccess_flush_orig;
+ if (write_debugfs_file("powerpc/uaccess_flush", uaccess_flush) < 0) {
+ perror("error writing to powerpc/uaccess_flush debugfs file");
+ return 1;
+ }
+ iter = repetitions;
+ l1d_misses_total = 0;
+ passes = 0;
+ goto again;
+ }
+
+ perf_event_disable(fd);
+ close(fd);
+
+ set_dscr(0);
+
+ if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_orig) < 0) {
+ perror("unable to restore original value of powerpc/rfi_flush debugfs file");
+ return 1;
+ }
+
+ if (write_debugfs_file("powerpc/entry_flush", entry_flush_orig) < 0) {
+ perror("unable to restore original value of powerpc/entry_flush debugfs file");
+ return 1;
+ }
+
+ if (write_debugfs_file("powerpc/uaccess_flush", uaccess_flush_orig) < 0) {
+ perror("unable to restore original value of powerpc/uaccess_flush debugfs file");
+ return 1;
+ }
+
+ return rc;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(uaccess_flush_test, "uaccess_flush_test");
+}
diff --git a/tools/testing/selftests/powerpc/tm/tm-trap.c b/tools/testing/selftests/powerpc/tm/tm-trap.c
index c75960af8018..11521077f915 100644
--- a/tools/testing/selftests/powerpc/tm/tm-trap.c
+++ b/tools/testing/selftests/powerpc/tm/tm-trap.c
@@ -66,7 +66,7 @@ void trap_signal_handler(int signo, siginfo_t *si, void *uc)
/* Get thread endianness: extract bit LE from MSR */
thread_endianness = MSR_LE & ucp->uc_mcontext.gp_regs[PT_MSR];
- /***
+ /*
* Little-Endian Machine
*/
@@ -126,7 +126,7 @@ void trap_signal_handler(int signo, siginfo_t *si, void *uc)
}
}
- /***
+ /*
* Big-Endian Machine
*/
diff --git a/tools/testing/selftests/rcutorture/bin/cpus2use.sh b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
index 1dbfb62567d2..6bb993001680 100755
--- a/tools/testing/selftests/rcutorture/bin/cpus2use.sh
+++ b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
@@ -21,7 +21,6 @@ then
awk -v ncpus=$ncpus '{ print ncpus * ($7 + $NF) / 100 }'`
else
# No mpstat command, so use all available CPUs.
- echo The mpstat command is not available, so greedily using all CPUs.
idlecpus=$ncpus
fi
awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null '
diff --git a/tools/testing/selftests/rcutorture/bin/jitter.sh b/tools/testing/selftests/rcutorture/bin/jitter.sh
index 188b864bc4bf..15d937ba96ca 100755
--- a/tools/testing/selftests/rcutorture/bin/jitter.sh
+++ b/tools/testing/selftests/rcutorture/bin/jitter.sh
@@ -5,10 +5,11 @@
# of this script is to inflict random OS jitter on a concurrently running
# test.
#
-# Usage: jitter.sh me duration [ sleepmax [ spinmax ] ]
+# Usage: jitter.sh me jittering-path duration [ sleepmax [ spinmax ] ]
#
# me: Random-number-generator seed salt.
+# jittering-path: Path to file whose removal will stop this script.
# duration: Time to run in seconds.
# sleepmax: Maximum microseconds to sleep, defaults to one second.
# spinmax: Maximum microseconds to spin, defaults to one millisecond.
#
@@ -17,9 +18,10 @@
# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
me=$(($1 * 1000))
-duration=$2
-sleepmax=${3-1000000}
-spinmax=${4-1000}
+jittering=$2
+duration=$3
+sleepmax=${4-1000000}
+spinmax=${5-1000}
n=1
@@ -47,7 +49,7 @@ do
fi
# Check for stop request.
- if test -f "$TORTURE_STOPFILE"
+ if ! test -f "$jittering"
then
exit 1;
fi
@@ -67,10 +69,10 @@ do
srand(n + me + systime());
ncpus = split(cpus, ca);
curcpu = ca[int(rand() * ncpus + 1)];
- mask = lshift(1, curcpu);
- if (mask + 0 <= 0)
- mask = 1;
- printf("%#x\n", mask);
+ z = "";
+ for (i = 1; 4 * i <= curcpu; i++)
+ z = z "0";
+ print "0x" 2 ^ (curcpu % 4) z;
}' < /dev/null`
n=$(($n+1))
if ! taskset -p $cpumask $$ > /dev/null 2>&1
diff --git a/tools/testing/selftests/rcutorture/bin/jitterstart.sh b/tools/testing/selftests/rcutorture/bin/jitterstart.sh
new file mode 100644
index 000000000000..3d710ad291c3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/jitterstart.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Start up the specified number of jitter.sh scripts in the background.
+#
+# Usage: . jitterstart.sh n jittering-dir duration [ sleepmax [ spinmax ] ]
+#
+# n: Number of jitter.sh scripts to start up.
+# jittering-dir: Directory in which to put "jittering" file.
+# duration: Time to run in seconds.
+# sleepmax: Maximum microseconds to sleep, defaults to one second.
+# spinmax: Maximum microseconds to spin, defaults to one millisecond.
+#
+# Copyright (C) 2021 Facebook, Inc.
+#
+# Authors: Paul E. McKenney <paulmck@kernel.org>
+
+jitter_n=$1
+if test -z "$jitter_n"
+then
+ echo jitterstart.sh: Missing count of jitter.sh scripts to start.
+ exit 33
+fi
+jittering_dir=$2
+if test -z "$jittering_dir"
+then
+ echo jitterstart.sh: Missing directory in which to place jittering file.
+ exit 34
+fi
+shift
+shift
+
+touch ${jittering_dir}/jittering
+for ((jitter_i = 1; jitter_i <= $jitter_n; jitter_i++))
+do
+ jitter.sh $jitter_i "${jittering_dir}/jittering" "$@" &
+done
diff --git a/tools/testing/selftests/rcutorture/bin/jitterstop.sh b/tools/testing/selftests/rcutorture/bin/jitterstop.sh
new file mode 100644
index 000000000000..576a4cf4b79a
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/jitterstop.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Remove the "jittering" file, signaling the jitter.sh scripts to stop,
+# then wait for them to terminate.
+#
+# Usage: . jitterstop.sh jittering-dir
+#
+# jittering-dir: Directory containing "jittering" file.
+#
+# Copyright (C) 2021 Facebook, Inc.
+#
+# Authors: Paul E. McKenney <paulmck@kernel.org>
+
+jittering_dir=$1
+if test -z "$jittering_dir"
+then
+	echo jitterstop.sh: Missing directory containing the jittering file.
+ exit 34
+fi
+
+rm -f ${jittering_dir}/jittering
+wait
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-again.sh b/tools/testing/selftests/rcutorture/bin/kvm-again.sh
new file mode 100755
index 000000000000..46e47a00a7db
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/kvm-again.sh
@@ -0,0 +1,199 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Rerun a series of tests under KVM.
+#
+# Usage: kvm-again.sh /path/to/old/run [ options ]
+#
+# Copyright (C) 2021 Facebook, Inc.
+#
+# Authors: Paul E. McKenney <paulmck@kernel.org>
+
+scriptname=$0
+args="$*"
+
+T=${TMPDIR-/tmp}/kvm-again.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if ! test -d tools/testing/selftests/rcutorture/bin
+then
+ echo $scriptname must be run from top-level directory of kernel source tree.
+ exit 1
+fi
+
+oldrun=$1
+shift
+if ! test -d "$oldrun"
+then
+ echo "Usage: $scriptname /path/to/old/run [ options ]"
+ exit 1
+fi
+if ! cp "$oldrun/batches" $T/batches.oldrun
+then
+ # Later on, can reconstitute this from console.log files.
+ echo Prior run batches file does not exist: $oldrun/batches
+ exit 1
+fi
+
+if test -f "$oldrun/torture_suite"
+then
+ torture_suite="`cat $oldrun/torture_suite`"
+elif test -f "$oldrun/TORTURE_SUITE"
+then
+ torture_suite="`cat $oldrun/TORTURE_SUITE`"
+else
+ echo "Prior run torture_suite file does not exist: $oldrun/{torture_suite,TORTURE_SUITE}"
+ exit 1
+fi
+
+KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
+PATH=${KVM}/bin:$PATH; export PATH
+. functions.sh
+
+dryrun=
+dur=
+default_link="cp -R"
+rundir="`pwd`/tools/testing/selftests/rcutorture/res/`date +%Y.%m.%d-%H.%M.%S-again`"
+
+startdate="`date`"
+starttime="`get_starttime`"
+
+usage () {
+ echo "Usage: $scriptname $oldrun [ arguments ]:"
+ echo " --dryrun"
+ echo " --duration minutes | <seconds>s | <hours>h | <days>d"
+ echo " --link hard|soft|copy"
+ echo " --remote"
+ echo " --rundir /new/res/path"
+ exit 1
+}
+
+while test $# -gt 0
+do
+ case "$1" in
+ --dryrun)
+ dryrun=1
+ ;;
+ --duration)
+ checkarg --duration "(minutes)" $# "$2" '^[0-9][0-9]*\(s\|m\|h\|d\|\)$' '^error'
+ mult=60
+ if echo "$2" | grep -q 's$'
+ then
+ mult=1
+ elif echo "$2" | grep -q 'h$'
+ then
+ mult=3600
+ elif echo "$2" | grep -q 'd$'
+ then
+ mult=86400
+ fi
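+		# For example, "--duration 30" yields 1800 seconds and
+		# "--duration 2h" yields 7200 seconds.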
+ ts=`echo $2 | sed -e 's/[smhd]$//'`
+ dur=$(($ts*mult))
+ shift
+ ;;
+ --link)
+ checkarg --link "hard|soft|copy" "$#" "$2" 'hard\|soft\|copy' '^--'
+ case "$2" in
+ copy)
+ arg_link="cp -R"
+ ;;
+ hard)
+ arg_link="cp -Rl"
+ ;;
+ soft)
+ arg_link="cp -Rs"
+ ;;
+ esac
+ shift
+ ;;
+ --remote)
+ arg_remote=1
+ default_link="cp -as"
+ ;;
+ --rundir)
+ checkarg --rundir "(absolute pathname)" "$#" "$2" '^/' '^error'
+ rundir=$2
+ if test -e "$rundir"
+ then
+ echo "--rundir $2: Already exists."
+ usage
+ fi
+ shift
+ ;;
+ *)
+ echo Unknown argument $1
+ usage
+ ;;
+ esac
+ shift
+done
+if test -z "$arg_link"
+then
+ arg_link="$default_link"
+fi
+
+echo ---- Re-run results directory: $rundir
+
+# Copy old run directory tree over and adjust.
+mkdir -p "`dirname "$rundir"`"
+if ! $arg_link "$oldrun" "$rundir"
+then
+ echo "Cannot copy from $oldrun to $rundir."
+ usage
+fi
+rm -f "$rundir"/*/{console.log,console.log.diags,qemu_pid,qemu-retval,Warnings,kvm-test-1-run.sh.out,kvm-test-1-run-qemu.sh.out,vmlinux} "$rundir"/log
+echo $oldrun > "$rundir/re-run"
+if ! test -d "$rundir/../../bin"
+then
+ $arg_link "$oldrun/../../bin" "$rundir/../.."
+fi
+for i in $rundir/*/qemu-cmd
+do
+ cp "$i" $T
+ qemu_cmd_dir="`dirname "$i"`"
+ kernel_dir="`echo $qemu_cmd_dir | sed -e 's/\.[0-9]\+$//'`"
+ jitter_dir="`dirname "$kernel_dir"`"
+ kvm-transform.sh "$kernel_dir/bzImage" "$qemu_cmd_dir/console.log" "$jitter_dir" $dur < $T/qemu-cmd > $i
+ if test -n "$arg_remote"
+ then
+ echo "# TORTURE_KCONFIG_GDB_ARG=''" >> $i
+ fi
+done
+
+# Extract settings from the last qemu-cmd file transformed above.
+grep '^#' $i | sed -e 's/^# //' > $T/qemu-cmd-settings
+. $T/qemu-cmd-settings
+
+grep -v '^#' $T/batches.oldrun | awk '
+BEGIN {
+ oldbatch = 1;
+}
+
+{
+ if (oldbatch != $1) {
+ print "kvm-test-1-run-batch.sh" curbatch;
+ curbatch = "";
+ oldbatch = $1;
+ }
+ curbatch = curbatch " " $2;
+}
+
+END {
+ print "kvm-test-1-run-batch.sh" curbatch
+}' > $T/runbatches.sh
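+# For example, batches lines "1 TREE01 16", "1 TREE02 16", and "2 TREE03 8"
+# produce "kvm-test-1-run-batch.sh TREE01 TREE02" followed by
+# "kvm-test-1-run-batch.sh TREE03".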
+
+if test -n "$dryrun"
+then
+ echo ---- Dryrun complete, directory: $rundir | tee -a "$rundir/log"
+else
+ ( cd "$rundir"; sh $T/runbatches.sh )
+ kcsan-collapse.sh "$rundir" | tee -a "$rundir/log"
+ echo | tee -a "$rundir/log"
+ echo ---- Results directory: $rundir | tee -a "$rundir/log"
+ kvm-recheck.sh "$rundir" > $T/kvm-recheck.sh.out 2>&1
+ ret=$?
+ cat $T/kvm-recheck.sh.out | tee -a "$rundir/log"
+ echo " --- Done at `date` (`get_starttime_duration $starttime`) exitcode $ret" | tee -a "$rundir/log"
+ exit $ret
+fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
index 47cf4db10896..e01b31b87044 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -30,7 +30,7 @@ do
resdir=`echo $i | sed -e 's,/$,,' -e 's,/[^/]*$,,'`
head -1 $resdir/log
fi
- TORTURE_SUITE="`cat $i/../TORTURE_SUITE`"
+ TORTURE_SUITE="`cat $i/../torture_suite`"
configfile=`echo $i | sed -e 's,^.*/,,'`
rm -f $i/console.log.*.diags
kvm-recheck-${TORTURE_SUITE}.sh $i
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run-batch.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run-batch.sh
new file mode 100755
index 000000000000..7ea0809e229e
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run-batch.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Carry out a kvm-based run for the specified batch of scenarios, which
+# might have been built by --build-only kvm.sh run.
+#
+# Usage: kvm-test-1-run-batch.sh SCENARIO [ SCENARIO ... ]
+#
+# Each SCENARIO is the name of a directory in the current directory
+# containing a ready-to-run qemu-cmd file.
+#
+# Copyright (C) 2021 Facebook, Inc.
+#
+# Authors: Paul E. McKenney <paulmck@kernel.org>
+
+T=${TMPDIR-/tmp}/kvm-test-1-run-batch.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+echo ---- Running batch $*
+# Check arguments
+runfiles=
+for i in "$@"
+do
+ if ! echo $i | grep -q '^[^/.a-z]\+\(\.[0-9]\+\)\?$'
+ then
+ echo Bad scenario name: \"$i\" 1>&2
+ exit 1
+ fi
+ if ! test -d "$i"
+ then
+ echo Scenario name not a directory: \"$i\" 1>&2
+ exit 2
+ fi
+ if ! test -f "$i/qemu-cmd"
+ then
+ echo Scenario lacks a command file: \"$i/qemu-cmd\" 1>&2
+ exit 3
+ fi
+ rm -f $i/build.*
+ touch $i/build.run
+ runfiles="$runfiles $i/build.run"
+done
+
+# Extract settings from the qemu-cmd file.
+grep '^#' $1/qemu-cmd | sed -e 's/^# //' > $T/qemu-cmd-settings
+. $T/qemu-cmd-settings
+
+# Start up jitter, start each scenario, wait, end jitter.
+echo ---- System running test: `uname -a`
+echo ---- Starting kernels. `date` | tee -a log
+$TORTURE_JITTER_START
+for i in "$@"
+do
+ echo ---- System running test: `uname -a` > $i/kvm-test-1-run-qemu.sh.out
+	echo >> $i/kvm-test-1-run-qemu.sh.out
+ kvm-test-1-run-qemu.sh $i >> $i/kvm-test-1-run-qemu.sh.out 2>&1 &
+done
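+# Each kvm-test-1-run-qemu.sh invocation removes its build.run file when
+# it completes, so spin until every such file is gone.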
+for i in $runfiles
+do
+ while ls $i > /dev/null 2>&1
+ do
+ :
+ done
+done
+echo ---- All kernel runs complete. `date` | tee -a log
+$TORTURE_JITTER_STOP
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run-qemu.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run-qemu.sh
new file mode 100755
index 000000000000..5b1aa2a4f3f6
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run-qemu.sh
@@ -0,0 +1,176 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Carry out a kvm-based run for the specified qemu-cmd file, which might
+# have been generated by --build-only kvm.sh run.
+#
+# Usage: kvm-test-1-run-qemu.sh qemu-cmd-dir
+#
+# qemu-cmd-dir provides the directory containing qemu-cmd file.
+# This is assumed to be of the form prefix/ds/scenario, where
+# "ds" is the top-level date-stamped directory and "scenario"
+# is the scenario name. Any required adjustments to this file
+# must have been made by the caller. The shell-command comments
+# at the end of the qemu-cmd file are not optional.
+#
+# Copyright (C) 2021 Facebook, Inc.
+#
+# Authors: Paul E. McKenney <paulmck@kernel.org>
+
+T=${TMPDIR-/tmp}/kvm-test-1-run-qemu.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+resdir="$1"
+if ! test -d "$resdir"
+then
+ echo $0: Nonexistent directory: $resdir
+ exit 1
+fi
+if ! test -f "$resdir/qemu-cmd"
+then
+ echo $0: Nonexistent qemu-cmd file: $resdir/qemu-cmd
+ exit 1
+fi
+
+echo ' ---' `date`: Starting kernel, PID $$
+
+# Obtain settings from the qemu-cmd file.
+grep '^#' $resdir/qemu-cmd | sed -e 's/^# //' > $T/qemu-cmd-settings
+. $T/qemu-cmd-settings
+
+# Decorate qemu-cmd with redirection, backgrounding, and PID capture
+sed -e 's/$/ 2>\&1 \&/' < $resdir/qemu-cmd > $T/qemu-cmd
+echo 'echo $! > $resdir/qemu_pid' >> $T/qemu-cmd
+
+# In case qemu refuses to run...
+echo "NOTE: $QEMU either did not run or was interactive" > $resdir/console.log
+
+# Attempt to run qemu
+kstarttime=`gawk 'BEGIN { print systime() }' < /dev/null`
+( . $T/qemu-cmd; wait `cat $resdir/qemu_pid`; echo $? > $resdir/qemu-retval ) &
+commandcompleted=0
+if test -z "$TORTURE_KCONFIG_GDB_ARG"
+then
+ sleep 10 # Give qemu's pid a chance to reach the file
+ if test -s "$resdir/qemu_pid"
+ then
+ qemu_pid=`cat "$resdir/qemu_pid"`
+ echo Monitoring qemu job at pid $qemu_pid
+ else
+ qemu_pid=""
+		echo Monitoring qemu job at as-yet-unknown pid
+ fi
+fi
+if test -n "$TORTURE_KCONFIG_GDB_ARG"
+then
+ base_resdir=`echo $resdir | sed -e 's/\.[0-9]\+$//'`
+ if ! test -f $base_resdir/vmlinux
+ then
+ base_resdir="`cat re-run`/$resdir"
+ if ! test -f $base_resdir/vmlinux
+ then
+ base_resdir=/path/to
+ fi
+ fi
+ echo Waiting for you to attach a debug session, for example: > /dev/tty
+ echo " gdb $base_resdir/vmlinux" > /dev/tty
+ echo 'After symbols load and the "(gdb)" prompt appears:' > /dev/tty
+ echo " target remote :1234" > /dev/tty
+ echo " continue" > /dev/tty
+ kstarttime=`gawk 'BEGIN { print systime() }' < /dev/null`
+fi
+while :
+do
+ if test -z "$qemu_pid" -a -s "$resdir/qemu_pid"
+ then
+ qemu_pid=`cat "$resdir/qemu_pid"`
+ fi
+ kruntime=`gawk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null`
+ if test -z "$qemu_pid" || kill -0 "$qemu_pid" > /dev/null 2>&1
+ then
+ if test -n "$TORTURE_KCONFIG_GDB_ARG"
+ then
+ :
+ elif test $kruntime -ge $seconds || test -f "$resdir/../STOP.1"
+ then
+ break;
+ fi
+ sleep 1
+ else
+ commandcompleted=1
+ if test $kruntime -lt $seconds
+ then
+ echo Completed in $kruntime vs. $seconds >> $resdir/Warnings 2>&1
+ grep "^(qemu) qemu:" $resdir/kvm-test-1-run.sh.out >> $resdir/Warnings 2>&1
+ killpid="`sed -n "s/^(qemu) qemu: terminating on signal [0-9]* from pid \([0-9]*\).*$/\1/p" $resdir/Warnings`"
+ if test -n "$killpid"
+ then
+ echo "ps -fp $killpid" >> $resdir/Warnings 2>&1
+ ps -fp $killpid >> $resdir/Warnings 2>&1
+ fi
+ else
+ echo ' ---' `date`: "Kernel done"
+ fi
+ break
+ fi
+done
+if test -z "$qemu_pid" -a -s "$resdir/qemu_pid"
+then
+ qemu_pid=`cat "$resdir/qemu_pid"`
+fi
+if test $commandcompleted -eq 0 -a -n "$qemu_pid"
+then
+ if ! test -f "$resdir/../STOP.1"
+ then
+ echo Grace period for qemu job at pid $qemu_pid
+ fi
+ oldline="`tail $resdir/console.log`"
+ while :
+ do
+ if test -f "$resdir/../STOP.1"
+ then
+ echo "PID $qemu_pid killed due to run STOP.1 request" >> $resdir/Warnings 2>&1
+ kill -KILL $qemu_pid
+ break
+ fi
+ kruntime=`gawk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null`
+ if kill -0 $qemu_pid > /dev/null 2>&1
+ then
+ :
+ else
+ break
+ fi
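+		# Defer the kill while the console log shows progress: fresh
+		# trace output, or timestamps still within the grace period.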
+ must_continue=no
+ newline="`tail $resdir/console.log`"
+ if test "$newline" != "$oldline" && echo $newline | grep -q ' [0-9]\+us : '
+ then
+ must_continue=yes
+ fi
+ last_ts="`tail $resdir/console.log | grep '^\[ *[0-9]\+\.[0-9]\+]' | tail -1 | sed -e 's/^\[ *//' -e 's/\..*$//'`"
+ if test -z "$last_ts"
+ then
+ last_ts=0
+ fi
+ if test "$newline" != "$oldline" -a "$last_ts" -lt $((seconds + $TORTURE_SHUTDOWN_GRACE))
+ then
+ must_continue=yes
+ fi
+ if test $must_continue = no -a $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
+ then
+ echo "!!! PID $qemu_pid hung at $kruntime vs. $seconds seconds" >> $resdir/Warnings 2>&1
+ kill -KILL $qemu_pid
+ break
+ fi
+ oldline=$newline
+ sleep 10
+ done
+elif test -z "$qemu_pid"
+then
+ echo Unknown PID, cannot kill qemu command
+fi
+
+# Tell the script that this run is done.
+rm -f $resdir/build.run
+
+parse-console.sh $resdir/console.log $title
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 536d103ef166..420ed5ce9d32 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -7,15 +7,15 @@
# Execute this in the source tree. Do not run it as a background task
# because qemu does not seem to like that much.
#
-# Usage: kvm-test-1-run.sh config builddir resdir seconds qemu-args boot_args
+# Usage: kvm-test-1-run.sh config resdir seconds qemu-args boot_args_in
#
# qemu-args defaults to "-enable-kvm -nographic", along with arguments
# specifying the number of CPUs and other options
# generated from the underlying CPU architecture.
-# boot_args defaults to value returned by the per_version_boot_params
+# boot_args_in defaults to value returned by the per_version_boot_params
# shell function.
#
-# Anything you specify for either qemu-args or boot_args is appended to
+# Anything you specify for either qemu-args or boot_args_in is appended to
# the default values. The "-smp" value is deduced from the contents of
# the config fragment.
#
@@ -35,14 +35,13 @@ mkdir $T
config_template=${1}
config_dir=`echo $config_template | sed -e 's,/[^/]*$,,'`
title=`echo $config_template | sed -e 's/^.*\///'`
-builddir=${2}
-resdir=${3}
+resdir=${2}
if test -z "$resdir" -o ! -d "$resdir" -o ! -w "$resdir"
then
echo "kvm-test-1-run.sh :$resdir: Not a writable directory, cannot store results into it"
exit 1
fi
-echo ' ---' `date`: Starting build
+echo ' ---' `date`: Starting build, PID $$
echo ' ---' Kconfig fragment at: $config_template >> $resdir/log
touch $resdir/ConfigFragment.input
@@ -73,7 +72,7 @@ config_override_param "--kconfig argument" KcList "$TORTURE_KCONFIG_ARG"
cp $T/KcList $resdir/ConfigFragment
base_resdir=`echo $resdir | sed -e 's/\.[0-9]\+$//'`
-if test "$base_resdir" != "$resdir" -a -f $base_resdir/bzImage -a -f $base_resdir/vmlinux
+if test "$base_resdir" != "$resdir" && test -f $base_resdir/bzImage && test -f $base_resdir/vmlinux
then
# Rerunning previous test, so use that test's kernel.
QEMU="`identify_qemu $base_resdir/vmlinux`"
@@ -83,6 +82,17 @@ then
ln -s $base_resdir/.config $resdir # for kvm-recheck.sh
# Arch-independent indicator
touch $resdir/builtkernel
+elif test "$base_resdir" != "$resdir"
+then
+ # Rerunning previous test for which build failed
+ ln -s $base_resdir/Make*.out $resdir # for kvm-recheck.sh
+ ln -s $base_resdir/.config $resdir # for kvm-recheck.sh
+ echo Initial build failed, not running KVM, see $resdir.
+ if test -f $resdir/build.wait
+ then
+ mv $resdir/build.wait $resdir/build.ready
+ fi
+ exit 1
elif kvm-build.sh $T/KcList $resdir
then
# Had to build a kernel for this test.
@@ -107,23 +117,23 @@ else
# Build failed.
cp .config $resdir || :
echo Build failed, not running KVM, see $resdir.
- if test -f $builddir.wait
+ if test -f $resdir/build.wait
then
- mv $builddir.wait $builddir.ready
+ mv $resdir/build.wait $resdir/build.ready
fi
exit 1
fi
-if test -f $builddir.wait
+if test -f $resdir/build.wait
then
- mv $builddir.wait $builddir.ready
+ mv $resdir/build.wait $resdir/build.ready
fi
-while test -f $builddir.ready
+while test -f $resdir/build.ready
do
sleep 1
done
-seconds=$4
-qemu_args=$5
-boot_args=$6
+seconds=$3
+qemu_args=$4
+boot_args_in=$5
if test -z "$TORTURE_BUILDONLY"
then
@@ -133,7 +143,7 @@ fi
# Generate -smp qemu argument.
qemu_args="-enable-kvm -nographic $qemu_args"
cpu_count=`configNR_CPUS.sh $resdir/ConfigFragment`
-cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
+cpu_count=`configfrag_boot_cpus "$boot_args_in" "$config_template" "$cpu_count"`
if test "$cpu_count" -gt "$TORTURE_ALLOTED_CPUS"
then
echo CPU count limited from $cpu_count to $TORTURE_ALLOTED_CPUS | tee -a $resdir/Warnings
@@ -149,16 +159,52 @@ qemu_args="$qemu_args `identify_qemu_args "$QEMU" "$resdir/console.log"`"
qemu_append="`identify_qemu_append "$QEMU"`"
# Pull in Kconfig-fragment boot parameters
-boot_args="`configfrag_boot_params "$boot_args" "$config_template"`"
+boot_args="`configfrag_boot_params "$boot_args_in" "$config_template"`"
# Generate kernel-version-specific boot parameters
boot_args="`per_version_boot_params "$boot_args" $resdir/.config $seconds`"
if test -n "$TORTURE_BOOT_GDB_ARG"
then
boot_args="$boot_args $TORTURE_BOOT_GDB_ARG"
fi
+
+# Give bare-metal advice
+modprobe_args="`echo $boot_args | tr -s ' ' '\012' | grep "^$TORTURE_MOD\." | sed -e "s/$TORTURE_MOD\.//g"`"
+kboot_args="`echo $boot_args | tr -s ' ' '\012' | grep -v "^$TORTURE_MOD\."`"
+testid_txt="`dirname $resdir`/testid.txt"
+touch $resdir/bare-metal
+echo To run this scenario on bare metal: >> $resdir/bare-metal
+echo >> $resdir/bare-metal
+echo " 1." Set your bare-metal build tree to the state shown in this file: >> $resdir/bare-metal
+echo " " $testid_txt >> $resdir/bare-metal
+echo " 2." Update your bare-metal build tree"'"s .config based on this file: >> $resdir/bare-metal
+echo " " $resdir/ConfigFragment >> $resdir/bare-metal
+echo " 3." Make the bare-metal kernel"'"s build system aware of your .config updates: >> $resdir/bare-metal
+echo " " $ 'yes "" | make oldconfig' >> $resdir/bare-metal
+echo " 4." Build your bare-metal kernel. >> $resdir/bare-metal
+echo " 5." Boot your bare-metal kernel with the following parameters: >> $resdir/bare-metal
+echo " " $kboot_args >> $resdir/bare-metal
+echo " 6." Start the test with the following command: >> $resdir/bare-metal
+echo " " $ modprobe $TORTURE_MOD $modprobe_args >> $resdir/bare-metal
+echo " 7." After some time, end the test with the following command: >> $resdir/bare-metal
+echo " " $ rmmod $TORTURE_MOD >> $resdir/bare-metal
+echo " 8." Copy your bare-metal kernel"'"s .config file, overwriting this file: >> $resdir/bare-metal
+echo " " $resdir/.config >> $resdir/bare-metal
+echo " 9." Copy the console output from just before the modprobe to just after >> $resdir/bare-metal
+echo " " the rmmod into this file: >> $resdir/bare-metal
+echo " " $resdir/console.log >> $resdir/bare-metal
+echo "10." Check for runtime errors using the following command: >> $resdir/bare-metal
+echo " " $ tools/testing/selftests/rcutorture/bin/kvm-recheck.sh `dirname $resdir` >> $resdir/bare-metal
+echo >> $resdir/bare-metal
+echo Some of the above steps may be skipped if you build your bare-metal >> $resdir/bare-metal
+echo kernel here: `head -n 1 $testid_txt | sed -e 's/^Build directory: //'` >> $resdir/bare-metal
+
echo $QEMU $qemu_args -m $TORTURE_QEMU_MEM -kernel $KERNEL -append \"$qemu_append $boot_args\" $TORTURE_QEMU_GDB_ARG > $resdir/qemu-cmd
echo "# TORTURE_SHUTDOWN_GRACE=$TORTURE_SHUTDOWN_GRACE" >> $resdir/qemu-cmd
echo "# seconds=$seconds" >> $resdir/qemu-cmd
+echo "# TORTURE_KCONFIG_GDB_ARG=\"$TORTURE_KCONFIG_GDB_ARG\"" >> $resdir/qemu-cmd
+echo "# TORTURE_JITTER_START=\"$TORTURE_JITTER_START\"" >> $resdir/qemu-cmd
+echo "# TORTURE_JITTER_STOP=\"$TORTURE_JITTER_STOP\"" >> $resdir/qemu-cmd
+echo "# TORTURE_TRUST_MAKE=\"$TORTURE_TRUST_MAKE\"; export TORTURE_TRUST_MAKE" >> $resdir/qemu-cmd
if test -n "$TORTURE_BUILDONLY"
then
@@ -167,140 +213,4 @@ then
exit 0
fi
-# Decorate qemu-cmd with redirection, backgrounding, and PID capture
-sed -e 's/$/ 2>\&1 \&/' < $resdir/qemu-cmd > $T/qemu-cmd
-echo 'echo $! > $resdir/qemu_pid' >> $T/qemu-cmd
-
-# In case qemu refuses to run...
-echo "NOTE: $QEMU either did not run or was interactive" > $resdir/console.log
-
-# Attempt to run qemu
-kstarttime=`gawk 'BEGIN { print systime() }' < /dev/null`
-( . $T/qemu-cmd; wait `cat $resdir/qemu_pid`; echo $? > $resdir/qemu-retval ) &
-commandcompleted=0
-if test -z "$TORTURE_KCONFIG_GDB_ARG"
-then
- sleep 10 # Give qemu's pid a chance to reach the file
- if test -s "$resdir/qemu_pid"
- then
- qemu_pid=`cat "$resdir/qemu_pid"`
- echo Monitoring qemu job at pid $qemu_pid
- else
- qemu_pid=""
- echo Monitoring qemu job at yet-as-unknown pid
- fi
-fi
-if test -n "$TORTURE_KCONFIG_GDB_ARG"
-then
- echo Waiting for you to attach a debug session, for example: > /dev/tty
- echo " gdb $base_resdir/vmlinux" > /dev/tty
- echo 'After symbols load and the "(gdb)" prompt appears:' > /dev/tty
- echo " target remote :1234" > /dev/tty
- echo " continue" > /dev/tty
- kstarttime=`gawk 'BEGIN { print systime() }' < /dev/null`
-fi
-while :
-do
- if test -z "$qemu_pid" -a -s "$resdir/qemu_pid"
- then
- qemu_pid=`cat "$resdir/qemu_pid"`
- fi
- kruntime=`gawk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null`
- if test -z "$qemu_pid" || kill -0 "$qemu_pid" > /dev/null 2>&1
- then
- if test -n "$TORTURE_KCONFIG_GDB_ARG"
- then
- :
- elif test $kruntime -ge $seconds || test -f "$resdir/../STOP.1"
- then
- break;
- fi
- sleep 1
- else
- commandcompleted=1
- if test $kruntime -lt $seconds
- then
- echo Completed in $kruntime vs. $seconds >> $resdir/Warnings 2>&1
- grep "^(qemu) qemu:" $resdir/kvm-test-1-run.sh.out >> $resdir/Warnings 2>&1
- killpid="`sed -n "s/^(qemu) qemu: terminating on signal [0-9]* from pid \([0-9]*\).*$/\1/p" $resdir/Warnings`"
- if test -n "$killpid"
- then
- echo "ps -fp $killpid" >> $resdir/Warnings 2>&1
- ps -fp $killpid >> $resdir/Warnings 2>&1
- fi
- # Reduce probability of PID reuse by allowing a one-minute buffer
- if test $((kruntime + 60)) -lt $seconds && test -s "$resdir/../jitter_pids"
- then
- awk < "$resdir/../jitter_pids" '
- NF > 0 {
- pidlist = pidlist " " $1;
- n++;
- }
- END {
- if (n > 0) {
- print "kill " pidlist;
- }
- }' | sh
- fi
- else
- echo ' ---' `date`: "Kernel done"
- fi
- break
- fi
-done
-if test -z "$qemu_pid" -a -s "$resdir/qemu_pid"
-then
- qemu_pid=`cat "$resdir/qemu_pid"`
-fi
-if test $commandcompleted -eq 0 -a -n "$qemu_pid"
-then
- if ! test -f "$resdir/../STOP.1"
- then
- echo Grace period for qemu job at pid $qemu_pid
- fi
- oldline="`tail $resdir/console.log`"
- while :
- do
- if test -f "$resdir/../STOP.1"
- then
- echo "PID $qemu_pid killed due to run STOP.1 request" >> $resdir/Warnings 2>&1
- kill -KILL $qemu_pid
- break
- fi
- kruntime=`gawk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null`
- if kill -0 $qemu_pid > /dev/null 2>&1
- then
- :
- else
- break
- fi
- must_continue=no
- newline="`tail $resdir/console.log`"
- if test "$newline" != "$oldline" && echo $newline | grep -q ' [0-9]\+us : '
- then
- must_continue=yes
- fi
- last_ts="`tail $resdir/console.log | grep '^\[ *[0-9]\+\.[0-9]\+]' | tail -1 | sed -e 's/^\[ *//' -e 's/\..*$//'`"
- if test -z "$last_ts"
- then
- last_ts=0
- fi
- if test "$newline" != "$oldline" -a "$last_ts" -lt $((seconds + $TORTURE_SHUTDOWN_GRACE))
- then
- must_continue=yes
- fi
- if test $must_continue = no -a $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
- then
- echo "!!! PID $qemu_pid hung at $kruntime vs. $seconds seconds" >> $resdir/Warnings 2>&1
- kill -KILL $qemu_pid
- break
- fi
- oldline=$newline
- sleep 10
- done
-elif test -z "$qemu_pid"
-then
- echo Unknown PID, cannot kill qemu command
-fi
-
-parse-console.sh $resdir/console.log $title
+kvm-test-1-run-qemu.sh $resdir
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-transform.sh b/tools/testing/selftests/rcutorture/bin/kvm-transform.sh
index c45a953ef393..d40b4e60a50c 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-transform.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-transform.sh
@@ -3,7 +3,7 @@
#
# Transform a qemu-cmd file to allow reuse.
#
-# Usage: kvm-transform.sh bzImage console.log < qemu-cmd-in > qemu-cmd-out
+# Usage: kvm-transform.sh bzImage console.log jitter_dir [ seconds ] < qemu-cmd-in > qemu-cmd-out
#
# bzImage: Kernel and initrd from the same prior kvm.sh run.
# console.log: File into which to place console output.
@@ -29,20 +29,62 @@ then
echo "Need console log file name."
exit 1
fi
+jitter_dir="$3"
+if test -z "$jitter_dir" || ! test -d "$jitter_dir"
+then
+ echo "Need valid jitter directory: '$jitter_dir'"
+ exit 1
+fi
+seconds="$4"
+if test -n "$seconds" && echo $seconds | grep -q '[^0-9]'
+then
+ echo "Invalid duration, should be numeric in seconds: '$seconds'"
+ exit 1
+fi
+
+awk -v image="$image" -v consolelog="$consolelog" -v jitter_dir="$jitter_dir" \
+ -v seconds="$seconds" '
+/^# seconds=/ {
+ if (seconds == "")
+ print $0;
+ else
+ print "# seconds=" seconds;
+ next;
+}
+
+/^# TORTURE_JITTER_START=/ {
+ print "# TORTURE_JITTER_START=\". jitterstart.sh " $4 " " jitter_dir " " $6 " " $7;
+ next;
+}
+
+/^# TORTURE_JITTER_STOP=/ {
+ print "# TORTURE_JITTER_STOP=\". jitterstop.sh " " " jitter_dir " " $5;
+ next;
+}
+
+/^#/ {
+ print $0;
+ next;
+}
-awk -v image="$image" -v consolelog="$consolelog" '
{
line = "";
for (i = 1; i <= NF; i++) {
- if (line == "")
+ if ("" seconds != "" && $i ~ /\.shutdown_secs=[0-9]*$/) {
+ sub(/[0-9]*$/, seconds, $i);
+ if (line == "")
+ line = $i;
+ else
+ line = line " " $i;
+ } else if (line == "") {
line = $i;
- else
+ } else {
line = line " " $i;
+ }
if ($i == "-serial") {
i++;
line = line " file:" consolelog;
- }
- if ($i == "-kernel") {
+ } else if ($i == "-kernel") {
i++;
line = line " " image;
}
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 8d3c99b35e06..6bf00a003d3d 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -29,17 +29,21 @@ PATH=${KVM}/bin:$PATH; export PATH
TORTURE_ALLOTED_CPUS="`identify_qemu_vcpus`"
TORTURE_DEFCONFIG=defconfig
TORTURE_BOOT_IMAGE=""
+TORTURE_BUILDONLY=
TORTURE_INITRD="$KVM/initrd"; export TORTURE_INITRD
TORTURE_KCONFIG_ARG=""
TORTURE_KCONFIG_GDB_ARG=""
TORTURE_BOOT_GDB_ARG=""
TORTURE_QEMU_GDB_ARG=""
+TORTURE_JITTER_START=""
+TORTURE_JITTER_STOP=""
TORTURE_KCONFIG_KASAN_ARG=""
TORTURE_KCONFIG_KCSAN_ARG=""
TORTURE_KMAKE_ARG=""
TORTURE_QEMU_MEM=512
TORTURE_SHUTDOWN_GRACE=180
TORTURE_SUITE=rcu
+TORTURE_MOD=rcutorture
TORTURE_TRUST_MAKE=""
resdir=""
configs=""
@@ -100,7 +104,7 @@ do
TORTURE_BUILDONLY=1
;;
--configs|--config)
- checkarg --configs "(list of config files)" "$#" "$2" '^[^/]\+$' '^--'
+ checkarg --configs "(list of config files)" "$#" "$2" '^[^/.a-z]\+$' '^--'
configs="$configs $2"
shift
;;
@@ -116,7 +120,7 @@ do
shift
;;
--datestamp)
- checkarg --datestamp "(relative pathname)" "$#" "$2" '^[a-zA-Z0-9._-/]*$' '^--'
+ checkarg --datestamp "(relative pathname)" "$#" "$2" '^[a-zA-Z0-9._/-]*$' '^--'
ds=$2
shift
;;
@@ -215,6 +219,7 @@ do
--torture)
checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\|rcuscale\|refscale\|scf\)$' '^--'
TORTURE_SUITE=$2
+ TORTURE_MOD="`echo $TORTURE_SUITE | sed -e 's/^\(lock\|rcu\|scf\)$/\1torture/'`"
shift
if test "$TORTURE_SUITE" = rcuscale || test "$TORTURE_SUITE" = refscale
then
@@ -381,6 +386,7 @@ TORTURE_QEMU_GDB_ARG="$TORTURE_QEMU_GDB_ARG"; export TORTURE_QEMU_GDB_ARG
TORTURE_KCONFIG_KASAN_ARG="$TORTURE_KCONFIG_KASAN_ARG"; export TORTURE_KCONFIG_KASAN_ARG
TORTURE_KCONFIG_KCSAN_ARG="$TORTURE_KCONFIG_KCSAN_ARG"; export TORTURE_KCONFIG_KCSAN_ARG
TORTURE_KMAKE_ARG="$TORTURE_KMAKE_ARG"; export TORTURE_KMAKE_ARG
+TORTURE_MOD="$TORTURE_MOD"; export TORTURE_MOD
TORTURE_QEMU_CMD="$TORTURE_QEMU_CMD"; export TORTURE_QEMU_CMD
TORTURE_QEMU_INTERACTIVE="$TORTURE_QEMU_INTERACTIVE"; export TORTURE_QEMU_INTERACTIVE
TORTURE_QEMU_MAC="$TORTURE_QEMU_MAC"; export TORTURE_QEMU_MAC
@@ -399,12 +405,17 @@ echo Results directory: $resdir/$ds
echo $scriptname $args
touch $resdir/$ds/log
echo $scriptname $args >> $resdir/$ds/log
-echo ${TORTURE_SUITE} > $resdir/$ds/TORTURE_SUITE
-pwd > $resdir/$ds/testid.txt
+echo ${TORTURE_SUITE} > $resdir/$ds/torture_suite
+echo Build directory: `pwd` > $resdir/$ds/testid.txt
if test -d .git
then
+ echo Current commit: `git rev-parse HEAD` >> $resdir/$ds/testid.txt
+ echo >> $resdir/$ds/testid.txt
+ echo ' ---' Output of "'"git status"'": >> $resdir/$ds/testid.txt
git status >> $resdir/$ds/testid.txt
- git rev-parse HEAD >> $resdir/$ds/testid.txt
+ echo >> $resdir/$ds/testid.txt
+ echo >> $resdir/$ds/testid.txt
+ echo ' ---' Output of "'"git diff HEAD"'": >> $resdir/$ds/testid.txt
git diff HEAD >> $resdir/$ds/testid.txt
fi
___EOF___
@@ -434,8 +445,17 @@ function dump(first, pastlast, batchnum)
print "echo ----Start batch " batchnum ": `date` | tee -a " rd "log";
print "needqemurun="
jn=1
+ njitter = 0;
+ split(jitter, ja);
+ if (ja[1] == -1 && ncpus == 0)
+ njitter = 1;
+ else if (ja[1] == -1)
+ njitter = ncpus;
+ else
+ njitter = ja[1];
+ print "TORTURE_JITTER_START=\". jitterstart.sh " njitter " " rd " " dur " " ja[2] " " ja[3] "\"; export TORTURE_JITTER_START";
+ print "TORTURE_JITTER_STOP=\". jitterstop.sh " rd " \"; export TORTURE_JITTER_STOP"
for (j = first; j < pastlast; j++) {
- builddir=KVM "/b" j - first + 1
cpusr[jn] = cpus[j];
if (cfrep[cf[j]] == "") {
cfr[jn] = cf[j];
@@ -444,15 +464,15 @@ function dump(first, pastlast, batchnum)
cfrep[cf[j]]++;
cfr[jn] = cf[j] "." cfrep[cf[j]];
}
+ builddir=rd cfr[jn] "/build";
if (cpusr[jn] > ncpus && ncpus != 0)
ovf = "-ovf";
else
ovf = "";
print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date` | tee -a " rd "log";
- print "rm -f " builddir ".*";
- print "touch " builddir ".wait";
print "mkdir " rd cfr[jn] " || :";
- print "kvm-test-1-run.sh " CONFIGDIR cf[j], builddir, rd cfr[jn], dur " \"" TORTURE_QEMU_ARG "\" \"" TORTURE_BOOTARGS "\" > " rd cfr[jn] "/kvm-test-1-run.sh.out 2>&1 &"
+ print "touch " builddir ".wait";
+ print "kvm-test-1-run.sh " CONFIGDIR cf[j], rd cfr[jn], dur " \"" TORTURE_QEMU_ARG "\" \"" TORTURE_BOOTARGS "\" > " rd cfr[jn] "/kvm-test-1-run.sh.out 2>&1 &"
print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date` | tee -a " rd "log";
print "while test -f " builddir ".wait"
print "do"
@@ -461,23 +481,21 @@ function dump(first, pastlast, batchnum)
print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date` | tee -a " rd "log";
jn++;
}
+ print "runfiles="
for (j = 1; j < jn; j++) {
- builddir=KVM "/b" j
- print "rm -f " builddir ".ready"
+ builddir=rd cfr[j] "/build";
+		if (TORTURE_BUILDONLY) {
+			print "rm -f " builddir ".ready"
+		} else {
+			print "mv " builddir ".ready " builddir ".run"
+			print "runfiles=\"$runfiles " builddir ".run\""
+		}
print "if test -f \"" rd cfr[j] "/builtkernel\""
print "then"
print "\techo ----", cfr[j], cpusr[j] ovf ": Kernel present. `date` | tee -a " rd "log";
print "\tneedqemurun=1"
print "fi"
}
- njitter = 0;
- split(jitter, ja);
- if (ja[1] == -1 && ncpus == 0)
- njitter = 1;
- else if (ja[1] == -1)
- njitter = ncpus;
- else
- njitter = ja[1];
if (TORTURE_BUILDONLY && njitter != 0) {
njitter = 0;
print "echo Build-only run, so suppressing jitter | tee -a " rd "log"
@@ -488,19 +506,18 @@ function dump(first, pastlast, batchnum)
print "if test -n \"$needqemurun\""
print "then"
print "\techo ---- Starting kernels. `date` | tee -a " rd "log";
- print "\techo > " rd "jitter_pids"
- for (j = 0; j < njitter; j++) {
- print "\tjitter.sh " j " " dur " " ja[2] " " ja[3] "&"
- print "\techo $! >> " rd "jitter_pids"
- }
- print "\twait"
+ print "\t$TORTURE_JITTER_START";
+ print "\twhile ls $runfiles > /dev/null 2>&1"
+ print "\tdo"
+ print "\t\t:"
+ print "\tdone"
+ print "\t$TORTURE_JITTER_STOP";
print "\techo ---- All kernel runs complete. `date` | tee -a " rd "log";
print "else"
print "\twait"
print "\techo ---- No kernel runs. `date` | tee -a " rd "log";
print "fi"
for (j = 1; j < jn; j++) {
- builddir=KVM "/b" j
print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results: | tee -a " rd "log";
print "cat " rd cfr[j] "/kvm-test-1-run.sh.out | tee -a " rd "log";
}
@@ -548,6 +565,18 @@ echo 'ret=$?' >> $T/script
echo "cat $T/kvm-recheck.sh.out | tee -a $resdir/$ds/log" >> $T/script
echo 'exit $ret' >> $T/script
+# Extract the tests and their batches from the script.
+egrep 'Start batch|Starting build\.' $T/script | grep -v ">>" |
+ sed -e 's/:.*$//' -e 's/^echo //' -e 's/-ovf//' |
+ awk '
+ /^----Start/ {
+ batchno = $3;
+ next;
+ }
+ {
+ print batchno, $1, $2
+ }' > $T/batches
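+# Each line of $T/batches is "batch-number scenario-name CPU-count".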
+
if test "$dryrun" = script
then
cat $T/script
@@ -566,21 +595,14 @@ then
exit 0
elif test "$dryrun" = batches
then
- # Extract the tests and their batches from the script.
- egrep 'Start batch|Starting build\.' $T/script | grep -v ">>" |
- sed -e 's/:.*$//' -e 's/^echo //' -e 's/-ovf//' |
- awk '
- /^----Start/ {
- batchno = $3;
- next;
- }
- {
- print batchno, $1, $2
- }'
+ cat $T/batches
+ exit 0
else
- # Not a dryrun, so run the script.
+ # Not a dryrun. Record the batches and the number of CPUs, then run the script.
bash $T/script
ret=$?
+ cp $T/batches $resdir/$ds/batches
+ echo '#' cpus=$cpus >> $resdir/$ds/batches
echo " --- Done at `date` (`get_starttime_duration $starttime`) exitcode $ret" | tee -a $resdir/$ds/log
exit $ret
fi
diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh
index ad7525b7ac29..56e2e1a42569 100755
--- a/tools/testing/selftests/rcutorture/bin/torture.sh
+++ b/tools/testing/selftests/rcutorture/bin/torture.sh
@@ -374,7 +374,7 @@ done
if test "$do_kvfree" = "yes"
then
torture_bootargs="rcuscale.kfree_rcu_test=1 rcuscale.kfree_nthreads=16 rcuscale.holdoff=20 rcuscale.kfree_loops=10000 torture.disable_onoff_at_boot"
- torture_set "rcuscale-kvfree" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --trust-make
+ torture_set "rcuscale-kvfree" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 1G --trust-make
fi
echo " --- " $scriptname $args
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/CFLIST b/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
index f2b20db9e296..98b6175e5aa0 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
+++ b/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
@@ -7,8 +7,8 @@ TREE07
TREE09
SRCU-N
SRCU-P
-SRCU-t
-SRCU-u
+SRCU-T
+SRCU-U
TINY01
TINY02
TASKS01
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-t b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-T
index d6557c38dfe4..d6557c38dfe4 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-t
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-T
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-t.boot b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-T.boot
index 238bfe3bd0cc..238bfe3bd0cc 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-t.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-T.boot
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-u b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-U
index 6bc24e99862f..6bc24e99862f 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-u
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-U
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-u.boot b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-U.boot
index ce48c7b82673..ce48c7b82673 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-u.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-U.boot
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
index 1c218944b1e9..64f864f1f361 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
@@ -4,3 +4,4 @@ rcutree.gp_init_delay=3
rcutree.gp_cleanup_delay=3
rcutree.kthread_prio=2
threadirqs
+tree.use_softirq=0
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot
index 5adc6756792a..a8d94caf7d2f 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot
@@ -1 +1 @@
-rcutree.rcu_fanout_leaf=4 nohz_full=1-7
+rcutree.rcu_fanout_leaf=4 nohz_full=1-N
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot
index 22478fd3a865..94d38445d393 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot
@@ -1,3 +1,3 @@
rcupdate.rcu_self_test=1
rcutree.rcu_fanout_exact=1
-rcu_nocbs=0-7
+rcu_nocbs=all
diff --git a/tools/testing/selftests/rcutorture/configs/rcuscale/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcuscale/ver_functions.sh
index 0333e9b18522..ffbe15109f0d 100644
--- a/tools/testing/selftests/rcutorture/configs/rcuscale/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcuscale/ver_functions.sh
@@ -12,5 +12,5 @@
# Adds per-version torture-module parameters to kernels supporting them.
per_version_boot_params () {
echo $1 rcuscale.shutdown=1 \
- rcuscale.verbose=1
+ rcuscale.verbose=0
}
diff --git a/tools/testing/selftests/rcutorture/configs/refscale/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/refscale/ver_functions.sh
index 321e82641287..f81fa2c541a6 100644
--- a/tools/testing/selftests/rcutorture/configs/refscale/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/refscale/ver_functions.sh
@@ -12,5 +12,5 @@
# Adds per-version torture-module parameters to kernels supporting them.
per_version_boot_params () {
echo $1 refscale.shutdown=1 \
- refscale.verbose=1
+ refscale.verbose=0
}
diff --git a/tools/testing/selftests/resctrl/.gitignore b/tools/testing/selftests/resctrl/.gitignore
new file mode 100644
index 000000000000..ab68442b6bc8
--- /dev/null
+++ b/tools/testing/selftests/resctrl/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+resctrl_tests
diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
index d585cc1948cc..6bcee2ec91a9 100644
--- a/tools/testing/selftests/resctrl/Makefile
+++ b/tools/testing/selftests/resctrl/Makefile
@@ -1,5 +1,5 @@
CC = $(CROSS_COMPILE)gcc
-CFLAGS = -g -Wall
+CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
SRCS=$(wildcard *.c)
OBJS=$(SRCS:.c=.o)
diff --git a/tools/testing/selftests/resctrl/README b/tools/testing/selftests/resctrl/README
index 6e5a0ffa18e8..4b36b25b6ac0 100644
--- a/tools/testing/selftests/resctrl/README
+++ b/tools/testing/selftests/resctrl/README
@@ -46,8 +46,8 @@ ARGUMENTS
Parameter '-h' shows usage information.
usage: resctrl_tests [-h] [-b "benchmark_cmd [options]"] [-t test list] [-n no_of_bits]
- -b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CQM default benchmark is builtin fill_buf
- -t test list: run tests specified in the test list, e.g. -t mbm, mba, cqm, cat
+ -b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CMT default benchmark is builtin fill_buf
+ -t test list: run tests specified in the test list, e.g. -t mbm, mba, cmt, cat
-n no_of_bits: run cache tests using specified no of bits in cache bit mask
-p cpu_no: specify CPU number to run the test. 1 is default
-h: help
diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
index 38dbf4962e33..68ff856d36f0 100644
--- a/tools/testing/selftests/resctrl/cache.c
+++ b/tools/testing/selftests/resctrl/cache.c
@@ -111,7 +111,7 @@ static int get_llc_perf(unsigned long *llc_perf_miss)
/*
* Get LLC Occupancy as reported by RESCTRL FS
- * For CQM,
+ * For CMT,
* 1. If con_mon grp and mon grp given, then read from mon grp in
* con_mon grp
* 2. If only con_mon grp given, then read from con_mon grp
@@ -182,7 +182,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
/*
* Measure cache miss from perf.
*/
- if (!strcmp(param->resctrl_val, "cat")) {
+ if (!strncmp(param->resctrl_val, CAT_STR, sizeof(CAT_STR))) {
ret = get_llc_perf(&llc_perf_miss);
if (ret < 0)
return ret;
@@ -192,7 +192,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
/*
* Measure llc occupancy from resctrl.
*/
- if (!strcmp(param->resctrl_val, "cqm")) {
+ if (!strncmp(param->resctrl_val, CMT_STR, sizeof(CMT_STR))) {
ret = get_llc_occu_resctrl(&llc_occu_resc);
if (ret < 0)
return ret;
@@ -234,7 +234,7 @@ int cat_val(struct resctrl_val_param *param)
if (ret)
return ret;
- if ((strcmp(resctrl_val, "cat") == 0)) {
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
ret = initialize_llc_perf();
if (ret)
return ret;
@@ -242,7 +242,7 @@ int cat_val(struct resctrl_val_param *param)
/* Test runs until the callback setup() tells the test to stop. */
while (1) {
- if (strcmp(resctrl_val, "cat") == 0) {
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
ret = param->setup(1, param);
if (ret) {
ret = 0;
@@ -270,3 +270,45 @@ int cat_val(struct resctrl_val_param *param)
return ret;
}
+
+/*
+ * show_cache_info: show cache test result information
+ * @sum_llc_val: sum of LLC cache result data
+ * @no_of_bits: number of bits
+ * @cache_span: cache span in bytes for CMT or in lines for CAT
+ * @max_diff: max difference
+ * @max_diff_percent: max difference percentage
+ * @num_of_runs: number of runs
+ * @platform: whether pass/fail should be checked on this platform
+ * @cmt: CMT test or CAT test
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
+ unsigned long cache_span, unsigned long max_diff,
+ unsigned long max_diff_percent, unsigned long num_of_runs,
+ bool platform, bool cmt)
+{
+ unsigned long avg_llc_val = 0;
+ float diff_percent;
+ long avg_diff = 0;
+ int ret;
+
+ avg_llc_val = sum_llc_val / (num_of_runs - 1);
+ avg_diff = (long)abs(cache_span - avg_llc_val);
+ diff_percent = ((float)cache_span - avg_llc_val) / cache_span * 100;
+
+ ret = platform && abs((int)diff_percent) > max_diff_percent &&
+ (cmt ? (abs(avg_diff) > max_diff) : true);
+
+ ksft_print_msg("%s Check cache miss rate within %d%%\n",
+ ret ? "Fail:" : "Pass:", max_diff_percent);
+
+ ksft_print_msg("Percent diff=%d\n", abs((int)diff_percent));
+ ksft_print_msg("Number of bits: %d\n", no_of_bits);
+ ksft_print_msg("Average LLC val: %lu\n", avg_llc_val);
+ ksft_print_msg("Cache span (%s): %lu\n", cmt ? "bytes" : "lines",
+ cache_span);
+
+ return ret;
+}
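
A self-contained sketch of the pass/fail arithmetic in show_cache_info() above, using invented CAT-style numbers (cmt == false, platform == true); the values are hypothetical, not taken from the selftest:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* Hypothetical inputs: 5 runs, the first one discarded. */
		unsigned long sum_llc_val = 3840, cache_span = 1000; /* lines */
		unsigned long num_of_runs = 5, max_diff_percent = 4;
		unsigned long avg_llc_val = sum_llc_val / (num_of_runs - 1); /* 960 */
		float diff_percent = ((float)cache_span - avg_llc_val) /
				     cache_span * 100;                       /* 4.0 */
		int ret = abs((int)diff_percent) > (int)max_diff_percent;    /* 0 */

		printf("%s Check cache miss rate within %lu%%\n",
		       ret ? "Fail:" : "Pass:", max_diff_percent);
		return ret;
	}
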
diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
index 5da43767b973..cd4f68388e0f 100644
--- a/tools/testing/selftests/resctrl/cat_test.c
+++ b/tools/testing/selftests/resctrl/cat_test.c
@@ -17,10 +17,10 @@
#define MAX_DIFF_PERCENT 4
#define MAX_DIFF 1000000
-int count_of_bits;
-char cbm_mask[256];
-unsigned long long_mask;
-unsigned long cache_size;
+static int count_of_bits;
+static char cbm_mask[256];
+static unsigned long long_mask;
+static unsigned long cache_size;
/*
* Change schemata. Write schemata to specified
@@ -52,27 +52,6 @@ static int cat_setup(int num, ...)
return ret;
}
-static void show_cache_info(unsigned long sum_llc_perf_miss, int no_of_bits,
- unsigned long span)
-{
- unsigned long allocated_cache_lines = span / 64;
- unsigned long avg_llc_perf_miss = 0;
- float diff_percent;
-
- avg_llc_perf_miss = sum_llc_perf_miss / (NUM_OF_RUNS - 1);
- diff_percent = ((float)allocated_cache_lines - avg_llc_perf_miss) /
- allocated_cache_lines * 100;
-
- printf("%sok CAT: cache miss rate within %d%%\n",
- !is_amd && abs((int)diff_percent) > MAX_DIFF_PERCENT ?
- "not " : "", MAX_DIFF_PERCENT);
- tests_run++;
- printf("# Percent diff=%d\n", abs((int)diff_percent));
- printf("# Number of bits: %d\n", no_of_bits);
- printf("# Avg_llc_perf_miss: %lu\n", avg_llc_perf_miss);
- printf("# Allocated cache lines: %lu\n", allocated_cache_lines);
-}
-
static int check_results(struct resctrl_val_param *param)
{
char *token_array[8], temp[512];
@@ -80,7 +59,7 @@ static int check_results(struct resctrl_val_param *param)
int runs = 0, no_of_bits = 0;
FILE *fp;
- printf("# Checking for pass/fail\n");
+ ksft_print_msg("Checking for pass/fail\n");
fp = fopen(param->filename, "r");
if (!fp) {
perror("# Cannot open file");
@@ -108,9 +87,9 @@ static int check_results(struct resctrl_val_param *param)
fclose(fp);
no_of_bits = count_bits(param->mask);
- show_cache_info(sum_llc_perf_miss, no_of_bits, param->span);
-
- return 0;
+ return show_cache_info(sum_llc_perf_miss, no_of_bits, param->span / 64,
+ MAX_DIFF, MAX_DIFF_PERCENT, NUM_OF_RUNS,
+ !is_amd, false);
}
void cat_test_cleanup(void)
@@ -132,11 +111,8 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
if (ret)
return ret;
- if (!validate_resctrl_feature_request("cat"))
- return -1;
-
/* Get default cbm mask for L3/L2 cache */
- ret = get_cbm_mask(cache_type);
+ ret = get_cbm_mask(cache_type, cbm_mask);
if (ret)
return ret;
@@ -146,15 +122,18 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
ret = get_cache_size(cpu_no, cache_type, &cache_size);
if (ret)
return ret;
- printf("cache size :%lu\n", cache_size);
+ ksft_print_msg("Cache size :%lu\n", cache_size);
/* Get max number of bits from default-cabm mask */
count_of_bits = count_bits(long_mask);
- if (n < 1 || n > count_of_bits - 1) {
- printf("Invalid input value for no_of_bits n!\n");
- printf("Please Enter value in range 1 to %d\n",
- count_of_bits - 1);
+ if (!n)
+ n = count_of_bits / 2;
+
+ if (n > count_of_bits - 1) {
+ ksft_print_msg("Invalid input value for no_of_bits n!\n");
+ ksft_print_msg("Please enter value in range 1 to %d\n",
+ count_of_bits - 1);
return -1;
}
@@ -164,7 +143,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
return -1;
struct resctrl_val_param param = {
- .resctrl_val = "cat",
+ .resctrl_val = CAT_STR,
.cpu_no = cpu_no,
.mum_resctrlfs = 0,
.setup = cat_setup,
diff --git a/tools/testing/selftests/resctrl/cqm_test.c b/tools/testing/selftests/resctrl/cmt_test.c
index c8756152bd61..8968e36db99d 100644
--- a/tools/testing/selftests/resctrl/cqm_test.c
+++ b/tools/testing/selftests/resctrl/cmt_test.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Cache Monitoring Technology (CQM) test
+ * Cache Monitoring Technology (CMT) test
*
* Copyright (C) 2018 Intel Corporation
*
@@ -11,17 +11,17 @@
#include "resctrl.h"
#include <unistd.h>
-#define RESULT_FILE_NAME "result_cqm"
+#define RESULT_FILE_NAME "result_cmt"
#define NUM_OF_RUNS 5
#define MAX_DIFF 2000000
#define MAX_DIFF_PERCENT 15
-int count_of_bits;
-char cbm_mask[256];
-unsigned long long_mask;
-unsigned long cache_size;
+static int count_of_bits;
+static char cbm_mask[256];
+static unsigned long long_mask;
+static unsigned long cache_size;
-static int cqm_setup(int num, ...)
+static int cmt_setup(int num, ...)
{
struct resctrl_val_param *p;
va_list param;
@@ -39,38 +39,6 @@ static int cqm_setup(int num, ...)
return 0;
}
-static void show_cache_info(unsigned long sum_llc_occu_resc, int no_of_bits,
- unsigned long span)
-{
- unsigned long avg_llc_occu_resc = 0;
- float diff_percent;
- long avg_diff = 0;
- bool res;
-
- avg_llc_occu_resc = sum_llc_occu_resc / (NUM_OF_RUNS - 1);
- avg_diff = (long)abs(span - avg_llc_occu_resc);
-
- diff_percent = (((float)span - avg_llc_occu_resc) / span) * 100;
-
- if ((abs((int)diff_percent) <= MAX_DIFF_PERCENT) ||
- (abs(avg_diff) <= MAX_DIFF))
- res = true;
- else
- res = false;
-
- printf("%sok CQM: diff within %d, %d\%%\n", res ? "" : "not",
- MAX_DIFF, (int)MAX_DIFF_PERCENT);
-
- printf("# diff: %ld\n", avg_diff);
- printf("# percent diff=%d\n", abs((int)diff_percent));
- printf("# Results are displayed in (Bytes)\n");
- printf("# Number of bits: %d\n", no_of_bits);
- printf("# Avg_llc_occu_resc: %lu\n", avg_llc_occu_resc);
- printf("# llc_occu_exp (span): %lu\n", span);
-
- tests_run++;
-}
-
static int check_results(struct resctrl_val_param *param, int no_of_bits)
{
char *token_array[8], temp[512];
@@ -78,7 +46,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
int runs = 0;
FILE *fp;
- printf("# checking for pass/fail\n");
+ ksft_print_msg("Checking for pass/fail\n");
fp = fopen(param->filename, "r");
if (!fp) {
perror("# Error in opening file\n");
@@ -86,7 +54,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
return errno;
}
- while (fgets(temp, 1024, fp)) {
+ while (fgets(temp, sizeof(temp), fp)) {
char *token = strtok(temp, ":\t");
int fields = 0;
@@ -101,17 +69,18 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
runs++;
}
fclose(fp);
- show_cache_info(sum_llc_occu_resc, no_of_bits, param->span);
- return 0;
+ return show_cache_info(sum_llc_occu_resc, no_of_bits, param->span,
+ MAX_DIFF, MAX_DIFF_PERCENT, NUM_OF_RUNS,
+ true, true);
}
-void cqm_test_cleanup(void)
+void cmt_test_cleanup(void)
{
remove(RESULT_FILE_NAME);
}
-int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
+int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
{
int ret, mum_resctrlfs;
@@ -122,10 +91,10 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
if (ret)
return ret;
- if (!validate_resctrl_feature_request("cqm"))
+ if (!validate_resctrl_feature_request(CMT_STR))
return -1;
- ret = get_cbm_mask("L3");
+ ret = get_cbm_mask("L3", cbm_mask);
if (ret)
return ret;
@@ -134,18 +103,18 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
ret = get_cache_size(cpu_no, "L3", &cache_size);
if (ret)
return ret;
- printf("cache size :%lu\n", cache_size);
+ ksft_print_msg("Cache size :%lu\n", cache_size);
count_of_bits = count_bits(long_mask);
if (n < 1 || n > count_of_bits) {
- printf("Invalid input value for numbr_of_bits n!\n");
- printf("Please Enter value in range 1 to %d\n", count_of_bits);
+ ksft_print_msg("Invalid input value for numbr_of_bits n!\n");
+ ksft_print_msg("Please enter value in range 1 to %d\n", count_of_bits);
return -1;
}
struct resctrl_val_param param = {
- .resctrl_val = "cqm",
+ .resctrl_val = CMT_STR,
.ctrlgrp = "c1",
.mongrp = "m1",
.cpu_no = cpu_no,
@@ -154,7 +123,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
.mask = ~(long_mask << n) & long_mask,
.span = cache_size * n / count_of_bits,
.num_of_runs = 0,
- .setup = cqm_setup,
+ .setup = cmt_setup,
};
if (strcmp(benchmark_cmd[0], "fill_buf") == 0)
@@ -170,7 +139,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
if (ret)
return ret;
- cqm_test_cleanup();
+ cmt_test_cleanup();
return 0;
}
diff --git a/tools/testing/selftests/resctrl/config b/tools/testing/selftests/resctrl/config
new file mode 100644
index 000000000000..8d9f2deb56ed
--- /dev/null
+++ b/tools/testing/selftests/resctrl/config
@@ -0,0 +1,2 @@
+CONFIG_X86_CPU_RESCTRL=y
+CONFIG_PROC_CPU_RESCTRL=y
diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
index 79c611c99a3d..51e5cf22632f 100644
--- a/tools/testing/selftests/resctrl/fill_buf.c
+++ b/tools/testing/selftests/resctrl/fill_buf.c
@@ -115,7 +115,7 @@ static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,
while (1) {
ret = fill_one_span_read(start_ptr, end_ptr);
- if (!strcmp(resctrl_val, "cat"))
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
break;
}
@@ -134,7 +134,7 @@ static int fill_cache_write(unsigned char *start_ptr, unsigned char *end_ptr,
{
while (1) {
fill_one_span_write(start_ptr, end_ptr);
- if (!strcmp(resctrl_val, "cat"))
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
break;
}
diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
index 7bf8eaa6204b..1a1bdb6180cf 100644
--- a/tools/testing/selftests/resctrl/mba_test.c
+++ b/tools/testing/selftests/resctrl/mba_test.c
@@ -12,7 +12,7 @@
#define RESULT_FILE_NAME "result_mba"
#define NUM_OF_RUNS 5
-#define MAX_DIFF 300
+#define MAX_DIFF_PERCENT 5
#define ALLOCATION_MAX 100
#define ALLOCATION_MIN 10
#define ALLOCATION_STEP 10
@@ -56,13 +56,14 @@ static void show_mba_info(unsigned long *bw_imc, unsigned long *bw_resc)
int allocation, runs;
bool failed = false;
- printf("# Results are displayed in (MB)\n");
+ ksft_print_msg("Results are displayed in (MB)\n");
/* Memory bandwidth from 100% down to 10% */
for (allocation = 0; allocation < ALLOCATION_MAX / ALLOCATION_STEP;
allocation++) {
unsigned long avg_bw_imc, avg_bw_resc;
unsigned long sum_bw_imc = 0, sum_bw_resc = 0;
- unsigned long avg_diff;
+ int avg_diff_per;
+ float avg_diff;
/*
* The first run is discarded due to inaccurate value from
@@ -76,23 +77,26 @@ static void show_mba_info(unsigned long *bw_imc, unsigned long *bw_resc)
avg_bw_imc = sum_bw_imc / (NUM_OF_RUNS - 1);
avg_bw_resc = sum_bw_resc / (NUM_OF_RUNS - 1);
- avg_diff = labs((long)(avg_bw_resc - avg_bw_imc));
-
- printf("%sok MBA schemata percentage %u smaller than %d %%\n",
- avg_diff > MAX_DIFF ? "not " : "",
- ALLOCATION_MAX - ALLOCATION_STEP * allocation,
- MAX_DIFF);
- tests_run++;
- printf("# avg_diff: %lu\n", avg_diff);
- printf("# avg_bw_imc: %lu\n", avg_bw_imc);
- printf("# avg_bw_resc: %lu\n", avg_bw_resc);
- if (avg_diff > MAX_DIFF)
+ avg_diff = (float)labs(avg_bw_resc - avg_bw_imc) / avg_bw_imc;
+ avg_diff_per = (int)(avg_diff * 100);
+
+ ksft_print_msg("%s Check MBA diff within %d%% for schemata %u\n",
+ avg_diff_per > MAX_DIFF_PERCENT ?
+ "Fail:" : "Pass:",
+ MAX_DIFF_PERCENT,
+ ALLOCATION_MAX - ALLOCATION_STEP * allocation);
+
+ ksft_print_msg("avg_diff_per: %d%%\n", avg_diff_per);
+ ksft_print_msg("avg_bw_imc: %lu\n", avg_bw_imc);
+ ksft_print_msg("avg_bw_resc: %lu\n", avg_bw_resc);
+ if (avg_diff_per > MAX_DIFF_PERCENT)
failed = true;
}
- printf("%sok schemata change using MBA%s\n", failed ? "not " : "",
- failed ? " # at least one test failed" : "");
- tests_run++;
+ ksft_print_msg("%s Check schemata change using MBA\n",
+ failed ? "Fail:" : "Pass:");
+ if (failed)
+ ksft_print_msg("At least one test failed\n");
}
static int check_results(void)
@@ -141,7 +145,7 @@ void mba_test_cleanup(void)
int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
{
struct resctrl_val_param param = {
- .resctrl_val = "mba",
+ .resctrl_val = MBA_STR,
.ctrlgrp = "c1",
.mongrp = "m1",
.cpu_no = cpu_no,
@@ -154,9 +158,6 @@ int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
remove(RESULT_FILE_NAME);
- if (!validate_resctrl_feature_request("mba"))
- return -1;
-
ret = resctrl_val(benchmark_cmd, &param);
if (ret)
return ret;
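
Both this MBA rewrite and the MBM rewrite below switch from an absolute-difference check to the same relative one; a runnable sketch with made-up bandwidth averages (MAX_DIFF_PERCENT == 5):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* Invented averages in MB; real values come from the runs. */
		unsigned long avg_bw_imc = 2000, avg_bw_resc = 2080;
		float avg_diff = (float)labs((long)(avg_bw_resc - avg_bw_imc)) /
				 avg_bw_imc;              /* 0.04 */
		int avg_diff_per = (int)(avg_diff * 100); /* 4 */

		printf("%s avg_diff_per: %d%%\n",
		       avg_diff_per > 5 ? "Fail:" : "Pass:", avg_diff_per);
		return avg_diff_per > 5;
	}
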
diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
index 4700f7453f81..8392e5c55ed0 100644
--- a/tools/testing/selftests/resctrl/mbm_test.c
+++ b/tools/testing/selftests/resctrl/mbm_test.c
@@ -11,16 +11,16 @@
#include "resctrl.h"
#define RESULT_FILE_NAME "result_mbm"
-#define MAX_DIFF 300
+#define MAX_DIFF_PERCENT 5
#define NUM_OF_RUNS 5
-static void
+static int
show_bw_info(unsigned long *bw_imc, unsigned long *bw_resc, int span)
{
unsigned long avg_bw_imc = 0, avg_bw_resc = 0;
unsigned long sum_bw_imc = 0, sum_bw_resc = 0;
- long avg_diff = 0;
- int runs;
+ int runs, ret, avg_diff_per;
+ float avg_diff = 0;
/*
* Discard the first value which is inaccurate due to monitoring setup
@@ -33,15 +33,18 @@ show_bw_info(unsigned long *bw_imc, unsigned long *bw_resc, int span)
avg_bw_imc = sum_bw_imc / 4;
avg_bw_resc = sum_bw_resc / 4;
- avg_diff = avg_bw_resc - avg_bw_imc;
-
- printf("%sok MBM: diff within %d%%\n",
- labs(avg_diff) > MAX_DIFF ? "not " : "", MAX_DIFF);
- tests_run++;
- printf("# avg_diff: %lu\n", labs(avg_diff));
- printf("# Span (MB): %d\n", span);
- printf("# avg_bw_imc: %lu\n", avg_bw_imc);
- printf("# avg_bw_resc: %lu\n", avg_bw_resc);
+ avg_diff = (float)labs(avg_bw_resc - avg_bw_imc) / avg_bw_imc;
+ avg_diff_per = (int)(avg_diff * 100);
+
+ ret = avg_diff_per > MAX_DIFF_PERCENT;
+ ksft_print_msg("%s Check MBM diff within %d%%\n",
+ ret ? "Fail:" : "Pass:", MAX_DIFF_PERCENT);
+ ksft_print_msg("avg_diff_per: %d%%\n", avg_diff_per);
+ ksft_print_msg("Span (MB): %d\n", span);
+ ksft_print_msg("avg_bw_imc: %lu\n", avg_bw_imc);
+ ksft_print_msg("avg_bw_resc: %lu\n", avg_bw_resc);
+
+ return ret;
}
static int check_results(int span)
@@ -49,10 +52,10 @@ static int check_results(int span)
unsigned long bw_imc[NUM_OF_RUNS], bw_resc[NUM_OF_RUNS];
char temp[1024], *token_array[8];
char output[] = RESULT_FILE_NAME;
- int runs;
+ int runs, ret;
FILE *fp;
- printf("# Checking for pass/fail\n");
+ ksft_print_msg("Checking for pass/fail\n");
fp = fopen(output, "r");
if (!fp) {
@@ -76,11 +79,11 @@ static int check_results(int span)
runs++;
}
- show_bw_info(bw_imc, bw_resc, span);
+ ret = show_bw_info(bw_imc, bw_resc, span);
fclose(fp);
- return 0;
+ return ret;
}
static int mbm_setup(int num, ...)
@@ -114,7 +117,7 @@ void mbm_test_cleanup(void)
int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
{
struct resctrl_val_param param = {
- .resctrl_val = "mbm",
+ .resctrl_val = MBM_STR,
.ctrlgrp = "c1",
.mongrp = "m1",
.span = span,
@@ -128,9 +131,6 @@ int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
remove(RESULT_FILE_NAME);
- if (!validate_resctrl_feature_request("mbm"))
- return -1;
-
ret = resctrl_val(benchmark_cmd, &param);
if (ret)
return ret;
diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
index 39bf59c6b9c5..1ad10c47e31d 100644
--- a/tools/testing/selftests/resctrl/resctrl.h
+++ b/tools/testing/selftests/resctrl/resctrl.h
@@ -23,11 +23,16 @@
#include <sys/eventfd.h>
#include <asm/unistd.h>
#include <linux/perf_event.h>
+#include "../kselftest.h"
#define MB (1024 * 1024)
#define RESCTRL_PATH "/sys/fs/resctrl"
#define PHYS_ID_PATH "/sys/devices/system/cpu/cpu"
#define CBM_MASK_PATH "/sys/fs/resctrl/info"
+#define L3_PATH "/sys/fs/resctrl/info/L3"
+#define MB_PATH "/sys/fs/resctrl/info/MB"
+#define L3_MON_PATH "/sys/fs/resctrl/info/L3_MON"
+#define L3_MON_FEATURES_PATH "/sys/fs/resctrl/info/L3_MON/mon_features"
#define PARENT_EXIT(err_msg) \
do { \
@@ -62,11 +67,15 @@ struct resctrl_val_param {
int (*setup)(int num, ...);
};
-pid_t bm_pid, ppid;
-int tests_run;
+#define MBM_STR "mbm"
+#define MBA_STR "mba"
+#define CMT_STR "cmt"
+#define CAT_STR "cat"
-char llc_occup_path[1024];
-bool is_amd;
+extern pid_t bm_pid, ppid;
+
+extern char llc_occup_path[1024];
+extern bool is_amd;
bool check_resctrlfs_support(void);
int filter_dmesg(void);
@@ -74,7 +83,7 @@ int remount_resctrlfs(bool mum_resctrlfs);
int get_resource_id(int cpu_no, int *resource_id);
int umount_resctrlfs(void);
int validate_bw_report_request(char *bw_report);
-bool validate_resctrl_feature_request(char *resctrl_val);
+bool validate_resctrl_feature_request(const char *resctrl_val);
char *fgrep(FILE *inf, const char *str);
int taskset_benchmark(pid_t bm_pid, int cpu_no);
void run_benchmark(int signum, siginfo_t *info, void *ucontext);
@@ -92,16 +101,20 @@ void tests_cleanup(void);
void mbm_test_cleanup(void);
int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
void mba_test_cleanup(void);
-int get_cbm_mask(char *cache_type);
+int get_cbm_mask(char *cache_type, char *cbm_mask);
int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
int cat_val(struct resctrl_val_param *param);
void cat_test_cleanup(void);
int cat_perf_miss_val(int cpu_no, int no_of_bits, char *cache_type);
-int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd);
+int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd);
unsigned int count_bits(unsigned long n);
-void cqm_test_cleanup(void);
+void cmt_test_cleanup(void);
int get_core_sibling(int cpu_no);
int measure_cache_vals(struct resctrl_val_param *param, int bm_pid);
+int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
+ unsigned long cache_span, unsigned long max_diff,
+ unsigned long max_diff_percent, unsigned long num_of_runs,
+ bool platform, bool cmt);
#endif /* RESCTRL_H */
diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
index 425cc85ac883..f51b5fc066a3 100644
--- a/tools/testing/selftests/resctrl/resctrl_tests.c
+++ b/tools/testing/selftests/resctrl/resctrl_tests.c
@@ -37,10 +37,10 @@ void detect_amd(void)
static void cmd_help(void)
{
printf("usage: resctrl_tests [-h] [-b \"benchmark_cmd [options]\"] [-t test list] [-n no_of_bits]\n");
- printf("\t-b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CQM");
- printf("\t default benchmark is builtin fill_buf\n");
+ printf("\t-b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CMT\n");
+ printf("\t default benchmark is builtin fill_buf\n");
printf("\t-t test list: run tests specified in the test list, ");
- printf("e.g. -t mbm, mba, cqm, cat\n");
+ printf("e.g. -t mbm, mba, cmt, cat\n");
printf("\t-n no_of_bits: run cache tests using specified no of bits in cache bit mask\n");
printf("\t-p cpu_no: specify CPU number to run the test. 1 is default\n");
printf("\t-h: help\n");
@@ -50,17 +50,88 @@ void tests_cleanup(void)
{
mbm_test_cleanup();
mba_test_cleanup();
- cqm_test_cleanup();
+ cmt_test_cleanup();
+ cat_test_cleanup();
+}
+
+static void run_mbm_test(bool has_ben, char **benchmark_cmd, int span,
+ int cpu_no, char *bw_report)
+{
+ int res;
+
+ ksft_print_msg("Starting MBM BW change ...\n");
+
+ if (!validate_resctrl_feature_request(MBM_STR)) {
+ ksft_test_result_skip("Hardware does not support MBM or MBM is disabled\n");
+ return;
+ }
+
+ if (!has_ben)
+ sprintf(benchmark_cmd[5], "%s", MBA_STR);
+ res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
+ ksft_test_result(!res, "MBM: bw change\n");
+ mbm_test_cleanup();
+}
+
+static void run_mba_test(bool has_ben, char **benchmark_cmd, int span,
+ int cpu_no, char *bw_report)
+{
+ int res;
+
+ ksft_print_msg("Starting MBA Schemata change ...\n");
+
+ if (!validate_resctrl_feature_request(MBA_STR)) {
+ ksft_test_result_skip("Hardware does not support MBA or MBA is disabled\n");
+ return;
+ }
+
+ if (!has_ben)
+ sprintf(benchmark_cmd[1], "%d", span);
+ res = mba_schemata_change(cpu_no, bw_report, benchmark_cmd);
+ ksft_test_result(!res, "MBA: schemata change\n");
+ mba_test_cleanup();
+}
+
+static void run_cmt_test(bool has_ben, char **benchmark_cmd, int cpu_no)
+{
+ int res;
+
+ ksft_print_msg("Starting CMT test ...\n");
+ if (!validate_resctrl_feature_request(CMT_STR)) {
+ ksft_test_result_skip("Hardware does not support CMT or CMT is disabled\n");
+ return;
+ }
+
+ if (!has_ben)
+ sprintf(benchmark_cmd[5], "%s", CMT_STR);
+ res = cmt_resctrl_val(cpu_no, 5, benchmark_cmd);
+ ksft_test_result(!res, "CMT: test\n");
+ cmt_test_cleanup();
+}
+
+static void run_cat_test(int cpu_no, int no_of_bits)
+{
+ int res;
+
+ ksft_print_msg("Starting CAT test ...\n");
+
+ if (!validate_resctrl_feature_request(CAT_STR)) {
+ ksft_test_result_skip("Hardware does not support CAT or CAT is disabled\n");
+ return;
+ }
+
+ res = cat_perf_miss_val(cpu_no, no_of_bits, "L3");
+ ksft_test_result(!res, "CAT: test\n");
cat_test_cleanup();
}
int main(int argc, char **argv)
{
- bool has_ben = false, mbm_test = true, mba_test = true, cqm_test = true;
- int res, c, cpu_no = 1, span = 250, argc_new = argc, i, no_of_bits = 5;
+ bool has_ben = false, mbm_test = true, mba_test = true, cmt_test = true;
+ int c, cpu_no = 1, span = 250, argc_new = argc, i, no_of_bits = 0;
char *benchmark_cmd[BENCHMARK_ARGS], bw_report[64], bm_type[64];
char benchmark_cmd_area[BENCHMARK_ARGS][BENCHMARK_ARG_SIZE];
- int ben_ind, ben_count;
+ int ben_ind, ben_count, tests = 0;
bool cat_test = true;
for (i = 0; i < argc; i++) {
@@ -73,7 +144,7 @@ int main(int argc, char **argv)
}
}
- while ((c = getopt(argc_new, argv, "ht:b:")) != -1) {
+ while ((c = getopt(argc_new, argv, "ht:b:n:p:")) != -1) {
char *token;
switch (c) {
@@ -82,17 +153,21 @@ int main(int argc, char **argv)
mbm_test = false;
mba_test = false;
- cqm_test = false;
+ cmt_test = false;
cat_test = false;
while (token) {
- if (!strcmp(token, "mbm")) {
+ if (!strncmp(token, MBM_STR, sizeof(MBM_STR))) {
mbm_test = true;
- } else if (!strcmp(token, "mba")) {
+ tests++;
+ } else if (!strncmp(token, MBA_STR, sizeof(MBA_STR))) {
mba_test = true;
- } else if (!strcmp(token, "cqm")) {
- cqm_test = true;
- } else if (!strcmp(token, "cat")) {
+ tests++;
+ } else if (!strncmp(token, CMT_STR, sizeof(CMT_STR))) {
+ cmt_test = true;
+ tests++;
+ } else if (!strncmp(token, CAT_STR, sizeof(CAT_STR))) {
cat_test = true;
+ tests++;
} else {
printf("invalid argument\n");
@@ -106,6 +181,10 @@ int main(int argc, char **argv)
break;
case 'n':
no_of_bits = atoi(optarg);
+ if (no_of_bits <= 0) {
+ printf("Bail out! invalid argument for no_of_bits\n");
+ return -1;
+ }
break;
case 'h':
cmd_help();
@@ -118,7 +197,7 @@ int main(int argc, char **argv)
}
}
- printf("TAP version 13\n");
+ ksft_print_header();
/*
* Typically we need root privileges, because:
@@ -126,7 +205,7 @@ int main(int argc, char **argv)
* 2. We execute perf commands
*/
if (geteuid() != 0)
- printf("# WARNING: not running as root, tests may fail.\n");
+ return ksft_exit_fail_msg("Not running as root, abort testing.\n");
/* Detect AMD vendor */
detect_amd();
@@ -155,48 +234,26 @@ int main(int argc, char **argv)
sprintf(bw_report, "reads");
sprintf(bm_type, "fill_buf");
- check_resctrlfs_support();
+ if (!check_resctrlfs_support())
+ return ksft_exit_fail_msg("resctrl FS does not exist\n");
+
filter_dmesg();
- if (!is_amd && mbm_test) {
- printf("# Starting MBM BW change ...\n");
- if (!has_ben)
- sprintf(benchmark_cmd[5], "%s", "mba");
- res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
- printf("%sok MBM: bw change\n", res ? "not " : "");
- mbm_test_cleanup();
- tests_run++;
- }
+ ksft_set_plan(tests ? : 4);
- if (!is_amd && mba_test) {
- printf("# Starting MBA Schemata change ...\n");
- if (!has_ben)
- sprintf(benchmark_cmd[1], "%d", span);
- res = mba_schemata_change(cpu_no, bw_report, benchmark_cmd);
- printf("%sok MBA: schemata change\n", res ? "not " : "");
- mba_test_cleanup();
- tests_run++;
- }
+ if (!is_amd && mbm_test)
+ run_mbm_test(has_ben, benchmark_cmd, span, cpu_no, bw_report);
- if (cqm_test) {
- printf("# Starting CQM test ...\n");
- if (!has_ben)
- sprintf(benchmark_cmd[5], "%s", "cqm");
- res = cqm_resctrl_val(cpu_no, no_of_bits, benchmark_cmd);
- printf("%sok CQM: test\n", res ? "not " : "");
- cqm_test_cleanup();
- tests_run++;
- }
+ if (!is_amd && mba_test)
+ run_mba_test(has_ben, benchmark_cmd, span, cpu_no, bw_report);
- if (cat_test) {
- printf("# Starting CAT test ...\n");
- res = cat_perf_miss_val(cpu_no, no_of_bits, "L3");
- printf("%sok CAT: test\n", res ? "not " : "");
- tests_run++;
- cat_test_cleanup();
- }
+ if (cmt_test)
+ run_cmt_test(has_ben, benchmark_cmd, cpu_no);
+
+ if (cat_test)
+ run_cat_test(cpu_no, no_of_bits);
- printf("1..%d\n", tests_run);
+ umount_resctrlfs();
- return 0;
+ return ksft_exit_pass();
}
diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
index 520fea3606d1..95224345c78e 100644
--- a/tools/testing/selftests/resctrl/resctrl_val.c
+++ b/tools/testing/selftests/resctrl/resctrl_val.c
@@ -221,8 +221,8 @@ static int read_from_imc_dir(char *imc_dir, int count)
*/
static int num_of_imcs(void)
{
+ char imc_dir[512], *temp;
unsigned int count = 0;
- char imc_dir[512];
struct dirent *ep;
int ret;
DIR *dp;
@@ -230,7 +230,25 @@ static int num_of_imcs(void)
dp = opendir(DYN_PMU_PATH);
if (dp) {
while ((ep = readdir(dp))) {
- if (strstr(ep->d_name, UNCORE_IMC)) {
+ temp = strstr(ep->d_name, UNCORE_IMC);
+ if (!temp)
+ continue;
+
+ /*
+ * imc counters are named "uncore_imc_<n>", so advance the
+ * pointer to point at <n>. Note that sizeof(UNCORE_IMC) also
+ * counts the terminating null character, which stands in for
+ * the trailing underscore of "uncore_imc_", so that underscore
+ * need not be skipped separately.
+ */
+ temp = temp + sizeof(UNCORE_IMC);
+
+ /*
+ * Some directories under "DYN_PMU_PATH" could have
+ * names like "uncore_imc_free_running", hence, check if
+ * first character is a numerical digit or not.
+ */
+ if (temp[0] >= '0' && temp[0] <= '9') {
sprintf(imc_dir, "%s/%s/", DYN_PMU_PATH,
ep->d_name);
ret = read_from_imc_dir(imc_dir, count);
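
As an aside, a self-contained sketch (not from the patch) of the directory-name check the comments above describe:

	#include <stdio.h>
	#include <string.h>

	#define UNCORE_IMC "uncore_imc"

	int main(void)
	{
		const char *names[] = { "uncore_imc_3", "uncore_imc_free_running" };

		for (int i = 0; i < 2; i++) {
			const char *temp = strstr(names[i], UNCORE_IMC);

			if (!temp)
				continue;
			/* sizeof() also counts the NUL, so this skips the
			 * trailing '_' and lands on the first suffix char. */
			temp += sizeof(UNCORE_IMC);
			printf("%s -> %s\n", names[i],
			       (temp[0] >= '0' && temp[0] <= '9') ?
			       "per-socket iMC PMU" : "skipped");
		}
		return 0;
	}
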
@@ -282,9 +300,9 @@ static int initialize_mem_bw_imc(void)
* Memory B/W utilized by a process on a socket can be calculated using
* iMC counters. Perf events are used to read these counters.
*
- * Return: >= 0 on success. < 0 on failure.
+ * Return: = 0 on success. < 0 on failure.
*/
-static float get_mem_bw_imc(int cpu_no, char *bw_report)
+static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
{
float reads, writes, of_mul_read, of_mul_write;
int imc, j, ret;
@@ -355,13 +373,18 @@ static float get_mem_bw_imc(int cpu_no, char *bw_report)
close(imc_counters_config[imc][WRITE].fd);
}
- if (strcmp(bw_report, "reads") == 0)
- return reads;
+ if (strcmp(bw_report, "reads") == 0) {
+ *bw_imc = reads;
+ return 0;
+ }
- if (strcmp(bw_report, "writes") == 0)
- return writes;
+ if (strcmp(bw_report, "writes") == 0) {
+ *bw_imc = writes;
+ return 0;
+ }
- return (reads + writes);
+ *bw_imc = reads + writes;
+ return 0;
}
void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
@@ -397,10 +420,10 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
return;
}
- if (strcmp(resctrl_val, "mbm") == 0)
+ if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
set_mbm_path(ctrlgrp, mongrp, resource_id);
- if ((strcmp(resctrl_val, "mba") == 0)) {
+ if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
if (ctrlgrp)
sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH,
RESCTRL_PATH, ctrlgrp, resource_id);
@@ -420,9 +443,8 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
* 1. If con_mon grp is given, then read from it
* 2. If con_mon grp is not given, then read from root con_mon grp
*/
-static unsigned long get_mem_bw_resctrl(void)
+static int get_mem_bw_resctrl(unsigned long *mbm_total)
{
- unsigned long mbm_total = 0;
FILE *fp;
fp = fopen(mbm_total_path, "r");
@@ -431,7 +453,7 @@ static unsigned long get_mem_bw_resctrl(void)
return -1;
}
- if (fscanf(fp, "%lu", &mbm_total) <= 0) {
+ if (fscanf(fp, "%lu", mbm_total) <= 0) {
perror("Could not get mbm local bytes");
fclose(fp);
@@ -439,7 +461,7 @@ static unsigned long get_mem_bw_resctrl(void)
}
fclose(fp);
- return mbm_total;
+ return 0;
}
pid_t bm_pid, ppid;
@@ -449,7 +471,7 @@ void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
kill(bm_pid, SIGKILL);
umount_resctrlfs();
tests_cleanup();
- printf("Ending\n\n");
+ ksft_print_msg("Ending\n\n");
exit(EXIT_SUCCESS);
}
@@ -492,7 +514,7 @@ static int print_results_bw(char *filename, int bm_pid, float bw_imc,
return 0;
}
-static void set_cqm_path(const char *ctrlgrp, const char *mongrp, char sock_num)
+static void set_cmt_path(const char *ctrlgrp, const char *mongrp, char sock_num)
{
if (strlen(ctrlgrp) && strlen(mongrp))
sprintf(llc_occup_path, CON_MON_LCC_OCCUP_PATH, RESCTRL_PATH,
@@ -512,7 +534,7 @@ static void set_cqm_path(const char *ctrlgrp, const char *mongrp, char sock_num)
* @ctrlgrp: Name of the control monitor group (con_mon grp)
* @mongrp: Name of the monitor group (mon grp)
* @cpu_no: CPU number that the benchmark PID is bound to
- * @resctrl_val: Resctrl feature (Eg: cat, cqm.. etc)
+ * @resctrl_val: Resctrl feature (e.g., cat, cmt, etc.)
*/
static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
int cpu_no, char *resctrl_val)
@@ -524,14 +546,15 @@ static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
return;
}
- if (strcmp(resctrl_val, "cqm") == 0)
- set_cqm_path(ctrlgrp, mongrp, resource_id);
+ if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
+ set_cmt_path(ctrlgrp, mongrp, resource_id);
}
static int
measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
{
- unsigned long bw_imc, bw_resc, bw_resc_end;
+ unsigned long bw_resc, bw_resc_end;
+ float bw_imc;
int ret;
/*
@@ -541,13 +564,13 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
* Compare the two values to validate resctrl value.
* It takes 1sec to measure the data.
*/
- bw_imc = get_mem_bw_imc(param->cpu_no, param->bw_report);
- if (bw_imc <= 0)
- return bw_imc;
+ ret = get_mem_bw_imc(param->cpu_no, param->bw_report, &bw_imc);
+ if (ret < 0)
+ return ret;
- bw_resc_end = get_mem_bw_resctrl();
- if (bw_resc_end <= 0)
- return bw_resc_end;
+ ret = get_mem_bw_resctrl(&bw_resc_end);
+ if (ret < 0)
+ return ret;
bw_resc = (bw_resc_end - *bw_resc_start) / MB;
ret = print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
@@ -579,8 +602,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
if (strcmp(param->filename, "") == 0)
sprintf(param->filename, "stdio");
- if ((strcmp(resctrl_val, "mba")) == 0 ||
- (strcmp(resctrl_val, "mbm")) == 0) {
+ if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
+ !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
ret = validate_bw_report_request(param->bw_report);
if (ret)
return ret;
@@ -645,7 +668,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
PARENT_EXIT("Child is done");
}
- printf("# benchmark PID: %d\n", bm_pid);
+ ksft_print_msg("Benchmark PID: %d\n", bm_pid);
/*
* Register CTRL-C handler for parent, as it has to kill benchmark
@@ -674,15 +697,15 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
if (ret)
goto out;
- if ((strcmp(resctrl_val, "mbm") == 0) ||
- (strcmp(resctrl_val, "mba") == 0)) {
+ if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+ !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
ret = initialize_mem_bw_imc();
if (ret)
goto out;
initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
param->cpu_no, resctrl_val);
- } else if (strcmp(resctrl_val, "cqm") == 0)
+ } else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp,
param->cpu_no, resctrl_val);
@@ -710,8 +733,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
/* Test runs until the callback setup() tells the test to stop. */
while (1) {
- if ((strcmp(resctrl_val, "mbm") == 0) ||
- (strcmp(resctrl_val, "mba") == 0)) {
+ if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+ !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
ret = param->setup(1, param);
if (ret) {
ret = 0;
@@ -721,7 +744,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
ret = measure_vals(param, &bw_resc_start);
if (ret)
break;
- } else if (strcmp(resctrl_val, "cqm") == 0) {
+ } else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
ret = param->setup(1, param);
if (ret) {
ret = 0;
diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
index 19c0ec4045a4..5f5a166ade60 100644
--- a/tools/testing/selftests/resctrl/resctrlfs.c
+++ b/tools/testing/selftests/resctrl/resctrlfs.c
@@ -10,8 +10,6 @@
*/
#include "resctrl.h"
-int tests_run;
-
static int find_resctrl_mount(char *buffer)
{
FILE *mounts;
@@ -49,8 +47,6 @@ static int find_resctrl_mount(char *buffer)
return -ENOENT;
}
-char cbm_mask[256];
-
/*
* remount_resctrlfs - Remount resctrl FS at /sys/fs/resctrl
* @mum_resctrlfs: Should the resctrl FS be remounted?
@@ -70,28 +66,25 @@ int remount_resctrlfs(bool mum_resctrlfs)
if (ret)
strcpy(mountpoint, RESCTRL_PATH);
- if (!ret && mum_resctrlfs && umount(mountpoint)) {
- printf("not ok unmounting \"%s\"\n", mountpoint);
- perror("# umount");
- tests_run++;
- }
+ if (!ret && mum_resctrlfs && umount(mountpoint))
+ ksft_print_msg("Fail: unmounting \"%s\"\n", mountpoint);
if (!ret && !mum_resctrlfs)
return 0;
+ ksft_print_msg("Mounting resctrl to \"%s\"\n", RESCTRL_PATH);
ret = mount("resctrl", RESCTRL_PATH, "resctrl", 0, NULL);
- printf("%sok mounting resctrl to \"%s\"\n", ret ? "not " : "",
- RESCTRL_PATH);
if (ret)
perror("# mount");
- tests_run++;
-
return ret;
}
int umount_resctrlfs(void)
{
+ if (find_resctrl_mount(NULL))
+ return 0;
+
if (umount(RESCTRL_PATH)) {
perror("# Unable to umount resctrl");
@@ -205,16 +198,18 @@ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size)
/*
* get_cbm_mask - Get cbm mask for given cache
* @cache_type: Cache level L2/L3
- *
- * Mask is stored in cbm_mask which is global variable.
+ * @cbm_mask: cbm_mask returned as a string
*
* Return: = 0 on success, < 0 on failure.
*/
-int get_cbm_mask(char *cache_type)
+int get_cbm_mask(char *cache_type, char *cbm_mask)
{
char cbm_mask_path[1024];
FILE *fp;
+ if (!cbm_mask)
+ return -1;
+
sprintf(cbm_mask_path, "%s/%s/cbm_mask", CBM_MASK_PATH, cache_type);
fp = fopen(cbm_mask_path, "r");
@@ -268,7 +263,7 @@ int get_core_sibling(int cpu_no)
while (token) {
sibling_cpu_no = atoi(token);
/* Skipping core 0 as we don't want to run test on core 0 */
- if (sibling_cpu_no != 0)
+ if (sibling_cpu_no != 0 && sibling_cpu_no != cpu_no)
break;
token = strtok(NULL, "-,");
}
@@ -334,7 +329,7 @@ void run_benchmark(int signum, siginfo_t *info, void *ucontext)
operation = atoi(benchmark_cmd[4]);
sprintf(resctrl_val, "%s", benchmark_cmd[5]);
- if (strcmp(resctrl_val, "cqm") != 0)
+ if (strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
buffer_span = span * MB;
else
buffer_span = span;
@@ -458,9 +453,9 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
if (ret)
goto out;
- /* Create mon grp and write pid into it for "mbm" and "cqm" test */
- if ((strcmp(resctrl_val, "cqm") == 0) ||
- (strcmp(resctrl_val, "mbm") == 0)) {
+ /* Create mon grp and write pid into it for "mbm" and "cmt" test */
+ if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)) ||
+ !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
if (strlen(mongrp)) {
sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
@@ -477,13 +472,10 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
}
out:
- printf("%sok writing benchmark parameters to resctrl FS\n",
- ret ? "not " : "");
+ ksft_print_msg("Writing benchmark parameters to resctrl FS\n");
if (ret)
perror("# writing to resctrlfs");
- tests_run++;
-
return ret;
}
@@ -505,13 +497,13 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
int resource_id, ret = 0;
FILE *fp;
- if ((strcmp(resctrl_val, "mba") != 0) &&
- (strcmp(resctrl_val, "cat") != 0) &&
- (strcmp(resctrl_val, "cqm") != 0))
+ if (strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) &&
+ strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) &&
+ strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
return -ENOENT;
if (!schemata) {
- printf("# Skipping empty schemata update\n");
+ ksft_print_msg("Skipping empty schemata update\n");
return -1;
}
@@ -528,9 +520,10 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
else
sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);
- if (!strcmp(resctrl_val, "cat") || !strcmp(resctrl_val, "cqm"))
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) ||
+ !strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
sprintf(schema, "%s%d%c%s", "L3:", resource_id, '=', schemata);
- if (strcmp(resctrl_val, "mba") == 0)
+ if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)))
sprintf(schema, "%s%d%c%s", "MB:", resource_id, '=', schemata);
fp = fopen(controlgroup, "w");
@@ -551,10 +544,9 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
fclose(fp);
out:
- printf("%sok Write schema \"%s\" to resctrl FS%s%s\n",
- ret ? "not " : "", schema, ret ? " # " : "",
- ret ? reason : "");
- tests_run++;
+ ksft_print_msg("Write schema \"%s\" to resctrl FS%s%s\n",
+ schema, ret ? " # " : "",
+ ret ? reason : "");
return ret;
}
@@ -578,18 +570,20 @@ bool check_resctrlfs_support(void)
fclose(inf);
- printf("%sok kernel supports resctrl filesystem\n", ret ? "" : "not ");
- tests_run++;
+ ksft_print_msg("%s Check kernel supports resctrl filesystem\n",
+ ret ? "Pass:" : "Fail:");
+
+ if (!ret)
+ return ret;
dp = opendir(RESCTRL_PATH);
- printf("%sok resctrl mountpoint \"%s\" exists\n",
- dp ? "" : "not ", RESCTRL_PATH);
+ ksft_print_msg("%s Check resctrl mountpoint \"%s\" exists\n",
+ dp ? "Pass:" : "Fail:", RESCTRL_PATH);
if (dp)
closedir(dp);
- tests_run++;
- printf("# resctrl filesystem %s mounted\n",
- find_resctrl_mount(NULL) ? "not" : "is");
+ ksft_print_msg("resctrl filesystem %s mounted\n",
+ find_resctrl_mount(NULL) ? "not" : "is");
return ret;
}
@@ -615,26 +609,56 @@ char *fgrep(FILE *inf, const char *str)
* validate_resctrl_feature_request - Check if requested feature is valid.
* @resctrl_val: Requested feature
*
- * Return: 0 on success, non-zero on failure
+ * Return: True if the feature is supported, else false
*/
-bool validate_resctrl_feature_request(char *resctrl_val)
+bool validate_resctrl_feature_request(const char *resctrl_val)
{
- FILE *inf = fopen("/proc/cpuinfo", "r");
+ struct stat statbuf;
bool found = false;
char *res;
+ FILE *inf;
- if (!inf)
+ if (!resctrl_val)
return false;
- res = fgrep(inf, "flags");
-
- if (res) {
- char *s = strchr(res, ':');
+ if (remount_resctrlfs(false))
+ return false;
- found = s && !strstr(s, resctrl_val);
- free(res);
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+ if (!stat(L3_PATH, &statbuf))
+ return true;
+ } else if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+ if (!stat(MB_PATH, &statbuf))
+ return true;
+ } else if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+ !strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+ if (!stat(L3_MON_PATH, &statbuf)) {
+ inf = fopen(L3_MON_FEATURES_PATH, "r");
+ if (!inf)
+ return false;
+
+ if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+ res = fgrep(inf, "llc_occupancy");
+ if (res) {
+ found = true;
+ free(res);
+ }
+ }
+
+ if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
+ res = fgrep(inf, "mbm_total_bytes");
+ if (res) {
+ free(res);
+ res = fgrep(inf, "mbm_local_bytes");
+ if (res) {
+ found = true;
+ free(res);
+ }
+ }
+ }
+ fclose(inf);
+ }
}
- fclose(inf);
return found;
}
@@ -671,9 +695,9 @@ int filter_dmesg(void)
while (fgets(line, 1024, fp)) {
if (strstr(line, "intel_rdt:"))
- printf("# dmesg: %s", line);
+ ksft_print_msg("dmesg: %s", line);
if (strstr(line, "resctrl:"))
- printf("# dmesg: %s", line);
+ ksft_print_msg("dmesg: %s", line);
}
fclose(fp);
waitpid(pid, NULL, 0);
diff --git a/tools/testing/selftests/sgx/defines.h b/tools/testing/selftests/sgx/defines.h
index 592c1ccf4576..0bd73428d2f3 100644
--- a/tools/testing/selftests/sgx/defines.h
+++ b/tools/testing/selftests/sgx/defines.h
@@ -14,7 +14,7 @@
#define __aligned(x) __attribute__((__aligned__(x)))
#define __packed __attribute__((packed))
-#include "../../../../arch/x86/kernel/cpu/sgx/arch.h"
+#include "../../../../arch/x86/include/asm/sgx.h"
#include "../../../../arch/x86/include/asm/enclu.h"
#include "../../../../arch/x86/include/uapi/asm/sgx.h"
diff --git a/tools/testing/selftests/sgx/load.c b/tools/testing/selftests/sgx/load.c
index 9d43b75aaa55..f441ac34b4d4 100644
--- a/tools/testing/selftests/sgx/load.c
+++ b/tools/testing/selftests/sgx/load.c
@@ -45,19 +45,19 @@ static bool encl_map_bin(const char *path, struct encl *encl)
fd = open(path, O_RDONLY);
if (fd == -1) {
- perror("open()");
+ perror("enclave executable open()");
return false;
}
ret = stat(path, &sb);
if (ret) {
- perror("stat()");
+ perror("enclave executable stat()");
goto err;
}
bin = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (bin == MAP_FAILED) {
- perror("mmap()");
+ perror("enclave executable mmap()");
goto err;
}
@@ -90,8 +90,7 @@ static bool encl_ioc_create(struct encl *encl)
ioc.src = (unsigned long)secs;
rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_CREATE, &ioc);
if (rc) {
- fprintf(stderr, "SGX_IOC_ENCLAVE_CREATE failed: errno=%d\n",
- errno);
+ perror("SGX_IOC_ENCLAVE_CREATE failed");
munmap((void *)secs->base, encl->encl_size);
return false;
}
@@ -116,31 +115,72 @@ static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_ADD_PAGES, &ioc);
if (rc < 0) {
- fprintf(stderr, "SGX_IOC_ENCLAVE_ADD_PAGES failed: errno=%d.\n",
- errno);
+ perror("SGX_IOC_ENCLAVE_ADD_PAGES failed");
return false;
}
return true;
}
+
+
bool encl_load(const char *path, struct encl *encl)
{
+ const char device_path[] = "/dev/sgx_enclave";
Elf64_Phdr *phdr_tbl;
off_t src_offset;
Elf64_Ehdr *ehdr;
+ struct stat sb;
+ void *ptr;
int i, j;
int ret;
+ int fd = -1;
memset(encl, 0, sizeof(*encl));
- ret = open("/dev/sgx_enclave", O_RDWR);
- if (ret < 0) {
- fprintf(stderr, "Unable to open /dev/sgx_enclave\n");
+ fd = open(device_path, O_RDWR);
+ if (fd < 0) {
+ perror("Unable to open /dev/sgx_enclave");
+ goto err;
+ }
+
+ ret = stat(device_path, &sb);
+ if (ret) {
+ perror("device file stat()");
+ goto err;
+ }
+
+ /*
+ * This just checks if the /dev file has these permission
+ * bits set. It does not check that the current user is
+ * the owner or in the owning group.
+ */
+ if (!(sb.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
+ fprintf(stderr, "no execute permissions on device file %s\n", device_path);
+ goto err;
+ }
+
+ ptr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
+ if (ptr == (void *)-1) {
+ perror("mmap for read");
+ goto err;
+ }
+ munmap(ptr, PAGE_SIZE);
+
+#define ERR_MSG \
+"mmap() succeeded for PROT_READ, but failed for PROT_EXEC.\n" \
+" Check that current user has execute permissions on %s and \n" \
+" that /dev does not have noexec set: mount | grep \"/dev .*noexec\"\n" \
+" If so, remount it executable: mount -o remount,exec /dev\n\n"
+
+ ptr = mmap(NULL, PAGE_SIZE, PROT_EXEC, MAP_SHARED, fd, 0);
+ if (ptr == (void *)-1) {
+ fprintf(stderr, ERR_MSG, device_path);
goto err;
}
+ munmap(ptr, PAGE_SIZE);
- encl->fd = ret;
+ encl->fd = fd;
if (!encl_map_bin(path, encl))
goto err;
@@ -217,6 +257,8 @@ bool encl_load(const char *path, struct encl *encl)
return true;
err:
+ if (fd != -1)
+ close(fd);
encl_delete(encl);
return false;
}
@@ -229,7 +271,7 @@ static bool encl_map_area(struct encl *encl)
area = mmap(NULL, encl_size * 2, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (area == MAP_FAILED) {
- perror("mmap");
+ perror("reservation mmap()");
return false;
}
@@ -268,8 +310,7 @@ bool encl_build(struct encl *encl)
ioc.sigstruct = (uint64_t)&encl->sigstruct;
ret = ioctl(encl->fd, SGX_IOC_ENCLAVE_INIT, &ioc);
if (ret) {
- fprintf(stderr, "SGX_IOC_ENCLAVE_INIT failed: errno=%d\n",
- errno);
+ perror("SGX_IOC_ENCLAVE_INIT failed");
return false;
}
diff --git a/tools/testing/selftests/sgx/main.c b/tools/testing/selftests/sgx/main.c
index 724cec700926..d304a4044eb9 100644
--- a/tools/testing/selftests/sgx/main.c
+++ b/tools/testing/selftests/sgx/main.c
@@ -15,6 +15,7 @@
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
+#include <sys/auxv.h>
#include "defines.h"
#include "main.h"
#include "../kselftest.h"
@@ -28,24 +29,6 @@ struct vdso_symtab {
Elf64_Word *elf_hashtab;
};
-static void *vdso_get_base_addr(char *envp[])
-{
- Elf64_auxv_t *auxv;
- int i;
-
- for (i = 0; envp[i]; i++)
- ;
-
- auxv = (Elf64_auxv_t *)&envp[i + 1];
-
- for (i = 0; auxv[i].a_type != AT_NULL; i++) {
- if (auxv[i].a_type == AT_SYSINFO_EHDR)
- return (void *)auxv[i].a_un.a_val;
- }
-
- return NULL;
-}
-
static Elf64_Dyn *vdso_get_dyntab(void *addr)
{
Elf64_Ehdr *ehdr = addr;
@@ -162,7 +145,7 @@ static int user_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r
return 0;
}
-int main(int argc, char *argv[], char *envp[])
+int main(int argc, char *argv[])
{
struct sgx_enclave_run run;
struct vdso_symtab symtab;
@@ -195,7 +178,7 @@ int main(int argc, char *argv[], char *envp[])
addr = mmap((void *)encl.encl_base + seg->offset, seg->size,
seg->prot, MAP_SHARED | MAP_FIXED, encl.fd, 0);
if (addr == MAP_FAILED) {
- fprintf(stderr, "mmap() failed, errno=%d.\n", errno);
+ perror("mmap() segment failed");
exit(KSFT_FAIL);
}
}
@@ -203,7 +186,8 @@ int main(int argc, char *argv[], char *envp[])
memset(&run, 0, sizeof(run));
run.tcs = encl.encl_base;
- addr = vdso_get_base_addr(envp);
+ /* Get vDSO base address */
+ addr = (void *)getauxval(AT_SYSINFO_EHDR);
if (!addr)
goto err;
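
One point worth noting about the getauxval() replacement above (based on the glibc manual, not stated in the patch): getauxval() returns 0 when the requested entry is absent, so the plain NULL check suffices. A minimal sketch:

	#include <stdio.h>
	#include <sys/auxv.h>

	int main(void)
	{
		void *vdso = (void *)getauxval(AT_SYSINFO_EHDR);

		if (!vdso) {
			fprintf(stderr, "no vDSO mapped\n");
			return 1;
		}
		printf("vDSO base: %p\n", vdso);
		return 0;
	}
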
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
index b8268da5adaa..8e45792703ed 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
@@ -764,5 +764,53 @@
"teardown": [
"$TC actions flush action police"
]
+ },
+ {
+ "id": "cdd7",
+ "name": "Add valid police action with packets per second rate limit",
+ "category": [
+ "actions",
+ "police"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action police",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action police pkts_rate 1000 pkts_burst 200 index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions ls action police",
+ "matchPattern": "action order [0-9]*: police 0x1 rate 0bit burst 0b mtu 4096Mb pkts_rate 1000 pkts_burst 200",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action police"
+ ]
+ },
+ {
+ "id": "f5bc",
+ "name": "Add invalid police action with both bps and pps",
+ "category": [
+ "actions",
+ "police"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action police",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action police rate 1kbit burst 10k pkts_rate 1000 pkts_burst 200 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions ls action police",
+ "matchPattern": "action order [0-9]*: police 0x1 ",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action police"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json b/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
index 8e8c1ae12260..e0c5f060ccb9 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
@@ -24,6 +24,30 @@
]
},
{
+ "id": "4297",
+ "name": "Add simple action with change command",
+ "category": [
+ "actions",
+ "simple"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action simple",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions change action simple sdata \"Not changed\" index 60",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action simple",
+ "matchPattern": "action order [0-9]*: Simple <Not changed>.*index 60 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action simple"
+ ]
+ },
+ {
"id": "6d4c",
"name": "Add simple action with duplicate index",
"category": [
@@ -151,5 +175,64 @@
"teardown": [
"$TC actions flush action simple"
]
+ },
+ {
+ "id": "8d07",
+ "name": "Verify cleanup of failed actions batch add",
+ "category": [
+ "actions",
+ "simple"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action simple",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action simple sdata \"2\" index 2",
+ [
+ "$TC actions add action simple sdata \"1\" index 1 action simple sdata \"2\" index 2",
+ 255
+ ],
+ "$TC actions flush action simple"
+ ],
+ "cmdUnderTest": "$TC actions add action simple sdata \"2\" index 2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action simple",
+ "matchPattern": "action order [0-9]*: Simple <2>.*index 2 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action simple"
+ ]
+ },
+ {
+ "id": "a68a",
+ "name": "Verify cleanup of failed actions batch change",
+ "category": [
+ "actions",
+ "simple"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action simple",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC actions change action simple sdata \"1\" index 1 action simple sdata \"2\" goto chain 42 index 2",
+ 255
+ ],
+ "$TC actions flush action simple"
+ ],
+ "cmdUnderTest": "$TC actions add action simple sdata \"1\" index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action simple",
+ "matchPattern": "action order [0-9]*: Simple <1>.*index 1 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action simple"
+ ]
}
]
diff --git a/tools/testing/selftests/timens/gettime_perf.c b/tools/testing/selftests/timens/gettime_perf.c
index 7bf841a3967b..6b13dc277724 100644
--- a/tools/testing/selftests/timens/gettime_perf.c
+++ b/tools/testing/selftests/timens/gettime_perf.c
@@ -25,6 +25,12 @@ static void fill_function_pointers(void)
if (!vdso)
vdso = dlopen("linux-gate.so.1",
RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+ if (!vdso)
+ vdso = dlopen("linux-vdso32.so.1",
+ RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+ if (!vdso)
+ vdso = dlopen("linux-vdso64.so.1",
+ RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
if (!vdso) {
pr_err("[WARN]\tfailed to find vDSO\n");
return;
@@ -32,6 +38,8 @@ static void fill_function_pointers(void)
vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
if (!vdso_clock_gettime)
+ vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__kernel_clock_gettime");
+ if (!vdso_clock_gettime)
pr_err("Warning: failed to find clock_gettime in vDSO\n");
}
diff --git a/tools/testing/selftests/timers/clocksource-switch.c b/tools/testing/selftests/timers/clocksource-switch.c
index bfc974b4572d..ef8eb3604595 100644
--- a/tools/testing/selftests/timers/clocksource-switch.c
+++ b/tools/testing/selftests/timers/clocksource-switch.c
@@ -3,7 +3,7 @@
* (C) Copyright IBM 2012
* Licensed under the GPLv2
*
- * NOTE: This is a meta-test which quickly changes the clocksourc and
+ * NOTE: This is a meta-test which quickly changes the clocksource and
* then uses other tests to detect problems. Thus this test requires
* that the inconsistency-check and nanosleep tests be present in the
* same directory it is run from.
@@ -134,7 +134,7 @@ int main(int argv, char **argc)
return -1;
}
- /* Check everything is sane before we start switching asyncrhonously */
+ /* Check everything is sane before we start switching asynchronously */
for (i = 0; i < count; i++) {
printf("Validating clocksource %s\n", clocksource_list[i]);
if (change_clocksource(clocksource_list[i])) {
diff --git a/tools/testing/selftests/timers/leap-a-day.c b/tools/testing/selftests/timers/leap-a-day.c
index 19e46ed5dfb5..23eb398c8140 100644
--- a/tools/testing/selftests/timers/leap-a-day.c
+++ b/tools/testing/selftests/timers/leap-a-day.c
@@ -5,7 +5,7 @@
* Licensed under the GPLv2
*
* This test signals the kernel to insert a leap second
- * every day at midnight GMT. This allows for stessing the
+ * every day at midnight GMT. This allows for stressing the
* kernel's leap-second behavior, as well as how well applications
* handle the leap-second discontinuity.
*
diff --git a/tools/testing/selftests/timers/leapcrash.c b/tools/testing/selftests/timers/leapcrash.c
index dc80728ed191..f70802c5dd0d 100644
--- a/tools/testing/selftests/timers/leapcrash.c
+++ b/tools/testing/selftests/timers/leapcrash.c
@@ -4,10 +4,10 @@
* (C) Copyright 2013, 2015 Linaro Limited
* Licensed under the GPL
*
- * This test demonstrates leapsecond deadlock that is possibe
+ * This test demonstrates leapsecond deadlock that is possible
* on kernels from 2.6.26 to 3.3.
*
- * WARNING: THIS WILL LIKELY HARDHANG SYSTEMS AND MAY LOSE DATA
+ * WARNING: THIS WILL LIKELY HARD HANG SYSTEMS AND MAY LOSE DATA
* RUN AT YOUR OWN RISK!
* To build:
* $ gcc leapcrash.c -o leapcrash -lrt
diff --git a/tools/testing/selftests/timers/threadtest.c b/tools/testing/selftests/timers/threadtest.c
index cf3e48919874..80aed4bf06fb 100644
--- a/tools/testing/selftests/timers/threadtest.c
+++ b/tools/testing/selftests/timers/threadtest.c
@@ -76,7 +76,7 @@ void checklist(struct timespec *list, int size)
/* The shared thread shares a global list
* that each thread fills while holding the lock.
- * This stresses clock syncronization across cpus.
+ * This stresses clock synchronization across cpus.
*/
void *shared_thread(void *arg)
{
diff --git a/tools/testing/selftests/vm/mremap_dontunmap.c b/tools/testing/selftests/vm/mremap_dontunmap.c
index 3a7b5ef0b0c6..f01dc4a85b0b 100644
--- a/tools/testing/selftests/vm/mremap_dontunmap.c
+++ b/tools/testing/selftests/vm/mremap_dontunmap.c
@@ -127,6 +127,57 @@ static void mremap_dontunmap_simple()
"unable to unmap source mapping");
}
+// This test validates that MREMAP_DONTUNMAP on a shared mapping works as expected.
+static void mremap_dontunmap_simple_shmem()
+{
+ unsigned long num_pages = 5;
+
+ int mem_fd = memfd_create("memfd", MFD_CLOEXEC);
+ BUG_ON(mem_fd < 0, "memfd_create");
+
+ BUG_ON(ftruncate(mem_fd, num_pages * page_size) < 0,
+ "ftruncate");
+
+ void *source_mapping =
+ mmap(NULL, num_pages * page_size, PROT_READ | PROT_WRITE,
+ MAP_FILE | MAP_SHARED, mem_fd, 0);
+ BUG_ON(source_mapping == MAP_FAILED, "mmap");
+
+ BUG_ON(close(mem_fd) < 0, "close");
+
+ memset(source_mapping, 'a', num_pages * page_size);
+
+ // Try to just move the whole mapping anywhere (not fixed).
+ void *dest_mapping =
+ mremap(source_mapping, num_pages * page_size, num_pages * page_size,
+ MREMAP_DONTUNMAP | MREMAP_MAYMOVE, NULL);
+ if (dest_mapping == MAP_FAILED && errno == EINVAL) {
+ // Old kernel which doesn't support MREMAP_DONTUNMAP on shmem.
+ BUG_ON(munmap(source_mapping, num_pages * page_size) == -1,
+ "unable to unmap source mapping");
+ return;
+ }
+
+ BUG_ON(dest_mapping == MAP_FAILED, "mremap");
+
+ // Validate that the pages have been moved; we know they were moved if
+ // the dest_mapping contains a's.
+ BUG_ON(check_region_contains_byte
+ (dest_mapping, num_pages * page_size, 'a') != 0,
+ "pages did not migrate");
+
+ // Because the region is backed by shmem, we will actually see the same
+ // memory at the source location still.
+ BUG_ON(check_region_contains_byte
+ (source_mapping, num_pages * page_size, 'a') != 0,
+ "source should have no ptes");
+
+ BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1,
+ "unable to unmap destination mapping");
+ BUG_ON(munmap(source_mapping, num_pages * page_size) == -1,
+ "unable to unmap source mapping");
+}
+
// This test validates MREMAP_DONTUNMAP will move page tables to a specific
// destination using MREMAP_FIXED, also while validating that the source
// remains intact.
@@ -300,6 +351,7 @@ int main(void)
BUG_ON(page_buffer == MAP_FAILED, "unable to mmap a page.");
mremap_dontunmap_simple();
+ mremap_dontunmap_simple_shmem();
mremap_dontunmap_simple_fixed();
mremap_dontunmap_partial_mapping();
mremap_dontunmap_partial_mapping_overwrite();
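
For readers who want to see the new shmem behavior outside the harness, here is a minimal sketch under the same assumptions the test makes (MREMAP_DONTUNMAP accepted on shmem; older kernels return EINVAL). BUG_ON and check_region_contains_byte are harness helpers, so plain checks are used instead, and the MREMAP_DONTUNMAP fallback define covers older libc headers.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MREMAP_DONTUNMAP
#define MREMAP_DONTUNMAP 4	/* missing from older libc headers */
#endif

int main(void)
{
	size_t len = 5 * (size_t)sysconf(_SC_PAGESIZE);
	int fd = memfd_create("demo", MFD_CLOEXEC);
	char *src, *dst;

	if (fd < 0 || ftruncate(fd, len) < 0)
		return 1;

	src = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (src == MAP_FAILED)
		return 1;
	close(fd);
	memset(src, 'a', len);

	dst = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, NULL);
	if (dst == MAP_FAILED) {
		perror("mremap");	/* EINVAL: kernel predates shmem support */
		return 1;
	}

	/* The moved PTEs make the data visible at the destination... */
	printf("dst[0] = %c\n", dst[0]);
	/* ...and because the backing is shmem rather than anonymous memory,
	 * touching the source faults the same pages back in from the page
	 * cache, so the data is visible there too. */
	printf("src[0] = %c\n", src[0]);
	return 0;
}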
diff --git a/tools/testing/selftests/vm/test_vmalloc.sh b/tools/testing/selftests/vm/test_vmalloc.sh
index 06d2bb109f06..d73b846736f1 100755
--- a/tools/testing/selftests/vm/test_vmalloc.sh
+++ b/tools/testing/selftests/vm/test_vmalloc.sh
@@ -11,6 +11,7 @@
TEST_NAME="vmalloc"
DRIVER="test_${TEST_NAME}"
+NUM_CPUS=`grep -c ^processor /proc/cpuinfo`
# 1 if fails
exitcode=1
@@ -22,9 +23,9 @@ ksft_skip=4
# Static templates for performance, stressing and smoke tests.
# Also it is possible to pass any supported parameters manually.
#
-PERF_PARAM="single_cpu_test=1 sequential_test_order=1 test_repeat_count=3"
-SMOKE_PARAM="single_cpu_test=1 test_loop_count=10000 test_repeat_count=10"
-STRESS_PARAM="test_repeat_count=20"
+PERF_PARAM="sequential_test_order=1 test_repeat_count=3"
+SMOKE_PARAM="test_loop_count=10000 test_repeat_count=10"
+STRESS_PARAM="nr_threads=$NUM_CPUS test_repeat_count=20"
check_test_requirements()
{
@@ -58,8 +59,8 @@ run_perfformance_check()
run_stability_check()
{
- echo "Run stability tests. In order to stress vmalloc subsystem we run"
- echo "all available test cases on all available CPUs simultaneously."
+ echo "Run stability tests. In order to stress vmalloc subsystem all"
+ echo "available test cases are run by NUM_CPUS workers simultaneously."
echo "It will take time, so be patient."
modprobe $DRIVER $STRESS_PARAM > /dev/null 2>&1
@@ -92,17 +93,17 @@ usage()
echo "# Shows help message"
echo "./${DRIVER}.sh"
echo
- echo "# Runs 1 test(id_1), repeats it 5 times on all online CPUs"
- echo "./${DRIVER}.sh run_test_mask=1 test_repeat_count=5"
+ echo "# Runs 1 test(id_1), repeats it 5 times by NUM_CPUS workers"
+ echo "./${DRIVER}.sh nr_threads=$NUM_CPUS run_test_mask=1 test_repeat_count=5"
echo
echo -n "# Runs 4 tests(id_1|id_2|id_4|id_16) on one CPU with "
echo "sequential order"
- echo -n "./${DRIVER}.sh single_cpu_test=1 sequential_test_order=1 "
+ echo -n "./${DRIVER}.sh sequential_test_order=1 "
echo "run_test_mask=23"
echo
- echo -n "# Runs all tests on all online CPUs, shuffled order, repeats "
+ echo -n "# Runs all tests by NUM_CPUS workers, shuffled order, repeats "
echo "20 times"
- echo "./${DRIVER}.sh test_repeat_count=20"
+ echo "./${DRIVER}.sh nr_threads=$NUM_CPUS test_repeat_count=20"
echo
echo "# Performance analysis"
echo "./${DRIVER}.sh performance"
diff --git a/tools/testing/selftests/x86/thunks_32.S b/tools/testing/selftests/x86/thunks_32.S
index a71d92da8f46..f3f56e681e9f 100644
--- a/tools/testing/selftests/x86/thunks_32.S
+++ b/tools/testing/selftests/x86/thunks_32.S
@@ -45,3 +45,5 @@ call64_from_32:
ret
.size call64_from_32, .-call64_from_32
+
+.section .note.GNU-stack,"",%progbits
diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
index 59e417ec3e13..9db867df7679 100644
--- a/tools/thermal/tmon/Makefile
+++ b/tools/thermal/tmon/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# We need this for the "cc-option" macro.
-include ../../../scripts/Kbuild.include
+include ../../build/Build.include
VERSION = 1.0
diff --git a/tools/tracing/latency/latency-collector.c b/tools/tracing/latency/latency-collector.c
index b69de9263ee6..3a2e6bb781a8 100644
--- a/tools/tracing/latency/latency-collector.c
+++ b/tools/tracing/latency/latency-collector.c
@@ -1018,7 +1018,7 @@ static long go_to_sleep(const struct entry *req)
cond_timedwait(&printstate.cond, &printstate.mutex, &future);
if (time_has_passed(&future))
break;
- };
+ }
if (printstate_has_new_req_arrived(req))
delay = -1;
@@ -1941,7 +1941,7 @@ static void scan_arguments(int argc, char *argv[])
if (value < 0) {
warnx("TIME must be >= 0\n");
show_usage();
- ;
+ exit(0);
}
trace_enable = true;
use_random_sleep = true;
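
The loop being tidied above follows a standard deadline-wait idiom: sleep on a condition variable, tolerate spurious wakeups and ETIMEDOUT, and re-check both the predicate and the absolute deadline. A self-contained sketch of that idiom (names here are illustrative, not latency-collector's own):

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool done;

static bool deadline_passed(const struct timespec *t)
{
	struct timespec now;

	/* Condition variables time out against CLOCK_REALTIME by default. */
	clock_gettime(CLOCK_REALTIME, &now);
	return now.tv_sec > t->tv_sec ||
	       (now.tv_sec == t->tv_sec && now.tv_nsec >= t->tv_nsec);
}

void wait_until(const struct timespec *deadline)
{
	pthread_mutex_lock(&lock);
	while (!done) {
		/* May return on a signal, a spurious wakeup, or ETIMEDOUT;
		 * either way, re-check the predicate and the clock. */
		pthread_cond_timedwait(&cond, &lock, deadline);
		if (deadline_passed(deadline))
			break;	/* no stray ';' needed after the loop body */
	}
	pthread_mutex_unlock(&lock);
}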
diff --git a/tools/usb/usbip/doc/usbip.8 b/tools/usb/usbip/doc/usbip.8
index a15d20063b98..1f26e4a00638 100644
--- a/tools/usb/usbip/doc/usbip.8
+++ b/tools/usb/usbip/doc/usbip.8
@@ -50,9 +50,16 @@ Attach a remote USB device.
.PP
.HP
+\fBattach\fR \-\-remote=<\fIhost\fR> \-\-device=<\fIdev_id\fR>
+.IP
+Attach a remote USB gadget.
+Only used when the remote usbipd is in device mode.
+.PP
+
+.HP
\fBdetach\fR \-\-port=<\fIport\fR>
.IP
-Detach an imported USB device.
+Detach an imported USB device/gadget.
.PP
.HP
@@ -74,11 +81,25 @@ List USB devices exported by a remote host.
.PP
.HP
+\fBlist\fR \-\-device
+.IP
+List USB gadgets of local usbip-vudc.
+Only used when the local usbipd is in device mode.
+Note that this cannot list usbip-vudc USB gadgets of a remote device-mode usbipd.
+.PP
+
+.HP
\fBlist\fR \-\-local
.IP
List local USB devices.
.PP
+.HP
+\fBport\fR
+.IP
+List imported devices/gadgets.
+.PP
+
.SH EXAMPLES
@@ -90,8 +111,27 @@ List local USB devices.
client:# usbip attach --remote=server --busid=1-2
- Connect the remote USB device.
+ client:# usbip port
+ - List imported devices/gadgets.
+
client:# usbip detach --port=0
- Detach the usb device.
+The following example shows the usage of device mode:
+
+ server:# usbip list --device
+ - List gadgets exported by local usbipd server.
+
+ client:# modprobe vhci-hcd
+
+ client:# usbip attach --remote=server --device=usbip-vudc.0
+ - Connect the remote USB gadget.
+
+ client:# usbip port
+ - List imported devices/gadgets.
+
+ client:# usbip detach --port=0
+ - Detach the usb gadget.
+
.SH "SEE ALSO"
\fBusbipd\fP\fB(8)\fB\fP
diff --git a/tools/usb/usbip/doc/usbipd.8 b/tools/usb/usbip/doc/usbipd.8
index fb62a756893b..d974394f86a1 100644
--- a/tools/usb/usbip/doc/usbipd.8
+++ b/tools/usb/usbip/doc/usbipd.8
@@ -30,6 +30,12 @@ Bind to IPv6. Default is both.
.PP
.HP
+\fB\-e\fR, \fB\-\-device\fR
+.IP
+Run in device mode. Rather than drive an attached device, create a virtual UDC to bind gadgets to.
+.PP
+
+.HP
\fB\-D\fR, \fB\-\-daemon\fR
.IP
Run as a daemon process.
@@ -86,6 +92,26 @@ USB/IP client can connect and use exported devices.
- A usb device 1-2 is now exportable to other hosts!
- Use 'usbip unbind --busid=1-2' when you want to shutdown exporting and use the device locally.
+The following example shows the usage of device mode:
+
+ server:# modprobe usbip-vudc
+ - Use the /sys/class/udc/ interface.
+ - usbip-host is independent of this module.
+
+ server:# usbipd -e -D
+ - Start usbip daemon in device mode.
+
+ server:# modprobe g_mass_storage file=/tmp/tmp.img
+ - Bind a gadget to usbip-vudc.
+ - In this example, a mass storage gadget is bound.
+
+ server:# usbip list --device
+ - List gadgets exported by local usbipd server.
+
+ server:# modprobe -r g_mass_storage
+ - Unbind a gadget from usbip-vudc.
+ - In this example, the previous mass storage gadget is unbound.
+
.SH "SEE ALSO"
\fBusbip\fP\fB(8)\fB\fP
diff --git a/tools/usb/usbip/libsrc/list.h b/tools/usb/usbip/libsrc/list.h
index a941671e4900..9cca2425587b 100644
--- a/tools/usb/usbip/libsrc/list.h
+++ b/tools/usb/usbip/libsrc/list.h
@@ -77,17 +77,17 @@ static inline void __list_del(struct list_head * prev, struct list_head * next)
#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
+static inline void __list_del_entry(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+}
+
/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
* Note: list_empty() on entry does not return true after this, the entry is
* in an undefined state.
*/
-static inline void __list_del_entry(struct list_head *entry)
-{
- __list_del(entry->prev, entry->next);
-}
-
static inline void list_del(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
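
For readers who have not met this intrusive list API, here is a self-contained sketch of the delete semantics the kernel-doc above now sits next to. The struct and helpers are re-declared for brevity and mirror libsrc/list.h; everything else is purely illustrative.

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static inline void __list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}

static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	/* The real header then poisons entry->next/prev: after list_del()
	 * the node is off the list and must not be traversed through. */
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct list_head node;

	/* list_add(&node, &head), open-coded: */
	node.next = head.next;
	node.prev = &head;
	head.next->prev = &node;
	head.next = &node;

	list_del(&node);
	printf("list empty again: %d\n", head.next == &head);
	return 0;
}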
diff --git a/tools/usb/usbip/src/usbip_list.c b/tools/usb/usbip/src/usbip_list.c
index 8625b0f514ee..3d810bcca02f 100644
--- a/tools/usb/usbip/src/usbip_list.c
+++ b/tools/usb/usbip/src/usbip_list.c
@@ -33,7 +33,8 @@ static const char usbip_list_usage_string[] =
"usbip list [-p|--parsable] <args>\n"
" -p, --parsable Parsable list format\n"
" -r, --remote=<host> List the exportable USB devices on <host>\n"
- " -l, --local List the local USB devices\n";
+ " -l, --local List the local USB devices\n"
+ " -d, --device List the local USB gadgets bound to usbip-vudc\n";
void usbip_list_usage(void)
{