summaryrefslogtreecommitdiff
path: root/tools
diff options
context:
space:
mode:
Diffstat (limited to 'tools')
-rw-r--r--tools/arch/x86/include/asm/msr-index.h3
-rw-r--r--tools/build/feature/Makefile10
-rw-r--r--tools/hv/Makefile4
-rw-r--r--tools/include/linux/mm.h3
-rw-r--r--tools/include/uapi/linux/stddef.h26
-rw-r--r--tools/lib/bpf/libbpf.c21
-rw-r--r--tools/lib/perf/cpumap.c49
-rw-r--r--tools/lib/perf/evsel.c10
-rw-r--r--tools/lib/perf/include/internal/cpumap.h6
-rw-r--r--tools/lib/perf/include/perf/cpumap.h4
-rw-r--r--tools/lib/python/kdoc/kdoc_parser.py5
-rw-r--r--tools/net/ynl/Makefile.deps4
-rwxr-xr-xtools/net/ynl/pyynl/ynl_gen_c.py31
-rw-r--r--tools/perf/Documentation/perf-annotate.txt7
-rw-r--r--tools/perf/Documentation/perf-bench.txt4
-rw-r--r--tools/perf/Documentation/perf-config.txt6
-rw-r--r--tools/perf/Documentation/perf-diff.txt7
-rw-r--r--tools/perf/Documentation/perf-kwork.txt7
-rw-r--r--tools/perf/Documentation/perf-probe.txt6
-rw-r--r--tools/perf/Documentation/perf-report.txt15
-rw-r--r--tools/perf/Documentation/perf-sched.txt7
-rw-r--r--tools/perf/Documentation/perf-script.txt7
-rw-r--r--tools/perf/Documentation/perf-stat.txt4
-rw-r--r--tools/perf/Documentation/perf-timechart.txt7
-rw-r--r--tools/perf/Documentation/tips.txt2
-rw-r--r--tools/perf/Makefile.config1
-rw-r--r--tools/perf/Makefile.perf6
-rw-r--r--tools/perf/arch/arm/util/auxtrace.c6
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c7
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c3
-rw-r--r--tools/perf/arch/arm64/util/header.c2
-rw-r--r--tools/perf/arch/common.c22
-rw-r--r--tools/perf/arch/loongarch/util/Build1
-rw-r--r--tools/perf/arch/powerpc/util/auxtrace.c1
-rw-r--r--tools/perf/arch/sh/include/dwarf-regs-table.h2
-rw-r--r--tools/perf/arch/x86/tests/amd-ibs-period.c3
-rw-r--r--tools/perf/arch/x86/tests/dwarf-unwind.c11
-rw-r--r--tools/perf/arch/x86/util/pmu.c16
-rw-r--r--tools/perf/bench/breakpoint.c4
-rw-r--r--tools/perf/bench/mem-functions.c111
-rw-r--r--tools/perf/bench/numa.c15
-rw-r--r--tools/perf/bench/sched-messaging.c2
-rw-r--r--tools/perf/bench/uprobe.c2
-rw-r--r--tools/perf/builtin-annotate.c15
-rw-r--r--tools/perf/builtin-bench.c42
-rw-r--r--tools/perf/builtin-c2c.c19
-rw-r--r--tools/perf/builtin-config.c2
-rw-r--r--tools/perf/builtin-daemon.c4
-rw-r--r--tools/perf/builtin-data.c8
-rw-r--r--tools/perf/builtin-diff.c13
-rw-r--r--tools/perf/builtin-ftrace.c1
-rw-r--r--tools/perf/builtin-inject.c69
-rw-r--r--tools/perf/builtin-kmem.c2
-rw-r--r--tools/perf/builtin-kwork.c18
-rw-r--r--tools/perf/builtin-lock.c2
-rw-r--r--tools/perf/builtin-probe.c4
-rw-r--r--tools/perf/builtin-record.c106
-rw-r--r--tools/perf/builtin-report.c30
-rw-r--r--tools/perf/builtin-sched.c12
-rw-r--r--tools/perf/builtin-script.c45
-rw-r--r--tools/perf/builtin-stat.c92
-rw-r--r--tools/perf/builtin-timechart.c3
-rw-r--r--tools/perf/builtin-top.c51
-rw-r--r--tools/perf/builtin-trace.c34
-rwxr-xr-xtools/perf/check-headers.sh6
-rw-r--r--tools/perf/jvmti/libjvmti.c5
-rw-r--r--tools/perf/perf.c12
-rw-r--r--tools/perf/pmu-events/Build4
-rw-r--r--tools/perf/pmu-events/arch/arm64/common-and-microarch.json85
-rw-r--r--tools/perf/pmu-events/arch/arm64/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/branch.json45
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/brbe.json6
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/bus.json48
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/exception.json62
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/fp_operation.json78
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/general.json15
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/l1d_cache.json122
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/l1i_cache.json114
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/l2d_cache.json134
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/ll_cache.json107
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/memory.json46
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/metrics.json722
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/misc.json642
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/retired.json94
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/spe.json42
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/spec_operation.json230
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/stall.json145
-rw-r--r--tools/perf/pmu-events/arch/arm64/nvidia/t410/tlb.json158
-rw-r--r--tools/perf/pmu-events/arch/common/common/metrics.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/cache.json27
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/frontend.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/pipeline.json66
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/cache.json27
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/cache.json103
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/frontend.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/pipeline.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/cache.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/pipeline.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/frontend.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/gnr-metrics.json27
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/cache.json36
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/frontend.json27
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv22
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/cache.json67
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/frontend.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json46
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/cache.json159
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/floating-point.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/frontend.json36
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/memory.json27
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/other.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/pipeline.json200
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/virtual-memory.json30
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/cache.json22
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json42
-rw-r--r--tools/perf/pmu-events/empty-pmu-events.c108
-rw-r--r--tools/perf/tests/bitmap.c2
-rw-r--r--tools/perf/tests/bp_signal.c2
-rw-r--r--tools/perf/tests/code-reading.c1
-rw-r--r--tools/perf/tests/cpumap.c6
-rw-r--r--tools/perf/tests/dso-data.c2
-rw-r--r--tools/perf/tests/event_update.c4
-rw-r--r--tools/perf/tests/expand-cgroup.c4
-rw-r--r--tools/perf/tests/hists_cumulate.c2
-rw-r--r--tools/perf/tests/hists_filter.c1
-rw-r--r--tools/perf/tests/hists_output.c2
-rw-r--r--tools/perf/tests/mem2node.c2
-rw-r--r--tools/perf/tests/openat-syscall-all-cpus.c3
-rw-r--r--tools/perf/tests/parse-events.c49
-rw-r--r--tools/perf/tests/perf-record.c8
-rwxr-xr-xtools/perf/tests/shell/data_type_profiling.sh6
-rwxr-xr-xtools/perf/tests/shell/kwork.sh79
-rwxr-xr-xtools/perf/tests/shell/perf_sched_stats.sh42
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh8
-rwxr-xr-xtools/perf/tests/shell/stat_bpf_counters.sh20
-rwxr-xr-xtools/perf/tests/shell/test_brstack.sh146
-rwxr-xr-xtools/perf/tests/shell/test_task_analyzer.sh42
-rwxr-xr-xtools/perf/tests/shell/trace_btf_general.sh2
-rw-r--r--tools/perf/tests/switch-tracking.c2
-rw-r--r--tools/perf/tests/thread-map.c1
-rw-r--r--tools/perf/tests/topology.c11
-rw-r--r--tools/perf/tests/workloads/datasym.c6
-rw-r--r--tools/perf/tests/wp.c6
-rwxr-xr-xtools/perf/trace/beauty/drm_ioctl.sh2
-rwxr-xr-xtools/perf/trace/beauty/fadvise.sh2
-rw-r--r--tools/perf/trace/beauty/include/uapi/drm/drm.h (renamed from tools/include/uapi/drm/drm.h)0
-rw-r--r--tools/perf/trace/beauty/include/uapi/drm/i915_drm.h (renamed from tools/include/uapi/drm/i915_drm.h)0
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/fadvise.h (renamed from tools/include/uapi/linux/fadvise.h)0
-rw-r--r--tools/perf/util/Build2
-rw-r--r--tools/perf/util/addr2line.c17
-rw-r--r--tools/perf/util/addr2line.h2
-rw-r--r--tools/perf/util/affinity.c2
-rw-r--r--tools/perf/util/annotate-arch/annotate-loongarch.c1
-rw-r--r--tools/perf/util/annotate-arch/annotate-x86.c70
-rw-r--r--tools/perf/util/annotate-data.c119
-rw-r--r--tools/perf/util/annotate-data.h3
-rw-r--r--tools/perf/util/annotate.c3
-rw-r--r--tools/perf/util/arm-spe.c26
-rw-r--r--tools/perf/util/block-info.c2
-rw-r--r--tools/perf/util/block-range.c2
-rw-r--r--tools/perf/util/bpf-event.c2
-rw-r--r--tools/perf/util/bpf_counter.c28
-rw-r--r--tools/perf/util/bpf_counter_cgroup.c9
-rw-r--r--tools/perf/util/bpf_kwork.c3
-rw-r--r--tools/perf/util/bpf_kwork_top.c3
-rw-r--r--tools/perf/util/bpf_map.c70
-rw-r--r--tools/perf/util/bpf_map.h23
-rw-r--r--tools/perf/util/bpf_off_cpu.c4
-rw-r--r--tools/perf/util/bpf_skel/syscall_summary.bpf.c18
-rw-r--r--tools/perf/util/bpf_trace_augment.c2
-rw-r--r--tools/perf/util/branch.h3
-rw-r--r--tools/perf/util/callchain.c83
-rw-r--r--tools/perf/util/callchain.h12
-rw-r--r--tools/perf/util/cgroup.c30
-rw-r--r--tools/perf/util/clockid.h3
-rw-r--r--tools/perf/util/config.c6
-rw-r--r--tools/perf/util/cpu-set-sched.h50
-rw-r--r--tools/perf/util/cpumap.c10
-rw-r--r--tools/perf/util/cputopo.c2
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c56
-rw-r--r--tools/perf/util/data-convert-bt.c74
-rw-r--r--tools/perf/util/data-convert-json.c16
-rw-r--r--tools/perf/util/data.c101
-rw-r--r--tools/perf/util/data.h52
-rw-r--r--tools/perf/util/db-export.c1
-rw-r--r--tools/perf/util/debug.c2
-rw-r--r--tools/perf/util/debuginfo.c19
-rw-r--r--tools/perf/util/disasm.c11
-rw-r--r--tools/perf/util/disasm.h2
-rw-r--r--tools/perf/util/drm_pmu.c36
-rw-r--r--tools/perf/util/dsos.c35
-rw-r--r--tools/perf/util/dump-insn.c23
-rw-r--r--tools/perf/util/dwarf-aux.c145
-rw-r--r--tools/perf/util/dwarf-aux.h9
-rw-r--r--tools/perf/util/env.c2
-rw-r--r--tools/perf/util/env.h1
-rw-r--r--tools/perf/util/event.c1
-rw-r--r--tools/perf/util/evlist.c40
-rw-r--r--tools/perf/util/evlist.h2
-rw-r--r--tools/perf/util/evsel.c112
-rw-r--r--tools/perf/util/evsel.h10
-rw-r--r--tools/perf/util/expr.c7
-rw-r--r--tools/perf/util/header.c436
-rw-r--r--tools/perf/util/header.h9
-rw-r--r--tools/perf/util/hist.c5
-rw-r--r--tools/perf/util/hist.h2
-rw-r--r--tools/perf/util/intel-tpebs.c12
-rw-r--r--tools/perf/util/libbfd.c2
-rw-r--r--tools/perf/util/llvm.c10
-rw-r--r--tools/perf/util/maps.c24
-rw-r--r--tools/perf/util/mem2node.c2
-rw-r--r--tools/perf/util/metricgroup.c29
-rw-r--r--tools/perf/util/ordered-events.c2
-rw-r--r--tools/perf/util/parse-events.c2
-rw-r--r--tools/perf/util/perf-regs-arch/perf_regs_s390.c78
-rw-r--r--tools/perf/util/perf_regs.c3
-rw-r--r--tools/perf/util/perf_regs.h1
-rw-r--r--tools/perf/util/pmus.c2
-rw-r--r--tools/perf/util/powerpc-vpadtl.c11
-rw-r--r--tools/perf/util/probe-event.c20
-rw-r--r--tools/perf/util/probe-file.c2
-rw-r--r--tools/perf/util/probe-finder.c8
-rw-r--r--tools/perf/util/sample.c11
-rw-r--r--tools/perf/util/sample.h140
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c2
-rw-r--r--tools/perf/util/session.c83
-rw-r--r--tools/perf/util/sort.c276
-rw-r--r--tools/perf/util/sort.h2
-rw-r--r--tools/perf/util/srcline.c12
-rw-r--r--tools/perf/util/stat-display.c4
-rw-r--r--tools/perf/util/stat-shadow.c1
-rw-r--r--tools/perf/util/stat.c8
-rw-r--r--tools/perf/util/svghelper.c3
-rw-r--r--tools/perf/util/symbol-elf.c20
-rw-r--r--tools/perf/util/symbol.c40
-rw-r--r--tools/perf/util/symbol.h11
-rw-r--r--tools/perf/util/symbol_conf.h4
-rw-r--r--tools/perf/util/synthetic-events.c2
-rw-r--r--tools/perf/util/target.h12
-rw-r--r--tools/perf/util/unwind-libunwind-local.c1
-rw-r--r--tools/perf/util/util.c10
-rw-r--r--tools/perf/util/util.h3
-rw-r--r--tools/perf/util/values.c8
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c41
-rw-r--r--tools/power/x86/turbostat/turbostat.c231
-rw-r--r--tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.826
-rw-r--r--tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c399
-rw-r--r--tools/sched_ext/scx_qmap.bpf.c24
-rw-r--r--tools/testing/cxl/Kbuild9
-rw-r--r--tools/testing/cxl/test/Kbuild1
-rw-r--r--tools/testing/cxl/test/cxl.c449
-rw-r--r--tools/testing/cxl/test/hmem_test.c47
-rw-r--r--tools/testing/cxl/test/mem.c6
-rw-r--r--tools/testing/cxl/test/mock.c50
-rw-r--r--tools/testing/cxl/test/mock.h8
-rwxr-xr-xtools/testing/ktest/ktest.pl8
-rw-r--r--tools/testing/memblock/internal.h43
-rw-r--r--tools/testing/memblock/linux/string_helpers.h10
-rw-r--r--tools/testing/memblock/mmzone.c4
-rw-r--r--tools/testing/radix-tree/maple.c2
-rw-r--r--tools/testing/selftests/Makefile9
-rw-r--r--tools/testing/selftests/arm64/gcs/gcs-util.h6
-rw-r--r--tools/testing/selftests/arm64/gcs/libc-gcs.c1
-rw-r--r--tools/testing/selftests/bpf/Makefile1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c49
-rw-r--r--tools/testing/selftests/bpf/prog_tests/snprintf.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_sk.c17
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_local_data.h13
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_task_local_data.c96
-rw-r--r--tools/testing/selftests/bpf/prog_tests/token.c86
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trace_printk.c28
-rw-r--r--tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c21
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_cubic.c14
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_unix.c10
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_misc.h4
-rw-r--r--tools/testing/selftests/bpf/progs/map_kptr_fail.c15
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_sk.c16
-rw-r--r--tools/testing/selftests/bpf/progs/task_local_data.bpf.h5
-rw-r--r--tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c15
-rw-r--r--tools/testing/selftests/bpf/progs/timer_start_deadlock.c8
-rw-r--r--tools/testing/selftests/bpf/progs/token_kallsyms.c19
-rw-r--r--tools/testing/selftests/bpf/progs/trace_printk.c10
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_live_stack.c193
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_precision.c4
-rw-r--r--tools/testing/selftests/bpf/sysctl_helpers.c37
-rw-r--r--tools/testing/selftests/bpf/sysctl_helpers.h8
-rw-r--r--tools/testing/selftests/cgroup/lib/cgroup_util.c5
-rwxr-xr-xtools/testing/selftests/cgroup/test_cpuset_v1_base.sh2
-rw-r--r--tools/testing/selftests/cgroup/test_kmem.c10
-rw-r--r--tools/testing/selftests/drivers/net/README.rst10
-rw-r--r--tools/testing/selftests/drivers/net/bonding/lag_lib.sh17
-rw-r--r--tools/testing/selftests/drivers/net/hw/Makefile1
-rw-r--r--tools/testing/selftests/drivers/net/hw/config5
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/ipsec_vxlan.py204
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/load.py5
-rwxr-xr-xtools/testing/selftests/drivers/net/shaper.py24
-rwxr-xr-xtools/testing/selftests/drivers/net/team/dev_addr_lists.sh2
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe_module.tc87
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_multiple_fprobe.tc69
-rw-r--r--tools/testing/selftests/kselftest.h2
-rw-r--r--tools/testing/selftests/kselftest/runner.sh49
-rw-r--r--tools/testing/selftests/kselftest_harness.h12
-rw-r--r--tools/testing/selftests/kvm/access_tracking_perf_test.c44
-rw-r--r--tools/testing/selftests/kvm/arch_timer.c6
-rw-r--r--tools/testing/selftests/kvm/arm64/aarch32_id_regs.c14
-rw-r--r--tools/testing/selftests/kvm/arm64/arch_timer.c8
-rw-r--r--tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c161
-rw-r--r--tools/testing/selftests/kvm/arm64/debug-exceptions.c72
-rw-r--r--tools/testing/selftests/kvm/arm64/hypercalls.c24
-rw-r--r--tools/testing/selftests/kvm/arm64/idreg-idst.c4
-rw-r--r--tools/testing/selftests/kvm/arm64/no-vgic.c8
-rw-r--r--tools/testing/selftests/kvm/arm64/page_fault_test.c82
-rw-r--r--tools/testing/selftests/kvm/arm64/psci_test.c26
-rw-r--r--tools/testing/selftests/kvm/arm64/sea_to_user.c41
-rw-r--r--tools/testing/selftests/kvm/arm64/set_id_regs.c70
-rw-r--r--tools/testing/selftests/kvm/arm64/smccc_filter.c10
-rw-r--r--tools/testing/selftests/kvm/arm64/vgic_init.c56
-rw-r--r--tools/testing/selftests/kvm/arm64/vgic_irq.c137
-rw-r--r--tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c20
-rw-r--r--tools/testing/selftests/kvm/arm64/vgic_v5.c10
-rw-r--r--tools/testing/selftests/kvm/arm64/vpmu_counter_access.c56
-rw-r--r--tools/testing/selftests/kvm/coalesced_io_test.c38
-rw-r--r--tools/testing/selftests/kvm/demand_paging_test.c10
-rw-r--r--tools/testing/selftests/kvm/dirty_log_perf_test.c14
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c82
-rw-r--r--tools/testing/selftests/kvm/get-reg-list.c2
-rw-r--r--tools/testing/selftests/kvm/guest_memfd_test.c27
-rw-r--r--tools/testing/selftests/kvm/guest_print_test.c22
-rw-r--r--tools/testing/selftests/kvm/hardware_disable_test.c6
-rw-r--r--tools/testing/selftests/kvm/include/arm64/arch_timer.h30
-rw-r--r--tools/testing/selftests/kvm/include/arm64/delay.h4
-rw-r--r--tools/testing/selftests/kvm/include/arm64/gic.h8
-rw-r--r--tools/testing/selftests/kvm/include/arm64/gic_v3_its.h7
-rw-r--r--tools/testing/selftests/kvm/include/arm64/processor.h22
-rw-r--r--tools/testing/selftests/kvm/include/arm64/ucall.h4
-rw-r--r--tools/testing/selftests/kvm/include/arm64/vgic.h22
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h346
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util_types.h8
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/arch_timer.h4
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/ucall.h4
-rw-r--r--tools/testing/selftests/kvm/include/memstress.h30
-rw-r--r--tools/testing/selftests/kvm/include/riscv/arch_timer.h22
-rw-r--r--tools/testing/selftests/kvm/include/riscv/processor.h9
-rw-r--r--tools/testing/selftests/kvm/include/riscv/ucall.h4
-rw-r--r--tools/testing/selftests/kvm/include/s390/diag318_test_handler.h2
-rw-r--r--tools/testing/selftests/kvm/include/s390/facility.h4
-rw-r--r--tools/testing/selftests/kvm/include/s390/ucall.h4
-rw-r--r--tools/testing/selftests/kvm/include/sparsebit.h6
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h40
-rw-r--r--tools/testing/selftests/kvm/include/timer_test.h18
-rw-r--r--tools/testing/selftests/kvm/include/ucall_common.h22
-rw-r--r--tools/testing/selftests/kvm/include/userfaultfd_util.h6
-rw-r--r--tools/testing/selftests/kvm/include/x86/apic.h22
-rw-r--r--tools/testing/selftests/kvm/include/x86/evmcs.h22
-rw-r--r--tools/testing/selftests/kvm/include/x86/hyperv.h28
-rw-r--r--tools/testing/selftests/kvm/include/x86/kvm_util_arch.h36
-rw-r--r--tools/testing/selftests/kvm/include/x86/pmu.h9
-rw-r--r--tools/testing/selftests/kvm/include/x86/processor.h292
-rw-r--r--tools/testing/selftests/kvm/include/x86/sev.h20
-rw-r--r--tools/testing/selftests/kvm/include/x86/smm.h3
-rw-r--r--tools/testing/selftests/kvm/include/x86/svm_util.h12
-rw-r--r--tools/testing/selftests/kvm/include/x86/ucall.h2
-rw-r--r--tools/testing/selftests/kvm/include/x86/vmx.h70
-rw-r--r--tools/testing/selftests/kvm/kvm_page_table_test.c54
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/gic.c6
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/gic_private.h26
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/gic_v3.c90
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c11
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/processor.c165
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/ucall.c12
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/vgic.c40
-rw-r--r--tools/testing/selftests/kvm/lib/elf.c17
-rw-r--r--tools/testing/selftests/kvm/lib/guest_modes.c2
-rw-r--r--tools/testing/selftests/kvm/lib/guest_sprintf.c18
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c359
-rw-r--r--tools/testing/selftests/kvm/lib/loongarch/processor.c110
-rw-r--r--tools/testing/selftests/kvm/lib/loongarch/ucall.c12
-rw-r--r--tools/testing/selftests/kvm/lib/memstress.c38
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/processor.c91
-rw-r--r--tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c12
-rw-r--r--tools/testing/selftests/kvm/lib/s390/facility.c2
-rw-r--r--tools/testing/selftests/kvm/lib/s390/processor.c65
-rw-r--r--tools/testing/selftests/kvm/lib/sparsebit.c18
-rw-r--r--tools/testing/selftests/kvm/lib/test_util.c30
-rw-r--r--tools/testing/selftests/kvm/lib/ucall_common.c34
-rw-r--r--tools/testing/selftests/kvm/lib/userfaultfd_util.c14
-rw-r--r--tools/testing/selftests/kvm/lib/x86/apic.c2
-rw-r--r--tools/testing/selftests/kvm/lib/x86/hyperv.c14
-rw-r--r--tools/testing/selftests/kvm/lib/x86/memstress.c14
-rw-r--r--tools/testing/selftests/kvm/lib/x86/pmu.c8
-rw-r--r--tools/testing/selftests/kvm/lib/x86/processor.c292
-rw-r--r--tools/testing/selftests/kvm/lib/x86/sev.c20
-rw-r--r--tools/testing/selftests/kvm/lib/x86/svm.c16
-rw-r--r--tools/testing/selftests/kvm/lib/x86/ucall.c4
-rw-r--r--tools/testing/selftests/kvm/lib/x86/vmx.c44
-rw-r--r--tools/testing/selftests/kvm/loongarch/arch_timer.c28
-rw-r--r--tools/testing/selftests/kvm/loongarch/pmu_test.c10
-rw-r--r--tools/testing/selftests/kvm/memslot_modification_stress_test.c10
-rw-r--r--tools/testing/selftests/kvm/memslot_perf_test.c164
-rw-r--r--tools/testing/selftests/kvm/mmu_stress_test.c28
-rw-r--r--tools/testing/selftests/kvm/pre_fault_memory_test.c12
-rw-r--r--tools/testing/selftests/kvm/riscv/arch_timer.c8
-rw-r--r--tools/testing/selftests/kvm/riscv/ebreak_test.c6
-rw-r--r--tools/testing/selftests/kvm/riscv/get-reg-list.c4
-rw-r--r--tools/testing/selftests/kvm/riscv/sbi_pmu_test.c8
-rw-r--r--tools/testing/selftests/kvm/s390/debug_test.c8
-rw-r--r--tools/testing/selftests/kvm/s390/irq_routing.c2
-rw-r--r--tools/testing/selftests/kvm/s390/memop.c94
-rw-r--r--tools/testing/selftests/kvm/s390/resets.c6
-rw-r--r--tools/testing/selftests/kvm/s390/shared_zeropage_test.c2
-rw-r--r--tools/testing/selftests/kvm/s390/tprot.c24
-rw-r--r--tools/testing/selftests/kvm/s390/ucontrol_test.c8
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c40
-rw-r--r--tools/testing/selftests/kvm/steal_time.c81
-rw-r--r--tools/testing/selftests/kvm/system_counter_offset_test.c12
-rw-r--r--tools/testing/selftests/kvm/x86/amx_test.c14
-rw-r--r--tools/testing/selftests/kvm/x86/aperfmperf_test.c16
-rw-r--r--tools/testing/selftests/kvm/x86/apic_bus_clock_test.c24
-rw-r--r--tools/testing/selftests/kvm/x86/cpuid_test.c6
-rw-r--r--tools/testing/selftests/kvm/x86/debug_regs.c4
-rw-r--r--tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c16
-rw-r--r--tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c6
-rw-r--r--tools/testing/selftests/kvm/x86/fastops_test.c52
-rw-r--r--tools/testing/selftests/kvm/x86/feature_msrs_test.c12
-rw-r--r--tools/testing/selftests/kvm/x86/fix_hypercall_test.c20
-rw-r--r--tools/testing/selftests/kvm/x86/flds_emulation.h6
-rw-r--r--tools/testing/selftests/kvm/x86/hwcr_msr_test.c10
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_clock.c6
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_evmcs.c10
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c20
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_features.c26
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_ipi.c12
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_svm_test.c10
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c36
-rw-r--r--tools/testing/selftests/kvm/x86/kvm_buslock_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/kvm_clock_test.c14
-rw-r--r--tools/testing/selftests/kvm/x86/kvm_pv_test.c10
-rw-r--r--tools/testing/selftests/kvm/x86/monitor_mwait_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/nested_close_kvm_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/nested_dirty_log_test.c10
-rw-r--r--tools/testing/selftests/kvm/x86/nested_emulation_test.c20
-rw-r--r--tools/testing/selftests/kvm/x86/nested_exceptions_test.c6
-rw-r--r--tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/nested_set_state_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c12
-rw-r--r--tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c26
-rw-r--r--tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/nx_huge_pages_test.c18
-rw-r--r--tools/testing/selftests/kvm/x86/platform_info_test.c6
-rw-r--r--tools/testing/selftests/kvm/x86/pmu_counters_test.c109
-rw-r--r--tools/testing/selftests/kvm/x86/pmu_event_filter_test.c102
-rw-r--r--tools/testing/selftests/kvm/x86/private_mem_conversions_test.c78
-rw-r--r--tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c14
-rw-r--r--tools/testing/selftests/kvm/x86/set_boot_cpu_id.c6
-rw-r--r--tools/testing/selftests/kvm/x86/set_sregs_test.c6
-rw-r--r--tools/testing/selftests/kvm/x86/sev_init2_tests.c6
-rw-r--r--tools/testing/selftests/kvm/x86/sev_smoke_test.c22
-rw-r--r--tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c8
-rw-r--r--tools/testing/selftests/kvm/x86/smm_test.c8
-rw-r--r--tools/testing/selftests/kvm/x86/state_test.c14
-rw-r--r--tools/testing/selftests/kvm/x86/svm_int_ctl_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c2
-rw-r--r--tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c2
-rw-r--r--tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c10
-rw-r--r--tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c14
-rw-r--r--tools/testing/selftests/kvm/x86/svm_vmcall_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/sync_regs_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/triple_fault_event_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86/tsc_msrs_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/tsc_scaling_sync.c4
-rw-r--r--tools/testing/selftests/kvm/x86/ucna_injection_test.c45
-rw-r--r--tools/testing/selftests/kvm/x86/userspace_io_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c58
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_apic_access_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c2
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_msrs_test.c22
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c12
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/xapic_ipi_test.c64
-rw-r--r--tools/testing/selftests/kvm/x86/xapic_state_test.c20
-rw-r--r--tools/testing/selftests/kvm/x86/xapic_tpr_test.c24
-rw-r--r--tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c8
-rw-r--r--tools/testing/selftests/kvm/x86/xen_shinfo_test.c22
-rw-r--r--tools/testing/selftests/kvm/x86/xss_msr_test.c2
-rw-r--r--tools/testing/selftests/liveupdate/liveupdate.c41
-rwxr-xr-xtools/testing/selftests/mm/charge_reserved_hugetlb.sh5
-rw-r--r--tools/testing/selftests/mm/config1
-rw-r--r--tools/testing/selftests/mm/guard-regions.c4
-rw-r--r--tools/testing/selftests/mm/hmm-tests.c83
-rw-r--r--tools/testing/selftests/mm/hugetlb_dio.c91
-rw-r--r--tools/testing/selftests/mm/merge.c88
-rw-r--r--tools/testing/selftests/mm/soft-dirty.c4
-rw-r--r--tools/testing/selftests/mm/split_huge_page_test.c19
-rw-r--r--tools/testing/selftests/mm/thp_settings.c35
-rw-r--r--tools/testing/selftests/mm/thp_settings.h1
-rw-r--r--tools/testing/selftests/mm/transhuge-stress.c4
-rw-r--r--tools/testing/selftests/mm/vm_util.c24
-rw-r--r--tools/testing/selftests/mm/vm_util.h2
-rw-r--r--tools/testing/selftests/net/Makefile1
-rw-r--r--tools/testing/selftests/net/config1
-rwxr-xr-xtools/testing/selftests/net/fib_nexthops.sh22
-rwxr-xr-xtools/testing/selftests/net/mptcp/diag.sh28
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_lib.sh16
-rwxr-xr-xtools/testing/selftests/net/mptcp/pm_netlink.sh20
-rwxr-xr-xtools/testing/selftests/net/openvswitch/openvswitch.sh37
-rw-r--r--tools/testing/selftests/net/openvswitch/ovs-dpctl.py19
-rw-r--r--tools/testing/selftests/net/ovpn/common.sh355
-rw-r--r--tools/testing/selftests/net/ovpn/config3
-rwxr-xr-xtools/testing/selftests/net/ovpn/test-chachapoly.sh2
-rwxr-xr-xtools/testing/selftests/net/ovpn/test-close-socket-tcp.sh2
-rwxr-xr-xtools/testing/selftests/net/ovpn/test-close-socket.sh90
-rwxr-xr-xtools/testing/selftests/net/ovpn/test-float.sh2
-rwxr-xr-xtools/testing/selftests/net/ovpn/test-mark.sh237
-rwxr-xr-xtools/testing/selftests/net/ovpn/test-symmetric-id-float.sh4
-rwxr-xr-xtools/testing/selftests/net/ovpn/test-symmetric-id-tcp.sh4
-rwxr-xr-xtools/testing/selftests/net/ovpn/test-symmetric-id.sh2
-rwxr-xr-xtools/testing/selftests/net/ovpn/test-tcp.sh2
-rwxr-xr-xtools/testing/selftests/net/ovpn/test.sh449
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_rfc5961_ack-out-of-window.pkt48
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_ts_recent_invalid_ack.pkt4
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh44
-rw-r--r--tools/testing/selftests/net/tcp_ao/config1
-rwxr-xr-xtools/testing/selftests/net/tcp_ecmp_failover.sh216
-rw-r--r--tools/testing/selftests/net/tls.c43
-rw-r--r--tools/testing/selftests/rdma/Makefile7
-rw-r--r--tools/testing/selftests/rdma/config3
-rwxr-xr-xtools/testing/selftests/rdma/rxe_ipv6.sh63
-rwxr-xr-xtools/testing/selftests/rdma/rxe_rping_between_netns.sh85
-rwxr-xr-xtools/testing/selftests/rdma/rxe_socket_with_netns.sh76
-rwxr-xr-xtools/testing/selftests/rdma/rxe_test_NETDEV_UNREGISTER.sh63
-rw-r--r--tools/testing/selftests/riscv/cfi/Makefile2
-rw-r--r--tools/testing/selftests/rseq/Makefile21
-rw-r--r--tools/testing/selftests/rseq/check_optimized.c17
-rw-r--r--tools/testing/selftests/rseq/legacy_check.c65
-rw-r--r--tools/testing/selftests/rseq/param_test.c25
-rw-r--r--tools/testing/selftests/rseq/rseq-abi.h7
-rw-r--r--tools/testing/selftests/rseq/rseq.c39
-rw-r--r--tools/testing/selftests/rseq/rseq.h8
-rwxr-xr-xtools/testing/selftests/rseq/run_legacy_check.sh4
-rwxr-xr-xtools/testing/selftests/rseq/run_param_test.sh39
-rwxr-xr-xtools/testing/selftests/rseq/run_timeslice_test.sh14
-rw-r--r--tools/testing/selftests/rseq/slice_test.c12
-rw-r--r--tools/testing/selftests/sched_ext/Makefile1
-rw-r--r--tools/testing/selftests/sched_ext/dequeue.c1
-rw-r--r--tools/testing/selftests/sched_ext/non_scx_kfunc_deny.bpf.c44
-rw-r--r--tools/testing/selftests/sched_ext/non_scx_kfunc_deny.c47
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json189
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json26
-rw-r--r--tools/testing/selftests/ublk/Makefile1
-rwxr-xr-xtools/testing/selftests/ublk/test_batch_01.sh4
-rwxr-xr-xtools/testing/selftests/ublk/test_batch_02.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_batch_03.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_02.sh4
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_03.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_06.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_07.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_08.sh4
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_09.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_10.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_12.sh4
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_13.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_16.sh4
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_17.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_integrity_02.sh5
-rwxr-xr-xtools/testing/selftests/ublk/test_integrity_03.sh103
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_01.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_02.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_03.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_04.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_05.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_06.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_07.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_null_01.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_null_02.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_null_03.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_part_01.sh4
-rwxr-xr-xtools/testing/selftests/ublk/test_part_02.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_recover_01.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_recover_02.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_recover_03.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_recover_04.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_shmemzc_01.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_shmemzc_02.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_shmemzc_03.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_shmemzc_04.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_01.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_02.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_03.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_04.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_05.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_06.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_07.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_08.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_09.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_01.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_02.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_03.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_04.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_05.sh2
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_06.sh2
-rw-r--r--tools/testing/vma/include/dup.h2
-rw-r--r--tools/testing/vma/include/stubs.h3
-rw-r--r--tools/testing/vsock/util.c15
-rw-r--r--tools/testing/vsock/vsock_test.c50
-rw-r--r--tools/usb/usbip/libsrc/usbip_device_driver.c6
-rw-r--r--tools/usb/usbip/libsrc/usbip_host_common.c3
-rw-r--r--tools/usb/usbip/libsrc/usbip_host_driver.c7
622 files changed, 14364 insertions, 5526 deletions
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 6673601246b3..eff29645719b 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -793,9 +793,10 @@
#define MSR_AMD64_LBR_SELECT 0xc000010e
/* Zen4 */
-#define MSR_ZEN4_BP_CFG 0xc001102e
+#define MSR_ZEN4_BP_CFG 0xc001102e
#define MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT 4
#define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
+#define MSR_ZEN2_BP_CFG_BUG_FIX_BIT 33
/* Fam 19h MSRs */
#define MSR_F19H_UMC_PERF_CTL 0xc0010800
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 99bdc8a6d26e..704c687ed3ad 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -104,12 +104,18 @@ else
endif
endif
+ifeq ($(findstring -static,${LDFLAGS}),-static)
+ PKG_CONFIG += --static
+endif
+
all: $(FILES)
__BUILD = $(CC) $(CFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.c,$(@F)) $(LDFLAGS)
BUILD = $(__BUILD) > $(@:.bin=.make.output) 2>&1
BUILD_BFD = $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
- BUILD_ALL = $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -ldl -lz -llzma -lzstd -lssl
+ BUILD_ALL = $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang \
+ $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -ldl -lz -llzma -lzstd \
+ $(shell $(PKG_CONFIG) --libs --cflags openssl 2>/dev/null)
__BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(@F)) $(LDFLAGS)
BUILDXX = $(__BUILDXX) > $(@:.bin=.make.output) 2>&1
@@ -388,7 +394,7 @@ $(OUTPUT)test-libpfm4.bin:
$(BUILD) -lpfm
$(OUTPUT)test-libopenssl.bin:
- $(BUILD) -lssl
+ $(BUILD) $(shell $(PKG_CONFIG) --libs --cflags openssl 2>/dev/null)
$(OUTPUT)test-bpftool-skeletons.bin:
$(SYSTEM_BPFTOOL) version | grep '^features:.*skeletons' \
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index 34ffcec264ab..016753f3dd7f 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -2,7 +2,7 @@
# Makefile for Hyper-V tools
include ../scripts/Makefile.include
-ARCH := $(shell uname -m 2>/dev/null)
+ARCH ?= $(shell uname -m 2>/dev/null)
sbindir ?= /usr/sbin
libexecdir ?= /usr/libexec
sharedstatedir ?= /var/lib
@@ -20,7 +20,7 @@ override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
override CFLAGS += -Wno-address-of-packed-member
ALL_TARGETS := hv_kvp_daemon hv_vss_daemon
-ifneq ($(ARCH), aarch64)
+ifneq ($(filter x86_64 x86,$(ARCH)),)
ALL_TARGETS += hv_fcopy_uio_daemon
endif
ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
index 028f3faf46e7..84b5954f66c3 100644
--- a/tools/include/linux/mm.h
+++ b/tools/include/linux/mm.h
@@ -17,6 +17,7 @@
#define __va(x) ((void *)((unsigned long)(x)))
#define __pa(x) ((unsigned long)(x))
+#define __pa_symbol(x) ((unsigned long)(x))
#define pfn_to_page(pfn) ((void *)((pfn) * PAGE_SIZE))
@@ -32,8 +33,6 @@ static inline phys_addr_t virt_to_phys(volatile void *address)
return (phys_addr_t)address;
}
-void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid);
-
static inline void totalram_pages_inc(void)
{
}
diff --git a/tools/include/uapi/linux/stddef.h b/tools/include/uapi/linux/stddef.h
index c53cde425406..457498259494 100644
--- a/tools/include/uapi/linux/stddef.h
+++ b/tools/include/uapi/linux/stddef.h
@@ -3,7 +3,6 @@
#define _LINUX_STDDEF_H
-
#ifndef __always_inline
#define __always_inline __inline__
#endif
@@ -36,6 +35,11 @@
struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \
} ATTRS
+#ifdef __cplusplus
+/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */
+#define __DECLARE_FLEX_ARRAY(T, member) \
+ T member[0]
+#else
/**
* __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
*
@@ -52,3 +56,23 @@
TYPE NAME[]; \
}
#endif
+
+#ifndef __counted_by
+#define __counted_by(m)
+#endif
+
+#ifndef __counted_by_le
+#define __counted_by_le(m)
+#endif
+
+#ifndef __counted_by_be
+#define __counted_by_be(m)
+#endif
+
+#ifndef __counted_by_ptr
+#define __counted_by_ptr(m)
+#endif
+
+#define __kernel_nonstring
+
+#endif /* _LINUX_STDDEF_H */
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 8b0c3246097f..3a80a018fc7d 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -5852,11 +5852,12 @@ static int load_module_btfs(struct bpf_object *obj)
info.name = ptr_to_u64(name);
info.name_len = sizeof(name);
+ btf = NULL;
err = bpf_btf_get_info_by_fd(fd, &info, &len);
if (err) {
err = -errno;
pr_warn("failed to get BTF object #%d info: %s\n", id, errstr(err));
- goto err_out;
+ break;
}
/* ignore non-module BTFs */
@@ -5870,15 +5871,15 @@ static int load_module_btfs(struct bpf_object *obj)
if (err) {
pr_warn("failed to load module [%s]'s BTF object #%d: %s\n",
name, id, errstr(err));
- goto err_out;
+ break;
}
err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
if (err)
- goto err_out;
+ break;
- mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
+ mod_btf = &obj->btf_modules[obj->btf_module_cnt];
mod_btf->btf = btf;
mod_btf->id = id;
@@ -5886,16 +5887,16 @@ static int load_module_btfs(struct bpf_object *obj)
mod_btf->name = strdup(name);
if (!mod_btf->name) {
err = -ENOMEM;
- goto err_out;
+ break;
}
- continue;
+ obj->btf_module_cnt++;
+ }
-err_out:
+ if (err) {
+ btf__free(btf);
close(fd);
- return err;
}
-
- return 0;
+ return err;
}
static struct bpf_core_cand_list *
diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c
index 4160e7d2e120..e51b0490ad57 100644
--- a/tools/lib/perf/cpumap.c
+++ b/tools/lib/perf/cpumap.c
@@ -15,12 +15,12 @@
#define MAX_NR_CPUS 4096
-void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
+void perf_cpu_map__set_nr(struct perf_cpu_map *map, unsigned int nr_cpus)
{
RC_CHK_ACCESS(map)->nr = nr_cpus;
}
-struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
+struct perf_cpu_map *perf_cpu_map__alloc(unsigned int nr_cpus)
{
RC_STRUCT(perf_cpu_map) *cpus;
struct perf_cpu_map *result;
@@ -78,7 +78,7 @@ void perf_cpu_map__put(struct perf_cpu_map *map)
static struct perf_cpu_map *cpu_map__new_sysconf(void)
{
struct perf_cpu_map *cpus;
- int nr_cpus, nr_cpus_conf;
+ long nr_cpus, nr_cpus_conf;
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
if (nr_cpus < 0)
@@ -86,15 +86,13 @@ static struct perf_cpu_map *cpu_map__new_sysconf(void)
nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
if (nr_cpus != nr_cpus_conf) {
- pr_warning("Number of online CPUs (%d) differs from the number configured (%d) the CPU map will only cover the first %d CPUs.",
+ pr_warning("Number of online CPUs (%ld) differs from the number configured (%ld) the CPU map will only cover the first %ld CPUs.",
nr_cpus, nr_cpus_conf, nr_cpus);
}
cpus = perf_cpu_map__alloc(nr_cpus);
if (cpus != NULL) {
- int i;
-
- for (i = 0; i < nr_cpus; ++i)
+ for (long i = 0; i < nr_cpus; ++i)
RC_CHK_ACCESS(cpus)->map[i].cpu = i;
}
@@ -132,23 +130,23 @@ static int cmp_cpu(const void *a, const void *b)
return cpu_a->cpu - cpu_b->cpu;
}
-static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
+static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx)
{
return RC_CHK_ACCESS(cpus)->map[idx];
}
-static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
+static struct perf_cpu_map *cpu_map__trim_new(unsigned int nr_cpus, const struct perf_cpu *tmp_cpus)
{
size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
- int i, j;
if (cpus != NULL) {
+ unsigned int j = 0;
+
memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
/* Remove dups */
- j = 0;
- for (i = 0; i < nr_cpus; i++) {
+ for (unsigned int i = 0; i < nr_cpus; i++) {
if (i == 0 ||
__perf_cpu_map__cpu(cpus, i).cpu !=
__perf_cpu_map__cpu(cpus, i - 1).cpu) {
@@ -167,9 +165,8 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
struct perf_cpu_map *cpus = NULL;
unsigned long start_cpu, end_cpu = 0;
char *p = NULL;
- int i, nr_cpus = 0;
+ unsigned int nr_cpus = 0, max_entries = 0;
struct perf_cpu *tmp_cpus = NULL, *tmp;
- int max_entries = 0;
if (!cpu_list)
return perf_cpu_map__new_online_cpus();
@@ -208,9 +205,10 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
for (; start_cpu <= end_cpu; start_cpu++) {
/* check for duplicates */
- for (i = 0; i < nr_cpus; i++)
+ for (unsigned int i = 0; i < nr_cpus; i++) {
if (tmp_cpus[i].cpu == (int16_t)start_cpu)
goto invalid;
+ }
if (nr_cpus == max_entries) {
max_entries += max(end_cpu - start_cpu + 1, 16UL);
@@ -252,12 +250,12 @@ struct perf_cpu_map *perf_cpu_map__new_int(int cpu)
return cpus;
}
-static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
+static unsigned int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
return RC_CHK_ACCESS(cpus)->nr;
}
-struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
+struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx)
{
struct perf_cpu result = {
.cpu = -1
@@ -269,7 +267,7 @@ struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
return result;
}
-int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
+unsigned int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
return cpus ? __perf_cpu_map__nr(cpus) : 1;
}
@@ -294,7 +292,7 @@ bool perf_cpu_map__is_empty(const struct perf_cpu_map *map)
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
- int low, high;
+ unsigned int low, high;
if (!cpus)
return -1;
@@ -324,7 +322,7 @@ bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
{
- int nr;
+ unsigned int nr;
if (lhs == rhs)
return true;
@@ -336,7 +334,7 @@ bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_m
if (nr != __perf_cpu_map__nr(rhs))
return false;
- for (int idx = 0; idx < nr; idx++) {
+ for (unsigned int idx = 0; idx < nr; idx++) {
if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
return false;
}
@@ -353,7 +351,7 @@ struct perf_cpu perf_cpu_map__min(const struct perf_cpu_map *map)
struct perf_cpu cpu, result = {
.cpu = -1
};
- int idx;
+ unsigned int idx;
perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
result = cpu;
@@ -384,7 +382,7 @@ bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu
if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
return false;
- for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
+ for (unsigned int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
return false;
if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
@@ -410,8 +408,7 @@ bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu
int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other)
{
struct perf_cpu *tmp_cpus;
- int tmp_len;
- int i, j, k;
+ unsigned int tmp_len, i, j, k;
struct perf_cpu_map *merged;
if (perf_cpu_map__is_subset(*orig, other))
@@ -455,7 +452,7 @@ int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other)
struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
struct perf_cpu_map *other)
{
- int i, j, k;
+ unsigned int i, j, k;
struct perf_cpu_map *merged;
if (perf_cpu_map__is_subset(other, orig))
diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
index 13a307fc75ae..f747c0bc692d 100644
--- a/tools/lib/perf/evsel.c
+++ b/tools/lib/perf/evsel.c
@@ -127,7 +127,8 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads)
{
struct perf_cpu cpu;
- int idx, thread, err = 0;
+ unsigned int idx;
+ int thread, err = 0;
if (cpus == NULL) {
static struct perf_cpu_map *empty_cpu_map;
@@ -460,7 +461,7 @@ int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread)
{
struct perf_cpu cpu __maybe_unused;
- int idx;
+ unsigned int idx;
int err;
perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) {
@@ -499,12 +500,13 @@ int perf_evsel__disable(struct perf_evsel *evsel)
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
- int err = 0, i;
+ int err = 0;
- for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
+ for (unsigned int i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++) {
err = perf_evsel__run_ioctl(evsel,
PERF_EVENT_IOC_SET_FILTER,
(void *)filter, i);
+ }
return err;
}
diff --git a/tools/lib/perf/include/internal/cpumap.h b/tools/lib/perf/include/internal/cpumap.h
index e2be2d17c32b..c19678188b17 100644
--- a/tools/lib/perf/include/internal/cpumap.h
+++ b/tools/lib/perf/include/internal/cpumap.h
@@ -16,16 +16,16 @@
DECLARE_RC_STRUCT(perf_cpu_map) {
refcount_t refcnt;
/** Length of the map array. */
- int nr;
+ unsigned int nr;
/** The CPU values. */
struct perf_cpu map[];
};
-struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus);
+struct perf_cpu_map *perf_cpu_map__alloc(unsigned int nr_cpus);
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu);
bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b);
-void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus);
+void perf_cpu_map__set_nr(struct perf_cpu_map *map, unsigned int nr_cpus);
static inline refcount_t *perf_cpu_map__refcnt(struct perf_cpu_map *map)
{
diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h
index 58cc5c5fa47c..a1dd25db65b6 100644
--- a/tools/lib/perf/include/perf/cpumap.h
+++ b/tools/lib/perf/include/perf/cpumap.h
@@ -49,7 +49,7 @@ LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
* perf_cpu_map__cpu - get the CPU value at the given index. Returns -1 if index
* is invalid.
*/
-LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
+LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx);
/**
* perf_cpu_map__nr - for an empty map returns 1, as perf_cpu_map__cpu returns a
* cpu of -1 for an invalid index, this makes an empty map
@@ -57,7 +57,7 @@ LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, i
* the result is the number CPUs in the map plus one if the
* "any CPU"/dummy value is present.
*/
-LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
+LIBPERF_API unsigned int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
/**
* perf_cpu_map__has_any_cpu_or_is_empty - is map either empty or has the "any CPU"/dummy value.
*/
diff --git a/tools/lib/python/kdoc/kdoc_parser.py b/tools/lib/python/kdoc/kdoc_parser.py
index 74af7ae47aa4..c3f966da533e 100644
--- a/tools/lib/python/kdoc/kdoc_parser.py
+++ b/tools/lib/python/kdoc/kdoc_parser.py
@@ -439,6 +439,11 @@ class KernelDoc:
# Ignore argument attributes
arg = KernRe(r'\sPOS0?\s').sub(' ', arg)
+ # Replace '[at_least ' with '[static '. This allows sphinx to parse
+ # array parameter declarations like 'char A[at_least 4]', where
+ # 'at_least' is #defined to 'static' by the kernel headers.
+ arg = arg.replace('[at_least ', '[static ')
+
# Strip leading/trailing spaces
arg = arg.strip()
arg = KernRe(r'\s+').sub(' ', arg, count=1)
diff --git a/tools/net/ynl/Makefile.deps b/tools/net/ynl/Makefile.deps
index 08205f9fc525..cc53b2f21c44 100644
--- a/tools/net/ynl/Makefile.deps
+++ b/tools/net/ynl/Makefile.deps
@@ -15,9 +15,11 @@ UAPI_PATH:=../../../../include/uapi/
get_hdr_inc=-D$(1) -include $(UAPI_PATH)/linux/$(2)
get_hdr_inc2=-D$(1) -D$(2) -include $(UAPI_PATH)/linux/$(3)
+CFLAGS_dev-energymodel:=$(call get_hdr_inc,_LINUX_DEV_ENERGYMODEL_H,dev_energymodel.h)
CFLAGS_devlink:=$(call get_hdr_inc,_LINUX_DEVLINK_H_,devlink.h)
CFLAGS_dpll:=$(call get_hdr_inc,_LINUX_DPLL_H,dpll.h)
-CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_ETHTOOL_H,ethtool.h) \
+CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_TYPELIMITS_H,typelimits.h) \
+ $(call get_hdr_inc,_LINUX_ETHTOOL_H,ethtool.h) \
$(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h) \
$(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_GENERATED_H,ethtool_netlink_generated.h)
CFLAGS_handshake:=$(call get_hdr_inc,_LINUX_HANDSHAKE_H,handshake.h)
diff --git a/tools/net/ynl/pyynl/ynl_gen_c.py b/tools/net/ynl/pyynl/ynl_gen_c.py
index 0e1e486c1185..cdc3646f2642 100755
--- a/tools/net/ynl/pyynl/ynl_gen_c.py
+++ b/tools/net/ynl/pyynl/ynl_gen_c.py
@@ -3212,6 +3212,8 @@ def render_uapi(family, cw):
for const in family['definitions']:
if const.get('header'):
continue
+ if const.get('scope', 'uapi') != 'uapi':
+ continue
if const['type'] != 'const':
cw.writes_defines(defines)
@@ -3339,6 +3341,25 @@ def render_uapi(family, cw):
cw.p(f'#endif /* {hdr_prot} */')
+def render_scoped_consts(family, cw, scope):
+ defines = []
+ for const in family['definitions']:
+ if const['type'] != 'const':
+ continue
+ if const.get('header'):
+ continue
+ if const.get('scope') != scope:
+ continue
+ name_pfx = const.get('name-prefix', f"{family.ident_name}-")
+ defines.append([
+ c_upper(family.get('c-define-name',
+ f"{name_pfx}{const['name']}")),
+ const['value']])
+ if defines:
+ cw.writes_defines(defines)
+ cw.nl()
+
+
def _render_user_ntf_entry(ri, op):
if not ri.family.is_classic():
ri.cw.block_start(line=f"[{op.enum_name}] = ")
@@ -3504,8 +3525,12 @@ def main():
cw.p('#include "ynl.h"')
headers = []
for definition in parsed['definitions'] + parsed['attribute-sets']:
- if 'header' in definition:
- headers.append(definition['header'])
+ if 'header' not in definition:
+ continue
+ scope = definition.get('scope', 'uapi')
+ if scope != 'uapi' and scope != args.mode:
+ continue
+ headers.append(definition['header'])
if args.mode == 'user':
headers.append(parsed.uapi_header)
seen_header = []
@@ -3522,6 +3547,7 @@ def main():
for one in args.user_header:
cw.p(f'#include "{one}"')
else:
+ render_scoped_consts(parsed, cw, 'user')
cw.p('struct ynl_sock;')
cw.nl()
render_user_family(parsed, cw, True)
@@ -3529,6 +3555,7 @@ def main():
if args.mode == "kernel":
if args.header:
+ render_scoped_consts(parsed, cw, 'kernel')
for _, struct in sorted(parsed.pure_nested_structs.items()):
if struct.request:
cw.p('/* Common nested types */')
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index 547f1a268018..a688738809c4 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -110,8 +110,11 @@ include::itrace.txt[]
Interleave source code with assembly code. Enabled by default,
disable with --no-source.
---symfs=<directory>::
- Look for files with symbols relative to this directory.
+--symfs=<directory[,layout]>::
+ Look for files with symbols relative to this directory. The optional
+ layout can be 'hierarchy' (default, matches full path) or 'flat'
+ (only matches base name). This is useful when debug files are stored
+ in a flat directory structure.
-M::
--disassembler-style=:: Set disassembler style for objdump.
diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
index 1160224cb718..c5913cf59c98 100644
--- a/tools/perf/Documentation/perf-bench.txt
+++ b/tools/perf/Documentation/perf-bench.txt
@@ -274,6 +274,10 @@ Repeat mmap() invocation this number of times.
--cycles::
Use perf's cpu-cycles event instead of gettimeofday syscall.
+-t::
+--threads=<NUM>::
+Create multiple threads to call mmap/munmap concurrently.
+
SUITES FOR 'numa'
~~~~~~~~~~~~~~~~~
*mem*::
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 642d1c490d9e..9b223f892829 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -210,6 +210,12 @@ core.*::
Sets a timeout (in milliseconds) for parsing /proc/<pid>/maps files.
Can be overridden by the --proc-map-timeout option on supported
subcommands. The default timeout is 500ms.
+ addr2line-disable-warn::
+ When set to 'true' disable all warnings from 'addr2line' output.
+ Default setting is 'false' to show these warnings.
+ addr2line-timeout::
+ Sets a timeout (in milliseconds) for parsing 'addr2line'
+ output. The default timeout is 5s.
tui.*, gtk.*::
Subcommands that can be configured here are 'top', 'report' and 'annotate'.
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index 58efab72d2e5..8e4a3f093135 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -81,8 +81,11 @@ OPTIONS
--force::
Don't do ownership validation.
---symfs=<directory>::
- Look for files with symbols relative to this directory.
+--symfs=<directory[,layout]>::
+ Look for files with symbols relative to this directory. The optional
+ layout can be 'hierarchy' (default, matches full path) or 'flat'
+ (only matches base name). This is useful when debug files are stored
+ in a flat directory structure.
-b::
--baseline-only::
diff --git a/tools/perf/Documentation/perf-kwork.txt b/tools/perf/Documentation/perf-kwork.txt
index 21e607669d78..5c33a1fb2ffe 100644
--- a/tools/perf/Documentation/perf-kwork.txt
+++ b/tools/perf/Documentation/perf-kwork.txt
@@ -169,8 +169,11 @@ OPTIONS for 'perf kwork timehist'
--max-stack::
Maximum number of functions to display in backtrace, default 5.
---symfs=<directory>::
- Look for files with symbols relative to this directory.
+--symfs=<directory[,layout]>::
+ Look for files with symbols relative to this directory. The optional
+ layout can be 'hierarchy' (default, matches full path) or 'flat'
+ (only matches base name). This is useful when debug files are stored
+ in a flat directory structure.
--time::
Only analyze samples within given time window: <start>,<stop>. Times
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index 5c43a6edc0e5..2e5790325430 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -50,6 +50,12 @@ OPTIONS
--source=PATH::
Specify path to kernel source.
+--symfs=<directory[,layout]>::
+ Look for files with symbols relative to this directory. The optional
+ layout can be 'hierarchy' (default, matches full path) or 'flat'
+ (only matches base name). This is useful when debug files are stored
+ in a flat directory structure.
+
-v::
--verbose::
Be more verbose (show parsed arguments, etc).
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index acef3ff4178e..22f87eaa3279 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -88,7 +88,7 @@ OPTIONS
Sort histogram entries by given key(s) - multiple keys can be specified
in CSV format. Following sort keys are available:
pid, comm, dso, symbol, parent, cpu, socket, srcline, weight,
- local_weight, cgroup_id, addr.
+ local_weight, cgroup_id, addr, comm_nodigit.
Each key has following meaning:
@@ -136,13 +136,17 @@ OPTIONS
- addr: (Full) virtual address of the sampled instruction
- retire_lat: On X86, this reports pipeline stall of this instruction compared
to the previous instruction in cycles. And currently supported only on X86
- - simd: Flags describing a SIMD operation. "e" for empty Arm SVE predicate. "p" for partial Arm SVE predicate
+ - simd: Flags describing a SIMD operation. The architecture type can be Arm's
+ ASE (Advanced SIMD extension), SVE, SME. It provides an extra tag for
+ predicate: "e" for empty predicate, "p" for partial predicate, "d" for
+ predicate disabled, and "f" for full predicate.
- type: Data type of sample memory access.
- typeoff: Offset in the data type of sample memory access.
- symoff: Offset in the symbol.
- weight1: Average value of event specific weight (1st field of weight_struct).
- weight2: Average value of event specific weight (2nd field of weight_struct).
- weight3: Average value of event specific weight (3rd field of weight_struct).
+ - comm_nodigit: same as comm, with numbers replaced by "<N>"
By default, overhead, comm, dso and symbol keys are used.
(i.e. --sort overhead,comm,dso,symbol).
@@ -368,8 +372,11 @@ OPTIONS
--force::
Don't do ownership validation.
---symfs=<directory>::
- Look for files with symbols relative to this directory.
+--symfs=<directory[,layout]>::
+ Look for files with symbols relative to this directory. The optional
+ layout can be 'hierarchy' (default, matches full path) or 'flat'
+ (only matches base name). This is useful when debug files are stored
+ in a flat directory structure.
-C::
--cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index 4d9981609c04..a4221398e5e0 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -437,8 +437,11 @@ OPTIONS for 'perf sched timehist'
Show all scheduling events followed by a summary by thread with min,
max, and average run times (in sec) and relative stddev.
---symfs=<directory>::
- Look for files with symbols relative to this directory.
+--symfs=<directory[,layout]>::
+ Look for files with symbols relative to this directory. The optional
+ layout can be 'hierarchy' (default, matches full path) or 'flat'
+ (only matches base name). This is useful when debug files are stored
+ in a flat directory structure.
-V::
--cpu-visual::
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index ddf92f9c7821..200ea25891d8 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -307,8 +307,11 @@ OPTIONS
--kallsyms=<file>::
kallsyms pathname
---symfs=<directory>::
- Look for files with symbols relative to this directory.
+--symfs=<directory[,layout]>::
+ Look for files with symbols relative to this directory. The optional
+ layout can be 'hierarchy' (default, matches full path) or 'flat'
+ (only matches base name). This is useful when debug files are stored
+ in a flat directory structure.
-G::
--hide-call-graph::
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 7cccc3a847d1..b72a29c9223c 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -578,6 +578,10 @@ $ perf config stat.no-csv-summary=true
Only enable events on applying cpu with this type for hybrid platform
(e.g. core or atom)"
+--pmu-filter::
+Only enable events on applying pmu with specified for multiple
+pmus with same type (e.g. hisi_sicl2_cpa0 or hisi_sicl0_cpa0)
+
EXAMPLES
--------
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index ef2281c56743..bacc5df3c400 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -53,8 +53,11 @@ TIMECHART OPTIONS
-f::
--force::
Don't complain, do it.
---symfs=<directory>::
- Look for files with symbols relative to this directory.
+--symfs=<directory[,layout]>::
+ Look for files with symbols relative to this directory. The optional
+ layout can be 'hierarchy' (default, matches full path) or 'flat'
+ (only matches base name). This is useful when debug files are stored
+ in a flat directory structure.
-n::
--proc-num::
Print task info for at least given number of tasks.
diff --git a/tools/perf/Documentation/tips.txt b/tools/perf/Documentation/tips.txt
index 3fee9b2a88ea..ebf12a8c5db5 100644
--- a/tools/perf/Documentation/tips.txt
+++ b/tools/perf/Documentation/tips.txt
@@ -11,7 +11,7 @@ Search options using a keyword: perf report -h <keyword>
Use parent filter to see specific call path: perf report -p <regex>
List events using substring match: perf list <keyword>
To see list of saved events and attributes: perf evlist -v
-Use --symfs <dir> if your symbol files are in non-standard locations
+Use --symfs <dir>[,layout] if your symbol files are in non-standard locations.
To see callchains in a more compact form: perf report -g folded
To see call chains by final symbol taking CPU time (bottom up) use perf report -G
Show individual samples with: perf script
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 15fbba9f4ca8..333ddd0e4bd8 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -349,6 +349,7 @@ CORE_CFLAGS += -fno-omit-frame-pointer
CORE_CFLAGS += -Wall
CORE_CFLAGS += -Wextra
CORE_CFLAGS += -std=gnu11
+CORE_CFLAGS += -funsigned-char
CXXFLAGS += -std=gnu++17 -fno-exceptions -fno-rtti
CXXFLAGS += -Wall
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index f7b936deeaa2..cee19c923c06 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -547,7 +547,7 @@ $(clone_flags_array): $(beauty_uapi_linux_dir)/sched.h $(clone_flags_tbl)
$(Q)$(SHELL) '$(clone_flags_tbl)' $(beauty_uapi_linux_dir) > $@
drm_ioctl_array := $(beauty_ioctl_outdir)/drm_ioctl_array.c
-drm_hdr_dir := $(srctree)/tools/include/uapi/drm
+drm_hdr_dir := $(srctree)/tools/perf/trace/beauty/include/uapi/drm
drm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/drm_ioctl.sh
$(drm_ioctl_array): $(drm_hdr_dir)/drm.h $(drm_hdr_dir)/i915_drm.h $(drm_ioctl_tbl)
@@ -556,8 +556,8 @@ $(drm_ioctl_array): $(drm_hdr_dir)/drm.h $(drm_hdr_dir)/i915_drm.h $(drm_ioctl_t
fadvise_advice_array := $(beauty_outdir)/fadvise_advice_array.c
fadvise_advice_tbl := $(srctree)/tools/perf/trace/beauty/fadvise.sh
-$(fadvise_advice_array): $(linux_uapi_dir)/in.h $(fadvise_advice_tbl)
- $(Q)$(SHELL) '$(fadvise_advice_tbl)' $(linux_uapi_dir) > $@
+$(fadvise_advice_array): $(beauty_uapi_linux_dir)/fadvise.h $(fadvise_advice_tbl)
+ $(Q)$(SHELL) '$(fadvise_advice_tbl)' $(beauty_uapi_linux_dir) > $@
fsmount_arrays := $(beauty_outdir)/fsmount_arrays.c
fsmount_tbls := $(srctree)/tools/perf/trace/beauty/fsmount.sh
diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c
index eb6404267f17..27bb14c8b880 100644
--- a/tools/perf/arch/arm/util/auxtrace.c
+++ b/tools/perf/arch/arm/util/auxtrace.c
@@ -8,7 +8,7 @@
#include <errno.h>
#include <stdbool.h>
#include <linux/coresight-pmu.h>
-#include <linux/zalloc.h>
+#include <stdlib.h>
#include <api/fs/fs.h>
#include "../../../util/auxtrace.h"
@@ -27,7 +27,7 @@ static struct perf_pmu **find_all_arm_spe_pmus(int *nr_spes, int *err)
/* arm_spe_xxxxxxxxx\0 */
char arm_spe_pmu_name[sizeof(ARM_SPE_PMU_NAME) + 10];
- arm_spe_pmus = zalloc(sizeof(struct perf_pmu *) * nr_cpus);
+ arm_spe_pmus = calloc(nr_cpus, sizeof(struct perf_pmu *));
if (!arm_spe_pmus) {
pr_err("spes alloc failed\n");
*err = -ENOMEM;
@@ -79,7 +79,7 @@ static struct perf_pmu **find_all_hisi_ptt_pmus(int *nr_ptts, int *err)
if (!(*nr_ptts))
goto out;
- hisi_ptt_pmus = zalloc(sizeof(struct perf_pmu *) * (*nr_ptts));
+ hisi_ptt_pmus = calloc((*nr_ptts), sizeof(struct perf_pmu *));
if (!hisi_ptt_pmus) {
pr_err("hisi_ptt alloc failed\n");
*err = -ENOMEM;
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 4418d21708d6..b7a839de8707 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -197,7 +197,8 @@ static struct perf_pmu *cs_etm_get_pmu(struct auxtrace_record *itr)
static int cs_etm_validate_config(struct perf_pmu *cs_etm_pmu,
struct evsel *evsel)
{
- int idx, err = 0;
+ unsigned int idx;
+ int err = 0;
struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
struct perf_cpu_map *intersect_cpus;
struct perf_cpu cpu;
@@ -546,7 +547,7 @@ static size_t
cs_etm_info_priv_size(struct auxtrace_record *itr,
struct evlist *evlist)
{
- int idx;
+ unsigned int idx;
int etmv3 = 0, etmv4 = 0, ete = 0;
struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
struct perf_cpu_map *intersect_cpus;
@@ -783,7 +784,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
struct perf_record_auxtrace_info *info,
size_t priv_size)
{
- int i;
+ unsigned int i;
u32 offset;
u64 nr_cpu, type;
struct perf_cpu_map *cpu_map;
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index 17ced7bbbdda..f00d72d087fc 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -144,7 +144,8 @@ static int arm_spe_info_fill(struct auxtrace_record *itr,
struct perf_record_auxtrace_info *auxtrace_info,
size_t priv_size)
{
- int i, ret;
+ unsigned int i;
+ int ret;
size_t offset;
struct arm_spe_recording *sper =
container_of(itr, struct arm_spe_recording, itr);
diff --git a/tools/perf/arch/arm64/util/header.c b/tools/perf/arch/arm64/util/header.c
index cbc0ba101636..95e71c4f6c78 100644
--- a/tools/perf/arch/arm64/util/header.c
+++ b/tools/perf/arch/arm64/util/header.c
@@ -43,7 +43,7 @@ static int _get_cpuid(char *buf, size_t sz, struct perf_cpu cpu)
int get_cpuid(char *buf, size_t sz, struct perf_cpu cpu)
{
struct perf_cpu_map *cpus;
- int idx;
+ unsigned int idx;
if (cpu.cpu != -1)
return _get_cpuid(buf, sz, cpu);
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 4908d54dd33b..21836f70f231 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -9,14 +9,14 @@
#include "../util/debug.h"
#include <linux/zalloc.h>
-const char *const arc_triplets[] = {
+static const char *const arc_triplets[] = {
"arc-linux-",
"arc-snps-linux-uclibc-",
"arc-snps-linux-gnu-",
NULL
};
-const char *const arm_triplets[] = {
+static const char *const arm_triplets[] = {
"arm-eabi-",
"arm-linux-androideabi-",
"arm-unknown-linux-",
@@ -28,13 +28,13 @@ const char *const arm_triplets[] = {
NULL
};
-const char *const arm64_triplets[] = {
+static const char *const arm64_triplets[] = {
"aarch64-linux-android-",
"aarch64-linux-gnu-",
NULL
};
-const char *const powerpc_triplets[] = {
+static const char *const powerpc_triplets[] = {
"powerpc-unknown-linux-gnu-",
"powerpc-linux-gnu-",
"powerpc64-unknown-linux-gnu-",
@@ -43,40 +43,40 @@ const char *const powerpc_triplets[] = {
NULL
};
-const char *const riscv32_triplets[] = {
+static const char *const riscv32_triplets[] = {
"riscv32-unknown-linux-gnu-",
"riscv32-linux-android-",
"riscv32-linux-gnu-",
NULL
};
-const char *const riscv64_triplets[] = {
+static const char *const riscv64_triplets[] = {
"riscv64-unknown-linux-gnu-",
"riscv64-linux-android-",
"riscv64-linux-gnu-",
NULL
};
-const char *const s390_triplets[] = {
+static const char *const s390_triplets[] = {
"s390-ibm-linux-",
"s390x-linux-gnu-",
NULL
};
-const char *const sh_triplets[] = {
+static const char *const sh_triplets[] = {
"sh-unknown-linux-gnu-",
"sh-linux-gnu-",
NULL
};
-const char *const sparc_triplets[] = {
+static const char *const sparc_triplets[] = {
"sparc-unknown-linux-gnu-",
"sparc64-unknown-linux-gnu-",
"sparc64-linux-gnu-",
NULL
};
-const char *const x86_triplets[] = {
+static const char *const x86_triplets[] = {
"x86_64-pc-linux-gnu-",
"x86_64-unknown-linux-gnu-",
"i686-pc-linux-gnu-",
@@ -90,7 +90,7 @@ const char *const x86_triplets[] = {
NULL
};
-const char *const mips_triplets[] = {
+static const char *const mips_triplets[] = {
"mips-unknown-linux-gnu-",
"mipsel-linux-android-",
"mips-linux-gnu-",
diff --git a/tools/perf/arch/loongarch/util/Build b/tools/perf/arch/loongarch/util/Build
index 3ad73d0289f3..8d91e78d31c9 100644
--- a/tools/perf/arch/loongarch/util/Build
+++ b/tools/perf/arch/loongarch/util/Build
@@ -1,4 +1,3 @@
perf-util-y += header.o
perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/powerpc/util/auxtrace.c b/tools/perf/arch/powerpc/util/auxtrace.c
index 292ea335e4ff..e39deff6c857 100644
--- a/tools/perf/arch/powerpc/util/auxtrace.c
+++ b/tools/perf/arch/powerpc/util/auxtrace.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/zalloc.h>
#include "../../util/evlist.h"
#include "../../util/debug.h"
diff --git a/tools/perf/arch/sh/include/dwarf-regs-table.h b/tools/perf/arch/sh/include/dwarf-regs-table.h
index 900e69619970..b5974a090fb4 100644
--- a/tools/perf/arch/sh/include/dwarf-regs-table.h
+++ b/tools/perf/arch/sh/include/dwarf-regs-table.h
@@ -2,7 +2,7 @@
#ifdef DEFINE_DWARF_REGSTR_TABLE
/* This is included in perf/util/dwarf-regs.c */
-const char * const sh_regstr_tbl[] = {
+static const char * const sh_regstr_tbl[] = {
"r0",
"r1",
"r2",
diff --git a/tools/perf/arch/x86/tests/amd-ibs-period.c b/tools/perf/arch/x86/tests/amd-ibs-period.c
index 223e059e04de..cee9e11c05e0 100644
--- a/tools/perf/arch/x86/tests/amd-ibs-period.c
+++ b/tools/perf/arch/x86/tests/amd-ibs-period.c
@@ -8,7 +8,6 @@
#include "arch-tests.h"
#include "linux/perf_event.h"
-#include "linux/zalloc.h"
#include "tests/tests.h"
#include "../perf-sys.h"
#include "pmu.h"
@@ -60,7 +59,7 @@ static int dummy_workload_1(unsigned long count)
0xcc, /* int 3 */
};
- p = zalloc(2 * page_size);
+ p = calloc(2, page_size);
if (!p) {
printf("malloc() failed. %m");
return 1;
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index e91a73d09cec..99d2b7ed016f 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -54,22 +54,13 @@ int test__arch_unwind_sample(struct perf_sample *sample,
struct thread *thread)
{
struct regs_dump *regs = perf_sample__user_regs(sample);
- u64 *buf;
+ u64 *buf = calloc(PERF_REGS_MAX, sizeof(u64));
- buf = malloc(sizeof(u64) * PERF_REGS_MAX);
if (!buf) {
pr_debug("failed to allocate sample uregs data\n");
return -1;
}
-#ifdef MEMORY_SANITIZER
- /*
- * Assignments to buf in the assembly function perf_regs_load aren't
- * seen by memory sanitizer. Zero the memory to convince memory
- * sanitizer the memory is initialized.
- */
- memset(buf, 0, sizeof(u64) * PERF_REGS_MAX);
-#endif
perf_regs_load(buf);
regs->abi = PERF_SAMPLE_REGS_ABI;
regs->regs = buf;
diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c
index a3f96221758d..7c9d238922a6 100644
--- a/tools/perf/arch/x86/util/pmu.c
+++ b/tools/perf/arch/x86/util/pmu.c
@@ -5,8 +5,8 @@
#include <dirent.h>
#include <fcntl.h>
#include <linux/stddef.h>
+#include <linux/string.h>
#include <linux/perf_event.h>
-#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <api/io_dir.h>
#include <internal/cpumap.h>
@@ -71,11 +71,6 @@ static int snc_nodes_per_l3_cache(void)
return snc_nodes;
}
-static bool starts_with(const char *str, const char *prefix)
-{
- return !strncmp(prefix, str, strlen(prefix));
-}
-
static int num_chas(void)
{
static bool checked_chas;
@@ -93,7 +88,7 @@ static int num_chas(void)
while ((dent = io_dir__readdir(&dir)) != NULL) {
/* Note, dent->d_type will be DT_LNK and so isn't a useful filter. */
- if (starts_with(dent->d_name, "uncore_cha_"))
+ if (strstarts(dent->d_name, "uncore_cha_"))
num_chas++;
}
close(fd);
@@ -225,7 +220,8 @@ static void gnr_uncore_cha_imc_adjust_cpumask_for_snc(struct perf_pmu *pmu, bool
static struct perf_cpu_map *cha_adjusted[MAX_SNCS];
static struct perf_cpu_map *imc_adjusted[MAX_SNCS];
struct perf_cpu_map **adjusted = cha ? cha_adjusted : imc_adjusted;
- int idx, pmu_snc, cpu_adjust;
+ unsigned int idx;
+ int pmu_snc, cpu_adjust;
struct perf_cpu cpu;
bool alloc;
@@ -305,9 +301,9 @@ void perf_pmu__arch_init(struct perf_pmu *pmu)
else
pmu->mem_events = perf_mem_events_intel;
} else if (x86__is_intel_graniterapids()) {
- if (starts_with(pmu->name, "uncore_cha_"))
+ if (strstarts(pmu->name, "uncore_cha_"))
gnr_uncore_cha_imc_adjust_cpumask_for_snc(pmu, /*cha=*/true);
- else if (starts_with(pmu->name, "uncore_imc_"))
+ else if (strstarts(pmu->name, "uncore_imc_"))
gnr_uncore_cha_imc_adjust_cpumask_for_snc(pmu, /*cha=*/false);
}
}
diff --git a/tools/perf/bench/breakpoint.c b/tools/perf/bench/breakpoint.c
index dfd18f5db97d..1b7cd4481bd2 100644
--- a/tools/perf/bench/breakpoint.c
+++ b/tools/perf/bench/breakpoint.c
@@ -16,7 +16,7 @@
#include "bench.h"
#include "futex.h"
-struct {
+static struct {
unsigned int nbreakpoints;
unsigned int nparallel;
unsigned int nthreads;
@@ -173,7 +173,7 @@ int bench_breakpoint_thread(int argc, const char **argv)
return 0;
}
-struct {
+static struct {
unsigned int npassive;
unsigned int nactive;
} enable_params = {
diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c
index 2908a3a796c9..5ede52853953 100644
--- a/tools/perf/bench/mem-functions.c
+++ b/tools/perf/bench/mem-functions.c
@@ -7,13 +7,14 @@
* Written by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
*/
-#include "debug.h"
+#include "bench.h"
#include "../perf-sys.h"
#include <subcmd/parse-options.h>
-#include "../util/header.h"
-#include "../util/cloexec.h"
-#include "../util/string2.h"
-#include "bench.h"
+#include "util/cloexec.h"
+#include "util/debug.h"
+#include "util/header.h"
+#include "util/stat.h"
+#include "util/string2.h"
#include "mem-memcpy-arch.h"
#include "mem-memset-arch.h"
@@ -26,6 +27,7 @@
#include <errno.h>
#include <linux/time64.h>
#include <linux/log2.h>
+#include <pthread.h>
#define K 1024
@@ -41,6 +43,7 @@ static unsigned int nr_loops = 1;
static bool use_cycles;
static int cycles_fd;
static unsigned int seed;
+static unsigned int nr_threads = 1;
static const struct option bench_common_options[] = {
OPT_STRING('s', "size", &size_str, "1MB",
@@ -121,6 +124,8 @@ static struct perf_event_attr cycle_attr = {
.config = PERF_COUNT_HW_CPU_CYCLES
};
+static struct stats stats;
+
static int init_cycles(void)
{
cycles_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, perf_event_open_cloexec_flag());
@@ -174,18 +179,18 @@ static void clock_accum(union bench_clock *a, union bench_clock *b)
static double timeval2double(struct timeval *ts)
{
- return (double)ts->tv_sec + (double)ts->tv_usec / (double)USEC_PER_SEC;
+ return ((double)ts->tv_sec + (double)ts->tv_usec / (double)USEC_PER_SEC) / nr_threads;
}
#define print_bps(x) do { \
if (x < K) \
- printf(" %14lf bytes/sec\n", x); \
+ printf(" %14lf bytes/sec", x); \
else if (x < K * K) \
- printf(" %14lfd KB/sec\n", x / K); \
+ printf(" %14lfd KB/sec", x / K); \
else if (x < K * K * K) \
- printf(" %14lf MB/sec\n", x / K / K); \
+ printf(" %14lf MB/sec", x / K / K); \
else \
- printf(" %14lf GB/sec\n", x / K / K / K); \
+ printf(" %14lf GB/sec", x / K / K / K); \
} while (0)
static void __bench_mem_function(struct bench_mem_info *info, struct bench_params *p,
@@ -196,6 +201,7 @@ static void __bench_mem_function(struct bench_mem_info *info, struct bench_param
union bench_clock rt = { 0 };
void *src = NULL, *dst = NULL;
+ init_stats(&stats);
printf("# function '%s' (%s)\n", r->name, r->desc);
if (r->fn.init && r->fn.init(info, p, &src, &dst))
@@ -210,11 +216,16 @@ static void __bench_mem_function(struct bench_mem_info *info, struct bench_param
switch (bench_format) {
case BENCH_FORMAT_DEFAULT:
if (use_cycles) {
- printf(" %14lf cycles/byte\n", (double)rt.cycles/(double)p->size_total);
+ printf(" %14lf cycles/byte", (double)rt.cycles/(double)p->size_total);
} else {
result_bps = (double)p->size_total/timeval2double(&rt.tv);
print_bps(result_bps);
}
+ if (nr_threads > 1) {
+ printf("/thread\t( +- %6.2f%% )",
+ rel_stddev_stats(stddev_stats(&stats), avg_stats(&stats)));
+ }
+ printf("\n");
break;
case BENCH_FORMAT_SIMPLE:
@@ -388,7 +399,7 @@ static void mem_free(struct bench_mem_info *info __maybe_unused,
*dst = *src = NULL;
}
-struct function memcpy_functions[] = {
+static struct function memcpy_functions[] = {
{ .name = "default",
.desc = "Default memcpy() provided by glibc",
.fn.init = mem_alloc,
@@ -494,16 +505,27 @@ static void mmap_page_touch(void *dst, size_t size, unsigned int page_shift, boo
}
}
-static int do_mmap(const struct function *r, struct bench_params *p,
- void *src __maybe_unused, void *dst __maybe_unused,
- union bench_clock *accum)
+struct mmap_data {
+ pthread_t id;
+ const struct function *func;
+ struct bench_params *params;
+ union bench_clock result;
+ unsigned int seed;
+ int error;
+};
+
+static void *do_mmap_thread(void *arg)
{
+ struct mmap_data *data = arg;
+ const struct function *r = data->func;
+ struct bench_params *p = data->params;
union bench_clock start, end, diff;
mmap_op_t fn = r->fn.mmap_op;
bool populate = strcmp(r->name, "populate") == 0;
+ void *dst;
- if (p->seed)
- srand(p->seed);
+ if (data->seed)
+ srand(data->seed);
for (unsigned int i = 0; i < p->nr_loops; i++) {
clock_get(&start);
@@ -514,16 +536,59 @@ static int do_mmap(const struct function *r, struct bench_params *p,
fn(dst, p->size, p->page_shift, p->seed);
clock_get(&end);
diff = clock_diff(&start, &end);
- clock_accum(accum, &diff);
+ clock_accum(&data->result, &diff);
bench_munmap(dst, p->size);
}
- return 0;
+ return data;
out:
- printf("# Memory allocation failed - maybe size (%s) %s?\n", size_str,
- p->page_shift != PAGE_SHIFT_4KB ? "has insufficient hugepages" : "is too large");
- return -1;
+ data->error = -ENOMEM;
+ return NULL;
+}
+
+static int do_mmap(const struct function *r, struct bench_params *p,
+ void *src __maybe_unused, void *dst __maybe_unused,
+ union bench_clock *accum)
+{
+ struct mmap_data *data;
+ int error = 0;
+
+ data = calloc(nr_threads, sizeof(*data));
+ if (!data) {
+ printf("# Failed to allocate thread resources\n");
+ return -1;
+ }
+
+ for (unsigned int i = 0; i < nr_threads; i++) {
+ data[i].func = r;
+ data[i].params = p;
+ if (p->seed)
+ data[i].seed = p->seed + i;
+
+ if (pthread_create(&data[i].id, NULL, do_mmap_thread, &data[i]) < 0)
+ data[i].error = -errno;
+ }
+
+ for (unsigned int i = 0; i < nr_threads; i++) {
+ union bench_clock *t = &data[i].result;
+
+ pthread_join(data[i].id, NULL);
+
+ clock_accum(accum, t);
+ if (use_cycles)
+ update_stats(&stats, t->cycles);
+ else
+ update_stats(&stats, t->tv.tv_sec * 1e6 + t->tv.tv_usec);
+ error |= data[i].error;
+ }
+ free(data);
+
+ if (error) {
+ printf("# Memory allocation failed - maybe size (%s) %s?\n", size_str,
+ p->page_shift != PAGE_SHIFT_4KB ? "has insufficient hugepages" : "is too large");
+ }
+ return error ? -1 : 0;
}
static const char * const bench_mem_mmap_usage[] = {
@@ -548,6 +613,8 @@ int bench_mem_mmap(int argc, const char **argv)
static const struct option bench_mmap_options[] = {
OPT_UINTEGER('r', "randomize", &seed,
"Seed to randomize page access offset."),
+ OPT_UINTEGER('t', "threads", &nr_threads,
+ "Number of threads to run concurrently (default: 1)."),
OPT_PARENT(bench_common_options),
OPT_END()
};
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 19be2aaf4dc0..42d7afc03f9b 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -32,7 +32,6 @@
#include <linux/kernel.h>
#include <linux/time64.h>
#include <linux/numa.h>
-#include <linux/zalloc.h>
#include "../util/header.h"
#include "../util/mutex.h"
@@ -166,7 +165,7 @@ static struct global_info *g = NULL;
static int parse_cpus_opt(const struct option *opt, const char *arg, int unset);
static int parse_nodes_opt(const struct option *opt, const char *arg, int unset);
-struct params p0;
+static struct params p0;
static const struct option options[] = {
OPT_INTEGER('p', "nr_proc" , &p0.nr_proc, "number of processes"),
@@ -980,10 +979,8 @@ static int count_process_nodes(int process_nr)
int nodes;
int n, t;
- node_present = (char *)malloc(g->p.nr_nodes * sizeof(char));
+ node_present = calloc(g->p.nr_nodes, sizeof(char));
BUG_ON(!node_present);
- for (nodes = 0; nodes < g->p.nr_nodes; nodes++)
- node_present[nodes] = 0;
for (t = 0; t < g->p.nr_threads; t++) {
struct thread_data *td;
@@ -1090,10 +1087,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
if (!g->p.show_convergence && !g->p.measure_convergence)
return;
- nodes = (int *)malloc(g->p.nr_nodes * sizeof(int));
+ nodes = calloc(g->p.nr_nodes, sizeof(int));
BUG_ON(!nodes);
- for (node = 0; node < g->p.nr_nodes; node++)
- nodes[node] = 0;
loops_done_min = -1;
loops_done_max = 0;
@@ -1423,7 +1418,7 @@ static void worker_process(int process_nr)
bind_to_memnode(td->bind_node);
bind_to_cpumask(td->bind_cpumask);
- pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t));
+ pthreads = calloc(g->p.nr_threads, sizeof(pthread_t));
process_data = setup_private_data(g->p.bytes_process);
if (g->p.show_details >= 3) {
@@ -1629,7 +1624,7 @@ static int __bench_numa(const char *name)
if (init())
return -1;
- pids = zalloc(g->p.nr_proc * sizeof(*pids));
+ pids = calloc(g->p.nr_proc, sizeof(*pids));
pid = -1;
if (g->p.serialize_startup) {
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index 93dcd9dba3d0..4fb6657fc826 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -301,7 +301,7 @@ int bench_sched_messaging(int argc, const char **argv)
argc = parse_options(argc, argv, options,
bench_sched_message_usage, 0);
- worker_tab = malloc(num_fds * 2 * num_groups * sizeof(union messaging_worker));
+ worker_tab = calloc(num_fds * 2 * num_groups, sizeof(union messaging_worker));
if (!worker_tab)
err(EXIT_FAILURE, "main:malloc()");
diff --git a/tools/perf/bench/uprobe.c b/tools/perf/bench/uprobe.c
index c4dac868f1ee..89697ff788ef 100644
--- a/tools/perf/bench/uprobe.c
+++ b/tools/perf/bench/uprobe.c
@@ -58,7 +58,7 @@ static const char * const bench_uprobe_usage[] = {
goto cleanup; \
}
-struct bench_uprobe_bpf *skel;
+static struct bench_uprobe_bpf *skel;
static int bench_uprobe__setup_bpf_skel(enum bench_uprobe bench)
{
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 9c27bb30b708..5e57b78548f4 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -13,7 +13,6 @@
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
-#include <linux/zalloc.h>
#include "util/symbol.h"
#include "util/debug.h"
@@ -313,15 +312,6 @@ out_put:
return ret;
}
-static int process_feature_event(const struct perf_tool *tool __maybe_unused,
- struct perf_session *session,
- union perf_event *event)
-{
- if (event->feat.feat_id < HEADER_LAST_FEATURE)
- return perf_event__process_feature(session, event);
- return 0;
-}
-
static int hist_entry__stdio_annotate(struct hist_entry *he,
struct evsel *evsel,
struct perf_annotate *ann)
@@ -744,8 +734,7 @@ int cmd_annotate(int argc, const char **argv)
&annotate.group_set,
"Show event group information together"),
OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
- OPT_CALLBACK(0, "symfs", NULL, "directory",
- "Look for files with symbols relative to this directory",
+ OPT_CALLBACK(0, "symfs", NULL, "directory[,layout]", SYMFS_HELP,
symbol__config_symfs),
OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
"Interleave source code with assembly code (default)"),
@@ -876,7 +865,7 @@ int cmd_annotate(int argc, const char **argv)
annotate.tool.id_index = perf_event__process_id_index;
annotate.tool.auxtrace_info = perf_event__process_auxtrace_info;
annotate.tool.auxtrace = perf_event__process_auxtrace;
- annotate.tool.feature = process_feature_event;
+ annotate.tool.feature = perf_event__process_feature;
annotate.tool.ordering_requires_timestamps = true;
annotate.session = perf_session__new(&data, &annotate.tool);
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index 02dea1b88228..02d47913cc6a 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -37,14 +37,14 @@ struct bench {
};
#ifdef HAVE_LIBNUMA_SUPPORT
-static struct bench numa_benchmarks[] = {
+static const struct bench numa_benchmarks[] = {
{ "mem", "Benchmark for NUMA workloads", bench_numa },
{ "all", "Run all NUMA benchmarks", NULL },
{ NULL, NULL, NULL }
};
#endif
-static struct bench sched_benchmarks[] = {
+static const struct bench sched_benchmarks[] = {
{ "messaging", "Benchmark for scheduling and IPC", bench_sched_messaging },
{ "pipe", "Benchmark for pipe() between two processes", bench_sched_pipe },
{ "seccomp-notify", "Benchmark for seccomp user notify", bench_sched_seccomp_notify},
@@ -52,7 +52,7 @@ static struct bench sched_benchmarks[] = {
{ NULL, NULL, NULL }
};
-static struct bench syscall_benchmarks[] = {
+static const struct bench syscall_benchmarks[] = {
{ "basic", "Benchmark for basic getppid(2) calls", bench_syscall_basic },
{ "getpgid", "Benchmark for getpgid(2) calls", bench_syscall_getpgid },
{ "fork", "Benchmark for fork(2) calls", bench_syscall_fork },
@@ -61,7 +61,7 @@ static struct bench syscall_benchmarks[] = {
{ NULL, NULL, NULL },
};
-static struct bench mem_benchmarks[] = {
+static const struct bench mem_benchmarks[] = {
{ "memcpy", "Benchmark for memcpy() functions", bench_mem_memcpy },
{ "memset", "Benchmark for memset() functions", bench_mem_memset },
{ "find_bit", "Benchmark for find_bit() functions", bench_mem_find_bit },
@@ -70,7 +70,7 @@ static struct bench mem_benchmarks[] = {
{ NULL, NULL, NULL }
};
-static struct bench futex_benchmarks[] = {
+static const struct bench futex_benchmarks[] = {
{ "hash", "Benchmark for futex hash table", bench_futex_hash },
{ "wake", "Benchmark for futex wake calls", bench_futex_wake },
{ "wake-parallel", "Benchmark for parallel futex wake calls", bench_futex_wake_parallel },
@@ -82,7 +82,7 @@ static struct bench futex_benchmarks[] = {
};
#ifdef HAVE_EVENTFD_SUPPORT
-static struct bench epoll_benchmarks[] = {
+static const struct bench epoll_benchmarks[] = {
{ "wait", "Benchmark epoll concurrent epoll_waits", bench_epoll_wait },
{ "ctl", "Benchmark epoll concurrent epoll_ctls", bench_epoll_ctl },
{ "all", "Run all futex benchmarks", NULL },
@@ -90,7 +90,7 @@ static struct bench epoll_benchmarks[] = {
};
#endif // HAVE_EVENTFD_SUPPORT
-static struct bench internals_benchmarks[] = {
+static const struct bench internals_benchmarks[] = {
{ "synthesize", "Benchmark perf event synthesis", bench_synthesize },
{ "kallsyms-parse", "Benchmark kallsyms parsing", bench_kallsyms_parse },
{ "inject-build-id", "Benchmark build-id injection", bench_inject_build_id },
@@ -99,14 +99,14 @@ static struct bench internals_benchmarks[] = {
{ NULL, NULL, NULL }
};
-static struct bench breakpoint_benchmarks[] = {
+static const struct bench breakpoint_benchmarks[] = {
{ "thread", "Benchmark thread start/finish with breakpoints", bench_breakpoint_thread},
{ "enable", "Benchmark breakpoint enable/disable", bench_breakpoint_enable},
{ "all", "Run all breakpoint benchmarks", NULL},
{ NULL, NULL, NULL },
};
-static struct bench uprobe_benchmarks[] = {
+static const struct bench uprobe_benchmarks[] = {
{ "baseline", "Baseline libc usleep(1000) call", bench_uprobe_baseline, },
{ "empty", "Attach empty BPF prog to uprobe on usleep, system wide", bench_uprobe_empty, },
{ "trace_printk", "Attach trace_printk BPF prog to uprobe on usleep syswide", bench_uprobe_trace_printk, },
@@ -116,12 +116,12 @@ static struct bench uprobe_benchmarks[] = {
};
struct collection {
- const char *name;
- const char *summary;
- struct bench *benchmarks;
+ const char *name;
+ const char *summary;
+ const struct bench *benchmarks;
};
-static struct collection collections[] = {
+static const struct collection collections[] = {
{ "sched", "Scheduler and IPC benchmarks", sched_benchmarks },
{ "syscall", "System call benchmarks", syscall_benchmarks },
{ "mem", "Memory access benchmarks", mem_benchmarks },
@@ -147,9 +147,9 @@ static struct collection collections[] = {
#define for_each_bench(coll, bench) \
for (bench = coll->benchmarks; bench && bench->name; bench++)
-static void dump_benchmarks(struct collection *coll)
+static void dump_benchmarks(const struct collection *coll)
{
- struct bench *bench;
+ const struct bench *bench;
printf("\n # List of available benchmarks for collection '%s':\n\n", coll->name);
@@ -178,7 +178,7 @@ static const char * const bench_usage[] = {
static void print_usage(void)
{
- struct collection *coll;
+ const struct collection *coll;
int i;
printf("Usage: \n");
@@ -234,9 +234,9 @@ static int run_bench(const char *coll_name, const char *bench_name, bench_fn_t f
return ret;
}
-static void run_collection(struct collection *coll)
+static void run_collection(const struct collection *coll)
{
- struct bench *bench;
+ const struct bench *bench;
const char *argv[2];
argv[1] = NULL;
@@ -260,7 +260,7 @@ static void run_collection(struct collection *coll)
static void run_all_collections(void)
{
- struct collection *coll;
+ const struct collection *coll;
for_each_collection(coll)
run_collection(coll);
@@ -268,7 +268,7 @@ static void run_all_collections(void)
int cmd_bench(int argc, const char **argv)
{
- struct collection *coll;
+ const struct collection *coll;
int ret = 0;
/* Unbuffered output */
@@ -306,7 +306,7 @@ int cmd_bench(int argc, const char **argv)
}
for_each_collection(coll) {
- struct bench *bench;
+ const struct bench *bench;
if (strcmp(coll->name, argv[0]))
continue;
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index d390ae4e3ec8..72a7802775ee 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -155,7 +155,7 @@ static void *c2c_he_zalloc(size_t size)
if (!c2c_he->nodeset)
goto out_free;
- c2c_he->node_stats = zalloc(c2c.nodes_cnt * sizeof(*c2c_he->node_stats));
+ c2c_he->node_stats = calloc(c2c.nodes_cnt, sizeof(*c2c_he->node_stats));
if (!c2c_he->node_stats)
goto out_free;
@@ -2310,7 +2310,6 @@ static int setup_nodes(struct perf_session *session)
{
struct numa_node *n;
unsigned long **nodes;
- int node, idx;
struct perf_cpu cpu;
int *cpu2node;
struct perf_env *env = perf_session__env(session);
@@ -2325,24 +2324,25 @@ static int setup_nodes(struct perf_session *session)
if (!n)
return -EINVAL;
- nodes = zalloc(sizeof(unsigned long *) * c2c.nodes_cnt);
+ nodes = calloc(c2c.nodes_cnt, sizeof(unsigned long *));
if (!nodes)
return -ENOMEM;
c2c.nodes = nodes;
- cpu2node = zalloc(sizeof(int) * c2c.cpus_cnt);
+ cpu2node = calloc(c2c.cpus_cnt, sizeof(int));
if (!cpu2node)
return -ENOMEM;
- for (idx = 0; idx < c2c.cpus_cnt; idx++)
+ for (int idx = 0; idx < c2c.cpus_cnt; idx++)
cpu2node[idx] = -1;
c2c.cpu2node = cpu2node;
- for (node = 0; node < c2c.nodes_cnt; node++) {
+ for (int node = 0; node < c2c.nodes_cnt; node++) {
struct perf_cpu_map *map = n[node].map;
unsigned long *set;
+ unsigned int idx;
set = bitmap_zalloc(c2c.cpus_cnt);
if (!set)
@@ -2892,9 +2892,10 @@ static int ui_quirks(void)
#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
-const char callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
- CALLCHAIN_REPORT_HELP
- "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
+static const char callchain_help[] =
+ "Display call graph (stack chain/backtrace):\n\n"
+ CALLCHAIN_REPORT_HELP
+ "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
diff --git a/tools/perf/builtin-config.c b/tools/perf/builtin-config.c
index 45b5312fbe83..237600643bbd 100644
--- a/tools/perf/builtin-config.c
+++ b/tools/perf/builtin-config.c
@@ -23,7 +23,7 @@ static const char * const config_usage[] = {
NULL
};
-enum actions {
+static enum actions {
ACTION_LIST = 1
} actions;
diff --git a/tools/perf/builtin-daemon.c b/tools/perf/builtin-daemon.c
index 33473e071392..c4632577d129 100644
--- a/tools/perf/builtin-daemon.c
+++ b/tools/perf/builtin-daemon.c
@@ -1016,7 +1016,7 @@ static int setup_config_changes(struct daemon *daemon)
{
char *basen = strdup(daemon->config_real);
char *dirn = strdup(daemon->config_real);
- char *base, *dir;
+ const char *base, *dir;
int fd, wd = -1;
if (!dirn || !basen)
@@ -1029,7 +1029,7 @@ static int setup_config_changes(struct daemon *daemon)
}
dir = dirname(dirn);
- base = basename(basen);
+ base = perf_basename(basen);
pr_debug("config file: %s, dir: %s\n", base, dir);
wd = inotify_add_watch(fd, dir, IN_CLOSE_WRITE);
diff --git a/tools/perf/builtin-data.c b/tools/perf/builtin-data.c
index 85f59886b5cf..4c08ccb8c06b 100644
--- a/tools/perf/builtin-data.c
+++ b/tools/perf/builtin-data.c
@@ -28,15 +28,15 @@ static const char *data_usage[] = {
NULL
};
-const char *to_json;
-const char *to_ctf;
-struct perf_data_convert_opts opts = {
+static const char *to_json;
+static const char *to_ctf;
+static struct perf_data_convert_opts opts = {
.force = false,
.all = false,
.time_str = NULL,
};
-const struct option data_options[] = {
+static const struct option data_options[] = {
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_STRING(0, "to-json", &to_json, NULL, "Convert to JSON format"),
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 59bf1f72d12e..1b3df868849a 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -113,7 +113,7 @@ enum {
COMPUTE_STREAM, /* After COMPUTE_MAX to avoid use current compute arrays */
};
-const char *compute_names[COMPUTE_MAX] = {
+static const char *compute_names[COMPUTE_MAX] = {
[COMPUTE_DELTA] = "delta",
[COMPUTE_DELTA_ABS] = "delta-abs",
[COMPUTE_RATIO] = "ratio",
@@ -382,7 +382,7 @@ static void block_hist_free(void *he)
free(bh);
}
-struct hist_entry_ops block_hist_ops = {
+static struct hist_entry_ops block_hist_ops = {
.new = block_hist_zalloc,
.free = block_hist_free,
};
@@ -1280,8 +1280,7 @@ static const struct option options[] = {
OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
"separator for columns, no spaces will be added between "
"columns '.' is reserved."),
- OPT_CALLBACK(0, "symfs", NULL, "directory",
- "Look for files with symbols relative to this directory",
+ OPT_CALLBACK(0, "symfs", NULL, "directory[,layout]", SYMFS_HELP,
symbol__config_symfs),
OPT_UINTEGER('o', "order", &sort_compute, "Specify compute sorting."),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
@@ -1353,7 +1352,7 @@ static int cycles_printf(struct hist_entry *he, struct hist_entry *pair,
/*
* Avoid printing the warning "addr2line_init failed for ..."
*/
- symbol_conf.disable_add2line_warn = true;
+ symbol_conf.addr2line_disable_warn = true;
bi = block_he->block_info;
@@ -1892,7 +1891,7 @@ static int data_init(int argc, const char **argv)
return -EINVAL;
}
- data__files = zalloc(sizeof(*data__files) * data__files_cnt);
+ data__files = calloc(data__files_cnt, sizeof(*data__files));
if (!data__files)
return -ENOMEM;
@@ -1987,7 +1986,7 @@ int cmd_diff(int argc, const char **argv)
if (compute == COMPUTE_STREAM) {
symbol_conf.show_branchflag_count = true;
- symbol_conf.disable_add2line_warn = true;
+ symbol_conf.addr2line_disable_warn = true;
callchain_param.mode = CHAIN_FLAT;
callchain_param.key = CCKEY_SRCLINE;
callchain_param.branch_callstack = 1;
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index 4cc33452d79b..8a7dbfb14535 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -20,6 +20,7 @@
#include <linux/capability.h>
#include <linux/err.h>
#include <linux/string.h>
+#include <linux/zalloc.h>
#include <sys/stat.h>
#include "debug.h"
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 5b29f4296861..f174bc69cec4 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -133,7 +133,7 @@ struct perf_inject {
struct perf_file_section secs[HEADER_FEAT_BITS];
struct guest_session guest_session;
struct strlist *known_build_ids;
- const struct evsel *mmap_evsel;
+ struct evsel *mmap_evsel;
struct ip_callchain *raw_callchain;
};
@@ -270,9 +270,8 @@ static s64 perf_event__repipe_auxtrace(const struct perf_tool *tool,
inject->have_auxtrace = true;
if (!inject->output.is_pipe) {
- off_t offset;
+ off_t offset = perf_data__seek(&inject->output, 0, SEEK_CUR);
- offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
if (offset == -1)
return -errno;
ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
@@ -519,7 +518,7 @@ static struct dso *findnew_dso(int pid, int tid, const char *filename,
* processing mmap events. If not stashed, search the evlist for the first mmap
* gathering event.
*/
-static const struct evsel *inject__mmap_evsel(struct perf_inject *inject)
+static struct evsel *inject__mmap_evsel(struct perf_inject *inject)
{
struct evsel *pos;
@@ -1023,7 +1022,6 @@ int perf_event__inject_buildid(const struct perf_tool *tool, union perf_event *e
sample__for_each_callchain_node(thread, evsel, sample, PERF_MAX_STACK_DEPTH,
/*symbols=*/false, mark_dso_hit_callback, &args);
-
thread__put(thread);
repipe:
perf_event__repipe(tool, event, sample, machine);
@@ -1087,6 +1085,7 @@ static int perf_inject__sched_stat(const struct perf_tool *tool,
struct perf_sample sample_sw;
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
u32 pid = evsel__intval(evsel, sample, "pid");
+ int ret;
list_for_each_entry(ent, &inject->samples, node) {
if (pid == ent->tid)
@@ -1103,7 +1102,9 @@ found:
perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
evsel->core.attr.read_format, &sample_sw);
build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
- return perf_event__repipe(tool, event_sw, &sample_sw, machine);
+ ret = perf_event__repipe(tool, event_sw, &sample_sw, machine);
+ perf_sample__exit(&sample_sw);
+ return ret;
}
#endif
@@ -1429,6 +1430,7 @@ static int synthesize_build_id(struct perf_inject *inject, struct dso *dso, pid_
{
struct machine *machine = perf_session__findnew_machine(inject->session, machine_pid);
struct perf_sample synth_sample = {
+ .evsel = inject__mmap_evsel(inject),
.pid = -1,
.tid = -1,
.time = -1,
@@ -1648,6 +1650,7 @@ static int guest_session__fetch(struct guest_session *gs)
size_t hdr_sz = sizeof(*hdr);
ssize_t ret;
+ perf_sample__init(&gs->ev.sample, /*all=*/false);
buf = gs->ev.event_buf;
if (!buf) {
buf = malloc(PERF_SAMPLE_MAX_SIZE);
@@ -1745,18 +1748,24 @@ static int guest_session__inject_events(struct guest_session *gs, u64 timestamp)
if (!gs->fetched) {
ret = guest_session__fetch(gs);
if (ret)
- return ret;
+ break;
gs->fetched = true;
}
ev = gs->ev.event;
sample = &gs->ev.sample;
- if (!ev->header.size)
- return 0; /* EOF */
-
- if (sample->time > timestamp)
- return 0;
+ if (!ev->header.size) {
+ /* EOF */
+ perf_sample__exit(&gs->ev.sample);
+ gs->fetched = false;
+ ret = 0;
+ break;
+ }
+ if (sample->time > timestamp) {
+ ret = 0;
+ break;
+ }
/* Change cpumode to guest */
cpumode = ev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
@@ -1779,12 +1788,14 @@ static int guest_session__inject_events(struct guest_session *gs, u64 timestamp)
if (id_hdr_size & 7) {
pr_err("Bad id_hdr_size %u\n", id_hdr_size);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
if (ev->header.size & 7) {
pr_err("Bad event size %u\n", ev->header.size);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
/* Remove guest id sample */
@@ -1792,14 +1803,16 @@ static int guest_session__inject_events(struct guest_session *gs, u64 timestamp)
if (ev->header.size & 7) {
pr_err("Bad raw event size %u\n", ev->header.size);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
guest_id = guest_session__lookup_id(gs, id);
if (!guest_id) {
pr_err("Guest event with unknown id %llu\n",
(unsigned long long)id);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
/* Change to host ID to avoid conflicting ID values */
@@ -1819,19 +1832,28 @@ static int guest_session__inject_events(struct guest_session *gs, u64 timestamp)
/* New id sample with new ID and CPU */
ret = evlist__append_id_sample(inject->session->evlist, ev, sample);
if (ret)
- return ret;
+ break;
if (ev->header.size & 7) {
pr_err("Bad new event size %u\n", ev->header.size);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
- gs->fetched = false;
-
ret = output_bytes(inject, ev, ev->header.size);
if (ret)
- return ret;
+ break;
+
+ /* Reset for next guest session event fetch. */
+ perf_sample__exit(sample);
+ gs->fetched = false;
+ }
+ if (ret && gs->fetched) {
+ /* Clear saved sample state on error. */
+ perf_sample__exit(&gs->ev.sample);
+ gs->fetched = false;
}
+ return ret;
}
static int guest_session__flush_events(struct guest_session *gs)
@@ -2134,6 +2156,7 @@ static bool keep_feat(struct perf_inject *inject, int feat)
case HEADER_HYBRID_TOPOLOGY:
case HEADER_PMU_CAPS:
case HEADER_CPU_DOMAIN_INFO:
+ case HEADER_CLN_SIZE:
return true;
/* Information that can be updated */
case HEADER_BUILD_ID:
@@ -2479,12 +2502,12 @@ int cmd_inject(int argc, const char **argv)
.output = {
.path = "-",
.mode = PERF_DATA_MODE_WRITE,
- .use_stdio = true,
+ .file.use_stdio = true,
},
};
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
- .use_stdio = true,
+ .file.use_stdio = true,
};
int ret;
const char *known_build_ids = NULL;
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 7929a5fa5f46..9c64a0d74823 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -82,7 +82,7 @@ static unsigned long nr_allocs, nr_cross_allocs;
/* filters for controlling start and stop of time of analysis */
static struct perf_time_interval ptime;
-const char *time_str;
+static const char *time_str;
static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
int bytes_req, int bytes_alloc, int cpu)
diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
index 7f3068264568..9d3a4c779a41 100644
--- a/tools/perf/builtin-kwork.c
+++ b/tools/perf/builtin-kwork.c
@@ -985,7 +985,7 @@ static int process_irq_handler_exit_event(const struct perf_tool *tool,
return 0;
}
-const struct evsel_str_handler irq_tp_handlers[] = {
+static const struct evsel_str_handler irq_tp_handlers[] = {
{ "irq:irq_handler_entry", process_irq_handler_entry_event, },
{ "irq:irq_handler_exit", process_irq_handler_exit_event, },
};
@@ -1080,7 +1080,7 @@ static int process_softirq_exit_event(const struct perf_tool *tool,
return 0;
}
-const struct evsel_str_handler softirq_tp_handlers[] = {
+static const struct evsel_str_handler softirq_tp_handlers[] = {
{ "irq:softirq_raise", process_softirq_raise_event, },
{ "irq:softirq_entry", process_softirq_entry_event, },
{ "irq:softirq_exit", process_softirq_exit_event, },
@@ -1211,7 +1211,7 @@ static int process_workqueue_execute_end_event(const struct perf_tool *tool,
return 0;
}
-const struct evsel_str_handler workqueue_tp_handlers[] = {
+static const struct evsel_str_handler workqueue_tp_handlers[] = {
{ "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
{ "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
{ "workqueue:workqueue_execute_end", process_workqueue_execute_end_event, },
@@ -1281,7 +1281,7 @@ static int process_sched_switch_event(const struct perf_tool *tool,
return 0;
}
-const struct evsel_str_handler sched_tp_handlers[] = {
+static const struct evsel_str_handler sched_tp_handlers[] = {
{ "sched:sched_switch", process_sched_switch_event, },
};
@@ -1561,13 +1561,13 @@ static void print_bad_events(struct perf_kwork *kwork)
}
}
-const char *graph_load = "||||||||||||||||||||||||||||||||||||||||||||||||";
-const char *graph_idle = " ";
static void top_print_per_cpu_load(struct perf_kwork *kwork)
{
int i, load_width;
u64 total, load, load_ratio;
struct kwork_top_stat *stat = &kwork->top_stat;
+ const char *graph_load = "||||||||||||||||||||||||||||||||||||||||||||||||";
+ const char *graph_idle = " ";
for (i = 0; i < MAX_NR_CPUS; i++) {
total = stat->cpus_runtime[i].total;
@@ -2208,7 +2208,7 @@ static int perf_kwork__top(struct perf_kwork *kwork)
struct __top_cpus_runtime *cpus_runtime;
int ret = 0;
- cpus_runtime = zalloc(sizeof(struct __top_cpus_runtime) * (MAX_NR_CPUS + 1));
+ cpus_runtime = calloc(MAX_NR_CPUS + 1, sizeof(struct __top_cpus_runtime));
if (!cpus_runtime)
return -1;
@@ -2423,8 +2423,8 @@ int cmd_kwork(int argc, const char **argv)
"Display call chains if present"),
OPT_UINTEGER(0, "max-stack", &kwork.max_stack,
"Maximum number of functions to display backtrace."),
- OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
- "Look for files with symbols relative to this directory"),
+ OPT_CALLBACK(0, "symfs", NULL, "directory[,layout]", SYMFS_HELP,
+ symbol__config_symfs),
OPT_STRING(0, "time", &kwork.time_str, "str",
"Time span for analysis (start,stop)"),
OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index e8962c985d34..5585aeb97684 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -2250,7 +2250,7 @@ static int parse_map_entry(const struct option *opt, const char *str,
static int parse_max_stack(const struct option *opt, const char *str,
int unset __maybe_unused)
{
- unsigned long *len = (unsigned long *)opt->value;
+ int *len = opt->value;
long val;
char *endptr;
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 1b4ba85ee019..a67b565278ae 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -597,8 +597,8 @@ __cmd_probe(int argc, const char **argv)
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
OPT_BOOLEAN(0, "cache", &probe_conf.cache, "Manipulate probe cache"),
- OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
- "Look for files with symbols relative to this directory"),
+ OPT_CALLBACK(0, "symfs", NULL, "directory[,layout]", SYMFS_HELP,
+ symbol__config_symfs),
OPT_CALLBACK(0, "target-ns", NULL, "pid",
"target pid for namespace contexts", opt_set_target_ns),
OPT_BOOLEAN(0, "bootconfig", &probe_conf.bootconfig,
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 60d764068302..4a5eba498c02 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -40,7 +40,6 @@
#include "util/perf_api_probe.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
-#include "util/cpu-set-sched.h"
#include "util/synthetic-events.h"
#include "util/time-utils.h"
#include "util/units.h"
@@ -56,6 +55,7 @@
#include "asm/bug.h"
#include "perf.h"
#include "cputopo.h"
+#include "dwarf-regs.h"
#include <errno.h>
#include <inttypes.h>
@@ -453,7 +453,7 @@ static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size
static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
{
int ret, idx;
- int trace_fd = rec->session->data->file.fd;
+ int trace_fd = perf_data__fd(rec->session->data);
struct record_aio aio = { .rec = rec, .size = 0 };
/*
@@ -1070,12 +1070,12 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
thread_data->mask->maps.nbits);
if (mmap) {
- thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
+ thread_data->maps = calloc(thread_data->nr_mmaps, sizeof(struct mmap *));
if (!thread_data->maps)
return -ENOMEM;
}
if (overwrite_mmap) {
- thread_data->overwrite_maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
+ thread_data->overwrite_maps = calloc(thread_data->nr_mmaps, sizeof(struct mmap *));
if (!thread_data->overwrite_maps) {
zfree(&thread_data->maps);
return -ENOMEM;
@@ -1220,7 +1220,7 @@ static int record__alloc_thread_data(struct record *rec, struct evlist *evlist)
int t, ret;
struct record_thread *thread_data;
- rec->thread_data = zalloc(rec->nr_threads * sizeof(*(rec->thread_data)));
+ rec->thread_data = calloc(rec->nr_threads, sizeof(*(rec->thread_data)));
if (!rec->thread_data) {
pr_err("Failed to allocate thread data\n");
return -ENOMEM;
@@ -1640,7 +1640,7 @@ static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
int rc = 0;
int nr_mmaps;
struct mmap **maps;
- int trace_fd = rec->data.file.fd;
+ int trace_fd = perf_data__fd(&rec->data);
off_t off = 0;
if (!evlist)
@@ -1845,10 +1845,12 @@ record__finish_output(struct record *rec)
}
rec->session->header.data_size += rec->bytes_written;
- data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
+ data->file.size = perf_data__seek(data, 0, SEEK_CUR);
if (record__threads_enabled(rec)) {
- for (i = 0; i < data->dir.nr; i++)
- data->dir.files[i].size = lseek(data->dir.files[i].fd, 0, SEEK_CUR);
+ for (i = 0; i < data->dir.nr; i++) {
+ data->dir.files[i].size =
+ perf_data_file__seek(&data->dir.files[i], 0, SEEK_CUR);
+ }
}
/* Buildid scanning disabled or build ID in kernel and synthesized map events. */
@@ -2976,65 +2978,32 @@ out_delete_session:
return status;
}
-static void callchain_debug(struct callchain_param *callchain)
-{
- static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
-
- pr_debug("callchain: type %s\n", str[callchain->record_mode]);
-
- if (callchain->record_mode == CALLCHAIN_DWARF)
- pr_debug("callchain: stack dump size %d\n",
- callchain->dump_size);
-}
-
-int record_opts__parse_callchain(struct record_opts *record,
- struct callchain_param *callchain,
- const char *arg, bool unset)
-{
- int ret;
- callchain->enabled = !unset;
-
- /* --no-call-graph */
- if (unset) {
- callchain->record_mode = CALLCHAIN_NONE;
- pr_debug("callchain: disabled\n");
- return 0;
- }
-
- ret = parse_callchain_record_opt(arg, callchain);
- if (!ret) {
- /* Enable data address sampling for DWARF unwind. */
- if (callchain->record_mode == CALLCHAIN_DWARF &&
- !record->record_data_mmap_set)
- record->record_data_mmap = true;
- callchain_debug(callchain);
- }
-
- return ret;
-}
-
-int record_parse_callchain_opt(const struct option *opt,
+static int record_parse_callchain_opt(const struct option *opt,
const char *arg,
int unset)
{
return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
}
-int record_callchain_opt(const struct option *opt,
- const char *arg __maybe_unused,
- int unset __maybe_unused)
+static int record_callchain_opt(const struct option *opt,
+ const char *arg __maybe_unused,
+ int unset)
{
- struct callchain_param *callchain = opt->value;
-
- callchain->enabled = true;
-
- if (callchain->record_mode == CALLCHAIN_NONE)
- callchain->record_mode = CALLCHAIN_FP;
+ /*
+ * The -g option only sets the callchain if not already configured by
+ * .perfconfig. It does, however, enable it.
+ */
+ if (callchain_param.record_mode != CALLCHAIN_NONE) {
+ callchain_param.enabled = true;
+ return 0;
+ }
- callchain_debug(callchain);
- return 0;
+ return record_opts__parse_callchain(opt->value, &callchain_param,
+ EM_HOST != EM_S390 ? "fp" : "dwarf",
+ unset);
}
+
static int perf_record_config(const char *var, const char *value, void *cb)
{
struct record *rec = cb;
@@ -3526,7 +3495,7 @@ static struct option __record_options[] = {
OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
"Minimal number of bytes that is extracted from mmap data pages (default: 1)",
record__mmap_flush_parse),
- OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
+ OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
NULL, "enables call-graph recording" ,
&record_callchain_opt),
OPT_CALLBACK(0, "call-graph", &record.opts,
@@ -3696,7 +3665,7 @@ struct option *record_options = __record_options;
static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
{
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
if (cpu_map__is_dummy(cpus))
return 0;
@@ -3743,7 +3712,7 @@ static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr
{
int t, ret;
- rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
+ rec->thread_masks = calloc(nr_threads, sizeof(*(rec->thread_masks)));
if (!rec->thread_masks) {
pr_err("Failed to allocate thread masks\n");
return -ENOMEM;
@@ -3953,7 +3922,7 @@ static int record__init_thread_numa_masks(struct record *rec, struct perf_cpu_ma
return -ENOMEM;
}
- spec = zalloc(topo->nr * sizeof(char *));
+ spec = calloc(topo->nr, sizeof(char *));
if (!spec) {
pr_err("Failed to allocate NUMA spec\n");
ret = -ENOMEM;
@@ -4131,8 +4100,11 @@ int cmd_record(int argc, const char **argv)
perf_debuginfod_setup(&record.debuginfod);
- /* Make system wide (-a) the default target. */
- if (!argc && target__none(&rec->opts.target))
+ /*
+ * Use system wide (-a) for the default target (i.e. when no
+ * workload). User ID filtering also implies system-wide.
+ */
+ if ((!argc && target__none(&rec->opts.target)) || rec->uid_str)
rec->opts.target.system_wide = true;
if (nr_cgroups && !rec->opts.target.system_wide) {
@@ -4310,7 +4282,8 @@ int cmd_record(int argc, const char **argv)
record.opts.tail_synthesize = true;
if (rec->evlist->core.nr_entries == 0) {
- struct evlist *def_evlist = evlist__new_default();
+ struct evlist *def_evlist = evlist__new_default(&rec->opts.target,
+ callchain_param.enabled);
if (!def_evlist)
goto out;
@@ -4339,9 +4312,6 @@ int cmd_record(int argc, const char **argv)
err = parse_uid_filter(rec->evlist, uid);
if (err)
goto out;
-
- /* User ID filtering implies system wide. */
- rec->opts.target.system_wide = true;
}
/* Enable ignoring missing threads when -p option is defined. */
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 3b81f4b3dc49..95c0bdba6b11 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -245,25 +245,20 @@ static int process_feature_event(const struct perf_tool *tool,
union perf_event *event)
{
struct report *rep = container_of(tool, struct report, tool);
+ int ret = perf_event__process_feature(tool, session, event);
- if (event->feat.feat_id < HEADER_LAST_FEATURE)
- return perf_event__process_feature(session, event);
+ if (ret == 0 && event->header.size == sizeof(struct perf_record_header_feature) &&
+ (int)event->feat.feat_id >= session->header.last_feat) {
+ /*
+ * (feat_id = HEADER_LAST_FEATURE) is the end marker which means
+ * all features are received.
+ */
+ if (rep->header_only)
+ session_done = 1;
- if (event->feat.feat_id != HEADER_LAST_FEATURE) {
- pr_err("failed: wrong feature ID: %" PRI_lu64 "\n",
- event->feat.feat_id);
- return -1;
- } else if (rep->header_only) {
- session_done = 1;
+ setup_forced_leader(rep, session->evlist);
}
-
- /*
- * (feat_id = HEADER_LAST_FEATURE) is the end marker which
- * means all features are received, now we can force the
- * group if needed.
- */
- setup_forced_leader(rep, session->evlist);
- return 0;
+ return ret;
}
static int process_sample_event(const struct perf_tool *tool,
@@ -1416,8 +1411,7 @@ int cmd_report(int argc, const char **argv)
"columns '.' is reserved."),
OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
"Only display entries resolved to a symbol"),
- OPT_CALLBACK(0, "symfs", NULL, "directory",
- "Look for files with symbols relative to this directory",
+ OPT_CALLBACK(0, "symfs", NULL, "directory[,layout]", SYMFS_HELP,
symbol__config_symfs),
OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
"list of cpus to profile"),
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 3f509cfdd58c..555247568e7a 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -2405,7 +2405,7 @@ static int init_idle_threads(int ncpu)
{
int i, ret;
- idle_threads = zalloc(ncpu * sizeof(struct thread *));
+ idle_threads = calloc(ncpu, sizeof(struct thread *));
if (!idle_threads)
return -ENOMEM;
@@ -3483,7 +3483,7 @@ static int setup_cpus_switch_event(struct perf_sched *sched)
if (!sched->cpu_last_switched)
return -1;
- sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
+ sched->curr_pid = calloc(MAX_CPUS, sizeof(*(sched->curr_pid)));
if (!sched->curr_pid) {
zfree(&sched->cpu_last_switched);
return -1;
@@ -3559,7 +3559,7 @@ static int setup_map_cpus(struct perf_sched *sched)
sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);
if (sched->map.comp) {
- sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
+ sched->map.comp_cpus = calloc(sched->max_cpu.cpu, sizeof(int));
if (!sched->map.comp_cpus)
return -1;
}
@@ -4879,8 +4879,8 @@ int cmd_sched(int argc, const char **argv)
"Display call chains if present (default on)"),
OPT_UINTEGER(0, "max-stack", &sched.max_stack,
"Maximum number of functions to display backtrace."),
- OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
- "Look for files with symbols relative to this directory"),
+ OPT_CALLBACK(0, "symfs", NULL, "directory[,layout]", SYMFS_HELP,
+ symbol__config_symfs),
OPT_BOOLEAN('s', "summary", &sched.summary_only,
"Show only syscall summary with statistics"),
OPT_BOOLEAN('S', "with-summary", &sched.summary,
@@ -4955,6 +4955,7 @@ int cmd_sched(int argc, const char **argv)
.switch_event = replay_switch_event,
.fork_event = replay_fork_event,
};
+ struct trace_sched_handler stats_ops = {};
int ret;
perf_tool__init(&sched.tool, /*ordered_events=*/true);
@@ -5037,6 +5038,7 @@ int cmd_sched(int argc, const char **argv)
} else if (!strcmp(argv[0], "stats")) {
const char *const stats_subcommands[] = {"record", "report", NULL};
+ sched.tp_handler = &stats_ops;
argc = parse_options_subcommand(argc, argv, stats_options,
stats_subcommands,
stats_usage,
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 7c743a303507..c8ac9f01a36b 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -166,7 +166,7 @@ struct perf_script {
int range_num;
};
-struct output_option {
+static struct output_option {
const char *str;
enum perf_output_field field;
} all_output_options[] = {
@@ -1271,11 +1271,11 @@ static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
if (PRINT_FIELD(BRCNTR)) {
struct evsel *pos = evsel__leader(evsel);
- unsigned int i = 0, j, num, mask, width;
+ unsigned int i = 0, j, num, mask, width, numprinted = 0;
perf_env__find_br_cntr_info(evsel__env(evsel), NULL, &width);
mask = (1L << width) - 1;
- printed += fprintf(fp, "br_cntr: ");
+ printed += fprintf(fp, "\t# br_cntr: ");
evlist__for_each_entry_from(evsel->evlist, pos) {
if (!(pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
continue;
@@ -1283,16 +1283,20 @@ static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
break;
num = (br_cntr >> (i++ * width)) & mask;
+ numprinted += num;
if (!verbose) {
for (j = 0; j < num; j++)
printed += fprintf(fp, "%s", pos->abbr_name);
} else
printed += fprintf(fp, "%s %d ", pos->name, num);
}
- printed += fprintf(fp, "\t");
+ if (numprinted == 0 && !verbose)
+ printed += fprintf(fp, "-");
+ printed += fprintf(fp, " ");
}
- printed += fprintf(fp, "#%s%s%s%s",
+ printed += fprintf(fp, "%s%s%s%s%s",
+ !PRINT_FIELD(BRCNTR) ? "#" : "",
en->flags.predicted ? " PRED" : "",
en->flags.mispred ? " MISPRED" : "",
en->flags.in_tx ? " INTX" : "",
@@ -2568,7 +2572,6 @@ static struct scripting_ops *scripting_ops;
static void __process_stat(struct evsel *counter, u64 tstamp)
{
int nthreads = perf_thread_map__nr(counter->core.threads);
- int idx, thread;
struct perf_cpu cpu;
static int header_printed;
@@ -2578,7 +2581,9 @@ static void __process_stat(struct evsel *counter, u64 tstamp)
header_printed = 1;
}
- for (thread = 0; thread < nthreads; thread++) {
+ for (int thread = 0; thread < nthreads; thread++) {
+ unsigned int idx;
+
perf_cpu_map__for_each_cpu(cpu, idx, evsel__cpus(counter)) {
struct perf_counts_values *counts;
@@ -2905,8 +2910,12 @@ static int print_event_with_time(const struct perf_tool *tool,
thread = machine__findnew_thread(machine, pid, tid);
if (evsel) {
+ struct evsel *saved_evsel = sample->evsel;
+
+ sample->evsel = evsel;
perf_sample__fprintf_start(script, sample, thread, evsel,
event->header.type, stdout);
+ sample->evsel = saved_evsel;
}
perf_event__fprintf(event, machine, stdout);
@@ -3814,7 +3823,7 @@ out:
static int have_cmd(int argc, const char **argv)
{
- char **__argv = malloc(sizeof(const char *) * argc);
+ char **__argv = calloc(argc, sizeof(const char *));
if (!__argv) {
pr_err("malloc failed\n");
@@ -3939,15 +3948,6 @@ int process_cpu_map_event(const struct perf_tool *tool,
return set_maps(script);
}
-static int process_feature_event(const struct perf_tool *tool __maybe_unused,
- struct perf_session *session,
- union perf_event *event)
-{
- if (event->feat.feat_id < HEADER_LAST_FEATURE)
- return perf_event__process_feature(session, event);
- return 0;
-}
-
static int perf_script__process_auxtrace_info(const struct perf_tool *tool,
struct perf_session *session,
union perf_event *event)
@@ -4074,8 +4074,7 @@ int cmd_script(int argc, const char **argv)
"file", "kallsyms pathname"),
OPT_BOOLEAN('G', "hide-call-graph", &no_callchain,
"When printing symbols do not display call chain"),
- OPT_CALLBACK(0, "symfs", NULL, "directory",
- "Look for files with symbols relative to this directory",
+ OPT_CALLBACK(0, "symfs", NULL, "directory[,layout]", SYMFS_HELP,
symbol__config_symfs),
OPT_CALLBACK('F', "fields", NULL, "str",
"comma separated output fields prepend with 'type:'. "
@@ -4313,7 +4312,7 @@ int cmd_script(int argc, const char **argv)
}
}
- __argv = malloc((argc + 6) * sizeof(const char *));
+ __argv = calloc(argc + 6, sizeof(const char *));
if (!__argv) {
pr_err("malloc failed\n");
err = -ENOMEM;
@@ -4339,7 +4338,7 @@ int cmd_script(int argc, const char **argv)
dup2(live_pipe[0], 0);
close(live_pipe[1]);
- __argv = malloc((argc + 4) * sizeof(const char *));
+ __argv = calloc(argc + 4, sizeof(const char *));
if (!__argv) {
pr_err("malloc failed\n");
err = -ENOMEM;
@@ -4377,7 +4376,7 @@ script_found:
}
}
- __argv = malloc((argc + 2) * sizeof(const char *));
+ __argv = calloc(argc + 2, sizeof(const char *));
if (!__argv) {
pr_err("malloc failed\n");
err = -ENOMEM;
@@ -4423,7 +4422,7 @@ script_found:
#ifdef HAVE_LIBTRACEEVENT
script.tool.tracing_data = perf_event__process_tracing_data;
#endif
- script.tool.feature = process_feature_event;
+ script.tool.feature = perf_event__process_feature;
script.tool.build_id = perf_event__process_build_id;
script.tool.id_index = perf_event__process_id_index;
script.tool.auxtrace_info = perf_script__process_auxtrace_info;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 73c2ba7e3076..99d7db372b48 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -164,7 +164,7 @@ struct opt_aggr_mode {
};
/* Turn command line option into most generic aggregation mode setting. */
-static enum aggr_mode opt_aggr_mode_to_aggr_mode(struct opt_aggr_mode *opt_mode)
+static enum aggr_mode opt_aggr_mode_to_aggr_mode(const struct opt_aggr_mode *opt_mode)
{
enum aggr_mode mode = AGGR_GLOBAL;
@@ -410,7 +410,7 @@ static int read_tool_counters(void)
struct evsel *counter;
evlist__for_each_entry(evsel_list, counter) {
- int idx;
+ unsigned int idx;
if (!evsel__is_tool(counter))
continue;
@@ -1214,13 +1214,28 @@ static int parse_cputype(const struct option *opt,
return 0;
}
+static int parse_pmu_filter(const struct option *opt,
+ const char *str,
+ int unset __maybe_unused)
+{
+ struct evlist *evlist = *(struct evlist **)opt->value;
+
+ if (!list_empty(&evlist->core.entries)) {
+ fprintf(stderr, "Must define pmu-filter before events/metrics\n");
+ return -1;
+ }
+
+ parse_events_option_args.pmu_filter = str;
+ return 0;
+}
+
static int parse_cache_level(const struct option *opt,
const char *str,
int unset __maybe_unused)
{
int level;
- struct opt_aggr_mode *opt_aggr_mode = (struct opt_aggr_mode *)opt->value;
- u32 *aggr_level = (u32 *)opt->data;
+ bool *per_cache = opt->value;
+ u32 *aggr_level = opt->data;
/*
* If no string is specified, aggregate based on the topology of
@@ -1258,7 +1273,7 @@ static int parse_cache_level(const struct option *opt,
return -EINVAL;
}
out:
- opt_aggr_mode->cache = true;
+ *per_cache = true;
*aggr_level = level;
return 0;
}
@@ -1917,25 +1932,33 @@ static int default_evlist_evsel_cmp(void *priv __maybe_unused,
const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
+ const struct evsel *lhs_leader = evsel__leader(lhs);
+ const struct evsel *rhs_leader = evsel__leader(rhs);
- if (evsel__leader(lhs) == evsel__leader(rhs)) {
+ if (lhs_leader == rhs_leader) {
/* Within the same group, respect the original order. */
return lhs_core->idx - rhs_core->idx;
}
+ /*
+ * Compare using leader's attributes so that all members of a group
+ * stay together. This ensures leaders are opened before their members.
+ */
+
/* Sort default metrics evsels first, and default show events before those. */
- if (lhs->default_metricgroup != rhs->default_metricgroup)
- return lhs->default_metricgroup ? -1 : 1;
+ if (lhs_leader->default_metricgroup != rhs_leader->default_metricgroup)
+ return lhs_leader->default_metricgroup ? -1 : 1;
- if (lhs->default_show_events != rhs->default_show_events)
- return lhs->default_show_events ? -1 : 1;
+ if (lhs_leader->default_show_events != rhs_leader->default_show_events)
+ return lhs_leader->default_show_events ? -1 : 1;
/* Sort by PMU type (prefers legacy types first). */
- if (lhs->pmu != rhs->pmu)
- return lhs->pmu->type - rhs->pmu->type;
+ if (lhs_leader->pmu != rhs_leader->pmu)
+ return lhs_leader->pmu->type - rhs_leader->pmu->type;
- /* Sort by name. */
- return strcmp(evsel__name((struct evsel *)lhs), evsel__name((struct evsel *)rhs));
+ /* Sort by leader's name. */
+ return strcmp(evsel__name((struct evsel *)lhs_leader),
+ evsel__name((struct evsel *)rhs_leader));
}
/*
@@ -2305,24 +2328,23 @@ static struct perf_stat perf_stat = {
static int __cmd_report(int argc, const char **argv)
{
struct perf_session *session;
+ struct opt_aggr_mode opt_mode = {};
const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file", "input file name"),
- OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
- "aggregate counts per processor socket", AGGR_SOCKET),
- OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode,
- "aggregate counts per processor die", AGGR_DIE),
- OPT_SET_UINT(0, "per-cluster", &perf_stat.aggr_mode,
- "aggregate counts perf processor cluster", AGGR_CLUSTER),
- OPT_CALLBACK_OPTARG(0, "per-cache", &perf_stat.aggr_mode, &perf_stat.aggr_level,
- "cache level",
- "aggregate count at this cache level (Default: LLC)",
+ OPT_BOOLEAN(0, "per-thread", &opt_mode.thread, "aggregate counts per thread"),
+ OPT_BOOLEAN(0, "per-socket", &opt_mode.socket,
+ "aggregate counts per processor socket"),
+ OPT_BOOLEAN(0, "per-die", &opt_mode.die, "aggregate counts per processor die"),
+ OPT_BOOLEAN(0, "per-cluster", &opt_mode.cluster,
+ "aggregate counts per processor cluster"),
+ OPT_CALLBACK_OPTARG(0, "per-cache", &opt_mode.cache, &perf_stat.aggr_level,
+ "cache level", "aggregate count at this cache level (Default: LLC)",
parse_cache_level),
- OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
- "aggregate counts per physical processor core", AGGR_CORE),
- OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode,
- "aggregate counts per numa node", AGGR_NODE),
- OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
- "disable CPU count aggregation", AGGR_NONE),
+ OPT_BOOLEAN(0, "per-core", &opt_mode.core,
+ "aggregate counts per physical processor core"),
+ OPT_BOOLEAN(0, "per-node", &opt_mode.node, "aggregate counts per numa node"),
+ OPT_BOOLEAN('A', "no-aggr", &opt_mode.no_aggr,
+ "disable aggregation across CPUs or PMUs"),
OPT_END()
};
struct stat st;
@@ -2330,6 +2352,10 @@ static int __cmd_report(int argc, const char **argv)
argc = parse_options(argc, argv, options, stat_report_usage, 0);
+ perf_stat.aggr_mode = opt_aggr_mode_to_aggr_mode(&opt_mode);
+ if (perf_stat.aggr_mode == AGGR_GLOBAL)
+ perf_stat.aggr_mode = AGGR_UNSET; /* No option found so leave unset. */
+
if (!input_name || !strlen(input_name)) {
if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
input_name = "-";
@@ -2506,7 +2532,7 @@ int cmd_stat(int argc, const char **argv)
OPT_BOOLEAN(0, "per-die", &opt_mode.die, "aggregate counts per processor die"),
OPT_BOOLEAN(0, "per-cluster", &opt_mode.cluster,
"aggregate counts per processor cluster"),
- OPT_CALLBACK_OPTARG(0, "per-cache", &opt_mode, &stat_config.aggr_level,
+ OPT_CALLBACK_OPTARG(0, "per-cache", &opt_mode.cache, &stat_config.aggr_level,
"cache level", "aggregate count at this cache level (Default: LLC)",
parse_cache_level),
OPT_BOOLEAN(0, "per-core", &opt_mode.core,
@@ -2561,6 +2587,10 @@ int cmd_stat(int argc, const char **argv)
"Only enable events on applying cpu with this type "
"for hybrid platform (e.g. core or atom)",
parse_cputype),
+ OPT_CALLBACK(0, "pmu-filter", &evsel_list, "pmu",
+ "Only enable events on the PMU with the specified name, "
+ "for use with multiple PMUs of the same type (e.g. hisi_sicl2_cpa0 or hisi_sicl0_cpa0)",
+ parse_pmu_filter),
#ifdef HAVE_LIBPFM
OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
"libpfm4 event selector. use 'perf list' to list available events",
@@ -2744,7 +2774,7 @@ int cmd_stat(int argc, const char **argv)
}
if (stat_config.walltime_run_table) {
- stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
+ stat_config.walltime_run = calloc(stat_config.run_count, sizeof(stat_config.walltime_run[0]));
if (!stat_config.walltime_run) {
pr_err("failed to setup -r option");
goto out;
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index f8b49d69e9a5..28f33e39895d 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1951,8 +1951,7 @@ int cmd_timechart(int argc, const char **argv)
OPT_CALLBACK('p', "process", NULL, "process",
"process selector. Pass a pid or process name.",
parse_process),
- OPT_CALLBACK(0, "symfs", NULL, "directory",
- "Look for files with symbols relative to this directory",
+ OPT_CALLBACK(0, "symfs", NULL, "directory[,layout]", SYMFS_HELP,
symbol__config_symfs),
OPT_INTEGER('n', "proc-num", &tchart.proc_num,
"min. number of tasks to print"),
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 710604c4f6f6..f6eb543de537 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -56,6 +56,7 @@
#include "util/debug.h"
#include "util/ordered-events.h"
#include "util/pfm.h"
+#include "dwarf-regs.h"
#include <assert.h>
#include <elf.h>
@@ -1387,13 +1388,6 @@ out_join_thread:
}
static int
-callchain_opt(const struct option *opt, const char *arg, int unset)
-{
- symbol_conf.use_callchain = true;
- return record_callchain_opt(opt, arg, unset);
-}
-
-static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
struct callchain_param *callchain = opt->value;
@@ -1413,6 +1407,24 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
return parse_callchain_top_opt(arg);
}
+static int
+callchain_opt(const struct option *opt, const char *arg __maybe_unused, int unset)
+{
+ struct callchain_param *callchain = opt->value;
+
+ /*
+ * The -g option only sets the callchain if not already configured by
+ * .perfconfig. It does, however, enable it.
+ */
+ if (callchain->record_mode != CALLCHAIN_NONE) {
+ callchain->enabled = true;
+ return 0;
+ }
+
+ return parse_callchain_opt(opt, EM_HOST != EM_S390 ? "fp" : "dwarf", unset);
+}
+
+
static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
{
if (!strcmp(var, "top.call-graph")) {
@@ -1437,11 +1449,10 @@ parse_percent_limit(const struct option *opt, const char *arg,
return 0;
}
-const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
- "\n\t\t\t\tDefault: fp,graph,0.5,caller,function";
-
int cmd_top(int argc, const char **argv)
{
+ static const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
+ "\n\t\t\t\tDefault: fp,graph,0.5,caller,function";
char errbuf[BUFSIZ];
struct perf_top top = {
.count_filter = 5,
@@ -1694,8 +1705,17 @@ int cmd_top(int argc, const char **argv)
if (annotate_check_args() < 0)
goto out_delete_evlist;
+ status = target__validate(target);
+ if (status) {
+ target__strerror(target, status, errbuf, BUFSIZ);
+ ui__warning("%s\n", errbuf);
+ }
+
+ if (target__none(target))
+ target->system_wide = true;
+
if (!top.evlist->core.nr_entries) {
- struct evlist *def_evlist = evlist__new_default();
+ struct evlist *def_evlist = evlist__new_default(target, callchain_param.enabled);
if (!def_evlist)
goto out_delete_evlist;
@@ -1788,12 +1808,6 @@ int cmd_top(int argc, const char **argv)
goto out_delete_evlist;
}
- status = target__validate(target);
- if (status) {
- target__strerror(target, status, errbuf, BUFSIZ);
- ui__warning("%s\n", errbuf);
- }
-
if (top.uid_str) {
uid_t uid = parse_uid(top.uid_str);
@@ -1807,9 +1821,6 @@ int cmd_top(int argc, const char **argv)
goto out_delete_evlist;
}
- if (target__none(target))
- target->system_wide = true;
-
if (evlist__create_maps(top.evlist, target) < 0) {
ui__error("Couldn't create thread/CPU maps: %s\n",
errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 311d9da9896a..e58c49d047a2 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -21,7 +21,6 @@
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#endif
-#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "builtin.h"
#include "util/cgroup.h"
@@ -1565,7 +1564,9 @@ static bool syscall_id_equal(long key1, long key2, void *ctx __maybe_unused)
static struct hashmap *alloc_syscall_stats(void)
{
- return hashmap__new(syscall_id_hash, syscall_id_equal, NULL);
+ struct hashmap *result = hashmap__new(syscall_id_hash, syscall_id_equal, NULL);
+
+ return IS_ERR(result) ? NULL : result;
}
static void delete_syscall_stats(struct hashmap *syscall_stats)
@@ -1573,7 +1574,7 @@ static void delete_syscall_stats(struct hashmap *syscall_stats)
struct hashmap_entry *pos;
size_t bkt;
- if (syscall_stats == NULL)
+ if (!syscall_stats)
return;
hashmap__for_each_entry(syscall_stats, pos, bkt)
@@ -1589,7 +1590,7 @@ static struct thread_trace *thread_trace__new(struct trace *trace)
ttrace->files.max = -1;
if (trace->summary) {
ttrace->syscall_stats = alloc_syscall_stats();
- if (IS_ERR(ttrace->syscall_stats))
+ if (!ttrace->syscall_stats)
zfree(&ttrace);
}
}
@@ -2003,9 +2004,13 @@ static int trace__symbols_init(struct trace *trace, int argc, const char **argv,
if (err < 0)
goto out;
+ if (trace->summary_only && trace->summary_mode != SUMMARY__BY_THREAD)
+ goto out;
+
err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
evlist->core.threads, trace__tool_process,
- /*needs_mmap=*/callchain_param.enabled,
+ /*needs_mmap=*/callchain_param.enabled &&
+ !trace->summary_only,
/*mmap_data=*/false,
/*nr_threads_synthesize=*/1);
out:
@@ -2264,9 +2269,7 @@ static int trace__validate_ev_qualifier(struct trace *trace)
struct str_node *pos;
size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
- trace->ev_qualifier_ids.entries = malloc(nr_allocated *
- sizeof(trace->ev_qualifier_ids.entries[0]));
-
+ trace->ev_qualifier_ids.entries = calloc(nr_allocated, sizeof(trace->ev_qualifier_ids.entries[0]));
if (trace->ev_qualifier_ids.entries == NULL) {
fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
trace->output);
@@ -2955,7 +2958,7 @@ static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
++trace->stats.vfs_getname;
}
- if (ttrace->entry_time) {
+ if (ttrace->entry_time && sample->time >= ttrace->entry_time) {
duration = sample->time - ttrace->entry_time;
if (trace__filter_duration(trace, duration))
goto out;
@@ -4464,7 +4467,7 @@ create_maps:
if (trace->summary_mode == SUMMARY__BY_TOTAL && !trace->summary_bpf) {
trace->syscall_stats = alloc_syscall_stats();
- if (IS_ERR(trace->syscall_stats))
+ if (!trace->syscall_stats)
goto out_delete_evlist;
}
@@ -4771,7 +4774,7 @@ static int trace__replay(struct trace *trace)
if (trace->summary_mode == SUMMARY__BY_TOTAL) {
trace->syscall_stats = alloc_syscall_stats();
- if (IS_ERR(trace->syscall_stats))
+ if (!trace->syscall_stats)
goto out;
}
@@ -5299,6 +5302,13 @@ static int trace__parse_summary_mode(const struct option *opt, const char *str,
return 0;
}
+static int trace_parse_callchain_opt(const struct option *opt,
+ const char *arg,
+ int unset)
+{
+ return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
+}
+
static int trace__config(const char *var, const char *value, void *arg)
{
struct trace *trace = arg;
@@ -5446,7 +5456,7 @@ int cmd_trace(int argc, const char **argv)
OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
OPT_CALLBACK(0, "call-graph", &trace.opts,
"record_mode[,record_size]", record_callchain_help,
- &record_parse_callchain_opt),
+ &trace_parse_callchain_opt),
OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
"Use libtraceevent to print the tracepoint arguments."),
OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 31826621eebd..531c0e0e84df 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -6,10 +6,7 @@ NC='\033[0m' # No Color
declare -a FILES=(
"include/uapi/linux/const.h"
- "include/uapi/drm/drm.h"
- "include/uapi/drm/i915_drm.h"
"include/uapi/linux/bits.h"
- "include/uapi/linux/fadvise.h"
"include/uapi/linux/fscrypt.h"
"include/uapi/linux/genetlink.h"
"include/uapi/linux/if_addr.h"
@@ -90,7 +87,10 @@ declare -a SYNC_CHECK_FILES=(
declare -a BEAUTY_FILES=(
"arch/x86/include/asm/irq_vectors.h"
"arch/x86/include/uapi/asm/prctl.h"
+ "include/uapi/drm/drm.h"
+ "include/uapi/drm/i915_drm.h"
"include/linux/socket.h"
+ "include/uapi/linux/fadvise.h"
"include/uapi/linux/fcntl.h"
"include/uapi/linux/fs.h"
"include/uapi/linux/mount.h"
diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
index 87bfd4781003..d3dc53010e76 100644
--- a/tools/perf/jvmti/libjvmti.c
+++ b/tools/perf/jvmti/libjvmti.c
@@ -98,7 +98,7 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **
/*
* Phase 2 -- allocate big enough line table
*/
- *tab = malloc(nr_total * sizeof(**tab));
+ *tab = calloc(nr_total, sizeof(**tab));
if (!*tab)
return JVMTI_ERROR_OUT_OF_MEMORY;
@@ -262,11 +262,10 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
}
nr_lines = 0;
} else if (nr_lines > 0) {
- line_file_names = malloc(sizeof(char*) * nr_lines);
+ line_file_names = calloc(nr_lines, sizeof(char *));
if (!line_file_names) {
warnx("jvmti: cannot allocate space for line table method names");
} else {
- memset(line_file_names, 0, sizeof(char*) * nr_lines);
ret = fill_source_filenames(jvmti, nr_lines, line_tab, line_file_names);
if (ret != JVMTI_ERROR_NONE) {
warnx("jvmti: fill_source_filenames failed");
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index f475a8664ffc..1f51e8de6b1b 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -48,7 +48,7 @@ struct cmd_struct {
int option;
};
-static struct cmd_struct commands[] = {
+static const struct cmd_struct commands[] = {
{ "archive", NULL, 0 },
{ "buildid-cache", cmd_buildid_cache, 0 },
{ "buildid-list", cmd_buildid_list, 0 },
@@ -178,7 +178,7 @@ static int set_debug_file(const char *path)
return 0;
}
-struct option options[] = {
+static const struct option options[] = {
OPT_ARGUMENT("help", "help"),
OPT_ARGUMENT("version", "version"),
OPT_ARGUMENT("exec-path", "exec-path"),
@@ -280,7 +280,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(commands); i++) {
- struct cmd_struct *p = commands+i;
+ const struct cmd_struct *p = commands + i;
printf("%s ", p->cmd);
}
putchar('\n');
@@ -289,7 +289,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(options)-1; i++) {
- struct option *p = options+i;
+ const struct option *p = options + i;
printf("--%s ", p->long_name);
}
putchar('\n');
@@ -331,7 +331,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
#define RUN_SETUP (1<<0)
#define USE_PAGER (1<<1)
-static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
+static int run_builtin(const struct cmd_struct *p, int argc, const char **argv)
{
int status;
struct stat st;
@@ -390,7 +390,7 @@ static void handle_internal_command(int argc, const char **argv)
}
for (i = 0; i < ARRAY_SIZE(commands); i++) {
- struct cmd_struct *p = commands+i;
+ const struct cmd_struct *p = commands+i;
if (p->fn == NULL)
continue;
if (strcmp(p->cmd, cmd))
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
index dc5f94862a3b..dc1df2d57ddc 100644
--- a/tools/perf/pmu-events/Build
+++ b/tools/perf/pmu-events/Build
@@ -211,10 +211,10 @@ ifneq ($(strip $(ORPHAN_FILES)),)
# Message for $(call echo-cmd,rm). Generally cleaning files isn't part
# of a build step.
-quiet_cmd_rm = RM $^
+quiet_cmd_rm = RM ...$(words $^) orphan file(s)...
+# The list of files can be long. Use xargs to prevent issues.
prune_orphans: $(ORPHAN_FILES)
- # The list of files can be long. Use xargs to prevent issues.
$(Q)$(call echo-cmd,rm)echo "$^" | xargs rm -f
JEVENTS_DEPS += prune_orphans
diff --git a/tools/perf/pmu-events/arch/arm64/common-and-microarch.json b/tools/perf/pmu-events/arch/arm64/common-and-microarch.json
index 468cb085d879..144325d87be4 100644
--- a/tools/perf/pmu-events/arch/arm64/common-and-microarch.json
+++ b/tools/perf/pmu-events/arch/arm64/common-and-microarch.json
@@ -1513,11 +1513,26 @@
"BriefDescription": "Level 2 data cache refill, software preload"
},
{
+ "EventCode": "0x8150",
+ "EventName": "L3D_CACHE_RW",
+ "BriefDescription": "Level 3 data cache demand access."
+ },
+ {
+ "EventCode": "0x8151",
+ "EventName": "L3D_CACHE_PRFM",
+ "BriefDescription": "Level 3 data cache software prefetch"
+ },
+ {
"EventCode": "0x8152",
"EventName": "L3D_CACHE_MISS",
"BriefDescription": "Level 3 data cache demand access miss"
},
{
+ "EventCode": "0x8153",
+ "EventName": "L3D_CACHE_REFILL_PRFM",
+ "BriefDescription": "Level 3 data cache refill, software prefetch."
+ },
+ {
"EventCode": "0x8154",
"EventName": "L1D_CACHE_HWPRF",
"BriefDescription": "Level 1 data cache hardware prefetch."
@@ -1528,6 +1543,11 @@
"BriefDescription": "Level 2 data cache hardware prefetch."
},
{
+ "EventCode": "0x8156",
+ "EventName": "L3D_CACHE_HWPRF",
+ "BriefDescription": "Level 3 data cache hardware prefetch."
+ },
+ {
"EventCode": "0x8158",
"EventName": "STALL_FRONTEND_MEMBOUND",
"BriefDescription": "Frontend stall cycles, memory bound."
@@ -1683,6 +1703,11 @@
"BriefDescription": "Level 2 data cache refill, hardware prefetch."
},
{
+ "EventCode": "0x81BE",
+ "EventName": "L3D_CACHE_REFILL_HWPRF",
+ "BriefDescription": "Level 3 data cache refill, hardware prefetch."
+ },
+ {
"EventCode": "0x81C0",
"EventName": "L1I_CACHE_HIT_RD",
"BriefDescription": "Level 1 instruction cache demand fetch hit."
@@ -1713,11 +1738,31 @@
"BriefDescription": "Level 1 instruction cache demand fetch first hit, fetched by software preload"
},
{
+ "EventCode": "0x81DC",
+ "EventName": "L1D_CACHE_HIT_RW_FPRFM",
+ "BriefDescription": "Level 1 data cache demand access first hit, fetched by software prefetch."
+ },
+ {
"EventCode": "0x81E0",
"EventName": "L1I_CACHE_HIT_RD_FHWPRF",
"BriefDescription": "Level 1 instruction cache demand fetch first hit, fetched by hardware prefetcher"
},
{
+ "EventCode": "0x81EC",
+ "EventName": "L1D_CACHE_HIT_RW_FHWPRF",
+ "BriefDescription": "Level 1 data cache demand access first hit, fetched by hardware prefetcher."
+ },
+ {
+ "EventCode": "0x81F0",
+ "EventName": "L1I_CACHE_HIT_RD_FPRF",
+ "BriefDescription": "Level 1 instruction cache demand fetch first hit, fetched by prefetch."
+ },
+ {
+ "EventCode": "0x81FC",
+ "EventName": "L1D_CACHE_HIT_RW_FPRF",
+ "BriefDescription": "Level 1 data cache demand access first hit, fetched by prefetch."
+ },
+ {
"EventCode": "0x8200",
"EventName": "L1I_CACHE_HIT",
"BriefDescription": "Level 1 instruction cache hit."
@@ -1768,11 +1813,26 @@
"BriefDescription": "Level 1 instruction cache demand fetch line-fill buffer first hit, recently fetched by software preload"
},
{
+ "EventCode": "0x825C",
+ "EventName": "L1D_LFB_HIT_RW_FPRFM",
+ "BriefDescription": "Level 1 data cache demand access line-fill buffer first hit, recently fetched by software prefetch."
+ },
+ {
"EventCode": "0x8260",
"EventName": "L1I_LFB_HIT_RD_FHWPRF",
"BriefDescription": "Level 1 instruction cache demand fetch line-fill buffer first hit, recently fetched by hardware prefetcher"
},
{
+ "EventCode": "0x826C",
+ "EventName": "L1D_LFB_HIT_RW_FHWPRF",
+ "BriefDescription": "Level 1 data cache demand access line-fill buffer first hit, recently fetched by hardware prefetcher."
+ },
+ {
+ "EventCode": "0x827C",
+ "EventName": "L1D_LFB_HIT_RW_FPRF",
+ "BriefDescription": "Level 1 data cache demand access line-fill buffer first hit, recently fetched by prefetch."
+ },
+ {
"EventCode": "0x8280",
"EventName": "L1I_CACHE_PRF",
"BriefDescription": "Level 1 instruction cache, preload or prefetch hit."
@@ -1808,6 +1868,11 @@
"BriefDescription": "Last level cache refill"
},
{
+ "EventCode": "0x828E",
+ "EventName": "L3D_CACHE_REFILL_PRF",
+ "BriefDescription": "Level 3 data cache refill, prefetch."
+ },
+ {
"EventCode": "0x8320",
"EventName": "L1D_CACHE_REFILL_PERCYC",
"BriefDescription": "Level 1 data or unified cache refills in progress."
@@ -1873,6 +1938,16 @@
"BriefDescription": "Floating-point operation speculatively_executed, smallest type is 8-bit floating-point."
},
{
+ "EventCode": "0x8480",
+ "EventName": "FP_SP_FIXED_MIN_OPS_SPEC",
+ "BriefDescription": "Non-scalable element arithmetic operations speculatively executed, smallest type is single-precision floating-point."
+ },
+ {
+ "EventCode": "0x8482",
+ "EventName": "FP_HP_FIXED_MIN_OPS_SPEC",
+ "BriefDescription": "Non-scalable element arithmetic operations speculatively executed, smallest type is half-precision floating-point."
+ },
+ {
"EventCode": "0x8483",
"EventName": "FP_BF16_FIXED_MIN_OPS_SPEC",
"BriefDescription": "Non-scalable element arithmetic operations speculatively executed, smallest type is BFloat16 floating-point."
@@ -1883,6 +1958,16 @@
"BriefDescription": "Non-scalable element arithmetic operations speculatively executed, smallest type is 8-bit floating-point."
},
{
+ "EventCode": "0x8488",
+ "EventName": "FP_SP_SCALE_MIN_OPS_SPEC",
+ "BriefDescription": "Scalable element arithmetic operations speculatively executed, smallest type is single-precision floating-point."
+ },
+ {
+ "EventCode": "0x848A",
+ "EventName": "FP_HP_SCALE_MIN_OPS_SPEC",
+ "BriefDescription": "Scalable element arithmetic operations speculatively executed, smallest type is half-precision floating-point."
+ },
+ {
"EventCode": "0x848B",
"EventName": "FP_BF16_SCALE_MIN_OPS_SPEC",
"BriefDescription": "Scalable element arithmetic operations speculatively executed, smallest type is BFloat16 floating-point."
diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv
index bb3fa8a33496..7f0eaa702048 100644
--- a/tools/perf/pmu-events/arch/arm64/mapfile.csv
+++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv
@@ -46,3 +46,4 @@
0x00000000500f0000,v1,ampere/emag,core
0x00000000c00fac30,v1,ampere/ampereone,core
0x00000000c00fac40,v1,ampere/ampereonex,core
+0x000000004e0f0100,v1,nvidia/t410,core
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/branch.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/branch.json
new file mode 100644
index 000000000000..ef4effc00ec3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/branch.json
@@ -0,0 +1,45 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED",
+ "PublicDescription": "This event counts branches which are speculatively executed and mispredicted."
+ },
+ {
+ "ArchStdEvent": "BR_PRED",
+ "PublicDescription": "This event counts all speculatively executed branches."
+ },
+ {
+ "EventCode": "0x017e",
+ "EventName": "BR_PRED_BTB_CTX_UPDATE",
+ "PublicDescription": "Branch context table update."
+ },
+ {
+ "EventCode": "0x0188",
+ "EventName": "BR_MIS_PRED_DIR_RESOLVED",
+ "PublicDescription": "Number of branch mispredictions due to direction misprediction."
+ },
+ {
+ "EventCode": "0x0189",
+ "EventName": "BR_MIS_PRED_DIR_UNCOND_RESOLVED",
+ "PublicDescription": "Number of branch misprediction due to direction misprediction for unconditional branches."
+ },
+ {
+ "EventCode": "0x018a",
+ "EventName": "BR_MIS_PRED_DIR_UNCOND_DIRECT_RESOLVED",
+ "PublicDescription": "Number of branch misprediction due to direction misprediction for unconditional direct branches."
+ },
+ {
+ "EventCode": "0x018b",
+ "EventName": "BR_PRED_MULTI_RESOLVED",
+ "PublicDescription": "Number of resolved branches whose prediction was made by the polymorphic indirect predictor."
+ },
+ {
+ "EventCode": "0x018c",
+ "EventName": "BR_MIS_PRED_MULTI_RESOLVED",
+ "PublicDescription": "Number of branch mispredictions whose prediction was made by the polymorphic indirect predictor."
+ },
+ {
+ "EventCode": "0x01e4",
+ "EventName": "BR_RGN_RECLAIM",
+ "PublicDescription": "This event counts the Indirect predictor entries flushed by region reclamation."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/brbe.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/brbe.json
new file mode 100644
index 000000000000..9c315b2d7046
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/brbe.json
@@ -0,0 +1,6 @@
+[
+ {
+ "ArchStdEvent": "BRB_FILTRATE",
+ "PublicDescription": "This event counts each valid branch record captured in the branch record buffer. Branch records that are not captured because they are removed by filtering are not counted."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/bus.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/bus.json
new file mode 100644
index 000000000000..5bb8de617c68
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/bus.json
@@ -0,0 +1,48 @@
+[
+ {
+ "ArchStdEvent": "BUS_ACCESS",
+ "PublicDescription": "This event counts the number of data-beat accesses between the CPU and the external bus. This count includes accesses due to read, write, and snoop. Each beat of data is counted individually."
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES",
+ "PublicDescription": "This event counts bus cycles in the CPU. Bus cycles represent a clock cycle in which a transaction could be sent or received on the interface from the CPU to the external bus. Since that interface is driven at the same clock speed as the CPU, this event increments at the rate of CPU clock. Regardless of the WFE/WFI state of the PE, this event increments on each processor clock."
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD",
+ "PublicDescription": "This event counts memory Read transactions seen on the external bus. Each beat of data is counted individually."
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR",
+ "PublicDescription": "This event counts memory Write transactions seen on the external bus. Each beat of data is counted individually."
+ },
+ {
+ "EventCode": "0x0154",
+ "EventName": "BUS_REQUEST_REQ",
+ "PublicDescription": "Bus request, request."
+ },
+ {
+ "EventCode": "0x0155",
+ "EventName": "BUS_REQUEST_RETRY",
+ "PublicDescription": "Bus request, retry."
+ },
+ {
+ "EventCode": "0x0198",
+ "EventName": "L2_CHI_CBUSY0",
+ "PublicDescription": "Number of RXDAT or RXRSP responses received with CBusy of 0."
+ },
+ {
+ "EventCode": "0x0199",
+ "EventName": "L2_CHI_CBUSY1",
+ "PublicDescription": "Number of RXDAT or RXRSP responses received with CBusy of 1."
+ },
+ {
+ "EventCode": "0x019a",
+ "EventName": "L2_CHI_CBUSY2",
+ "PublicDescription": "Number of RXDAT or RXRSP responses received with CBusy of 2."
+ },
+ {
+ "EventCode": "0x019b",
+ "EventName": "L2_CHI_CBUSY3",
+ "PublicDescription": "Number of RXDAT or RXRSP responses received with CBusy of 3."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/exception.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/exception.json
new file mode 100644
index 000000000000..ecd996c3610b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/exception.json
@@ -0,0 +1,62 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN",
+ "PublicDescription": "This event counts any taken architecturally visible exceptions such as IRQ, FIQ, SError, and other synchronous exceptions. Exceptions are counted whether or not they are taken locally."
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN",
+ "PublicDescription": "This event counts any architecturally executed exception return instructions. For example: AArch64: ERET."
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF",
+ "PublicDescription": "This event counts the number of synchronous exceptions which are taken locally that are due to attempting to execute an instruction that is UNDEFINED.\nAttempting to execute instruction bit patterns that have not been allocated.\nAttempting to execute instructions when they are disabled.\nAttempting to execute instructions at an inappropriate Exception level.\nAttempting to execute an instruction when the value of PSTATE.IL is 1."
+ },
+ {
+ "ArchStdEvent": "EXC_SVC",
+ "PublicDescription": "This event counts SVC exceptions taken locally."
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT",
+ "PublicDescription": "This event counts synchronous exceptions that are taken locally and caused by Instruction Aborts."
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT",
+ "PublicDescription": "This event counts exceptions that are taken locally and are caused by data aborts or SErrors. Conditions that could cause those exceptions are attempting to read or write memory where the MMU generates a fault, attempting to read or write memory with a misaligned address, Interrupts from the nSEI inputs and internally generated SErrors."
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ",
+ "PublicDescription": "This event counts IRQ exceptions including the virtual IRQs that are taken locally."
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ",
+ "PublicDescription": "This event counts FIQ exceptions including the virtual FIQs that are taken locally."
+ },
+ {
+ "ArchStdEvent": "EXC_SMC",
+ "PublicDescription": "This event counts SMC exceptions taken to EL3."
+ },
+ {
+ "ArchStdEvent": "EXC_HVC",
+ "PublicDescription": "This event counts HVC exceptions taken to EL2."
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT",
+ "PublicDescription": "This event counts exceptions which are traps not taken locally and are caused by Instruction Aborts. For example, attempting to execute an instruction with a misaligned PC."
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT",
+ "PublicDescription": "This event counts exceptions which are traps not taken locally and are caused by Data Aborts or SError Interrupts. Conditions that could cause those exceptions are:\n* Attempting to read or write memory where the MMU generates a fault,\n* Attempting to read or write memory with a misaligned address,\n* Interrupts from the SEI input,\n* Internally generated SErrors."
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER",
+ "PublicDescription": "This event counts the number of synchronous trap exceptions which are not taken locally and are not SVC, SMC, HVC, Data Aborts, Instruction Aborts, or Interrupts."
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ",
+ "PublicDescription": "This event counts IRQ exceptions including the virtual IRQs that are not taken locally."
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ",
+ "PublicDescription": "This event counts FIQs which are not taken locally but taken from EL0, EL1, or EL2 to EL3 (which would be the normal behavior for FIQs when not executing in EL3)."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/fp_operation.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/fp_operation.json
new file mode 100644
index 000000000000..3588e130781d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/fp_operation.json
@@ -0,0 +1,78 @@
+[
+ {
+ "ArchStdEvent": "FP_HP_SPEC",
+ "PublicDescription": "This event counts speculatively executed half precision floating point operations."
+ },
+ {
+ "ArchStdEvent": "FP_SP_SPEC",
+ "PublicDescription": "This event counts speculatively executed single precision floating point operations."
+ },
+ {
+ "ArchStdEvent": "FP_DP_SPEC",
+ "PublicDescription": "This event counts speculatively executed double precision floating point operations."
+ },
+ {
+ "ArchStdEvent": "FP_SCALE_OPS_SPEC",
+ "PublicDescription": "This event counts speculatively executed scalable single precision floating point operations."
+ },
+ {
+ "ArchStdEvent": "FP_FIXED_OPS_SPEC",
+ "PublicDescription": "This event counts speculatively executed non-scalable single precision floating point operations."
+ },
+ {
+ "ArchStdEvent": "FP_HP_SCALE_OPS_SPEC",
+ "PublicDescription": "This event increments by v for each speculatively executed scalable element arithmetic operation, due to an instruction where the largest type was half-precision floating-point, where v is a value such that (v*(VL/128)) is the number of arithmetic operations carried out by the operation or instruction which causes the counter to increment.\nThis event does not count operations that are counted by FP_FIXED_OPS_SPEC or FP_SCALE2_OPS_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_HP_FIXED_OPS_SPEC",
+ "PublicDescription": "This event increments by v for each speculatively executed non-scalable element arithmetic operation, due to an instruction where the largest type was half-precision floating-point, where v is the number of arithmetic operations carried out by the operation or instruction which causes the event to increment.\nThis event does not count operations that are counted by FP_SCALE_OPS_SPEC or FP_SCALE2_OPS_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_SP_SCALE_OPS_SPEC",
+ "PublicDescription": "This event increments by v for each speculatively executed scalable element arithmetic operation, due to an instruction where the largest type was single-precision floating-point, where v is a value such that (v*(VL/128)) is the number of arithmetic operations carried out by the operation or instruction which causes the event to increment.\nThis event does not count operations that are counted by FP_FIXED_OPS_SPEC or FP_SCALE2_OPS_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_SP_FIXED_OPS_SPEC",
+ "PublicDescription": "This event increments by v for each speculatively executed non-scalable element arithmetic operation, due to an instruction where the largest type was single-precision floating-point, where v is the number of arithmetic operations carried out by the operation or instruction which causes the event to increment.\nThis event does not count operations that are counted by FP_SCALE_OPS_SPEC or FP_SCALE2_OPS_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_DP_SCALE_OPS_SPEC",
+ "PublicDescription": "This event increments by v for each speculatively executed scalable element arithmetic operation, due to an instruction where the largest type was double-precision floating-point, where v is a value such that (v*(VL/128)) is the number of arithmetic operations carried out by the operation or instruction which causes the event to increment.\nThis event does not count operations that are counted by FP_FIXED_OPS_SPEC or FP_SCALE2_OPS_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_DP_FIXED_OPS_SPEC",
+ "PublicDescription": "This event increments by v for each speculatively executed non-scalable element arithmetic operation, due to an instruction where the largest type was double-precision floating-point, where v is the number of arithmetic operations carried out by the operation or instruction which causes the event to increment.\nThis event does not count operations that are counted by FP_SCALE_OPS_SPEC or FP_SCALE2_OPS_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_SP_FIXED_MIN_OPS_SPEC",
+ "PublicDescription": "This event increments by v for each speculatively executed non-scalable element arithmetic operation, due to an instruction where the smallest type was single-precision floating-point, where v is the number of arithmetic operations carried out by the operation or instruction which causes the event to increment.\nThis event does not count operations that are counted by FP_SCALE_OPS_SPEC or FP_SCALE2_OPS_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_HP_FIXED_MIN_OPS_SPEC",
+ "PublicDescription": "This event increments by v for each speculatively executed non-scalable element arithmetic operation, due to an instruction where the smallest type was half-precision floating-point, where v is the number of arithmetic operations carried out by the operation or instruction which causes the event to increment.\nThis event does not count operations that are counted by FP_SCALE_OPS_SPEC or FP_SCALE2_OPS_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_BF16_FIXED_MIN_OPS_SPEC",
+ "PublicDescription": "This event increments by v for each speculatively executed non-scalable element arithmetic operation, due to an instruction where the smallest type was BFloat16 floating-point. Where v is the number of arithmetic operations carried out by the operation or instruction which causes the event to increment. This event does not count operations that are counted by FP_SCALE_OPS_SPEC or FP_SCALE2_OPS_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_FP8_FIXED_MIN_OPS_SPEC",
+ "PublicDescription": "This event increments by v for each speculatively executed non-scalable element arithmetic operation, due to an instruction where the smallest type was 8-bit floating-point, where v is the number of arithmetic operations carried out by the operation or instruction which causes the event to increment.\nThis event does not count operations that are counted by FP_SCALE_OPS_SPEC or FP_SCALE2_OPS_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_SP_SCALE_MIN_OPS_SPEC",
+ "PublicDescription": "This event increments by v for each speculatively executed scalable element arithmetic operation, due to an instruction where the smallest type was single-precision floating-point, where v is a value such that (v*(VL/128)) is the number of arithmetic operations carried out by the operation or instruction which causes the event to increment.\nThis event does not count operations that are counted by FP_FIXED_OPS_SPEC or FP_SCALE2_OPS_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_HP_SCALE_MIN_OPS_SPEC",
+ "PublicDescription": "This event increments by v for each speculatively executed scalable element arithmetic operation, due to an instruction where the smallest type was half-precision floating-point, where v is a value such that (v*(VL/128)) is the number of arithmetic operations carried out by the operation or instruction which causes the event to increment.\nThis event does not count operations that are counted by FP_FIXED_OPS_SPEC or FP_SCALE2_OPS_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_BF16_SCALE_MIN_OPS_SPEC",
+ "PublicDescription": "This event increments by v for each speculatively executed scalable element arithmetic operation, due to an instruction where the smallest type was BFloat16 floating-point, where v is a value such that (v*(VL/128)) is the number of arithmetic operations carried out by the operation or instruction which causes the event to increment.\nThis event does not count operations that are counted by FP_FIXED_OPS_SPEC or FP_SCALE2_OPS_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_FP8_SCALE_MIN_OPS_SPEC",
+ "PublicDescription": "This event increments by v for each speculatively executed scalable element arithmetic operation, due to an instruction where the smallest type was 8-bit floating-point, where v is a value such that (v*(VL/128)) is the number of arithmetic operations carried out by the operation or instruction which causes the event to increment.\nThis event does not count operations that are counted by FP_FIXED_OPS_SPEC or FP_SCALE2_OPS_SPEC."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/general.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/general.json
new file mode 100644
index 000000000000..bd9c248387aa
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/general.json
@@ -0,0 +1,15 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES",
+ "PublicDescription": "This event counts CPU clock cycles when the PE is not in WFE/WFI. The clock measured by this event is defined as the physical clock driving the CPU logic."
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES",
+ "PublicDescription": "This event increments at a constant frequency equal to the rate of increment of the System Counter, CNTPCT_EL0.\nThis event does not increment when the PE is in WFE/WFI."
+ },
+ {
+ "EventCode": "0x01e1",
+ "EventName": "CPU_SLOT",
+ "PublicDescription": "Entitled CPU slots.\nThis event counts the number of slots. When in ST mode, this event shall increment by PMMIR_EL1.SLOTS quantities, and when in SMT partitioned resource mode (regardless of in WFI state or otherwise), this event is incremented by PMMIR_EL1.SLOTS/2 quantities."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/l1d_cache.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/l1d_cache.json
new file mode 100644
index 000000000000..ed6f764eff24
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/l1d_cache.json
@@ -0,0 +1,122 @@
+[
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL",
+ "PublicDescription": "This event counts L1 D-cache refills caused by speculatively executed load or store operations, preload instructions, or hardware cache prefetching that missed in the L1 D-cache. This event only counts one event per cache line.\nSince the caches are Write-back only for this processor, there are no Write-through cache accesses."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE",
+ "PublicDescription": "This event counts L1 D-cache accesses from any load/store operations, software preload, or hardware prefetch operations. Atomic operations that resolve in the CPU's caches (near atomic operations) count as both a write access and read access. Each access to a cache line is counted including the multiple accesses caused by single instructions such as LDM or STM. Each access to other L1 data or unified memory structures, for example refill buffers, write buffers, and write-back buffers, are also counted.\nThis event counts the sum of the following events:\nL1D_CACHE_RD,\nL1D_CACHE_WR,\nL1D_CACHE_PRFM, and\nL1D_CACHE_HWPRF."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB",
+ "PublicDescription": "This event counts write-backs of dirty data from the L1 D-cache to the L2 cache. This occurs when either a dirty cache line is evicted from L1 D-cache and allocated in the L2 cache or dirty data is written to the L2 and possibly to the next level of cache. This event counts both victim cache line evictions and cache write-backs from snoops or cache maintenance operations. The following cache operations are not counted:\n* Invalidations which do not result in data being transferred out of the L1 (such as evictions of clean data),\n* Full line writes which write to L2 without writing L1, such as write streaming mode.\nThis event is the sum of the following events:\nL1D_CACHE_WB_CLEAN and\nL1D_CACHE_WB_VICTIM."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD",
+ "PublicDescription": "This event counts cache line refills into the L1 D-cache from any memory Read operations, that incurred additional latency.\nCounts same as L1D_CACHE_REFILL_RD on this CPU."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD",
+ "PublicDescription": "This event counts L1 D-cache accesses from any Load operation. Atomic Load operations that resolve in the CPU's caches count as both a write access and read access."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR",
+ "PublicDescription": "This event counts L1 D-cache accesses generated by Store operations. This event also counts accesses caused by a DC ZVA (D-cache zero, specified by virtual address) instruction. Near atomic operations that resolve in the CPU's caches count as a write access and read access.\nThis event is a subset of the L1D_CACHE event, except this event only counts memory Write operations."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD",
+ "PublicDescription": "This event counts L1 D-cache refills caused by speculatively executed Load instructions where the memory Read operation misses in the L1 D-cache. This event only counts one event per cache line.\nThis event is a subset of the L1D_CACHE_REFILL event, but only counts memory Read operations. This event does not count reads caused by cache maintenance operations or preload instructions."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR",
+ "PublicDescription": "This event counts L1 D-cache refills caused by speculatively executed Store instructions where the memory Write operation misses in the L1 D-cache. This event only counts one event per cache line.\nThis event is a subset of the L1D_CACHE_REFILL event, but only counts memory Write operations."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER",
+ "PublicDescription": "This event counts L1 D-cache refills (L1D_CACHE_REFILL) where the cache line data came from caches inside the immediate Cluster of the Core (L2 cache)."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER",
+ "PublicDescription": "This event counts L1 D-cache refills (L1D_CACHE_REFILL) for which the cache line data came from outside the immediate Cluster of the Core, like an SLC in the system interconnect or DRAM or remote socket."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM",
+ "PublicDescription": "This event counts dirty cache line evictions from the L1 D-cache caused by a new cache line allocation. This event does not count evictions caused by cache maintenance operations.\nThis event is a subset of the L1D_CACHE_WB event, but only counts write-backs that are a result of the line being allocated for an access made by the CPU."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN",
+ "PublicDescription": "This event counts write-backs from the L1 D-cache that are a result of a coherency operation made by another CPU. Event counts include cache maintenance operations.\nThis event is a subset of the L1D_CACHE_WB event."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL",
+ "PublicDescription": "This event counts each explicit invalidation of a cache line in the L1 D-cache caused by:\n* Cache Maintenance Operations (CMO) that operate by a virtual address.\n* Broadcast cache coherency operations from another CPU in the system.\nThis event does not count for the following conditions:\n* A cache refill invalidates a cache line.\n* A CMO which is executed on that CPU and invalidates a cache line specified by Set/Way.\nNote that CMOs that operate by Set/Way cannot be broadcast from one CPU to another."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RW",
+ "PublicDescription": "This event counts L1 data demand cache accesses from any Load or Store operation. Near atomic operations that resolve in the CPU's caches count as both a write access and read access.\nThis event is implemented as L1D_CACHE_RD + L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_PRFM",
+ "PublicDescription": "This event counts L1 D-cache accesses from software preload or prefetch instructions."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_MISS",
+ "PublicDescription": "This event counts each demand access counted by L1D_CACHE_RW that misses in the L1 Data or unified cache, causing an access to outside of the L1 caches of this PE."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_PRFM",
+ "PublicDescription": "This event counts L1 D-cache refills where the cache line access was generated by software preload or prefetch instructions."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_HWPRF",
+ "PublicDescription": "This event counts L1 D-cache accesses from any Load/Store operations generated by the hardware prefetcher."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_HWPRF",
+ "PublicDescription": "This event counts each hardware prefetch access counted by L1D_CACHE_HWPRF that causes a refill of the L1 D-cache from outside of the L1 D-cache."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_HIT_RW_FPRFM",
+ "PublicDescription": "This event counts each demand access first hit counted by L1D_CACHE_HIT_RW_FPRF where the cache line was fetched in response to a prefetch instruction. That is, the L1D_CACHE_REFILL_PRFM event was generated when the cache line was fetched into the cache.\nOnly the first hit by a demand access is counted. After this event is generated for a cache line, the event is not generated again for the same cache line while it remains in the cache."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_HIT_RW_FHWPRF",
+ "PublicDescription": "This event counts each demand access first hit counted by L1D_CACHE_HIT_RW_FPRF where the cache line was fetched by a hardware prefetcher. That is, the L1D_CACHE_REFILL_HWPRF Event was generated when the cache line was fetched into the cache.\nOnly the first hit by a demand access is counted. After this event is generated for a cache line, the event is not generated again for the same cache line while it remains in the cache."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_HIT_RW_FPRF",
+ "PublicDescription": "This event counts each demand access first hit counted by L1D_CACHE_HIT_RW where the cache line was fetched in response to a prefetch instruction or by a hardware prefetcher. That is, the L1D_CACHE_REFILL_PRF event was generated when the cache line was fetched into the cache.\nOnly the first hit by a demand access is counted. After this event is generated for a cache line, the event is not generated again for the same cache line while it remains in the cache."
+ },
+ {
+ "ArchStdEvent": "L1D_LFB_HIT_RW_FPRFM",
+ "PublicDescription": "This event counts each demand access line-fill buffer first hit counted by L1D_LFB_HIT_RW_FPRF where the cache line was fetched in response to a prefetch instruction. That is, the access hits a cache line that is in the process of being loaded into the L1 D-cache, and so does not generate a new refill, but has to wait for the previous refill to complete, and the L1D_CACHE_REFILL_PRFM event was generated when the cache line was fetched into the cache.\nOnly the first hit by a demand access is counted. After this event is generated for a cache line, the event is not generated again for the same cache line while it remains in the cache."
+ },
+ {
+ "ArchStdEvent": "L1D_LFB_HIT_RW_FHWPRF",
+ "PublicDescription": "This event counts each demand access line-fill buffer first hit counted by L1D_LFB_HIT_RW_FPRF, where the cache line was fetched by a hardware prefetcher. That is, the access hits a cache line that is in the process of being loaded into the L1 D-cache, and so does not generate a new refill, but has to wait for the previous refill to complete, and the L1D_CACHE_REFILL_HWPRF Event was generated when the cache line was fetched into the cache.\nOnly the first hit by a demand access is counted. After this event is generated for a cache line, the event is not generated again for the same cache line while it remains in the cache."
+ },
+ {
+ "ArchStdEvent": "L1D_LFB_HIT_RW_FPRF",
+ "PublicDescription": "This event counts each demand access line-fill buffer first hit counted by L1D_LFB_HIT_RW where the cache line was fetched in response to a prefetch instruction or by a hardware prefetcher. That is, the access hits a cache line that is in the process of being loaded into the L1 D-cache, and so does not generate a new refill, but has to wait for the previous refill to complete, and the L1D_CACHE_REFILL_PRF event was generated when the cache line was fetched into the cache.\nOnly the first hit by a demand access is counted. After this event is generated for a cache line, the event is not generated again for the same cache line while it remains in the cache."
+ },
+ {
+ "EventCode": "0x01f5",
+ "EventName": "L1D_CACHE_REFILL_RW",
+ "PublicDescription": "L1 D-cache refill, demand Read and Write. This event counts demand Read and Write accesses that causes a refill of the L1 D-cache of this PE, from outside of this cache."
+ },
+ {
+ "EventCode": "0x0204",
+ "EventName": "L1D_CACHE_REFILL_OUTER_LLC",
+ "PublicDescription": "This event counts L1D_CACHE_REFILL from L3 D-cache."
+ },
+ {
+ "EventCode": "0x0205",
+ "EventName": "L1D_CACHE_REFILL_OUTER_DRAM",
+ "PublicDescription": "This event counts L1D_CACHE_REFILL from local memory."
+ },
+ {
+ "EventCode": "0x0206",
+ "EventName": "L1D_CACHE_REFILL_OUTER_REMOTE",
+ "PublicDescription": "This event counts L1D_CACHE_REFILL from a remote memory."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/l1i_cache.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/l1i_cache.json
new file mode 100644
index 000000000000..952454004d98
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/l1i_cache.json
@@ -0,0 +1,114 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL",
+ "PublicDescription": "This event counts cache line refills in the L1 I-cache caused by a missed instruction fetch (demand, hardware prefetch, and software preload accesses). Instruction fetches may include accessing multiple instructions, but the single cache line allocation is counted once."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE",
+ "PublicDescription": "This event counts instruction fetches (demand, hardware prefetch, and software preload accesses) which access the L1 Instruction Cache. Instruction Cache accesses caused by cache maintenance operations are not counted."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS",
+ "PublicDescription": "This event counts cache line refills into the L1 I-cache, that incurred additional latency.\nCounts the same as L1I_CACHE_REFILL in this CPU."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_RD",
+ "PublicDescription": "This event counts demand instruction fetches which access the L1 I-cache."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_PRFM",
+ "PublicDescription": "This event counts instruction fetches generated by software preload or prefetch instructions which access the L1 I-cache."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_HWPRF",
+ "PublicDescription": "This event counts instruction fetches which access the L1 I-cache generated by the hardware prefetcher."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL_PRFM",
+ "PublicDescription": "This event counts cache line refills in the L1 I-cache caused by a missed instruction fetch generated by software preload or prefetch instructions. Instruction fetches may include accessing multiple instructions, but the single cache line allocation is counted once."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL_HWPRF",
+ "PublicDescription": "This event counts each hardware prefetch access counted by L1I_CACHE_HWPRF that causes a refill of the Level 1 I-cache from outside of the L1 I-cache."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_HIT_RD",
+ "PublicDescription": "This event counts demand instruction fetches that access the L1 I-cache and hit in the L1 I-cache."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_HIT_RD_FPRF",
+ "PublicDescription": "This event counts each demand fetch first hit counted by L1I_CACHE_HIT_RD where the cache line was fetched in response to a software preload or by a hardware prefetcher. That is, the L1I_CACHE_REFILL_PRF event was generated when the cache line was fetched into the cache.\nOnly the first hit by a demand access is counted. After this event is generated for a cache line, the event is not generated again for the same cache line while it remains in the cache."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_HIT",
+ "PublicDescription": "This event counts instruction fetches that access the L1 I-cache (demand, hardware prefetch, and software preload accesses) and hit in the L1 I-cache. I-cache accesses caused by cache maintenance operations are not counted."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_HIT_PRFM",
+ "PublicDescription": "This event counts instruction fetches generated by software preload or prefetch instructions that access the L1 I-cache and hit in the L1 I-cache."
+ },
+ {
+ "ArchStdEvent": "L1I_LFB_HIT_RD",
+ "PublicDescription": "This event counts demand instruction fetches that access the L1 I-cache and hit in a line that is in the process of being loaded into the L1 I-cache."
+ },
+ {
+ "EventCode": "0x0174",
+ "EventName": "L1I_HWPRF_REQ_DROP",
+ "PublicDescription": "L1 I-cache hardware prefetch dropped."
+ },
+ {
+ "EventCode": "0x01e3",
+ "EventName": "L1I_CACHE_REFILL_RD",
+ "PublicDescription": "L1 I-cache refill, Read.\nThis event counts demand instruction fetch that causes a refill of the L1 I-cache of this PE, from outside of this cache."
+ },
+ {
+ "EventCode": "0x01ea",
+ "EventName": "L1I_CFC_ENTRIES",
+ "PublicDescription": "This event counts the CFC (Cache Fill Control) entries.\nThe CFC is the fill buffer for I-cache."
+ },
+ {
+ "EventCode": "0x01ef",
+ "EventName": "L1I_CACHE_INVAL",
+ "PublicDescription": "L1 I-cache invalidate.\nThis event counts each explicit invalidation of a cache line in the L1 I-cache caused by:\n* Broadcast cache coherency operations from another CPU in the system.\n* Invalidation due to capacity eviction in L2 D-cache.\nThis event does not count for the following conditions:\n* A cache refill invalidates a cache line.\n* A CMO which is executed on that CPU Core and invalidates a cache line specified by Set/Way.\n* Cache Maintenance Operations (CMO) that operate by a virtual address.\nNote that\n* CMOs that operate by Set/Way cannot be broadcast from one CPU Core to another.\n* The CMO is treated as No-op for the purposes of L1 I-cache line invalidation, as this Core implements fully coherent I-cache."
+ },
+ {
+ "EventCode": "0x0212",
+ "EventName": "L1I_CACHE_HIT_HWPRF",
+ "PublicDescription": "This event counts each hardware prefetch access that hits an L1 I-cache."
+ },
+ {
+ "EventCode": "0x0215",
+ "EventName": "L1I_LFB_HIT",
+ "PublicDescription": "L1 Line fill buffer hit.\nThis event counts each Demand or software preload or hardware prefetch induced instruction fetch that hits an L1 I-cache line that is in the process of being loaded into the L1 instruction cache, and so does not generate a new refill, but has to wait for the previous refill to complete."
+ },
+ {
+ "EventCode": "0x0216",
+ "EventName": "L1I_LFB_HIT_PRFM",
+ "PublicDescription": "This event counts each software prefetch access that hits a cache line that is in the process of being loaded into the L1 instruction cache, and so does not generate a new refill, but has to wait for the previous refill to complete."
+ },
+ {
+ "EventCode": "0x0219",
+ "EventName": "L1I_LFB_HIT_HWPRF",
+ "PublicDescription": "This event counts each hardware prefetch access that hits a cache line that is in the process of being loaded into the L1 instruction cache, and so does not generate a new refill, but has to wait for the previous refill to complete."
+ },
+ {
+ "EventCode": "0x0221",
+ "EventName": "L1I_PRFM_REQ",
+ "PublicDescription": "L1 I-cache software prefetch requests."
+ },
+ {
+ "EventCode": "0x0222",
+ "EventName": "L1I_HWPRF_REQ",
+ "PublicDescription": "L1 I-cache hardware prefetch requests."
+ },
+ {
+ "EventCode": "0x0228",
+ "EventName": "L1I_CACHE_HIT_PRFM_FPRF",
+ "PublicDescription": "L1 I-cache software prefetch access first hit, fetched by hardware or software prefetch.\nThis event counts each software preload access first hit where the cache line was fetched in response to a hardware prefetcher or software preload instruction.\nOnly the first hit is counted. After this event is generated for a cache line, the event is not generated again for the same cache line while it remains in the cache."
+ },
+ {
+ "EventCode": "0x022a",
+ "EventName": "L1I_CACHE_HIT_HWPRF_FPRF",
+ "PublicDescription": "L1 I-cache hardware prefetch access first hit, fetched by hardware or software prefetch.\nThis event counts each hardware prefetch access first hit where the cache line was fetched in response to a hardware prefetcher or software prefetch instruction.\nOnly the first hit is counted. After this event is generated for a cache line, the event is not generated again for the same cache line while it remains in the cache."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/l2d_cache.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/l2d_cache.json
new file mode 100644
index 000000000000..66f21a94381e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/l2d_cache.json
@@ -0,0 +1,134 @@
+[
+ {
+ "ArchStdEvent": "L2D_CACHE",
+ "PublicDescription": "This event counts accesses to the L2 cache due to data accesses. L2 cache is a unified cache for data and instruction accesses. Accesses are for misses in the L1 D-cache or translation resolutions due to accesses. This event also counts write-back of dirty data from L1 D-cache to the L2 cache.\nI-cache accesses are included in this event. This event is the sum of the following events:\nL2D_CACHE_RD,\nL2D_CACHE_WR,\nL2D_CACHE_PRFM, and\nL2D_CACHE_HWPRF."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL",
+ "PublicDescription": "This event counts cache line refills into the L2 cache. L2 cache is a unified cache for data and instruction accesses. Accesses are for misses in the L1 D-cache or translation resolutions due to accesses.\nI-cache refills are included in this event. This event is the sum of the following events:\nL2D_CACHE_REFILL_RD,\nL2D_CACHE_REFILL_WR,\nL2D_CACHE_REFILL_HWPRF, and\nL2D_CACHE_REFILL_PRFM."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB",
+ "PublicDescription": "This event counts write-backs of data from the L2 cache to outside the CPU. This includes snoops to the L2 (from other CPUs) which return data even if the snoops cause an invalidation. L2 cache line invalidations which do not write data outside the CPU and snoops which return data from an L1 cache are not counted. Data would not be written outside the cache when invalidating a clean cache line.\nThis event is the sum of the following events:\nL2D_CACHE_WB_VICTIM and\nL2D_CACHE_WB_CLEAN."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD",
+ "PublicDescription": "This event counts L2 D-cache accesses due to memory Read operations. L2 cache is a unified cache for data and instruction accesses, accesses are for misses in the L1 D-cache or translation resolutions due to accesses.\nI-cache accesses are included in this event. This event is a subset of the L2D_CACHE event, but this event only counts memory Read operations."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR",
+ "PublicDescription": "This event counts L2 cache accesses due to memory Write operations. L2 cache is a unified cache for data and instruction accesses, accesses are for misses in the L1 D-cache or translation resolutions due to accesses.\nThis event is a subset of the L2D_CACHE event, but this event only counts memory Write operations."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD",
+ "PublicDescription": "This event counts refills for memory accesses due to memory Read operation counted by L2D_CACHE_RD. L2 cache is a unified cache for data and instruction accesses, accesses are for misses in the L1 D-cache or translation resolutions due to accesses.\nThis CPU includes I-cache refills in this counter as an L2I equivalent event was not implemented. This event is a subset of the L2D_CACHE_REFILL event. This event does not count L2 refills caused by stashes into L2.\nThis count includes demand requests that encounter an L2 prefetch request or an L2 software prefetch request to the same cache line, which is still pending in the L2 LFB."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR",
+ "PublicDescription": "This event counts refills for memory accesses due to memory Write operation counted by L2D_CACHE_WR. L2 cache is a unified cache for data and instruction accesses, accesses are for misses in the L1 D-cache or translation resolutions due to accesses.\nThis count includes demand requests that encounter an L2 prefetch request or an L2 software prefetch request to the same cache line, which is still pending in the L2 LFB."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM",
+ "PublicDescription": "This event counts evictions from the L2 cache because of a line being allocated into the L2 cache.\nThis event is a subset of the L2D_CACHE_WB event."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN",
+ "PublicDescription": "This event counts write-backs from the L2 cache that are a result of any of the following:\n* Cache maintenance operations,\n* Snoop responses, or\n* Direct cache transfers to another CPU due to a forwarding snoop request.\nThis event is a subset of the L2D_CACHE_WB event."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL",
+ "PublicDescription": "This event counts each explicit invalidation of a cache line in the L2 cache by cache maintenance operations that operate by a virtual address, or by external coherency operations. This event does not count if either:\n* A cache refill invalidates a cache line, or\n* A cache Maintenance Operation (CMO), which invalidates a cache line specified by Set/Way,\nis executed on that CPU.\nCMOs that operate by Set/Way cannot be broadcast from one CPU to another."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD",
+ "PublicDescription": "This event counts cache line refills into the L2 unified cache from any memory Read operations that incurred additional latency.\nCounts the same as L2D_CACHE_REFILL_RD in this CPU"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RW",
+ "PublicDescription": "This event counts L2 cache demand accesses from any Load/Store operations. L2 cache is a unified cache for data and instruction accesses, accesses are for misses in the L1 D-cache or translation resolutions due to accesses.\nI-cache accesses are included in this event.\nThis event is the sum of the following events:\nL2D_CACHE_RD and\nL2D_CACHE_WR."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_PRFM",
+ "PublicDescription": "This event counts L2 D-cache accesses generated by software preload or prefetch instructions with target = L1/L2/L3 cache.\nNote that a software preload or prefetch instructions with (target = L1/L2/L3) that hits in L1D will not result in an L2 D-cache access. Therefore, such a software preload or prefetch instructions will not be counted by this event."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_MISS",
+ "PublicDescription": "This event counts cache line misses in the L2 cache. L2 cache is a unified cache for data and instruction accesses. Accesses are for misses in the L1 D-cache or translation resolutions due to accesses.\nThis event counts the same as L2D_CACHE_REFILL_RD in this CPU."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_PRFM",
+ "PublicDescription": "This event counts refills due to accesses generated as a result of software preload or prefetch instructions as counted by L2D_CACHE_PRFM. I-cache refills are included in this event."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_HWPRF",
+ "PublicDescription": "This event counts the L2 D-cache access caused by L1 or L2 hardware prefetcher."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_HWPRF",
+ "PublicDescription": "This event counts each hardware prefetch access counted by L2D_CACHE_HWPRF that causes a refill of the L2 cache, or any L1 Data, or Instruction cache of this PE, from outside of those caches.\nThis does not include prefetch requests pending waiting for a refill in LFB and a new demand request to the same cache line hitting the LFB entry. All such refills are counted as L2D_LFB_HIT_RWL1PRF_FHWPRF."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_PRF",
+ "PublicDescription": "This event counts each access to L2 Cache due to a prefetch instruction, or hardware prefetch that causes a refill of the L2 or any Level 1, from outside of those caches."
+ },
+ {
+ "EventCode": "0x0108",
+ "EventName": "L2D_CACHE_IF_REFILL",
+ "PublicDescription": "L2 D-cache refill, instruction fetch.\nThis event counts demand instruction fetch that causes a refill of the L2 cache or L1 cache of this PE, from outside of those caches."
+ },
+ {
+ "EventCode": "0x0109",
+ "EventName": "L2D_CACHE_TBW_REFILL",
+ "PublicDescription": "L2 D-cache refill, Page table walk.\nThis event counts demand translation table walk that causes a refill of the L2 cache or L1 cache of this PE, from outside of those caches."
+ },
+ {
+ "EventCode": "0x010a",
+ "EventName": "L2D_CACHE_PF_REFILL",
+ "PublicDescription": "L2 D-cache refill, prefetch.\nThis event counts L1 or L2 hardware or software prefetch accesses that causes a refill of the L2 cache or L1 cache of this PE, from outside of those caches."
+ },
+ {
+ "EventCode": "0x010b",
+ "EventName": "L2D_LFB_HIT_RWL1PRF_FHWPRF",
+ "PublicDescription": "L2 line fill buffer demand Read, demand Write or L1 prefetch first hit, fetched by hardware prefetch.\nThis event counts each of the following access that hit the line-fill buffer when the same cache line is already being fetched due to an L2 hardware prefetcher.\n* Demand Read or Write\n* L1I-HWPRF\n* L1D-HWPRF\n* L1I PRFM\n* L1D PRFM\nThese accesses hit a cache line that is currently being loaded into the L2 cache as a result of a hardware prefetcher to the same line. Consequently, this access does not initiate a new refill but waits for the completion of the previous refill.\nOnly the first hit is counted. After this event is generated for a cache line, the event is not generated again for the same cache line while it remains in the cache."
+ },
+ {
+ "EventCode": "0x0179",
+ "EventName": "L2D_CACHE_HIT_RWL1PRF_FHWPRF",
+ "PublicDescription": "L2 D-cache demand Read, demand Write and L1 prefetch hit, fetched by hardware prefetch. This event counts each demand Read, demand Write and L1 hardware or software prefetch request that hit an L2 D-cache line that was refilled into L2 D-cache in response to an L2 hardware prefetch. Only the first hit is counted. After this event is generated for a cache line, the event is not generated again for the same cache line while it remains in the cache."
+ },
+ {
+ "EventCode": "0x01b8",
+ "EventName": "L2D_CACHE_L1PRF",
+ "PublicDescription": "L2 D-cache access, L1 hardware or software prefetch. This event counts L1 Hardware or software prefetch access to L2 D-cache."
+ },
+ {
+ "EventCode": "0x01b9",
+ "EventName": "L2D_CACHE_REFILL_L1PRF",
+ "PublicDescription": "L2 D-cache refill, L1 hardware or software prefetch.\nThis event counts each access counted by L2D_CACHE_L1PRF that causes a refill of the L2 cache or any L1 cache of this PE, from outside of those caches."
+ },
+ {
+ "EventCode": "0x0201",
+ "EventName": "L2D_CACHE_BACKSNOOP_L1D_VIRT_ALIASING",
+ "PublicDescription": "This event counts when the L2 D-cache sends an invalidating back-snoop to the L1 D for an access initiated by the L1 D, where the corresponding line is already present in the L1 D-cache.\nThe L2 D-cache line tags the PE that refilled the line. It also retains specific bits of the VA to identify virtually aliased addresses.\nThe L1 D request requiring a back-snoop can originate either from the same PE that refilled the L2 D line or from a different PE. In either case, this event only counts those back snoop where the requested VA mismatch the VA stored in the L2 D tag.\nThis event is counted only by PE that initiated the original request necessitating a back-snoop.\nNote : The L1 D is VIPT, it identifies this access as a miss. Conversely, as L2 is PIPT, it identifies this as a hit. L2 D utilizes the back-snoop mechanism to refill L1 D with the snooped data."
+ },
+ {
+ "EventCode": "0x0208",
+ "EventName": "L2D_CACHE_RWL1PRF",
+ "PublicDescription": "L2 D-cache access, demand Read, demand Write or L1 hardware or software prefetch.\nThis event counts each access to L2 D-cache due to the following:\n* Demand Read or Write.\n* L1 Hardware or software prefetch."
+ },
+ {
+ "EventCode": "0x020a",
+ "EventName": "L2D_CACHE_REFILL_RWL1PRF",
+ "PublicDescription": "L2 D-cache refill, demand Read, demand Write or L1 hardware or software prefetch.\nThis event counts each access counted by L2D_CACHE_RWL1PRF that causes a refill of the L2 cache, or any L1 cache of this PE, from outside of those caches."
+ },
+ {
+ "EventCode": "0x020c",
+ "EventName": "L2D_CACHE_HIT_RWL1PRF_FPRFM",
+ "PublicDescription": "L2 D-cache demand Read, demand Write and L1 prefetch hit, fetched by software prefetch.\nThis event counts each demand Read, demand Write and L1 hardware or software prefetch request that hit an L2 D-cache line that was refilled into L2 D-cache in response to an L2 software prefetch. Only the first hit is counted. After this event is generated for a cache line, the event is not generated again for the same cache line while it remains in the cache."
+ },
+ {
+ "EventCode": "0x020e",
+ "EventName": "L2D_CACHE_HIT_RWL1PRF_FPRF",
+ "PublicDescription": "L2 D-cache demand Read, demand Write and L1 prefetch hit, fetched by software or hardware prefetch.\nThis event counts each demand Read, demand Write and L1 hardware or software prefetch request that hit an L2 D-cache line that was refilled into L2 D-cache in response to an L2 hardware prefetch or software prefetch. Only the first hit is counted. After this event is generated for a cache line, the event is not generated again for the same cache line while it remains in the cache."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/ll_cache.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/ll_cache.json
new file mode 100644
index 000000000000..851d0a70de9c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/ll_cache.json
@@ -0,0 +1,107 @@
+[
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE",
+ "PublicDescription": "This event counts each memory Write operation that writes an entire line into the L3 data without fetching data from outside the L3 Data. These are allocations of cache lines in the L3 Data that are not refills counted by\nL3D_CACHE_REFILL. For example:\nA Write-back of an entire cache line from an L2 cache to the L3 D-cache.\n* A Write of an entire cache line from a coalescing Write buffer.\n* An operation such as DC ZVA.\nThis counter does not count writes that write an entire line to beyond level 3. Thus this counter does not count the streaming writes to beyond L3 cache."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL",
+ "PublicDescription": "This event counts each access counted by L3D_CACHE that causes a refill of the L3 Data, or any L1 Data, instruction or L2 cache of this PE, from outside of those caches. This includes the refill due to hardware prefetch and software prefetch accesses.\nThis event is a sum of L3D_CACHE_MISS, L3D_CACHE_REFILL_PRFM and L3D_CACHE_REFILL_HWPRF event.\nA refill includes any access that causes data to be fetched from outside of the L1 to L3 caches, even if the data is ultimately not allocated into the L3 D-cache."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE",
+ "PublicDescription": "This event counts each memory Read operation or memory Write operation that causes a cache access to the Level 3.\nThis event is a sum of the following Events:\n* L3D_CACHE_RD(0x00a0)\n* L3D_CACHE_ALLOCATE(0x0029)\n* L3D_CACHE_PRFM(0x8151)\n* L3D_CACHE_HWPRF(0x8156)\n* L2D_CACHE_WB(0x0018)"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD",
+ "PublicDescription": "This is an alias to the event L3D_CACHE_RD (0x00a0)."
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD",
+ "PublicDescription": "This is an alias to the event L3D_CACHE_REFILL_RD (0x00a2)."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD",
+ "PublicDescription": "This event counts each Memory Read operation to L3 D-cache from instruction fetch, Load/Store, and MMU translation table accesses. This does not include hardware prefetcher or PRFM instruction accesses. This include L1 and L2 prefetcher accesses to L3 D-cache."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL_RD",
+ "PublicDescription": "This event counts each access counted by both L3D_CACHE_RD and L3D_CACHE_REFILL. That is, every refill of the L3 cache counted by L3D_CACHE_REFILL that is caused by a Memory Read operation.\nThe L3D_CACHE_MISS(0x8152), L3D_CACHE_REFILL_RD (0x00a2) and L3D_CACHE_LMISS_RD(0x400b) count the same event in the hardware."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD",
+ "PublicDescription": "This event counts each memory Read operation to the L3 cache counted by L3D_CACHE that incurs additional latency because it returns data from outside of the L1 to L3 caches.\nThe L3D_CACHE_MISS(0x8152), L3D_CACHE_REFILL_RD (0x00a2) and L3D_CACHE_LMISS_RD(0x400b) count the same event in the hardware."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RW",
+ "PublicDescription": "This event counts each access counted by L3D_CACHE that is due to a demand memory Read operation or demand memory Write operation.\nThis event is a sum of L3D_CACHE_RD(0x00a0), L3D_CACHE_ALLOCATE(0x0029) and L2D_CACHE_WB(0x0018).\nNote that this counter does not count that writes an entire line to beyond level 3. Thus this counter does not count the streaming Writes to beyond L3 cache."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_PRFM",
+ "PublicDescription": "This event counts each access counted by L3D_CACHE that is due to a prefetch instruction. This includes L3 Data accesses due to the L1, L2, or L3 prefetch instruction."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_MISS",
+ "PublicDescription": "This event counts each demand Read access counted by L3D_CACHE_RD that misses in the L1 to L3 Data, causing an access to outside of the L3 cache.\nThe L3D_CACHE_MISS(0x8152), L3D_CACHE_REFILL_RD (0x00a2) and L3D_CACHE_LMISS_RD(0x400b) count the same event in the hardware."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL_PRFM",
+ "PublicDescription": "This event counts each access counted by L3D_CACHE_PRFM that causes a refill of the L3 cache, or any L1 or L2 Data, from outside of those caches."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_HWPRF",
+ "PublicDescription": "This event counts each access to L3 cache that is due to a hardware prefetcher. This includes L3D accesses due to the Level-1 or Level-2 or Level-3 hardware prefetcher."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL_HWPRF",
+ "PublicDescription": "This event counts each hardware prefetch counted by L3D_CACHE_HWPRF that causes a refill of the L3 Data or unified cache, or any L1 or L2 Data, Instruction, or unified cache of this PE, from outside of those caches."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL_PRF",
+ "PublicDescription": "This event counts each access to L3 cache due to a prefetch instruction, or hardware prefetch that causes a refill of the L3 Data, or any L1 or L2 Data, from outside of those caches."
+ },
+ {
+ "EventCode": "0x01e8",
+ "EventName": "L3D_CACHE_RWL1PRFL2PRF",
+ "PublicDescription": "L3 cache access, demand Read, demand Write, L1 hardware or software prefetch or L2 hardware or software prefetch.\nThis event counts each access to L3 D-cache due to the following:\n* Demand Read or Write.\n* L1 Hardware or software prefetch.\n* L2 Hardware or software prefetch."
+ },
+ {
+ "EventCode": "0x01e9",
+ "EventName": "L3D_CACHE_REFILL_RWL1PRFL2PRF",
+ "PublicDescription": "L3 cache refill, demand Read, demand Write, L1 hardware or software prefetch or L2 hardware or software prefetch.\nThis event counts each access counted by L3D_CACHE_RWL1PRFL2PRF that causes a refill of the L3 cache, or any L1 or L2 cache of this PE, from outside of those caches."
+ },
+ {
+ "EventCode": "0x01f6",
+ "EventName": "L3D_CACHE_REFILL_L2PRF",
+ "PublicDescription": "This event counts each access counted by L3D_CACHE_L2PRF that causes a refill of the L3 cache, or any L1 or L2 cache of this PE, from outside of those caches."
+ },
+ {
+ "EventCode": "0x01f7",
+ "EventName": "L3D_CACHE_HIT_RWL1PRFL2PRF_FPRF",
+ "PublicDescription": "L3 cache demand Read, demand Write, L1 prefetch L2 prefetch first hit, fetched by software or hardware prefetch.\nThis event counts each demand Read, demand Write, L1 hardware or software prefetch request and L2 hardware or software prefetch that hit an L3 D-cache line that was refilled into L3 D-cache in response to an L3 hardware prefetch or software prefetch. Only the first hit is counted. After this event is generated for a cache line, the event is not generated again for the same cache line while it remains in the cache."
+ },
+ {
+ "EventCode": "0x0225",
+ "EventName": "L3D_CACHE_REFILL_IF",
+ "PublicDescription": "L3 cache refill, instruction fetch.\nThis event counts demand instruction fetch that causes a refill of the L3 cache, or any L1 or L2 cache of this PE, from outside of those caches."
+ },
+ {
+ "EventCode": "0x0226",
+ "EventName": "L3D_CACHE_REFILL_MM",
+ "PublicDescription": "L3 cache refill, translation table walk access.\nThis event counts demand translation table access that causes a refill of the L3 cache, or any L1 or L2 cache of this PE, from outside of those caches."
+ },
+ {
+ "EventCode": "0x0227",
+ "EventName": "L3D_CACHE_REFILL_L1PRF",
+ "PublicDescription": "This event counts each access counted by L3D_CACHE_L1PRF that causes a refill of the L3 cache, or any L1 or L2 cache of this PE, from outside of those caches."
+ },
+ {
+ "EventCode": "0x022c",
+ "EventName": "L3D_CACHE_L1PRF",
+ "PublicDescription": "This event counts the L3 D-cache access due to L1 hardware prefetch or software prefetch request.\nThe L1 hardware prefetch or software prefetch requests that miss the L1I, L1D and L2 D-cache are counted by this counter"
+ },
+ {
+ "EventCode": "0x022d",
+ "EventName": "L3D_CACHE_L2PRF",
+ "PublicDescription": "This event counts the L3 D-cache access due to L2 hardware prefetch or software prefetch request.\nThe L2 hardware prefetch or software prefetch requests that miss the L2 D-cache are counted by this counter"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/memory.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/memory.json
new file mode 100644
index 000000000000..becd2d90bf39
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/memory.json
@@ -0,0 +1,46 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS",
+ "PublicDescription": "This event counts memory accesses issued by the CPU load/store unit, where those accesses are issued due to load or store operations. This event counts memory accesses regardless of whether the data is received from any level of cache hierarchy or external memory. If memory accesses are broken up into smaller transactions than what were specified in the load or store instructions, then the event counts those smaller memory transactions.\nMemory accesses generated by the following instructions or activity are not counted: instruction fetches, cache maintenance instructions, translation table walks or prefetches, memory prefetch operations. This event counts the sum of the following events:\nMEM_ACCESS_RD and\nMEM_ACCESS_WR."
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR",
+ "PublicDescription": "This event counts any detected correctable or uncorrectable physical memory errors (ECC or parity) in protected CPU RAMs. On the Core, this event counts errors in the caches (including data and tag RAMs). Any detected memory error (from either a speculative and abandoned access, or an architecturally executed access) is counted.\nNote that errors are only detected when the actual protected memory is accessed by an operation."
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS",
+ "PublicDescription": "This event counts each external bus read access that causes an access to a remote device. That is, a socket that does not contain the PE."
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD",
+ "PublicDescription": "This event counts memory accesses issued by the CPU due to Load operations. This event counts any memory Load access, no matter whether the data is received from any level of cache hierarchy or external memory. This event also counts atomic Load operations. If memory accesses are broken up by the Load/Store unit into smaller transactions that are issued by the bus interface, then the event counts those smaller transactions.\nThe following instructions are not counted:\n1) Instruction fetches,\n2) Cache maintenance instructions,\n3) Translation table walks or prefetches,\n4) Memory prefetch operations.\nThis event is a subset of the MEM_ACCESS event but the event only counts memory-Read operations."
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR",
+ "PublicDescription": "This event counts memory accesses issued by the CPU due to Store operations. This event counts any memory Store access, no matter whether the data is located in any level of cache or external memory. This event also counts atomic Load and Store operations. If memory accesses are broken up by the Load/Store unit into smaller transactions that are issued by the bus interface, then the event counts those smaller transactions."
+ },
+ {
+ "ArchStdEvent": "LDST_ALIGN_LAT",
+ "PublicDescription": "This event counts the number of memory Read and Write accesses in a cycle that incurred additional latency due to the alignment of the address and the size of data being accessed, which results in a store crossing a single cache line.\nThis event is implemented as the sum of the following events on this CPU:\nLD_ALIGN_LAT and\nST_ALIGN_LAT."
+ },
+ {
+ "ArchStdEvent": "LD_ALIGN_LAT",
+ "PublicDescription": "This event counts the number of memory Read accesses in a cycle that incurred additional latency due to the alignment of the address and size of data being accessed, which results in a load crossing a single cache line."
+ },
+ {
+ "ArchStdEvent": "ST_ALIGN_LAT",
+ "PublicDescription": "This event counts the number of memory Write accesses in a cycle that incurred additional latency due to the alignment of the address and size of data being accessed."
+ },
+ {
+ "ArchStdEvent": "INST_FETCH_PERCYC",
+ "PublicDescription": "This event counts number of instruction fetches outstanding per cycle, which will provide an average latency of instruction fetch."
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD_PERCYC",
+ "PublicDescription": "This event counts the number of outstanding Loads or memory Read accesses per cycle."
+ },
+ {
+ "ArchStdEvent": "INST_FETCH",
+ "PublicDescription": "This event counts instruction memory accesses that the PE makes."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/metrics.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/metrics.json
new file mode 100644
index 000000000000..b825ede03f54
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/metrics.json
@@ -0,0 +1,722 @@
+[
+ {
+ "MetricName": "backend_bound",
+ "MetricExpr": "100 * (STALL_SLOT_BACKEND / CPU_SLOT)",
+ "BriefDescription": "This metric is the percentage of total slots that were stalled due to resource constraints in the backend of the processor.",
+ "ScaleUnit": "1percent of slots",
+ "MetricGroup": "TopdownL1"
+ },
+ {
+ "MetricName": "backend_busy_bound",
+ "MetricExpr": "100 * (STALL_BACKEND_BUSY / STALL_BACKEND)",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the backend due to issue queues being full to accept operations for execution.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Backend"
+ },
+ {
+ "MetricName": "backend_cache_l1d_bound",
+ "MetricExpr": "100 * (STALL_BACKEND_L1D / (STALL_BACKEND_L1D + STALL_BACKEND_MEM))",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the backend due to memory access latency issues caused by L1 D-cache misses.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Backend"
+ },
+ {
+ "MetricName": "backend_cache_l2d_bound",
+ "MetricExpr": "100 * (STALL_BACKEND_MEM / (STALL_BACKEND_L1D + STALL_BACKEND_MEM))",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the backend due to memory access latency issues caused by L2 D-cache misses.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Backend"
+ },
+ {
+ "MetricName": "backend_core_bound",
+ "MetricExpr": "100 * (STALL_BACKEND_CPUBOUND / STALL_BACKEND)",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the backend due to backend Core resource constraints not related to instruction fetch latency issues caused by memory access components.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Backend"
+ },
+ {
+ "MetricName": "backend_core_rename_bound",
+ "MetricExpr": "100 * (STALL_BACKEND_RENAME / STALL_BACKEND_CPUBOUND)",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the backend as the rename unit registers are unavailable.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Backend"
+ },
+ {
+ "MetricName": "backend_mem_bound",
+ "MetricExpr": "100 * (STALL_BACKEND_MEMBOUND / STALL_BACKEND)",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the backend due to backend Core resource constraints related to memory access latency issues caused by memory access components.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Backend"
+ },
+ {
+ "MetricName": "backend_mem_cache_bound",
+ "MetricExpr": "100 * ((STALL_BACKEND_L1D + STALL_BACKEND_MEM) / STALL_BACKEND_MEMBOUND)",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the backend due to memory latency issues caused by D-cache misses.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Backend"
+ },
+ {
+ "MetricName": "backend_mem_store_bound",
+ "MetricExpr": "100 * (STALL_BACKEND_ST / STALL_BACKEND_MEMBOUND)",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the backend due to memory Write pending caused by Stores stalled in the pre-commit stage.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Backend"
+ },
+ {
+ "MetricName": "backend_mem_tlb_bound",
+ "MetricExpr": "100 * (STALL_BACKEND_TLB / STALL_BACKEND_MEMBOUND)",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the backend due to memory access latency issues caused by Data TLB misses.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Backend"
+ },
+ {
+ "MetricName": "backend_stalled_cycles",
+ "MetricExpr": "100 * (STALL_BACKEND / CPU_CYCLES)",
+ "BriefDescription": "This metric is the percentage of cycles that were stalled due to resource constraints in the backend unit of the processor.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Cycle_Accounting"
+ },
+ {
+ "MetricName": "bad_speculation",
+ "MetricExpr": "100 - (frontend_bound + retiring + backend_bound)",
+ "BriefDescription": "This metric is the percentage of total slots that executed operations and didn't retire due to a pipeline flush. This indicates cycles that were utilized but inefficiently.",
+ "ScaleUnit": "1percent of slots",
+ "MetricGroup": "TopdownL1"
+ },
+ {
+ "MetricName": "barrier_percentage",
+ "MetricExpr": "100 * ((ISB_SPEC + DSB_SPEC + DMB_SPEC) / INST_SPEC)",
+ "BriefDescription": "This metric measures instruction and data barrier operations as a percentage of operations speculatively executed.",
+ "ScaleUnit": "1percent of operations",
+ "MetricGroup": "Operation_Mix"
+ },
+ {
+ "MetricName": "branch_direct_ratio",
+ "MetricExpr": "BR_IMMED_RETIRED / BR_RETIRED",
+ "BriefDescription": "This metric measures the ratio of direct branches retired to the total number of branches architecturally executed.",
+ "ScaleUnit": "1per branch",
+ "MetricGroup": "Branch_Effectiveness"
+ },
+ {
+ "MetricName": "branch_indirect_ratio",
+ "MetricExpr": "BR_IND_RETIRED / BR_RETIRED",
+ "BriefDescription": "This metric measures the ratio of indirect branches retired, including function returns, to the total number of branches architecturally executed.",
+ "ScaleUnit": "1per branch",
+ "MetricGroup": "Branch_Effectiveness"
+ },
+ {
+ "MetricName": "branch_misprediction_ratio",
+ "MetricExpr": "BR_MIS_PRED_RETIRED / BR_RETIRED",
+ "BriefDescription": "This metric measures the ratio of branches mispredicted to the total number of branches architecturally executed. This gives an indication of the effectiveness of the branch prediction unit.",
+ "ScaleUnit": "1per branch",
+ "MetricGroup": "Miss_Ratio;Branch_Effectiveness"
+ },
+ {
+ "MetricName": "branch_mpki",
+ "MetricExpr": "1000 * (BR_MIS_PRED_RETIRED / INST_RETIRED)",
+ "BriefDescription": "This metric measures the number of branch mispredictions per thousand instructions executed.",
+ "ScaleUnit": "1MPKI",
+ "MetricGroup": "MPKI;Branch_Effectiveness"
+ },
+ {
+ "MetricName": "branch_percentage",
+ "MetricExpr": "100 * ((BR_IMMED_SPEC + BR_INDIRECT_SPEC) / INST_SPEC)",
+ "BriefDescription": "This metric measures branch operations as a percentage of operations speculatively executed.",
+ "ScaleUnit": "1percent of operations",
+ "MetricGroup": "Operation_Mix"
+ },
+ {
+ "MetricName": "branch_return_ratio",
+ "MetricExpr": "BR_RETURN_RETIRED / BR_RETIRED",
+ "BriefDescription": "This metric measures the ratio of branches retired that are function returns to the total number of branches architecturally executed.",
+ "ScaleUnit": "1per branch",
+ "MetricGroup": "Branch_Effectiveness"
+ },
+ {
+ "MetricName": "bus_bandwidth",
+ "MetricExpr": "BUS_ACCESS * 32 / duration_time ",
+ "BriefDescription": "This metric measures the bus-bandwidth of the data transferred between this PE's L2 with unCore in the system.",
+ "ScaleUnit": "1Bytes/sec"
+ },
+ {
+ "MetricName": "cpu_cycles_fraction_in_st_mode",
+ "MetricExpr": "((CPU_SLOT/CPU_CYCLES) - 5) / 5",
+ "BriefDescription": "This metric counts fraction of the CPU cycles spent in ST mode during program execution.",
+ "ScaleUnit": "1fraction of cycles",
+ "MetricGroup": "SMT"
+ },
+ {
+ "MetricName": "cpu_cycles_in_smt_mode",
+ "MetricExpr": "(1 - cpu_cycles_fraction_in_st_mode) * CPU_CYCLES",
+ "BriefDescription": "This metric counts CPU cycles in SMT mode during program execution.",
+ "ScaleUnit": "1CPU cycles",
+ "MetricGroup": "SMT"
+ },
+ {
+ "MetricName": "cpu_cycles_in_st_mode",
+ "MetricExpr": "cpu_cycles_fraction_in_st_mode * CPU_CYCLES",
+ "BriefDescription": "This metric counts CPU cycles in ST mode during program execution.",
+ "ScaleUnit": "1CPU cycles",
+ "MetricGroup": "SMT"
+ },
+ {
+ "MetricName": "crypto_percentage",
+ "MetricExpr": "100 * (CRYPTO_SPEC / INST_SPEC)",
+ "BriefDescription": "This metric measures crypto operations as a percentage of operations speculatively executed.",
+ "ScaleUnit": "1percent of operations",
+ "MetricGroup": "Operation_Mix"
+ },
+ {
+ "MetricName": "dtlb_mpki",
+ "MetricExpr": "1000 * (DTLB_WALK / INST_RETIRED)",
+ "BriefDescription": "This metric measures the number of Data TLB Walks per thousand instructions executed.",
+ "ScaleUnit": "1MPKI",
+ "MetricGroup": "MPKI;DTLB_Effectiveness"
+ },
+ {
+ "MetricName": "dtlb_walk_average_latency",
+ "MetricExpr": "DTLB_WALK_PERCYC / DTLB_WALK",
+ "BriefDescription": "This metric measures the average latency of Data TLB walks in CPU cycles.",
+ "ScaleUnit": "1CPU cycles",
+ "MetricGroup": "Average_Latency"
+ },
+ {
+ "MetricName": "dtlb_walk_ratio",
+ "MetricExpr": "DTLB_WALK / L1D_TLB",
+ "BriefDescription": "This metric measures the ratio of Data TLB Walks to the total number of Data TLB accesses. This gives an indication of the effectiveness of the Data TLB accesses.",
+ "ScaleUnit": "1per TLB access",
+ "MetricGroup": "Miss_Ratio;DTLB_Effectiveness"
+ },
+ {
+ "MetricName": "fp16_percentage",
+ "MetricExpr": "100 * (FP_HP_SPEC / INST_SPEC)",
+ "BriefDescription": "This metric measures half-precision floating point operations as a percentage of operations speculatively executed.",
+ "ScaleUnit": "1percent of operations",
+ "MetricGroup": "FP_Precision_Mix"
+ },
+ {
+ "MetricName": "fp32_percentage",
+ "MetricExpr": "100 * (FP_SP_SPEC / INST_SPEC)",
+ "BriefDescription": "This metric measures single-precision floating point operations as a percentage of operations speculatively executed.",
+ "ScaleUnit": "1percent of operations",
+ "MetricGroup": "FP_Precision_Mix"
+ },
+ {
+ "MetricName": "fp64_percentage",
+ "MetricExpr": "100 * (FP_DP_SPEC / INST_SPEC)",
+ "BriefDescription": "This metric measures double-precision floating point operations as a percentage of operations speculatively executed.",
+ "ScaleUnit": "1percent of operations",
+ "MetricGroup": "FP_Precision_Mix"
+ },
+ {
+ "MetricName": "fp_ops_per_cycle",
+ "MetricExpr": "(FP_SCALE_OPS_SPEC + FP_FIXED_OPS_SPEC) / CPU_CYCLES",
+ "BriefDescription": "This metric measures floating point operations per cycle in any precision performed by any instruction. Operations are counted by computation and by vector lanes, fused computations such as multiply-add count as twice per vector lane for example.",
+ "ScaleUnit": "1operations per cycle",
+ "MetricGroup": "FP_Arithmetic_Intensity"
+ },
+ {
+ "MetricName": "frontend_bound",
+ "MetricExpr": "100 * (STALL_SLOT_FRONTEND_WITHOUT_MISPRED / CPU_SLOT)",
+ "BriefDescription": "This metric is the percentage of total slots that were stalled due to resource constraints in the frontend of the processor.",
+ "ScaleUnit": "1percent of slots",
+ "MetricGroup": "TopdownL1"
+ },
+ {
+ "MetricName": "frontend_cache_l1i_bound",
+ "MetricExpr": "100 * (STALL_FRONTEND_L1I / (STALL_FRONTEND_L1I + STALL_FRONTEND_MEM))",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the frontend due to memory access latency issues caused by L1 I-cache misses.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Frontend"
+ },
+ {
+ "MetricName": "frontend_cache_l2i_bound",
+ "MetricExpr": "100 * (STALL_FRONTEND_MEM / (STALL_FRONTEND_L1I + STALL_FRONTEND_MEM))",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the frontend due to memory access latency issues caused by L2 I-cache misses.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Frontend"
+ },
+ {
+ "MetricName": "frontend_core_bound",
+ "MetricExpr": "100 * (STALL_FRONTEND_CPUBOUND / STALL_FRONTEND)",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the frontend due to frontend Core resource constraints not related to instruction fetch latency issues caused by memory access components.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Frontend"
+ },
+ {
+ "MetricName": "frontend_core_flow_bound",
+ "MetricExpr": "100 * (STALL_FRONTEND_FLOW / STALL_FRONTEND_CPUBOUND)",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the frontend as the decode unit is awaiting input from the branch prediction unit.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Frontend"
+ },
+ {
+ "MetricName": "frontend_core_flush_bound",
+ "MetricExpr": "100 * (STALL_FRONTEND_FLUSH / STALL_FRONTEND_CPUBOUND)",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the frontend as the processor is recovering from a pipeline flush caused by bad speculation or other machine resteers.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Frontend"
+ },
+ {
+ "MetricName": "frontend_mem_bound",
+ "MetricExpr": "100 * (STALL_FRONTEND_MEMBOUND / STALL_FRONTEND)",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the frontend due to frontend Core resource constraints related to the instruction fetch latency issues caused by memory access components.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Frontend"
+ },
+ {
+ "MetricName": "frontend_mem_cache_bound",
+ "MetricExpr": "100 * ((STALL_FRONTEND_L1I + STALL_FRONTEND_MEM) / STALL_FRONTEND_MEMBOUND)",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the frontend due to instruction fetch latency issues caused by I-cache misses.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Frontend"
+ },
+ {
+ "MetricName": "frontend_mem_tlb_bound",
+ "MetricExpr": "100 * (STALL_FRONTEND_TLB / STALL_FRONTEND_MEMBOUND)",
+ "BriefDescription": "This metric is the percentage of total cycles stalled in the frontend due to instruction fetch latency issues caused by Instruction TLB misses.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Topdown_Frontend"
+ },
+ {
+ "MetricName": "frontend_stalled_cycles",
+ "MetricExpr": "100 * (STALL_FRONTEND / CPU_CYCLES)",
+ "BriefDescription": "This metric is the percentage of cycles that were stalled due to resource constraints in the frontend unit of the processor.",
+ "ScaleUnit": "1percent of cycles",
+ "MetricGroup": "Cycle_Accounting"
+ },
+ {
+ "MetricName": "instruction_fetch_average_latency",
+ "MetricExpr": "INST_FETCH_PERCYC / INST_FETCH",
+ "BriefDescription": "This metric measures the average latency of instruction fetches in CPU cycles.",
+ "ScaleUnit": "1CPU cycles",
+ "MetricGroup": "Average_Latency"
+ },
+ {
+ "MetricName": "integer_dp_percentage",
+ "MetricExpr": "100 * (DP_SPEC / INST_SPEC)",
+ "BriefDescription": "This metric measures scalar integer operations as a percentage of operations speculatively executed.",
+ "ScaleUnit": "1percent of operations",
+ "MetricGroup": "Operation_Mix"
+ },
+ {
+ "MetricName": "ipc",
+ "MetricExpr": "INST_RETIRED / CPU_CYCLES",
+ "BriefDescription": "This metric measures the number of instructions retired per cycle.",
+ "ScaleUnit": "1per cycle",
+ "MetricGroup": "General"
+ },
+ {
+ "MetricName": "itlb_mpki",
+ "MetricExpr": "1000 * (ITLB_WALK / INST_RETIRED)",
+ "BriefDescription": "This metric measures the number of instruction TLB Walks per thousand instructions executed.",
+ "ScaleUnit": "1MPKI",
+ "MetricGroup": "MPKI;ITLB_Effectiveness"
+ },
+ {
+ "MetricName": "itlb_walk_average_latency",
+ "MetricExpr": "ITLB_WALK_PERCYC / ITLB_WALK",
+ "BriefDescription": "This metric measures the average latency of instruction TLB walks in CPU cycles.",
+ "ScaleUnit": "1CPU cycles",
+ "MetricGroup": "Average_Latency"
+ },
+ {
+ "MetricName": "itlb_walk_ratio",
+ "MetricExpr": "ITLB_WALK / L1I_TLB",
+ "BriefDescription": "This metric measures the ratio of instruction TLB Walks to the total number of Instruction TLB accesses. This gives an indication of the effectiveness of the Instruction TLB accesses.",
+ "ScaleUnit": "1per TLB access",
+ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness"
+ },
+ {
+ "MetricName": "l1d_cache_miss_ratio",
+ "MetricExpr": "L1D_CACHE_REFILL / L1D_CACHE",
+ "BriefDescription": "This metric measures the ratio of L1 D-cache accesses missed to the total number of L1 D-cache accesses. This gives an indication of the effectiveness of the L1 D-cache.",
+ "ScaleUnit": "1per cache access",
+ "MetricGroup": "Miss_Ratio;L1D_Cache_Effectiveness"
+ },
+ {
+ "MetricName": "l1d_cache_mpki",
+ "MetricExpr": "1000 * (L1D_CACHE_REFILL / INST_RETIRED)",
+ "BriefDescription": "This metric measures the number of L1 D-cache accesses missed per thousand instructions executed.",
+ "ScaleUnit": "1MPKI",
+ "MetricGroup": "MPKI;L1D_Cache_Effectiveness"
+ },
+ {
+ "MetricName": "l1d_cache_rw_miss_ratio",
+ "MetricExpr": "l1d_demand_misses / l1d_demand_accesses",
+ "BriefDescription": "This metric measures the ratio of L1 D-cache Read accesses missed to the total number of L1 D-cache accesses. This gives an indication of the effectiveness of the L1 D-cache for demand Load or Store traffic.",
+ "ScaleUnit": "1per cache access",
+ "MetricGroup": "L1I_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l1d_demand_accesses",
+ "MetricExpr": "L1D_CACHE_RW",
+ "BriefDescription": "This metric measures the count of L1 D-cache accesses incurred on Load or Store by the instruction stream of the program.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L1I_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l1d_demand_misses",
+ "MetricExpr": "L1D_CACHE_REFILL_RW",
+ "BriefDescription": "This metric measures the count of L1 D-cache misses incurred on a Load or Store by the instruction stream of the program.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L1I_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l1d_prf_accuracy",
+ "MetricExpr": "100 * (l1d_useful_prf / l1d_refilled_prf)",
+ "BriefDescription": "This metric measures the fraction of prefetched memory addresses that are used by the instruction stream.",
+ "ScaleUnit": "1percent of prefetch",
+ "MetricGroup": "L1I_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l1d_prf_coverage",
+ "MetricExpr": "100 * (l1d_useful_prf / (l1d_demand_misses + l1d_refilled_prf))",
+ "BriefDescription": "This metric measures the baseline demand cache misses which the prefetcher brings into the cache.",
+ "ScaleUnit": "1percent of cache access",
+ "MetricGroup": "L1I_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l1d_refilled_prf",
+ "MetricExpr": "L1D_CACHE_REFILL_HWPRF + L1D_CACHE_REFILL_PRFM + L1D_LFB_HIT_RW_FHWPRF + L1D_LFB_HIT_RW_FPRFM",
+ "BriefDescription": "This metric measures the count of cache lines refilled by L1 data prefetcher (hardware prefetches or software preload) into L1 D-cache.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L1I_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l1d_tlb_miss_ratio",
+ "MetricExpr": "L1D_TLB_REFILL / L1D_TLB",
+ "BriefDescription": "This metric measures the ratio of L1 Data TLB accesses missed to the total number of L1 Data TLB accesses. This gives an indication of the effectiveness of the L1 Data TLB.",
+ "ScaleUnit": "1per TLB access",
+ "MetricGroup": "Miss_Ratio;DTLB_Effectiveness"
+ },
+ {
+ "MetricName": "l1d_tlb_mpki",
+ "MetricExpr": "1000 * (L1D_TLB_REFILL / INST_RETIRED)",
+ "BriefDescription": "This metric measures the number of L1 Data TLB accesses missed per thousand instructions executed.",
+ "ScaleUnit": "1MPKI",
+ "MetricGroup": "MPKI;DTLB_Effectiveness"
+ },
+ {
+ "MetricName": "l1d_useful_prf",
+ "MetricExpr": "L1D_CACHE_HIT_RW_FPRF + L1D_LFB_HIT_RW_FHWPRF + L1D_LFB_HIT_RW_FPRFM",
+ "BriefDescription": "This metric measures the count of cache lines refilled by L1 data prefetcher (hardware prefetches or software preload) into L1 D-cache which are further used by Load or Store from the instruction stream of the program.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L1I_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l1i_cache_miss_ratio",
+ "MetricExpr": "L1I_CACHE_REFILL / L1I_CACHE",
+ "BriefDescription": "This metric measures the ratio of L1 I-cache accesses missed to the total number of L1 I-cache accesses. This gives an indication of the effectiveness of the L1 I-cache.",
+ "ScaleUnit": "1per cache access",
+ "MetricGroup": "Miss_Ratio;L1I_Cache_Effectiveness"
+ },
+ {
+ "MetricName": "l1i_cache_mpki",
+ "MetricExpr": "1000 * (L1I_CACHE_REFILL / INST_RETIRED)",
+ "BriefDescription": "This metric measures the number of L1 I-cache accesses missed per thousand instructions executed.",
+ "ScaleUnit": "1MPKI",
+ "MetricGroup": "MPKI;L1I_Cache_Effectiveness"
+ },
+ {
+ "MetricName": "l1i_cache_rd_miss_ratio",
+ "MetricExpr": "l1i_demand_misses / l1i_demand_accesses",
+ "BriefDescription": "This metric measures the ratio of L1 I-cache Read accesses missed to the total number of L1 I-cache accesses. This gives an indication of the effectiveness of the L1 I-cache for demand instruction fetch traffic. Note that cache accesses in this cache are demand instruction fetch.",
+ "ScaleUnit": "1per cache access",
+ "MetricGroup": "L1D_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l1i_demand_accesses",
+ "MetricExpr": "L1I_CACHE_RD",
+ "BriefDescription": "This metric measures the count of L1 I-cache accesses caused by an instruction fetch by the instruction stream of the program.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L1D_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l1i_demand_misses",
+ "MetricExpr": "L1I_CACHE_REFILL_RD",
+ "BriefDescription": "This metric measures the count of L1 I-cache misses caused by an instruction fetch by the instruction stream of the program.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L1D_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l1i_prf_accuracy",
+ "MetricExpr": "100 * (l1i_useful_prf / l1i_refilled_prf)",
+ "BriefDescription": "This metric measures the fraction of prefetched memory addresses that are used by the instruction stream.",
+ "ScaleUnit": "1percent of prefetch",
+ "MetricGroup": "L1D_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l1i_prf_coverage",
+ "MetricExpr": "100 * (l1i_useful_prf / (l1i_demand_misses + l1i_refilled_prf))",
+ "BriefDescription": "This metric measures the baseline demand cache misses which the prefetcher brings into the cache.",
+ "ScaleUnit": "1percent of cache access",
+ "MetricGroup": "L1D_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l1i_refilled_prf",
+ "MetricExpr": "L1I_CACHE_REFILL_HWPRF + L1I_CACHE_REFILL_PRFM",
+ "BriefDescription": "This metric measures the count of cache lines refilled by L1 instruction prefetcher (hardware prefetches or software preload) into L1 I-cache.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L1D_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l1i_tlb_miss_ratio",
+ "MetricExpr": "L1I_TLB_REFILL / L1I_TLB",
+ "BriefDescription": "This metric measures the ratio of L1 Instruction TLB accesses missed to the total number of L1 Instruction TLB accesses. This gives an indication of the effectiveness of the L1 Instruction TLB.",
+ "ScaleUnit": "1per TLB access",
+ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness"
+ },
+ {
+ "MetricName": "l1i_tlb_mpki",
+ "MetricExpr": "1000 * (L1I_TLB_REFILL / INST_RETIRED)",
+ "BriefDescription": "This metric measures the number of L1 Instruction TLB accesses missed per thousand instructions executed.",
+ "ScaleUnit": "1MPKI",
+ "MetricGroup": "MPKI;ITLB_Effectiveness"
+ },
+ {
+ "MetricName": "l1i_useful_prf",
+ "MetricExpr": "L1I_CACHE_HIT_RD_FPRF",
+ "BriefDescription": "This metric measures the count of cache lines refilled by L1 instruction prefetcher (hardware prefetches or software preload) into L1 I-cache which are further used by instruction stream of the program.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L1D_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l2_cache_miss_ratio",
+ "MetricExpr": "L2D_CACHE_REFILL / L2D_CACHE",
+ "BriefDescription": "This metric measures the ratio of L2 cache accesses missed to the total number of L2 cache accesses. This gives an indication of the effectiveness of the L2 cache, which is a unified cache that stores both data and instruction.\nNote that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.",
+ "ScaleUnit": "1per cache access",
+ "MetricGroup": "Miss_Ratio;L2_Cache_Effectiveness"
+ },
+ {
+ "MetricName": "l2_cache_mpki",
+ "MetricExpr": "1000 * (l2d_demand_misses / INST_RETIRED)",
+ "BriefDescription": "This metric measures the number of L2 unified cache accesses missed per thousand instructions executed.\nNote that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.",
+ "ScaleUnit": "1MPKI",
+ "MetricGroup": "MPKI;L2_Cache_Effectiveness"
+ },
+ {
+ "MetricName": "l2_tlb_miss_ratio",
+ "MetricExpr": "L2D_TLB_REFILL / L2D_TLB",
+ "BriefDescription": "This metric measures the ratio of L2 unified TLB accesses missed to the total number of L2 unified TLB accesses.\nThis gives an indication of the effectiveness of the L2 TLB.",
+ "ScaleUnit": "1per TLB access",
+ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness;DTLB_Effectiveness"
+ },
+ {
+ "MetricName": "l2_tlb_mpki",
+ "MetricExpr": "1000 * (L2D_TLB_REFILL / INST_RETIRED)",
+ "BriefDescription": "This metric measures the number of L2 unified TLB accesses missed per thousand instructions executed.",
+ "ScaleUnit": "1MPKI",
+ "MetricGroup": "MPKI;ITLB_Effectiveness;DTLB_Effectiveness"
+ },
+ {
+ "MetricName": "l2d_cache_rwl1prf_miss_ratio",
+ "MetricExpr": "l2d_demand_misses / l2d_demand_accesses",
+ "BriefDescription": "This metric measures the ratio of L2 D-cache Read accesses missed to the total number of L2 D-cache accesses.\nThis gives an indication of the effectiveness of the L2 D-cache for demand instruction fetch, Load, Store, or L1 prefetcher accesses traffic.",
+ "ScaleUnit": "1per cache access",
+ "MetricGroup": "L2_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l2d_demand_accesses",
+ "MetricExpr": "L2D_CACHE_RD + L2D_CACHE_WR + L2D_CACHE_L1PRF",
+ "BriefDescription": "This metric measures the count of L2 D-cache accesses incurred on an instruction fetch, Load, Store, or L1 prefetcher accesses by the instruction stream of the program.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L2_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l2d_demand_misses",
+ "MetricExpr": "L2D_CACHE_REFILL_RD + L2D_CACHE_REFILL_WR + L2D_CACHE_REFILL_L1PRF",
+ "BriefDescription": "This metric measures the count of L2 D-cache misses incurred on an instruction fetch, Load, Store, or L1 prefetcher accesses by the instruction stream of the program.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L2_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l2d_prf_accuracy",
+ "MetricExpr": "100 * (l2d_useful_prf / l2d_refilled_prf)",
+ "BriefDescription": "This metric measures the fraction of prefetched memory addresses that are used by the instruction stream.",
+ "ScaleUnit": "1percent of prefetch",
+ "MetricGroup": "L2_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l2d_prf_coverage",
+ "MetricExpr": "100 * (l2d_useful_prf / (l2d_demand_misses + l2d_refilled_prf))",
+ "BriefDescription": "This metric measures the baseline demand cache misses which the prefetcher brings into the cache.",
+ "ScaleUnit": "1percent of cache access",
+ "MetricGroup": "L2_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l2d_refilled_prf",
+ "MetricExpr": "(L2D_CACHE_REFILL_PRF - L2D_CACHE_REFILL_L1PRF) + L2D_LFB_HIT_RWL1PRF_FHWPRF",
+ "BriefDescription": "This metric measures the count of cache lines refilled by L2 data prefetcher (hardware prefetches or software preload) into L2 D-cache.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L2_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l2d_useful_prf",
+ "MetricExpr": "L2D_CACHE_HIT_RWL1PRF_FPRF + L2D_LFB_HIT_RWL1PRF_FHWPRF",
+ "BriefDescription": "This metric measures the count of cache lines refilled by L2 data prefetcher (hardware prefetches or software preload) into L2 D-cache which are further used by instruction fetch, Load, Store, or L1 prefetcher accesses from the instruction stream of the program.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L2_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l3d_cache_rwl1prfl2prf_miss_ratio",
+ "MetricExpr": "l3d_demand_misses / l3d_demand_accesses",
+ "BriefDescription": "This metric measures the ratio of L3 D-cache Read accesses missed to the total number of L3 D-cache accesses. This gives an indication of the effectiveness of the L2 D-cache for demand instruction fetch, Load, Store, L1 prefetcher, or L2 prefetcher accesses traffic.",
+ "ScaleUnit": "1per cache access",
+ "MetricGroup": "L3_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l3d_demand_accesses",
+ "MetricExpr": "L3D_CACHE_RWL1PRFL2PRF",
+ "BriefDescription": "This metric measures the count of L3 D-cache accesses incurred on an instruction fetch, Load, Store, L1 prefetcher, or L2 prefetcher accesses by the instruction stream of the program.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L3_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l3d_demand_misses",
+ "MetricExpr": "L3D_CACHE_REFILL_RWL1PRFL2PRF",
+ "BriefDescription": "This metric measures the count of L3 D-cache misses incurred on an instruction fetch, Load, Store, L1 prefetcher, or L2 prefetcher accesses by the instruction stream of the program.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L3_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l3d_prf_accuracy",
+ "MetricExpr": "100 * (l3d_useful_prf / l3d_refilled_prf)",
+ "BriefDescription": "This metric measures the fraction of prefetched memory addresses that are used by the instruction stream.",
+ "ScaleUnit": "1percent of prefetch",
+ "MetricGroup": "L3_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l3d_prf_coverage",
+ "MetricExpr": "100 * (l3d_useful_prf / (l3d_demand_misses + l3d_refilled_prf))",
+ "BriefDescription": "This metric measures the baseline demand cache misses which the prefetcher brings into the cache.",
+ "ScaleUnit": "1percent of cache access",
+ "MetricGroup": "L3_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l3d_refilled_prf",
+ "MetricExpr": "L3D_CACHE_REFILL_HWPRF + L3D_CACHE_REFILL_PRFM - L3D_CACHE_REFILL_L1PRF - L3D_CACHE_REFILL_L2PRF",
+ "BriefDescription": "This metric measures the count of cache lines refilled by L3 data prefetcher (hardware prefetches or software preload) into L3 D-cache.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L3_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "l3d_useful_prf",
+ "MetricExpr": "L3D_CACHE_HIT_RWL1PRFL2PRF_FPRF",
+ "BriefDescription": "This metric measures the count of cache lines refilled by L3 data prefetcher (hardware prefetches or software preload) into L3 D-cache which are further used by instruction fetch, Load, Store, L1 prefetcher, or L2 prefetcher accesses from the instruction stream of the program.",
+ "ScaleUnit": "1count",
+ "MetricGroup": "L3_Prefetcher_Effectiveness"
+ },
+ {
+ "MetricName": "ll_cache_read_hit_ratio",
+ "MetricExpr": "(LL_CACHE_RD - LL_CACHE_MISS_RD) / LL_CACHE_RD",
+ "BriefDescription": "This metric measures the ratio of last level cache Read accesses hit in the cache to the total number of last level cache accesses. This gives an indication of the effectiveness of the last level cache for Read traffic. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a system level cache.",
+ "ScaleUnit": "1per cache access",
+ "MetricGroup": "LL_Cache_Effectiveness"
+ },
+ {
+ "MetricName": "ll_cache_read_miss_ratio",
+ "MetricExpr": "LL_CACHE_MISS_RD / LL_CACHE_RD",
+ "BriefDescription": "This metric measures the ratio of last level cache Read accesses missed to the total number of last level cache accesses. This gives an indication of the effectiveness of the last level cache for Read traffic. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a system level cache.",
+ "ScaleUnit": "1per cache access",
+ "MetricGroup": "Miss_Ratio;LL_Cache_Effectiveness"
+ },
+ {
+ "MetricName": "ll_cache_read_mpki",
+ "MetricExpr": "1000 * (LL_CACHE_MISS_RD / INST_RETIRED)",
+ "BriefDescription": "This metric measures the number of last level cache Read accesses missed per thousand instructions executed.",
+ "ScaleUnit": "1MPKI",
+ "MetricGroup": "MPKI;LL_Cache_Effectiveness"
+ },
+ {
+ "MetricName": "load_average_latency",
+ "MetricExpr": "MEM_ACCESS_RD_PERCYC / MEM_ACCESS",
+ "BriefDescription": "This metric measures the average latency of Load operations in CPU cycles.",
+ "ScaleUnit": "1CPU cycles",
+ "MetricGroup": "Average_Latency"
+ },
+ {
+ "MetricName": "load_percentage",
+ "MetricExpr": "100 * (LD_SPEC / INST_SPEC)",
+ "BriefDescription": "This metric measures Load operations as a percentage of operations speculatively executed.",
+ "ScaleUnit": "1percent of operations",
+ "MetricGroup": "Operation_Mix"
+ },
+ {
+ "MetricName": "nonsve_fp_ops_per_cycle",
+ "MetricExpr": "FP_FIXED_OPS_SPEC / CPU_CYCLES",
+ "BriefDescription": "This metric measures floating point operations per cycle in any precision performed by an instruction that is not an SVE instruction. Operations are counted by computation and by vector lanes, fused computations such as multiply-add count as twice per vector lane for example.",
+ "ScaleUnit": "1operations per cycle",
+ "MetricGroup": "FP_Arithmetic_Intensity"
+ },
+ {
+ "MetricName": "retiring",
+ "MetricExpr": "100 * ((OP_RETIRED/OP_SPEC) * (1 - (STALL_SLOT/CPU_SLOT)))",
+ "BriefDescription": "This metric is the percentage of total slots that retired operations, which indicates cycles that were utilized efficiently.",
+ "ScaleUnit": "1percent of slots",
+ "MetricGroup": "TopdownL1"
+ },
+ {
+ "MetricName": "scalar_fp_percentage",
+ "MetricExpr": "100 * (VFP_SPEC / INST_SPEC)",
+ "BriefDescription": "This metric measures scalar floating point operations as a percentage of operations speculatively executed.",
+ "ScaleUnit": "1percent of operations",
+ "MetricGroup": "Operation_Mix"
+ },
+ {
+ "MetricName": "simd_percentage",
+ "MetricExpr": "100 * (ASE_SPEC / INST_SPEC)",
+ "BriefDescription": "This metric measures advanced SIMD operations as a percentage of total operations speculatively executed.",
+ "ScaleUnit": "1percent of operations",
+ "MetricGroup": "Operation_Mix"
+ },
+ {
+ "MetricName": "store_percentage",
+ "MetricExpr": "100 * (ST_SPEC / INST_SPEC)",
+ "BriefDescription": "This metric measures Store operations as a percentage of operations speculatively executed.",
+ "ScaleUnit": "1percent of operations",
+ "MetricGroup": "Operation_Mix"
+ },
+ {
+ "MetricName": "sve_all_percentage",
+ "MetricExpr": "100 * (SVE_INST_SPEC / INST_SPEC)",
+ "BriefDescription": "This metric measures scalable vector operations, including Loads and Stores, as a percentage of operations speculatively executed.",
+ "ScaleUnit": "1percent of operations",
+ "MetricGroup": "Operation_Mix"
+ },
+ {
+ "MetricName": "sve_fp_ops_per_cycle",
+ "MetricExpr": "FP_SCALE_OPS_SPEC / CPU_CYCLES",
+ "BriefDescription": "This metric measures floating point operations per cycle in any precision performed by SVE instructions. Operations are counted by computation and by vector lanes, fused computations such as multiply-add count as twice per vector lane for example.",
+ "ScaleUnit": "1operations per cycle",
+ "MetricGroup": "FP_Arithmetic_Intensity"
+ },
+ {
+ "MetricName": "sve_predicate_empty_percentage",
+ "MetricExpr": "100 * (SVE_PRED_EMPTY_SPEC / SVE_PRED_SPEC)",
+ "BriefDescription": "This metric measures scalable vector operations with no active predicates as a percentage of SVE predicated operations speculatively executed.",
+ "ScaleUnit": "1percent of SVE predicated operations",
+ "MetricGroup": "SVE_Effectiveness"
+ },
+ {
+ "MetricName": "sve_predicate_full_percentage",
+ "MetricExpr": "100 * (SVE_PRED_FULL_SPEC / SVE_PRED_SPEC)",
+ "BriefDescription": "This metric measures scalable vector operations with all active predicates as a percentage of SVE predicated operations speculatively executed.",
+ "ScaleUnit": "1percent of SVE predicated operations",
+ "MetricGroup": "SVE_Effectiveness"
+ },
+ {
+ "MetricName": "sve_predicate_partial_percentage",
+ "MetricExpr": "100 * (SVE_PRED_PARTIAL_SPEC / SVE_PRED_SPEC)",
+ "BriefDescription": "This metric measures scalable vector operations with at least one active predicates as a percentage of SVE predicated operations speculatively executed.",
+ "ScaleUnit": "1percent of SVE predicated operations",
+ "MetricGroup": "SVE_Effectiveness"
+ },
+ {
+ "MetricName": "sve_predicate_percentage",
+ "MetricExpr": "100 * (SVE_PRED_SPEC / INST_SPEC)",
+ "BriefDescription": "This metric measures scalable vector operations with predicates as a percentage of operations speculatively executed.",
+ "ScaleUnit": "1percent of operations",
+ "MetricGroup": "SVE_Effectiveness"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/misc.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/misc.json
new file mode 100644
index 000000000000..8ff87d844e52
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/misc.json
@@ -0,0 +1,642 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR",
+ "PublicDescription": "This event counts software writes to the PMSWINC_EL0 (software PMU increment) register. The PMSWINC_EL0 register is a manually updated counter for use by application software.\nThis event could be used to measure any user program event, such as accesses to a particular data structure (by writing to the PMSWINC_EL0 register each time the data structure is accessed).\nTo use the PMSWINC_EL0 register and event, developers must insert instructions that write to the PMSWINC_EL0 register into the source code.\nSince the SW_INCR event records writes to the PMSWINC_EL0 register, there is no need to do a Read/Increment/Write sequence to the PMSWINC_EL0 register."
+ },
+ {
+ "ArchStdEvent": "TRB_WRAP",
+ "PublicDescription": "This event is generated each time the trace buffer current Write pointer is wrapped to the trace buffer base pointer."
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT0",
+ "PublicDescription": "Trace unit external output 0."
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT1",
+ "PublicDescription": "Trace unit external output 1."
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT2",
+ "PublicDescription": "Trace unit external output 2."
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT3",
+ "PublicDescription": "Trace unit external output 3."
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT4",
+ "PublicDescription": "Cross-trigger Interface output trigger 4."
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT5",
+ "PublicDescription": "Cross-trigger Interface output trigger 5."
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT6",
+ "PublicDescription": "Cross-trigger Interface output trigger 6."
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT7",
+ "PublicDescription": "Cross-trigger Interface output trigger 7."
+ },
+ {
+ "EventCode": "0x00e1",
+ "EventName": "L1I_PRFM_REQ_DROP",
+ "PublicDescription": "L1 I-cache software prefetch dropped."
+ },
+ {
+ "EventCode": "0x0100",
+ "EventName": "L1_PF_REFILL",
+ "PublicDescription": "L1 prefetch requests, refilled to L1 cache."
+ },
+ {
+ "EventCode": "0x0120",
+ "EventName": "FLUSH",
+ "PublicDescription": "This event counts both the CT flush and BX flush. The BR_MIS_PRED counts the BX flushes. So the FLUSH-BR_MIS_PRED gives the CT flushes."
+ },
+ {
+ "EventCode": "0x0121",
+ "EventName": "FLUSH_MEM",
+ "PublicDescription": "Flushes due to memory hazards. This only includes CT flushes."
+ },
+ {
+ "EventCode": "0x0122",
+ "EventName": "FLUSH_BAD_BRANCH",
+ "PublicDescription": "Flushes due to bad predicted branch. This only includes CT flushes."
+ },
+ {
+ "EventCode": "0x0123",
+ "EventName": "FLUSH_STDBYPASS",
+ "PublicDescription": "Flushes due to bad predecode. This only includes CT flushes."
+ },
+ {
+ "EventCode": "0x0124",
+ "EventName": "FLUSH_ISB",
+ "PublicDescription": "Flushes due to ISB or similar side-effects. This only includes CT flushes."
+ },
+ {
+ "EventCode": "0x0125",
+ "EventName": "FLUSH_OTHER",
+ "PublicDescription": "Flushes due to other hazards. This only includes CT flushes."
+ },
+ {
+ "EventCode": "0x0126",
+ "EventName": "STORE_STREAM",
+ "PublicDescription": "Stored lines in streaming no-Write-allocate mode."
+ },
+ {
+ "EventCode": "0x0127",
+ "EventName": "NUKE_RAR",
+ "PublicDescription": "Load/Store nuke due to Read-after-Read ordering hazard."
+ },
+ {
+ "EventCode": "0x0128",
+ "EventName": "NUKE_RAW",
+ "PublicDescription": "Load/Store nuke due to Read-after-Write ordering hazard."
+ },
+ {
+ "EventCode": "0x0129",
+ "EventName": "L1_PF_GEN_PAGE",
+ "PublicDescription": "Load/Store prefetch to L1 generated, Page mode."
+ },
+ {
+ "EventCode": "0x012a",
+ "EventName": "L1_PF_GEN_STRIDE",
+ "PublicDescription": "Load/Store prefetch to L1 generated, stride mode."
+ },
+ {
+ "EventCode": "0x012b",
+ "EventName": "L2_PF_GEN_LD",
+ "PublicDescription": "Load prefetch to L2 generated."
+ },
+ {
+ "EventCode": "0x012d",
+ "EventName": "LS_PF_TRAIN_TABLE_ALLOC",
+ "PublicDescription": "LS prefetch train table entry allocated."
+ },
+ {
+ "EventCode": "0x0130",
+ "EventName": "LS_PF_GEN_TABLE_ALLOC",
+ "PublicDescription": "This event counts the number of cycles with at least one table allocation, for L2 hardware prefetches (including the software PRFM instructions that are converted into hardware prefetches due to D-TLB miss).\nLS prefetch gen table allocation (for L2 prefetches)."
+ },
+ {
+ "EventCode": "0x0131",
+ "EventName": "LS_PF_GEN_TABLE_ALLOC_PF_PEND",
+ "PublicDescription": "This event counts the number of cycles in which at least one hardware prefetch is dropped due to the inability to identify a victim when the generation table is full. The hardware prefetch considered here includes the software PRFM that is converted into hardware prefetches due to D-TLB miss."
+ },
+ {
+ "EventCode": "0x0132",
+ "EventName": "TBW",
+ "PublicDescription": "Tablewalks."
+ },
+ {
+ "EventCode": "0x0134",
+ "EventName": "S1L2_HIT",
+ "PublicDescription": "Translation cache hit on S1L2 walk cache entry."
+ },
+ {
+ "EventCode": "0x0135",
+ "EventName": "S1L1_HIT",
+ "PublicDescription": "Translation cache hit on S1L1 walk cache entry."
+ },
+ {
+ "EventCode": "0x0136",
+ "EventName": "S1L0_HIT",
+ "PublicDescription": "Translation cache hit on S1L0 walk cache entry."
+ },
+ {
+ "EventCode": "0x0137",
+ "EventName": "S2L2_HIT",
+ "PublicDescription": "Translation cache hit for S2L2 IPA walk cache entry."
+ },
+ {
+ "EventCode": "0x0138",
+ "EventName": "IPA_REQ",
+ "PublicDescription": "Translation cache lookups for IPA to PA entries."
+ },
+ {
+ "EventCode": "0x0139",
+ "EventName": "IPA_REFILL",
+ "PublicDescription": "Translation cache refills for IPA to PA entries."
+ },
+ {
+ "EventCode": "0x013a",
+ "EventName": "S1_FLT",
+ "PublicDescription": "Stage1 tablewalk fault."
+ },
+ {
+ "EventCode": "0x013b",
+ "EventName": "S2_FLT",
+ "PublicDescription": "Stage2 tablewalk fault."
+ },
+ {
+ "EventCode": "0x013c",
+ "EventName": "COLT_REFILL",
+ "PublicDescription": "Aggregated page refill."
+ },
+ {
+ "EventCode": "0x0145",
+ "EventName": "L1_PF_HIT",
+ "PublicDescription": "L1 prefetch requests, hitting in L1 cache."
+ },
+ {
+ "EventCode": "0x0146",
+ "EventName": "L1_PF",
+ "PublicDescription": "L1 prefetch requests."
+ },
+ {
+ "EventCode": "0x0147",
+ "EventName": "CACHE_LS_REFILL",
+ "PublicDescription": "L2 D-cache refill, Load/Store."
+ },
+ {
+ "EventCode": "0x0148",
+ "EventName": "CACHE_PF",
+ "PublicDescription": "L2 prefetch requests."
+ },
+ {
+ "EventCode": "0x0149",
+ "EventName": "CACHE_PF_HIT",
+ "PublicDescription": "L2 prefetch requests, hitting in L2 cache."
+ },
+ {
+ "EventCode": "0x0150",
+ "EventName": "UNUSED_PF",
+ "PublicDescription": "L2 unused prefetch."
+ },
+ {
+ "EventCode": "0x0151",
+ "EventName": "PFT_SENT",
+ "PublicDescription": "L2 prefetch TGT sent.\nNote that PFT_SENT != PFT_USEFUL + PFT_DROP. There may be PFT_SENT for which the accesses resulted in a SLC hit."
+ },
+ {
+ "EventCode": "0x0152",
+ "EventName": "PFT_USEFUL",
+ "PublicDescription": "L2 prefetch TGT useful."
+ },
+ {
+ "EventCode": "0x0153",
+ "EventName": "PFT_DROP",
+ "PublicDescription": "L2 prefetch TGT dropped."
+ },
+ {
+ "EventCode": "0x0162",
+ "EventName": "LRQ_FULL",
+ "PublicDescription": "This event counts the number of cycles the LRQ is full."
+ },
+ {
+ "EventCode": "0x0163",
+ "EventName": "FETCH_FQ_EMPTY",
+ "PublicDescription": "Fetch Queue empty cycles."
+ },
+ {
+ "EventCode": "0x0164",
+ "EventName": "FPG2",
+ "PublicDescription": "Forward progress guarantee. Medium range livelock triggered."
+ },
+ {
+ "EventCode": "0x0165",
+ "EventName": "FPG",
+ "PublicDescription": "Forward progress guarantee. Tofu global livelock buster is triggered."
+ },
+ {
+ "EventCode": "0x0172",
+ "EventName": "DEADBLOCK",
+ "PublicDescription": "Write-back evictions converted to dataless EVICT.\nThe victim line is deemed deadblock if the likeliness of a reuse is low. The Core uses dataless evict to evict a deadblock; and it uses an evict with data to evict an L2 line that is not a deadblock."
+ },
+ {
+ "EventCode": "0x0173",
+ "EventName": "PF_PRQ_ALLOC_PF_PEND",
+ "PublicDescription": "L1 prefetch prq allocation (replacing pending)."
+ },
+ {
+ "EventCode": "0x0178",
+ "EventName": "FETCH_ICACHE_INSTR",
+ "PublicDescription": "Instructions fetched from I-cache."
+ },
+ {
+ "EventCode": "0x017b",
+ "EventName": "NEAR_CAS",
+ "PublicDescription": "Near atomics: compare and swap."
+ },
+ {
+ "EventCode": "0x017c",
+ "EventName": "NEAR_CAS_PASS",
+ "PublicDescription": "Near atomics: compare and swap pass."
+ },
+ {
+ "EventCode": "0x017d",
+ "EventName": "FAR_CAS",
+ "PublicDescription": "Far atomics: compare and swap."
+ },
+ {
+ "EventCode": "0x0186",
+ "EventName": "L2_BTB_RELOAD_MAIN_BTB",
+ "PublicDescription": "Number of completed L1 BTB update initiated by L2 BTB hit which swap branch information between L1 BTB and L2 BTB."
+ },
+ {
+ "EventCode": "0x018f",
+ "EventName": "L1_PF_GEN_MCMC",
+ "PublicDescription": "Load/Store prefetch to L1 generated, MCMC."
+ },
+ {
+ "EventCode": "0x0190",
+ "EventName": "PF_MODE_0_CYCLES",
+ "PublicDescription": "Number of cycles in which the hardware prefetcher is in the most aggressive mode."
+ },
+ {
+ "EventCode": "0x0191",
+ "EventName": "PF_MODE_1_CYCLES",
+ "PublicDescription": "Number of cycles in which the hardware prefetcher is in the more aggressive mode."
+ },
+ {
+ "EventCode": "0x0192",
+ "EventName": "PF_MODE_2_CYCLES",
+ "PublicDescription": "Number of cycles in which the hardware prefetcher is in the less aggressive mode."
+ },
+ {
+ "EventCode": "0x0193",
+ "EventName": "PF_MODE_3_CYCLES",
+ "PublicDescription": "Number of cycles in which the hardware prefetcher is in the most conservative mode."
+ },
+ {
+ "EventCode": "0x0194",
+ "EventName": "TXREQ_LIMIT_MAX_CYCLES",
+ "PublicDescription": "Number of cycles in which the dynamic TXREQ limit is the L2_TQ_SIZE."
+ },
+ {
+ "EventCode": "0x0195",
+ "EventName": "TXREQ_LIMIT_3QUARTER_CYCLES",
+ "PublicDescription": "Number of cycles in which the dynamic TXREQ limit is between 3/4 of the L2_TQ_SIZE and the L2_TQ_SIZE-1."
+ },
+ {
+ "EventCode": "0x0196",
+ "EventName": "TXREQ_LIMIT_HALF_CYCLES",
+ "PublicDescription": "Number of cycles in which the dynamic TXREQ limit is between 1/2 of the L2_TQ_SIZE and 3/4 of the L2_TQ_SIZE."
+ },
+ {
+ "EventCode": "0x0197",
+ "EventName": "TXREQ_LIMIT_1QUARTER_CYCLES",
+ "PublicDescription": "Number of cycles in which the dynamic TXREQ limit is between 1/4 of the L2_TQ_SIZE and 1/2 of the L2_TQ_SIZE."
+ },
+ {
+ "EventCode": "0x019d",
+ "EventName": "PREFETCH_LATE_CMC",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit on TQ entry allocated by CMC prefetch request."
+ },
+ {
+ "EventCode": "0x019e",
+ "EventName": "PREFETCH_LATE_BO",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit on TQ entry allocated by BO prefetch request."
+ },
+ {
+ "EventCode": "0x019f",
+ "EventName": "PREFETCH_LATE_STRIDE",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit on TQ entry allocated by STRIDE prefetch request."
+ },
+ {
+ "EventCode": "0x01a0",
+ "EventName": "PREFETCH_LATE_SPATIAL",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit on TQ entry allocated by SPATIAL prefetch request."
+ },
+ {
+ "EventCode": "0x01a2",
+ "EventName": "PREFETCH_LATE_TBW",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit on TQ entry allocated by TBW prefetch request."
+ },
+ {
+ "EventCode": "0x01a3",
+ "EventName": "PREFETCH_LATE_PAGE",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit on TQ entry allocated by PAGE prefetch request."
+ },
+ {
+ "EventCode": "0x01a4",
+ "EventName": "PREFETCH_LATE_GSMS",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit on TQ entry allocated by GSMS prefetch request."
+ },
+ {
+ "EventCode": "0x01a5",
+ "EventName": "PREFETCH_LATE_SIP_CONS",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit on TQ entry allocated by SIP_CONS prefetch request."
+ },
+ {
+ "EventCode": "0x01a6",
+ "EventName": "PREFETCH_REFILL_CMC",
+ "PublicDescription": "PF/prefetch or PF/readclean request from CMC pf engine filled the L2 cache."
+ },
+ {
+ "EventCode": "0x01a7",
+ "EventName": "PREFETCH_REFILL_BO",
+ "PublicDescription": "PF/prefetch or PF/readclean request from BO pf engine filled the L2 cache."
+ },
+ {
+ "EventCode": "0x01a8",
+ "EventName": "PREFETCH_REFILL_STRIDE",
+ "PublicDescription": "PF/prefetch or PF/readclean request from STRIDE pf engine filled the L2 cache."
+ },
+ {
+ "EventCode": "0x01a9",
+ "EventName": "PREFETCH_REFILL_SPATIAL",
+ "PublicDescription": "PF/prefetch or PF/readclean request from SPATIAL pf engine filled the L2 cache."
+ },
+ {
+ "EventCode": "0x01ab",
+ "EventName": "PREFETCH_REFILL_TBW",
+ "PublicDescription": "PF/prefetch or PF/readclean request from TBW pf engine filled the L2 cache."
+ },
+ {
+ "EventCode": "0x01ac",
+ "EventName": "PREFETCH_REFILL_PAGE",
+ "PublicDescription": "PF/prefetch or PF/readclean request from PAGE pf engine filled the L2 cache."
+ },
+ {
+ "EventCode": "0x01ad",
+ "EventName": "PREFETCH_REFILL_GSMS",
+ "PublicDescription": "PF/prefetch or PF/readclean request from GSMS pf engine filled the L2 cache."
+ },
+ {
+ "EventCode": "0x01ae",
+ "EventName": "PREFETCH_REFILL_SIP_CONS",
+ "PublicDescription": "PF/prefetch or PF/readclean request from SIP_CONS pf engine filled the L2 cache."
+ },
+ {
+ "EventCode": "0x01af",
+ "EventName": "CACHE_HIT_LINE_PF_CMC",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit in L2 cache on line filled by CMC prefetch request."
+ },
+ {
+ "EventCode": "0x01b0",
+ "EventName": "CACHE_HIT_LINE_PF_BO",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit in L2 cache on line filled by BO prefetch request."
+ },
+ {
+ "EventCode": "0x01b1",
+ "EventName": "CACHE_HIT_LINE_PF_STRIDE",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit in L2 cache on line filled by STRIDE prefetch request."
+ },
+ {
+ "EventCode": "0x01b2",
+ "EventName": "CACHE_HIT_LINE_PF_SPATIAL",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit in L2 cache on line filled by SPATIAL prefetch request."
+ },
+ {
+ "EventCode": "0x01b4",
+ "EventName": "CACHE_HIT_LINE_PF_TBW",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit in L2 cache on line filled by TBW prefetch request."
+ },
+ {
+ "EventCode": "0x01b5",
+ "EventName": "CACHE_HIT_LINE_PF_PAGE",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit in L2 cache on line filled by PAGE prefetch request."
+ },
+ {
+ "EventCode": "0x01b6",
+ "EventName": "CACHE_HIT_LINE_PF_GSMS",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit in L2 cache on line filled by GSMS prefetch request."
+ },
+ {
+ "EventCode": "0x01b7",
+ "EventName": "CACHE_HIT_LINE_PF_SIP_CONS",
+ "PublicDescription": "LS/readclean or LS/readunique lookup hit in L2 cache on line filled by SIP_CONS prefetch request."
+ },
+ {
+ "EventCode": "0x01ba",
+ "EventName": "PREFETCH_LATE_STORE_ISSUE",
+ "PublicDescription": "This event counts the number of demand requests that matches a Store-issue prefetcher's pending refill request. These are called late prefetch requests and are still counted as useful prefetcher requests for the sake of accuracy and coverage measurements."
+ },
+ {
+ "EventCode": "0x01bb",
+ "EventName": "PREFETCH_LATE_STORE_STRIDE",
+ "PublicDescription": "This event counts the number of demand requests that matches a Store-stride prefetcher's pending refill request. These are called late prefetch requests and are still counted as useful prefetcher requests for the sake of accuracy and coverage measurements."
+ },
+ {
+ "EventCode": "0x01bc",
+ "EventName": "PREFETCH_LATE_PC_OFFSET",
+ "PublicDescription": "This event counts the number of demand requests that matches a PC-offset prefetcher's pending refill request. These are called late prefetch requests and are still counted as useful prefetcher requests for the sake of accuracy and coverage measurements."
+ },
+ {
+ "EventCode": "0x01bd",
+ "EventName": "PREFETCH_LATE_IFUPF",
+ "PublicDescription": "This event counts the number of demand requests that matches an IFU prefetcher's pending refill request. These are called late prefetch requests and are still counted as useful prefetcher requests for the sake of accuracy and coverage measurements."
+ },
+ {
+ "EventCode": "0x01be",
+ "EventName": "PREFETCH_REFILL_STORE_ISSUE",
+ "PublicDescription": "This event counts the number of cache refills due to Store-Issue prefetcher."
+ },
+ {
+ "EventCode": "0x01bf",
+ "EventName": "PREFETCH_REFILL_STORE_STRIDE",
+ "PublicDescription": "This event counts the number of cache refills due to Store-stride prefetcher."
+ },
+ {
+ "EventCode": "0x01c0",
+ "EventName": "PREFETCH_REFILL_PC_OFFSET",
+ "PublicDescription": "This event counts the number of cache refills due to PC-offset prefetcher."
+ },
+ {
+ "EventCode": "0x01c1",
+ "EventName": "PREFETCH_REFILL_IFUPF",
+ "PublicDescription": "This event counts the number of cache refills due to IFU prefetcher."
+ },
+ {
+ "EventCode": "0x01c2",
+ "EventName": "CACHE_HIT_LINE_PF_STORE_ISSUE",
+ "PublicDescription": "This event counts the number of first hit to a cache line filled by Store-issue prefetcher."
+ },
+ {
+ "EventCode": "0x01c3",
+ "EventName": "CACHE_HIT_LINE_PF_STORE_STRIDE",
+ "PublicDescription": "This event counts the number of first hit to a cache line filled by Store-stride prefetcher."
+ },
+ {
+ "EventCode": "0x01c4",
+ "EventName": "CACHE_HIT_LINE_PF_PC_OFFSET",
+ "PublicDescription": "This event counts the number of first hit to a cache line filled by PC-offset prefetcher."
+ },
+ {
+ "EventCode": "0x01c5",
+ "EventName": "CACHE_HIT_LINE_PF_IFUPF",
+ "PublicDescription": "This event counts the number of first hit to a cache line filled by IFU prefetcher."
+ },
+ {
+ "EventCode": "0x01c6",
+ "EventName": "L2_PF_GEN_ST_ISSUE",
+ "PublicDescription": "Store-issue prefetch to L2 generated."
+ },
+ {
+ "EventCode": "0x01c7",
+ "EventName": "L2_PF_GEN_ST_STRIDE",
+ "PublicDescription": "Store-stride prefetch to L2 generated."
+ },
+ {
+ "EventCode": "0x01cb",
+ "EventName": "L2_TQ_OUTSTANDING",
+ "PublicDescription": "Outstanding tracker count, per cycle.\nThis event increments by the number of valid entries pertaining to this thread in the L2TQ, in each cycle.\nThis event can be used to calculate the occupancy of L2TQ by dividing this by the CPU_CYCLES event. The L2TQ queue tracks the outstanding Read, Write and Snoop transactions. The Read transaction and the Write transaction entries are attributable to PE, whereas the Snoop transactions are not always attributable to PE."
+ },
+ {
+ "EventCode": "0x01cc",
+ "EventName": "TXREQ_LIMIT_COUNT_CYCLES",
+ "PublicDescription": "This event increments by the dynamic TXREQ value, in each cycle.\nThis is a companion event of TXREQ_LIMIT_MAX_CYCLES, TXREQ_LIMIT_3QUARTER_CYCLES, TXREQ_LIMIT_HALF_CYCLES, and TXREQ_LIMIT_1QUARTER_CYCLES."
+ },
+ {
+ "EventCode": "0x01ce",
+ "EventName": "L3DPRFM_TO_L2PRQ_CONVERTED",
+ "PublicDescription": "This event counts the number of Converted-L3D-PRFMs. These are indeed L3D PRFM and activities around these PRFM are counted by the L3D_CACHE_PRFM, L3D_CACHE_REFILL_PRFM and L3D_CACHE_REFILL Events."
+ },
+ {
+ "EventCode": "0x01d2",
+ "EventName": "DVM_TLBI_RCVD",
+ "PublicDescription": "This event counts the number of TLBI DVM message received over CHI interface, for *this* Core."
+ },
+ {
+ "EventCode": "0x01d6",
+ "EventName": "DSB_COMMITING_LOCAL_TLBI",
+ "PublicDescription": "This event counts the number of DSB that are retired and committed at least one local TLBI instruction. This event increments no more than once (in a cycle) even if the DSB commits multiple local TLBI instructions."
+ },
+ {
+ "EventCode": "0x01d7",
+ "EventName": "DSB_COMMITING_BROADCAST_TLBI",
+ "PublicDescription": "This event counts the number of DSB that are retired and committed at least one broadcast TLBI instruction. This event increments no more than once (in a cycle) even if the DSB commits multiple broadcast TLBI instructions."
+ },
+ {
+ "EventCode": "0x01eb",
+ "EventName": "L1DPRFM_L2DPRFM_TO_L2PRQ_CONVERTED",
+ "PublicDescription": "This event counts the number of Converted-L1D-PRFMs and Converted-L2D-PRFM.\nActivities involving the Converted-L1D-PRFM are counted by the L1D_CACHE_PRFM. However they are *not* counted by the L1D_CACHE_REFILL_PRFM, and L1D_CACHE_REFILL, as these Converted-L1D-PRFM are treated as L2 D hardware prefetches. Activities around the Converted-L1D-PRFMs and Converted-L2D-PRFMs are counted by the L2D_CACHE_PRFM, L2D_CACHE_REFILL_PRFM and L2D_CACHE_REFILL Events."
+ },
+ {
+ "EventCode": "0x01ec",
+ "EventName": "PREFETCH_LATE_CONVERTED_PRFM",
+ "PublicDescription": "This event counts the number of demand requests that matches a Converted-L1D-PRFM or Converted-L2D-PRFM pending refill request at L2 D-cache. These are called late prefetch requests and are still counted as useful prefetcher requests for the sake of accuracy and coverage measurements.\nNote that this event is not counted by the L2D_CACHE_HIT_RWL1PRF_LATE_HWPRF, though the Converted-L1D-PRFM or Converted-L2D-PRFM are replayed by the L2PRQ."
+ },
+ {
+ "EventCode": "0x01ed",
+ "EventName": "PREFETCH_REFILL_CONVERTED_PRFM",
+ "PublicDescription": "This event counts the number of L2 D-cache refills due to Converted-L1D-PRFM or Converted-L2D-PRFM.\nNote : L2D_CACHE_REFILL_PRFM is inclusive of PREFETCH_REFILL_CONVERTED_PRFM, where both the PREFETCH_REFILL_CONVERTED_PRFM and the L2D_CACHE_REFILL_PRFM increment when L2 D-cache refills due to Converted-L1D-PRFM or Converted-L2D-PRFM."
+ },
+ {
+ "EventCode": "0x01ee",
+ "EventName": "CACHE_HIT_LINE_PF_CONVERTED_PRFM",
+ "PublicDescription": "This event counts the number of first hit to a cache line filled by Converted-L1D-PRFM or Converted-L2D-PRFM.\nNote that L2D_CACHE_HIT_RWL1PRF_FPRFM is inclusive of CACHE_HIT_LINE_PF_CONVERTED_PRFM, where both the CACHE_HIT_LINE_PF_CONVERTED_PRFM and the L2D_CACHE_HIT_RWL1PRF_FPRFM increment on a first hit to L2 D-cache filled by Converted-L1D-PRFM or Converted-L2D-PRFM."
+ },
+ {
+ "EventCode": "0x01f0",
+ "EventName": "TMS_ST_TO_SMT_LATENCY",
+ "PublicDescription": "This event counts the number of CPU cycles spent on TMS for ST-to-SMT switch.\nThis event is counted by both the threads - This event in both threads increment during TMS for ST-to-SMT switch."
+ },
+ {
+ "EventCode": "0x01f1",
+ "EventName": "TMS_SMT_TO_ST_LATENCY",
+ "PublicDescription": "This event counts the number of CPU cycles spent on TMS for SMT-to-ST switch. The count also includes the CPU cycles spend due to an aborted SMT-to-ST TMS attempt.\nThis event is counted only by the thread that is not in WFI."
+ },
+ {
+ "EventCode": "0x01f2",
+ "EventName": "TMS_ST_TO_SMT_COUNT",
+ "PublicDescription": "This event counts the number of completed TMS from ST-to-SMT.\nThis event is counted only by the active thread (the one that is not in WFI).\nNote: When an active thread enters the Debug state in ST-Full resource mode, it is switched to SMT mode. This is because the inactive thread cannot wake up while the other thread remains in the Debug state. To prevent this issue, threads operating in ST-Full resource mode are transitioned to SMT mode upon entering Debug state. This event count will also reflect such switches from ST to SMT mode.\n(Also see the NV_CPUACTLR14_EL1.chka_prevent_st_tx_to_smt_when_tx_in_debug_state bit to disable this behavior.)"
+ },
+ {
+ "EventCode": "0x01f3",
+ "EventName": "TMS_SMT_TO_ST_COUNT",
+ "PublicDescription": "This event counts the number of completed TMS from SMT-to-ST.\nThis event is counted only by the thread that is not in WFI."
+ },
+ {
+ "EventCode": "0x01f4",
+ "EventName": "TMS_SMT_TO_ST_COUNT_ABRT",
+ "PublicDescription": "This event counts the number of aborted TMS from SMT-to-ST.\nThis event is counted only by the thread that is not in WFI."
+ },
+ {
+ "EventCode": "0x0202",
+ "EventName": "L0I_CACHE_RD",
+ "PublicDescription": "This event counts the number of predict blocks serviced out of L0 I-cache.\nNote: The L0 I-cache performs at most 4 L0 I look-up in a cycle. Two of which are to service PB from L0 I. And the other two to refill L0 I-cache from L1 I. This event counts only the L0 I-cache lookup pertaining to servicing the PB from L0 I."
+ },
+ {
+ "EventCode": "0x0203",
+ "EventName": "L0I_CACHE_REFILL",
+ "PublicDescription": "This event counts the number of L0I cache refill from L1 I-cache."
+ },
+ {
+ "EventCode": "0x0207",
+ "EventName": "INTR_LATENCY",
+ "PublicDescription": "This event counts the number of cycles elapsed between when an Interrupt is recognized (after masking) to when a uop associated with the first instruction in the destination exception level is allocated. If there is some other flush condition that pre-empts the Interrupt, then the cycles counted terminates early at the first instruction executed after that flush. In the event of dropped Interrupts (when an Interrupt is deasserted before it is taken), this counter measures the number of cycles that elapse from the moment an Interrupt is recognized (post-masking) until the Interrupt is dropped or deasserted.\nNote that\n* IESB(Implicit Error Synchronization Barrier) is an internal mop, so the latency of an implicit IESB mop executed before the Interrupt taken is included in the Interrupt latency count.\n* Nukes or TMS sequence within the window are also counted by the Interrupt latency Event.\n* A SMT to ST TMS will be aborted on detecting the wake condition for the WFI thread. The Interrupt latency count includes any additional penalty for an aborted TMS."
+ },
+ {
+ "EventCode": "0x021c",
+ "EventName": "CWT_ALLOC_ENTRY",
+ "PublicDescription": "Cache Way Tracker Allocate entry."
+ },
+ {
+ "EventCode": "0x021d",
+ "EventName": "CWT_ALLOC_LINE",
+ "PublicDescription": "Cache Way Tracker Allocate line."
+ },
+ {
+ "EventCode": "0x021e",
+ "EventName": "CWT_HIT",
+ "PublicDescription": "Cache Way Tracker hit."
+ },
+ {
+ "EventCode": "0x021f",
+ "EventName": "CWT_HIT_TAG",
+ "PublicDescription": "Cache Way Tracker hit when ITAG lookup suppressed."
+ },
+ {
+ "EventCode": "0x0220",
+ "EventName": "CWT_REPLAY_TAG",
+ "PublicDescription": "Cache Way Tracker causes ITAG replay due to miss when ITAG lookup suppressed."
+ },
+ {
+ "EventCode": "0x0250",
+ "EventName": "GPT_REQ",
+ "PublicDescription": "GPT lookup."
+ },
+ {
+ "EventCode": "0x0251",
+ "EventName": "GPT_WC_HIT",
+ "PublicDescription": "GPT lookup hit in Walk cache."
+ },
+ {
+ "EventCode": "0x0252",
+ "EventName": "GPT_PG_HIT",
+ "PublicDescription": "GPT lookup hit in TLB."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/retired.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/retired.json
new file mode 100644
index 000000000000..34c7eefa66b0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/retired.json
@@ -0,0 +1,94 @@
+[
+ {
+ "ArchStdEvent": "INST_RETIRED",
+ "PublicDescription": "This event counts instructions that have been architecturally executed."
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED",
+ "PublicDescription": "This event counts architecturally executed writes to the CONTEXTIDR_EL1 register, which usually contains the kernel PID and can be output with hardware trace."
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED",
+ "PublicDescription": "This event counts architecturally executed direct branches."
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED",
+ "PublicDescription": "This event counts architecturally executed procedure returns."
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED",
+ "PublicDescription": "This event counts architectural writes to TTBR0/1_EL1. If virtualization host extensions are enabled (by setting the HCR_EL2.E2H bit to 1), then accesses to TTBR0/1_EL1 that are redirected to TTBR0/1_EL2, or accesses to TTBR0/1_EL12, are counted. TTBRn registers are typically updated when the kernel is swapping user-space threads or applications."
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED",
+ "PublicDescription": "This event counts architecturally executed branches, whether the branch is taken or not. Instructions that explicitly write to the PC are also counted. Note that exception generating instructions, exception return instructions, and context synchronization instructions are not counted."
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED",
+ "PublicDescription": "This event counts branches counted by BR_RETIRED which were mispredicted and caused a pipeline flush."
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED",
+ "PublicDescription": "This event counts micro-operations that are architecturally executed. This is a count of number of micro-operations retired from the commit queue in a single cycle."
+ },
+ {
+ "ArchStdEvent": "BR_INDNR_TAKEN_RETIRED",
+ "PublicDescription": "This event counts architecturally executed indirect branches excluding procedure returns that were taken."
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_PRED_RETIRED",
+ "PublicDescription": "This event counts architecturally executed direct branches that were correctly predicted."
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_MIS_PRED_RETIRED",
+ "PublicDescription": "This event counts architecturally executed direct branches that were mispredicted and caused a pipeline flush."
+ },
+ {
+ "ArchStdEvent": "BR_IND_PRED_RETIRED",
+ "PublicDescription": "This event counts architecturally executed indirect branches including procedure returns that were correctly predicted."
+ },
+ {
+ "ArchStdEvent": "BR_IND_MIS_PRED_RETIRED",
+ "PublicDescription": "This event counts architecturally executed indirect branches including procedure returns that were mispredicted and caused a pipeline flush."
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_PRED_RETIRED",
+ "PublicDescription": "This event counts architecturally executed procedure returns that were correctly predicted."
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_MIS_PRED_RETIRED",
+ "PublicDescription": "This event counts architecturally executed procedure returns that were mispredicted and caused a pipeline flush."
+ },
+ {
+ "ArchStdEvent": "BR_INDNR_PRED_RETIRED",
+ "PublicDescription": "This event counts architecturally executed indirect branches excluding procedure returns that were correctly predicted."
+ },
+ {
+ "ArchStdEvent": "BR_INDNR_MIS_PRED_RETIRED",
+ "PublicDescription": "This event counts architecturally executed indirect branches excluding procedure returns that were mispredicted and caused a pipeline flush."
+ },
+ {
+ "ArchStdEvent": "BR_TAKEN_PRED_RETIRED",
+ "PublicDescription": "This event counts architecturally executed branches that were taken and were correctly predicted."
+ },
+ {
+ "ArchStdEvent": "BR_TAKEN_MIS_PRED_RETIRED",
+ "PublicDescription": "This event counts architecturally executed branches that were taken and were mispredicted causing a pipeline flush."
+ },
+ {
+ "ArchStdEvent": "BR_SKIP_PRED_RETIRED",
+ "PublicDescription": "This event counts architecturally executed branches that were not taken and were correctly predicted."
+ },
+ {
+ "ArchStdEvent": "BR_SKIP_MIS_PRED_RETIRED",
+ "PublicDescription": "This event counts architecturally executed branches that were not taken and were mispredicted causing a pipeline flush."
+ },
+ {
+ "ArchStdEvent": "BR_PRED_RETIRED",
+ "PublicDescription": "This event counts branch instructions counted by BR_RETIRED which were correctly predicted."
+ },
+ {
+ "ArchStdEvent": "BR_IND_RETIRED",
+ "PublicDescription": "This event counts architecturally executed indirect branches including procedure returns."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/spe.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/spe.json
new file mode 100644
index 000000000000..00d0c5051a48
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/spe.json
@@ -0,0 +1,42 @@
+[
+ {
+ "ArchStdEvent": "SAMPLE_POP",
+ "PublicDescription": "This event counts statistical profiling sample population, the count of all operations that could be sampled but may or may not be chosen for sampling."
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FEED",
+ "PublicDescription": "This event counts statistical profiling samples taken for sampling."
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FILTRATE",
+ "PublicDescription": "This event counts statistical profiling samples taken which are not removed by filtering."
+ },
+ {
+ "ArchStdEvent": "SAMPLE_COLLISION",
+ "PublicDescription": "This event counts statistical profiling samples that have collided with a previous sample and so therefore not taken."
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FEED_BR",
+ "PublicDescription": "This event counts statistical profiling samples taken which are branches."
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FEED_LD",
+ "PublicDescription": "This event counts statistical profiling samples taken which are Loads or Load atomic operations."
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FEED_ST",
+ "PublicDescription": "This event counts statistical profiling samples taken which are Stores or Store atomic operations."
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FEED_OP",
+ "PublicDescription": "This event counts statistical profiling samples taken which are matching any operation type filters supported."
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FEED_EVENT",
+ "PublicDescription": "This event counts statistical profiling samples taken which are matching event packet filter constraints."
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FEED_LAT",
+ "PublicDescription": "This event counts statistical profiling samples taken which are exceeding minimum latency set by operation latency filter constraints."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/spec_operation.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/spec_operation.json
new file mode 100644
index 000000000000..8bc802f5f350
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/spec_operation.json
@@ -0,0 +1,230 @@
+[
+ {
+ "ArchStdEvent": "INST_SPEC",
+ "PublicDescription": "This event counts operations that have been speculatively executed."
+ },
+ {
+ "ArchStdEvent": "OP_SPEC",
+ "PublicDescription": "This event counts micro-operations speculatively executed. This is the count of the number of micro-operations dispatched in a cycle."
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC",
+ "PublicDescription": "This event counts unaligned memory Read operations issued by the CPU. This event counts unaligned accesses (as defined by the actual instruction), even if they are subsequently issued as multiple aligned accesses.\nThis event does not count preload operations (PLD, PLI).\nThis event is a subset of the UNALIGNED_LDST_SPEC event."
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC",
+ "PublicDescription": "This event counts unaligned memory Write operations issued by the CPU. This event counts unaligned accesses (as defined by the actual instruction), even if they are subsequently issued as multiple aligned accesses.\nThis event is a subset of the UNALIGNED_LDST_SPEC event."
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC",
+ "PublicDescription": "This event counts unaligned memory operations issued by the CPU. This event counts unaligned accesses (as defined by the actual instruction), even if they are subsequently issued as multiple aligned accesses.\nThis event is the sum of the following events:\nUNALIGNED_ST_SPEC and\nUNALIGNED_LD_SPEC."
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC",
+ "PublicDescription": "This event counts Load-Exclusive operations that have been speculatively executed. For example: LDREX, LDX"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC",
+ "PublicDescription": "This event counts Store-exclusive operations that have been speculatively executed and have successfully completed the Store operation."
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC",
+ "PublicDescription": "This event counts Store-exclusive operations that have been speculatively executed and have not successfully completed the Store operation."
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC",
+ "PublicDescription": "This event counts Store-exclusive operations that have been speculatively executed.\nThis event is the sum of the following events:\nSTREX_PASS_SPEC and\nSTREX_FAIL_SPEC."
+ },
+ {
+ "ArchStdEvent": "LD_SPEC",
+ "PublicDescription": "This event counts speculatively executed Load operations including Single Instruction Multiple Data (SIMD) Load operations."
+ },
+ {
+ "ArchStdEvent": "ST_SPEC",
+ "PublicDescription": "This event counts speculatively executed Store operations including Single Instruction Multiple Data (SIMD) Store operations."
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC",
+ "PublicDescription": "This event counts Load and Store operations that have been speculatively executed."
+ },
+ {
+ "ArchStdEvent": "DP_SPEC",
+ "PublicDescription": "This event counts speculatively executed logical or arithmetic instructions such as MOV/MVN operations."
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC",
+ "PublicDescription": "This event counts speculatively executed Advanced SIMD operations excluding Load, Store, and Move micro-operations that move data to or from SIMD (vector) registers."
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC",
+ "PublicDescription": "This event counts speculatively executed floating point operations. This event does not count operations that move data to or from floating point (vector) registers."
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC",
+ "PublicDescription": "This event counts speculatively executed operations which cause software changes of the PC. Those operations include all taken branch operations."
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC",
+ "PublicDescription": "This event counts speculatively executed cryptographic operations except for PMULL and VMULL operations."
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC",
+ "PublicDescription": "This event counts direct branch operations which are speculatively executed."
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC",
+ "PublicDescription": "This event counts procedure return operations (RET, RETAA and RETAB) which are speculatively executed."
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC",
+ "PublicDescription": "This event counts indirect branch operations including procedure returns, which are speculatively executed. This includes operations that force a software change of the PC, other than exception-generating operations and direct branch instructions. Some examples of the instructions counted by this event include BR Xn, RET, etc."
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC",
+ "PublicDescription": "This event counts ISB operations that are executed."
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC",
+ "PublicDescription": "This event counts DSB operations that are speculatively issued to Load/Store unit in the CPU."
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC",
+ "PublicDescription": "This event counts DMB operations that are speculatively issued to the Load/Store unit in the CPU. This event does not count implied barriers from Load-acquire/Store-release operations."
+ },
+ {
+ "ArchStdEvent": "CSDB_SPEC",
+ "PublicDescription": "This event counts CSDB operations that are speculatively issued to the Load/Store unit in the CPU. This event does not count implied barriers from Load-acquire/Store-release operations."
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC",
+ "PublicDescription": "This event counts any Load acquire operations that are speculatively executed. For example: LDAR, LDARH, LDARB"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC",
+ "PublicDescription": "This event counts any Store release operations that are speculatively executed. For example: STLR, STLRH, STLRB"
+ },
+ {
+ "ArchStdEvent": "SIMD_INST_SPEC",
+ "PublicDescription": "This event counts speculatively executed operations that are SIMD or SVE vector operations or Advanced SIMD non-scalar operations."
+ },
+ {
+ "ArchStdEvent": "ASE_INST_SPEC",
+ "PublicDescription": "This event counts speculatively executed Advanced SIMD operations."
+ },
+ {
+ "ArchStdEvent": "SVE_INST_SPEC",
+ "PublicDescription": "This event counts speculatively executed operations that are SVE operations."
+ },
+ {
+ "ArchStdEvent": "INT_SPEC",
+ "PublicDescription": "This event counts speculatively executed integer arithmetic operations."
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_SPEC",
+ "PublicDescription": "This event counts speculatively executed predicated SVE operations.\nThis counter also counts SVE operation due to instruction with Governing predicate operand that determines the Active elements that do not write to any SVE Z vector destination register using either zeroing or merging predicate. Thus, the operations due to instructions such as INCP, DECP, UQINCP, UQDECP, SQINCP, SQDECP and PNEXT, are counted by the SVE_PRED_* events."
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_EMPTY_SPEC",
+ "PublicDescription": "This event counts speculatively executed predicated SVE operations with no active predicate elements.\nThis counter also counts SVE operation due to instruction with Governing predicate operand that determines the Active elements that do not write to any SVE Z vector destination register using either zeroing or merging predicate. Thus, the operations due to instructions such as INCP, DECP, UQINCP, UQDECP, SQINCP, SQDECP and PNEXT, are counted by the SVE_PRED_* events."
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_FULL_SPEC",
+ "PublicDescription": "This event counts speculatively executed predicated SVE operations with all predicate elements active.\nThis counter also counts SVE operation due to instruction with Governing predicate operand that determines the Active elements that do not write to any SVE Z vector destination register using either zeroing or merging predicate. Thus, the operations due to instructions such as INCP, DECP, UQINCP, UQDECP, SQINCP, SQDECP and PNEXT, are counted by the SVE_PRED_* events."
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_PARTIAL_SPEC",
+ "PublicDescription": "This event counts speculatively executed predicated SVE operations with at least one but not all active predicate elements.\nThis counter also counts SVE operation due to instruction with Governing predicate operand that determines the Active elements that do not write to any SVE Z vector destination register using either zeroing or merging predicate. Thus, the operations due to instructions such as INCP, DECP, UQINCP, UQDECP, SQINCP, SQDECP and PNEXT, are counted by the SVE_PRED_* events."
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_NOT_FULL_SPEC",
+ "PublicDescription": "This event counts speculatively executed predicated SVE operations with at least one non active predicate elements.\nThis counter also counts SVE operation due to instruction with Governing predicate operand that determines the Active elements that do not write to any SVE Z vector destination register using either zeroing or merging predicate. Thus, the operations due to instructions such as INCP, DECP, UQINCP, UQDECP, SQINCP, SQDECP and PNEXT, are counted by the SVE_PRED_* events."
+ },
+ {
+ "ArchStdEvent": "PRF_SPEC",
+ "PublicDescription": "This event counts speculatively executed operations that prefetch memory. For example, Scalar: PRFM, SVE: PRFB, PRFD, PRFH, or PRFW."
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_SPEC",
+ "PublicDescription": "This event counts speculatively executed SVE first fault or non-fault Load operations."
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_FAULT_SPEC",
+ "PublicDescription": "This event counts speculatively executed SVE first fault or non-fault Load operations that clear at least one bit in the FFR."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT8_SPEC",
+ "PublicDescription": "This event counts speculatively executed Advanced SIMD or SVE integer operations with the largest data type being an 8-bit integer."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT16_SPEC",
+ "PublicDescription": "This event counts speculatively executed Advanced SIMD or SVE integer operations with the largest data type a 16-bit integer."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT32_SPEC",
+ "PublicDescription": "This event counts speculatively executed Advanced SIMD or SVE integer operations with the largest data type a 32-bit integer."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT64_SPEC",
+ "PublicDescription": "This event counts speculatively executed Advanced SIMD or SVE integer operations with the largest data type a 64-bit integer."
+ },
+ {
+ "EventCode": "0x011d",
+ "EventName": "SPEC_RET_STACK_FULL",
+ "PublicDescription": "This event counts predict pipe stalls due to speculative return address predictor full."
+ },
+ {
+ "EventCode": "0x011f",
+ "EventName": "MOPS_SPEC",
+ "PublicDescription": "Macro-ops speculatively decoded."
+ },
+ {
+ "EventCode": "0x0180",
+ "EventName": "BR_SPEC_PRED_TAKEN",
+ "PublicDescription": "Number of predicted taken from branch predictor."
+ },
+ {
+ "EventCode": "0x0181",
+ "EventName": "BR_SPEC_PRED_TAKEN_FROM_L2BTB",
+ "PublicDescription": "Number of predicted taken branch from L2 BTB."
+ },
+ {
+ "EventCode": "0x0182",
+ "EventName": "BR_SPEC_PRED_TAKEN_MULTI",
+ "PublicDescription": "Number of predicted taken for polymorphic branch."
+ },
+ {
+ "EventCode": "0x0185",
+ "EventName": "BR_SPEC_PRED_STATIC",
+ "PublicDescription": "Number of post fetch prediction."
+ },
+ {
+ "EventCode": "0x01d0",
+ "EventName": "TLBI_LOCAL_SPEC",
+ "PublicDescription": "A non-broadcast TLBI instruction executed (Speculatively or otherwise) on *this* PE."
+ },
+ {
+ "EventCode": "0x01d1",
+ "EventName": "TLBI_BROADCAST_SPEC",
+ "PublicDescription": "A broadcast TLBI instruction executed (Speculatively or otherwise) on *this* PE."
+ },
+ {
+ "EventCode": "0x01e7",
+ "EventName": "BR_SPEC_PRED_ALN_REDIR",
+ "PublicDescription": "BPU predict pipe align redirect (either AL-APQ hit/miss)."
+ },
+ {
+ "EventCode": "0x0200",
+ "EventName": "SIMD_CRYPTO_INST_SPEC",
+ "PublicDescription": "SIMD, SVE, and CRYPTO instructions speculatively decoded."
+ },
+ {
+ "EventCode": "0x022e",
+ "EventName": "VPRED_LD_SPEC",
+ "PublicDescription": "This event counts the number of Speculatively-executed-Load operations with addresses produced by the value-prediction mechanism. The loaded data might be discarded if the predicted address differs from the actual address."
+ },
+ {
+ "EventCode": "0x022f",
+ "EventName": "VPRED_LD_SPEC_MISMATCH",
+ "PublicDescription": "This event counts a subset of VPRED_LD_SPEC where the predicted Load address and the actual address mismatched."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/stall.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/stall.json
new file mode 100644
index 000000000000..92d9e0866c24
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/stall.json
@@ -0,0 +1,145 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND",
+ "PublicDescription": "This event counts cycles when frontend could not send any micro-operations to the rename stage because of frontend resource stalls caused by fetch memory latency or branch prediction flow stalls. STALL_FRONTEND_SLOTS counts SLOTS during the cycle when this event counts. STALL_SLOT_FRONTEND will count SLOTS when this event is counted on this CPU."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND",
+ "PublicDescription": "This event counts cycles whenever the rename unit is unable to send any micro-operations to the backend of the pipeline because of backend resource constraints. Backend resource constraints can include issue stage fullness, execution stage fullness, or other internal pipeline resource fullness. All the backend slots were empty during the cycle when this event counts."
+ },
+ {
+ "ArchStdEvent": "STALL",
+ "PublicDescription": "This event counts cycles when no operations are sent to the rename unit from the frontend or from the rename unit to the backend for any reason (either frontend or backend stall). This event is the sum of the following events:\nSTALL_FRONTEND and\nSTALL_BACKEND."
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND",
+ "PublicDescription": "This event counts slots per cycle in which no operations are sent from the rename unit to the backend due to backend resource constraints. STALL_BACKEND counts during the cycle when STALL_SLOT_BACKEND counts at least 1. STALL_BACKEND counts during the cycle when STALL_SLOT_BACKEND is SLOTS."
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND",
+ "PublicDescription": "This event counts slots per cycle in which no operations are sent to the rename unit from the frontend due to frontend resource constraints. STALL_FRONTEND counts during the cycle when STALL_SLOT_FRONTEND is SLOTS."
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT",
+ "PublicDescription": "This event counts slots per cycle in which no operations are sent to the rename unit from the frontend or from the rename unit to the backend for any reason (either frontend or backend stall).\nSTALL_SLOT is the sum of the following events:\nSTALL_SLOT_FRONTEND and\nSTALL_SLOT_BACKEND."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM",
+ "PublicDescription": "This event counts cycles when the backend is stalled because there is a pending demand Load request in progress in the last level Core cache.\nLast level cache in this CPU is Level 2, hence this event counts same as STALL_BACKEND_L2D."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_MEMBOUND",
+ "PublicDescription": "This event counts cycles when the frontend could not send any micro-operations to the rename stage due to resource constraints in the memory resources."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_L1I",
+ "PublicDescription": "This event counts cycles when the frontend is stalled because there is an instruction fetch request pending in the L1 I-cache."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_MEM",
+ "PublicDescription": "This event counts cycles when the frontend is stalled because there is an instruction fetch request pending in the last level Core cache.\nLast level cache in this CPU is Level 2, hence this event counts rather than STALL_FRONTEND_L2I."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_TLB",
+ "PublicDescription": "This event counts when the frontend is stalled on any TLB misses being handled. This event also counts the TLB accesses made by hardware prefetches."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_CPUBOUND",
+ "PublicDescription": "This event counts cycles when the frontend could not send any micro-operations to the rename stage due to resource constraints in the CPU resources excluding memory resources."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_FLOW",
+ "PublicDescription": "This event counts cycles when the frontend could not send any micro-operations to the rename stage due to resource constraints in the branch prediction unit."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_FLUSH",
+ "PublicDescription": "This event counts cycles when the frontend could not send any micro-operations to the rename stage as the frontend is recovering from a machine flush or resteer. Example scenarios that cause a flush include branch mispredictions, taken exceptions, microarchitectural flush etc."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEMBOUND",
+ "PublicDescription": "This event counts cycles when the backend could not accept any micro-operations due to resource constraints in the memory resources."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_L1D",
+ "PublicDescription": "This event counts cycles when the backend is stalled because there is a pending demand Load request in progress in the L1 D-cache."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_TLB",
+ "PublicDescription": "This event counts cycles when the backend is stalled on any demand TLB misses being handled."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_ST",
+ "PublicDescription": "This event counts cycles when the backend is stalled and there is a Store that has not reached the pre-commit stage."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_CPUBOUND",
+ "PublicDescription": "This event counts cycles when the backend could not accept any micro-operations due to any resource constraints in the CPU excluding memory resources."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_BUSY",
+ "PublicDescription": "This event counts cycles when the backend could not accept any micro-operations because the issue queues are full to take any operations for execution."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_ILOCK",
+ "PublicDescription": "This event counts cycles when the backend could not accept any micro-operations due to resource constraints imposed by input dependency."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_RENAME",
+ "PublicDescription": "This event counts cycles when backend is stalled even when operations are available from the frontend but at least one is not ready to be sent to the backend because no rename register is available."
+ },
+ {
+ "EventCode": "0x0158",
+ "EventName": "FLAG_DISP_STALL",
+ "PublicDescription": "Rename stalled due to FRF(Flag register file) full."
+ },
+ {
+ "EventCode": "0x0159",
+ "EventName": "GEN_DISP_STALL",
+ "PublicDescription": "Rename stalled due to GRF (General-purpose register file) full."
+ },
+ {
+ "EventCode": "0x015a",
+ "EventName": "VEC_DISP_STALL",
+ "PublicDescription": "Rename stalled due to VRF (Vector register file) full."
+ },
+ {
+ "EventCode": "0x015c",
+ "EventName": "SX_IQ_STALL",
+ "PublicDescription": "Dispatch stalled due to IQ full, SX."
+ },
+ {
+ "EventCode": "0x015d",
+ "EventName": "MX_IQ_STALL",
+ "PublicDescription": "Dispatch stalled due to IQ full, MX."
+ },
+ {
+ "EventCode": "0x015e",
+ "EventName": "LS_IQ_STALL",
+ "PublicDescription": "Dispatch stalled due to IQ full, LS."
+ },
+ {
+ "EventCode": "0x015f",
+ "EventName": "VX_IQ_STALL",
+ "PublicDescription": "Dispatch stalled due to IQ full, VX."
+ },
+ {
+ "EventCode": "0x0160",
+ "EventName": "MCQ_FULL_STALL",
+ "PublicDescription": "Dispatch stalled due to MCQ full."
+ },
+ {
+ "EventCode": "0x01cf",
+ "EventName": "PRD_DISP_STALL",
+ "PublicDescription": "Rename stalled due to predicate registers (physical) are full."
+ },
+ {
+ "EventCode": "0x01e0",
+ "EventName": "CSDB_STALL",
+ "PublicDescription": "Rename stalled due to CSDB."
+ },
+ {
+ "EventCode": "0x01e2",
+ "EventName": "STALL_SLOT_FRONTEND_WITHOUT_MISPRED",
+ "PublicDescription": "Stall slot frontend during non-mispredicted branch.\nThis event counts the STALL_STOT_FRONTEND Events, except for the 4 cycles following a mispredicted branch Event or 4 cycles following a commit flush&restart Event."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/nvidia/t410/tlb.json b/tools/perf/pmu-events/arch/arm64/nvidia/t410/tlb.json
new file mode 100644
index 000000000000..18ec5c348c87
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/nvidia/t410/tlb.json
@@ -0,0 +1,158 @@
+[
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL",
+ "PublicDescription": "This event counts L1 Instruction TLB refills from any instruction fetch (demand, hardware prefetch, and software preload accesses). If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event will not count if the translation table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL",
+ "PublicDescription": "This event counts L1 Data TLB accesses that resulted in TLB refills. If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event counts for refills caused by preload instructions or hardware prefetch accesses. This event counts regardless of whether the miss hits in L2 or results in a translation table walk. This event will not count if the translation table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB. This event will not count on an access from an AT (Address Translation) instruction.\nThis event counts the sum of the following events:\nL1D_TLB_REFILL_RD and\nL1D_TLB_REFILL_WR."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB",
+ "PublicDescription": "This event counts L1 Data TLB accesses caused by any memory Load or Store operation.\nNote that Load or Store instructions can be broken up into multiple memory operations.\nThis event does not count TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1I_TLB",
+ "PublicDescription": "This event counts L1 instruction TLB accesses (caused by demand or hardware prefetch or software preload accesses), whether the access hits or misses in the TLB. This event counts both demand accesses and prefetch or preload generated accesses.\nThis event is a superset of the L1I_TLB_REFILL event."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL",
+ "PublicDescription": "This event counts L2 TLB refills caused by memory operations from both data and instruction fetch, except for those caused by TLB maintenance operations and hardware prefetches.\nThis event is the sum of the following events:\nL2D_TLB_REFILL_RD and\nL2D_TLB_REFILL_WR."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB",
+ "PublicDescription": "This event counts L2 TLB accesses except those caused by TLB maintenance operations.\nThis event is the sum of the following events:\nL2D_TLB_RD and\nL2D_TLB_WR."
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK",
+ "PublicDescription": "This event counts number of demand data translation table walks caused by a miss in the L2 TLB and performing at least one memory access. Translation table walks are counted even if the translation ended up taking a translation fault for reasons different than EPD, E0PD and NFD. Note that partial translations that cause a translation table walk are also counted. Also note that this event counts walks triggered by software preloads, but not walks triggered by hardware prefetchers, and that this event does not count walks triggered by TLB maintenance operations.\nThis event does not include prefetches."
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK",
+ "PublicDescription": "This event counts number of instruction translation table walks caused by a miss in the L2 TLB and performing at least one memory access. Translation table walks are counted even if the translation ended up taking a translation fault for reasons different than EPD, E0PD and NFD. Note that partial translations that cause a translation table walk are also counted. Also note that this event does not count walks triggered by TLB maintenance operations.\nThis event does not include prefetches."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD",
+ "PublicDescription": "This event counts L1 Data TLB refills caused by memory Read operations. If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event counts for refills caused by preload instructions or hardware prefetch accesses. This event counts regardless of whether the miss hits in L2 or results in a translation table walk. This event will not count if the translation table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB. This event will not count on an access from an Address Translation (AT) instruction.\nThis event is a subset of the L1D_TLB_REFILL event."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR",
+ "PublicDescription": "This event counts L1 Data TLB refills caused by data side memory Write operations. If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event counts for refills caused by preload instructions or hardware prefetch accesses. This event counts regardless of whether the miss hits in L2 or results in a translation table walk. This event will not count if the table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB. This event will not count with an access from an Address Translation (AT) instruction.\nThis event is a subset of the L1D_TLB_REFILL event."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD",
+ "PublicDescription": "This event counts L1 Data TLB accesses caused by memory Read operations. This event counts whether the access hits or misses in the TLB. This event does not count TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR",
+ "PublicDescription": "This event counts any L1 Data side TLB accesses caused by memory Write operations. This event counts whether the access hits or misses in the TLB. This event does not count TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD",
+ "PublicDescription": "This event counts L2 TLB refills caused by memory Read operations from both data and instruction fetch except for those caused by TLB maintenance operations or hardware prefetches.\nThis event is a subset of the L2D_TLB_REFILL event."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR",
+ "PublicDescription": "This event counts L2 TLB refills caused by memory Write operations from both data and instruction fetch except for those caused by TLB maintenance operations.\nThis event is a subset of the L2D_TLB_REFILL event."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD",
+ "PublicDescription": "This event counts L2 TLB accesses caused by memory Read operations from both data and instruction fetch except for those caused by TLB maintenance operations.\nThis event is a subset of the L2D_TLB event."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR",
+ "PublicDescription": "This event counts L2 TLB accesses caused by memory Write operations from both data and instruction fetch except for those caused by TLB maintenance operations.\nThis event is a subset of the L2D_TLB event."
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK_PERCYC",
+ "PublicDescription": "This event counts the number of data translation table walks in progress per cycle."
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK_PERCYC",
+ "PublicDescription": "This event counts the number of instruction translation table walks in progress per cycle."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RW",
+ "PublicDescription": "This event counts L1 Data TLB demand accesses caused by memory Read or Write operations. This event counts whether the access hits or misses in the TLB. This event does not count TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_RD",
+ "PublicDescription": "This event counts L1 Instruction TLB demand accesses whether the access hits or misses in the TLB."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_PRFM",
+ "PublicDescription": "This event counts L1 Data TLB accesses generated by software prefetch or preload memory accesses. Load or Store instructions can be broken into multiple memory operations. This event does not count TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_PRFM",
+ "PublicDescription": "This event counts L1 Instruction TLB accesses generated by software preload or prefetch instructions. This event counts whether the access hits or misses in the TLB. This event does not count TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "DTLB_HWUPD",
+ "PublicDescription": "This event counts number of memory accesses triggered by a data translation table walk and performing an update of a translation table entry. Memory accesses are counted even if the translation ended up taking a translation fault for reasons different than EPD, E0PD and NFD. Note that this event counts accesses triggered by software preloads, but not accesses triggered by hardware prefetchers."
+ },
+ {
+ "ArchStdEvent": "ITLB_HWUPD",
+ "PublicDescription": "This event counts number of memory accesses triggered by an instruction translation table walk and performing an update of a translation table entry. Memory accesses are counted even if the translation ended up taking a translation fault for reasons different than EPD, E0PD and NFD."
+ },
+ {
+ "ArchStdEvent": "DTLB_STEP",
+ "PublicDescription": "This event counts number of memory accesses triggered by a demand data translation table walk and performing a Read of a translation table entry. Memory accesses are counted even if the translation ended up taking a translation fault for reasons different than EPD, E0PD and NFD.\nNote that this event counts accesses triggered by software preloads, but not accesses triggered by hardware prefetchers."
+ },
+ {
+ "ArchStdEvent": "ITLB_STEP",
+ "PublicDescription": "This event counts number of memory accesses triggered by an instruction translation table walk and performing a Read of a translation table entry. Memory accesses are counted even if the translation ended up taking a translation fault for reasons different than EPD, E0PD and NFD."
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK_LARGE",
+ "PublicDescription": "This event counts number of demand data translation table walks caused by a miss in the L2 TLB and yielding a large page. The set of large pages is defined as all pages with a final size higher than or equal to 2MB. Translation table walks that end up taking a translation fault are not counted, as the page size would be undefined in that case. If DTLB_WALK_BLOCK is implemented, then it is an alias for this event in this family.\nNote that partial translations that cause a translation table walk are also counted.\nAlso note that this event counts walks triggered by software preloads, but not walks triggered by hardware prefetchers, and that this event does not count walks triggered by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK_LARGE",
+ "PublicDescription": "This event counts number of instruction translation table walks caused by a miss in the L2 TLB and yielding a large page. The set of large pages is defined as all pages with a final size higher than or equal to 2MB. Translation table walks that end up taking a translation fault are not counted, as the page size would be undefined in that case. In this family, this is equal to ITLB_WALK_BLOCK event.\nNote that partial translations that cause a translation table walk are also counted.\nAlso note that this event does not count walks triggered by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK_SMALL",
+ "PublicDescription": "This event counts number of data translation table walks caused by a miss in the L2 TLB and yielding a small page. The set of small pages is defined as all pages with a final size lower than 2MB. Translation table walks that end up taking a translation fault are not counted, as the page size would be undefined in that case. If DTLB_WALK_PAGE event is implemented, then it is an alias for this event in this family. Note that partial translations that cause a translation table walk are also counted.\nAlso note that this event counts walks triggered by software preloads, but not walks triggered by hardware prefetchers, and that this event does not count walks triggered by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK_SMALL",
+ "PublicDescription": "This event counts number of instruction translation table walks caused by a miss in the L2 TLB and yielding a small page. The set of small pages is defined as all pages with a final size lower than 2MB. Translation table walks that end up taking a translation fault are not counted, as the page size would be undefined in that case. In this family, this is equal to ITLB_WALK_PAGE event.\nNote that partial translations that cause a translation table walk are also counted.\nAlso note that this event does not count walks triggered by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK_RW",
+ "PublicDescription": "This event counts number of demand data translation table walks caused by a miss in the L2 TLB and performing at least one memory access. Translation table walks are counted even if the translation ended up taking a translation fault for reasons different than EPD, E0PD and NFD.\nNote that partial translations that cause a translation table walk are also counted.\nAlso note that this event does not count walks triggered by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK_RD",
+ "PublicDescription": "This event counts number of demand instruction translation table walks caused by a miss in the L2 TLB and performing at least one memory access. Translation table walks are counted even if the translation ended up taking a translation fault for reasons different than EPD, E0PD and NFD.\nNote that partial translations that cause a translation table walk are also counted.\nAlso note that this event does not count walks triggered by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK_PRFM",
+ "PublicDescription": "This event counts number of software prefetches or preloads generated data translation table walks caused by a miss in the L2 TLB and performing at least one memory access. Translation table walks are counted even if the translation ended up taking a translation fault for reasons different than EPD, E0PD and NFD.\nNote that partial translations that cause a translation table walk are also counted.\nAlso note that this event does not count walks triggered by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK_PRFM",
+ "PublicDescription": "This event counts number of software prefetches or preloads generated instruction translation table walks caused by a miss in the L2 TLB and performing at least one memory access. Translation table walks are counted even if the translation ended up taking a translation fault for reasons different than EPD, E0PD and NFD.\nNote that partial translations that cause a translation table walk are also counted.\nAlso note that this event does not count walks triggered by TLB maintenance operations."
+ },
+ {
+ "EventCode": "0x010e",
+ "EventName": "L1D_TLB_REFILL_RD_PF",
+ "PublicDescription": "L1 Data TLB refill, Read, prefetch."
+ },
+ {
+ "EventCode": "0x010f",
+ "EventName": "L2TLB_PF_REFILL",
+ "PublicDescription": "L2 Data TLB refill, Read, prefetch.\nThis event counts MMU refills due to internal PFStream requests."
+ },
+ {
+ "EventCode": "0x0223",
+ "EventName": "L1I_TLB_REFILL_RD",
+ "PublicDescription": "L1 Instruction TLB refills due to Demand miss."
+ },
+ {
+ "EventCode": "0x0224",
+ "EventName": "L1I_TLB_REFILL_PRFM",
+ "PublicDescription": "L1 Instruction TLB refills due to Software prefetch miss."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/common/common/metrics.json b/tools/perf/pmu-events/arch/common/common/metrics.json
index 0d010b3ebc6d..cefc8bfe7830 100644
--- a/tools/perf/pmu-events/arch/common/common/metrics.json
+++ b/tools/perf/pmu-events/arch/common/common/metrics.json
@@ -46,14 +46,14 @@
},
{
"BriefDescription": "Max front or backend stalls per instruction",
- "MetricExpr": "max(stalled\\-cycles\\-frontend, stalled\\-cycles\\-backend) / instructions",
+ "MetricExpr": "(max(stalled\\-cycles\\-frontend, stalled\\-cycles\\-backend) / instructions) if (has_event(stalled\\-cycles\\-frontend) & has_event(stalled\\-cycles\\-backend)) else ((stalled\\-cycles\\-frontend / instructions) if has_event(stalled\\-cycles\\-frontend) else ((stalled\\-cycles\\-backend / instructions) if has_event(stalled\\-cycles\\-backend) else 0))",
"MetricGroup": "Default",
"MetricName": "stalled_cycles_per_instruction",
"DefaultShowEvents": "1"
},
{
"BriefDescription": "Frontend stalls per cycle",
- "MetricExpr": "stalled\\-cycles\\-frontend / cpu\\-cycles",
+ "MetricExpr": "(stalled\\-cycles\\-frontend / cpu\\-cycles) if has_event(stalled\\-cycles\\-frontend) else 0",
"MetricGroup": "Default",
"MetricName": "frontend_cycles_idle",
"MetricThreshold": "frontend_cycles_idle > 0.1",
@@ -61,7 +61,7 @@
},
{
"BriefDescription": "Backend stalls per cycle",
- "MetricExpr": "stalled\\-cycles\\-backend / cpu\\-cycles",
+ "MetricExpr": "(stalled\\-cycles\\-backend / cpu\\-cycles) if has_event(stalled\\-cycles\\-backend) else 0",
"MetricGroup": "Default",
"MetricName": "backend_cycles_idle",
"MetricThreshold": "backend_cycles_idle > 0.2",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/cache.json b/tools/perf/pmu-events/arch/x86/alderlake/cache.json
index be15a7f83717..5d0d824f3e7e 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/cache.json
@@ -876,105 +876,97 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 128 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 128. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"MSRValue": "0x80",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 128 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 16 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 16. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"MSRValue": "0x10",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 16 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 256 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 256. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"MSRValue": "0x100",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 256 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 32 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 32. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"MSRValue": "0x20",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 32 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 4 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 4. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"MSRValue": "0x4",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 4 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 512 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 512. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"MSRValue": "0x200",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 512 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 64 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 64. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"MSRValue": "0x40",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 64 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 8 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 8. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"MSRValue": "0x8",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 8 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
@@ -1030,12 +1022,11 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of stores uops retired. Counts with or without PEBS enabled.",
+ "BriefDescription": "Counts the number of stores uops retired.",
"Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
- "PublicDescription": "Counts the number of stores uops retired. Counts with or without PEBS enabled. If PEBS is enabled and a PEBS record is generated, will populate PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x6",
"Unit": "cpu_atom"
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/frontend.json b/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
index ff3b30c2619a..11fc853f2d0b 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
@@ -328,6 +328,24 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "ICACHE_TAG.STALLS_INUSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_INUSE",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "ICACHE_TAG.STALLS_ISB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_ISB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
"Counter": "0,1,2,3",
"CounterMask": "1",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json b/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
index 57a8c78cdc49..80cad3c49d20 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
@@ -245,6 +245,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of near indirect JMP branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0xef",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT_CALL",
"Counter": "0,1,2,3,4,5",
"Deprecated": "1",
@@ -465,6 +474,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of mispredicted near indirect JMP branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0xef",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.INDIRECT_CALL",
"Counter": "0,1,2,3,4,5",
"Deprecated": "1",
@@ -573,7 +591,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.THREAD]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
@@ -582,7 +600,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "BriefDescription": "Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
@@ -651,7 +669,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency. (Fixed event)",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles at TSC frequency.",
"Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
@@ -689,7 +707,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.CORE]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
@@ -707,7 +725,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "BriefDescription": "Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
@@ -875,7 +893,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts the total number of instructions retired. (Fixed event)",
+ "BriefDescription": "Fixed Counter: Counts the total number of instructions retired.",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. Available PDIST counters: 32",
@@ -1274,6 +1292,42 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of CLFLUSH, CLWB, and CLDEMOTE instructions retired.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.CL_INST",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of LFENCE instructions retired.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.LFENCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of accesses to KeyLocker cache.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.KEYLOCKER_ACCESS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of misses to KeyLocker cache.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.KEYLOCKER_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x11",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/cache.json b/tools/perf/pmu-events/arch/x86/alderlaken/cache.json
index 76a841675337..1f97a4dc6fb1 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/cache.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/cache.json
@@ -246,98 +246,90 @@
"UMask": "0x82"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 128 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 128. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"MSRValue": "0x80",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 128 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 16 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 16. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"MSRValue": "0x10",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 16 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 256 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 256. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"MSRValue": "0x100",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 256 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 32 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 32. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"MSRValue": "0x20",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 32 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 4 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 4. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"MSRValue": "0x4",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 4 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 512 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 512. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"MSRValue": "0x200",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 512 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 64 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 64. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"MSRValue": "0x40",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 64 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 8 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 8. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"MSRValue": "0x8",
- "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 8 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
@@ -387,12 +379,11 @@
"UMask": "0x12"
},
{
- "BriefDescription": "Counts the number of stores uops retired. Counts with or without PEBS enabled.",
+ "BriefDescription": "Counts the number of stores uops retired.",
"Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
- "PublicDescription": "Counts the number of stores uops retired. Counts with or without PEBS enabled. If PEBS is enabled and a PEBS record is generated, will populate PEBS Latency and PEBS Data Source fields accordingly.",
"SampleAfterValue": "1000003",
"UMask": "0x6"
},
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json b/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json
index d650cbd48c1f..a13851071624 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json
@@ -109,6 +109,14 @@
"UMask": "0xfb"
},
{
+ "BriefDescription": "Counts the number of near indirect JMP branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0xef"
+ },
+ {
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT_CALL",
"Counter": "0,1,2,3,4,5",
"Deprecated": "1",
@@ -226,6 +234,14 @@
"UMask": "0xfb"
},
{
+ "BriefDescription": "Counts the number of mispredicted near indirect JMP branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0xef"
+ },
+ {
"BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.INDIRECT_CALL",
"Counter": "0,1,2,3,4,5",
"Deprecated": "1",
@@ -278,7 +294,7 @@
"UMask": "0xfe"
},
{
- "BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.THREAD]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
@@ -286,7 +302,7 @@
"UMask": "0x2"
},
{
- "BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "BriefDescription": "Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
@@ -303,7 +319,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency. (Fixed event)",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles at TSC frequency.",
"Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
@@ -320,7 +336,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.CORE]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
@@ -328,7 +344,7 @@
"UMask": "0x2"
},
{
- "BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "BriefDescription": "Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
@@ -336,7 +352,7 @@
"SampleAfterValue": "2000003"
},
{
- "BriefDescription": "Counts the total number of instructions retired. (Fixed event)",
+ "BriefDescription": "Fixed Counter: Counts the total number of instructions retired.",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. Available PDIST counters: 32",
@@ -427,6 +443,38 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts the number of CLFLUSH, CLWB, and CLDEMOTE instructions retired.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.CL_INST",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "Counts the number of LFENCE instructions retired.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.LFENCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of accesses to KeyLocker cache.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.KEYLOCKER_ACCESS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of misses to KeyLocker cache.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.KEYLOCKER_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x11"
+ },
+ {
"BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state. For Tremont, UMWAIT and TPAUSE will only put the CPU into C0.1 activity state (not C0.2 activity state)",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x75",
diff --git a/tools/perf/pmu-events/arch/x86/arrowlake/cache.json b/tools/perf/pmu-events/arch/x86/arrowlake/cache.json
index fba4a0672f6c..4c3aa1fab5a8 100644
--- a/tools/perf/pmu-events/arch/x86/arrowlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/arrowlake/cache.json
@@ -629,6 +629,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an instruction cache or TLB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x35",
+ "EventName": "MEM_BOUND_STALLS_IFETCH.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7f",
+ "Unit": "cpu_lowpower"
+ },
+ {
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
@@ -732,6 +741,24 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of unhalted cycles that the core is stalled due to a demand load miss which hit in the LLC, no snoop was required, and the LLC provided data",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT_NOSNOOP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC, a snoop was required, the snoop misses or the snoop hits but no fwd. LLC provides the data",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT_SNOOP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the local caches.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
@@ -750,6 +777,24 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the caches. DRAM, MMIO or other LOCAL memory type provides the data",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS_LOCALMEM",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x50",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss and the data was provided from an unknown source. If the core has access to an L3 cache, an LLC miss refers to an L3 cache miss, otherwise it is an L2 cache miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS_LOCALMEM",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x50",
+ "Unit": "cpu_lowpower"
+ },
+ {
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a store buffer full condition",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
@@ -1082,6 +1127,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of retired load ops with an unknown source",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd4",
+ "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LOCAL_DRAM",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in DRAM",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd4",
@@ -1182,6 +1236,15 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache in which no snoop was required",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT_NO_SNOOP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of loads that hit in a write combining buffer (WCB), excluding the first load that caused the WCB to allocate.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
@@ -1331,7 +1394,7 @@
"Unit": "cpu_lowpower"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 1024. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1343,7 +1406,7 @@
"Unit": "cpu_lowpower"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 128.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1355,7 +1418,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 128. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1367,7 +1430,7 @@
"Unit": "cpu_lowpower"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 16.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1379,7 +1442,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 16. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1391,7 +1454,7 @@
"Unit": "cpu_lowpower"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 2048. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1403,7 +1466,7 @@
"Unit": "cpu_lowpower"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 256.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1415,7 +1478,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 256. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1427,7 +1490,7 @@
"Unit": "cpu_lowpower"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 32.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1439,7 +1502,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 32. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1451,7 +1514,7 @@
"Unit": "cpu_lowpower"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 4.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1463,7 +1526,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 4. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1475,7 +1538,7 @@
"Unit": "cpu_lowpower"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 512.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1487,7 +1550,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 512. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1499,7 +1562,7 @@
"Unit": "cpu_lowpower"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 64.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1511,7 +1574,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 64. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1523,7 +1586,7 @@
"Unit": "cpu_lowpower"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 8.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1535,7 +1598,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 8. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1707,7 +1770,7 @@
"Unit": "cpu_lowpower"
},
{
- "BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Counts the number of store uops retired.",
"Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1717,7 +1780,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Counts the number of store uops retired.",
"Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
diff --git a/tools/perf/pmu-events/arch/x86/arrowlake/frontend.json b/tools/perf/pmu-events/arch/x86/arrowlake/frontend.json
index a15de050a76c..21f00eafa98a 100644
--- a/tools/perf/pmu-events/arch/x86/arrowlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/arrowlake/frontend.json
@@ -628,6 +628,24 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache In use-full",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_INUSE",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache ISB-full",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_ISB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"CounterMask": "1",
diff --git a/tools/perf/pmu-events/arch/x86/arrowlake/pipeline.json b/tools/perf/pmu-events/arch/x86/arrowlake/pipeline.json
index 805616052925..fb973c75be57 100644
--- a/tools/perf/pmu-events/arch/x86/arrowlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/arrowlake/pipeline.json
@@ -822,7 +822,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles.",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.THREAD]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"SampleAfterValue": "2000003",
@@ -839,7 +839,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.THREAD]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"SampleAfterValue": "2000003",
@@ -909,7 +909,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles.",
"Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
@@ -947,7 +947,7 @@
"Unit": "cpu_lowpower"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles.",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.CORE]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
@@ -964,7 +964,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.CORE]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
@@ -1134,10 +1134,10 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "BriefDescription": "Fixed Counter: Counts the number of instructions retired.",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Fixed Counter: Counts the number of instructions retired Available PDIST counters: 32",
+ "PublicDescription": "Fixed Counter: Counts the number of instructions retired. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_lowpower"
@@ -1608,6 +1608,14 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the total number of machine clears for any reason including, but not limited to, memory ordering, memory disambiguation, SMC, and FP assist.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.ANY",
+ "SampleAfterValue": "20003",
+ "Unit": "cpu_lowpower"
+ },
+ {
"BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine without the use of microcode.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
@@ -1814,6 +1822,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of CLFLUSH, CLWB, and CLDEMOTE instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.CL_INST",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff",
+ "Unit": "cpu_lowpower"
+ },
+ {
"BriefDescription": "Counts the number of LFENCE instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe0",
@@ -1823,6 +1840,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of LFENCE instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.LFENCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_lowpower"
+ },
+ {
"BriefDescription": "Counts the number of RDPMC, RDTSC, and RDTSCP instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe0",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json
index 26568e4b77f7..b2f8947f6741 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json
@@ -514,7 +514,7 @@
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
"PublicDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM Available PDIST counters: 0",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "100007",
"UMask": "0x2"
},
{
@@ -534,7 +534,7 @@
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
"PublicDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM Available PDIST counters: 0",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "100007",
"UMask": "0x4"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json
index 793c486ffabe..e51f5e85ffd1 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json
@@ -272,6 +272,22 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "ICACHE_TAG.STALLS_INUSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_INUSE",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "ICACHE_TAG.STALLS_ISB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_ISB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
"Counter": "0,1,2,3",
"CounterMask": "1",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json
index 92cf47967f0b..3c8dcd9cff7c 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json
@@ -3501,7 +3501,7 @@
"EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
+ "PublicDescription": "Counts when a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
"UMask": "0x4",
"Unit": "CHA"
},
@@ -3523,7 +3523,7 @@
"EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
+ "PublicDescription": "Counts when a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
"UMask": "0x8",
"Unit": "CHA"
},
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json
index d4cf2199d46b..ddb0f65307f4 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json
@@ -223,6 +223,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
+ "PortMask": "0xff",
"UMask": "0xff",
"Unit": "IIO"
},
@@ -234,7 +235,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x01",
"PublicDescription": "x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
"UMask": "0x1",
"Unit": "IIO"
@@ -247,7 +248,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x02",
"PublicDescription": "x4 card is plugged in to slot 1",
"UMask": "0x2",
"Unit": "IIO"
@@ -260,7 +261,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x04",
"PublicDescription": "x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
"UMask": "0x4",
"Unit": "IIO"
@@ -273,7 +274,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x08",
"PublicDescription": "x4 card is plugged in to slot 3",
"UMask": "0x8",
"Unit": "IIO"
@@ -286,7 +287,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x10",
"PublicDescription": "x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
"UMask": "0x10",
"Unit": "IIO"
@@ -299,7 +300,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x20",
"PublicDescription": "x4 card is plugged in to slot 1",
"UMask": "0x20",
"Unit": "IIO"
@@ -312,7 +313,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x40",
"PublicDescription": "x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
"UMask": "0x40",
"Unit": "IIO"
@@ -325,7 +326,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x80",
"PublicDescription": "x4 card is plugged in to slot 3",
"UMask": "0x80",
"Unit": "IIO"
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/cache.json b/tools/perf/pmu-events/arch/x86/grandridge/cache.json
index 9abddb06a837..0aa921ba89b4 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/cache.json
@@ -285,8 +285,8 @@
"UMask": "0x82"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 1024. Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_1024",
@@ -296,8 +296,8 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 128. Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
@@ -307,8 +307,8 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 16. Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
@@ -318,8 +318,8 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 2048. Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_2048",
@@ -329,8 +329,8 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 256. Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
@@ -340,8 +340,8 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 32. Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
@@ -351,8 +351,8 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 4. Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
@@ -362,8 +362,8 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 512. Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
@@ -373,8 +373,8 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 64. Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
@@ -384,8 +384,8 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 8. Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
@@ -458,7 +458,7 @@
"UMask": "0x12"
},
{
- "BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Counts the number of store uops retired.",
"Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json b/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json
index f56d8d816e53..20986b987e18 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json
@@ -178,7 +178,7 @@
"UMask": "0xf7"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.THREAD]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"SampleAfterValue": "2000003",
@@ -192,7 +192,7 @@
"SampleAfterValue": "2000003"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles.",
"Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
@@ -208,7 +208,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.CORE]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
@@ -222,10 +222,10 @@
"SampleAfterValue": "2000003"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "BriefDescription": "Fixed Counter: Counts the number of instructions retired.",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Fixed Counter: Counts the number of instructions retired Available PDIST counters: 32",
+ "PublicDescription": "Fixed Counter: Counts the number of instructions retired. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -302,6 +302,38 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts the number of CLFLUSH, CLWB, and CLDEMOTE instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.CL_INST",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "Counts the number of LFENCE instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.LFENCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of accesses to KeyLocker cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.KEYLOCKER_ACCESS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of misses to KeyLocker cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.KEYLOCKER_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x11"
+ },
+ {
"BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x75",
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/frontend.json b/tools/perf/pmu-events/arch/x86/graniterapids/frontend.json
index d580d305c926..1fdeaebb739f 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/frontend.json
@@ -326,6 +326,22 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "ICACHE_TAG.STALLS_INUSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_INUSE",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "ICACHE_TAG.STALLS_ISB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_ISB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
"Counter": "0,1,2,3",
"CounterMask": "1",
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/gnr-metrics.json b/tools/perf/pmu-events/arch/x86/graniterapids/gnr-metrics.json
index cc3c834ca286..299631fb8d53 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/gnr-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/gnr-metrics.json
@@ -144,6 +144,12 @@
"ScaleUnit": "100%"
},
{
+ "BriefDescription": "The number of times per second that ownership of a cacheline was stolen from the integrated IO controller before it was able to write back the modified line",
+ "MetricExpr": "(UNC_I_MISC1.LOST_FWD + UNC_I_MISC1.SEC_RCVD_INVLD) / duration_time",
+ "MetricName": "io_lost_fwd",
+ "ScaleUnit": "1per_sec"
+ },
+ {
"BriefDescription": "Message Signaled Interrupts (MSI) per second sent by the integrated I/O traffic controller (IIO) to System Configuration Controller (Ubox)",
"MetricExpr": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX_POSTED / duration_time",
"MetricName": "io_msi",
@@ -295,6 +301,27 @@
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "All reads to the local sub-numa cluster cache as a percentage of total memory read accesses",
+ "MetricExpr": "(L2_LINES_IN.ALL - (OCR.READS_TO_CORE.SNC_CACHE.HITM + OCR.READS_TO_CORE.SNC_CACHE.HIT_WITH_FWD + OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_FWD + OCR.READS_TO_CORE.REMOTE_MEMORY + OCR.READS_TO_CORE.L3_MISS_LOCAL)) / L2_LINES_IN.ALL",
+ "MetricName": "numa_percent_all_reads_to_local_cluster_cache",
+ "PublicDescription": "All reads to the local sub-numa cluster cache as a percentage of total memory read accesses. Includes demand and prefetch requests for data reads, code reads, read for ownerships (RFO), does not include LLC prefetches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "All reads to the local sub-numa cluster memory as a percentage of total memory read accesses",
+ "MetricExpr": "OCR.READS_TO_CORE.L3_MISS_LOCAL / L2_LINES_IN.ALL",
+ "MetricName": "numa_percent_all_reads_to_local_cluster_memory",
+ "PublicDescription": "All reads to the local sub-numa cluster memory as a percentage of total memory read accesses. Includes demand and prefetch requests for data reads, code reads, read for ownerships (RFO), does not include LLC prefetches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "All reads to a remote sub-numa cluster cache as a percentage of total memory read accesses",
+ "MetricExpr": "(OCR.READS_TO_CORE.SNC_CACHE.HIT_WITH_FWD + OCR.READS_TO_CORE.SNC_CACHE.HITM) / L2_LINES_IN.ALL",
+ "MetricName": "numa_percent_all_reads_to_remote_cluster_cache",
+ "PublicDescription": "All reads to a remote sub-numa cluster cache as a percentage of total memory read accesses. Includes demand and prefetch requests for data reads, code reads, read for ownerships (RFO), does not include LLC prefetches",
+ "ScaleUnit": "100%"
+ },
+ {
"BriefDescription": "Memory read that miss the last level cache (LLC) addressed to local DRAM as a percentage of total memory read accesses, does not include LLC prefetches",
"MetricExpr": "(UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL) / (UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE)",
"MetricName": "numa_reads_addressed_to_local_dram",
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/cache.json b/tools/perf/pmu-events/arch/x86/lunarlake/cache.json
index 3d2616be8ec1..2db3e8a51fbd 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/cache.json
@@ -551,6 +551,24 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which missed all the caches.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x35",
+ "EventName": "MEM_BOUND_STALLS_IFETCH.LLC_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x78",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which missed all the caches. Local DRAM, MMIO or other local memory type provides the data.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x35",
+ "EventName": "MEM_BOUND_STALLS_IFETCH.LLC_MISS_LOCALMEM",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x50",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an L1 demand load miss.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
@@ -1088,7 +1106,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 128.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1100,7 +1118,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 16.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1112,7 +1130,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 256.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1124,7 +1142,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 32.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1136,7 +1154,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 4.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1148,7 +1166,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 512.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1160,7 +1178,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 64.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1172,7 +1190,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 8.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1274,7 +1292,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Counts the number of store uops retired.",
"Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json b/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json
index b21d602e9f1a..798eebf77436 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json
@@ -425,6 +425,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to Instruction L1 cache miss, that missed in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "FRONTEND_RETIRED_SOURCE.ICACHE_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss that hit in the second level TLB.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
@@ -501,6 +510,24 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache In use-full",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_INUSE",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache ISB-full",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_ISB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"CounterMask": "1",
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json b/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json
index 97797f7b072e..d98723b3cd78 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json
@@ -634,7 +634,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles.",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.THREAD]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"SampleAfterValue": "2000003",
@@ -725,7 +725,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles.",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.CORE]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
@@ -1530,8 +1530,9 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of accesses to KeyLocker cache.",
+ "BriefDescription": "This event is deprecated.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
"EventCode": "0xe1",
"EventName": "MISC_RETIRED2.KEYLOCKER_ACCESS",
"SampleAfterValue": "1000003",
@@ -1539,8 +1540,9 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of misses to KeyLocker cache.",
+ "BriefDescription": "This event is deprecated.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
"EventCode": "0xe1",
"EventName": "MISC_RETIRED2.KEYLOCKER_MISS",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 149bbe7abaf5..8a9e1735e21e 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -1,7 +1,7 @@
Family-model,Version,Filename,EventType
-GenuineIntel-6-(97|9A|B7|BA|BF),v1.35,alderlake,core
-GenuineIntel-6-BE,v1.35,alderlaken,core
-GenuineIntel-6-C[56],v1.14,arrowlake,core
+GenuineIntel-6-(97|9A|B7|BA|BF),v1.37,alderlake,core
+GenuineIntel-6-BE,v1.37,alderlaken,core
+GenuineIntel-6-C[56],v1.16,arrowlake,core
GenuineIntel-6-(1C|26|27|35|36),v5,bonnell,core
GenuineIntel-6-(3D|47),v30,broadwell,core
GenuineIntel-6-56,v12,broadwellde,core
@@ -9,11 +9,11 @@ GenuineIntel-6-4F,v23,broadwellx,core
GenuineIntel-6-55-[56789ABCDEF],v1.25,cascadelakex,core
GenuineIntel-6-DD,v1.00,clearwaterforest,core
GenuineIntel-6-9[6C],v1.05,elkhartlake,core
-GenuineIntel-6-CF,v1.20,emeraldrapids,core
+GenuineIntel-6-CF,v1.21,emeraldrapids,core
GenuineIntel-6-5[CF],v13,goldmont,core
GenuineIntel-6-7A,v1.01,goldmontplus,core
-GenuineIntel-6-B6,v1.10,grandridge,core
-GenuineIntel-6-A[DE],v1.16,graniterapids,core
+GenuineIntel-6-B6,v1.11,grandridge,core
+GenuineIntel-6-A[DE],v1.17,graniterapids,core
GenuineIntel-6-(3C|45|46),v36,haswell,core
GenuineIntel-6-3F,v29,haswellx,core
GenuineIntel-6-7[DE],v1.24,icelake,core
@@ -22,15 +22,15 @@ GenuineIntel-6-3A,v24,ivybridge,core
GenuineIntel-6-3E,v24,ivytown,core
GenuineIntel-6-2D,v24,jaketown,core
GenuineIntel-6-(57|85),v16,knightslanding,core
-GenuineIntel-6-BD,v1.19,lunarlake,core
-GenuineIntel-6-(AA|AC|B5),v1.18,meteorlake,core
+GenuineIntel-6-BD,v1.21,lunarlake,core
+GenuineIntel-6-(AA|AC|B5),v1.20,meteorlake,core
GenuineIntel-6-1[AEF],v4,nehalemep,core
GenuineIntel-6-2E,v4,nehalemex,core
-GenuineIntel-6-CC,v1.02,pantherlake,core
+GenuineIntel-6-CC,v1.04,pantherlake,core
GenuineIntel-6-A7,v1.04,rocketlake,core
GenuineIntel-6-2A,v19,sandybridge,core
-GenuineIntel-6-8F,v1.35,sapphirerapids,core
-GenuineIntel-6-AF,v1.13,sierraforest,core
+GenuineIntel-6-8F,v1.36,sapphirerapids,core
+GenuineIntel-6-AF,v1.15,sierraforest,core
GenuineIntel-6-(37|4A|4C|4D|5A),v15,silvermont,core
GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v59,skylake,core
GenuineIntel-6-55-[01234],v1.37,skylakex,core
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
index d3fc04b2ffbd..4c1220c19456 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
@@ -514,6 +514,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an ICACHE or ITLB miss which hit in the LLC, no snoop was required. LLC provides the data. If the core has access to an L3 cache, an LLC hit refers to an L3 cache hit, otherwise it counts zeros.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x35",
+ "EventName": "MEM_BOUND_STALLS_IFETCH.LLC_HIT_NOSNOOP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an ICACHE or ITLB miss which missed all the caches. If the core has access to an L3 cache, an LLC miss refers to an L3 cache miss, otherwise it is an L2 cache miss.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
@@ -523,6 +532,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an ICACHE or ITLB miss which missed all the caches. DRAM, MMIO or other LOCAL memory type provides the data. If the core has access to an L3 cache, an LLC miss refers to an L3 cache miss, otherwise it is an L2 cache miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x35",
+ "EventName": "MEM_BOUND_STALLS_IFETCH.LLC_MISS_LOCALMEM",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x50",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an L1 demand load miss.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
@@ -560,6 +578,24 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC, no snoop was required. LLC provides the data. If the core has access to an L3 cache, an LLC hit refers to an L3 cache hit, otherwise it counts zeros.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT_NOSNOOP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC, a snoop was required, the snoop misses or the snoop hits but NO_FWD. LLC provides the data. If the core has access to an L3 cache, an LLC hit refers to an L3 cache hit, otherwise it counts zeros.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT_SNOOP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the local caches. If the core has access to an L3 cache, an LLC miss refers to an L3 cache miss, otherwise it is an L2 cache miss.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
@@ -569,6 +605,15 @@
"Unit": "cpu_atom"
},
{
+    "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss and the data was provided from an unknown source. If the core has access to an L3 cache, an LLC miss refers to an L3 cache miss, otherwise it is an L2 cache miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS_LOCALMEM",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x50",
+ "Unit": "cpu_atom"
+ },
+ {
     "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a store buffer full condition",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
@@ -969,7 +1014,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 1024. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -981,7 +1026,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 128. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -993,7 +1038,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 16. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1005,7 +1050,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 2048. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1017,7 +1062,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 256. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1029,7 +1074,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 32. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1041,7 +1086,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 4. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1053,7 +1098,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 512. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1065,7 +1110,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 64. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1077,7 +1122,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 8. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -1159,7 +1204,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Counts the number of stores uops retired.",
"Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
index 6484834b1127..dcf8c8e720f3 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
@@ -431,6 +431,24 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "ICACHE_TAG.STALLS_INUSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_INUSE",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "ICACHE_TAG.STALLS_ISB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_ISB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
"Counter": "0,1,2,3",
"CounterMask": "1",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
index bfdaabe9377d..7662846745bd 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
@@ -517,7 +517,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.THREAD]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"SampleAfterValue": "2000003",
@@ -583,7 +583,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles.",
"Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
@@ -620,7 +620,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.CORE]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
@@ -804,10 +804,10 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "BriefDescription": "Fixed Counter: Counts the number of instructions retired.",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Fixed Counter: Counts the number of instructions retired Available PDIST counters: 32",
+ "PublicDescription": "Fixed Counter: Counts the number of instructions retired. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1208,6 +1208,42 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of CLFLUSH, CLWB, and CLDEMOTE instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.CL_INST",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of LFENCE instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.LFENCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of accesses to KeyLocker cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.KEYLOCKER_ACCESS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of misses to KeyLocker cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.KEYLOCKER_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x11",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/cache.json b/tools/perf/pmu-events/arch/x86/pantherlake/cache.json
index 91f5ab908926..e5323093eec0 100644
--- a/tools/perf/pmu-events/arch/x86/pantherlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/cache.json
@@ -150,6 +150,60 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of L2 cache accesses from front door Demand Code Read requests. Does not include rejects or recycles, per core event.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.DEMAND_CODE_RD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 cache accesses from front door Demand Code Read requests that resulted in a Miss. Does not include rejects or recycles, per core event.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.DEMAND_CODE_RD_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x44",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 cache accesses from front door Demand Data Read requests. Does not include rejects or recycles, per core event.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.DEMAND_DATA_RD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 cache accesses from front door Demand Data Read requests that resulted in a Miss. Does not include rejects or recycles, per core event.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.DEMAND_DATA_RD_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x41",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 cache accesses from front door Demand RFO requests. Does not include rejects or recycles, per core event.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.DEMAND_RFO",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 cache accesses from front door Demand RFO requests that resulted in a Miss. Does not include rejects or recycles, per core event.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.DEMAND_RFO_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x42",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of L2 cache accesses from front door requests that resulted in a Hit. Does not include rejects or recycles, per core event.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x24",
@@ -159,6 +213,24 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of L2 cache accesses from front door Hardware Prefetch requests. Does not include rejects or recycles, per core event.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.HWPF",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 cache accesses from front door requests that resulted in a Miss. Does not include rejects or recycles, per core event.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x17f",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Read requests with true-miss in L2 cache [This event is alias to L2_RQSTS.MISS]",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x24",
@@ -366,6 +438,24 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC, no snoop was required. LLC provided data.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT_NOSNOOP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC, a snoop was required, the snoop misses or the snoop hits but no fwd. LLC provides the data.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT_SNOOP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the local caches.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
@@ -717,6 +807,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the total number of load ops retired that miss the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.ALL",
+ "PublicDescription": "Counts the total number of load ops retired that miss the L3 cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in DRAM",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd3",
@@ -747,6 +847,26 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache in which a snoop was required and no data was forwarded.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd4",
+ "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.L3_HIT_SNOOP_NO_FWD",
+ "PublicDescription": "Counts the number of load ops retired that hit in the L3 cache in which a snoop was required and no data was forwarded. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache in which a snoop was required and non-modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd4",
+ "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.L3_HIT_SNOOP_WITH_FWD",
+ "PublicDescription": "Counts the number of load ops retired that hit in the L3 cache in which a snoop was required and non-modified data was forwarded. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of load ops retired that hit the L1 data cache.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
@@ -797,6 +917,26 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache in which no snoop was required.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT_NO_SNOOP",
+ "PublicDescription": "Counts the number of load ops retired that hit in the L3 cache in which no snoop was required. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache in which a snoop was required and it hit and forwarded data, it hit and did not forward data, or it hit and the forwarded data was modified.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT_SNOOP_HIT",
+ "PublicDescription": "Counts the number of load ops retired that hit in the L3 cache in which a snoop was required and it hit and forwarded data, it hit and did not forward data, or it hit and the forwarded data was modified. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of cycles that uops are blocked for any of the following reasons: load buffer, store buffer or RSV full.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x04",
@@ -880,13 +1020,14 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 1024.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_1024",
"MSRIndex": "0x3F6",
"MSRValue": "0x400",
- "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 1024. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
@@ -894,6 +1035,7 @@
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
@@ -906,6 +1048,7 @@
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
@@ -916,13 +1059,14 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 2048.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_2048",
"MSRIndex": "0x3F6",
"MSRValue": "0x800",
- "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 2048. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
@@ -930,6 +1074,7 @@
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
@@ -942,6 +1087,7 @@
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
@@ -954,6 +1100,7 @@
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
@@ -966,6 +1113,7 @@
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
@@ -978,6 +1126,7 @@
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
@@ -990,6 +1139,7 @@
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
@@ -1072,6 +1222,7 @@
{
"BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
"Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
"PublicDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES Available PDIST counters: 0,1",
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/floating-point.json b/tools/perf/pmu-events/arch/x86/pantherlake/floating-point.json
index e306a45b22ee..77f6c9028d93 100644
--- a/tools/perf/pmu-events/arch/x86/pantherlake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/floating-point.json
@@ -1,5 +1,15 @@
[
{
+ "BriefDescription": "Counts the number of cycles when any of the floating point dividers are active.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xcd",
+ "EventName": "ARITH.FPDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Cycles when floating-point divide unit is busy executing divide or square root operations.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"CounterMask": "1",
@@ -11,6 +21,24 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of floating point dividers per cycle in the loop stage.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcd",
+ "EventName": "ARITH.FPDIV_OCCUPANCY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point divider uops executed per cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcd",
+ "EventName": "ARITH.FPDIV_UOPS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts all microcode FP assists.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc1",
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/frontend.json b/tools/perf/pmu-events/arch/x86/pantherlake/frontend.json
index d36faa683d3f..5e69b81742f5 100644
--- a/tools/perf/pmu-events/arch/x86/pantherlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/frontend.json
@@ -423,6 +423,24 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache In use-full",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_INUSE",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache ISB-full",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_ISB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"CounterMask": "1",
@@ -561,5 +579,23 @@
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the micro-sequencer is busy.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "MS_DECODED.MS_BUSY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of times entered into a ucode flow in the FEC. Includes inserted flows due to front-end detected faults or assists.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "MS_DECODED.MS_ENTRY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/memory.json b/tools/perf/pmu-events/arch/x86/pantherlake/memory.json
index 3d31e620383d..4248cc101391 100644
--- a/tools/perf/pmu-events/arch/x86/pantherlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/memory.json
@@ -9,6 +9,24 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a DL1 miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.L1_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DL1 miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.L1_MISS_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to request buffers full or lock in progress.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
@@ -18,6 +36,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to request buffers full or lock in progress.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.WCB_FULL_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/other.json b/tools/perf/pmu-events/arch/x86/pantherlake/other.json
index d49651d4f112..915c52f5abd1 100644
--- a/tools/perf/pmu-events/arch/x86/pantherlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/other.json
@@ -31,6 +31,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the total number of BTCLEARS.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe8",
+ "EventName": "PREDICTION.BTCLEAR",
+ "PublicDescription": "Counts the total number of BTCLEARS which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Cycles the uncore cannot take further requests",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"CounterMask": "1",
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/pipeline.json b/tools/perf/pmu-events/arch/x86/pantherlake/pipeline.json
index fb87d30c403d..86009237df2f 100644
--- a/tools/perf/pmu-events/arch/x86/pantherlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/pipeline.json
@@ -1,5 +1,15 @@
[
{
+ "BriefDescription": "Counts the number of cycles when any of the floating point or integer dividers are active.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xcd",
+ "EventName": "ARITH.DIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"CounterMask": "1",
@@ -11,6 +21,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of cycles when any of the integer dividers are active.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xcd",
+ "EventName": "ARITH.IDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Cycles when integer divide unit is busy executing divide or square root operations.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"CounterMask": "1",
@@ -22,6 +42,24 @@
"Unit": "cpu_core"
},
{
+      "BriefDescription": "Counts the number of active integer dividers per cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcd",
+ "EventName": "ARITH.IDIV_OCCUPANCY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of integer divider uops executed per cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcd",
+ "EventName": "ARITH.IDIV_UOPS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc1",
@@ -59,6 +97,38 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_INST_RETIRED.NEAR_INDIRECT]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_NEAR_IND",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_INST_RETIRED.NEAR_INDIRECT] Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x50",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_INST_RETIRED.NEAR_INDIRECT_OR_RETURN]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_NEAR_IND_OR_RET",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_INST_RETIRED.NEAR_INDIRECT_OR_RETURN] Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x58",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PublicDescription": "Counts the number of conditional branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Conditional branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
@@ -89,6 +159,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PublicDescription": "Counts the number of taken conditional branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Taken conditional branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
@@ -99,6 +179,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of taken backward conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN_BWD",
+ "PublicDescription": "Counts the number of taken backward conditional branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Taken backward conditional branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
@@ -109,6 +199,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of taken forward conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN_FWD",
+ "PublicDescription": "Counts the number of taken forward conditional branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Taken forward conditional branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
@@ -179,6 +279,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired. [This event is alias to BR_INST_RETIRED.ALL_NEAR_IND]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_INDIRECT",
+ "PublicDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired. [This event is alias to BR_INST_RETIRED.ALL_NEAR_IND] Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x50",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Indirect near branch instructions retired (excluding returns) [This event is alias to BR_INST_RETIRED.INDIRECT]",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
@@ -209,6 +319,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of near indirect JMP, near indirect CALL, and RET branch instructions retired. [This event is alias to BR_INST_RETIRED.ALL_NEAR_IND_OR_RET]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_INDIRECT_OR_RETURN",
+ "PublicDescription": "Counts the number of near indirect JMP, near indirect CALL, and RET branch instructions retired. [This event is alias to BR_INST_RETIRED.ALL_NEAR_IND_OR_RET] Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x58",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "This event is deprecated. [This event is alias to BR_INST_RETIRED.NEAR_INDIRECT_CALL]",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"Deprecated": "1",
@@ -283,7 +403,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Taken branch instructions retired.",
+ "BriefDescription": "Near Taken branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
@@ -755,7 +875,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles.",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.THREAD]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"SampleAfterValue": "2000003",
@@ -1550,6 +1670,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of CLFLUSH, CLWB, and CLDEMOTE instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.CL_INST",
+ "PublicDescription": "Counts the number of CLFLUSH, CLWB, and CLDEMOTE instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of LFENCE instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe0",
@@ -1621,6 +1751,15 @@
"Unit": "cpu_atom"
},
{
+      "BriefDescription": "Counts the number of issue slots not consumed due to a color request for an FCW or MXCSR control register when all 4 colors (copies) are already in use.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.COLOR_STALLS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of issue slots where no uop could issue due to an IQ scoreboard that stalls allocation until a specified older uop retires or (in the case of jump scoreboard) executes. Commonly executed instructions with IQ scoreboards include LFENCE and MFENCE.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x75",
@@ -1733,6 +1872,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Branch Mispredict",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
@@ -1796,6 +1944,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to ROB full",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to iq/jeu scoreboards or ms scb",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
@@ -2077,6 +2234,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of uops issued by the front end every cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. If uop takes 2 ROB slots it counts as 2 uops_issued.",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Uops that RAT issues to RS",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xae",
@@ -2108,6 +2274,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of uops retired that are the last uop of a macro-instruction.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.EOM",
+ "PublicDescription": "Counts the number of uops retired that are the last uop of a macro-instruction. EOM uops indicate the 'end of a macro-instruction' and play a crucial role in the processor's control flow and recovery mechanisms.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Retired uops except the last uop of each instruction.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc2",
@@ -2128,6 +2304,16 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of uops retired that originated from a loop stream detector.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.LSD",
+ "PublicDescription": "Counts the number of uops retired that originated from a loop stream detector. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
@@ -2162,6 +2348,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "UOPS_RETIRED.NANO_CODE",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.NANO_CODE",
+ "PublicDescription": "UOPS_RETIRED.NANO_CODE Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc2",
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/pantherlake/virtual-memory.json
index 8d56c16b2a39..8f3dd36707dc 100644
--- a/tools/perf/pmu-events/arch/x86/pantherlake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/virtual-memory.json
@@ -79,6 +79,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K page.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x12",
@@ -179,6 +189,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 4K page.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x13",
@@ -268,6 +288,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 4K page.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x11",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
index c66324d41a89..373b26c84448 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
@@ -514,7 +514,7 @@
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
"PublicDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM Available PDIST counters: 0",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "100007",
"UMask": "0x2"
},
{
@@ -534,7 +534,7 @@
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
"PublicDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM Available PDIST counters: 0",
- "SampleAfterValue": "1000003",
+ "SampleAfterValue": "100007",
"UMask": "0x4"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
index 793c486ffabe..e51f5e85ffd1 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
@@ -272,6 +272,22 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "ICACHE_TAG.STALLS_INUSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_INUSE",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "ICACHE_TAG.STALLS_ISB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS_ISB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
"Counter": "0,1,2,3",
"CounterMask": "1",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
index 1bdda3c3ccbf..59f6fd2c7a8f 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
@@ -3501,7 +3501,7 @@
"EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
+ "PublicDescription": "Counts when a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
"UMask": "0x4",
"Unit": "CHA"
},
@@ -3523,7 +3523,7 @@
"EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
+ "PublicDescription": "Counts when a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
"UMask": "0x8",
"Unit": "CHA"
},
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json
index dac7e6c50f31..45675a1099e2 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json
@@ -303,6 +303,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
+ "PortMask": "0xff",
"UMask": "0xff",
"Unit": "IIO"
},
@@ -314,7 +315,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x01",
"PublicDescription": "x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
"UMask": "0x1",
"Unit": "IIO"
@@ -327,7 +328,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x02",
"PublicDescription": "x4 card is plugged in to slot 1",
"UMask": "0x2",
"Unit": "IIO"
@@ -340,7 +341,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x04",
"PublicDescription": "x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
"UMask": "0x4",
"Unit": "IIO"
@@ -353,7 +354,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x08",
"PublicDescription": "x4 card is plugged in to slot 3",
"UMask": "0x8",
"Unit": "IIO"
@@ -366,7 +367,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x10",
"PublicDescription": "x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
"UMask": "0x10",
"Unit": "IIO"
@@ -379,7 +380,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x20",
"PublicDescription": "x4 card is plugged in to slot 1",
"UMask": "0x20",
"Unit": "IIO"
@@ -392,7 +393,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x40",
"PublicDescription": "x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
"UMask": "0x40",
"Unit": "IIO"
@@ -405,7 +406,7 @@
"Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x0000",
+ "PortMask": "0x80",
"PublicDescription": "x4 card is plugged in to slot 3",
"UMask": "0x80",
"Unit": "IIO"
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/cache.json b/tools/perf/pmu-events/arch/x86/sierraforest/cache.json
index de0e7661a52d..168f43557a0e 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/cache.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/cache.json
@@ -326,7 +326,7 @@
"UMask": "0x82"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 1024. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -337,7 +337,7 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 128. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -348,7 +348,7 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 16. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -359,7 +359,7 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 2048. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -370,7 +370,7 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 256. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -381,7 +381,7 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 32. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -392,7 +392,7 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 4. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -403,7 +403,7 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 512. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -414,7 +414,7 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 64. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -425,7 +425,7 @@
"UMask": "0x5"
},
{
- "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold of 8. Only counts with PEBS enabled.",
"Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -499,7 +499,7 @@
"UMask": "0x12"
},
{
- "BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Counts the number of stores uops retired.",
"Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json b/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json
index 70af13143024..cf67ff6135e0 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json
@@ -186,7 +186,7 @@
"UMask": "0xf7"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.THREAD]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"SampleAfterValue": "2000003",
@@ -200,7 +200,7 @@
"SampleAfterValue": "2000003"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles.",
"Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
@@ -216,7 +216,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.CORE]",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
@@ -230,10 +230,10 @@
"SampleAfterValue": "2000003"
},
{
- "BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "BriefDescription": "Fixed Counter: Counts the number of instructions retired.",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Fixed Counter: Counts the number of instructions retired Available PDIST counters: 32",
+ "PublicDescription": "Fixed Counter: Counts the number of instructions retired. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -310,6 +310,38 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts the number of CLFLUSH, CLWB, and CLDEMOTE instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.CL_INST",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "Counts the number of LFENCE instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.LFENCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of accesses to KeyLocker cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.KEYLOCKER_ACCESS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of misses to KeyLocker cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.KEYLOCKER_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x11"
+ },
+ {
"BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x75",
diff --git a/tools/perf/pmu-events/empty-pmu-events.c b/tools/perf/pmu-events/empty-pmu-events.c
index 76c395cf513c..a92dd0424f79 100644
--- a/tools/perf/pmu-events/empty-pmu-events.c
+++ b/tools/perf/pmu-events/empty-pmu-events.c
@@ -1310,33 +1310,33 @@ static const char *const big_c_string =
/* offset=128375 */ "migrations_per_second\000Default\000software@cpu\\-migrations\\,name\\=cpu\\-migrations@ * 1e9 / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Process migrations to a new CPU per CPU second\000\0001migrations/sec\000\000\000\000011"
/* offset=128635 */ "page_faults_per_second\000Default\000software@page\\-faults\\,name\\=page\\-faults@ * 1e9 / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Page faults per CPU second\000\0001faults/sec\000\000\000\000011"
/* offset=128866 */ "insn_per_cycle\000Default\000instructions / cpu\\-cycles\000insn_per_cycle < 1\000Instructions Per Cycle\000\0001instructions\000\000\000\000001"
-/* offset=128979 */ "stalled_cycles_per_instruction\000Default\000max(stalled\\-cycles\\-frontend, stalled\\-cycles\\-backend) / instructions\000\000Max front or backend stalls per instruction\000\000\000\000\000\000001"
-/* offset=129143 */ "frontend_cycles_idle\000Default\000stalled\\-cycles\\-frontend / cpu\\-cycles\000frontend_cycles_idle > 0.1\000Frontend stalls per cycle\000\000\000\000\000\000001"
-/* offset=129273 */ "backend_cycles_idle\000Default\000stalled\\-cycles\\-backend / cpu\\-cycles\000backend_cycles_idle > 0.2\000Backend stalls per cycle\000\000\000\000\000\000001"
-/* offset=129399 */ "cycles_frequency\000Default\000cpu\\-cycles / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Cycles per CPU second\000\0001GHz\000\000\000\000011"
-/* offset=129575 */ "branch_frequency\000Default\000branches / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Branches per CPU second\000\0001000M/sec\000\000\000\000011"
-/* offset=129755 */ "branch_miss_rate\000Default\000branch\\-misses / branches\000branch_miss_rate > 0.05\000Branch miss rate\000\000100%\000\000\000\000001"
-/* offset=129859 */ "l1d_miss_rate\000Default2\000L1\\-dcache\\-load\\-misses / L1\\-dcache\\-loads\000l1d_miss_rate > 0.05\000L1D miss rate\000\000100%\000\000\000\000001"
-/* offset=129975 */ "llc_miss_rate\000Default2\000LLC\\-load\\-misses / LLC\\-loads\000llc_miss_rate > 0.05\000LLC miss rate\000\000100%\000\000\000\000001"
-/* offset=130076 */ "l1i_miss_rate\000Default3\000L1\\-icache\\-load\\-misses / L1\\-icache\\-loads\000l1i_miss_rate > 0.05\000L1I miss rate\000\000100%\000\000\000\000001"
-/* offset=130191 */ "dtlb_miss_rate\000Default3\000dTLB\\-load\\-misses / dTLB\\-loads\000dtlb_miss_rate > 0.05\000dTLB miss rate\000\000100%\000\000\000\000001"
-/* offset=130297 */ "itlb_miss_rate\000Default3\000iTLB\\-load\\-misses / iTLB\\-loads\000itlb_miss_rate > 0.05\000iTLB miss rate\000\000100%\000\000\000\000001"
-/* offset=130403 */ "l1_prefetch_miss_rate\000Default4\000L1\\-dcache\\-prefetch\\-misses / L1\\-dcache\\-prefetches\000l1_prefetch_miss_rate > 0.05\000L1 prefetch miss rate\000\000100%\000\000\000\000001"
-/* offset=130551 */ "CPI\000\0001 / IPC\000\000\000\000\000\000\000\000000"
-/* offset=130574 */ "IPC\000group1\000inst_retired.any / cpu_clk_unhalted.thread\000\000\000\000\000\000\000\000000"
-/* offset=130638 */ "Frontend_Bound_SMT\000\000idq_uops_not_delivered.core / (4 * (cpu_clk_unhalted.thread / 2 * (1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk)))\000\000\000\000\000\000\000\000000"
-/* offset=130805 */ "dcache_miss_cpi\000\000l1d\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\000000"
-/* offset=130870 */ "icache_miss_cycles\000\000l1i\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\000000"
-/* offset=130938 */ "cache_miss_cycles\000group1\000dcache_miss_cpi + icache_miss_cycles\000\000\000\000\000\000\000\000000"
-/* offset=131010 */ "DCache_L2_All_Hits\000\000l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit\000\000\000\000\000\000\000\000000"
-/* offset=131105 */ "DCache_L2_All_Miss\000\000max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss\000\000\000\000\000\000\000\000000"
-/* offset=131240 */ "DCache_L2_All\000\000DCache_L2_All_Hits + DCache_L2_All_Miss\000\000\000\000\000\000\000\000000"
-/* offset=131305 */ "DCache_L2_Hits\000\000d_ratio(DCache_L2_All_Hits, DCache_L2_All)\000\000\000\000\000\000\000\000000"
-/* offset=131374 */ "DCache_L2_Misses\000\000d_ratio(DCache_L2_All_Miss, DCache_L2_All)\000\000\000\000\000\000\000\000000"
-/* offset=131445 */ "M1\000\000ipc + M2\000\000\000\000\000\000\000\000000"
-/* offset=131468 */ "M2\000\000ipc + M1\000\000\000\000\000\000\000\000000"
-/* offset=131491 */ "M3\000\0001 / M3\000\000\000\000\000\000\000\000000"
-/* offset=131512 */ "L1D_Cache_Fill_BW\000\00064 * l1d.replacement / 1e9 / duration_time\000\000\000\000\000\000\000\000000"
+/* offset=128979 */ "stalled_cycles_per_instruction\000Default\000(max(stalled\\-cycles\\-frontend, stalled\\-cycles\\-backend) / instructions if has_event(stalled\\-cycles\\-frontend) & has_event(stalled\\-cycles\\-backend) else (stalled\\-cycles\\-frontend / instructions if has_event(stalled\\-cycles\\-frontend) else (stalled\\-cycles\\-backend / instructions if has_event(stalled\\-cycles\\-backend) else 0)))\000\000Max front or backend stalls per instruction\000\000\000\000\000\000001"
+/* offset=129404 */ "frontend_cycles_idle\000Default\000(stalled\\-cycles\\-frontend / cpu\\-cycles if has_event(stalled\\-cycles\\-frontend) else 0)\000frontend_cycles_idle > 0.1\000Frontend stalls per cycle\000\000\000\000\000\000001"
+/* offset=129583 */ "backend_cycles_idle\000Default\000(stalled\\-cycles\\-backend / cpu\\-cycles if has_event(stalled\\-cycles\\-backend) else 0)\000backend_cycles_idle > 0.2\000Backend stalls per cycle\000\000\000\000\000\000001"
+/* offset=129757 */ "cycles_frequency\000Default\000cpu\\-cycles / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Cycles per CPU second\000\0001GHz\000\000\000\000011"
+/* offset=129933 */ "branch_frequency\000Default\000branches / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Branches per CPU second\000\0001000M/sec\000\000\000\000011"
+/* offset=130113 */ "branch_miss_rate\000Default\000branch\\-misses / branches\000branch_miss_rate > 0.05\000Branch miss rate\000\000100%\000\000\000\000001"
+/* offset=130217 */ "l1d_miss_rate\000Default2\000L1\\-dcache\\-load\\-misses / L1\\-dcache\\-loads\000l1d_miss_rate > 0.05\000L1D miss rate\000\000100%\000\000\000\000001"
+/* offset=130333 */ "llc_miss_rate\000Default2\000LLC\\-load\\-misses / LLC\\-loads\000llc_miss_rate > 0.05\000LLC miss rate\000\000100%\000\000\000\000001"
+/* offset=130434 */ "l1i_miss_rate\000Default3\000L1\\-icache\\-load\\-misses / L1\\-icache\\-loads\000l1i_miss_rate > 0.05\000L1I miss rate\000\000100%\000\000\000\000001"
+/* offset=130549 */ "dtlb_miss_rate\000Default3\000dTLB\\-load\\-misses / dTLB\\-loads\000dtlb_miss_rate > 0.05\000dTLB miss rate\000\000100%\000\000\000\000001"
+/* offset=130655 */ "itlb_miss_rate\000Default3\000iTLB\\-load\\-misses / iTLB\\-loads\000itlb_miss_rate > 0.05\000iTLB miss rate\000\000100%\000\000\000\000001"
+/* offset=130761 */ "l1_prefetch_miss_rate\000Default4\000L1\\-dcache\\-prefetch\\-misses / L1\\-dcache\\-prefetches\000l1_prefetch_miss_rate > 0.05\000L1 prefetch miss rate\000\000100%\000\000\000\000001"
+/* offset=130909 */ "CPI\000\0001 / IPC\000\000\000\000\000\000\000\000000"
+/* offset=130932 */ "IPC\000group1\000inst_retired.any / cpu_clk_unhalted.thread\000\000\000\000\000\000\000\000000"
+/* offset=130996 */ "Frontend_Bound_SMT\000\000idq_uops_not_delivered.core / (4 * (cpu_clk_unhalted.thread / 2 * (1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk)))\000\000\000\000\000\000\000\000000"
+/* offset=131163 */ "dcache_miss_cpi\000\000l1d\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\000000"
+/* offset=131228 */ "icache_miss_cycles\000\000l1i\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\000000"
+/* offset=131296 */ "cache_miss_cycles\000group1\000dcache_miss_cpi + icache_miss_cycles\000\000\000\000\000\000\000\000000"
+/* offset=131368 */ "DCache_L2_All_Hits\000\000l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit\000\000\000\000\000\000\000\000000"
+/* offset=131463 */ "DCache_L2_All_Miss\000\000max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss\000\000\000\000\000\000\000\000000"
+/* offset=131598 */ "DCache_L2_All\000\000DCache_L2_All_Hits + DCache_L2_All_Miss\000\000\000\000\000\000\000\000000"
+/* offset=131663 */ "DCache_L2_Hits\000\000d_ratio(DCache_L2_All_Hits, DCache_L2_All)\000\000\000\000\000\000\000\000000"
+/* offset=131732 */ "DCache_L2_Misses\000\000d_ratio(DCache_L2_All_Miss, DCache_L2_All)\000\000\000\000\000\000\000\000000"
+/* offset=131803 */ "M1\000\000ipc + M2\000\000\000\000\000\000\000\000000"
+/* offset=131826 */ "M2\000\000ipc + M1\000\000\000\000\000\000\000\000000"
+/* offset=131849 */ "M3\000\0001 / M3\000\000\000\000\000\000\000\000000"
+/* offset=131870 */ "L1D_Cache_Fill_BW\000\00064 * l1d.replacement / 1e9 / duration_time\000\000\000\000\000\000\000\000000"
;
static const struct compact_pmu_event pmu_events__common_default_core[] = {
@@ -2626,22 +2626,22 @@ static const struct pmu_table_entry pmu_events__common[] = {
static const struct compact_pmu_event pmu_metrics__common_default_core[] = {
{ 127956 }, /* CPUs_utilized\000Default\000(software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@) / (duration_time * 1e9)\000\000Average CPU utilization\000\0001CPUs\000\000\000\000011 */
-{ 129273 }, /* backend_cycles_idle\000Default\000stalled\\-cycles\\-backend / cpu\\-cycles\000backend_cycles_idle > 0.2\000Backend stalls per cycle\000\000\000\000\000\000001 */
-{ 129575 }, /* branch_frequency\000Default\000branches / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Branches per CPU second\000\0001000M/sec\000\000\000\000011 */
-{ 129755 }, /* branch_miss_rate\000Default\000branch\\-misses / branches\000branch_miss_rate > 0.05\000Branch miss rate\000\000100%\000\000\000\000001 */
+{ 129583 }, /* backend_cycles_idle\000Default\000(stalled\\-cycles\\-backend / cpu\\-cycles if has_event(stalled\\-cycles\\-backend) else 0)\000backend_cycles_idle > 0.2\000Backend stalls per cycle\000\000\000\000\000\000001 */
+{ 129933 }, /* branch_frequency\000Default\000branches / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Branches per CPU second\000\0001000M/sec\000\000\000\000011 */
+{ 130113 }, /* branch_miss_rate\000Default\000branch\\-misses / branches\000branch_miss_rate > 0.05\000Branch miss rate\000\000100%\000\000\000\000001 */
{ 128142 }, /* cs_per_second\000Default\000software@context\\-switches\\,name\\=context\\-switches@ * 1e9 / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Context switches per CPU second\000\0001cs/sec\000\000\000\000011 */
-{ 129399 }, /* cycles_frequency\000Default\000cpu\\-cycles / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Cycles per CPU second\000\0001GHz\000\000\000\000011 */
-{ 130191 }, /* dtlb_miss_rate\000Default3\000dTLB\\-load\\-misses / dTLB\\-loads\000dtlb_miss_rate > 0.05\000dTLB miss rate\000\000100%\000\000\000\000001 */
-{ 129143 }, /* frontend_cycles_idle\000Default\000stalled\\-cycles\\-frontend / cpu\\-cycles\000frontend_cycles_idle > 0.1\000Frontend stalls per cycle\000\000\000\000\000\000001 */
+{ 129757 }, /* cycles_frequency\000Default\000cpu\\-cycles / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Cycles per CPU second\000\0001GHz\000\000\000\000011 */
+{ 130549 }, /* dtlb_miss_rate\000Default3\000dTLB\\-load\\-misses / dTLB\\-loads\000dtlb_miss_rate > 0.05\000dTLB miss rate\000\000100%\000\000\000\000001 */
+{ 129404 }, /* frontend_cycles_idle\000Default\000(stalled\\-cycles\\-frontend / cpu\\-cycles if has_event(stalled\\-cycles\\-frontend) else 0)\000frontend_cycles_idle > 0.1\000Frontend stalls per cycle\000\000\000\000\000\000001 */
{ 128866 }, /* insn_per_cycle\000Default\000instructions / cpu\\-cycles\000insn_per_cycle < 1\000Instructions Per Cycle\000\0001instructions\000\000\000\000001 */
-{ 130297 }, /* itlb_miss_rate\000Default3\000iTLB\\-load\\-misses / iTLB\\-loads\000itlb_miss_rate > 0.05\000iTLB miss rate\000\000100%\000\000\000\000001 */
-{ 130403 }, /* l1_prefetch_miss_rate\000Default4\000L1\\-dcache\\-prefetch\\-misses / L1\\-dcache\\-prefetches\000l1_prefetch_miss_rate > 0.05\000L1 prefetch miss rate\000\000100%\000\000\000\000001 */
-{ 129859 }, /* l1d_miss_rate\000Default2\000L1\\-dcache\\-load\\-misses / L1\\-dcache\\-loads\000l1d_miss_rate > 0.05\000L1D miss rate\000\000100%\000\000\000\000001 */
-{ 130076 }, /* l1i_miss_rate\000Default3\000L1\\-icache\\-load\\-misses / L1\\-icache\\-loads\000l1i_miss_rate > 0.05\000L1I miss rate\000\000100%\000\000\000\000001 */
-{ 129975 }, /* llc_miss_rate\000Default2\000LLC\\-load\\-misses / LLC\\-loads\000llc_miss_rate > 0.05\000LLC miss rate\000\000100%\000\000\000\000001 */
+{ 130655 }, /* itlb_miss_rate\000Default3\000iTLB\\-load\\-misses / iTLB\\-loads\000itlb_miss_rate > 0.05\000iTLB miss rate\000\000100%\000\000\000\000001 */
+{ 130761 }, /* l1_prefetch_miss_rate\000Default4\000L1\\-dcache\\-prefetch\\-misses / L1\\-dcache\\-prefetches\000l1_prefetch_miss_rate > 0.05\000L1 prefetch miss rate\000\000100%\000\000\000\000001 */
+{ 130217 }, /* l1d_miss_rate\000Default2\000L1\\-dcache\\-load\\-misses / L1\\-dcache\\-loads\000l1d_miss_rate > 0.05\000L1D miss rate\000\000100%\000\000\000\000001 */
+{ 130434 }, /* l1i_miss_rate\000Default3\000L1\\-icache\\-load\\-misses / L1\\-icache\\-loads\000l1i_miss_rate > 0.05\000L1I miss rate\000\000100%\000\000\000\000001 */
+{ 130333 }, /* llc_miss_rate\000Default2\000LLC\\-load\\-misses / LLC\\-loads\000llc_miss_rate > 0.05\000LLC miss rate\000\000100%\000\000\000\000001 */
{ 128375 }, /* migrations_per_second\000Default\000software@cpu\\-migrations\\,name\\=cpu\\-migrations@ * 1e9 / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Process migrations to a new CPU per CPU second\000\0001migrations/sec\000\000\000\000011 */
{ 128635 }, /* page_faults_per_second\000Default\000software@page\\-faults\\,name\\=page\\-faults@ * 1e9 / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Page faults per CPU second\000\0001faults/sec\000\000\000\000011 */
-{ 128979 }, /* stalled_cycles_per_instruction\000Default\000max(stalled\\-cycles\\-frontend, stalled\\-cycles\\-backend) / instructions\000\000Max front or backend stalls per instruction\000\000\000\000\000\000001 */
+{ 128979 }, /* stalled_cycles_per_instruction\000Default\000(max(stalled\\-cycles\\-frontend, stalled\\-cycles\\-backend) / instructions if has_event(stalled\\-cycles\\-frontend) & has_event(stalled\\-cycles\\-backend) else (stalled\\-cycles\\-frontend / instructions if has_event(stalled\\-cycles\\-frontend) else (stalled\\-cycles\\-backend / instructions if has_event(stalled\\-cycles\\-backend) else 0)))\000\000Max front or backend stalls per instruction\000\000\000\000\000\000001 */
};
@@ -2714,21 +2714,21 @@ static const struct pmu_table_entry pmu_events__test_soc_cpu[] = {
};
static const struct compact_pmu_event pmu_metrics__test_soc_cpu_default_core[] = {
-{ 130551 }, /* CPI\000\0001 / IPC\000\000\000\000\000\000\000\000000 */
-{ 131240 }, /* DCache_L2_All\000\000DCache_L2_All_Hits + DCache_L2_All_Miss\000\000\000\000\000\000\000\000000 */
-{ 131010 }, /* DCache_L2_All_Hits\000\000l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit\000\000\000\000\000\000\000\000000 */
-{ 131105 }, /* DCache_L2_All_Miss\000\000max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss\000\000\000\000\000\000\000\000000 */
-{ 131305 }, /* DCache_L2_Hits\000\000d_ratio(DCache_L2_All_Hits, DCache_L2_All)\000\000\000\000\000\000\000\000000 */
-{ 131374 }, /* DCache_L2_Misses\000\000d_ratio(DCache_L2_All_Miss, DCache_L2_All)\000\000\000\000\000\000\000\000000 */
-{ 130638 }, /* Frontend_Bound_SMT\000\000idq_uops_not_delivered.core / (4 * (cpu_clk_unhalted.thread / 2 * (1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk)))\000\000\000\000\000\000\000\000000 */
-{ 130574 }, /* IPC\000group1\000inst_retired.any / cpu_clk_unhalted.thread\000\000\000\000\000\000\000\000000 */
-{ 131512 }, /* L1D_Cache_Fill_BW\000\00064 * l1d.replacement / 1e9 / duration_time\000\000\000\000\000\000\000\000000 */
-{ 131445 }, /* M1\000\000ipc + M2\000\000\000\000\000\000\000\000000 */
-{ 131468 }, /* M2\000\000ipc + M1\000\000\000\000\000\000\000\000000 */
-{ 131491 }, /* M3\000\0001 / M3\000\000\000\000\000\000\000\000000 */
-{ 130938 }, /* cache_miss_cycles\000group1\000dcache_miss_cpi + icache_miss_cycles\000\000\000\000\000\000\000\000000 */
-{ 130805 }, /* dcache_miss_cpi\000\000l1d\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\000000 */
-{ 130870 }, /* icache_miss_cycles\000\000l1i\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\000000 */
+{ 130909 }, /* CPI\000\0001 / IPC\000\000\000\000\000\000\000\000000 */
+{ 131598 }, /* DCache_L2_All\000\000DCache_L2_All_Hits + DCache_L2_All_Miss\000\000\000\000\000\000\000\000000 */
+{ 131368 }, /* DCache_L2_All_Hits\000\000l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit\000\000\000\000\000\000\000\000000 */
+{ 131463 }, /* DCache_L2_All_Miss\000\000max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss\000\000\000\000\000\000\000\000000 */
+{ 131663 }, /* DCache_L2_Hits\000\000d_ratio(DCache_L2_All_Hits, DCache_L2_All)\000\000\000\000\000\000\000\000000 */
+{ 131732 }, /* DCache_L2_Misses\000\000d_ratio(DCache_L2_All_Miss, DCache_L2_All)\000\000\000\000\000\000\000\000000 */
+{ 130996 }, /* Frontend_Bound_SMT\000\000idq_uops_not_delivered.core / (4 * (cpu_clk_unhalted.thread / 2 * (1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk)))\000\000\000\000\000\000\000\000000 */
+{ 130932 }, /* IPC\000group1\000inst_retired.any / cpu_clk_unhalted.thread\000\000\000\000\000\000\000\000000 */
+{ 131870 }, /* L1D_Cache_Fill_BW\000\00064 * l1d.replacement / 1e9 / duration_time\000\000\000\000\000\000\000\000000 */
+{ 131803 }, /* M1\000\000ipc + M2\000\000\000\000\000\000\000\000000 */
+{ 131826 }, /* M2\000\000ipc + M1\000\000\000\000\000\000\000\000000 */
+{ 131849 }, /* M3\000\0001 / M3\000\000\000\000\000\000\000\000000 */
+{ 131296 }, /* cache_miss_cycles\000group1\000dcache_miss_cpi + icache_miss_cycles\000\000\000\000\000\000\000\000000 */
+{ 131163 }, /* dcache_miss_cpi\000\000l1d\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\000000 */
+{ 131228 }, /* icache_miss_cycles\000\000l1i\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\000000 */
};
diff --git a/tools/perf/tests/bitmap.c b/tools/perf/tests/bitmap.c
index 98956e0e0765..e7adf60be721 100644
--- a/tools/perf/tests/bitmap.c
+++ b/tools/perf/tests/bitmap.c
@@ -16,7 +16,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
bm = bitmap_zalloc(nbits);
if (map && bm) {
- int i;
+ unsigned int i;
struct perf_cpu cpu;
perf_cpu_map__for_each_cpu(cpu, i, map)
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index 3faeb5b6fe0b..f580ba7486b1 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -36,7 +36,7 @@ static int fd3;
static int overflows;
static int overflows_2;
-volatile long the_var;
+static volatile long the_var;
/*
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 5927d1ea20e2..47043a3a2fb4 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/types.h>
+#include <linux/zalloc.h>
#include <inttypes.h>
#include <stdlib.h>
#include <unistd.h>
diff --git a/tools/perf/tests/cpumap.c b/tools/perf/tests/cpumap.c
index 2354246afc5a..b051dce2cd86 100644
--- a/tools/perf/tests/cpumap.c
+++ b/tools/perf/tests/cpumap.c
@@ -156,7 +156,8 @@ static int test__cpu_map_print(struct test_suite *test __maybe_unused, int subte
return 0;
}
-static int __test__cpu_map_merge(const char *lhs, const char *rhs, int nr, const char *expected)
+static int __test__cpu_map_merge(const char *lhs, const char *rhs, unsigned int nr,
+ const char *expected)
{
struct perf_cpu_map *a = perf_cpu_map__new(lhs);
struct perf_cpu_map *b = perf_cpu_map__new(rhs);
@@ -204,7 +205,8 @@ static int test__cpu_map_merge(struct test_suite *test __maybe_unused,
return ret;
}
-static int __test__cpu_map_intersect(const char *lhs, const char *rhs, int nr, const char *expected)
+static int __test__cpu_map_intersect(const char *lhs, const char *rhs, unsigned int nr,
+ const char *expected)
{
struct perf_cpu_map *a = perf_cpu_map__new(lhs);
struct perf_cpu_map *b = perf_cpu_map__new(rhs);
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c
index a1fff4203b75..46bc3f597260 100644
--- a/tools/perf/tests/dso-data.c
+++ b/tools/perf/tests/dso-data.c
@@ -58,7 +58,7 @@ struct test_data_offset {
int size;
};
-struct test_data_offset offsets[] = {
+static struct test_data_offset offsets[] = {
/* Fill first cache page. */
{
.offset = 10,
diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c
index cb9e6de2e033..facc65e29f20 100644
--- a/tools/perf/tests/event_update.c
+++ b/tools/perf/tests/event_update.c
@@ -8,6 +8,7 @@
#include "header.h"
#include "machine.h"
#include "util/synthetic-events.h"
+#include "target.h"
#include "tool.h"
#include "tests.h"
#include "debug.h"
@@ -81,7 +82,8 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes
{
struct evsel *evsel;
struct event_name tmp;
- struct evlist *evlist = evlist__new_default();
+ struct target target = {};
+ struct evlist *evlist = evlist__new_default(&target, /*sample_callchains=*/false);
TEST_ASSERT_VAL("failed to get evlist", evlist);
diff --git a/tools/perf/tests/expand-cgroup.c b/tools/perf/tests/expand-cgroup.c
index c7b32a220ca1..dd547f2f77cc 100644
--- a/tools/perf/tests/expand-cgroup.c
+++ b/tools/perf/tests/expand-cgroup.c
@@ -8,6 +8,7 @@
#include "parse-events.h"
#include "pmu-events/pmu-events.h"
#include "pfm.h"
+#include "target.h"
#include <subcmd/parse-options.h>
#include <stdio.h>
#include <stdlib.h>
@@ -99,7 +100,8 @@ out: for (i = 0; i < nr_events; i++)
static int expand_default_events(void)
{
int ret;
- struct evlist *evlist = evlist__new_default();
+ struct target target = {};
+ struct evlist *evlist = evlist__new_default(&target, /*sample_callchains=*/false);
TEST_ASSERT_VAL("failed to get evlist", evlist);
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 3eb9ef8d7ec6..606aa926a8fc 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -81,7 +81,7 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
{
struct addr_location al;
struct evsel *evsel = hists_to_evsel(hists);
- struct perf_sample sample = { .period = 1000, };
+ struct perf_sample sample = { .evsel = evsel, .period = 1000, };
size_t i;
addr_location__init(&al);
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 1cebd20cc91c..cc6b26e373d1 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -70,6 +70,7 @@ static int add_hist_entries(struct evlist *evlist,
};
struct hists *hists = evsel__hists(evsel);
+ sample.evsel = evsel;
/* make sure it has no filter at first */
hists->thread_filter = NULL;
hists->dso_filter = NULL;
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index ee5ec8bda60e..7818950d786e 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -51,7 +51,7 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
{
struct addr_location al;
struct evsel *evsel = hists_to_evsel(hists);
- struct perf_sample sample = { .period = 100, };
+ struct perf_sample sample = { .evsel = evsel, .period = 100, };
size_t i;
addr_location__init(&al);
diff --git a/tools/perf/tests/mem2node.c b/tools/perf/tests/mem2node.c
index a0e88c496107..7ce1ad7b6ce5 100644
--- a/tools/perf/tests/mem2node.c
+++ b/tools/perf/tests/mem2node.c
@@ -30,7 +30,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
if (map && bm) {
struct perf_cpu cpu;
- int i;
+ unsigned int i;
perf_cpu_map__for_each_cpu(cpu, i, map)
__set_bit(cpu.cpu, bm);
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index 3644d6f52c07..0be43f8db3bd 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -22,7 +22,8 @@
static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
- int err = TEST_FAIL, fd, idx;
+ int err = TEST_FAIL, fd;
+ unsigned int idx;
struct perf_cpu cpu;
struct perf_cpu_map *cpus;
struct evsel *evsel;
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 1d3cc224fbc2..05c3e899b425 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1796,31 +1796,38 @@ static bool test__acr_valid(void)
static int test__ratio_to_prev(struct evlist *evlist)
{
- struct evsel *evsel;
+ struct evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries", 2 * perf_pmus__num_core_pmus() == evlist->core.nr_entries);
- evlist__for_each_entry(evlist, evsel) {
- if (!perf_pmu__has_format(evsel->pmu, "acr_mask"))
- return TEST_OK;
-
- if (evsel == evlist__first(evlist)) {
- TEST_ASSERT_VAL("wrong config2", 0 == evsel->core.attr.config2);
- TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
- TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
- TEST_ASSERT_EVSEL("unexpected event",
- evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
- evsel);
- } else {
- TEST_ASSERT_VAL("wrong config2", 0 == evsel->core.attr.config2);
- TEST_ASSERT_VAL("wrong leader", !evsel__is_group_leader(evsel));
- TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 0);
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
- TEST_ASSERT_EVSEL("unexpected event",
- evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS),
- evsel);
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel != evsel__leader(evsel) ||
+ !perf_pmu__has_format(evsel->pmu, "acr_mask")) {
+ continue;
}
+ leader = evsel;
+ /* cycles */
+ TEST_ASSERT_VAL("wrong config2", 0 == leader->core.attr.config2);
+ TEST_ASSERT_VAL("wrong core.nr_members", leader->core.nr_members == 2);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(leader) == 0);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(leader, HARDWARE, HW_CPU_CYCLES),
+ leader);
+ /*
+ * The period value gets configured within evlist__config,
+ * while this test executes only parse events method.
+ */
+ TEST_ASSERT_VAL("wrong period", 0 == leader->core.attr.sample_period);
+
+ /* instructions/period=200000,ratio-to-prev=2.0/ */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong config2", 0 == evsel->core.attr.config2);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 0);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS),
+ evsel);
/*
* The period value gets configured within evlist__config,
* while this test executes only parse events method.
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index efbd9cd60c63..ad44cc68820b 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -84,8 +84,11 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
CPU_ZERO_S(cpu_mask_size, cpu_mask);
perf_sample__init(&sample, /*all=*/false);
- if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
- evlist = evlist__new_default();
+ if (evlist == NULL) { /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
+ struct target target = {};
+
+ evlist = evlist__new_default(&target, /*sample_callchains=*/false);
+ }
if (evlist == NULL) {
pr_debug("Not enough memory to create evlist\n");
@@ -297,6 +300,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
}
perf_mmap__consume(&md->core);
+ perf_sample__exit(&sample);
}
perf_mmap__read_done(&md->core);
}
diff --git a/tools/perf/tests/shell/data_type_profiling.sh b/tools/perf/tests/shell/data_type_profiling.sh
index 2a7f8f7c42d0..eca694600a04 100755
--- a/tools/perf/tests/shell/data_type_profiling.sh
+++ b/tools/perf/tests/shell/data_type_profiling.sh
@@ -8,13 +8,17 @@ set -e
# data type profiling manifestation
# Values in testtypes and testprogs should match
-testtypes=("# data-type: struct Buf" "# data-type: struct _buf")
+testtypes=("# data-type: struct Buf" "# data-type: struct buf")
testprogs=("perf test -w code_with_type" "perf test -w datasym")
err=0
perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
perfout=$(mktemp /tmp/__perf_test.perf.out.XXXXX)
+# Check for support of perf mem before trap handler
+perf mem record -o /dev/null -- true 2>&1 | \
+ grep -q "failed: no PMU supports the memory events" && exit 2
+
cleanup() {
rm -rf "${perfdata}" "${perfout}"
rm -rf "${perfdata}".old
diff --git a/tools/perf/tests/shell/kwork.sh b/tools/perf/tests/shell/kwork.sh
new file mode 100755
index 000000000000..42bfd9382816
--- /dev/null
+++ b/tools/perf/tests/shell/kwork.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+# perf kwork tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# Root permissions required for tracing events.
+if [ "$(id -u)" != 0 ]; then
+ echo "[Skip] No root permission"
+ exit 2
+fi
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test_kwork.perf.data.XXXXX)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_kwork_record() {
+ echo "Kwork record"
+ perf kwork record -o "${perfdata}" -- sleep 1
+ echo "Kwork record [Success]"
+}
+
+test_kwork_report() {
+ echo "Kwork report"
+ if ! perf kwork report -i "${perfdata}" | grep -q "Kwork Name"; then
+ echo "Kwork report [Failed missing output]"
+ err=1
+ fi
+ echo "Kwork report [Success]"
+}
+
+test_kwork_latency() {
+ echo "Kwork latency"
+ if ! perf kwork latency -i "${perfdata}" | grep -q "Avg delay"; then
+ echo "Kwork latency [Failed missing output]"
+ err=1
+ fi
+ echo "Kwork latency [Success]"
+}
+
+test_kwork_timehist() {
+ echo "Kwork timehist"
+ if ! perf kwork timehist -i "${perfdata}" | grep -q "Kwork name"; then
+ echo "Kwork timehist [Failed missing output]"
+ err=1
+ fi
+ echo "Kwork timehist [Success]"
+}
+
+test_kwork_top() {
+ echo "Kwork top"
+ if ! perf kwork top -i "${perfdata}" | grep -q "COMMAND"; then
+ echo "Kwork top [Failed missing output]"
+ err=1
+ fi
+ echo "Kwork top [Success]"
+}
+
+test_kwork_record
+test_kwork_report
+test_kwork_latency
+test_kwork_timehist
+test_kwork_top
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/perf_sched_stats.sh b/tools/perf/tests/shell/perf_sched_stats.sh
index 2b1410b050d0..f13eb0a75b76 100755
--- a/tools/perf/tests/shell/perf_sched_stats.sh
+++ b/tools/perf/tests/shell/perf_sched_stats.sh
@@ -4,10 +4,34 @@
set -e
+if [ "$(id -u)" != 0 ]; then
+ echo "[Skip] No root permission"
+ exit 2
+fi
+
+perfdata=$(mktemp /tmp/__perf_test_sched_stats.perf.data.XXXXX)
+perfdata2=$(mktemp /tmp/__perf_test_sched_stats.perf.data.XXXXX)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
+ rm -f "${perfdata2}"
+ rm -f "${perfdata2}".old
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
err=0
test_perf_sched_stats_record() {
echo "Basic perf sched stats record test"
- if ! perf sched stats record true 2>&1 | \
+ if ! perf sched stats record -o "${perfdata}" true 2>&1 | \
grep -E -q "[ perf sched stats: Wrote samples to perf.data ]"
then
echo "Basic perf sched stats record test [Failed]"
@@ -19,15 +43,13 @@ test_perf_sched_stats_record() {
test_perf_sched_stats_report() {
echo "Basic perf sched stats report test"
- perf sched stats record true > /dev/null
- if ! perf sched stats report 2>&1 | grep -E -q "Description"
+ perf sched stats record -o "${perfdata}" true > /dev/null
+ if ! perf sched stats report -i "${perfdata}" 2>&1 | grep -E -q "Description"
then
echo "Basic perf sched stats report test [Failed]"
err=1
- rm perf.data
return
fi
- rm perf.data
echo "Basic perf sched stats report test [Success]"
}
@@ -44,16 +66,14 @@ test_perf_sched_stats_live() {
test_perf_sched_stats_diff() {
echo "Basic perf sched stats diff test"
- perf sched stats record true > /dev/null
- perf sched stats record true > /dev/null
- if ! perf sched stats diff > /dev/null
+ perf sched stats record -o "${perfdata}" true > /dev/null
+ perf sched stats record -o "${perfdata2}" true > /dev/null
+ if ! perf sched stats diff "${perfdata}" "${perfdata2}" > /dev/null
then
echo "Basic perf sched stats diff test [Failed]"
err=1
- rm perf.data.old perf.data
return
fi
- rm perf.data.old perf.data
echo "Basic perf sched stats diff test [Success]"
}
@@ -61,4 +81,6 @@ test_perf_sched_stats_record
test_perf_sched_stats_report
test_perf_sched_stats_live
test_perf_sched_stats_diff
+
+cleanup
exit $err
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index ab99bef556bf..eca629ee83f0 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -22,9 +22,9 @@ event_pattern='probe_libc:inet_pton(_[[:digit:]]+)?'
add_libc_inet_pton_event() {
- event_name=$(perf probe -f -x $libc -a inet_pton 2>&1 | tail -n +2 | head -n -5 | \
+ event_name=$(perf probe -f -x $libc -a inet_pton 2>&1 | \
awk -v ep="$event_pattern" -v l="$libc" '$0 ~ ep && $0 ~ \
- ("\\(on inet_pton in " l "\\)") {print $1}')
+ ("\\(on inet_pton in " l "\\)") {print $1}' | head -n 1)
if [ $? -ne 0 ] || [ -z "$event_name" ] ; then
printf "FAIL: could not add event\n"
@@ -40,12 +40,12 @@ trace_libc_inet_pton_backtrace() {
echo ".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
case "$(uname -m)" in
s390x)
- eventattr='call-graph=dwarf,max-stack=4'
+ eventattr='call-graph=dwarf,max-stack=8'
echo "((__GI_)?getaddrinfo|text_to_binary_address)\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
echo "(gaih_inet|main)\+0x[[:xdigit:]]+[[:space:]]\(inlined|.*/bin/ping.*\)$" >> $expected
;;
*)
- eventattr='max-stack=4'
+ eventattr='call-graph=dwarf,max-stack=8'
echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
;;
esac
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index f43e28a136d3..35463358b273 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -41,8 +41,14 @@ check_counts()
test_bpf_counters()
{
printf "Testing --bpf-counters "
- base_instructions=$(perf stat --no-big-num -e instructions -- $workload 2>&1 | awk '/instructions/ {print $1}')
- bpf_instructions=$(perf stat --no-big-num --bpf-counters -e instructions -- $workload 2>&1 | awk '/instructions/ {print $1}')
+ base_instructions=$(perf stat --no-big-num -e instructions -- $workload 2>&1 | \
+ awk -v i=0 -v c=0 '/instructions/ { \
+ if ($1 != "<not") { i++; c += $1 } \
+ } END { if (i > 0) printf "%.0f", c; else print "<not" }')
+ bpf_instructions=$(perf stat --no-big-num --bpf-counters -e instructions -- $workload 2>&1 | \
+ awk -v i=0 -v c=0 '/instructions/ { \
+ if ($1 != "<not") { i++; c += $1 } \
+ } END { if (i > 0) printf "%.0f", c; else print "<not" }')
check_counts $base_instructions $bpf_instructions
compare_number $base_instructions $bpf_instructions
echo "[Success]"
@@ -52,8 +58,14 @@ test_bpf_modifier()
{
printf "Testing bpf event modifier "
stat_output=$(perf stat --no-big-num -e instructions/name=base_instructions/,instructions/name=bpf_instructions/b -- $workload 2>&1)
- base_instructions=$(echo "$stat_output"| awk '/base_instructions/ {print $1}')
- bpf_instructions=$(echo "$stat_output"| awk '/bpf_instructions/ {print $1}')
+ base_instructions=$(echo "$stat_output"| \
+ awk -v i=0 -v c=0 '/base_instructions/ { \
+ if ($1 != "<not") { i++; c += $1 } \
+ } END { if (i > 0) printf "%.0f", c; else print "<not" }')
+ bpf_instructions=$(echo "$stat_output"| \
+ awk -v i=0 -v c=0 '/bpf_instructions/ { \
+ if ($1 != "<not") { i++; c += $1 } \
+ } END { if (i > 0) printf "%.0f", c; else print "<not" }')
check_counts $base_instructions $bpf_instructions
compare_number $base_instructions $bpf_instructions
echo "[Success]"
diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh
index 85233d435be6..eb5837f82e39 100755
--- a/tools/perf/tests/shell/test_brstack.sh
+++ b/tools/perf/tests/shell/test_brstack.sh
@@ -38,9 +38,13 @@ is_arm64() {
[ "$(uname -m)" = "aarch64" ];
}
+has_kaslr_bug() {
+ [ "$(uname -m)" != "aarch64" ];
+}
+
check_branches() {
if ! tr -s ' ' '\n' < "$TMPDIR/perf.script" | grep -E -m1 -q "$1"; then
- echo "Branches missing $1"
+ echo "ERROR: Branches missing $1"
err=1
fi
}
@@ -48,6 +52,8 @@ check_branches() {
test_user_branches() {
echo "Testing user branch stack sampling"
+ start_err=$err
+ err=0
perf record -o "$TMPDIR/perf.data" --branch-filter any,save_type,u -- ${TESTPROG} > "$TMPDIR/record.txt" 2>&1
perf script -i "$TMPDIR/perf.data" --fields brstacksym > "$TMPDIR/perf.script"
@@ -73,59 +79,88 @@ test_user_branches() {
perf script -i "$TMPDIR/perf.data" --fields brstack | \
tr ' ' '\n' > "$TMPDIR/perf.script"
- # There should be no kernel addresses with the u option, in either
- # source or target addresses.
- if grep -E -m1 "0x[89a-f][0-9a-f]{15}" $TMPDIR/perf.script; then
- echo "ERROR: Kernel address found in user mode"
+ # There should be no kernel addresses in the target with the u option.
+ local regex="0x[89a-f][0-9a-f]{15}"
+ if has_kaslr_bug; then
+ # If the system has a kaslr bug that may leak kernel addresses
+ # in the source of something like an ERET/SYSRET. Make the regex
+ # more specific and just check the target address is in user
+ # code.
+ regex="^0x[0-9a-f]{0,16}/0x[89a-f][0-9a-f]{15}/"
+ fi
+ if grep -q -E -m1 "$regex" $TMPDIR/perf.script; then
+ echo "Testing user branch stack sampling [Failed kernel address found in user mode]"
err=1
fi
# some branch types are still not being tested:
# IND COND_CALL COND_RET SYSRET SERROR NO_TX
+ if [ $err -eq 0 ]; then
+ echo "Testing user branch stack sampling [Passed]"
+ err=$start_err
+ else
+ echo "Testing user branch stack sampling [Failed]"
+ fi
}
test_trap_eret_branches() {
echo "Testing trap & eret branches"
+
if ! is_arm64; then
- echo "skip: not arm64"
+ echo "Testing trap & eret branches [Skipped not arm64]"
+ return
+ fi
+ start_err=$err
+ err=0
+ perf record -o $TMPDIR/perf.data --branch-filter any,save_type,u,k -- \
+ perf test -w traploop 1000 > "$TMPDIR/record.txt" 2>&1
+ perf script -i $TMPDIR/perf.data --fields brstacksym | \
+ tr ' ' '\n' > $TMPDIR/perf.script
+
+ # BRBINF<n>.TYPE == TRAP are mapped to PERF_BR_IRQ by the BRBE driver
+ check_branches "^trap_bench\+[^ ]+/[^ ]/IRQ/"
+ check_branches "^[^ ]+/trap_bench\+[^ ]+/ERET/"
+ if [ $err -eq 0 ]; then
+ echo "Testing trap & eret branches [Passed]"
+ err=$start_err
else
- perf record -o $TMPDIR/perf.data --branch-filter any,save_type,u,k -- \
- perf test -w traploop 1000
- perf script -i $TMPDIR/perf.data --fields brstacksym | \
- tr ' ' '\n' > $TMPDIR/perf.script
-
- # BRBINF<n>.TYPE == TRAP are mapped to PERF_BR_IRQ by the BRBE driver
- check_branches "^trap_bench\+[^ ]+/[^ ]/IRQ/"
- check_branches "^[^ ]+/trap_bench\+[^ ]+/ERET/"
+ echo "Testing trap & eret branches [Failed]"
fi
}
test_kernel_branches() {
- echo "Testing that k option only includes kernel source addresses"
+ echo "Testing kernel branch sampling"
- if ! perf record --branch-filter any,k -o- -- true > /dev/null; then
- echo "skip: not enough privileges"
+ if ! perf record --branch-filter any,k -o- -- true > "$TMPDIR/record.txt" 2>&1; then
+ echo "Testing that k option [Skipped not enough privileges]"
+ return
+ fi
+ start_err=$err
+ err=0
+ perf record -o $TMPDIR/perf.data --branch-filter any,k -- \
+ perf bench syscall basic --loop 1000 > "$TMPDIR/record.txt" 2>&1
+ perf script -i $TMPDIR/perf.data --fields brstack | \
+ tr ' ' '\n' > $TMPDIR/perf.script
+
+ # Example of branch entries:
+ # "0xffffffff93bda241/0xffffffff93bda20f/M/-/-/..."
+ # Source addresses come first in user or kernel code. Next is the target
+ # address that must be in the kernel.
+
+ # Look for source addresses with top bit set
+ if ! grep -q -E -m1 "^0x[89a-f][0-9a-f]{15}" $TMPDIR/perf.script; then
+ echo "Testing kernel branch sampling [Failed kernel branches missing]"
+ err=1
+ fi
+ # Look for no target addresses without top bit set
+ if grep -q -E -m1 "^0x[0-9a-f]{0,16}/0x[0-7][0-9a-f]{1,15}/" $TMPDIR/perf.script; then
+ echo "Testing kernel branch sampling [Failed user branches found]"
+ err=1
+ fi
+ if [ $err -eq 0 ]; then
+ echo "Testing kernel branch sampling [Passed]"
+ err=$start_err
else
- perf record -o $TMPDIR/perf.data --branch-filter any,k -- \
- perf bench syscall basic --loop 1000
- perf script -i $TMPDIR/perf.data --fields brstack | \
- tr ' ' '\n' > $TMPDIR/perf.script
-
- # Example of branch entries:
- # "0xffffffff93bda241/0xffffffff93bda20f/M/-/-/..."
- # Source addresses come first and target address can be either
- # userspace or kernel even with k option, as long as the source
- # is in kernel.
-
- #Look for source addresses with top bit set
- if ! grep -E -m1 "^0x[89a-f][0-9a-f]{15}" $TMPDIR/perf.script; then
- echo "ERROR: Kernel branches missing"
- err=1
- fi
- # Look for no source addresses without top bit set
- if grep -E -m1 "^0x[0-7][0-9a-f]{0,15}" $TMPDIR/perf.script; then
- echo "ERROR: User branches found with kernel filter"
- err=1
- fi
+ echo "Testing kernel branch sampling [Failed]"
fi
}
@@ -136,14 +171,15 @@ test_filter() {
test_filter_expect=$2
echo "Testing branch stack filtering permutation ($test_filter_filter,$test_filter_expect)"
- perf record -o "$TMPDIR/perf.data" --branch-filter "$test_filter_filter,save_type,u" -- ${TESTPROG} > "$TMPDIR/record.txt" 2>&1
+ perf record -o "$TMPDIR/perf.data" --branch-filter "$test_filter_filter,save_type,u" -- \
+ ${TESTPROG} > "$TMPDIR/record.txt" 2>&1
perf script -i "$TMPDIR/perf.data" --fields brstack > "$TMPDIR/perf.script"
# fail if we find any branch type that doesn't match any of the expected ones
# also consider UNKNOWN branch types (-)
if [ ! -s "$TMPDIR/perf.script" ]
then
- echo "Empty script output"
+ echo "Testing branch stack filtering [Failed empty script output]"
err=1
return
fi
@@ -154,26 +190,36 @@ test_filter() {
> "$TMPDIR/perf.script-filtered" || true
if [ -s "$TMPDIR/perf.script-filtered" ]
then
- echo "Unexpected branch filter in script output"
+ echo "Testing branch stack filtering [Failed unexpected branch filter]"
cat "$TMPDIR/perf.script"
err=1
return
fi
+ echo "Testing branch stack filtering [Passed]"
}
test_syscall() {
echo "Testing syscalls"
# skip if perf doesn't have enough privileges
- if ! perf record --branch-filter any,k -o- -- true > /dev/null; then
- echo "skip: not enough privileges"
+ if ! perf record --branch-filter any,k -o- -- true > "$TMPDIR/record.txt" 2>&1; then
+ echo "Testing syscalls [Skipped: not enough privileges]"
+ return
+ fi
+ start_err=$err
+ err=0
+ perf record -o $TMPDIR/perf.data --branch-filter \
+ any_call,save_type,u,k -c 10007 -- \
+ perf bench syscall basic --loop 8000 > "$TMPDIR/record.txt" 2>&1
+ perf script -i $TMPDIR/perf.data --fields brstacksym | \
+ tr ' ' '\n' > $TMPDIR/perf.script
+
+ check_branches "getppid[^ ]*/SYSCALL/"
+
+ if [ $err -eq 0 ]; then
+ echo "Testing syscalls [Passed]"
+ err=$start_err
else
- perf record -o $TMPDIR/perf.data --branch-filter \
- any_call,save_type,u,k -c 10000 -- \
- perf bench syscall basic --loop 1000
- perf script -i $TMPDIR/perf.data --fields brstacksym | \
- tr ' ' '\n' > $TMPDIR/perf.script
-
- check_branches "getppid[^ ]*/SYSCALL/"
+ echo "Testing syscalls [Failed]"
fi
}
set -e
diff --git a/tools/perf/tests/shell/test_task_analyzer.sh b/tools/perf/tests/shell/test_task_analyzer.sh
index e194fcf61df3..0314412e63b4 100755
--- a/tools/perf/tests/shell/test_task_analyzer.sh
+++ b/tools/perf/tests/shell/test_task_analyzer.sh
@@ -3,6 +3,11 @@
# SPDX-License-Identifier: GPL-2.0
tmpdir=$(mktemp -d /tmp/perf-script-task-analyzer-XXXXX)
+# TODO: perf script report only supports input from the CWD perf.data file, make
+# it support input from any file.
+perfdata="perf.data"
+csv="$tmpdir/csv"
+csvsummary="$tmpdir/csvsummary"
err=0
# set PERF_EXEC_PATH to find scripts in the source directory
@@ -15,11 +20,10 @@ fi
export ASAN_OPTIONS=detect_leaks=0
cleanup() {
- rm -f perf.data
- rm -f perf.data.old
- rm -f csv
- rm -f csvsummary
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
rm -rf "$tmpdir"
+
trap - exit term int
}
@@ -61,10 +65,10 @@ skip_no_probe_record_support() {
prepare_perf_data() {
# 1s should be sufficient to catch at least some switches
- perf record -e sched:sched_switch -a -- sleep 1 > /dev/null 2>&1
+ perf record -e sched:sched_switch -a -o "${perfdata}" -- sleep 1 > /dev/null 2>&1
# check if perf data file got created in above step.
- if [ ! -e "perf.data" ]; then
- printf "FAIL: perf record failed to create \"perf.data\" \n"
+ if [ ! -e "${perfdata}" ]; then
+ printf "FAIL: perf record failed to create \"${perfdata}\" \n"
return 1
fi
}
@@ -130,28 +134,28 @@ test_extended_times_summary_ns() {
}
test_csv() {
- perf script report task-analyzer --csv csv > /dev/null
- check_exec_0 "perf script report task-analyzer --csv csv"
- find_str_or_fail "Comm;" csv "${FUNCNAME[0]}"
+ perf script report task-analyzer --csv "${csv}" > /dev/null
+ check_exec_0 "perf script report task-analyzer --csv ${csv}"
+ find_str_or_fail "Comm;" "${csv}" "${FUNCNAME[0]}"
}
test_csv_extended_times() {
- perf script report task-analyzer --csv csv --extended-times > /dev/null
- check_exec_0 "perf script report task-analyzer --csv csv --extended-times"
- find_str_or_fail "Out-Out;" csv "${FUNCNAME[0]}"
+ perf script report task-analyzer --csv "${csv}" --extended-times > /dev/null
+ check_exec_0 "perf script report task-analyzer --csv ${csv} --extended-times"
+ find_str_or_fail "Out-Out;" "${csv}" "${FUNCNAME[0]}"
}
test_csvsummary() {
- perf script report task-analyzer --csv-summary csvsummary > /dev/null
- check_exec_0 "perf script report task-analyzer --csv-summary csvsummary"
- find_str_or_fail "Comm;" csvsummary "${FUNCNAME[0]}"
+ perf script report task-analyzer --csv-summary "${csvsummary}" > /dev/null
+ check_exec_0 "perf script report task-analyzer --csv-summary ${csvsummary}"
+ find_str_or_fail "Comm;" "${csvsummary}" "${FUNCNAME[0]}"
}
test_csvsummary_extended() {
- perf script report task-analyzer --csv-summary csvsummary --summary-extended \
+ perf script report task-analyzer --csv-summary "${csvsummary}" --summary-extended \
>/dev/null
- check_exec_0 "perf script report task-analyzer --csv-summary csvsummary --summary-extended"
- find_str_or_fail "Out-Out;" csvsummary "${FUNCNAME[0]}"
+ check_exec_0 "perf script report task-analyzer --csv-summary ${csvsummary} --summary-extended"
+ find_str_or_fail "Out-Out;" "${csvsummary}" "${FUNCNAME[0]}"
}
skip_no_probe_record_support
diff --git a/tools/perf/tests/shell/trace_btf_general.sh b/tools/perf/tests/shell/trace_btf_general.sh
index ef2da806be6b..7a94a5743924 100755
--- a/tools/perf/tests/shell/trace_btf_general.sh
+++ b/tools/perf/tests/shell/trace_btf_general.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# perf trace BTF general tests
+# perf trace BTF general tests (exclusive)
# SPDX-License-Identifier: GPL-2.0
err=0
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index 15791fcb76b2..72a8289e846d 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -239,11 +239,13 @@ static int add_event(struct evlist *evlist, struct list_head *events,
if (!sample.time) {
pr_debug("event with no time\n");
+ perf_sample__exit(&sample);
return -1;
}
node->event_time = sample.time;
+ perf_sample__exit(&sample);
return 0;
}
diff --git a/tools/perf/tests/thread-map.c b/tools/perf/tests/thread-map.c
index 54209592168d..877868107455 100644
--- a/tools/perf/tests/thread-map.c
+++ b/tools/perf/tests/thread-map.c
@@ -9,7 +9,6 @@
#include "debug.h"
#include "event.h"
#include "util/synthetic-events.h"
-#include <linux/zalloc.h>
#include <perf/event.h>
#include <internal/threadmap.h>
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index ec01150d208d..f54502ebef4b 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -9,6 +9,7 @@
#include "evlist.h"
#include "debug.h"
#include "pmus.h"
+#include "target.h"
#include <linux/err.h>
#define TEMPL "/tmp/perf-test-XXXXXX"
@@ -37,11 +38,12 @@ static int session_write_header(char *path)
.path = path,
.mode = PERF_DATA_MODE_WRITE,
};
+ struct target target = {};
session = perf_session__new(&data, NULL);
TEST_ASSERT_VAL("can't get session", !IS_ERR(session));
- session->evlist = evlist__new_default();
+ session->evlist = evlist__new_default(&target, /*sample_callchains=*/false);
TEST_ASSERT_VAL("can't get evlist", session->evlist);
session->evlist->session = session;
@@ -52,7 +54,8 @@ static int session_write_header(char *path)
session->header.data_size += DATA_SIZE;
TEST_ASSERT_VAL("failed to write header",
- !perf_session__write_header(session, session->evlist, data.file.fd, true));
+ !perf_session__write_header(session, session->evlist,
+ perf_data__fd(&data), true));
evlist__delete(session->evlist);
perf_session__delete(session);
@@ -67,7 +70,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
.path = path,
.mode = PERF_DATA_MODE_READ,
};
- int i;
+ unsigned int i;
struct aggr_cpu_id id;
struct perf_cpu cpu;
struct perf_env *env;
@@ -114,7 +117,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
TEST_ASSERT_VAL("Session header CPU map not set", env->cpu);
- for (i = 0; i < env->nr_cpus_avail; i++) {
+ for (i = 0; i < (unsigned int)env->nr_cpus_avail; i++) {
cpu.cpu = i;
if (!perf_cpu_map__has(map, cpu))
continue;
diff --git a/tools/perf/tests/workloads/datasym.c b/tools/perf/tests/workloads/datasym.c
index 1d0b7d64e1ba..19242c7255c0 100644
--- a/tools/perf/tests/workloads/datasym.c
+++ b/tools/perf/tests/workloads/datasym.c
@@ -4,14 +4,14 @@
#include <linux/compiler.h>
#include "../tests.h"
-typedef struct _buf {
+struct buf {
char data1;
char reserved[55];
char data2;
-} buf __attribute__((aligned(64)));
+} __attribute__((aligned(64)));
/* volatile to try to avoid the compiler seeing reserved as unused. */
-static volatile buf workload_datasym_buf1 = {
+static volatile struct buf workload_datasym_buf1 = {
/* to have this in the data section */
.reserved[0] = 1,
};
diff --git a/tools/perf/tests/wp.c b/tools/perf/tests/wp.c
index 6c178985e37f..69b31f00eed0 100644
--- a/tools/perf/tests/wp.c
+++ b/tools/perf/tests/wp.c
@@ -22,11 +22,11 @@ do { \
#ifdef __i386__
/* Only breakpoint length less-than 8 has hardware support on i386. */
-volatile u32 data1;
+static volatile u32 data1;
#else
-volatile u64 data1;
+static volatile u64 data1;
#endif
-volatile u8 data2[3];
+static volatile u8 data2[3];
#ifndef __s390x__
static int wp_read(int fd, long long *count, int size)
diff --git a/tools/perf/trace/beauty/drm_ioctl.sh b/tools/perf/trace/beauty/drm_ioctl.sh
index 9aa94fd523a9..f2f1a257bac8 100755
--- a/tools/perf/trace/beauty/drm_ioctl.sh
+++ b/tools/perf/trace/beauty/drm_ioctl.sh
@@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: LGPL-2.1
-[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/drm/
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/perf/trace/beauty/include/uapi/drm/
printf "#ifndef DRM_COMMAND_BASE\n"
grep "#define DRM_COMMAND_BASE" $header_dir/drm.h
diff --git a/tools/perf/trace/beauty/fadvise.sh b/tools/perf/trace/beauty/fadvise.sh
index 4d3dd6e56ded..e9857112fa51 100755
--- a/tools/perf/trace/beauty/fadvise.sh
+++ b/tools/perf/trace/beauty/fadvise.sh
@@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: LGPL-2.1
-[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/perf/trace/beauty/include/uapi/linux/
printf "static const char *fadvise_advices[] = {\n"
regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+POSIX_FADV_(\w+)[[:space:]]+([[:digit:]]+)[[:space:]]+.*'
diff --git a/tools/include/uapi/drm/drm.h b/tools/perf/trace/beauty/include/uapi/drm/drm.h
index 27cc159c1d27..27cc159c1d27 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/perf/trace/beauty/include/uapi/drm/drm.h
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/perf/trace/beauty/include/uapi/drm/i915_drm.h
index 535cb68fdb5c..535cb68fdb5c 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/perf/trace/beauty/include/uapi/drm/i915_drm.h
diff --git a/tools/include/uapi/linux/fadvise.h b/tools/perf/trace/beauty/include/uapi/linux/fadvise.h
index 0862b87434c2..0862b87434c2 100644
--- a/tools/include/uapi/linux/fadvise.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/fadvise.h
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index bcccad7487a9..70cc91d00804 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -149,7 +149,6 @@ endif
perf-util-y += cs-etm-base.o
perf-util-y += parse-branch-options.o
-perf-util-y += dump-insn.o
perf-util-y += parse-regs-options.o
perf-util-y += parse-sublevel-options.o
perf-util-y += term.o
@@ -171,7 +170,6 @@ perf-util-y += mutex.o
perf-util-y += sharded_mutex.o
perf-util-y += intel-tpebs.o
-perf-util-$(CONFIG_LIBBPF) += bpf_map.o
perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o
perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_counter_cgroup.o
perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_ftrace.o
diff --git a/tools/perf/util/addr2line.c b/tools/perf/util/addr2line.c
index 31c0391fffa3..4b0d349ed334 100644
--- a/tools/perf/util/addr2line.c
+++ b/tools/perf/util/addr2line.c
@@ -18,9 +18,6 @@
#define MAX_INLINE_NEST 1024
-/* If addr2line doesn't return data for 5 seconds then timeout. */
-int addr2line_timeout_ms = 5 * 1000;
-
static int filename_split(char *filename, unsigned int *line_nr)
{
char *sep;
@@ -123,7 +120,7 @@ static enum cmd_a2l_style cmd_addr2line_configure(struct child_process *a2l, con
lines = 3;
pr_debug3("Detected binutils addr2line style\n");
} else {
- if (!symbol_conf.disable_add2line_warn) {
+ if (!symbol_conf.addr2line_disable_warn) {
char *output = NULL;
size_t output_len;
@@ -310,7 +307,7 @@ int cmd__addr2line(const char *dso_name, u64 addr,
}
if (a2l == NULL) {
- if (!symbol_conf.disable_add2line_warn)
+ if (!symbol_conf.addr2line_disable_warn)
pr_warning("%s %s: addr2line_subprocess_init failed\n", __func__, dso_name);
goto out;
}
@@ -330,16 +327,16 @@ int cmd__addr2line(const char *dso_name, u64 addr,
len = snprintf(buf, sizeof(buf), "%016"PRIx64"\n,\n", addr);
written = len > 0 ? write(a2l->in, buf, len) : -1;
if (written != len) {
- if (!symbol_conf.disable_add2line_warn)
+ if (!symbol_conf.addr2line_disable_warn)
pr_warning("%s %s: could not send request\n", __func__, dso_name);
goto out;
}
io__init(&io, a2l->out, buf, sizeof(buf));
- io.timeout_ms = addr2line_timeout_ms;
+ io.timeout_ms = symbol_conf.addr2line_timeout_ms;
switch (read_addr2line_record(&io, cmd_a2l_style, dso_name, addr, /*first=*/true,
&record_function, &record_filename, &record_line_nr)) {
case -1:
- if (!symbol_conf.disable_add2line_warn)
+ if (!symbol_conf.addr2line_disable_warn)
pr_warning("%s %s: could not read first record\n", __func__, dso_name);
goto out;
case 0:
@@ -355,7 +352,7 @@ int cmd__addr2line(const char *dso_name, u64 addr,
/*addr=*/1, /*first=*/true,
NULL, NULL, NULL)) {
case -1:
- if (!symbol_conf.disable_add2line_warn)
+ if (!symbol_conf.addr2line_disable_warn)
pr_warning("%s %s: could not read sentinel record\n",
__func__, dso_name);
break;
@@ -363,7 +360,7 @@ int cmd__addr2line(const char *dso_name, u64 addr,
/* The sentinel as expected. */
break;
default:
- if (!symbol_conf.disable_add2line_warn)
+ if (!symbol_conf.addr2line_disable_warn)
pr_warning("%s %s: unexpected record instead of sentinel",
__func__, dso_name);
break;
diff --git a/tools/perf/util/addr2line.h b/tools/perf/util/addr2line.h
index d35a47ba8dab..75989a92f16b 100644
--- a/tools/perf/util/addr2line.h
+++ b/tools/perf/util/addr2line.h
@@ -8,8 +8,6 @@ struct dso;
struct inline_node;
struct symbol;
-extern int addr2line_timeout_ms;
-
int cmd__addr2line(const char *dso_name, u64 addr,
char **file, unsigned int *line_nr,
struct dso *dso,
diff --git a/tools/perf/util/affinity.c b/tools/perf/util/affinity.c
index 4fe851334296..6c64b5f69a4e 100644
--- a/tools/perf/util/affinity.c
+++ b/tools/perf/util/affinity.c
@@ -90,7 +90,7 @@ void cpu_map__set_affinity(const struct perf_cpu_map *cpumap)
int cpu_set_size = get_cpu_set_size();
unsigned long *cpuset = bitmap_zalloc(cpu_set_size * 8);
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
if (!cpuset)
return;
diff --git a/tools/perf/util/annotate-arch/annotate-loongarch.c b/tools/perf/util/annotate-arch/annotate-loongarch.c
index 950f34e59e5c..c2addca77320 100644
--- a/tools/perf/util/annotate-arch/annotate-loongarch.c
+++ b/tools/perf/util/annotate-arch/annotate-loongarch.c
@@ -110,6 +110,7 @@ static int loongarch_jump__parse(const struct arch *arch, struct ins_operands *o
}
static const struct ins_ops loongarch_jump_ops = {
+ .free = jump__delete,
.parse = loongarch_jump__parse,
.scnprintf = jump__scnprintf,
.is_jump = true,
diff --git a/tools/perf/util/annotate-arch/annotate-x86.c b/tools/perf/util/annotate-arch/annotate-x86.c
index eb9a649ca656..7e6136536393 100644
--- a/tools/perf/util/annotate-arch/annotate-x86.c
+++ b/tools/perf/util/annotate-arch/annotate-x86.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include <linux/compiler.h>
+#include <linux/zalloc.h>
#include <assert.h>
#include <inttypes.h>
#include "../annotate-data.h"
@@ -204,6 +205,15 @@ static int x86__cpuid_parse(struct arch *arch, const char *cpuid)
}
#ifdef HAVE_LIBDW_SUPPORT
+static void invalidate_reg_state(struct type_state_reg *reg)
+{
+ reg->kind = TSR_KIND_INVALID;
+ reg->ok = false;
+ reg->lifetime_active = false;
+ reg->lifetime_end = 0;
+ reg->copied_from = -1;
+}
+
static void update_insn_state_x86(struct type_state *state,
struct data_loc_info *dloc, Dwarf_Die *cu_die,
struct disasm_line *dl)
@@ -222,24 +232,40 @@ static void update_insn_state_x86(struct type_state *state,
if (ins__is_call(&dl->ins)) {
struct symbol *func = dl->ops.target.sym;
+ const char *call_name;
+ u64 call_addr;
- if (func == NULL)
- return;
+ /* Try to resolve the call target name */
+ if (func)
+ call_name = func->name;
+ else
+ call_name = dl->ops.target.name;
/* __fentry__ will preserve all registers */
- if (!strcmp(func->name, "__fentry__"))
+ if (call_name && !strcmp(call_name, "__fentry__"))
return;
- pr_debug_dtp("call [%x] %s\n", insn_offset, func->name);
+ if (call_name)
+ pr_debug_dtp("call [%x] %s\n", insn_offset, call_name);
+ else
+ pr_debug_dtp("call [%x] <unknown>\n", insn_offset);
- /* Otherwise invalidate caller-saved registers after call */
+ /* Invalidate caller-saved registers after call */
+ call_addr = map__rip_2objdump(dloc->ms->map,
+ dloc->ms->sym->start + dl->al.offset);
for (unsigned i = 0; i < ARRAY_SIZE(state->regs); i++) {
- if (state->regs[i].caller_saved)
- state->regs[i].ok = false;
+ struct type_state_reg *reg = &state->regs[i];
+
+ if (!reg->caller_saved)
+ continue;
+ /* Keep register valid within DWARF location lifetime */
+ if (reg->lifetime_active && call_addr < reg->lifetime_end)
+ continue;
+ invalidate_reg_state(reg);
}
/* Update register with the return type (if any) */
- if (die_find_func_rettype(cu_die, func->name, &type_die)) {
+ if (call_name && die_find_func_rettype(cu_die, call_name, &type_die)) {
tsr = &state->regs[state->ret_reg];
tsr->type = type_die;
tsr->kind = TSR_KIND_TYPE;
@@ -265,6 +291,8 @@ static void update_insn_state_x86(struct type_state *state,
tsr = &state->regs[dst->reg1];
tsr->copied_from = -1;
+ tsr->lifetime_active = false;
+ tsr->lifetime_end = 0;
if (src->imm)
imm_value = src->offset;
@@ -330,6 +358,8 @@ static void update_insn_state_x86(struct type_state *state,
tsr = &state->regs[dst->reg1];
tsr->copied_from = -1;
+ tsr->lifetime_active = false;
+ tsr->lifetime_end = 0;
if (src->imm)
imm_value = src->offset;
@@ -364,8 +394,7 @@ static void update_insn_state_x86(struct type_state *state,
src_tsr = state->regs[sreg];
tsr = &state->regs[dst->reg1];
- tsr->copied_from = -1;
- tsr->ok = false;
+ invalidate_reg_state(tsr);
/* Case 1: Based on stack pointer or frame pointer */
if (sreg == fbreg || sreg == state->stack_reg) {
@@ -433,8 +462,7 @@ static void update_insn_state_x86(struct type_state *state,
!strncmp(dl->ins.name, "inc", 3) || !strncmp(dl->ins.name, "dec", 3)) {
pr_debug_dtp("%s [%x] invalidate reg%d\n",
dl->ins.name, insn_offset, dst->reg1);
- state->regs[dst->reg1].ok = false;
- state->regs[dst->reg1].copied_from = -1;
+ invalidate_reg_state(&state->regs[dst->reg1]);
return;
}
@@ -446,6 +474,8 @@ static void update_insn_state_x86(struct type_state *state,
state->regs[dst->reg1].kind = TSR_KIND_CONST;
state->regs[dst->reg1].imm_value = 0;
state->regs[dst->reg1].ok = true;
+ state->regs[dst->reg1].lifetime_active = false;
+ state->regs[dst->reg1].lifetime_end = 0;
state->regs[dst->reg1].copied_from = -1;
return;
}
@@ -496,7 +526,7 @@ static void update_insn_state_x86(struct type_state *state,
if (!get_global_var_type(cu_die, dloc, ip, var_addr,
&offset, &type_die) ||
!die_get_member_type(&type_die, offset, &type_die)) {
- tsr->ok = false;
+ invalidate_reg_state(tsr);
return;
}
@@ -524,7 +554,7 @@ static void update_insn_state_x86(struct type_state *state,
if (!has_reg_type(state, src->reg1) ||
!state->regs[src->reg1].ok) {
- tsr->ok = false;
+ invalidate_reg_state(tsr);
return;
}
@@ -532,6 +562,8 @@ static void update_insn_state_x86(struct type_state *state,
tsr->kind = state->regs[src->reg1].kind;
tsr->imm_value = state->regs[src->reg1].imm_value;
tsr->offset = state->regs[src->reg1].offset;
+ tsr->lifetime_active = state->regs[src->reg1].lifetime_active;
+ tsr->lifetime_end = state->regs[src->reg1].lifetime_end;
tsr->ok = true;
/* To copy back the variable type later (hopefully) */
@@ -560,7 +592,7 @@ retry:
stack = find_stack_state(state, offset);
if (stack == NULL) {
- tsr->ok = false;
+ invalidate_reg_state(tsr);
return;
} else if (!stack->compound) {
tsr->type = stack->type;
@@ -575,7 +607,7 @@ retry:
tsr->offset = 0;
tsr->ok = true;
} else {
- tsr->ok = false;
+ invalidate_reg_state(tsr);
return;
}
@@ -628,7 +660,7 @@ retry:
if (!get_global_var_type(cu_die, dloc, ip, addr, &offset,
&type_die) ||
!die_get_member_type(&type_die, offset, &type_die)) {
- tsr->ok = false;
+ invalidate_reg_state(tsr);
return;
}
@@ -679,7 +711,7 @@ retry:
}
pr_debug_type_name(&tsr->type, tsr->kind);
} else {
- tsr->ok = false;
+ invalidate_reg_state(tsr);
}
}
/* And then dereference the calculated pointer if it has one */
@@ -721,7 +753,7 @@ retry:
}
}
- tsr->ok = false;
+ invalidate_reg_state(tsr);
}
}
/* Case 3. register to memory transfers */
diff --git a/tools/perf/util/annotate-data.c b/tools/perf/util/annotate-data.c
index 44fbd41e3845..1eff0a27237d 100644
--- a/tools/perf/util/annotate-data.c
+++ b/tools/perf/util/annotate-data.c
@@ -455,13 +455,6 @@ static const char *match_result_str(enum type_match_result tmr)
}
}
-static bool is_pointer_type(Dwarf_Die *type_die)
-{
- int tag = dwarf_tag(type_die);
-
- return tag == DW_TAG_pointer_type || tag == DW_TAG_array_type;
-}
-
static bool is_compound_type(Dwarf_Die *type_die)
{
int tag = dwarf_tag(type_die);
@@ -474,19 +467,24 @@ static bool is_better_type(Dwarf_Die *type_a, Dwarf_Die *type_b)
{
Dwarf_Word size_a, size_b;
Dwarf_Die die_a, die_b;
+ Dwarf_Die ptr_a, ptr_b;
+ Dwarf_Die *ptr_type_a, *ptr_type_b;
+
+ ptr_type_a = die_get_pointer_type(type_a, &ptr_a);
+ ptr_type_b = die_get_pointer_type(type_b, &ptr_b);
/* pointer type is preferred */
- if (is_pointer_type(type_a) != is_pointer_type(type_b))
- return is_pointer_type(type_b);
+ if ((ptr_type_a != NULL) != (ptr_type_b != NULL))
+ return ptr_type_b != NULL;
- if (is_pointer_type(type_b)) {
+ if (ptr_type_b) {
/*
* We want to compare the target type, but 'void *' can fail to
* get the target type.
*/
- if (die_get_real_type(type_a, &die_a) == NULL)
+ if (die_get_real_type(ptr_type_a, &die_a) == NULL)
return true;
- if (die_get_real_type(type_b, &die_b) == NULL)
+ if (die_get_real_type(ptr_type_b, &die_b) == NULL)
return false;
type_a = &die_a;
@@ -539,7 +537,7 @@ static enum type_match_result check_variable(struct data_loc_info *dloc,
* and local variables are accessed directly without a pointer.
*/
if (needs_pointer) {
- if (!is_pointer_type(type_die) ||
+ if (die_get_pointer_type(type_die, type_die) == NULL ||
__die_get_real_type(type_die, type_die) == NULL)
return PERF_TMR_NO_POINTER;
}
@@ -776,12 +774,7 @@ static void global_var__collect(struct data_loc_info *dloc)
if (!dwarf_offdie(dwarf, pos->die_off, &type_die))
continue;
- if (!get_global_var_info(dloc, pos->addr, &var_name,
- &var_offset))
- continue;
-
- if (var_offset != 0)
- continue;
+ get_global_var_info(dloc, pos->addr, &var_name, &var_offset);
global_var__add(dloc, pos->addr, var_name, &type_die);
}
@@ -816,9 +809,8 @@ bool get_global_var_type(Dwarf_Die *cu_die, struct data_loc_info *dloc,
}
/* Try to get the variable by address first */
- if (die_find_variable_by_addr(cu_die, var_addr, &var_die, &offset) &&
- check_variable(dloc, &var_die, type_die, DWARF_REG_PC, offset,
- /*is_fbreg=*/false) == PERF_TMR_OK) {
+ if (die_find_variable_by_addr(cu_die, var_addr, &var_die, type_die,
+ &offset)) {
var_name = dwarf_diename(&var_die);
*var_offset = offset;
goto ok;
@@ -848,6 +840,18 @@ static bool die_is_same(Dwarf_Die *die_a, Dwarf_Die *die_b)
return (die_a->cu == die_b->cu) && (die_a->addr == die_b->addr);
}
+static void tsr_set_lifetime(struct type_state_reg *tsr,
+ const struct die_var_type *var)
+{
+ if (var && var->has_range && var->end > var->addr) {
+ tsr->lifetime_active = true;
+ tsr->lifetime_end = var->end;
+ } else {
+ tsr->lifetime_active = false;
+ tsr->lifetime_end = 0;
+ }
+}
+
/**
* update_var_state - Update type state using given variables
* @state: type state table
@@ -873,19 +877,29 @@ static void update_var_state(struct type_state *state, struct data_loc_info *dlo
}
for (var = var_types; var != NULL; var = var->next) {
- if (var->addr != addr)
- continue;
+ /* Check if addr falls within the variable's valid range */
+ if (var->has_range) {
+ if (addr < var->addr || (var->end && addr >= var->end))
+ continue;
+ } else {
+ if (addr != var->addr)
+ continue;
+ }
/* Get the type DIE using the offset */
if (!dwarf_offdie(dloc->di->dbg, var->die_off, &mem_die))
continue;
if (var->reg == DWARF_REG_FB || var->reg == fbreg || var->reg == state->stack_reg) {
+ Dwarf_Die ptr_die;
+ Dwarf_Die *ptr_type;
int offset = var->offset;
struct type_state_stack *stack;
+ ptr_type = die_get_pointer_type(&mem_die, &ptr_die);
+
/* If the reg location holds the pointer value, dereference the type */
- if (!var->is_reg_var_addr && is_pointer_type(&mem_die) &&
- __die_get_real_type(&mem_die, &mem_die) == NULL)
+ if (!var->is_reg_var_addr && ptr_type &&
+ __die_get_real_type(ptr_type, &mem_die) == NULL)
continue;
if (var->reg != DWARF_REG_FB)
@@ -927,6 +941,7 @@ static void update_var_state(struct type_state *state, struct data_loc_info *dlo
reg->type = mem_die;
reg->kind = TSR_KIND_POINTER;
reg->ok = true;
+ tsr_set_lifetime(reg, var);
pr_debug_dtp("var [%"PRIx64"] reg%d addr offset %x",
insn_offset, var->reg, var->offset);
@@ -943,6 +958,7 @@ static void update_var_state(struct type_state *state, struct data_loc_info *dlo
reg->type = mem_die;
reg->kind = TSR_KIND_TYPE;
reg->ok = true;
+ tsr_set_lifetime(reg, var);
pr_debug_dtp("var [%"PRIx64"] reg%d offset %x",
insn_offset, var->reg, var->offset);
@@ -1110,7 +1126,9 @@ again:
goto check_non_register;
if (state->regs[reg].kind == TSR_KIND_TYPE) {
+ Dwarf_Die ptr_die;
Dwarf_Die sized_type;
+ Dwarf_Die *ptr_type;
struct strbuf sb;
strbuf_init(&sb, 32);
@@ -1122,7 +1140,8 @@ again:
* Normal registers should hold a pointer (or array) to
* dereference a memory location.
*/
- if (!is_pointer_type(&state->regs[reg].type)) {
+ ptr_type = die_get_pointer_type(&state->regs[reg].type, &ptr_die);
+ if (!ptr_type) {
if (dloc->op->offset < 0 && reg != state->stack_reg)
goto check_kernel;
@@ -1130,7 +1149,7 @@ again:
}
/* Remove the pointer and get the target type */
- if (__die_get_real_type(&state->regs[reg].type, type_die) == NULL)
+ if (__die_get_real_type(ptr_type, type_die) == NULL)
return PERF_TMR_NO_POINTER;
dloc->type_offset = dloc->op->offset + state->regs[reg].offset;
@@ -1230,6 +1249,11 @@ again:
return PERF_TMR_BAIL_OUT;
}
+ if (state->regs[reg].kind == TSR_KIND_CONST &&
+ dso__kernel(map__dso(dloc->ms->map))) {
+ if (dloc->op->offset < 0 && reg != state->stack_reg && reg != dloc->fbreg)
+ goto check_kernel;
+ }
check_non_register:
if (reg == dloc->fbreg || reg == state->stack_reg) {
struct type_state_stack *stack;
@@ -1601,12 +1625,13 @@ retry:
if (reg == DWARF_REG_PC) {
if (!die_find_variable_by_addr(&scopes[i], dloc->var_addr,
- &var_die, &type_offset))
+ &var_die, &mem_die,
+ &type_offset))
continue;
} else {
/* Look up variables/parameters in this scope */
if (!die_find_variable_by_reg(&scopes[i], pc, reg,
- &type_offset, is_fbreg, &var_die))
+ &mem_die, &type_offset, is_fbreg, &var_die))
continue;
}
@@ -1614,26 +1639,22 @@ retry:
dwarf_diename(&var_die), (long)dwarf_dieoffset(&var_die),
i+1, nr_scopes, (long)dwarf_dieoffset(&scopes[i]));
- /* Found a variable, see if it's correct */
- result = check_variable(dloc, &var_die, &mem_die, reg, type_offset, is_fbreg);
- if (result == PERF_TMR_OK) {
- if (reg == DWARF_REG_PC) {
- pr_debug_dtp("addr=%#"PRIx64" type_offset=%#x\n",
- dloc->var_addr, type_offset);
- } else if (reg == DWARF_REG_FB || is_fbreg) {
- pr_debug_dtp("stack_offset=%#x type_offset=%#x\n",
- fb_offset, type_offset);
- } else {
- pr_debug_dtp("type_offset=%#x\n", type_offset);
- }
-
- if (!found || is_better_type(type_die, &mem_die)) {
- *type_die = mem_die;
- dloc->type_offset = type_offset;
- found = true;
- }
+ if (reg == DWARF_REG_PC) {
+ pr_debug_dtp("addr=%#"PRIx64" type_offset=%#x\n",
+ dloc->var_addr, type_offset);
+ } else if (reg == DWARF_REG_FB || is_fbreg) {
+ pr_debug_dtp("stack_offset=%#x type_offset=%#x\n",
+ fb_offset, type_offset);
} else {
- pr_debug_dtp("failed: %s\n", match_result_str(result));
+ pr_debug_dtp("type_offset=%#x\n", type_offset);
+ }
+
+ if (!found || dloc->type_offset < type_offset ||
+ (dloc->type_offset == type_offset &&
+ !is_better_type(&mem_die, type_die))) {
+ *type_die = mem_die;
+ dloc->type_offset = type_offset;
+ found = true;
}
pr_debug_location(&var_die, pc, reg);
diff --git a/tools/perf/util/annotate-data.h b/tools/perf/util/annotate-data.h
index 9b222869e42d..c26130744260 100644
--- a/tools/perf/util/annotate-data.h
+++ b/tools/perf/util/annotate-data.h
@@ -182,6 +182,9 @@ struct type_state_reg {
s32 offset;
bool ok;
bool caller_saved;
+ /* DWARF location range tracking for register lifetime */
+ bool lifetime_active;
+ u64 lifetime_end;
u8 kind;
u8 copied_from;
};
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 63f0ee9d4c03..e745f3034a0e 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -8,7 +8,6 @@
#include <errno.h>
#include <inttypes.h>
-#include <libgen.h>
#include <stdlib.h>
#include "util.h" // hex_width()
#include "ui/ui.h"
@@ -1245,7 +1244,7 @@ int hist_entry__annotate_printf(struct hist_entry *he, struct evsel *evsel)
if (opts->full_path)
d_filename = filename;
else
- d_filename = basename(filename);
+ d_filename = perf_basename(filename);
if (evsel__is_group_event(evsel)) {
evsel__group_desc(evsel, buf, sizeof(buf));
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 70dd9bee47c7..e5835042acdf 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -353,12 +353,26 @@ static struct simd_flags arm_spe__synth_simd_flags(const struct arm_spe_record *
if (record->op & ARM_SPE_OP_SVE)
simd_flags.arch |= SIMD_OP_FLAGS_ARCH_SVE;
-
- if (record->type & ARM_SPE_SVE_PARTIAL_PRED)
- simd_flags.pred |= SIMD_OP_FLAGS_PRED_PARTIAL;
-
- if (record->type & ARM_SPE_SVE_EMPTY_PRED)
- simd_flags.pred |= SIMD_OP_FLAGS_PRED_EMPTY;
+ else if (record->op & ARM_SPE_OP_SME)
+ simd_flags.arch |= SIMD_OP_FLAGS_ARCH_SME;
+ else if (record->op & (ARM_SPE_OP_ASE | ARM_SPE_OP_SIMD_FP))
+ simd_flags.arch |= SIMD_OP_FLAGS_ARCH_ASE;
+
+ if (record->op & ARM_SPE_OP_SVE) {
+ if (!(record->op & ARM_SPE_OP_PRED))
+ simd_flags.pred = SIMD_OP_FLAGS_PRED_DISABLED;
+ else if (record->type & ARM_SPE_SVE_PARTIAL_PRED)
+ simd_flags.pred = SIMD_OP_FLAGS_PRED_PARTIAL;
+ else if (record->type & ARM_SPE_SVE_EMPTY_PRED)
+ simd_flags.pred = SIMD_OP_FLAGS_PRED_EMPTY;
+ else
+ simd_flags.pred = SIMD_OP_FLAGS_PRED_FULL;
+ } else {
+ if (record->type & ARM_SPE_SVE_PARTIAL_PRED)
+ simd_flags.pred = SIMD_OP_FLAGS_PRED_PARTIAL;
+ else if (record->type & ARM_SPE_SVE_EMPTY_PRED)
+ simd_flags.pred = SIMD_OP_FLAGS_PRED_EMPTY;
+ }
return simd_flags;
}
diff --git a/tools/perf/util/block-info.c b/tools/perf/util/block-info.c
index 649392bee7ed..8d3a9a661f26 100644
--- a/tools/perf/util/block-info.c
+++ b/tools/perf/util/block-info.c
@@ -303,7 +303,7 @@ static int block_range_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
char buf[128];
char *start_line, *end_line;
- symbol_conf.disable_add2line_warn = true;
+ symbol_conf.addr2line_disable_warn = true;
start_line = map__srcline(he->ms.map, bi->sym->start + bi->start,
he->ms.sym);
diff --git a/tools/perf/util/block-range.c b/tools/perf/util/block-range.c
index 15c42196c24c..7c559fcfd7e0 100644
--- a/tools/perf/util/block-range.c
+++ b/tools/perf/util/block-range.c
@@ -4,7 +4,7 @@
#include <assert.h>
#include <stdlib.h>
-struct {
+static struct {
struct rb_root root;
u64 blocks;
} block_ranges;
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 67e7786bb878..a27945c279ef 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -349,7 +349,7 @@ static struct bpf_metadata *bpf_metadata_alloc(__u32 nr_prog_tags,
if (!metadata)
return NULL;
- metadata->prog_names = zalloc(nr_prog_tags * sizeof(char *));
+ metadata->prog_names = calloc(nr_prog_tags, sizeof(char *));
if (!metadata->prog_names) {
bpf_metadata_free(metadata);
return NULL;
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index a5882b582205..34b6b0da18b7 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -294,7 +294,8 @@ static int bpf_program_profiler__read(struct evsel *evsel)
struct perf_counts_values *counts;
int reading_map_fd;
__u32 key = 0;
- int err, idx, bpf_cpu;
+ int err, bpf_cpu;
+ unsigned int idx;
if (list_empty(&evsel->bpf_counter_list))
return -EAGAIN;
@@ -318,11 +319,12 @@ static int bpf_program_profiler__read(struct evsel *evsel)
}
for (bpf_cpu = 0; bpf_cpu < num_cpu_bpf; bpf_cpu++) {
- idx = perf_cpu_map__idx(evsel__cpus(evsel),
- (struct perf_cpu){.cpu = bpf_cpu});
- if (idx == -1)
+ int i = perf_cpu_map__idx(evsel__cpus(evsel),
+ (struct perf_cpu){.cpu = bpf_cpu});
+
+ if (i == -1)
continue;
- counts = perf_counts(evsel->counts, idx, 0);
+ counts = perf_counts(evsel->counts, i, 0);
counts->val += values[bpf_cpu].counter;
counts->ena += values[bpf_cpu].enabled;
counts->run += values[bpf_cpu].running;
@@ -351,7 +353,7 @@ static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu_map_idx
return 0;
}
-struct bpf_counter_ops bpf_program_profiler_ops = {
+static struct bpf_counter_ops bpf_program_profiler_ops = {
.load = bpf_program_profiler__load,
.enable = bpf_program_profiler__enable,
.disable = bpf_program_profiler__disable,
@@ -668,7 +670,7 @@ static int bperf__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
static int bperf_sync_counters(struct evsel *evsel)
{
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus)
bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu.cpu);
@@ -695,13 +697,11 @@ static int bperf__read(struct evsel *evsel)
struct bpf_perf_event_value values[num_cpu_bpf];
struct perf_counts_values *counts;
int reading_map_fd, err = 0;
- __u32 i;
- int j;
bperf_sync_counters(evsel);
reading_map_fd = bpf_map__fd(skel->maps.accum_readings);
- for (i = 0; i < filter_entry_cnt; i++) {
+ for (__u32 i = 0; i < filter_entry_cnt; i++) {
struct perf_cpu entry;
__u32 cpu;
@@ -709,9 +709,10 @@ static int bperf__read(struct evsel *evsel)
if (err)
goto out;
switch (evsel->follower_skel->bss->type) {
- case BPERF_FILTER_GLOBAL:
- assert(i == 0);
+ case BPERF_FILTER_GLOBAL: {
+ unsigned int j;
+ assert(i == 0);
perf_cpu_map__for_each_cpu(entry, j, evsel__cpus(evsel)) {
counts = perf_counts(evsel->counts, j, 0);
counts->val = values[entry.cpu].counter;
@@ -719,6 +720,7 @@ static int bperf__read(struct evsel *evsel)
counts->run = values[entry.cpu].running;
}
break;
+ }
case BPERF_FILTER_CPU:
cpu = perf_cpu_map__cpu(evsel__cpus(evsel), i).cpu;
assert(cpu >= 0);
@@ -831,7 +833,7 @@ static int bperf__destroy(struct evsel *evsel)
* the leader prog.
*/
-struct bpf_counter_ops bperf_ops = {
+static struct bpf_counter_ops bperf_ops = {
.load = bperf__load,
.enable = bperf__enable,
.disable = bperf__disable,
diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
index 17d7196c6589..519fee3dc3d0 100644
--- a/tools/perf/util/bpf_counter_cgroup.c
+++ b/tools/perf/util/bpf_counter_cgroup.c
@@ -11,7 +11,6 @@
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
-#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <api/fs/fs.h>
#include <bpf/bpf.h>
@@ -98,7 +97,7 @@ static int bperf_load_program(struct evlist *evlist)
struct bpf_link *link;
struct evsel *evsel;
struct cgroup *cgrp, *leader_cgrp;
- int i, j;
+ unsigned int i;
struct perf_cpu cpu;
int total_cpus = cpu__max_cpu().cpu;
int map_fd, prog_fd, err;
@@ -146,6 +145,8 @@ static int bperf_load_program(struct evlist *evlist)
evlist__for_each_entry(evlist, evsel) {
if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
+ unsigned int j;
+
leader_cgrp = evsel->cgrp;
evsel->cgrp = NULL;
@@ -234,7 +235,7 @@ static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
static int bperf_cgrp__sync_counters(struct evlist *evlist)
{
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
int prog_fd = bpf_program__fd(skel->progs.trigger_read);
perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
@@ -286,7 +287,7 @@ static int bperf_cgrp__read(struct evsel *evsel)
evlist__for_each_entry(evlist, evsel) {
__u32 idx = evsel->core.idx;
- int i;
+ unsigned int i;
struct perf_cpu cpu;
err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
diff --git a/tools/perf/util/bpf_kwork.c b/tools/perf/util/bpf_kwork.c
index 5cff755c71fa..d3a2e548f2b6 100644
--- a/tools/perf/util/bpf_kwork.c
+++ b/tools/perf/util/bpf_kwork.c
@@ -148,7 +148,8 @@ static bool valid_kwork_class_type(enum kwork_class_type type)
static int setup_filters(struct perf_kwork *kwork)
{
if (kwork->cpu_list != NULL) {
- int idx, nr_cpus;
+ unsigned int idx;
+ int nr_cpus;
struct perf_cpu_map *map;
struct perf_cpu cpu;
int fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter);
diff --git a/tools/perf/util/bpf_kwork_top.c b/tools/perf/util/bpf_kwork_top.c
index b6f187dd9136..189a29d2bc96 100644
--- a/tools/perf/util/bpf_kwork_top.c
+++ b/tools/perf/util/bpf_kwork_top.c
@@ -123,7 +123,8 @@ static bool valid_kwork_class_type(enum kwork_class_type type)
static int setup_filters(struct perf_kwork *kwork)
{
if (kwork->cpu_list) {
- int idx, nr_cpus, fd;
+ unsigned int idx;
+ int nr_cpus, fd;
struct perf_cpu_map *map;
struct perf_cpu cpu;
diff --git a/tools/perf/util/bpf_map.c b/tools/perf/util/bpf_map.c
deleted file mode 100644
index 442f91b4e8e1..000000000000
--- a/tools/perf/util/bpf_map.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-
-#include "util/bpf_map.h"
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <errno.h>
-#include <stdbool.h>
-#include <stdlib.h>
-#include <unistd.h>
-
-static bool bpf_map__is_per_cpu(enum bpf_map_type type)
-{
- return type == BPF_MAP_TYPE_PERCPU_HASH ||
- type == BPF_MAP_TYPE_PERCPU_ARRAY ||
- type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
- type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE;
-}
-
-static void *bpf_map__alloc_value(const struct bpf_map *map)
-{
- if (bpf_map__is_per_cpu(bpf_map__type(map)))
- return malloc(round_up(bpf_map__value_size(map), 8) *
- sysconf(_SC_NPROCESSORS_CONF));
-
- return malloc(bpf_map__value_size(map));
-}
-
-int bpf_map__fprintf(struct bpf_map *map, FILE *fp)
-{
- void *prev_key = NULL, *key, *value;
- int fd = bpf_map__fd(map), err;
- int printed = 0;
-
- if (fd < 0)
- return fd;
-
- err = -ENOMEM;
- key = malloc(bpf_map__key_size(map));
- if (key == NULL)
- goto out;
-
- value = bpf_map__alloc_value(map);
- if (value == NULL)
- goto out_free_key;
-
- while ((err = bpf_map_get_next_key(fd, prev_key, key) == 0)) {
- int intkey = *(int *)key;
-
- if (!bpf_map_lookup_elem(fd, key, value)) {
- bool boolval = *(bool *)value;
- if (boolval)
- printed += fprintf(fp, "[%d] = %d,\n", intkey, boolval);
- } else {
- printed += fprintf(fp, "[%d] = ERROR,\n", intkey);
- }
-
- prev_key = key;
- }
-
- if (err == ENOENT)
- err = printed;
-
- free(value);
-out_free_key:
- free(key);
-out:
- return err;
-}
diff --git a/tools/perf/util/bpf_map.h b/tools/perf/util/bpf_map.h
deleted file mode 100644
index c2f7c13cba23..000000000000
--- a/tools/perf/util/bpf_map.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-#ifndef __PERF_BPF_MAP_H
-#define __PERF_BPF_MAP_H 1
-
-#include <stdio.h>
-struct bpf_map;
-
-#ifdef HAVE_LIBBPF_SUPPORT
-
-int bpf_map__fprintf(struct bpf_map *map, FILE *fp);
-
-#else
-
-#include <linux/compiler.h>
-
-static inline int bpf_map__fprintf(struct bpf_map *map __maybe_unused, FILE *fp __maybe_unused)
-{
- return 0;
-}
-
-#endif // HAVE_LIBBPF_SUPPORT
-
-#endif // __PERF_BPF_MAP_H
diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
index 88e0660c4bff..a3b699a5322f 100644
--- a/tools/perf/util/bpf_off_cpu.c
+++ b/tools/perf/util/bpf_off_cpu.c
@@ -39,7 +39,7 @@ union off_cpu_data {
u64 array[1024 / sizeof(u64)];
};
-u64 off_cpu_raw[MAX_STACKS + 5];
+static u64 off_cpu_raw[MAX_STACKS + 5];
static int off_cpu_config(struct evlist *evlist)
{
@@ -67,7 +67,7 @@ static void off_cpu_start(void *arg)
struct evlist *evlist = arg;
struct evsel *evsel;
struct perf_cpu pcpu;
- int i;
+ unsigned int i;
/* update task filter for the given workload */
if (skel->rodata->has_task && skel->rodata->uses_tgid &&
diff --git a/tools/perf/util/bpf_skel/syscall_summary.bpf.c b/tools/perf/util/bpf_skel/syscall_summary.bpf.c
index 1bcd066a5199..4172f3c9fc48 100644
--- a/tools/perf/util/bpf_skel/syscall_summary.bpf.c
+++ b/tools/perf/util/bpf_skel/syscall_summary.bpf.c
@@ -118,13 +118,11 @@ int sys_enter(u64 *ctx)
return 0;
}
-SEC("tp_btf/sys_exit")
-int sys_exit(u64 *ctx)
+static int do_exit(long ret)
{
int tid;
int key = 0;
u64 cgroup = 0;
- long ret = ctx[1]; /* return value of the syscall */
struct syscall_trace *st;
s64 delta;
@@ -150,4 +148,18 @@ int sys_exit(u64 *ctx)
return 0;
}
+SEC("tp_btf/sys_exit")
+int sys_exit(u64 *ctx)
+{
+ long ret = ctx[1]; /* return value of the syscall */
+
+ return do_exit(ret);
+}
+
+SEC("tp_btf/sched_process_exit")
+int process_exit(u64 *ctx)
+{
+ return do_exit(0);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/perf/util/bpf_trace_augment.c b/tools/perf/util/bpf_trace_augment.c
index 56ed17534caa..9e706f0fa53d 100644
--- a/tools/perf/util/bpf_trace_augment.c
+++ b/tools/perf/util/bpf_trace_augment.c
@@ -60,7 +60,7 @@ int augmented_syscalls__create_bpf_output(struct evlist *evlist)
void augmented_syscalls__setup_bpf_output(void)
{
struct perf_cpu cpu;
- int i;
+ unsigned int i;
if (bpf_output == NULL)
return;
diff --git a/tools/perf/util/branch.h b/tools/perf/util/branch.h
index 7429530fa774..a1d4736497c4 100644
--- a/tools/perf/util/branch.h
+++ b/tools/perf/util/branch.h
@@ -66,6 +66,9 @@ static inline struct branch_entry *perf_sample__branch_entries(struct perf_sampl
{
u64 *entry = (u64 *)sample->branch_stack;
+ if (entry == NULL)
+ return NULL;
+
entry++;
if (sample->no_hw_idx)
return (struct branch_entry *)entry;
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 8ff0898799ee..f031cbbeeba8 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -30,6 +30,7 @@
#include "map.h"
#include "callchain.h"
#include "branch.h"
+#include "record.h"
#include "symbol.h"
#include "thread.h"
#include "util.h"
@@ -170,7 +171,7 @@ static int get_stack_size(const char *str, unsigned long *_size)
static int
__parse_callchain_report_opt(const char *arg, bool allow_record_opt)
{
- char *tok;
+ char *tok, *arg_copy;
char *endptr, *saveptr = NULL;
bool minpcnt_set = false;
bool record_opt_set = false;
@@ -182,12 +183,17 @@ __parse_callchain_report_opt(const char *arg, bool allow_record_opt)
if (!arg)
return 0;
- while ((tok = strtok_r((char *)arg, ",", &saveptr)) != NULL) {
+ arg_copy = strdup(arg);
+ if (!arg_copy)
+ return -ENOMEM;
+
+ tok = strtok_r(arg_copy, ",", &saveptr);
+ while (tok) {
if (!strncmp(tok, "none", strlen(tok))) {
callchain_param.mode = CHAIN_NONE;
callchain_param.enabled = false;
symbol_conf.use_callchain = false;
- return 0;
+ goto out;
}
if (!parse_callchain_mode(tok) ||
@@ -214,30 +220,35 @@ try_numbers:
unsigned long size = 0;
if (get_stack_size(tok, &size) < 0)
- return -1;
+ goto err_out;
callchain_param.dump_size = size;
try_stack_size = false;
} else if (!minpcnt_set) {
/* try to get the min percent */
callchain_param.min_percent = strtod(tok, &endptr);
if (tok == endptr)
- return -1;
+ goto err_out;
minpcnt_set = true;
} else {
/* try print limit at last */
callchain_param.print_limit = strtoul(tok, &endptr, 0);
if (tok == endptr)
- return -1;
+ goto err_out;
}
next:
- arg = NULL;
+ tok = strtok_r(NULL, ",", &saveptr);
}
if (callchain_register_param(&callchain_param) < 0) {
pr_err("Can't register callchain params\n");
- return -1;
+ goto err_out;
}
+out:
+ free(arg_copy);
return 0;
+err_out:
+ free(arg_copy);
+ return -1;
}
int parse_callchain_report_opt(const char *arg)
@@ -257,14 +268,12 @@ int parse_callchain_record(const char *arg, struct callchain_param *param)
int ret = -1;
/* We need buffer that we know we can write to. */
- buf = malloc(strlen(arg) + 1);
+ buf = strdup(arg);
if (!buf)
return -ENOMEM;
- strcpy(buf, arg);
-
- tok = strtok_r((char *)buf, ",", &saveptr);
- name = tok ? : (char *)buf;
+ tok = strtok_r(buf, ",", &saveptr);
+ name = tok ? : buf;
do {
/* Framepointer style */
@@ -328,6 +337,44 @@ int parse_callchain_record(const char *arg, struct callchain_param *param)
return ret;
}
+static void callchain_debug(const struct callchain_param *callchain)
+{
+ static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
+
+ pr_debug("callchain: type %s\n", str[callchain->record_mode]);
+
+ if (callchain->record_mode == CALLCHAIN_DWARF)
+ pr_debug("callchain: stack dump size %d\n",
+ callchain->dump_size);
+}
+
+int record_opts__parse_callchain(struct record_opts *record,
+ struct callchain_param *callchain,
+ const char *arg, bool unset)
+{
+ int ret;
+
+ callchain->enabled = !unset;
+
+ /* --no-call-graph */
+ if (unset) {
+ callchain->record_mode = CALLCHAIN_NONE;
+ pr_debug("callchain: disabled\n");
+ return 0;
+ }
+
+ ret = parse_callchain_record_opt(arg, callchain);
+ if (!ret) {
+ /* Enable data address sampling for DWARF unwind. */
+ if (callchain->record_mode == CALLCHAIN_DWARF &&
+ !record->record_data_mmap_set)
+ record->record_data_mmap = true;
+ callchain_debug(callchain);
+ }
+
+ return ret;
+}
+
int perf_callchain_config(const char *var, const char *value)
{
char *endptr;
@@ -1854,16 +1901,19 @@ int sample__merge_deferred_callchain(struct perf_sample *sample_orig,
u64 nr_deferred = sample_callchain->callchain->nr;
struct ip_callchain *callchain;
+ if (sample_orig->merged_callchain) {
+ /* Already merged. */
+ return -EINVAL;
+ }
+
if (sample_orig->callchain->nr < 2) {
sample_orig->deferred_callchain = false;
return -EINVAL;
}
callchain = calloc(1 + nr_orig + nr_deferred, sizeof(u64));
- if (callchain == NULL) {
- sample_orig->deferred_callchain = false;
+ if (callchain == NULL)
return -ENOMEM;
- }
callchain->nr = nr_orig + nr_deferred;
/* copy original including PERF_CONTEXT_USER_DEFERRED (but the cookie) */
@@ -1872,6 +1922,7 @@ int sample__merge_deferred_callchain(struct perf_sample *sample_orig,
memcpy(&callchain->ips[nr_orig], sample_callchain->callchain->ips,
nr_deferred * sizeof(u64));
+ sample_orig->merged_callchain = true;
sample_orig->callchain = callchain;
return 0;
}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index df54ddb8c0cb..06d463ccc7a0 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -9,11 +9,13 @@
struct addr_location;
struct evsel;
+struct hist_entry;
+struct hists;
struct ip_callchain;
struct map;
struct perf_sample;
+struct record_opts;
struct thread;
-struct hists;
#define HELP_PAD "\t\t\t\t"
@@ -237,14 +239,6 @@ struct callchain_cursor *get_tls_callchain_cursor(void);
int callchain_cursor__copy(struct callchain_cursor *dst,
struct callchain_cursor *src);
-struct option;
-struct hist_entry;
-
-int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
-int record_callchain_opt(const struct option *opt, const char *arg, int unset);
-
-struct record_opts;
-
int record_opts__parse_callchain(struct record_opts *record,
struct callchain_param *callchain,
const char *arg, bool unset);
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index 040eb75f0804..1b5664d1481f 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -417,7 +417,6 @@ static bool has_pattern_string(const char *str)
int evlist__expand_cgroup(struct evlist *evlist, const char *str, bool open_cgroup)
{
struct evlist *orig_list, *tmp_list;
- struct evsel *pos, *evsel, *leader;
struct rblist orig_metric_events;
struct cgroup *cgrp = NULL;
struct cgroup_name *cn;
@@ -452,6 +451,7 @@ int evlist__expand_cgroup(struct evlist *evlist, const char *str, bool open_cgro
goto out_err;
list_for_each_entry(cn, &cgroup_list, list) {
+ struct evsel *pos;
char *name;
if (!cn->used)
@@ -467,21 +467,37 @@ int evlist__expand_cgroup(struct evlist *evlist, const char *str, bool open_cgro
if (cgrp == NULL)
continue;
- leader = NULL;
+ /* copy the list and set to the new cgroup. */
evlist__for_each_entry(orig_list, pos) {
- evsel = evsel__clone(/*dest=*/NULL, pos);
+ struct evsel *evsel = evsel__clone(/*dest=*/NULL, pos);
+
if (evsel == NULL)
goto out_err;
+ /* stash the copy during the copying. */
+ pos->priv = evsel;
cgroup__put(evsel->cgrp);
evsel->cgrp = cgroup__get(cgrp);
- if (evsel__is_group_leader(pos))
- leader = evsel;
- evsel__set_leader(evsel, leader);
-
evlist__add(tmp_list, evsel);
}
+ /* update leader information using stashed pointer to copy. */
+ evlist__for_each_entry(orig_list, pos) {
+ struct evsel *evsel = pos->priv;
+
+ if (evsel__leader(pos))
+ evsel__set_leader(evsel, evsel__leader(pos)->priv);
+
+ if (pos->metric_leader)
+ evsel->metric_leader = pos->metric_leader->priv;
+
+ if (pos->first_wildcard_match)
+ evsel->first_wildcard_match = pos->first_wildcard_match->priv;
+ }
+ /* the stashed copy is no longer used. */
+ evlist__for_each_entry(orig_list, pos)
+ pos->priv = NULL;
+
/* cgroup__new() has a refcount, release it here */
cgroup__put(cgrp);
nr_cgroups++;
diff --git a/tools/perf/util/clockid.h b/tools/perf/util/clockid.h
index 9b49b4711c76..33dbd8673c1c 100644
--- a/tools/perf/util/clockid.h
+++ b/tools/perf/util/clockid.h
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
-
#ifndef __PERF_CLOCKID_H
#define __PERF_CLOCKID_H
+#include <time.h>
+
struct option;
int parse_clockid(const struct option *opt, const char *str, int unset);
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 0452fbc6c085..087002fb1b9b 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -19,7 +19,6 @@
#include "util/hist.h" /* perf_hist_config */
#include "util/stat.h" /* perf_stat__set_big_num */
#include "util/evsel.h" /* evsel__hw_names, evsel__use_bpf_counters */
-#include "util/addr2line.h" /* addr2line_timeout_ms */
#include "srcline.h"
#include "build-id.h"
#include "debug.h"
@@ -459,7 +458,10 @@ static int perf_default_core_config(const char *var, const char *value)
proc_map_timeout = strtoul(value, NULL, 10);
if (!strcmp(var, "core.addr2line-timeout"))
- addr2line_timeout_ms = strtoul(value, NULL, 10);
+ symbol_conf.addr2line_timeout_ms = strtoul(value, NULL, 10);
+
+ if (!strcmp(var, "core.addr2line-disable-warn"))
+ symbol_conf.addr2line_disable_warn = perf_config_bool(var, value);
/* Add other config variables here. */
return 0;
diff --git a/tools/perf/util/cpu-set-sched.h b/tools/perf/util/cpu-set-sched.h
deleted file mode 100644
index 8cf4e40d322a..000000000000
--- a/tools/perf/util/cpu-set-sched.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-// Definitions taken from glibc for use with older systems, same licensing.
-#ifndef _CPU_SET_SCHED_PERF_H
-#define _CPU_SET_SCHED_PERF_H
-
-#include <features.h>
-#include <sched.h>
-
-#ifndef CPU_EQUAL
-#ifndef __CPU_EQUAL_S
-#if __GNUC_PREREQ (2, 91)
-# define __CPU_EQUAL_S(setsize, cpusetp1, cpusetp2) \
- (__builtin_memcmp (cpusetp1, cpusetp2, setsize) == 0)
-#else
-# define __CPU_EQUAL_S(setsize, cpusetp1, cpusetp2) \
- (__extension__ \
- ({ const __cpu_mask *__arr1 = (cpusetp1)->__bits; \
- const __cpu_mask *__arr2 = (cpusetp2)->__bits; \
- size_t __imax = (setsize) / sizeof (__cpu_mask); \
- size_t __i; \
- for (__i = 0; __i < __imax; ++__i) \
- if (__arr1[__i] != __arr2[__i]) \
- break; \
- __i == __imax; }))
-#endif
-#endif // __CPU_EQUAL_S
-
-#define CPU_EQUAL(cpusetp1, cpusetp2) \
- __CPU_EQUAL_S (sizeof (cpu_set_t), cpusetp1, cpusetp2)
-#endif // CPU_EQUAL
-
-#ifndef CPU_OR
-#ifndef __CPU_OP_S
-#define __CPU_OP_S(setsize, destset, srcset1, srcset2, op) \
- (__extension__ \
- ({ cpu_set_t *__dest = (destset); \
- const __cpu_mask *__arr1 = (srcset1)->__bits; \
- const __cpu_mask *__arr2 = (srcset2)->__bits; \
- size_t __imax = (setsize) / sizeof (__cpu_mask); \
- size_t __i; \
- for (__i = 0; __i < __imax; ++__i) \
- ((__cpu_mask *) __dest->__bits)[__i] = __arr1[__i] op __arr2[__i]; \
- __dest; }))
-#endif // __CPU_OP_S
-
-#define CPU_OR(destset, srcset1, srcset2) \
- __CPU_OP_S (sizeof (cpu_set_t), destset, srcset1, srcset2, |)
-#endif // CPU_OR
-
-#endif // _CPU_SET_SCHED_PERF_H
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index a80845038a5e..11922e1ded84 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -254,7 +254,7 @@ struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus,
aggr_cpu_id_get_t get_id,
void *data, bool needs_sort)
{
- int idx;
+ unsigned int idx;
struct perf_cpu cpu;
struct cpu_aggr_map *c = cpu_aggr_map__empty_new(perf_cpu_map__nr(cpus));
@@ -280,7 +280,7 @@ struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus,
}
}
/* Trim. */
- if (c->nr != perf_cpu_map__nr(cpus)) {
+ if (c->nr != (int)perf_cpu_map__nr(cpus)) {
struct cpu_aggr_map *trimmed_c =
realloc(c,
sizeof(struct cpu_aggr_map) + sizeof(struct aggr_cpu_id) * c->nr);
@@ -631,9 +631,9 @@ size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
#define COMMA first ? "" : ","
- for (i = 0; i < perf_cpu_map__nr(map) + 1; i++) {
+ for (i = 0; i < (int)perf_cpu_map__nr(map) + 1; i++) {
struct perf_cpu cpu = { .cpu = INT16_MAX };
- bool last = i == perf_cpu_map__nr(map);
+ bool last = i == (int)perf_cpu_map__nr(map);
if (!last)
cpu = perf_cpu_map__cpu(map, i);
@@ -679,7 +679,7 @@ static char hex_char(unsigned char val)
size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
{
- int idx;
+ unsigned int idx;
char *ptr = buf;
unsigned char *bitmap;
struct perf_cpu c, last_cpu = perf_cpu_map__max(map);
diff --git a/tools/perf/util/cputopo.c b/tools/perf/util/cputopo.c
index 8bbeb2dc76fd..e0091804fe98 100644
--- a/tools/perf/util/cputopo.c
+++ b/tools/perf/util/cputopo.c
@@ -191,7 +191,7 @@ bool cpu_topology__core_wide(const struct cpu_topology *topology,
const char *core_cpu_list = topology->core_cpus_list[i];
struct perf_cpu_map *core_cpus = perf_cpu_map__new(core_cpu_list);
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
bool has_first, first = true;
perf_cpu_map__for_each_cpu(cpu, idx, core_cpus) {
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 212f17a3dc72..dee3020ceaa9 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -22,12 +22,15 @@
/* use raw logging */
#ifdef CS_DEBUG_RAW
#define CS_LOG_RAW_FRAMES
+#define CS_PKT_MON 1
#ifdef CS_RAW_PACKED
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT | \
OCSD_DFRMTR_PACKED_RAW_OUT)
#else
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT)
#endif
+#else
+#define CS_PKT_MON 0
#endif
/*
@@ -237,46 +240,24 @@ cs_etm_decoder__init_def_logger_printing(struct cs_etm_decoder_params *d_params,
(void *)decoder,
cs_etm_decoder__print_str_cb);
if (ret != 0)
- ret = -1;
-
- return 0;
-}
+ return -1;
#ifdef CS_LOG_RAW_FRAMES
-static void
-cs_etm_decoder__init_raw_frame_logging(struct cs_etm_decoder_params *d_params,
- struct cs_etm_decoder *decoder)
-{
- /* Only log these during a --dump operation */
- if (d_params->operation == CS_ETM_OPERATION_PRINT) {
- /* set up a library default logger to process the
- * raw frame printer we add later
- */
- ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
-
- /* no stdout / err / file output */
- ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
-
- /* set the string CB for the default logger,
- * passes strings to perf print logger.
- */
- ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
- (void *)decoder,
- cs_etm_decoder__print_str_cb);
-
+ /*
+ * Only log raw frames during a --dump operation and when the hardware
+ * is actually generating formatted CoreSight trace frames
+ */
+ if ((d_params->operation == CS_ETM_OPERATION_PRINT) &&
+ (d_params->formatted == true)) {
/* use the built in library printer for the raw frames */
- ocsd_dt_set_raw_frame_printer(decoder->dcd_tree,
- CS_RAW_DEBUG_FLAGS);
+ ret = ocsd_dt_set_raw_frame_printer(decoder->dcd_tree,
+ CS_RAW_DEBUG_FLAGS);
+ if (ret != 0)
+ return -1;
}
-}
-#else
-static void
-cs_etm_decoder__init_raw_frame_logging(
- struct cs_etm_decoder_params *d_params __maybe_unused,
- struct cs_etm_decoder *decoder __maybe_unused)
-{
-}
#endif
+ return 0;
+}
static ocsd_datapath_resp_t
cs_etm_decoder__do_soft_timestamp(struct cs_etm_queue *etmq,
@@ -686,7 +667,7 @@ cs_etm_decoder__create_etm_decoder(struct cs_etm_decoder_params *d_params,
trace_config, &csid))
return -1;
- if (ocsd_dt_set_pkt_protocol_printer(decoder->dcd_tree, csid, 0))
+ if (ocsd_dt_set_pkt_protocol_printer(decoder->dcd_tree, csid, CS_PKT_MON))
return -1;
return 0;
@@ -738,9 +719,6 @@ cs_etm_decoder__new(int decoders, struct cs_etm_decoder_params *d_params,
if (ret != 0)
goto err_free_decoder;
- /* init raw frame logging if required */
- cs_etm_decoder__init_raw_frame_logging(d_params, decoder);
-
for (i = 0; i < decoders; i++) {
ret = cs_etm_decoder__create_etm_decoder(d_params,
&t_params[i],
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index ba1c8e48d495..3b8f2df823a9 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -1181,6 +1181,10 @@ static int add_event(struct ctf_writer *cw, struct evsel *evsel)
const char *name = evsel__name(evsel);
int ret;
+ if (evsel->priv) {
+ pr_err("Error: attempt to add already added event %s\n", name);
+ return -1;
+ }
pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);
event_class = bt_ctf_event_class_create(name);
@@ -1223,13 +1227,28 @@ err:
return -1;
}
-static int setup_events(struct ctf_writer *cw, struct perf_session *session)
+enum setup_events_type {
+ SETUP_EVENTS_ALL,
+ SETUP_EVENTS_NOT_TRACEPOINT,
+ SETUP_EVENTS_TRACEPOINT_ONLY,
+};
+
+static int setup_events(struct ctf_writer *cw, struct perf_session *session,
+ enum setup_events_type type)
{
struct evlist *evlist = session->evlist;
struct evsel *evsel;
int ret;
evlist__for_each_entry(evlist, evsel) {
+ bool is_tracepoint = evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
+
+ if (is_tracepoint && type == SETUP_EVENTS_NOT_TRACEPOINT)
+ continue;
+
+ if (!is_tracepoint && type == SETUP_EVENTS_TRACEPOINT_ONLY)
+ continue;
+
ret = add_event(cw, evsel);
if (ret)
return ret;
@@ -1360,7 +1379,7 @@ static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
*/
ncpus = env->nr_cpus_avail ?: MAX_CPUS;
- stream = zalloc(sizeof(*stream) * ncpus);
+ stream = calloc(ncpus, sizeof(*stream));
if (!stream) {
pr_err("Failed to allocate streams.\n");
return -ENOMEM;
@@ -1412,15 +1431,24 @@ static int process_feature_event(const struct perf_tool *tool,
struct convert *c = container_of(tool, struct convert, tool);
struct ctf_writer *cw = &c->writer;
struct perf_record_header_feature *fe = &event->feat;
+ int ret = perf_event__process_feature(tool, session, event);
- if (event->feat.feat_id < HEADER_LAST_FEATURE) {
- int ret = perf_event__process_feature(session, event);
-
- if (ret)
- return ret;
- }
+ if (ret)
+ return ret;
switch (fe->feat_id) {
+ case HEADER_EVENT_DESC:
+ /*
+ * In non-pipe mode (not here) the evsels combine the desc with
+ * the perf_event_attr when it is parsed. In pipe mode the
+ * perf_event_attr events appear first and then the event desc
+ * feature events that set the names appear after. Once we have
+ * the full evsel data we can generate the babeltrace
+ * events. For tracepoint events we still don't have the tracing
+ * data and so need to wait until the tracing data event to add
+ * those events to babeltrace.
+ */
+ return setup_events(cw, session, SETUP_EVENTS_NOT_TRACEPOINT);
case HEADER_HOSTNAME:
if (session->header.env.hostname) {
return bt_ctf_writer_add_environment_field(cw->writer, "host",
@@ -1451,6 +1479,26 @@ static int process_feature_event(const struct perf_tool *tool,
return 0;
}
+static int process_tracing_data(const struct perf_tool *tool,
+ struct perf_session *session,
+ union perf_event *event)
+{
+ struct convert *c = container_of(tool, struct convert, tool);
+ struct ctf_writer *cw = &c->writer;
+ int ret;
+
+ ret = perf_event__process_tracing_data(tool, session, event);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Now the attr was set up by the attr event, the name by the feature
+ * event desc event and the tracepoint data set up above, the tracepoint
+ * babeltrace events can be added.
+ */
+ return setup_events(cw, session, SETUP_EVENTS_TRACEPOINT_ONLY);
+}
+
static int ctf_writer__setup_clock(struct ctf_writer *cw,
struct perf_session *session,
bool tod)
@@ -1680,9 +1728,10 @@ int bt_convert__perf2ctf(const char *input, const char *path,
c.tool.exit = perf_event__process_exit;
c.tool.fork = perf_event__process_fork;
c.tool.lost = perf_event__process_lost;
- c.tool.tracing_data = perf_event__process_tracing_data;
+ c.tool.tracing_data = process_tracing_data;
c.tool.build_id = perf_event__process_build_id;
c.tool.namespaces = perf_event__process_namespaces;
+ c.tool.finished_round = perf_event__process_finished_round;
c.tool.attr = perf_event__process_attr;
c.tool.feature = process_feature_event;
c.tool.ordering_requires_timestamps = true;
@@ -1727,8 +1776,11 @@ int bt_convert__perf2ctf(const char *input, const char *path,
if (ctf_writer__setup_env(cw, session))
goto free_writer;
- /* CTF events setup */
- if (setup_events(cw, session))
+ /*
+ * CTF events setup. Note, in pipe mode no events exist yet (they come
+ * in via header feature events) and so this does nothing.
+ */
+ if (setup_events(cw, session, SETUP_EVENTS_ALL))
goto free_writer;
if (opts->all && setup_non_sample_events(cw, session))
diff --git a/tools/perf/util/data-convert-json.c b/tools/perf/util/data-convert-json.c
index 6a626322476a..d526c91312ed 100644
--- a/tools/perf/util/data-convert-json.c
+++ b/tools/perf/util/data-convert-json.c
@@ -326,17 +326,7 @@ static void output_headers(struct perf_session *session, struct convert_json *c)
output_json_format(out, false, 2, "]");
}
-static int process_feature_event(const struct perf_tool *tool __maybe_unused,
- struct perf_session *session,
- union perf_event *event)
-{
- if (event->feat.feat_id < HEADER_LAST_FEATURE)
- return perf_event__process_feature(session, event);
-
- return 0;
-}
-
-int bt_convert__perf2json(const char *input_name, const char *output_name,
+int bt_convert__perf2json(const char *_input_name, const char *output_name,
struct perf_data_convert_opts *opts __maybe_unused)
{
struct perf_session *session;
@@ -352,7 +342,7 @@ int bt_convert__perf2json(const char *input_name, const char *output_name,
};
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
- .path = input_name,
+ .path = _input_name,
.force = opts->force,
};
@@ -375,7 +365,7 @@ int bt_convert__perf2json(const char *input_name, const char *output_name,
c.tool.auxtrace = perf_event__process_auxtrace;
c.tool.event_update = perf_event__process_event_update;
c.tool.attr = perf_event__process_attr;
- c.tool.feature = process_feature_event;
+ c.tool.feature = perf_event__process_feature;
c.tool.ordering_requires_timestamps = true;
if (opts->all) {
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 90df41da1a32..94dc534a7386 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -20,18 +20,33 @@
#include "rlimit.h"
#include <internal/lib.h>
-static void close_dir(struct perf_data_file *files, int nr)
+static void perf_data_file__close(struct perf_data_file *file)
{
- while (--nr >= 0) {
- close(files[nr].fd);
- zfree(&files[nr].path);
+ if (file->use_stdio) {
+ if (file->fptr) {
+ fclose(file->fptr);
+ file->fptr = NULL;
+ }
+ } else {
+ close(file->fd);
+ file->fd = -1;
}
+ zfree(&file->path);
+}
+
+static void close_dir(struct perf_data_file *files, int nr)
+{
+ while (--nr >= 0)
+ perf_data_file__close(&files[nr]);
+
free(files);
}
void perf_data__close_dir(struct perf_data *data)
{
close_dir(data->dir.files, data->dir.nr);
+ data->dir.files = NULL;
+ data->dir.nr = 0;
}
int perf_data__create_dir(struct perf_data *data, int nr)
@@ -43,7 +58,7 @@ int perf_data__create_dir(struct perf_data *data, int nr)
if (WARN_ON(!data->is_dir))
return -EINVAL;
- files = zalloc(nr * sizeof(*files));
+ files = calloc(nr, sizeof(*files));
if (!files)
return -ENOMEM;
@@ -132,16 +147,21 @@ int perf_data__open_dir(struct perf_data *data)
files = file;
file = &files[nr++];
- file->path = strdup(path);
+ *file = (struct perf_data_file){
+ .path = strdup(path),
+ .fd = -1,
+ .size = st.st_size,
+ .use_stdio = false,
+ };
if (!file->path)
goto out_err;
ret = open(file->path, O_RDONLY);
- if (ret < 0)
+ if (ret < 0) {
+ ret = -errno;
goto out_err;
-
+ }
file->fd = ret;
- file->size = st.st_size;
}
closedir(dir);
@@ -174,7 +194,7 @@ static bool check_pipe(struct perf_data *data)
}
if (is_pipe) {
- if (data->use_stdio) {
+ if (data->file.use_stdio) {
const char *mode;
mode = perf_data__is_read(data) ? "r" : "w";
@@ -182,7 +202,7 @@ static bool check_pipe(struct perf_data *data)
if (data->file.fptr == NULL) {
data->file.fd = fd;
- data->use_stdio = false;
+ data->file.use_stdio = false;
}
/*
@@ -344,7 +364,7 @@ int perf_data__open(struct perf_data *data)
return 0;
/* currently it allows stdio for pipe only */
- data->use_stdio = false;
+ data->file.use_stdio = false;
if (!data->path)
data->path = "perf.data";
@@ -364,41 +384,57 @@ void perf_data__close(struct perf_data *data)
if (perf_data__is_dir(data))
perf_data__close_dir(data);
- zfree(&data->file.path);
-
- if (data->use_stdio)
- fclose(data->file.fptr);
- else
- close(data->file.fd);
+ perf_data_file__close(&data->file);
}
-ssize_t perf_data__read(struct perf_data *data, void *buf, size_t size)
+static ssize_t perf_data_file__read(struct perf_data_file *file, void *buf, size_t size)
{
- if (data->use_stdio) {
- if (fread(buf, size, 1, data->file.fptr) == 1)
+ if (file->use_stdio) {
+ if (fread(buf, size, 1, file->fptr) == 1)
return size;
- return feof(data->file.fptr) ? 0 : -1;
+ return feof(file->fptr) ? 0 : -1;
}
- return readn(data->file.fd, buf, size);
+ return readn(file->fd, buf, size);
+}
+
+ssize_t perf_data__read(struct perf_data *data, void *buf, size_t size)
+{
+ return perf_data_file__read(&data->file, buf, size);
}
ssize_t perf_data_file__write(struct perf_data_file *file,
void *buf, size_t size)
{
+ if (file->use_stdio) {
+ if (fwrite(buf, size, /*nmemb=*/1, file->fptr) == 1)
+ return size;
+ return -1;
+ }
return writen(file->fd, buf, size);
}
ssize_t perf_data__write(struct perf_data *data,
void *buf, size_t size)
{
- if (data->use_stdio) {
- if (fwrite(buf, size, 1, data->file.fptr) == 1)
- return size;
- return -1;
- }
return perf_data_file__write(&data->file, buf, size);
}
+off_t perf_data_file__seek(struct perf_data_file *file, off_t offset, int whence)
+{
+ if (file->use_stdio) {
+ off_t res = fseeko(file->fptr, offset, whence);
+
+ return res < 0 ? -1 : ftello(file->fptr);
+ }
+ return lseek(file->fd, offset, whence);
+}
+
+off_t perf_data__seek(struct perf_data *data, off_t offset, int whence)
+{
+ /* Note, a pipe fd will fail, returning -1 with errno set to ESPIPE. */
+ return perf_data_file__seek(&data->file, offset, whence);
+}
+
int perf_data__switch(struct perf_data *data,
const char *postfix,
size_t pos, bool at_exit,
@@ -420,19 +456,18 @@ int perf_data__switch(struct perf_data *data,
pr_warning("Failed to rename %s to %s\n", data->path, *new_filepath);
if (!at_exit) {
- close(data->file.fd);
+ perf_data_file__close(&data->file);
ret = perf_data__open(data);
if (ret < 0)
goto out;
- if (lseek(data->file.fd, pos, SEEK_SET) == (off_t)-1) {
+ if (perf_data__seek(data, pos, SEEK_SET) == (off_t)-1) {
ret = -errno;
- pr_debug("Failed to lseek to %zu: %m\n",
- pos);
+ pr_debug("Failed to seek to %zu: %m", pos);
goto out;
}
}
- ret = data->file.fd;
+ ret = perf_data__fd(data);
out:
return ret;
}
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 1438e32e0451..8299fb5fa7da 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -17,32 +17,70 @@ enum perf_dir_version {
PERF_DIR_VERSION = 1,
};
+/**
+ * struct perf_data_file: A wrapper around a file used for perf.data reading or writing. Generally
+ * part of struct perf_data.
+ */
struct perf_data_file {
+ /**
+ * @path: Path of file. Generally a copy of perf_data.path but for a
+ * directory it is the file within the directory.
+ */
char *path;
union {
+ /** @fd: File descriptor for read/writes. Valid if use_stdio is false. */
int fd;
+ /**
+ * @fptr: Stdio FILE. Valid if use_stdio is true, currently just
+ * pipes in perf inject.
+ */
FILE *fptr;
};
+ /** @size: Size of file when opened. */
unsigned long size;
+ /** @use_stdio: Use buffered stdio operations. */
+ bool use_stdio;
};
+/**
+ * struct perf_data: A wrapper around a file used for perf.data reading or writing.
+ */
struct perf_data {
+ /** @path: Path of the file to open. NULL implies 'perf.data' will be used. */
const char *path;
+ /** @file: Underlying file to be used. */
struct perf_data_file file;
+ /** @is_pipe: Underlying file is a pipe. */
bool is_pipe;
+ /** @is_dir: Underlying file is a directory. */
bool is_dir;
+ /** @force: Ignore opening a file created by a different user. */
bool force;
- bool use_stdio;
+ /** @in_place_update: A file opened for reading that will also be written to. */
bool in_place_update;
+ /** @mode: Read or write mode. */
enum perf_data_mode mode;
struct {
+ /** @version: perf_dir_version. */
u64 version;
+ /** @files: perf data files for the directory. */
struct perf_data_file *files;
+ /** @nr: Number of perf data files for the directory. */
int nr;
} dir;
};
+static inline int perf_data_file__fd(struct perf_data_file *file)
+{
+ return file->use_stdio ? fileno(file->fptr) : file->fd;
+}
+
+ssize_t perf_data_file__write(struct perf_data_file *file,
+ void *buf, size_t size);
+off_t perf_data_file__seek(struct perf_data_file *file, off_t offset, int whence);
+
+
static inline bool perf_data__is_read(struct perf_data *data)
{
return data->mode == PERF_DATA_MODE_READ;
@@ -70,10 +108,7 @@ static inline bool perf_data__is_single_file(struct perf_data *data)
static inline int perf_data__fd(struct perf_data *data)
{
- if (data->use_stdio)
- return fileno(data->file.fptr);
-
- return data->file.fd;
+ return perf_data_file__fd(&data->file);
}
int perf_data__open(struct perf_data *data);
@@ -81,8 +116,7 @@ void perf_data__close(struct perf_data *data);
ssize_t perf_data__read(struct perf_data *data, void *buf, size_t size);
ssize_t perf_data__write(struct perf_data *data,
void *buf, size_t size);
-ssize_t perf_data_file__write(struct perf_data_file *file,
- void *buf, size_t size);
+off_t perf_data__seek(struct perf_data *data, off_t offset, int whence);
/*
* If at_exit is set, only rename current perf.data to
* perf.data.<postfix>, continue write on original data.
@@ -99,8 +133,10 @@ int perf_data__open_dir(struct perf_data *data);
void perf_data__close_dir(struct perf_data *data);
unsigned long perf_data__size(struct perf_data *data);
int perf_data__make_kcore_dir(struct perf_data *data, char *buf, size_t buf_sz);
-bool has_kcore_dir(const char *path);
char *perf_data__kallsyms_name(struct perf_data *data);
char *perf_data__guest_kallsyms_name(struct perf_data *data, pid_t machine_pid);
+
+bool has_kcore_dir(const char *path);
bool is_perf_data(const char *path);
+
#endif /* __PERF_DATA_H */
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index ae9a9065aab7..cc2bb1af4243 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -19,7 +19,6 @@
#include "callchain.h"
#include "call-path.h"
#include "db-export.h"
-#include <linux/zalloc.h>
int db_export__init(struct db_export *dbe)
{
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 1dfa4d0eec4d..6b5ffe81f141 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -48,7 +48,7 @@ int debug_ordered_events;
static int redirect_to_stderr;
int debug_data_convert;
static FILE *_debug_file;
-bool debug_display_time;
+static bool debug_display_time;
int debug_type_profile;
FILE *debug_file(void)
diff --git a/tools/perf/util/debuginfo.c b/tools/perf/util/debuginfo.c
index 4a559b3e8cdc..0e35c13abd04 100644
--- a/tools/perf/util/debuginfo.c
+++ b/tools/perf/util/debuginfo.c
@@ -88,18 +88,17 @@ static struct debuginfo *__debuginfo__new(const char *path)
return dbg;
}
-enum dso_binary_type distro_dwarf_types[] = {
- DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
- DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
- DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
- DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
- DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
- DSO_BINARY_TYPE__NOT_FOUND,
-};
-
struct debuginfo *debuginfo__new(const char *path)
{
- enum dso_binary_type *type;
+ static const enum dso_binary_type distro_dwarf_types[] = {
+ DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
+ DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
+ DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+ DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
+ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
+ DSO_BINARY_TYPE__NOT_FOUND,
+ };
+ const enum dso_binary_type *type;
char buf[PATH_MAX], nil = '\0';
struct dso *dso;
struct debuginfo *dinfo = NULL;
diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c
index 9e0420e14be1..59ba88e1f744 100644
--- a/tools/perf/util/disasm.c
+++ b/tools/perf/util/disasm.c
@@ -13,6 +13,7 @@
#include <unistd.h>
#include <linux/string.h>
+#include <linux/zalloc.h>
#include <subcmd/run-command.h>
#include "annotate.h"
@@ -451,7 +452,7 @@ int jump__scnprintf(const struct ins *ins, char *bf, size_t size,
ops->target.offset);
}
-static void jump__delete(struct ins_operands *ops __maybe_unused)
+void jump__delete(struct ins_operands *ops __maybe_unused)
{
/*
* The ops->jump.raw_comment and ops->jump.raw_func_start belong to the
@@ -908,13 +909,14 @@ static void annotation_line__init(struct annotation_line *al,
al->offset = args->offset;
al->line = strdup(args->line);
al->line_nr = args->line_nr;
- al->fileloc = args->fileloc;
+ al->fileloc = args->fileloc ? strdup(args->fileloc) : NULL;
al->data_nr = nr;
}
static void annotation_line__exit(struct annotation_line *al)
{
zfree_srcline(&al->path);
+ zfree(&al->fileloc);
zfree(&al->line);
zfree(&al->cycles);
zfree(&al->br_cntr);
@@ -950,7 +952,7 @@ struct disasm_line *disasm_line__new(struct annotate_args *args)
annotation_line__init(&dl->al, args, nr);
if (dl->al.line == NULL)
- goto out_delete;
+ goto out_free_line;
if (args->offset != -1) {
if (arch__is_powerpc(args->arch)) {
@@ -965,8 +967,7 @@ struct disasm_line *disasm_line__new(struct annotate_args *args)
return dl;
out_free_line:
- zfree(&dl->al.line);
-out_delete:
+ annotation_line__exit(&dl->al);
free(dl);
return NULL;
}
diff --git a/tools/perf/util/disasm.h b/tools/perf/util/disasm.h
index a6e478caf61a..25756e3f47e4 100644
--- a/tools/perf/util/disasm.h
+++ b/tools/perf/util/disasm.h
@@ -161,6 +161,8 @@ int jump__scnprintf(const struct ins *ins, char *bf, size_t size,
int mov__scnprintf(const struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name);
+void jump__delete(struct ins_operands *ops);
+
int symbol__disassemble(struct symbol *sym, struct annotate_args *args);
char *expand_tabs(char *line, char **storage, size_t *storage_len);
diff --git a/tools/perf/util/drm_pmu.c b/tools/perf/util/drm_pmu.c
index b48a375e4584..b8badae7015c 100644
--- a/tools/perf/util/drm_pmu.c
+++ b/tools/perf/util/drm_pmu.c
@@ -15,6 +15,7 @@
#include <unistd.h>
#include <linux/unistd.h>
#include <linux/kcmp.h>
+#include <linux/string.h>
#include <linux/zalloc.h>
#include <sys/stat.h>
#include <sys/syscall.h>
@@ -129,11 +130,6 @@ static struct drm_pmu *add_drm_pmu(struct list_head *pmus, char *line, size_t li
}
-static bool starts_with(const char *str, const char *prefix)
-{
- return !strncmp(prefix, str, strlen(prefix));
-}
-
static int add_event(struct drm_pmu_event **events, int *num_events,
const char *line, enum drm_pmu_unit unit, const char *desc)
{
@@ -174,7 +170,7 @@ static int read_drm_pmus_cb(void *args, int fdinfo_dir_fd, const char *fd_name)
}
while (io__getline(&io, &line, &line_len) > 0) {
- if (starts_with(line, "drm-driver:")) {
+ if (strstarts(line, "drm-driver:")) {
drm = add_drm_pmu(pmus, line, line_len);
if (!drm)
break;
@@ -184,59 +180,59 @@ static int read_drm_pmus_cb(void *args, int fdinfo_dir_fd, const char *fd_name)
* Note the string matching below is alphabetical, with more
* specific matches appearing before less specific.
*/
- if (starts_with(line, "drm-active-")) {
+ if (strstarts(line, "drm-active-")) {
add_event(&events, &num_events, line, DRM_PMU_UNIT_BYTES,
"Total memory active in one or more engines");
continue;
}
- if (starts_with(line, "drm-cycles-")) {
+ if (strstarts(line, "drm-cycles-")) {
add_event(&events, &num_events, line, DRM_PMU_UNIT_CYCLES,
"Busy cycles");
continue;
}
- if (starts_with(line, "drm-engine-capacity-")) {
+ if (strstarts(line, "drm-engine-capacity-")) {
add_event(&events, &num_events, line, DRM_PMU_UNIT_CAPACITY,
"Engine capacity");
continue;
}
- if (starts_with(line, "drm-engine-")) {
+ if (strstarts(line, "drm-engine-")) {
add_event(&events, &num_events, line, DRM_PMU_UNIT_NS,
"Utilization in ns");
continue;
}
- if (starts_with(line, "drm-maxfreq-")) {
+ if (strstarts(line, "drm-maxfreq-")) {
add_event(&events, &num_events, line, DRM_PMU_UNIT_HZ,
"Maximum frequency");
continue;
}
- if (starts_with(line, "drm-purgeable-")) {
+ if (strstarts(line, "drm-purgeable-")) {
add_event(&events, &num_events, line, DRM_PMU_UNIT_BYTES,
"Size of resident and purgeable memory buffers");
continue;
}
- if (starts_with(line, "drm-resident-")) {
+ if (strstarts(line, "drm-resident-")) {
add_event(&events, &num_events, line, DRM_PMU_UNIT_BYTES,
"Size of resident memory buffers");
continue;
}
- if (starts_with(line, "drm-shared-")) {
+ if (strstarts(line, "drm-shared-")) {
add_event(&events, &num_events, line, DRM_PMU_UNIT_BYTES,
"Size of shared memory buffers");
continue;
}
- if (starts_with(line, "drm-total-cycles-")) {
+ if (strstarts(line, "drm-total-cycles-")) {
add_event(&events, &num_events, line, DRM_PMU_UNIT_BYTES,
"Total busy cycles");
continue;
}
- if (starts_with(line, "drm-total-")) {
+ if (strstarts(line, "drm-total-")) {
add_event(&events, &num_events, line, DRM_PMU_UNIT_BYTES,
"Size of shared and private memory");
continue;
}
- if (verbose > 1 && starts_with(line, "drm-") &&
- !starts_with(line, "drm-client-id:") &&
- !starts_with(line, "drm-pdev:"))
+ if (verbose > 1 && strstarts(line, "drm-") &&
+ !strstarts(line, "drm-client-id:") &&
+ !strstarts(line, "drm-pdev:"))
pr_debug("Unhandled DRM PMU fdinfo line match '%s'\n", line);
}
if (drm) {
@@ -261,7 +257,7 @@ bool drm_pmu__have_event(const struct perf_pmu *pmu, const char *name)
{
struct drm_pmu *drm = container_of(pmu, struct drm_pmu, pmu);
- if (!starts_with(name, "drm-"))
+ if (!strstarts(name, "drm-"))
return false;
for (int i = 0; i < drm->num_events; i++) {
diff --git a/tools/perf/util/dsos.c b/tools/perf/util/dsos.c
index 0a7645c7fae7..e927e707abac 100644
--- a/tools/perf/util/dsos.c
+++ b/tools/perf/util/dsos.c
@@ -6,7 +6,6 @@
#include "vdso.h"
#include "namespaces.h"
#include <errno.h>
-#include <libgen.h>
#include <stdlib.h>
#include <string.h>
#include <symbol.h> // filename__read_build_id
@@ -196,6 +195,9 @@ static struct dso *__dsos__find_by_longname_id(struct dsos *dsos,
int __dsos__add(struct dsos *dsos, struct dso *dso)
{
+ if (!dso)
+ return -EINVAL;
+
if (dsos->cnt == dsos->allocated) {
unsigned int to_allocate = 2;
struct dso **temp;
@@ -294,34 +296,21 @@ struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
static void dso__set_basename(struct dso *dso)
{
- char *base, *lname;
+ bool allocated = false;
+ const char *base;
int tid;
if (perf_pid_map_tid(dso__long_name(dso), &tid)) {
- if (asprintf(&base, "[JIT] tid %d", tid) < 0)
- return;
- } else {
- /*
- * basename() may modify path buffer, so we must pass
- * a copy.
- */
- lname = strdup(dso__long_name(dso));
- if (!lname)
- return;
+ char *jitname;
- /*
- * basename() may return a pointer to internal
- * storage which is reused in subsequent calls
- * so copy the result.
- */
- base = strdup(basename(lname));
-
- free(lname);
-
- if (!base)
+ if (asprintf(&jitname, "[JIT] tid %d", tid) < 0)
return;
+ allocated = true;
+ base = jitname;
+ } else {
+ base = perf_basename(dso__long_name(dso));
}
- dso__set_short_name(dso, base, true);
+ dso__set_short_name(dso, base, allocated);
}
static struct dso *__dsos__addnew_id(struct dsos *dsos, const char *name, const struct dso_id *id)
diff --git a/tools/perf/util/dump-insn.c b/tools/perf/util/dump-insn.c
deleted file mode 100644
index c1cc0ade48d0..000000000000
--- a/tools/perf/util/dump-insn.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/compiler.h>
-#include "dump-insn.h"
-
-/* Fallback code */
-
-__weak
-const char *dump_insn(struct perf_insn *x __maybe_unused,
- u64 ip __maybe_unused, u8 *inbuf __maybe_unused,
- int inlen __maybe_unused, int *lenp)
-{
- if (lenp)
- *lenp = 0;
- return "?";
-}
-
-__weak
-int arch_is_uncond_branch(const unsigned char *buf __maybe_unused,
- size_t len __maybe_unused,
- int x86_64 __maybe_unused)
-{
- return 0;
-}
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 9267af204c7d..92db2fccc788 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -303,6 +303,33 @@ Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
return vr_die;
}
+/**
+ * die_get_pointer_type - Get a pointer/array type die
+ * @type_die: a DIE of a type
+ * @die_mem: where to store a type DIE
+ *
+ * Get a pointer/array type DIE from @type_die. If the type is a typedef or
+ * qualifier (const, volatile, etc.), follow the chain to find the underlying
+ * pointer type.
+ */
+Dwarf_Die *die_get_pointer_type(Dwarf_Die *type_die, Dwarf_Die *die_mem)
+{
+ int tag;
+
+ do {
+ tag = dwarf_tag(type_die);
+ if (tag == DW_TAG_pointer_type || tag == DW_TAG_array_type)
+ return type_die;
+ if (tag != DW_TAG_typedef && tag != DW_TAG_const_type &&
+ tag != DW_TAG_restrict_type && tag != DW_TAG_volatile_type &&
+ tag != DW_TAG_shared_type)
+ return NULL;
+ type_die = die_get_type(type_die, die_mem);
+ } while (type_die);
+
+ return NULL;
+}
+
/* Get attribute and translate it as a udata */
static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name,
Dwarf_Word *result)
@@ -1378,6 +1405,8 @@ struct find_var_data {
Dwarf_Addr addr;
/* Target register */
unsigned reg;
+ /* Access data type */
+ Dwarf_Die type;
/* Access offset, set for global data */
int offset;
/* True if the current register is the frame base */
@@ -1390,29 +1419,31 @@ struct find_var_data {
static bool match_var_offset(Dwarf_Die *die_mem, struct find_var_data *data,
s64 addr_offset, s64 addr_type, bool is_pointer)
{
- Dwarf_Die type_die;
Dwarf_Word size;
+ Dwarf_Die ptr_die;
+ Dwarf_Die *ptr_type;
s64 offset = addr_offset - addr_type;
- if (offset == 0) {
- /* Update offset relative to the start of the variable */
- data->offset = 0;
- return true;
- }
-
if (offset < 0)
return false;
- if (die_get_real_type(die_mem, &type_die) == NULL)
+ if (__die_get_real_type(die_mem, &data->type) == NULL)
return false;
- if (is_pointer && dwarf_tag(&type_die) == DW_TAG_pointer_type) {
+ ptr_type = die_get_pointer_type(&data->type, &ptr_die);
+ if (is_pointer && ptr_type) {
/* Get the target type of the pointer */
- if (die_get_real_type(&type_die, &type_die) == NULL)
+ if (__die_get_real_type(ptr_type, &data->type) == NULL)
return false;
}
- if (dwarf_aggregate_size(&type_die, &size) < 0)
+ if (offset == 0) {
+ /* Update offset relative to the start of the variable */
+ data->offset = 0;
+ return true;
+ }
+
+ if (dwarf_aggregate_size(&data->type, &size) < 0)
return false;
if ((u64)offset >= size)
@@ -1529,7 +1560,7 @@ static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
* when the variable is in the stack.
*/
Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg,
- int *poffset, bool is_fbreg,
+ Dwarf_Die *type_die, int *poffset, bool is_fbreg,
Dwarf_Die *die_mem)
{
struct find_var_data data = {
@@ -1541,8 +1572,10 @@ Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg,
Dwarf_Die *result;
result = die_find_child(sc_die, __die_find_var_reg_cb, &data, die_mem);
- if (result)
+ if (result) {
*poffset = data.offset;
+ *type_die = data.type;
+ }
return result;
}
@@ -1586,7 +1619,8 @@ static int __die_find_var_addr_cb(Dwarf_Die *die_mem, void *arg)
* This is usually for global variables.
*/
Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr addr,
- Dwarf_Die *die_mem, int *offset)
+ Dwarf_Die *die_mem, Dwarf_Die *type_die,
+ int *offset)
{
struct find_var_data data = {
.addr = addr,
@@ -1594,8 +1628,10 @@ Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr addr,
Dwarf_Die *result;
result = die_find_child(sc_die, __die_find_var_addr_cb, &data, die_mem);
- if (result)
+ if (result) {
*offset = data.offset;
+ *type_die = data.type;
+ }
return result;
}
@@ -1605,10 +1641,11 @@ static int __die_collect_vars_cb(Dwarf_Die *die_mem, void *arg)
Dwarf_Die type_die;
int tag = dwarf_tag(die_mem);
Dwarf_Attribute attr;
- Dwarf_Addr base, start, end;
+ Dwarf_Addr base, start, end = 0;
Dwarf_Op *ops;
size_t nops;
struct die_var_type *vt;
+ ptrdiff_t off;
if (tag != DW_TAG_variable && tag != DW_TAG_formal_parameter)
return DIE_FIND_CB_SIBLING;
@@ -1616,39 +1653,40 @@ static int __die_collect_vars_cb(Dwarf_Die *die_mem, void *arg)
if (dwarf_attr(die_mem, DW_AT_location, &attr) == NULL)
return DIE_FIND_CB_SIBLING;
- /*
- * Only collect the first location as it can reconstruct the
- * remaining state by following the instructions.
- * start = 0 means it covers the whole range.
- */
- if (dwarf_getlocations(&attr, 0, &base, &start, &end, &ops, &nops) <= 0)
- return DIE_FIND_CB_SIBLING;
-
- if (!check_allowed_ops(ops, nops))
- return DIE_FIND_CB_SIBLING;
-
if (__die_get_real_type(die_mem, &type_die) == NULL)
return DIE_FIND_CB_SIBLING;
- vt = malloc(sizeof(*vt));
- if (vt == NULL)
- return DIE_FIND_CB_END;
-
- /* Usually a register holds the value of a variable */
- vt->is_reg_var_addr = false;
+ /*
+ * Collect all location entries as variables may have different
+ * locations across different address ranges.
+ */
+ off = 0;
+ while ((off = dwarf_getlocations(&attr, off, &base, &start, &end, &ops, &nops)) > 0) {
+ if (!check_allowed_ops(ops, nops))
+ continue;
- if (((ops->atom >= DW_OP_breg0 && ops->atom <= DW_OP_breg31) ||
- ops->atom == DW_OP_bregx || ops->atom == DW_OP_fbreg) &&
- !is_breg_access_indirect(ops, nops))
- /* The register contains an address of the variable. */
- vt->is_reg_var_addr = true;
+ vt = malloc(sizeof(*vt));
+ if (vt == NULL)
+ return DIE_FIND_CB_END;
- vt->die_off = dwarf_dieoffset(&type_die);
- vt->addr = start;
- vt->reg = reg_from_dwarf_op(ops);
- vt->offset = offset_from_dwarf_op(ops);
- vt->next = *var_types;
- *var_types = vt;
+ /* Usually a register holds the value of a variable */
+ vt->is_reg_var_addr = false;
+
+ if (((ops->atom >= DW_OP_breg0 && ops->atom <= DW_OP_breg31) ||
+ ops->atom == DW_OP_bregx || ops->atom == DW_OP_fbreg) &&
+ !is_breg_access_indirect(ops, nops))
+ /* The register contains an address of the variable. */
+ vt->is_reg_var_addr = true;
+
+ vt->die_off = dwarf_dieoffset(&type_die);
+ vt->addr = start;
+ vt->end = end;
+ vt->has_range = (end != 0 || start != 0);
+ vt->reg = reg_from_dwarf_op(ops);
+ vt->offset = offset_from_dwarf_op(ops);
+ vt->next = *var_types;
+ *var_types = vt;
+ }
return DIE_FIND_CB_SIBLING;
}
@@ -1707,6 +1745,8 @@ static int __die_collect_global_vars_cb(Dwarf_Die *die_mem, void *arg)
vt->die_off = dwarf_dieoffset(&type_die);
vt->addr = ops->number;
+ vt->end = 0;
+ vt->has_range = false;
vt->reg = -1;
vt->offset = 0;
vt->next = *var_types;
@@ -2091,13 +2131,28 @@ Dwarf_Die *die_get_member_type(Dwarf_Die *type_die, int offset,
tag = dwarf_tag(&mb_type);
- if (tag == DW_TAG_structure_type || tag == DW_TAG_union_type) {
+ if (tag == DW_TAG_structure_type || tag == DW_TAG_union_type ||
+ tag == DW_TAG_array_type) {
Dwarf_Word loc;
/* Update offset for the start of the member struct */
if (die_get_data_member_location(member, &loc) == 0)
offset -= loc;
}
+
+ /* Handle array types: resolve to the element type by one level */
+ if (tag == DW_TAG_array_type) {
+ Dwarf_Word size;
+
+ if (die_get_real_type(&mb_type, &mb_type) == NULL)
+ return NULL;
+
+ if (dwarf_aggregate_size(&mb_type, &size) < 0)
+ return NULL;
+
+ offset = offset % size;
+ tag = dwarf_tag(&mb_type);
+ }
}
*die_mem = mb_type;
return die_mem;
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index cd481ec9c5a1..a79968a2e573 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -60,6 +60,8 @@ Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem);
Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem);
/* Get a type die, but skip qualifiers and typedef */
Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem);
+/* Get a pointer/array type, following typedefs/qualifiers */
+Dwarf_Die *die_get_pointer_type(Dwarf_Die *type_die, Dwarf_Die *die_mem);
/* Check whether the DIE is signed or not */
bool die_is_signed_type(Dwarf_Die *tp_die);
@@ -146,10 +148,12 @@ struct die_var_type {
struct die_var_type *next;
u64 die_off;
u64 addr;
+ u64 end; /* end address of location range */
int reg;
int offset;
/* Whether the register holds a address to the type */
bool is_reg_var_addr;
+ bool has_range; /* whether end is valid */
};
/* Return type info of a member at offset */
@@ -163,12 +167,13 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf);
/* Find a variable saved in the 'reg' at given address */
Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg,
- int *poffset, bool is_fbreg,
+ Dwarf_Die *type_die, int *poffset, bool is_fbreg,
Dwarf_Die *die_mem);
/* Find a (global) variable located in the 'addr' */
Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr addr,
- Dwarf_Die *die_mem, int *offset);
+ Dwarf_Die *die_mem, Dwarf_Die *type_die,
+ int *offset);
/* Save all variables and parameters in this scope */
void die_collect_vars(Dwarf_Die *sc_die, struct die_var_type **var_types);
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 93d475a80f14..1e54e2c86360 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -718,7 +718,7 @@ int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
for (i = 0; i < env->nr_numa_nodes; i++) {
struct perf_cpu tmp;
- int j;
+ unsigned int j;
nn = &env->numa_nodes[i];
perf_cpu_map__for_each_cpu(tmp, j, nn->map)
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index a4501cbca375..c7052ac1f856 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -112,6 +112,7 @@ struct perf_env {
struct cpu_cache_level *caches;
struct cpu_domain_map **cpu_domain;
int caches_cnt;
+ unsigned int cln_size;
u32 comp_ratio;
u32 comp_ver;
u32 comp_type;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index bc045fddf7d5..66f4843bb235 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -12,7 +12,6 @@
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <linux/perf_event.h>
-#include <linux/zalloc.h>
#include "cpumap.h"
#include "dso.h"
#include "event.h"
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 591bdf0b3e2a..ee971d15b3c6 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -13,6 +13,7 @@
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
+#include "dwarf-regs.h"
#include "evlist.h"
#include "evsel.h"
#include "record.h"
@@ -98,38 +99,47 @@ struct evlist *evlist__new(void)
return evlist;
}
-struct evlist *evlist__new_default(void)
+struct evlist *evlist__new_default(const struct target *target, bool sample_callchains)
{
struct evlist *evlist = evlist__new();
bool can_profile_kernel;
struct perf_pmu *pmu = NULL;
+ struct evsel *evsel;
+ char buf[256];
+ int err;
if (!evlist)
return NULL;
can_profile_kernel = perf_event_paranoid_check(1);
- while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
- char buf[256];
- int err;
-
- snprintf(buf, sizeof(buf), "%s/cycles/%s", pmu->name,
+ if (EM_HOST == EM_S390 && sample_callchains) {
+ snprintf(buf, sizeof(buf), "software/%s/%s",
+ target__has_cpu(target) ? "cpu-clock" : "task-clock",
can_profile_kernel ? "P" : "Pu");
err = parse_event(evlist, buf);
- if (err) {
- evlist__delete(evlist);
- return NULL;
+ if (err)
+ goto out_err;
+ } else {
+ while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+ snprintf(buf, sizeof(buf), "%s/cycles/%s", pmu->name,
+ can_profile_kernel ? "P" : "Pu");
+ err = parse_event(evlist, buf);
+ if (err)
+ goto out_err;
}
}
+ /* If there is only 1 event a sample identifier isn't necessary. */
if (evlist->core.nr_entries > 1) {
- struct evsel *evsel;
-
evlist__for_each_entry(evlist, evsel)
evsel__set_sample_id(evsel, /*can_sample_identifier=*/false);
}
return evlist;
+out_err:
+ evlist__delete(evlist);
+ return NULL;
}
struct evlist *evlist__new_dummy(void)
@@ -815,9 +825,8 @@ static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
bool overwrite)
{
int i;
- struct mmap *map;
+ struct mmap *map = calloc(evlist->core.nr_mmaps, sizeof(struct mmap));
- map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
if (!map)
return NULL;
@@ -1622,8 +1631,11 @@ int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct
struct evsel *evsel = evlist__event2evsel(evlist, event);
int ret;
- if (!evsel)
+ if (!evsel) {
+ /* Ensure the sample is okay for perf_sample__exit. */
+ perf_sample__init(sample, /*all=*/false);
return -EFAULT;
+ }
ret = evsel__parse_sample(evsel, event, sample);
if (ret)
return ret;
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index d17c3b57a409..e507f5f20ef6 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -104,7 +104,7 @@ struct evsel_str_handler {
};
struct evlist *evlist__new(void);
-struct evlist *evlist__new_default(void);
+struct evlist *evlist__new_default(const struct target *target, bool sample_callchains);
struct evlist *evlist__new_dummy(void);
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
struct perf_thread_map *threads);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f59228c1a39e..2ee87fd84d3e 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -926,7 +926,8 @@ const char *evsel__name(struct evsel *evsel)
break;
case PERF_TYPE_TRACEPOINT:
- scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
+ scnprintf(bf, sizeof(bf), "unknown tracepoint id=%#"PRIx64,
+ evsel->core.attr.config);
break;
case PERF_TYPE_BREAKPOINT:
@@ -938,8 +939,8 @@ const char *evsel__name(struct evsel *evsel)
break;
default:
- scnprintf(bf, sizeof(bf), "unknown attr type: %d",
- evsel->core.attr.type);
+ scnprintf(bf, sizeof(bf), "unknown event PMU=%d config=%#"PRIx64,
+ evsel->core.attr.type, evsel->core.attr.config);
break;
}
@@ -1015,12 +1016,17 @@ uint16_t evsel__e_machine(struct evsel *evsel, uint32_t *e_flags)
return perf_session__e_machine(session, e_flags);
}
-static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
- struct callchain_param *param)
+static void __evsel__config_callchain(struct evsel *evsel, const struct record_opts *opts,
+ const struct callchain_param *param)
{
bool function = evsel__is_function_event(evsel);
struct perf_event_attr *attr = &evsel->core.attr;
+ if (EM_HOST == EM_S390 && param->record_mode == CALLCHAIN_FP) {
+ pr_warning_once(
+ "Framepointer unwinding lacks kernel support. Use '--call-graph dwarf'\n");
+ }
+
evsel__set_sample_bit(evsel, CALLCHAIN);
attr->sample_max_stack = param->max_stack;
@@ -1080,14 +1086,14 @@ static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *o
attr->defer_callchain = 1;
}
-void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
- struct callchain_param *param)
+void evsel__config_callchain(struct evsel *evsel, const struct record_opts *opts,
+ const struct callchain_param *param)
{
if (param->enabled)
return __evsel__config_callchain(evsel, opts, param);
}
-static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *param)
+static void evsel__reset_callgraph(struct evsel *evsel, const struct callchain_param *param)
{
struct perf_event_attr *attr = &evsel->core.attr;
@@ -1106,7 +1112,7 @@ static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *
static void evsel__apply_ratio_to_prev(struct evsel *evsel,
struct perf_event_attr *attr,
- struct record_opts *opts,
+ const struct record_opts *opts,
const char *buf)
{
struct perf_event_attr *prev_attr = NULL;
@@ -1170,7 +1176,7 @@ static void evsel__apply_ratio_to_prev(struct evsel *evsel,
}
static void evsel__apply_config_terms(struct evsel *evsel,
- struct record_opts *opts, bool track)
+ const struct record_opts *opts, bool track)
{
struct evsel_config_term *term;
struct list_head *config_terms = &evsel->config_terms;
@@ -1445,7 +1451,7 @@ void __weak arch_evsel__apply_ratio_to_prev(struct evsel *evsel __maybe_unused,
{
}
-static void evsel__set_default_freq_period(struct record_opts *opts,
+static void evsel__set_default_freq_period(const struct record_opts *opts,
struct perf_event_attr *attr)
{
if (opts->freq) {
@@ -1490,8 +1496,8 @@ bool evsel__is_offcpu_event(struct evsel *evsel)
* enable/disable events specifically, as there's no
* initial traced exec call.
*/
-void evsel__config(struct evsel *evsel, struct record_opts *opts,
- struct callchain_param *callchain)
+void evsel__config(struct evsel *evsel, const struct record_opts *opts,
+ const struct callchain_param *callchain)
{
struct evsel *leader = evsel__leader(evsel);
struct perf_event_attr *attr = &evsel->core.attr;
@@ -3067,7 +3073,7 @@ static inline bool overflow(const void *endp, u16 max_size, const void *offset,
#define OVERFLOW_CHECK(offset, size, max_size) \
do { \
if (overflow(endp, (max_size), (offset), (size))) \
- return -EFAULT; \
+ goto out_efault; \
} while (0)
#define OVERFLOW_CHECK_u64(offset) \
@@ -3199,6 +3205,8 @@ static int __set_offcpu_sample(struct perf_sample *data)
data->cgroup = *array;
return 0;
+out_efault:
+ return -EFAULT;
}
int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
@@ -3217,7 +3225,8 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
*/
union u64_swap u;
- memset(data, 0, sizeof(*data));
+ perf_sample__init(data, /*all=*/true);
+ data->evsel = evsel;
data->cpu = data->pid = data->tid = -1;
data->stream_id = data->id = data->time = -1ULL;
data->period = evsel->core.attr.sample_period;
@@ -3231,25 +3240,26 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
data->callchain = (struct ip_callchain *)&event->callchain_deferred.nr;
if (data->callchain->nr > max_callchain_nr)
- return -EFAULT;
+ goto out_efault;
data->deferred_cookie = event->callchain_deferred.cookie;
if (evsel->core.attr.sample_id_all)
perf_evsel__parse_id_sample(evsel, event, data);
+
return 0;
}
if (event->header.type != PERF_RECORD_SAMPLE) {
- if (!evsel->core.attr.sample_id_all)
- return 0;
- return perf_evsel__parse_id_sample(evsel, event, data);
+ if (evsel->core.attr.sample_id_all)
+ perf_evsel__parse_id_sample(evsel, event, data);
+ return 0;
}
array = event->sample.array;
if (perf_event__check_size(event, evsel->sample_size))
- return -EFAULT;
+ goto out_efault;
if (type & PERF_SAMPLE_IDENTIFIER) {
data->id = *array;
@@ -3342,7 +3352,7 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
sizeof(struct sample_read_value);
if (data->read.group.nr > max_group_nr)
- return -EFAULT;
+ goto out_efault;
sz = data->read.group.nr * sample_read_value_size(read_format);
OVERFLOW_CHECK(array, sz, max_size);
@@ -3370,7 +3380,7 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
data->callchain = (struct ip_callchain *)array++;
callchain_nr = data->callchain->nr;
if (callchain_nr > max_callchain_nr)
- return -EFAULT;
+ goto out_efault;
sz = callchain_nr * sizeof(u64);
/*
* Save the cookie for the deferred user callchain. The last 2
@@ -3428,7 +3438,7 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
data->branch_stack = (struct branch_stack *)array++;
if (data->branch_stack->nr > max_branch_nr)
- return -EFAULT;
+ goto out_efault;
sz = data->branch_stack->nr * sizeof(struct branch_entry);
if (evsel__has_branch_hw_idx(evsel)) {
@@ -3505,7 +3515,7 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
data->user_stack.size = *array++;
if (WARN_ONCE(data->user_stack.size > sz,
"user stack dump failure\n"))
- return -EFAULT;
+ goto out_efault;
}
}
@@ -3582,10 +3592,15 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
array = (void *)array + sz;
}
- if (evsel__is_offcpu_event(evsel))
- return __set_offcpu_sample(data);
+ if (evsel__is_offcpu_event(evsel)) {
+ if (__set_offcpu_sample(data))
+ goto out_efault;
+ }
return 0;
+out_efault:
+ perf_sample__exit(data);
+ return -EFAULT;
}
int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
@@ -3785,25 +3800,42 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
{
int paranoid;
- if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
- evsel->core.attr.type == PERF_TYPE_HARDWARE &&
- evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
+ if ((err == ENODEV || err == ENOENT || err == ENXIO) &&
+ evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
/*
- * If it's cycles then fall back to hrtimer based cpu-clock sw
- * counter, which is always available even if no PMU support.
- *
- * PPC returns ENXIO until 2.6.37 (behavior changed with commit
- * b0a873e).
+ * If it's the legacy hardware cycles event fails then fall back
+ * to hrtimer based cpu-clock sw counter, which is always
+ * available even if no PMU support. PPC returned ENXIO rather
+ * than ENODEV or ENOENT until 2.6.37.
*/
- evsel->core.attr.type = PERF_TYPE_SOFTWARE;
+ evsel->pmu = perf_pmus__find_by_type(PERF_TYPE_SOFTWARE);
+ assert(evsel->pmu); /* software is a "well-known" and can't fail PMU type. */
+
+ /* Configure the event. */
+ evsel->core.attr.type = PERF_TYPE_SOFTWARE;
evsel->core.attr.config = target__has_cpu(target)
? PERF_COUNT_SW_CPU_CLOCK
: PERF_COUNT_SW_TASK_CLOCK;
- scnprintf(msg, msgsize,
- "The cycles event is not supported, trying to fall back to %s",
- target__has_cpu(target) ? "cpu-clock" : "task-clock");
+ evsel->core.is_pmu_core = false;
+
+ /* Remove excludes for new event. */
+ if (evsel->fallenback_eacces) {
+ evsel->core.attr.exclude_kernel = 0;
+ evsel->core.attr.exclude_hv = 0;
+ evsel->fallenback_eacces = false;
+ }
+ if (evsel->fallenback_eopnotsupp) {
+ evsel->core.attr.exclude_guest = 0;
+ evsel->fallenback_eopnotsupp = false;
+ }
+ /* Name is recomputed by evsel__name. */
zfree(&evsel->name);
+
+ /* Log message. */
+ scnprintf(msg, msgsize,
+ "The cycles event is not supported, trying to fall back to %s",
+ evsel__name(evsel));
return true;
} else if (err == EACCES && !evsel->core.attr.exclude_kernel &&
(paranoid = perf_event_paranoid()) > 1) {
@@ -3830,7 +3862,7 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
" samples", paranoid);
evsel->core.attr.exclude_kernel = 1;
evsel->core.attr.exclude_hv = 1;
-
+ evsel->fallenback_eacces = true;
return true;
} else if (err == EOPNOTSUPP && !evsel->core.attr.exclude_guest &&
!evsel->exclude_GH) {
@@ -3851,7 +3883,7 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
/* Apple M1 requires exclude_guest */
scnprintf(msg, msgsize, "Trying to fall back to excluding guest samples");
evsel->core.attr.exclude_guest = 1;
-
+ evsel->fallenback_eopnotsupp = true;
return true;
}
no_fallback:
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index a3d754c029a0..339b5c08a33d 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -124,6 +124,8 @@ struct evsel {
bool default_metricgroup; /* A member of the Default metricgroup */
bool default_show_events; /* If a default group member, show the event */
bool needs_uniquify;
+ bool fallenback_eacces;
+ bool fallenback_eopnotsupp;
struct hashmap *per_pkg_mask;
int err;
int script_output_type;
@@ -285,10 +287,10 @@ void evsel__set_priv_destructor(void (*destructor)(void *priv));
struct callchain_param;
-void evsel__config(struct evsel *evsel, struct record_opts *opts,
- struct callchain_param *callchain);
-void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
- struct callchain_param *callchain);
+void evsel__config(struct evsel *evsel, const struct record_opts *opts,
+ const struct callchain_param *callchain);
+void evsel__config_callchain(struct evsel *evsel, const struct record_opts *opts,
+ const struct callchain_param *callchain);
int __evsel__sample_size(u64 sample_type);
void evsel__calc_id_pos(struct evsel *evsel);
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index 465fe2e9bbbe..644769e92708 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -376,7 +376,8 @@ int expr__find_ids(const char *expr, const char *one,
if (one)
expr__del_id(ctx, one);
- return ret;
+ /* A positive value means syntax error, convert to -EINVAL */
+ return ret > 0 ? -EINVAL : ret;
}
double expr_id_data__value(const struct expr_id_data *data)
@@ -406,9 +407,9 @@ double expr__get_literal(const char *literal, const struct expr_scanner_ctx *ctx
&count))
result = count;
else
- pr_err("Failure to read '%s'", literal);
+ pr_err("Failure to read '%s'\n", literal);
} else {
- pr_err("Unrecognized literal '%s'", literal);
+ pr_err("Unrecognized literal '%s'\n", literal);
}
pr_debug2("literal: %s = %f\n", literal, result);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 9142a8ba4019..f30e48eb3fc3 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -54,6 +54,7 @@
#include "bpf-event.h"
#include "bpf-utils.h"
#include "clockid.h"
+#include "cacheline.h"
#include <linux/ctype.h>
#include <internal/lib.h>
@@ -62,6 +63,15 @@
#include <event-parse.h>
#endif
+#define MAX_BPF_DATA_LEN (256 * 1024 * 1024)
+#define MAX_BPF_PROGS 131072
+#define MAX_CACHE_ENTRIES 32768
+#define MAX_GROUP_DESC 32768
+#define MAX_NUMA_NODES 4096
+#define MAX_PMU_CAPS 512
+#define MAX_PMU_MAPPINGS 4096
+#define MAX_SCHED_DOMAINS 64
+
/*
* magic2 = "PERFILE2"
* must be a numerical value to let the endianness
@@ -306,16 +316,19 @@ static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
return 0;
}
-#ifdef HAVE_LIBTRACEEVENT
static int write_tracing_data(struct feat_fd *ff,
- struct evlist *evlist)
+ struct evlist *evlist __maybe_unused)
{
if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
return -1;
+#ifdef HAVE_LIBTRACEEVENT
return read_tracing_data(ff->fd, &evlist->core.entries);
-}
+#else
+ pr_err("ERROR: Trying to write tracing data without libtraceevent support.\n");
+ return -1;
#endif
+}
static int write_build_id(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
@@ -1026,10 +1039,10 @@ static int write_dir_format(struct feat_fd *ff,
return do_write(ff, &data->dir.version, sizeof(data->dir.version));
}
-#ifdef HAVE_LIBBPF_SUPPORT
-static int write_bpf_prog_info(struct feat_fd *ff,
+static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
struct evlist *evlist __maybe_unused)
{
+#ifdef HAVE_LIBBPF_SUPPORT
struct perf_env *env = &ff->ph->env;
struct rb_root *root;
struct rb_node *next;
@@ -1067,11 +1080,16 @@ static int write_bpf_prog_info(struct feat_fd *ff,
out:
up_read(&env->bpf_progs.lock);
return ret;
+#else
+ pr_err("ERROR: Trying to write bpf_prog_info without libbpf support.\n");
+ return -1;
+#endif // HAVE_LIBBPF_SUPPORT
}
-static int write_bpf_btf(struct feat_fd *ff,
+static int write_bpf_btf(struct feat_fd *ff __maybe_unused,
struct evlist *evlist __maybe_unused)
{
+#ifdef HAVE_LIBBPF_SUPPORT
struct perf_env *env = &ff->ph->env;
struct rb_root *root;
struct rb_node *next;
@@ -1100,8 +1118,11 @@ static int write_bpf_btf(struct feat_fd *ff,
out:
up_read(&env->bpf_progs.lock);
return ret;
-}
+#else
+ pr_err("ERROR: Trying to write btf data without libbpf support.\n");
+ return -1;
#endif // HAVE_LIBBPF_SUPPORT
+}
static int cpu_cache_level__sort(const void *a, const void *b)
{
@@ -1304,6 +1325,19 @@ out:
return ret;
}
+static int write_cln_size(struct feat_fd *ff,
+ struct evlist *evlist __maybe_unused)
+{
+ int cln_size = cacheline_size();
+
+ if (!cln_size)
+ cln_size = DEFAULT_CACHELINE_SIZE;
+
+ ff->ph->env.cln_size = cln_size;
+
+ return do_write(ff, &cln_size, sizeof(cln_size));
+}
+
static int write_stat(struct feat_fd *ff __maybe_unused,
struct evlist *evlist __maybe_unused)
{
@@ -1980,9 +2014,9 @@ static void print_dir_format(struct feat_fd *ff, FILE *fp)
fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
}
-#ifdef HAVE_LIBBPF_SUPPORT
-static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
+static void print_bpf_prog_info(struct feat_fd *ff __maybe_unused, FILE *fp)
{
+#ifdef HAVE_LIBBPF_SUPPORT
struct perf_env *env = &ff->ph->env;
struct rb_root *root;
struct rb_node *next;
@@ -1993,7 +2027,7 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
next = rb_first(root);
if (!next)
- printf("# bpf_prog_info empty\n");
+ fprintf(fp, "# bpf_prog_info empty\n");
while (next) {
struct bpf_prog_info_node *node;
@@ -2006,10 +2040,14 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
}
up_read(&env->bpf_progs.lock);
+#else
+ fprintf(fp, "# bpf_prog_info missing, no libbpf support\n");
+#endif // HAVE_LIBBPF_SUPPORT
}
-static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
+static void print_bpf_btf(struct feat_fd *ff __maybe_unused, FILE *fp)
{
+#ifdef HAVE_LIBBPF_SUPPORT
struct perf_env *env = &ff->ph->env;
struct rb_root *root;
struct rb_node *next;
@@ -2031,8 +2069,10 @@ static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
}
up_read(&env->bpf_progs.lock);
-}
+#else
+ fprintf(fp, "# bpf btf data missing, no libbpf support\n");
#endif // HAVE_LIBBPF_SUPPORT
+}
static void free_event_desc(struct evsel *events)
{
@@ -2261,6 +2301,11 @@ static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
}
}
+static void print_cln_size(struct feat_fd *ff, FILE *fp)
+{
+ fprintf(fp, "# cacheline size: %u\n", ff->ph->env.cln_size);
+}
+
static void print_compressed(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
@@ -2545,6 +2590,11 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
perf_event_header__bswap(&old_bev.header);
len = old_bev.header.size - sizeof(old_bev);
+ if (len < 0 || len >= PATH_MAX) {
+ pr_warning("invalid build_id filename length %zd\n", len);
+ return -1;
+ }
+
if (readn(input, filename, len) != len)
return -1;
@@ -2587,6 +2637,11 @@ static int perf_header__read_build_ids(struct perf_header *header,
perf_event_header__bswap(&bev.header);
len = bev.header.size - sizeof(bev);
+ if (len < 0 || len >= PATH_MAX) {
+ pr_warning("invalid build_id filename length %zd\n", len);
+ goto out;
+ }
+
if (readn(input, filename, len) != len)
goto out;
/*
@@ -2644,14 +2699,17 @@ static int process_e_machine(struct feat_fd *ff, void *data __maybe_unused)
return do_read_u32(ff, &ff->ph->env.e_flags);
}
-#ifdef HAVE_LIBTRACEEVENT
-static int process_tracing_data(struct feat_fd *ff, void *data)
+static int process_tracing_data(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
{
+#ifdef HAVE_LIBTRACEEVENT
ssize_t ret = trace_report(ff->fd, data, false);
return ret < 0 ? -1 : 0;
-}
+#else
+ pr_err("ERROR: Trying to read tracing data without libtraceevent support.\n");
+ return -1;
#endif
+}
static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
{
@@ -2673,6 +2731,13 @@ static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
ret = do_read_u32(ff, &nr_cpus_online);
if (ret)
return ret;
+
+ if (nr_cpus_online > nr_cpus_avail) {
+ pr_err("Invalid HEADER_NRCPUS: nr_cpus_online (%u) > nr_cpus_avail (%u)\n",
+ nr_cpus_online, nr_cpus_avail);
+ return -1;
+ }
+
env->nr_cpus_avail = (int)nr_cpus_avail;
env->nr_cpus_online = (int)nr_cpus_online;
return 0;
@@ -2746,6 +2811,12 @@ process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
return 0;
}
+/*
+ * Some arbitrary max for the number of command line arguments,
+ * Wildcards can expand and end up with tons of command line args.
+ */
+#define MAX_CMDLINE_NR 1048576
+
static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
{
struct perf_env *env = &ff->ph->env;
@@ -2755,13 +2826,16 @@ static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
if (do_read_u32(ff, &nr))
return -1;
+ if (nr > MAX_CMDLINE_NR)
+ return -1;
+
env->nr_cmdline = nr;
cmdline = zalloc(ff->size + nr + 1);
if (!cmdline)
return -1;
- argv = zalloc(sizeof(char *) * (nr + 1));
+ argv = calloc(nr + 1, sizeof(char *));
if (!argv)
goto error;
@@ -2794,6 +2868,11 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
int cpu_nr = env->nr_cpus_avail;
u64 size = 0;
+ if (cpu_nr == 0) {
+ pr_err("Invalid HEADER_CPU_TOPOLOGY: missing HEADER_NRCPUS\n");
+ return -1;
+ }
+
env->cpu = calloc(cpu_nr, sizeof(*env->cpu));
if (!env->cpu)
return -1;
@@ -2801,6 +2880,12 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
if (do_read_u32(ff, &nr))
goto free_cpu;
+ if (nr > (u32)cpu_nr) {
+ pr_err("Invalid HEADER_CPU_TOPOLOGY: nr_sibling_cores (%u) > nr_cpus_avail (%d)\n",
+ nr, cpu_nr);
+ goto free_cpu;
+ }
+
env->nr_sibling_cores = nr;
size += sizeof(u32);
if (strbuf_init(&sb, 128) < 0)
@@ -2820,7 +2905,13 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
env->sibling_cores = strbuf_detach(&sb, NULL);
if (do_read_u32(ff, &nr))
- return -1;
+ goto free_cpu;
+
+ if (nr > (u32)cpu_nr) {
+ pr_err("Invalid HEADER_CPU_TOPOLOGY: nr_sibling_threads (%u) > nr_cpus_avail (%d)\n",
+ nr, cpu_nr);
+ goto free_cpu;
+ }
env->nr_sibling_threads = nr;
size += sizeof(u32);
@@ -2869,7 +2960,13 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
return 0;
if (do_read_u32(ff, &nr))
- return -1;
+ goto free_cpu;
+
+ if (nr > (u32)cpu_nr) {
+ pr_err("Invalid HEADER_CPU_TOPOLOGY: nr_sibling_dies (%u) > nr_cpus_avail (%d)\n",
+ nr, cpu_nr);
+ goto free_cpu;
+ }
env->nr_sibling_dies = nr;
size += sizeof(u32);
@@ -2915,7 +3012,19 @@ static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
if (do_read_u32(ff, &nr))
return -1;
- nodes = zalloc(sizeof(*nodes) * nr);
+ if (nr > MAX_NUMA_NODES) {
+ pr_err("Invalid HEADER_NUMA_TOPOLOGY: nr_nodes (%u) > %u\n",
+ nr, MAX_NUMA_NODES);
+ return -1;
+ }
+
+ if (ff->size < sizeof(u32) + nr * (sizeof(u32) + 2 * sizeof(u64))) {
+ pr_err("Invalid HEADER_NUMA_TOPOLOGY: section too small (%zu) for %u nodes\n",
+ ff->size, nr);
+ return -1;
+ }
+
+ nodes = calloc(nr, sizeof(*nodes));
if (!nodes)
return -ENOMEM;
@@ -2966,6 +3075,18 @@ static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
return 0;
}
+ if (pmu_num > MAX_PMU_MAPPINGS) {
+ pr_err("Invalid HEADER_PMU_MAPPINGS: pmu_num (%u) > %u\n",
+ pmu_num, MAX_PMU_MAPPINGS);
+ return -1;
+ }
+
+ if (ff->size < sizeof(u32) + pmu_num * 2 * sizeof(u32)) {
+ pr_err("Invalid HEADER_PMU_MAPPINGS: section too small (%zu) for %u PMUs\n",
+ ff->size, pmu_num);
+ return -1;
+ }
+
env->nr_pmu_mappings = pmu_num;
if (strbuf_init(&sb, 128) < 0)
return -1;
@@ -3016,12 +3137,25 @@ static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
if (do_read_u32(ff, &nr_groups))
return -1;
- env->nr_groups = nr_groups;
if (!nr_groups) {
pr_debug("group desc not available\n");
return 0;
}
+ if (nr_groups > MAX_GROUP_DESC) {
+ pr_err("Invalid HEADER_GROUP_DESC: nr_groups (%u) > %u\n",
+ nr_groups, MAX_GROUP_DESC);
+ return -1;
+ }
+
+ if (ff->size < sizeof(u32) + nr_groups * 3 * sizeof(u32)) {
+ pr_err("Invalid HEADER_GROUP_DESC: section too small (%zu) for %u groups\n",
+ ff->size, nr_groups);
+ return -1;
+ }
+
+ env->nr_groups = nr_groups;
+
desc = calloc(nr_groups, sizeof(*desc));
if (!desc)
return -1;
@@ -3113,7 +3247,19 @@ static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
if (do_read_u32(ff, &cnt))
return -1;
- caches = zalloc(sizeof(*caches) * cnt);
+ if (cnt > MAX_CACHE_ENTRIES) {
+ pr_err("Invalid HEADER_CACHE: cnt (%u) > %u\n",
+ cnt, MAX_CACHE_ENTRIES);
+ return -1;
+ }
+
+ if (ff->size < 2 * sizeof(u32) + cnt * 7 * sizeof(u32)) {
+ pr_err("Invalid HEADER_CACHE: section too small (%zu) for %u entries\n",
+ ff->size, cnt);
+ return -1;
+ }
+
+ caches = calloc(cnt, sizeof(*caches));
if (!caches)
return -1;
@@ -3154,6 +3300,16 @@ out_free_caches:
return -1;
}
+static int process_cln_size(struct feat_fd *ff, void *data __maybe_unused)
+{
+ struct perf_env *env = &ff->ph->env;
+
+ if (do_read_u32(ff, &env->cln_size))
+ return -1;
+
+ return 0;
+}
+
static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
{
struct perf_session *session;
@@ -3195,7 +3351,19 @@ static int process_mem_topology(struct feat_fd *ff,
if (do_read_u64(ff, &nr))
return -1;
- nodes = zalloc(sizeof(*nodes) * nr);
+ if (nr > MAX_NUMA_NODES) {
+ pr_err("Invalid HEADER_MEM_TOPOLOGY: nr_nodes (%llu) > %u\n",
+ (unsigned long long)nr, MAX_NUMA_NODES);
+ return -1;
+ }
+
+ if (ff->size < 3 * sizeof(u64) + nr * 2 * sizeof(u64)) {
+ pr_err("Invalid HEADER_MEM_TOPOLOGY: section too small (%zu) for %llu nodes\n",
+ ff->size, (unsigned long long)nr);
+ return -1;
+ }
+
+ nodes = calloc(nr, sizeof(*nodes));
if (!nodes)
return -1;
@@ -3285,7 +3453,19 @@ static int process_hybrid_topology(struct feat_fd *ff,
if (do_read_u32(ff, &nr))
return -1;
- nodes = zalloc(sizeof(*nodes) * nr);
+ if (nr > MAX_PMU_MAPPINGS) {
+ pr_err("Invalid HEADER_HYBRID_TOPOLOGY: nr_nodes (%u) > %u\n",
+ nr, MAX_PMU_MAPPINGS);
+ return -1;
+ }
+
+ if (ff->size < sizeof(u32) + nr * 2 * sizeof(u32)) {
+ pr_err("Invalid HEADER_HYBRID_TOPOLOGY: section too small (%zu) for %u nodes\n",
+ ff->size, nr);
+ return -1;
+ }
+
+ nodes = calloc(nr, sizeof(*nodes));
if (!nodes)
return -ENOMEM;
@@ -3330,9 +3510,9 @@ static int process_dir_format(struct feat_fd *ff,
return do_read_u64(ff, &data->dir.version);
}
-#ifdef HAVE_LIBBPF_SUPPORT
-static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
+static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
{
+#ifdef HAVE_LIBBPF_SUPPORT
struct bpf_prog_info_node *info_node;
struct perf_env *env = &ff->ph->env;
struct perf_bpil *info_linear;
@@ -3347,6 +3527,18 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
if (do_read_u32(ff, &count))
return -1;
+ if (count > MAX_BPF_PROGS) {
+ pr_err("Invalid HEADER_BPF_PROG_INFO: count (%u) > %u\n",
+ count, MAX_BPF_PROGS);
+ return -1;
+ }
+
+ if (ff->size < sizeof(u32) + count * (2 * sizeof(u32) + sizeof(u64))) {
+ pr_err("Invalid HEADER_BPF_PROG_INFO: section too small (%zu) for %u entries\n",
+ ff->size, count);
+ return -1;
+ }
+
down_write(&env->bpf_progs.lock);
for (i = 0; i < count; ++i) {
@@ -3364,6 +3556,12 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
goto out;
}
+ if (data_len > MAX_BPF_DATA_LEN) {
+ pr_warning("Invalid HEADER_BPF_PROG_INFO: data_len (%u) too large\n",
+ data_len);
+ goto out;
+ }
+
info_linear = malloc(sizeof(struct perf_bpil) +
data_len);
if (!info_linear)
@@ -3402,10 +3600,15 @@ out:
free(info_node);
up_write(&env->bpf_progs.lock);
return err;
+#else
+ pr_err("ERROR: Trying to read bpf_prog_info without libbpf support.\n");
+ return -1;
+#endif // HAVE_LIBBPF_SUPPORT
}
-static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
+static int process_bpf_btf(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
{
+#ifdef HAVE_LIBBPF_SUPPORT
struct perf_env *env = &ff->ph->env;
struct btf_node *node = NULL;
u32 count, i;
@@ -3419,6 +3622,17 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
if (do_read_u32(ff, &count))
return -1;
+ if (count > MAX_BPF_PROGS) {
+ pr_err("bpf btf count %u too large (max %u)\n", count, MAX_BPF_PROGS);
+ return -1;
+ }
+
+ if (ff->size < sizeof(u32) + count * 2 * sizeof(u32)) {
+ pr_err("Invalid HEADER_BPF_BTF: section too small (%zu) for %u entries\n",
+ ff->size, count);
+ return -1;
+ }
+
down_write(&env->bpf_progs.lock);
for (i = 0; i < count; ++i) {
@@ -3429,6 +3643,12 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
if (do_read_u32(ff, &data_size))
goto out;
+ if (data_size > MAX_BPF_DATA_LEN) {
+ pr_err("bpf btf data size %u too large (max %u)\n",
+ data_size, MAX_BPF_DATA_LEN);
+ goto out;
+ }
+
node = malloc(sizeof(struct btf_node) + data_size);
if (!node)
goto out;
@@ -3449,8 +3669,11 @@ out:
up_write(&env->bpf_progs.lock);
free(node);
return err;
-}
+#else
+ pr_err("ERROR: Trying to read btf data without libbpf support.\n");
+ return -1;
#endif // HAVE_LIBBPF_SUPPORT
+}
static int process_compressed(struct feat_fd *ff,
void *data __maybe_unused)
@@ -3492,7 +3715,13 @@ static int __process_pmu_caps(struct feat_fd *ff, int *nr_caps,
if (!nr_pmu_caps)
return 0;
- *caps = zalloc(sizeof(char *) * nr_pmu_caps);
+ if (nr_pmu_caps > MAX_PMU_CAPS) {
+ pr_err("Invalid pmu caps: nr_pmu_caps (%u) > %u\n",
+ nr_pmu_caps, MAX_PMU_CAPS);
+ return -1;
+ }
+
+ *caps = calloc(nr_pmu_caps, sizeof(char *));
if (!*caps)
return -1;
@@ -3569,7 +3798,19 @@ static int process_pmu_caps(struct feat_fd *ff, void *data __maybe_unused)
return 0;
}
- pmu_caps = zalloc(sizeof(*pmu_caps) * nr_pmu);
+ if (nr_pmu > MAX_PMU_MAPPINGS) {
+ pr_err("Invalid HEADER_PMU_CAPS: nr_pmu (%u) > %u\n",
+ nr_pmu, MAX_PMU_MAPPINGS);
+ return -1;
+ }
+
+ if (ff->size < sizeof(u32) + nr_pmu * sizeof(u32)) {
+ pr_err("Invalid HEADER_PMU_CAPS: section too small (%zu) for %u PMUs\n",
+ ff->size, nr_pmu);
+ return -1;
+ }
+
+ pmu_caps = calloc(nr_pmu, sizeof(*pmu_caps));
if (!pmu_caps)
return -ENOMEM;
@@ -3622,7 +3863,18 @@ static int process_cpu_domain_info(struct feat_fd *ff, void *data __maybe_unused
nra = env->nr_cpus_avail;
nr = env->nr_cpus_online;
- cd_map = zalloc(sizeof(*cd_map) * nra);
+ if (nra == 0 || nr == 0) {
+ pr_err("Invalid HEADER_CPU_DOMAIN_INFO: missing HEADER_NRCPUS\n");
+ return -1;
+ }
+
+ if (ff->size < 2 * sizeof(u32) + nr * 2 * sizeof(u32)) {
+ pr_err("Invalid HEADER_CPU_DOMAIN_INFO: section too small (%zu) for %u CPUs\n",
+ (size_t)ff->size, nr);
+ return -1;
+ }
+
+ cd_map = calloc(nra, sizeof(*cd_map));
if (!cd_map)
return -1;
@@ -3638,12 +3890,34 @@ static int process_cpu_domain_info(struct feat_fd *ff, void *data __maybe_unused
if (ret)
return ret;
+ /*
+ * Sanity check: real systems have at most ~10 sched domain levels
+ * (SMT, CLS, MC, PKG + NUMA hops). Reject obviously bogus values
+ * from malformed perf.data files before they cause excessive
+ * allocation in the per-CPU loop.
+ */
+ if (max_sched_domains > MAX_SCHED_DOMAINS) {
+ pr_err("Invalid HEADER_CPU_DOMAIN_INFO: max_sched_domains %u > %u\n",
+ max_sched_domains, MAX_SCHED_DOMAINS);
+ return -1;
+ }
+
env->max_sched_domains = max_sched_domains;
for (i = 0; i < nr; i++) {
if (do_read_u32(ff, &cpu))
return -1;
+ if (cpu >= nra) {
+ pr_err("Invalid HEADER_CPU_DOMAIN_INFO: cpu %d >= nr_cpus_avail (%d)\n", cpu, nra);
+ return -1;
+ }
+
+ if (cd_map[cpu]) {
+ pr_err("Invalid HEADER_CPU_DOMAIN_INFO: duplicate cpu %u\n", cpu);
+ return -1;
+ }
+
cd_map[cpu] = zalloc(sizeof(*cd_map[cpu]));
if (!cd_map[cpu])
return -1;
@@ -3653,9 +3927,15 @@ static int process_cpu_domain_info(struct feat_fd *ff, void *data __maybe_unused
if (do_read_u32(ff, &nr_domains))
return -1;
+ if (nr_domains > max_sched_domains) {
+ pr_err("Invalid HEADER_CPU_DOMAIN_INFO: nr_domains %u > max_sched_domains (%u)\n",
+ nr_domains, max_sched_domains);
+ return -1;
+ }
+
cd_map[cpu]->nr_domains = nr_domains;
- cd_map[cpu]->domains = zalloc(sizeof(*d_info) * max_sched_domains);
+ cd_map[cpu]->domains = calloc(max_sched_domains, sizeof(*d_info));
if (!cd_map[cpu]->domains)
return -1;
@@ -3663,11 +3943,23 @@ static int process_cpu_domain_info(struct feat_fd *ff, void *data __maybe_unused
if (do_read_u32(ff, &domain))
return -1;
+ if (domain >= max_sched_domains) {
+ pr_err("Invalid HEADER_CPU_DOMAIN_INFO: domain %d >= max_sched_domains (%d)\n",
+ domain, max_sched_domains);
+ return -1;
+ }
+
d_info = zalloc(sizeof(*d_info));
if (!d_info)
return -1;
- assert(cd_map[cpu]->domains[domain] == NULL);
+ if (cd_map[cpu]->domains[domain]) {
+ pr_err("Invalid HEADER_CPU_DOMAIN_INFO: duplicate domain %u for cpu %u\n",
+ domain, cpu);
+ free(d_info);
+ return -1;
+ }
+
cd_map[cpu]->domains[domain] = d_info;
d_info->domain = domain;
@@ -3726,9 +4018,7 @@ static int process_cpu_domain_info(struct feat_fd *ff, void *data __maybe_unused
const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
-#ifdef HAVE_LIBTRACEEVENT
FEAT_OPN(TRACING_DATA, tracing_data, false),
-#endif
FEAT_OPN(BUILD_ID, build_id, false),
FEAT_OPR(HOSTNAME, hostname, false),
FEAT_OPR(OSRELEASE, osrelease, false),
@@ -3752,10 +4042,8 @@ const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
FEAT_OPR(CLOCKID, clockid, false),
FEAT_OPN(DIR_FORMAT, dir_format, false),
-#ifdef HAVE_LIBBPF_SUPPORT
FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
FEAT_OPR(BPF_BTF, bpf_btf, false),
-#endif
FEAT_OPR(COMPRESSED, compressed, false),
FEAT_OPR(CPU_PMU_CAPS, cpu_pmu_caps, false),
FEAT_OPR(CLOCK_DATA, clock_data, false),
@@ -3763,6 +4051,7 @@ const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPR(PMU_CAPS, pmu_caps, false),
FEAT_OPR(CPU_DOMAIN_INFO, cpu_domain_info, true),
FEAT_OPR(E_MACHINE, e_machine, false),
+ FEAT_OPR(CLN_SIZE, cln_size, false),
};
struct header_print_data {
@@ -3770,6 +4059,13 @@ struct header_print_data {
bool full; /* extended list of headers */
};
+const char *header_feat__name(unsigned int id)
+{
+ if (id < HEADER_LAST_FEATURE)
+ return feat_ops[id].name ?: "INVALID";
+ return "INVALID";
+}
+
static int perf_file_section__fprintf_info(struct perf_file_section *section,
struct perf_header *ph,
int feat, int fd, void *data)
@@ -3778,11 +4074,11 @@ static int perf_file_section__fprintf_info(struct perf_file_section *section,
struct feat_fd ff;
if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
- pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
- "%d, continuing...\n", section->offset, feat);
+ pr_debug("Failed to lseek to %" PRIu64 " offset for feature %s (%d), continuing...\n",
+ section->offset, header_feat__name(feat), feat);
return 0;
}
- if (feat >= HEADER_LAST_FEATURE) {
+ if (feat >= ph->last_feat) {
pr_warning("unknown feature %d\n", feat);
return 0;
}
@@ -3834,7 +4130,7 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
return 0;
fprintf(fp, "# missing features: ");
- for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
+ for_each_clear_bit(bit, header->adds_features, header->last_feat) {
if (bit)
fprintf(fp, "%s ", feat_ops[bit].name);
}
@@ -4164,7 +4460,7 @@ int perf_header__process_sections(struct perf_header *header, int fd,
if (err < 0)
goto out_free;
- for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
+ for_each_set_bit(feat, header->adds_features, header->last_feat) {
err = process(sec++, header, feat, fd, data);
if (err < 0)
goto out_free;
@@ -4379,6 +4675,7 @@ int perf_file_header__read(struct perf_file_header *header,
ph->data_offset = header->data.offset;
ph->data_size = header->data.size;
ph->feat_offset = header->data.offset + header->data.size;
+ ph->last_feat = HEADER_LAST_FEATURE;
return 0;
}
@@ -4394,8 +4691,8 @@ static int perf_file_section__process(struct perf_file_section *section,
};
if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
- pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
- "%d, continuing...\n", section->offset, feat);
+ pr_debug("Failed to lseek to %" PRIu64 " offset for feature %s (%d), continuing...\n",
+ section->offset, header_feat__name(feat), feat);
return 0;
}
@@ -4428,6 +4725,8 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
if (ph->needs_swap)
header->size = bswap_64(header->size);
+ /* The last feature is written out as a 0 sized event and will update this value. */
+ ph->last_feat = 0;
return 0;
}
@@ -4660,31 +4959,68 @@ out_delete_evlist:
return -ENOMEM;
}
-int perf_event__process_feature(struct perf_session *session,
+int perf_event__process_feature(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session,
union perf_event *event)
{
struct feat_fd ff = { .fd = 0 };
struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
+ struct perf_header *header = &session->header;
int type = fe->header.type;
- u64 feat = fe->feat_id;
+ int feat = (int)fe->feat_id;
int ret = 0;
bool print = dump_trace;
+ bool last_feature_mark = false;
if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
pr_warning("invalid record type %d in pipe-mode\n", type);
return 0;
}
- if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
- pr_warning("invalid record type %d in pipe-mode\n", type);
+ if (feat == HEADER_RESERVED) {
+ pr_warning("invalid reserved record type in pipe-mode\n");
+ return -1;
+ }
+ if (feat < 0 || feat == INT_MAX) {
+ pr_warning("invalid value for feature type %x\n", feat);
+ return -1;
+ }
+ if (feat >= header->last_feat) {
+ if (event->header.size == sizeof(*fe)) {
+ /*
+ * Either an unexpected zero size feature or the
+ * HEADER_LAST_FEATURE mark.
+ */
+ if (feat > header->last_feat)
+ header->last_feat = min(feat, HEADER_LAST_FEATURE);
+ last_feature_mark = true;
+ } else {
+ /*
+ * A feature but beyond what is known as in
+ * bounds. Assume the last feature is 1 beyond this
+ * feature.
+ */
+ session->header.last_feat = min(feat + 1, HEADER_LAST_FEATURE);
+ }
+ }
+ if (feat >= HEADER_LAST_FEATURE) {
+ if (!last_feature_mark) {
+ pr_warning("unknown feature %d for data file version (%s) in this version of perf (%s)\n",
+ feat, header->env.version, perf_version_string);
+ }
+ return 0;
+ }
+ if (event->header.size < sizeof(*fe)) {
+ pr_warning("feature header size too small\n");
return -1;
}
-
ff.buf = (void *)fe->data;
ff.size = event->header.size - sizeof(*fe);
- ff.ph = &session->header;
+ ff.ph = header;
if (feat_ops[feat].process && feat_ops[feat].process(&ff, NULL)) {
- ret = -1;
+ // Processing failed, ignore when this is the last feature mark.
+ if (!last_feature_mark)
+ ret = -1;
goto out;
}
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index cc40ac796f52..86b1a72026d3 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -55,6 +55,7 @@ enum {
HEADER_PMU_CAPS,
HEADER_CPU_DOMAIN_INFO,
HEADER_E_MACHINE,
+ HEADER_CLN_SIZE,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
@@ -109,6 +110,7 @@ struct perf_header {
u64 data_size;
u64 feat_offset;
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
+ int last_feat;
struct perf_env env;
};
@@ -132,6 +134,8 @@ struct perf_header_feature_ops {
extern const char perf_version_string[];
+const char *header_feat__name(unsigned int id);
+
int perf_session__read_header(struct perf_session *session);
int perf_session__write_header(struct perf_session *session,
struct evlist *evlist,
@@ -170,7 +174,8 @@ int perf_header__process_sections(struct perf_header *header, int fd,
int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full);
-int perf_event__process_feature(struct perf_session *session,
+int perf_event__process_feature(const struct perf_tool *tool,
+ struct perf_session *session,
union perf_event *event);
int perf_event__process_attr(const struct perf_tool *tool, union perf_event *event,
struct evlist **pevlist);
@@ -202,6 +207,8 @@ int write_padded(struct feat_fd *fd, const void *bf,
int build_caches_for_cpu(u32 cpu, struct cpu_cache_level caches[], u32 *cntp);
+#define DEFAULT_CACHELINE_SIZE 64
+
/*
* arch specific callback
*/
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 7ffaa3d9851b..747fdc455c80 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -110,6 +110,9 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
len = thread__comm_len(h->thread);
if (hists__new_col_len(hists, HISTC_COMM, len))
hists__set_col_len(hists, HISTC_THREAD, len + 8);
+ if (hists->hpp_list->comm_nodigit)
+ hists__new_col_len(hists, HISTC_COMM_NODIGIT,
+ (u16) sort__comm_nodigit_len(h));
if (h->ms.map) {
len = dso__name_len(map__dso(h->ms.map));
@@ -1148,7 +1151,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
* cumulated only one time to prevent entries more than 100%
* overhead.
*/
- he_cache = malloc(sizeof(*he_cache) * (cursor->nr + 1));
+ he_cache = calloc(cursor->nr + 1, sizeof(*he_cache));
if (he_cache == NULL)
return -ENOMEM;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 1d5ea632ca4e..d97a4efb9250 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -44,6 +44,7 @@ enum hist_column {
HISTC_THREAD,
HISTC_TGID,
HISTC_COMM,
+ HISTC_COMM_NODIGIT,
HISTC_CGROUP_ID,
HISTC_CGROUP,
HISTC_PARENT,
@@ -522,6 +523,7 @@ struct perf_hpp_list {
int socket;
int thread;
int comm;
+ int comm_nodigit;
};
extern struct perf_hpp_list perf_hpp_list;
diff --git a/tools/perf/util/intel-tpebs.c b/tools/perf/util/intel-tpebs.c
index 3c958d738ca6..8b615dc94e9e 100644
--- a/tools/perf/util/intel-tpebs.c
+++ b/tools/perf/util/intel-tpebs.c
@@ -22,7 +22,6 @@
#include "tool.h"
#include "cpumap.h"
#include "metricgroup.h"
-#include "stat.h"
#include <sys/stat.h>
#include <sys/file.h>
#include <errno.h>
@@ -217,15 +216,6 @@ static int process_sample_event(const struct perf_tool *tool __maybe_unused,
return 0;
}
-static int process_feature_event(const struct perf_tool *tool __maybe_unused,
- struct perf_session *session,
- union perf_event *event)
-{
- if (event->feat.feat_id < HEADER_LAST_FEATURE)
- return perf_event__process_feature(session, event);
- return 0;
-}
-
static void *__sample_reader(void *arg __maybe_unused)
{
struct perf_session *session;
@@ -238,7 +228,7 @@ static void *__sample_reader(void *arg __maybe_unused)
perf_tool__init(&tool, /*ordered_events=*/false);
tool.sample = process_sample_event;
- tool.feature = process_feature_event;
+ tool.feature = perf_event__process_feature;
tool.attr = perf_event__process_attr;
session = perf_session__new(&data, &tool);
diff --git a/tools/perf/util/libbfd.c b/tools/perf/util/libbfd.c
index 63ea3fb53e77..c1c12308cc12 100644
--- a/tools/perf/util/libbfd.c
+++ b/tools/perf/util/libbfd.c
@@ -233,7 +233,7 @@ int libbfd__addr2line(const char *dso_name, u64 addr,
}
if (a2l == NULL) {
- if (!symbol_conf.disable_add2line_warn)
+ if (!symbol_conf.addr2line_disable_warn)
pr_warning("addr2line_init failed for %s\n", dso_name);
return 0;
}
diff --git a/tools/perf/util/llvm.c b/tools/perf/util/llvm.c
index 0d126d233c01..a0deb742a733 100644
--- a/tools/perf/util/llvm.c
+++ b/tools/perf/util/llvm.c
@@ -153,11 +153,17 @@ int symbol__disassemble_llvm(const char *filename, struct symbol *sym,
/*get_op_info=*/NULL, symbol_lookup_callback);
} else {
char triplet[64];
+ const char *features = NULL;
scnprintf(triplet, sizeof(triplet), "%s-linux-gnu",
args->arch->name);
- disasm = LLVMCreateDisasm(triplet, &storage, /*tag_type=*/0,
- /*get_op_info=*/NULL, symbol_lookup_callback);
+ if (args->arch->id.e_machine == EM_AARCH64)
+ features = "+all";
+ disasm = LLVMCreateDisasmCPUFeatures(triplet, /*cpu=*/"",
+ features, &storage,
+ /*tag_type=*/0,
+ /*get_op_info=*/NULL,
+ symbol_lookup_callback);
}
if (disasm == NULL)
diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
index 4092211cff62..81a97ac34077 100644
--- a/tools/perf/util/maps.c
+++ b/tools/perf/util/maps.c
@@ -844,7 +844,6 @@ static int __maps__insert_sorted(struct maps *maps, unsigned int first_after_ind
static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
int err = 0;
- FILE *fp = debug_file();
unsigned int i, ni = INT_MAX; // Some gcc complain, but depends on maps_by_name...
if (!maps__maps_by_address_sorted(maps))
@@ -872,8 +871,8 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
dso__name(map__dso(new)));
} else if (verbose >= 2) {
pr_debug("overlapping maps:\n");
- map__fprintf(new, fp);
- map__fprintf(pos, fp);
+ map__fprintf(new, debug_file());
+ map__fprintf(pos, debug_file());
}
if (maps_by_name)
@@ -894,7 +893,7 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
map__set_end(before, map__start(new));
if (verbose >= 2 && !use_browser)
- map__fprintf(before, fp);
+ map__fprintf(before, debug_file());
}
if (map__end(new) < map__end(pos)) {
/* The new map isn't as long as the existing map. */
@@ -912,7 +911,7 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
map__map_ip(after, map__end(new)));
if (verbose >= 2 && !use_browser)
- map__fprintf(after, fp);
+ map__fprintf(after, debug_file());
}
/*
* If adding one entry, for `before` or `after`, we can replace
@@ -956,6 +955,7 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
if (maps_by_name) {
map__put(maps_by_name[ni]);
maps_by_name[ni] = map__get(new);
+ maps__set_maps_by_name_sorted(maps, false);
}
err = __maps__insert_sorted(maps, i + 1, after, NULL);
@@ -982,6 +982,7 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
if (maps_by_name) {
map__put(maps_by_name[ni]);
maps_by_name[ni] = map__get(new);
+ maps__set_maps_by_name_sorted(maps, false);
}
check_invariants(maps);
@@ -1080,16 +1081,9 @@ int maps__copy_from(struct maps *dest, struct maps *parent)
map__put(new);
}
maps__set_maps_by_address_sorted(dest, maps__maps_by_address_sorted(parent));
- if (!err) {
- RC_CHK_ACCESS(dest)->last_search_by_name_idx =
- RC_CHK_ACCESS(parent)->last_search_by_name_idx;
- maps__set_maps_by_name_sorted(dest,
- dest_maps_by_name &&
- maps__maps_by_name_sorted(parent));
- } else {
- RC_CHK_ACCESS(dest)->last_search_by_name_idx = 0;
- maps__set_maps_by_name_sorted(dest, false);
- }
+ RC_CHK_ACCESS(dest)->last_search_by_name_idx = 0;
+ /* Values were copied into the name array in address order. */
+ maps__set_maps_by_name_sorted(dest, false);
} else {
/* Unexpected copying to a maps containing entries. */
for (unsigned int i = 0; !err && i < n; i++) {
diff --git a/tools/perf/util/mem2node.c b/tools/perf/util/mem2node.c
index 03a7d7b27737..51a2292cbf7e 100644
--- a/tools/perf/util/mem2node.c
+++ b/tools/perf/util/mem2node.c
@@ -59,7 +59,7 @@ int mem2node__init(struct mem2node *map, struct perf_env *env)
max += bitmap_weight(n->set, n->size);
}
- entries = zalloc(sizeof(*entries) * max);
+ entries = calloc(max, sizeof(*entries));
if (!entries)
return -ENOMEM;
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 7e39d469111b..4db9578efd81 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -387,8 +387,13 @@ static bool match_pm_metric_or_groups(const struct pmu_metric *pm, const char *p
const char *metric_or_groups)
{
const char *pm_pmu = pm->pmu ?: "cpu";
+ struct perf_pmu *perf_pmu = NULL;
- if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu))
+ if (pm->pmu)
+ perf_pmu = perf_pmus__find(pm->pmu);
+
+ if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu) &&
+ (perf_pmu && !perf_pmu__name_wildcard_match(perf_pmu, pmu)))
return false;
return match_metric_or_groups(pm->metric_group, metric_or_groups) ||
@@ -909,10 +914,9 @@ static int __add_metric(struct list_head *metric_list,
expr = metric_no_threshold ? pm->metric_name : pm->metric_threshold;
visited_node.name = "__threshold__";
}
- if (expr__find_ids(expr, NULL, root_metric->pctx) < 0) {
- /* Broken metric. */
- ret = -EINVAL;
- }
+
+ ret = expr__find_ids(expr, NULL, root_metric->pctx);
+
if (!ret) {
/* Resolve referenced metrics. */
struct perf_pmu *pmu;
@@ -1096,7 +1100,7 @@ static int metricgroup__add_metric(const char *pmu, const char *metric_name, con
*/
ret = metricgroup__for_each_metric(table, metricgroup__add_metric_callback, &data);
if (!ret && !data.has_match)
- ret = -EINVAL;
+ ret = -ENOENT;
/*
* add to metric_list so that they can be released
@@ -1147,6 +1151,8 @@ static int metricgroup__add_metric_list(const char *pmu, const char *list,
user_requested_cpu_list,
system_wide, metric_list, table);
if (ret == -EINVAL)
+ pr_err("Fail to parse metric or group `%s'\n", metric_name);
+ else if (ret == -ENOENT)
pr_err("Cannot find metric or group `%s'\n", metric_name);
if (ret)
@@ -1259,7 +1265,8 @@ err_out:
static int parse_ids(bool metric_no_merge, bool fake_pmu,
struct expr_parse_ctx *ids, const char *modifier,
bool group_events, const bool tool_events[TOOL_PMU__EVENT_MAX],
- struct evlist **out_evlist)
+ struct evlist **out_evlist,
+ const char *filter_pmu)
{
struct parse_events_error parse_error;
struct evlist *parsed_evlist;
@@ -1313,7 +1320,7 @@ static int parse_ids(bool metric_no_merge, bool fake_pmu,
}
pr_debug("Parsing metric events '%s'\n", events.buf);
parse_events_error__init(&parse_error);
- ret = __parse_events(parsed_evlist, events.buf, /*pmu_filter=*/NULL,
+ ret = __parse_events(parsed_evlist, events.buf, filter_pmu,
&parse_error, fake_pmu, /*warn_if_reordered=*/false,
/*fake_tp=*/false);
if (ret) {
@@ -1416,7 +1423,8 @@ static int parse_groups(struct evlist *perf_evlist,
/*modifier=*/NULL,
/*group_events=*/false,
tool_events,
- &combined_evlist);
+ &combined_evlist,
+ (pmu && strcmp(pmu, "all") == 0) ? NULL : pmu);
}
if (combined)
expr__ctx_free(combined);
@@ -1471,7 +1479,8 @@ static int parse_groups(struct evlist *perf_evlist,
}
if (!metric_evlist) {
ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
- m->group_events, tool_events, &m->evlist);
+ m->group_events, tool_events, &m->evlist,
+ (pmu && strcmp(pmu, "all") == 0) ? NULL : pmu);
if (ret)
goto out;
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index 8c62611f10aa..a5857f9f5af2 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -243,7 +243,7 @@ static int do_flush(struct ordered_events *oe, bool show_progress)
if (iter->timestamp > limit)
break;
ret = oe->deliver(oe, iter);
- if (ret)
+ if (ret < 0)
return ret;
ordered_events__delete(oe, iter);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 7b4629625b1e..1497e1f2a08c 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -429,7 +429,7 @@ bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
if (parse_state->pmu_filter == NULL)
return false;
- return strcmp(parse_state->pmu_filter, pmu->name) != 0;
+ return perf_pmu__wildcard_match(pmu, parse_state->pmu_filter) == 0;
}
static int parse_events_add_pmu(struct parse_events_state *parse_state,
diff --git a/tools/perf/util/perf-regs-arch/perf_regs_s390.c b/tools/perf/util/perf-regs-arch/perf_regs_s390.c
index c61df24edf0f..19f219225183 100644
--- a/tools/perf/util/perf-regs-arch/perf_regs_s390.c
+++ b/tools/perf/util/perf-regs-arch/perf_regs_s390.c
@@ -1,7 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <regex.h>
#include "../perf_regs.h"
#include "../../arch/s390/include/perf_regs.h"
+#include "debug.h"
+
+#include <linux/zalloc.h>
+#include <linux/kernel.h>
uint64_t __perf_reg_mask_s390(bool intr __maybe_unused)
{
@@ -95,3 +101,75 @@ uint64_t __perf_reg_sp_s390(void)
{
return PERF_REG_S390_R15;
}
+
+/* %rXX */
+#define SDT_OP_REGEX1 "^(%r([0-9]|1[0-5]))$"
+/* +-###(%rXX) */
+#define SDT_OP_REGEX2 "^([+-]?[0-9]+\\(%r([0-9]|1[0-5])\\))$"
+static regex_t sdt_op_regex1, sdt_op_regex2;
+
+static int sdt_init_op_regex(void)
+{
+ static int initialized;
+ int ret = 0;
+
+ if (initialized)
+ return 0;
+
+ ret = regcomp(&sdt_op_regex1, SDT_OP_REGEX1, REG_EXTENDED);
+ if (ret)
+ goto error;
+ initialized = 1;
+
+ ret = regcomp(&sdt_op_regex2, SDT_OP_REGEX2, REG_EXTENDED);
+ if (ret)
+ goto free_regex1;
+ initialized = 2;
+
+ return 0;
+
+free_regex1:
+ regfree(&sdt_op_regex1);
+error:
+ pr_debug4("Regex compilation error, initialized %d\n", initialized);
+ initialized = 0;
+ return ret;
+}
+
+/*
+ * Parse OP and convert it into uprobe format, which is, +/-NUM(%gprREG).
+ * Possible variants of OP are:
+ * Format Example
+ * -------------------------
+ * NUM(%rREG) 48(%r1)
+ * -NUM(%rREG) -48(%r1)
+ * +NUM(%rREG) +48(%r1)
+ * %rREG %r1
+ */
+int __perf_sdt_arg_parse_op_s390(char *old_op, char **new_op)
+{
+ int ret, new_len;
+ regmatch_t rm[6];
+
+ *new_op = NULL;
+ ret = sdt_init_op_regex();
+ if (ret)
+ return -EINVAL;
+
+ if (!regexec(&sdt_op_regex1, old_op, ARRAY_SIZE(rm), rm, 0) ||
+ !regexec(&sdt_op_regex2, old_op, ARRAY_SIZE(rm), rm, 0)) {
+ new_len = 1; /* NULL byte */
+ new_len += (int)(rm[1].rm_eo - rm[1].rm_so);
+ *new_op = zalloc(new_len);
+ if (!*new_op)
+ return -ENOMEM;
+
+ scnprintf(*new_op, new_len, "%.*s",
+ (int)(rm[1].rm_eo - rm[1].rm_so), old_op + rm[1].rm_so);
+ } else {
+ pr_debug4("Skipping unsupported SDT argument: %s\n", old_op);
+ return SDT_ARG_SKIP;
+ }
+
+ return SDT_ARG_VALID;
+}
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
index 5b8f34beb24e..f52b0e1f7fc7 100644
--- a/tools/perf/util/perf_regs.c
+++ b/tools/perf/util/perf_regs.c
@@ -23,6 +23,9 @@ int perf_sdt_arg_parse_op(uint16_t e_machine, char *old_op, char **new_op)
case EM_X86_64:
ret = __perf_sdt_arg_parse_op_x86(old_op, new_op);
break;
+ case EM_S390:
+ ret = __perf_sdt_arg_parse_op_s390(old_op, new_op);
+ break;
default:
pr_debug("Unknown ELF machine %d, standard arguments parse will be skipped.\n",
e_machine);
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index 7c04700bf837..573f0d1dfe04 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -62,6 +62,7 @@ uint64_t __perf_reg_mask_s390(bool intr);
const char *__perf_reg_name_s390(int id);
uint64_t __perf_reg_ip_s390(void);
uint64_t __perf_reg_sp_s390(void);
+int __perf_sdt_arg_parse_op_s390(char *old_op, char **new_op);
int __perf_sdt_arg_parse_op_x86(char *old_op, char **new_op);
uint64_t __perf_reg_mask_x86(bool intr);
diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c
index 98be2eb8f1f0..9a2023ceeefd 100644
--- a/tools/perf/util/pmus.c
+++ b/tools/perf/util/pmus.c
@@ -621,7 +621,7 @@ void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *p
while ((pmu = scan_fn(pmu)) != NULL)
len += perf_pmu__num_events(pmu);
- aliases = zalloc(sizeof(struct sevent) * len);
+ aliases = calloc(len, sizeof(struct sevent));
if (!aliases) {
pr_err("FATAL: not enough memory to print PMU events\n");
return;
diff --git a/tools/perf/util/powerpc-vpadtl.c b/tools/perf/util/powerpc-vpadtl.c
index d1c3396f182f..710f3093f3f9 100644
--- a/tools/perf/util/powerpc-vpadtl.c
+++ b/tools/perf/util/powerpc-vpadtl.c
@@ -4,6 +4,7 @@
*/
#include <linux/string.h>
+#include <linux/zalloc.h>
#include <errno.h>
#include <inttypes.h>
#include "color.h"
@@ -182,7 +183,9 @@ static int powerpc_vpadtl_sample(struct powerpc_vpadtl_entry *record,
{
struct perf_sample sample;
union perf_event event;
+ int ret;
+ perf_sample__init(&sample, /*all=*/true);
sample.ip = be64_to_cpu(record->srr0);
sample.period = 1;
sample.cpu = cpu;
@@ -198,12 +201,12 @@ static int powerpc_vpadtl_sample(struct powerpc_vpadtl_entry *record,
event.sample.header.misc = sample.cpumode;
event.sample.header.size = sizeof(struct perf_event_header);
- if (perf_session__deliver_synth_event(vpa->session, &event, &sample)) {
+ ret = perf_session__deliver_synth_event(vpa->session, &event, &sample);
+ if (ret)
pr_debug("Failed to create sample for dtl entry\n");
- return -1;
- }
- return 0;
+ perf_sample__exit(&sample);
+ return ret;
}
static int powerpc_vpadtl_get_buffer(struct powerpc_vpadtl_queue *vpaq)
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 710e4620923e..34b4badd2c14 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -11,7 +11,6 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
-#include <libgen.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
@@ -229,7 +228,7 @@ static int convert_exec_to_group(const char *exec, char **result)
if (!exec_copy)
return -ENOMEM;
- ptr1 = basename(exec_copy);
+ ptr1 = (char *)perf_basename(exec_copy);
if (!ptr1) {
ret = -EINVAL;
goto out;
@@ -1850,7 +1849,7 @@ int parse_perf_probe_command(const char *cmd, struct perf_probe_event *pev)
/* Copy arguments and ensure return probe has no C argument */
pev->nargs = argc - 1;
- pev->args = zalloc(sizeof(struct perf_probe_arg) * pev->nargs);
+ pev->args = calloc(pev->nargs, sizeof(struct perf_probe_arg));
if (pev->args == NULL) {
ret = -ENOMEM;
goto out;
@@ -2000,7 +1999,7 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev)
}
tev->nargs = argc - 2;
- tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
+ tev->args = calloc(tev->nargs, sizeof(struct probe_trace_arg));
if (tev->args == NULL) {
ret = -ENOMEM;
goto out;
@@ -2373,7 +2372,7 @@ static int convert_to_perf_probe_event(struct probe_trace_event *tev,
/* Convert trace_arg to probe_arg */
pev->nargs = tev->nargs;
- pev->args = zalloc(sizeof(struct perf_probe_arg) * pev->nargs);
+ pev->args = calloc(pev->nargs, sizeof(struct perf_probe_arg));
if (pev->args == NULL)
return -ENOMEM;
for (i = 0; i < tev->nargs && ret >= 0; i++) {
@@ -2480,7 +2479,7 @@ int perf_probe_event__copy(struct perf_probe_event *dst,
if (perf_probe_point__copy(&dst->point, &src->point) < 0)
goto out_err;
- dst->args = zalloc(sizeof(struct perf_probe_arg) * src->nargs);
+ dst->args = calloc(src->nargs, sizeof(struct perf_probe_arg));
if (!dst->args)
goto out_err;
dst->nargs = src->nargs;
@@ -3179,7 +3178,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
}
/* Setup result trace-probe-events */
- *tevs = zalloc(sizeof(*tev) * num_matched_functions);
+ *tevs = calloc(num_matched_functions, sizeof(*tev));
if (!*tevs) {
ret = -ENOMEM;
goto out;
@@ -3251,8 +3250,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
tev->uprobes = pev->uprobes;
tev->nargs = pev->nargs;
if (tev->nargs) {
- tev->args = zalloc(sizeof(struct probe_trace_arg) *
- tev->nargs);
+ tev->args = calloc(tev->nargs, sizeof(struct probe_trace_arg));
if (tev->args == NULL)
goto nomem_out;
}
@@ -3363,7 +3361,7 @@ static int try_to_find_absolute_address(struct perf_probe_event *pev,
}
tev->nargs = pev->nargs;
- tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
+ tev->args = calloc(tev->nargs, sizeof(struct probe_trace_arg));
if (!tev->args)
goto errout;
@@ -3549,7 +3547,7 @@ static int find_probe_trace_events_from_cache(struct perf_probe_event *pev,
goto out;
}
- *tevs = zalloc(ret * sizeof(*tev));
+ *tevs = calloc(ret, sizeof(*tev));
if (!*tevs) {
ret = -ENOMEM;
goto out;
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index f78c3bc3d601..4032572cbf55 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -414,7 +414,7 @@ int probe_cache_entry__get_event(struct probe_cache_entry *entry,
if (ret > probe_conf.max_probes)
return -E2BIG;
- *tevs = zalloc(ret * sizeof(*tev));
+ *tevs = calloc(ret, sizeof(*tev));
if (!*tevs)
return -ENOMEM;
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 5ffd97ee4898..64328abeef8b 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1305,7 +1305,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
tev->point.offset);
/* Expand special probe argument if exist */
- args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS);
+ args = calloc(MAX_PROBE_ARGS, sizeof(struct perf_probe_arg));
if (args == NULL) {
ret = -ENOMEM;
goto end;
@@ -1316,7 +1316,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
goto end;
tev->nargs = ret;
- tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
+ tev->args = calloc(tev->nargs, sizeof(struct probe_trace_arg));
if (tev->args == NULL) {
ret = -ENOMEM;
goto end;
@@ -1393,7 +1393,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
int ret, i;
/* Allocate result tevs array */
- *tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs);
+ *tevs = calloc(tf.max_tevs, sizeof(struct probe_trace_event));
if (*tevs == NULL)
return -ENOMEM;
@@ -1566,7 +1566,7 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
int ret;
/* Allocate result vls array */
- *vls = zalloc(sizeof(struct variable_list) * af.max_vls);
+ *vls = calloc(af.max_vls, sizeof(struct variable_list));
if (*vls == NULL)
return -ENOMEM;
diff --git a/tools/perf/util/sample.c b/tools/perf/util/sample.c
index 8f82aaf1aab6..cf73329326d7 100644
--- a/tools/perf/util/sample.c
+++ b/tools/perf/util/sample.c
@@ -19,15 +19,22 @@ void perf_sample__init(struct perf_sample *sample, bool all)
if (all) {
memset(sample, 0, sizeof(*sample));
} else {
+ sample->evsel = NULL;
sample->user_regs = NULL;
sample->intr_regs = NULL;
+ sample->merged_callchain = false;
+ sample->callchain = NULL;
}
}
void perf_sample__exit(struct perf_sample *sample)
{
- free(sample->user_regs);
- free(sample->intr_regs);
+ zfree(&sample->user_regs);
+ zfree(&sample->intr_regs);
+ if (sample->merged_callchain) {
+ zfree(&sample->callchain);
+ sample->merged_callchain = false;
+ }
}
struct regs_dump *perf_sample__user_regs(struct perf_sample *sample)
diff --git a/tools/perf/util/sample.h b/tools/perf/util/sample.h
index 3cce8dd202aa..e556c9b656ea 100644
--- a/tools/perf/util/sample.h
+++ b/tools/perf/util/sample.h
@@ -5,6 +5,7 @@
#include <linux/perf_event.h>
#include <linux/types.h>
+struct evsel;
struct machine;
struct thread;
@@ -70,58 +71,177 @@ struct aux_sample {
};
struct simd_flags {
- u8 arch:1, /* architecture (isa) */
- pred:2; /* predication */
+ u8 arch: 2, /* architecture (isa) */
+ pred: 3, /* predication */
+ resv: 3; /* reserved */
};
/* simd architecture flags */
-#define SIMD_OP_FLAGS_ARCH_SVE 0x01 /* ARM SVE */
+enum simd_op_flags {
+ SIMD_OP_FLAGS_ARCH_NONE = 0x0, /* No SIMD operation */
+ SIMD_OP_FLAGS_ARCH_SVE, /* Arm SVE */
+ SIMD_OP_FLAGS_ARCH_SME, /* Arm SME */
+ SIMD_OP_FLAGS_ARCH_ASE, /* Arm Advanced SIMD */
+};
/* simd predicate flags */
-#define SIMD_OP_FLAGS_PRED_PARTIAL 0x01 /* partial predicate */
-#define SIMD_OP_FLAGS_PRED_EMPTY 0x02 /* empty predicate */
+enum simd_pred_flags {
+ SIMD_OP_FLAGS_PRED_NONE = 0x0, /* Not available */
+ SIMD_OP_FLAGS_PRED_PARTIAL, /* partial predicate */
+ SIMD_OP_FLAGS_PRED_EMPTY, /* empty predicate */
+ SIMD_OP_FLAGS_PRED_FULL, /* full predicate */
+ SIMD_OP_FLAGS_PRED_DISABLED, /* disabled predicate */
+};
+/**
+ * struct perf_sample
+ *
+ * A sample is generally filled in by evlist__parse_sample/evsel__parse_sample
+ * which fills in the variables from a "union perf_event *event" which is data
+ * from a perf ring buffer or perf.data file. The "event" sample is variable in
+ * length as determined by the perf_event_attr (in the evsel) and details within
+ * the sample event itself. A struct perf_sample avoids needing to care about
+ * the variable length nature of the original event.
+ *
+ * To avoid being excessively large parts of the struct perf_sample are pointers
+ * into the original sample event. In general the lifetime of a struct
+ * perf_sample needs to be less than the "union perf_event *event" it was
+ * derived from.
+ *
+ * The struct regs_dump user_regs and intr_regs are lazily allocated again for
+ * size reasons, due to them holding a cache of looked up registers. The
+ * function pair of perf_sample__init and perf_sample__exit correctly initialize
+ * and clean up these values.
+ */
struct perf_sample {
+ /** @evsel: Backward reference to the evsel used when constructing the sample. */
+ struct evsel *evsel;
+ /** @ip: The sample event PERF_SAMPLE_IP value. */
u64 ip;
- u32 pid, tid;
+ /** @pid: The sample event PERF_SAMPLE_TID pid value. */
+ u32 pid;
+ /** @tid: The sample event PERF_SAMPLE_TID tid value. */
+ u32 tid;
+ /** @time: The sample event PERF_SAMPLE_TIME value. */
u64 time;
+ /** @addr: The sample event PERF_SAMPLE_ADDR value. */
u64 addr;
+ /** @id: The sample event PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER value. */
u64 id;
+ /** @stream_id: The sample event PERF_SAMPLE_STREAM_ID value. */
u64 stream_id;
+ /** @period: The sample event PERF_SAMPLE_PERIOD value. */
u64 period;
+ /** @weight: Data determined by PERF_SAMPLE_WEIGHT or PERF_SAMPLE_WEIGHT_STRUCT. */
u64 weight;
+ /** @transaction: The sample event PERF_SAMPLE_TRANSACTION value. */
u64 transaction;
+ /** @insn_cnt: Filled in and used by intel-pt. */
u64 insn_cnt;
+ /** @cyc_cnt: Filled in and used by intel-pt. */
u64 cyc_cnt;
+ /** @cpu: The sample event PERF_SAMPLE_CPU value. */
u32 cpu;
+ /**
+ * @raw_size: The size in bytes of raw data from PERF_SAMPLE_RAW. For
+ * alignment reasons this should always be sizeof(u32)
+ * followed by a multiple of sizeof(u64).
+ */
u32 raw_size;
+ /** @data_src: The sample event PERF_SAMPLE_DATA_SRC value. */
u64 data_src;
+ /** @phys_addr: The sample event PERF_SAMPLE_PHYS_ADDR value. */
u64 phys_addr;
+ /** @data_page_size: The sample event PERF_SAMPLE_DATA_PAGE_SIZE value. */
u64 data_page_size;
+ /** @code_page_size: The sample event PERF_SAMPLE_CODE_PAGE_SIZE value. */
u64 code_page_size;
+ /** @cgroup: The sample event PERF_SAMPLE_CGROUP value. */
u64 cgroup;
+ /** @flags: Extra flag data from auxiliary events like intel-pt. */
u32 flags;
+ /** @machine_pid: The guest machine pid derived from the sample id. */
u32 machine_pid;
+ /** @vcpu: The guest machine vcpu derived from the sample id. */
u32 vcpu;
+ /**
+ * @insn_len: Instruction length from auxiliary events like
+ * intel-pt. The instruction itself is held in insn.
+ */
u16 insn_len;
- u8 cpumode;
+ /** @misc: The entire struct perf_event_header misc variable. */
u16 misc;
+ /**
+ * @ins_lat: Instruction latency information from weight2 in
+ * PERF_SAMPLE_WEIGHT_STRUCT or auxiliary events like
+ * intel-pt.
+ */
u16 ins_lat;
- /** @weight3: On x86 holds retire_lat, on powerpc holds p_stage_cyc. */
+ /**
+ * @weight3: From PERF_SAMPLE_WEIGHT_STRUCT. On x86 holds retire_lat, on
+ * powerpc holds p_stage_cyc.
+ */
u16 weight3;
- bool no_hw_idx; /* No hw_idx collected in branch_stack */
- bool deferred_callchain; /* Has deferred user callchains */
+ /**
+ * @cpumode: The cpumode from struct perf_event_header misc variable
+ * masked with CPUMODE_MASK. Gives user, kernel and hypervisor
+ * information.
+ */
+ u8 cpumode;
+ /**
+ * @no_hw_idx: For PERF_SAMPLE_BRANCH_STACK, true when
+ * PERF_SAMPLE_BRANCH_HW_INDEX isn't set.
+ */
+ bool no_hw_idx;
+ /**
+ * @deferred_callchain: When processing PERF_SAMPLE_CALLCHAIN a deferred
+ * user callchain marker was encountered.
+ */
+ bool deferred_callchain;
+ /**
+ * @merged_callchain: A synthesized merged callchain that is allocated
+ * and needs freeing.
+ */
+ bool merged_callchain;
+ /**
+ * @deferred_cookie: Identifier of the deferred callchain in the later
+ * PERF_RECORD_CALLCHAIN_DEFERRED event.
+ */
u64 deferred_cookie;
+ /** @insn: A copy of the sampled instruction filled in by perf_sample__fetch_insn. */
char insn[MAX_INSN];
+ /** @raw_data: Pointer into the original event for PERF_SAMPLE_RAW data. */
void *raw_data;
+ /**
+ * @callchain: Pointer into the original event for PERF_SAMPLE_CALLCHAIN
+ * data. For deferred callchains this may be a copy that
+ * needs freeing, see sample__merge_deferred_callchain.
+ */
struct ip_callchain *callchain;
+ /** @branch_stack: Pointer into the original event for PERF_SAMPLE_BRANCH_STACK data. */
struct branch_stack *branch_stack;
+ /**
+ * @branch_stack_cntr: Pointer into the original event for
+ * PERF_SAMPLE_BRANCH_COUNTERS data.
+ */
u64 *branch_stack_cntr;
+ /** @user_regs: Values and pointers into the sample for PERF_SAMPLE_REGS_USER. */
struct regs_dump *user_regs;
+ /** @intr_regs: Values and pointers into the sample for PERF_SAMPLE_REGS_INTR. */
struct regs_dump *intr_regs;
+ /** @user_stack: Size and pointer into the sample for PERF_SAMPLE_STACK_USER. */
struct stack_dump user_stack;
+ /**
+ * @read: The sample event PERF_SAMPLE_READ counter values. The valid
+ * values depend on the attr.read_format PERF_FORMAT_ values.
+ */
struct sample_read read;
+ /**
+ * @aux_sample: Similar to raw data but with a 64-bit size and
+ * alignment, PERF_SAMPLE_AUX data.
+ */
struct aux_sample aux_sample;
+ /** @simd_flags: SIMD flag information from ARM SPE auxiliary events. */
struct simd_flags simd_flags;
};
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 2b0df7bd9a46..5a30caaec73e 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -1701,7 +1701,7 @@ static void python_process_stat(struct perf_stat_config *config,
struct perf_cpu_map *cpus = counter->core.cpus;
for (int thread = 0; thread < perf_thread_map__nr(threads); thread++) {
- int idx;
+ unsigned int idx;
struct perf_cpu cpu;
perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 4b465abfa36c..fe0de2a0277f 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -131,10 +131,17 @@ static int ordered_events__deliver_event(struct ordered_events *oe,
{
struct perf_session *session = container_of(oe, struct perf_session,
ordered_events);
+ int ret = perf_session__deliver_event(session, event->event,
+ session->tool, event->file_offset,
+ event->file_path);
- return perf_session__deliver_event(session, event->event,
- session->tool, event->file_offset,
- event->file_path);
+ if (ret) {
+ pr_err("%#" PRIx64 " [%#x]: ordered event processing failed (%d) for event of type: %s (%d)\n",
+ event->file_offset, event->event->header.size, ret,
+ perf_event__name(event->event->header.type),
+ event->event->header.type);
+ }
+ return ret;
}
struct perf_session *__perf_session__new(struct perf_data *data,
@@ -1257,8 +1264,9 @@ static int deliver_sample_value(struct evlist *evlist,
bool per_thread)
{
struct perf_sample_id *sid = evlist__id2sid(evlist, v->id);
- struct evsel *evsel;
+ struct evsel *saved_evsel = sample->evsel;
u64 *storage = NULL;
+ int ret;
if (sid) {
storage = perf_sample_id__get_period_storage(sid, sample->tid, per_thread);
@@ -1282,8 +1290,10 @@ static int deliver_sample_value(struct evlist *evlist,
if (!sample->period)
return 0;
- evsel = container_of(sid->evsel, struct evsel, core);
- return tool->sample(tool, event, sample, evsel, machine);
+ sample->evsel = container_of(sid->evsel, struct evsel, core);
+ ret = tool->sample(tool, event, sample, sample->evsel, machine);
+ sample->evsel = saved_evsel;
+ return ret;
}
static int deliver_sample_group(struct evlist *evlist,
@@ -1355,39 +1365,44 @@ static int evlist__deliver_deferred_callchain(struct evlist *evlist,
struct machine *machine)
{
struct deferred_event *de, *tmp;
- struct evsel *evsel;
int ret = 0;
if (!tool->merge_deferred_callchains) {
- evsel = evlist__id2evsel(evlist, sample->id);
- return tool->callchain_deferred(tool, event, sample,
- evsel, machine);
+ struct evsel *saved_evsel = sample->evsel;
+
+ sample->evsel = evlist__id2evsel(evlist, sample->id);
+ ret = tool->callchain_deferred(tool, event, sample,
+ sample->evsel, machine);
+ sample->evsel = saved_evsel;
+ return ret;
}
list_for_each_entry_safe(de, tmp, &evlist->deferred_samples, list) {
struct perf_sample orig_sample;
+ perf_sample__init(&orig_sample, /*all=*/false);
ret = evlist__parse_sample(evlist, de->event, &orig_sample);
if (ret < 0) {
pr_err("failed to parse original sample\n");
+ perf_sample__exit(&orig_sample);
break;
}
- if (sample->tid != orig_sample.tid)
+ if (sample->tid != orig_sample.tid) {
+ perf_sample__exit(&orig_sample);
continue;
+ }
if (event->callchain_deferred.cookie == orig_sample.deferred_cookie)
sample__merge_deferred_callchain(&orig_sample, sample);
else
orig_sample.deferred_callchain = false;
- evsel = evlist__id2evsel(evlist, orig_sample.id);
+ orig_sample.evsel = evlist__id2evsel(evlist, orig_sample.id);
ret = evlist__deliver_sample(evlist, tool, de->event,
- &orig_sample, evsel, machine);
-
- if (orig_sample.deferred_callchain)
- free(orig_sample.callchain);
+ &orig_sample, orig_sample.evsel, machine);
+ perf_sample__exit(&orig_sample);
list_del(&de->list);
free(de->event);
free(de);
@@ -1408,22 +1423,24 @@ static int session__flush_deferred_samples(struct perf_session *session,
struct evlist *evlist = session->evlist;
struct machine *machine = &session->machines.host;
struct deferred_event *de, *tmp;
- struct evsel *evsel;
int ret = 0;
list_for_each_entry_safe(de, tmp, &evlist->deferred_samples, list) {
struct perf_sample sample;
+ perf_sample__init(&sample, /*all=*/false);
ret = evlist__parse_sample(evlist, de->event, &sample);
if (ret < 0) {
pr_err("failed to parse original sample\n");
+ perf_sample__exit(&sample);
break;
}
- evsel = evlist__id2evsel(evlist, sample.id);
+ sample.evsel = evlist__id2evsel(evlist, sample.id);
ret = evlist__deliver_sample(evlist, tool, de->event,
- &sample, evsel, machine);
+ &sample, sample.evsel, machine);
+ perf_sample__exit(&sample);
list_del(&de->list);
free(de->event);
free(de);
@@ -1446,8 +1463,12 @@ static int machines__deliver_event(struct machines *machines,
dump_event(evlist, event, file_offset, sample, file_path);
- evsel = evlist__id2evsel(evlist, sample->id);
+ if (!sample->evsel)
+ sample->evsel = evlist__id2evsel(evlist, sample->id);
+ else
+ assert(sample->evsel == evlist__id2evsel(evlist, sample->id));
+ evsel = sample->evsel;
machine = machines__find_for_cpumode(machines, event, sample);
switch (event->header.type) {
@@ -2110,8 +2131,10 @@ more:
}
if ((skip = perf_session__process_event(session, event, head, "pipe")) < 0) {
- pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
- head, event->header.size, event->header.type);
+ pr_err("%#" PRIx64 " [%#x]: piped event processing failed for event of type: %s (%d)\n",
+ head, event->header.size,
+ perf_event__name(event->header.type),
+ event->header.type);
err = -EINVAL;
goto out_err;
}
@@ -2225,8 +2248,10 @@ static int __perf_session__process_decomp_events(struct perf_session *session)
if (size < sizeof(struct perf_event_header) ||
(skip = perf_session__process_event(session, event, decomp->file_pos,
decomp->file_path)) < 0) {
- pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
- decomp->file_pos + decomp->head, event->header.size, event->header.type);
+ pr_err("%#" PRIx64 " [%#x]: decompress event processing failed for event of type: %s (%d)\n",
+ decomp->file_pos + decomp->head, event->header.size,
+ perf_event__name(event->header.type),
+ event->header.type);
return -EINVAL;
}
@@ -2382,8 +2407,9 @@ reader__read_event(struct reader *rd, struct perf_session *session,
if (size < sizeof(struct perf_event_header) ||
(skip = rd->process(session, event, rd->file_pos, rd->path)) < 0) {
errno = -skip;
- pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%m]\n",
+ pr_err("%#" PRIx64 " [%#x]: processing failed for event of type: %s (%d) [%m]\n",
rd->file_offset + rd->head, event->header.size,
+ perf_event__name(event->header.type),
event->header.type);
err = skip;
goto out;
@@ -2533,7 +2559,7 @@ static int __perf_session__process_dir_events(struct perf_session *session)
nr_readers++;
}
- rd = zalloc(nr_readers * sizeof(struct reader));
+ rd = calloc(nr_readers, sizeof(struct reader));
if (!rd)
return -ENOMEM;
@@ -2557,7 +2583,7 @@ static int __perf_session__process_dir_events(struct perf_session *session)
if (!data->dir.files[i].size)
continue;
rd[readers] = (struct reader) {
- .fd = data->dir.files[i].fd,
+ .fd = perf_data_file__fd(&data->dir.files[i]),
.path = data->dir.files[i].path,
.data_size = data->dir.files[i].size,
.data_offset = 0,
@@ -2766,7 +2792,8 @@ struct evsel *perf_session__find_first_evtype(struct perf_session *session,
int perf_session__cpu_bitmap(struct perf_session *session,
const char *cpu_list, unsigned long *cpu_bitmap)
{
- int i, err = -1;
+ unsigned int i;
+ int err = -1;
struct perf_cpu_map *map;
int nr_cpus = min(perf_session__env(session)->nr_cpus_avail, MAX_NR_CPUS);
struct perf_cpu cpu;
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 42d5cd7ef4e2..0020089cb13c 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
@@ -30,6 +31,7 @@
#include "time-utils.h"
#include "cgroup.h"
#include "machine.h"
+#include "session.h"
#include "trace-event.h"
#include <linux/kernel.h>
#include <linux/string.h>
@@ -42,11 +44,11 @@ regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
-const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
+static const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
-const char default_top_sort_order[] = "dso,symbol";
-const char default_diff_sort_order[] = "dso,symbol";
-const char default_tracepoint_sort_order[] = "trace";
+static const char default_top_sort_order[] = "dso,symbol";
+static const char default_diff_sort_order[] = "dso,symbol";
+static const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
@@ -171,7 +173,7 @@ static int hist_entry__tgid_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%7d:%-*.*s", tgid, width, width, comm ?: "");
}
-struct sort_entry sort_tgid = {
+static struct sort_entry sort_tgid = {
.se_header = " Tgid:Command",
.se_cmp = sort__tgid_cmp,
.se_snprintf = hist_entry__tgid_snprintf,
@@ -193,8 +195,12 @@ static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
{
u64 arch = simd_flags->arch;
- if (arch & SIMD_OP_FLAGS_ARCH_SVE)
+ if (arch == SIMD_OP_FLAGS_ARCH_SVE)
return "SVE";
+ else if (arch == SIMD_OP_FLAGS_ARCH_SME)
+ return "SME";
+ else if (arch == SIMD_OP_FLAGS_ARCH_ASE)
+ return "ASE";
else
return "n/a";
}
@@ -203,21 +209,26 @@ static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width __maybe_unused)
{
const char *name;
+ const char *pred_str = ".";
if (!he->simd_flags.arch)
return repsep_snprintf(bf, size, "");
name = hist_entry__get_simd_name(&he->simd_flags);
- if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
- return repsep_snprintf(bf, size, "[e] %s", name);
- else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
- return repsep_snprintf(bf, size, "[p] %s", name);
+ if (he->simd_flags.pred == SIMD_OP_FLAGS_PRED_EMPTY)
+ pred_str = "e";
+ else if (he->simd_flags.pred == SIMD_OP_FLAGS_PRED_PARTIAL)
+ pred_str = "p";
+ else if (he->simd_flags.pred == SIMD_OP_FLAGS_PRED_DISABLED)
+ pred_str = "d";
+ else if (he->simd_flags.pred == SIMD_OP_FLAGS_PRED_FULL)
+ pred_str = "f";
- return repsep_snprintf(bf, size, "[.] %s", name);
+ return repsep_snprintf(bf, size, "[%s] %s", pred_str, name);
}
-struct sort_entry sort_simd = {
+static struct sort_entry sort_simd = {
.se_header = "Simd ",
.se_cmp = sort__simd_cmp,
.se_snprintf = hist_entry__simd_snprintf,
@@ -265,6 +276,115 @@ struct sort_entry sort_comm = {
.se_width_idx = HISTC_COMM,
};
+/* --sort comm_nodigit */
+
+size_t sort__comm_nodigit_len(struct hist_entry *entry)
+{
+ const char *comm = comm__str(entry->comm);
+ size_t index, len_nodigit = 0;
+ bool in_number = false;
+
+ if (!comm)
+ return 0;
+
+ for (index = 0; comm[index]; index++) {
+ if (!isdigit((unsigned char)comm[index])) {
+ in_number = false;
+ len_nodigit++;
+ } else if (!in_number) {
+ in_number = true;
+ len_nodigit += 3; /* <N> */
+ }
+ }
+
+ return len_nodigit;
+}
+
/*
 * Compare two strings while ignoring every decimal digit in both, so
 * e.g. "worker1" and "worker2" compare equal.  Returns the usual
 * negative/zero/positive ordering value of the first differing
 * non-digit characters.
 */
static int64_t strcmp_nodigit(const char *left, const char *right)
{
	const unsigned char *l = (const unsigned char *)left;
	const unsigned char *r = (const unsigned char *)right;

	for (;;) {
		/* Skip digit runs on both sides before comparing. */
		while (*l && isdigit(*l))
			l++;
		while (*r && isdigit(*r))
			r++;
		if (*l != *r)
			return (int64_t)*l - (int64_t)*r;
		if (*l == '\0')
			return 0;
		l++;
		r++;
	}
}
+
+static int64_t
+sort__comm_nodigit_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return strcmp_nodigit(comm__str(right->comm), comm__str(left->comm));
+}
+
+static int64_t
+sort__comm_nodigit_collapse(struct hist_entry *left, struct hist_entry *right)
+{
+ return strcmp_nodigit(comm__str(right->comm), comm__str(left->comm));
+}
+
+static int64_t
+sort__comm_nodigit_sort(struct hist_entry *left, struct hist_entry *right)
+{
+ return strcmp_nodigit(comm__str(right->comm), comm__str(left->comm));
+}
+
+static int hist_entry__comm_nodigit_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ int ret = 0;
+ unsigned int print_len, printed = 0, start = 0, end = 0;
+ bool in_digit;
+ const char *comm = comm__str(he->comm), *print;
+
+ while (printed < width && printed < size && comm[start]) {
+ in_digit = !!isdigit((unsigned char)comm[start]);
+ end = start + 1;
+ while (comm[end] && !!isdigit((unsigned char)comm[end]) == in_digit)
+ end++;
+ if (in_digit) {
+ print_len = 3; /* <N> */
+ print = "<N>";
+ } else {
+ print_len = end - start;
+ print = &comm[start];
+ }
+ print_len = min(print_len, width - printed);
+ ret = repsep_snprintf(bf + printed, size - printed, "%-.*s",
+ print_len, print);
+ if (ret < 0)
+ return ret;
+ start = end;
+ printed += ret;
+ }
+ /* Pad to width if necessary */
+ if (printed < width && printed < size) {
+ ret = repsep_snprintf(bf + printed, size - printed, "%-*.*s",
+ width - printed, width - printed, "");
+ if (ret < 0)
+ return ret;
+ printed += ret;
+ }
+ return printed;
+}
+
/*
 * --sort comm_nodigit: like the "comm" key but with digit runs in the
 * command name collapsed (compared as equal, printed as "<N>"), so
 * numbered threads such as worker1/worker2 fold into one hist entry.
 */
struct sort_entry sort_comm_nodigit = {
	.se_header = "CommandNoDigit",
	.se_cmp = sort__comm_nodigit_cmp,
	.se_collapse = sort__comm_nodigit_collapse,
	.se_sort = sort__comm_nodigit_sort,
	.se_snprintf = hist_entry__comm_nodigit_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_COMM_NODIGIT,
};
+
/* --sort dso */
static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
@@ -494,7 +614,7 @@ hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsign
return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);
}
-struct sort_entry sort_sym_offset = {
+static struct sort_entry sort_sym_offset = {
.se_header = "Symbol Offset",
.se_cmp = sort__symoff_cmp,
.se_sort = sort__symoff_sort,
@@ -605,7 +725,7 @@ static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}
-struct sort_entry sort_srcline_from = {
+static struct sort_entry sort_srcline_from = {
.se_header = "From Source:Line",
.se_cmp = sort__srcline_from_cmp,
.se_collapse = sort__srcline_from_collapse,
@@ -653,7 +773,7 @@ static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}
-struct sort_entry sort_srcline_to = {
+static struct sort_entry sort_srcline_to = {
.se_header = "To Source:Line",
.se_cmp = sort__srcline_to_cmp,
.se_collapse = sort__srcline_to_collapse,
@@ -689,7 +809,7 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-*s", width, tmp);
}
-struct sort_entry sort_sym_ipc = {
+static struct sort_entry sort_sym_ipc = {
.se_header = "IPC [IPC Coverage]",
.se_cmp = sort__sym_cmp,
.se_snprintf = hist_entry__sym_ipc_snprintf,
@@ -707,7 +827,7 @@ static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
return repsep_snprintf(bf, size, "%-*s", width, tmp);
}
-struct sort_entry sort_sym_ipc_null = {
+static struct sort_entry sort_sym_ipc_null = {
.se_header = "IPC [IPC Coverage]",
.se_cmp = sort__sym_cmp,
.se_snprintf = hist_entry__sym_ipc_null_snprintf,
@@ -740,7 +860,7 @@ static int hist_entry__callchain_branch_predicted_snprintf(
return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
}
-struct sort_entry sort_callchain_branch_predicted = {
+static struct sort_entry sort_callchain_branch_predicted = {
.se_header = "Predicted",
.se_cmp = sort__callchain_branch_predicted_cmp,
.se_snprintf = hist_entry__callchain_branch_predicted_snprintf,
@@ -770,7 +890,7 @@ static int hist_entry__callchain_branch_abort_snprintf(struct hist_entry *he,
return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
}
-struct sort_entry sort_callchain_branch_abort = {
+static struct sort_entry sort_callchain_branch_abort = {
.se_header = "Abort",
.se_cmp = sort__callchain_branch_abort_cmp,
.se_snprintf = hist_entry__callchain_branch_abort_snprintf,
@@ -803,7 +923,7 @@ static int hist_entry__callchain_branch_cycles_snprintf(struct hist_entry *he,
return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
}
-struct sort_entry sort_callchain_branch_cycles = {
+static struct sort_entry sort_callchain_branch_cycles = {
.se_header = "Cycles",
.se_cmp = sort__callchain_branch_cycles_cmp,
.se_snprintf = hist_entry__callchain_branch_cycles_snprintf,
@@ -870,7 +990,7 @@ static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}
-struct sort_entry sort_srcfile = {
+static struct sort_entry sort_srcfile = {
.se_header = "Source File",
.se_cmp = sort__srcfile_cmp,
.se_collapse = sort__srcfile_collapse,
@@ -922,7 +1042,7 @@ static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}
-struct sort_entry sort_cpu = {
+static struct sort_entry sort_cpu = {
.se_header = "CPU",
.se_cmp = sort__cpu_cmp,
.se_snprintf = hist_entry__cpu_snprintf,
@@ -953,7 +1073,7 @@ static int hist_entry__parallelism_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%*d", width, he->parallelism);
}
-struct sort_entry sort_parallelism = {
+static struct sort_entry sort_parallelism = {
.se_header = "Parallelism",
.se_cmp = sort__parallelism_cmp,
.se_filter = hist_entry__parallelism_filter,
@@ -994,7 +1114,7 @@ static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
he->cgroup_id.ino);
}
-struct sort_entry sort_cgroup_id = {
+static struct sort_entry sort_cgroup_id = {
.se_header = "cgroup id (dev/inode)",
.se_cmp = sort__cgroup_id_cmp,
.se_snprintf = hist_entry__cgroup_id_snprintf,
@@ -1027,7 +1147,7 @@ static int hist_entry__cgroup_snprintf(struct hist_entry *he,
return repsep_snprintf(bf, size, "%s", cgrp_name);
}
-struct sort_entry sort_cgroup = {
+static struct sort_entry sort_cgroup = {
.se_header = "Cgroup",
.se_cmp = sort__cgroup_cmp,
.se_snprintf = hist_entry__cgroup_snprintf,
@@ -1058,7 +1178,7 @@ static int hist_entry__socket_filter(struct hist_entry *he, int type, const void
return sk >= 0 && he->socket != sk;
}
-struct sort_entry sort_socket = {
+static struct sort_entry sort_socket = {
.se_header = "Socket",
.se_cmp = sort__socket_cmp,
.se_snprintf = hist_entry__socket_snprintf,
@@ -1089,7 +1209,7 @@ static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-.*s", width, he_time);
}
-struct sort_entry sort_time = {
+static struct sort_entry sort_time = {
.se_header = "Time",
.se_cmp = sort__time_cmp,
.se_snprintf = hist_entry__time_snprintf,
@@ -1158,7 +1278,7 @@ static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}
-struct sort_entry sort_trace = {
+static struct sort_entry sort_trace = {
.se_header = "Trace output",
.se_cmp = sort__trace_cmp,
.se_snprintf = hist_entry__trace_snprintf,
@@ -1453,7 +1573,7 @@ sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
return _sort__addr_cmp(to_l->addr, to_r->addr);
}
-struct sort_entry sort_addr_from = {
+static struct sort_entry sort_addr_from = {
.se_header = "Source Address",
.se_cmp = sort__addr_from_cmp,
.se_snprintf = hist_entry__addr_from_snprintf,
@@ -1461,7 +1581,7 @@ struct sort_entry sort_addr_from = {
.se_width_idx = HISTC_ADDR_FROM,
};
-struct sort_entry sort_addr_to = {
+static struct sort_entry sort_addr_to = {
.se_header = "Target Address",
.se_cmp = sort__addr_to_cmp,
.se_snprintf = hist_entry__addr_to_snprintf,
@@ -1518,7 +1638,7 @@ static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
he->branch_info->flags.cycles);
}
-struct sort_entry sort_cycles = {
+static struct sort_entry sort_cycles = {
.se_header = "Basic Block Cycles",
.se_cmp = sort__cycles_cmp,
.se_snprintf = hist_entry__cycles_snprintf,
@@ -1808,7 +1928,7 @@ static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
}
-struct sort_entry sort_mispredict = {
+static struct sort_entry sort_mispredict = {
.se_header = "Branch Mispredicted",
.se_cmp = sort__mispredict_cmp,
.se_snprintf = hist_entry__mispredict_snprintf,
@@ -1827,7 +1947,7 @@ static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
}
-struct sort_entry sort_local_weight = {
+static struct sort_entry sort_local_weight = {
.se_header = "Local Weight",
.se_cmp = sort__weight_cmp,
.se_snprintf = hist_entry__local_weight_snprintf,
@@ -1841,7 +1961,7 @@ static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
he->weight * he->stat.nr_events);
}
-struct sort_entry sort_global_weight = {
+static struct sort_entry sort_global_weight = {
.se_header = "Weight",
.se_cmp = sort__weight_cmp,
.se_snprintf = hist_entry__global_weight_snprintf,
@@ -1860,7 +1980,7 @@ static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
}
-struct sort_entry sort_local_ins_lat = {
+static struct sort_entry sort_local_ins_lat = {
.se_header = "Local INSTR Latency",
.se_cmp = sort__ins_lat_cmp,
.se_snprintf = hist_entry__local_ins_lat_snprintf,
@@ -1874,7 +1994,7 @@ static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
he->ins_lat * he->stat.nr_events);
}
-struct sort_entry sort_global_ins_lat = {
+static struct sort_entry sort_global_ins_lat = {
.se_header = "INSTR Latency",
.se_cmp = sort__ins_lat_cmp,
.se_snprintf = hist_entry__global_ins_lat_snprintf,
@@ -1900,70 +2020,70 @@ static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-*u", width, he->weight3);
}
-struct sort_entry sort_local_p_stage_cyc = {
+static struct sort_entry sort_local_p_stage_cyc = {
.se_header = "Local Pipeline Stage Cycle",
.se_cmp = sort__p_stage_cyc_cmp,
.se_snprintf = hist_entry__p_stage_cyc_snprintf,
.se_width_idx = HISTC_LOCAL_P_STAGE_CYC,
};
-struct sort_entry sort_global_p_stage_cyc = {
+static struct sort_entry sort_global_p_stage_cyc = {
.se_header = "Pipeline Stage Cycle",
.se_cmp = sort__p_stage_cyc_cmp,
.se_snprintf = hist_entry__global_p_stage_cyc_snprintf,
.se_width_idx = HISTC_GLOBAL_P_STAGE_CYC,
};
-struct sort_entry sort_mem_daddr_sym = {
+static struct sort_entry sort_mem_daddr_sym = {
.se_header = "Data Symbol",
.se_cmp = sort__daddr_cmp,
.se_snprintf = hist_entry__daddr_snprintf,
.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};
-struct sort_entry sort_mem_iaddr_sym = {
+static struct sort_entry sort_mem_iaddr_sym = {
.se_header = "Code Symbol",
.se_cmp = sort__iaddr_cmp,
.se_snprintf = hist_entry__iaddr_snprintf,
.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};
-struct sort_entry sort_mem_daddr_dso = {
+static struct sort_entry sort_mem_daddr_dso = {
.se_header = "Data Object",
.se_cmp = sort__dso_daddr_cmp,
.se_snprintf = hist_entry__dso_daddr_snprintf,
.se_width_idx = HISTC_MEM_DADDR_DSO,
};
-struct sort_entry sort_mem_locked = {
+static struct sort_entry sort_mem_locked = {
.se_header = "Locked",
.se_cmp = sort__locked_cmp,
.se_snprintf = hist_entry__locked_snprintf,
.se_width_idx = HISTC_MEM_LOCKED,
};
-struct sort_entry sort_mem_tlb = {
+static struct sort_entry sort_mem_tlb = {
.se_header = "TLB access",
.se_cmp = sort__tlb_cmp,
.se_snprintf = hist_entry__tlb_snprintf,
.se_width_idx = HISTC_MEM_TLB,
};
-struct sort_entry sort_mem_lvl = {
+static struct sort_entry sort_mem_lvl = {
.se_header = "Memory access",
.se_cmp = sort__lvl_cmp,
.se_snprintf = hist_entry__lvl_snprintf,
.se_width_idx = HISTC_MEM_LVL,
};
-struct sort_entry sort_mem_snoop = {
+static struct sort_entry sort_mem_snoop = {
.se_header = "Snoop",
.se_cmp = sort__snoop_cmp,
.se_snprintf = hist_entry__snoop_snprintf,
.se_width_idx = HISTC_MEM_SNOOP,
};
-struct sort_entry sort_mem_dcacheline = {
+static struct sort_entry sort_mem_dcacheline = {
.se_header = "Data Cacheline",
.se_cmp = sort__dcacheline_cmp,
.se_snprintf = hist_entry__dcacheline_snprintf,
@@ -1998,7 +2118,7 @@ static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%.*s", width, out);
}
-struct sort_entry sort_mem_blocked = {
+static struct sort_entry sort_mem_blocked = {
.se_header = "Blocked",
.se_cmp = sort__blocked_cmp,
.se_snprintf = hist_entry__blocked_snprintf,
@@ -2039,7 +2159,7 @@ static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
return width;
}
-struct sort_entry sort_mem_phys_daddr = {
+static struct sort_entry sort_mem_phys_daddr = {
.se_header = "Data Physical Address",
.se_cmp = sort__phys_daddr_cmp,
.se_snprintf = hist_entry__phys_daddr_snprintf,
@@ -2068,7 +2188,7 @@ static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
get_page_size_name(mem_info__daddr(he->mem_info)->data_page_size, str));
}
-struct sort_entry sort_mem_data_page_size = {
+static struct sort_entry sort_mem_data_page_size = {
.se_header = "Data Page Size",
.se_cmp = sort__data_page_size_cmp,
.se_snprintf = hist_entry__data_page_size_snprintf,
@@ -2093,7 +2213,7 @@ static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
get_page_size_name(he->code_page_size, str));
}
-struct sort_entry sort_code_page_size = {
+static struct sort_entry sort_code_page_size = {
.se_header = "Code Page Size",
.se_cmp = sort__code_page_size_cmp,
.se_snprintf = hist_entry__code_page_size_snprintf,
@@ -2125,7 +2245,7 @@ static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-*s", width, out);
}
-struct sort_entry sort_abort = {
+static struct sort_entry sort_abort = {
.se_header = "Transaction abort",
.se_cmp = sort__abort_cmp,
.se_snprintf = hist_entry__abort_snprintf,
@@ -2157,7 +2277,7 @@ static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-*s", width, out);
}
-struct sort_entry sort_in_tx = {
+static struct sort_entry sort_in_tx = {
.se_header = "Branch in transaction",
.se_cmp = sort__in_tx_cmp,
.se_snprintf = hist_entry__in_tx_snprintf,
@@ -2229,7 +2349,7 @@ static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-*s", width, buf);
}
-struct sort_entry sort_transaction = {
+static struct sort_entry sort_transaction = {
.se_header = "Transaction ",
.se_cmp = sort__transaction_cmp,
.se_snprintf = hist_entry__transaction_snprintf,
@@ -2268,7 +2388,7 @@ static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}
-struct sort_entry sort_sym_size = {
+static struct sort_entry sort_sym_size = {
.se_header = "Symbol size",
.se_cmp = sort__sym_size_cmp,
.se_snprintf = hist_entry__sym_size_snprintf,
@@ -2307,7 +2427,7 @@ static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}
-struct sort_entry sort_dso_size = {
+static struct sort_entry sort_dso_size = {
.se_header = "DSO size",
.se_cmp = sort__dso_size_cmp,
.se_snprintf = hist_entry__dso_size_snprintf,
@@ -2344,7 +2464,7 @@ static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-#*llx", width, ip);
}
-struct sort_entry sort_addr = {
+static struct sort_entry sort_addr = {
.se_header = "Address",
.se_cmp = sort__addr_cmp,
.se_snprintf = hist_entry__addr_snprintf,
@@ -2462,7 +2582,7 @@ static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
he->mem_type_off, buf);
}
-struct sort_entry sort_type_offset = {
+static struct sort_entry sort_type_offset = {
.se_header = "Data Type Offset",
.se_cmp = sort__type_cmp,
.se_collapse = sort__typeoff_sort,
@@ -2474,7 +2594,26 @@ struct sort_entry sort_type_offset = {
/* --sort typecln */
-#define DEFAULT_CACHELINE_SIZE 64
+static int
+hist_entry__cln_size(struct hist_entry *he)
+{
+ int ret = 0;
+
+ if (he && he->hists) {
+ struct evsel *evsel = hists_to_evsel(he->hists);
+
+ if (evsel) {
+ struct perf_session *session = evsel__session(evsel);
+
+ ret = session->header.env.cln_size;
+ }
+ }
+
+ if (ret < 1)
+ ret = DEFAULT_CACHELINE_SIZE; // avoid div/0 later
+
+ return ret;
+}
static int64_t
sort__typecln_sort(struct hist_entry *left, struct hist_entry *right)
@@ -2482,11 +2621,9 @@ sort__typecln_sort(struct hist_entry *left, struct hist_entry *right)
struct annotated_data_type *left_type = left->mem_type;
struct annotated_data_type *right_type = right->mem_type;
int64_t left_cln, right_cln;
+ int64_t cln_size_left = hist_entry__cln_size(left);
+ int64_t cln_size_right = hist_entry__cln_size(right);
int64_t ret;
- int cln_size = cacheline_size();
-
- if (cln_size == 0)
- cln_size = DEFAULT_CACHELINE_SIZE;
if (!left_type) {
sort__type_init(left);
@@ -2502,8 +2639,8 @@ sort__typecln_sort(struct hist_entry *left, struct hist_entry *right)
if (ret)
return ret;
- left_cln = left->mem_type_off / cln_size;
- right_cln = right->mem_type_off / cln_size;
+ left_cln = left->mem_type_off / cln_size_left;
+ right_cln = right->mem_type_off / cln_size_right;
return left_cln - right_cln;
}
@@ -2511,16 +2648,13 @@ static int hist_entry__typecln_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width __maybe_unused)
{
struct annotated_data_type *he_type = he->mem_type;
- int cln_size = cacheline_size();
-
- if (cln_size == 0)
- cln_size = DEFAULT_CACHELINE_SIZE;
+ int cln_size = hist_entry__cln_size(he);
return repsep_snprintf(bf, size, "%s: cache-line %d", he_type->self.type_name,
he->mem_type_off / cln_size);
}
-struct sort_entry sort_type_cacheline = {
+static struct sort_entry sort_type_cacheline = {
.se_header = "Data Type Cacheline",
.se_cmp = sort__type_cmp,
.se_collapse = sort__typecln_sort,
@@ -2583,6 +2717,7 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_PID, "pid", sort_thread),
DIM(SORT_TGID, "tgid", sort_tgid),
DIM(SORT_COMM, "comm", sort_comm),
+ DIM(SORT_COMM_NODIGIT, "comm_nodigit", sort_comm_nodigit),
DIM(SORT_DSO, "dso", sort_dso),
DIM(SORT_SYM, "symbol", sort_sym),
DIM(SORT_PARENT, "parent", sort_parent),
@@ -3579,6 +3714,8 @@ static int __sort_dimension__update(struct sort_dimension *sd,
list->thread = 1;
} else if (sd->entry == &sort_comm) {
list->comm = 1;
+ } else if (sd->entry == &sort_comm_nodigit) {
+ list->comm_nodigit = list->comm = 1;
} else if (sd->entry == &sort_type_offset) {
symbol_conf.annotate_data_member = true;
} else if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) {
@@ -4040,6 +4177,7 @@ static bool get_elide(int idx, FILE *output)
case HISTC_DSO:
return __get_elide(symbol_conf.dso_list, "dso", output);
case HISTC_COMM:
+ case HISTC_COMM_NODIGIT:
return __get_elide(symbol_conf.comm_list, "comm", output);
default:
break;
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index d7787958e06b..c962e77e4b93 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -43,6 +43,7 @@ enum sort_type {
/* common sort keys */
SORT_PID,
SORT_COMM,
+ SORT_COMM_NODIGIT,
SORT_DSO,
SORT_SYM,
SORT_PARENT,
@@ -158,4 +159,5 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right);
int64_t
_sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r);
char *hist_entry__srcline(struct hist_entry *he);
+size_t sort__comm_nodigit_len(struct hist_entry *entry);
#endif /* __PERF_SORT_H */
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index 9be42f398440..db164d258163 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -8,10 +8,12 @@
#include "symbol.h"
#include "libdw.h"
#include "debug.h"
+#include "util.h"
#include <inttypes.h>
#include <string.h>
#include <linux/string.h>
+#include <linux/zalloc.h>
bool srcline_full_filename;
@@ -73,14 +75,6 @@ int inline_list__append_tail(struct symbol *symbol, char *srcline, struct inline
return 0;
}
-/* basename version that takes a const input string */
-static const char *gnu_basename(const char *path)
-{
- const char *base = strrchr(path, '/');
-
- return base ? base + 1 : path;
-}
-
char *srcline_from_fileline(const char *file, unsigned int line)
{
char *srcline;
@@ -89,7 +83,7 @@ char *srcline_from_fileline(const char *file, unsigned int line)
return NULL;
if (!srcline_full_filename)
- file = gnu_basename(file);
+ file = perf_basename(file);
if (asprintf(&srcline, "%s:%u", file, line) < 0)
return NULL;
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index dc2b66855f6c..993f4c4b8f44 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -897,7 +897,7 @@ static bool should_skip_zero_counter(struct perf_stat_config *config,
const struct aggr_cpu_id *id)
{
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
/*
* Skip unsupported default events when not verbose. (default events
@@ -1125,7 +1125,7 @@ static void print_no_aggr_metric(struct perf_stat_config *config,
struct evlist *evlist,
struct outstate *os)
{
- int all_idx;
+ unsigned int all_idx;
struct perf_cpu cpu;
perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.user_requested_cpus) {
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 59d2cd4f2188..bc2d44df7baf 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -13,7 +13,6 @@
#include "metricgroup.h"
#include "cgroup.h"
#include "units.h"
-#include <linux/zalloc.h>
#include "iostat.h"
#include "util/hashmap.h"
#include "tool_pmu.h"
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 976a06e63252..14d169e22e8f 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -246,9 +246,11 @@ void evlist__reset_prev_raw_counts(struct evlist *evlist)
static void evsel__copy_prev_raw_counts(struct evsel *evsel)
{
- int idx, nthreads = perf_thread_map__nr(evsel->core.threads);
+ int nthreads = perf_thread_map__nr(evsel->core.threads);
for (int thread = 0; thread < nthreads; thread++) {
+ unsigned int idx;
+
perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
*perf_counts(evsel->counts, idx, thread) =
*perf_counts(evsel->prev_raw_counts, idx, thread);
@@ -580,7 +582,7 @@ static void evsel__update_percore_stats(struct evsel *evsel, struct aggr_cpu_id
struct perf_counts_values counts = { 0, };
struct aggr_cpu_id id;
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
/* collect per-core counts */
perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
@@ -617,7 +619,7 @@ static void evsel__process_percore(struct evsel *evsel)
struct perf_stat_evsel *ps = evsel->stats;
struct aggr_cpu_id core_id;
struct perf_cpu cpu;
- int idx;
+ unsigned int idx;
if (!evsel->percore)
return;
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index b1d259f590e9..e360e7736c7b 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -726,7 +726,8 @@ static void scan_core_topology(int *map, struct topology *t, int nr_cpus)
static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus)
{
- int idx, ret = 0;
+ unsigned int idx;
+ int ret = 0;
struct perf_cpu_map *map;
struct perf_cpu cpu;
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 76912c62b6a0..7afa8a117139 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -372,10 +372,8 @@ static bool get_plt_sizes(struct dso *dso, GElf_Ehdr *ehdr, GElf_Shdr *shdr_plt,
*plt_entry_size = 12;
return true;
case EM_AARCH64:
- *plt_header_size = 32;
- *plt_entry_size = 16;
- return true;
case EM_LOONGARCH:
+ case EM_RISCV:
*plt_header_size = 32;
*plt_entry_size = 16;
return true;
@@ -1054,15 +1052,15 @@ void symsrc__destroy(struct symsrc *ss)
close(ss->fd);
}
-bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
+static bool elf__needs_adjust_symbols(const GElf_Ehdr *ehdr)
{
/*
* Usually vmlinux is an ELF file with type ET_EXEC for most
* architectures; except Arm64 kernel is linked with option
* '-share', so need to check type ET_DYN.
*/
- return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL ||
- ehdr.e_type == ET_DYN;
+ return ehdr->e_type == ET_EXEC || ehdr->e_type == ET_REL ||
+ ehdr->e_type == ET_DYN;
}
static Elf *read_gnu_debugdata(struct dso *dso, Elf *elf, const char *name, int *fd_ret)
@@ -1235,7 +1233,7 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
if (dso__kernel(dso) == DSO_SPACE__USER)
ss->adjust_symbols = true;
else
- ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
+ ss->adjust_symbols = elf__needs_adjust_symbols(&ehdr);
ss->name = strdup(name);
if (!ss->name) {
@@ -1356,8 +1354,12 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
char dso_name[PATH_MAX];
/* Adjust symbol to map to file offset */
- if (adjust_kernel_syms)
- sym->st_value -= shdr->sh_addr - shdr->sh_offset;
+ if (adjust_kernel_syms) {
+ if (dso__rel(dso))
+ sym->st_value += shdr->sh_offset;
+ else
+ sym->st_value -= shdr->sh_addr - shdr->sh_offset;
+ }
if (strcmp(section_name, (dso__short_name(curr_dso) + dso__short_name_len(dso))) == 0)
return 0;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 8662001e1e25..fcaeeddbbb6b 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -26,7 +26,6 @@
#include "demangle-rust-v0.h"
#include "dso.h"
#include "util.h" // lsdir()
-#include "debug.h"
#include "event.h"
#include "machine.h"
#include "map.h"
@@ -66,9 +65,11 @@ struct symbol_conf symbol_conf = {
.time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */
.show_hist_headers = true,
.symfs = "",
+ .symfs_layout_flat = false,
.event_group = true,
.inline_name = true,
.res_sample = 0,
+ .addr2line_timeout_ms = 5 * 1000,
};
struct map_list_node {
@@ -2363,7 +2364,8 @@ static int setup_parallelism_bitmap(void)
{
struct perf_cpu_map *map;
struct perf_cpu cpu;
- int i, err = -1;
+ unsigned int i;
+ int err = -1;
if (symbol_conf.parallelism_list_str == NULL)
return 0;
@@ -2491,16 +2493,42 @@ int symbol__config_symfs(const struct option *opt __maybe_unused,
const char *dir, int unset __maybe_unused)
{
char *bf = NULL;
+ const char *layout_str;
+ char *dir_copy;
int ret;
- symbol_conf.symfs = strdup(dir);
- if (symbol_conf.symfs == NULL)
- return -ENOMEM;
+ layout_str = strrchr(dir, ',');
+ if (layout_str) {
+ size_t dir_len = layout_str - dir;
+
+ dir_copy = strndup(dir, dir_len);
+ if (dir_copy == NULL)
+ return -ENOMEM;
+
+ symbol_conf.symfs = dir_copy;
+
+ layout_str++;
+ if (!strcmp(layout_str, "flat"))
+ symbol_conf.symfs_layout_flat = true;
+ else if (!strcmp(layout_str, "hierarchy"))
+ symbol_conf.symfs_layout_flat = false;
+ else {
+ pr_err("Invalid layout: '%s', use 'hierarchy' or 'flat'\n",
+ layout_str);
+ free(dir_copy);
+ return -EINVAL;
+ }
+ } else {
+ symbol_conf.symfs = strdup(dir);
+ if (symbol_conf.symfs == NULL)
+ return -ENOMEM;
+ symbol_conf.symfs_layout_flat = false;
+ }
/* skip the locally configured cache if a symfs is given, and
* config buildid dir to symfs/.debug
*/
- ret = asprintf(&bf, "%s/%s", dir, ".debug");
+ ret = asprintf(&bf, "%s/%s", symbol_conf.symfs, ".debug");
if (ret < 0)
return -ENOMEM;
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 3fb5d146d9b1..bd6eb90c8668 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -9,10 +9,12 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include <stdio.h>
+#include <errno.h>
#include "addr_location.h"
#include "path.h"
#include "symbol_conf.h"
#include "spark.h"
+#include "util.h"
#ifdef HAVE_LIBELF_SUPPORT
#include <libelf.h>
@@ -96,6 +98,9 @@ struct intlist;
static inline int __symbol__join_symfs(char *bf, size_t size, const char *path)
{
+ if (symbol_conf.symfs_layout_flat)
+ return path__join(bf, size, symbol_conf.symfs, perf_basename(path));
+
return path__join(bf, size, symbol_conf.symfs, path);
}
@@ -169,6 +174,11 @@ size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp);
size_t symbol__fprintf(struct symbol *sym, FILE *fp);
bool symbol__restricted_filename(const char *filename,
const char *restricted_filename);
+
+#define SYMFS_HELP "setup root directory which contains debug files:\n" \
+ "\t\t\t\t" "directory:\tLook for files with symbols relative to this directory.\n" \
+ "\t\t\t\t" "layout: \tLayout of files, 'hierarchy' matches full path (default), 'flat' only matches base name.\n"
+
int symbol__config_symfs(const struct option *opt __maybe_unused,
const char *dir, int unset __maybe_unused);
@@ -217,7 +227,6 @@ int setup_intlist(struct intlist **list, const char *list_str,
const char *list_name);
#ifdef HAVE_LIBELF_SUPPORT
-bool elf__needs_adjust_symbols(GElf_Ehdr ehdr);
void arch__sym_update(struct symbol *s, GElf_Sym *sym);
#endif
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
index 71bb17372a6c..6cd454d7c98e 100644
--- a/tools/perf/util/symbol_conf.h
+++ b/tools/perf/util/symbol_conf.h
@@ -51,7 +51,7 @@ struct symbol_conf {
report_block,
report_individual_block,
inline_name,
- disable_add2line_warn,
+ addr2line_disable_warn,
no_buildid_mmap2,
guest_code,
lazy_load_kernel_maps,
@@ -80,6 +80,7 @@ struct symbol_conf {
*bt_stop_list_str;
const char *addr2line_path;
enum a2l_style addr2line_style[MAX_A2L_STYLE];
+ int addr2line_timeout_ms;
unsigned long time_quantum;
struct strlist *dso_list,
*comm_list,
@@ -93,6 +94,7 @@ struct symbol_conf {
*tid_list,
*addr_list;
const char *symfs;
+ bool symfs_layout_flat;
int res_sample;
int pad_output_len_dso;
int group_sort_idx;
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index ddf1cbda1902..85bee747f4cd 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -1266,7 +1266,7 @@ static void synthesize_cpus(struct synthesize_cpu_map_data *data)
static void synthesize_mask(struct synthesize_cpu_map_data *data)
{
- int idx;
+ unsigned int idx;
struct perf_cpu cpu;
/* Due to padding, the 4bytes per entry mask variant is always smaller. */
diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
index 84ebb9c940c6..bc2bff9c6842 100644
--- a/tools/perf/util/target.h
+++ b/tools/perf/util/target.h
@@ -49,22 +49,22 @@ uid_t parse_uid(const char *str);
int target__strerror(struct target *target, int errnum, char *buf, size_t buflen);
-static inline bool target__has_task(struct target *target)
+static inline bool target__has_task(const struct target *target)
{
return target->tid || target->pid;
}
-static inline bool target__has_cpu(struct target *target)
+static inline bool target__has_cpu(const struct target *target)
{
return target->system_wide || target->cpu_list;
}
-static inline bool target__none(struct target *target)
+static inline bool target__none(const struct target *target)
{
return !target__has_task(target) && !target__has_cpu(target);
}
-static inline bool target__enable_on_exec(struct target *target)
+static inline bool target__enable_on_exec(const struct target *target)
{
/*
* Normally enable_on_exec should be set if:
@@ -75,12 +75,12 @@ static inline bool target__enable_on_exec(struct target *target)
return target__none(target) && !target->initial_delay;
}
-static inline bool target__has_per_thread(struct target *target)
+static inline bool target__has_per_thread(const struct target *target)
{
return target->system_wide && target->per_thread;
}
-static inline bool target__uses_dummy_map(struct target *target)
+static inline bool target__uses_dummy_map(const struct target *target)
{
bool use_dummy = false;
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 5b39ce21e333..87d496e9dfa6 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -25,7 +25,6 @@
#include <unistd.h>
#include <sys/mman.h>
#include <linux/list.h>
-#include <linux/zalloc.h>
#ifndef REMOTE_UNWIND_LIBUNWIND
#include <libunwind.h>
#include <libunwind-ptrace.h>
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 8b893de35f77..25849434f0a4 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -77,8 +77,6 @@ bool sysctl__nmi_watchdog_enabled(void)
return nmi_watchdog;
}
-bool test_attr__enabled;
-
bool exclude_GH_default;
bool perf_host = true;
@@ -547,3 +545,11 @@ int scandirat(int dirfd, const char *dirp,
return err;
}
#endif
+
+/* basename version that takes a const input string */
+const char *perf_basename(const char *path)
+{
+ const char *base = strrchr(path, '/');
+
+ return base ? base + 1 : path;
+}
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 394dbfa944ac..87a0818a8c76 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -30,7 +30,6 @@ extern bool perf_guest;
/* General helper functions */
void usage(const char *err) __noreturn;
-void die(const char *err, ...) __noreturn __printf(1, 2);
struct dirent;
struct strlist;
@@ -87,6 +86,8 @@ struct perf_debuginfod {
};
void perf_debuginfod_setup(struct perf_debuginfod *di);
+const char *perf_basename(const char *path);
+
char *filename_with_chroot(int pid, const char *filename);
int do_realloc_array_as_needed(void **arr, size_t *arr_sz, size_t x,
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
index ec72d29f3d58..6eaddfcf833e 100644
--- a/tools/perf/util/values.c
+++ b/tools/perf/util/values.c
@@ -13,9 +13,9 @@
int perf_read_values_init(struct perf_read_values *values)
{
values->threads_max = 16;
- values->pid = malloc(values->threads_max * sizeof(*values->pid));
- values->tid = malloc(values->threads_max * sizeof(*values->tid));
- values->value = zalloc(values->threads_max * sizeof(*values->value));
+ values->pid = calloc(values->threads_max, sizeof(*values->pid));
+ values->tid = calloc(values->threads_max, sizeof(*values->tid));
+ values->value = calloc(values->threads_max, sizeof(*values->value));
if (!values->pid || !values->tid || !values->value) {
pr_debug("failed to allocate read_values threads arrays");
goto out_free_pid;
@@ -96,7 +96,7 @@ static int perf_read_values__findnew_thread(struct perf_read_values *values,
i = values->threads;
- values->value[i] = zalloc(values->counters_max * sizeof(**values->value));
+ values->value[i] = calloc(values->counters_max, sizeof(**values->value));
if (!values->value[i]) {
pr_debug("failed to allocate read_values counters array");
return -ENOMEM;
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
index dd9056ddb016..2faff1aead52 100644
--- a/tools/power/x86/intel-speed-select/isst-config.c
+++ b/tools/power/x86/intel-speed-select/isst-config.c
@@ -16,7 +16,7 @@ struct process_cmd_struct {
int arg;
};
-static const char *version_str = "v1.25";
+static const char *version_str = "v1.26";
static const int supported_api_ver = 3;
static struct isst_if_platform_info isst_platform_info;
@@ -26,7 +26,7 @@ static FILE *outf;
static int cpu_model;
static int cpu_stepping;
-static int extended_family;
+static int cpu_family;
#define MAX_CPUS_IN_ONE_REQ 512
static short max_target_cpus;
@@ -82,6 +82,11 @@ struct cpu_topology {
static int read_only;
+static void print_version(void)
+{
+ fprintf(outf, "Version %s\n", version_str);
+}
+
static void check_privilege(void)
{
if (!read_only)
@@ -158,7 +163,7 @@ int is_icx_platform(void)
static int is_dmr_plus_platform(void)
{
- if (extended_family == 0x04)
+ if (cpu_family == 19)
return 1;
return 0;
@@ -167,13 +172,14 @@ static int is_dmr_plus_platform(void)
static int update_cpu_model(void)
{
unsigned int ebx, ecx, edx;
- unsigned int fms, family;
+ unsigned int fms;
__cpuid(1, fms, ebx, ecx, edx);
- family = (fms >> 8) & 0xf;
- extended_family = (fms >> 20) & 0x0f;
+ cpu_family = (fms >> 8) & 0xf;
+ if (cpu_family == 0xf)
+ cpu_family += (fms >> 20) & 0xff;
cpu_model = (fms >> 4) & 0xf;
- if (family == 6 || family == 0xf)
+ if (cpu_family == 6 || cpu_family == 0xf)
cpu_model += ((fms >> 16) & 0xf) << 4;
cpu_stepping = fms & 0xf;
@@ -1137,8 +1143,9 @@ static int isst_fill_platform_info(void)
close(fd);
if (isst_platform_info.api_version > supported_api_ver) {
+ print_version();
printf("Incompatible API versions; Upgrade of tool is required\n");
- return -1;
+ exit(1);
}
set_platform_ops:
@@ -1744,6 +1751,9 @@ static int no_turbo(void)
return parse_int_file(0, "/sys/devices/system/cpu/intel_pstate/no_turbo");
}
+#define U32_MAX ((unsigned int)~0U)
+#define S32_MAX ((int)(U32_MAX >> 1))
+
static void adjust_scaling_max_from_base_freq(int cpu)
{
int base_freq, scaling_max_freq;
@@ -1751,7 +1761,7 @@ static void adjust_scaling_max_from_base_freq(int cpu)
scaling_max_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
base_freq = get_cpufreq_base_freq(cpu);
if (scaling_max_freq < base_freq || no_turbo())
- set_cpufreq_scaling_min_max(cpu, 1, base_freq);
+ set_cpufreq_scaling_min_max(cpu, 1, S32_MAX);
}
static void adjust_scaling_min_from_base_freq(int cpu)
@@ -3191,12 +3201,6 @@ static void usage(void)
printf("\tTo get full turbo-freq information dump:\n");
printf("\t\tintel-speed-select turbo-freq info -l 0\n");
}
- exit(1);
-}
-
-static void print_version(void)
-{
- fprintf(outf, "Version %s\n", version_str);
exit(0);
}
@@ -3246,8 +3250,10 @@ static void cmdline(int argc, char **argv)
}
ret = update_cpu_model();
- if (ret)
- err(-1, "Invalid CPU model (%d)\n", cpu_model);
+ if (ret) {
+ fprintf(stderr, "Invalid CPU model (%d)\n", cpu_model);
+ exit(1);
+ }
printf("Intel(R) Speed Select Technology\n");
printf("Executing on CPU model:%d[0x%x]\n", cpu_model, cpu_model);
@@ -3311,6 +3317,7 @@ static void cmdline(int argc, char **argv)
break;
case 'v':
print_version();
+ exit(0);
break;
case 'b':
oob_mode = 1;
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index e9e8ef72395a..920694c3c1ec 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -191,6 +191,7 @@ struct msr_counter bic[] = {
{ 0x0, "Any%C0", NULL, 0, 0, 0, NULL, 0 },
{ 0x0, "GFX%C0", NULL, 0, 0, 0, NULL, 0 },
{ 0x0, "CPUGFX%", NULL, 0, 0, 0, NULL, 0 },
+ { 0x0, "Module", NULL, 0, 0, 0, NULL, 0 },
{ 0x0, "Core", NULL, 0, 0, 0, NULL, 0 },
{ 0x0, "CPU", NULL, 0, 0, 0, NULL, 0 },
{ 0x0, "APIC", NULL, 0, 0, 0, NULL, 0 },
@@ -264,6 +265,7 @@ enum bic_names {
BIC_Any_c0,
BIC_GFX_c0,
BIC_CPUGFX,
+ BIC_Module,
BIC_Core,
BIC_CPU,
BIC_APIC,
@@ -364,6 +366,7 @@ static void bic_groups_init(void)
SET_BIC(BIC_Node, &bic_group_topology);
SET_BIC(BIC_CoreCnt, &bic_group_topology);
SET_BIC(BIC_PkgCnt, &bic_group_topology);
+ SET_BIC(BIC_Module, &bic_group_topology);
SET_BIC(BIC_Core, &bic_group_topology);
SET_BIC(BIC_CPU, &bic_group_topology);
SET_BIC(BIC_Die, &bic_group_topology);
@@ -2383,6 +2386,7 @@ struct platform_counters {
struct cpu_topology {
int cpu_id;
int core_id; /* unique within a package */
+ int module_id;
int package_id;
int die_id;
int l3_id;
@@ -2404,6 +2408,8 @@ struct topo_params {
int allowed_cores;
int max_cpu_num;
int max_core_id; /* within a package */
+ int min_module_id; /* system wide */
+ int max_module_id; /* system wide */
int max_package_id;
int max_die_id;
int max_l3_id;
@@ -2427,11 +2433,17 @@ char *sys_lpi_file_debugfs = "/sys/kernel/debug/pmc_core/slp_s0_residency_usec";
int cpu_is_not_present(int cpu)
{
+ if (cpu < 0)
+ return 1;
+
return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
}
int cpu_is_not_allowed(int cpu)
{
+ if (cpu < 0)
+ return 1;
+
return !CPU_ISSET_S(cpu, cpu_allowed_setsize, cpu_allowed_set);
}
@@ -2443,6 +2455,22 @@ int cpu_is_not_allowed(int cpu)
#define PER_THREAD_PARAMS struct thread_data *t, struct core_data *c, struct pkg_data *p
+int has_allowed_lower_ht_sibling(int cpu)
+{
+ int i;
+
+ for (i = 0; i <= cpus[cpu].ht_id; ++i) {
+ int sibling_cpu_id = cpus[cpu].ht_sibling_cpu_id[i];
+
+ if (sibling_cpu_id == cpu)
+ return 0;
+
+ if (!cpu_is_not_allowed(sibling_cpu_id))
+ return 1;
+ }
+ return 0;
+}
+
int for_all_cpus(int (func) (struct thread_data *, struct core_data *, struct pkg_data *),
struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
{
@@ -2460,7 +2488,7 @@ int for_all_cpus(int (func) (struct thread_data *, struct core_data *, struct pk
if (cpu_is_not_allowed(cpu))
continue;
- if (cpus[cpu].ht_id > 0) /* skip HT sibling */
+ if (has_allowed_lower_ht_sibling(cpu)) /* skip HT sibling */
continue;
t = &thread_base[cpu];
@@ -2469,13 +2497,22 @@ int for_all_cpus(int (func) (struct thread_data *, struct core_data *, struct pk
retval |= func(t, c, p);
- /* Handle HT sibling now */
+ /* Handle other HT siblings now */
int i;
- for (i = MAX_HT_ID; i > 0; --i) { /* ht_id 0 is self */
- if (cpus[cpu].ht_sibling_cpu_id[i] <= 0)
+ for (i = 0; i <= MAX_HT_ID; ++i) {
+ int sibling_cpu_id = cpus[cpu].ht_sibling_cpu_id[i];
+
+ if (sibling_cpu_id < 0)
+ break;
+
+ if (sibling_cpu_id == cpu)
+ continue;
+
+ if (cpu_is_not_allowed(sibling_cpu_id))
continue;
- t = &thread_base[cpus[cpu].ht_sibling_cpu_id[i]];
+
+ t = &thread_base[sibling_cpu_id];
retval |= func(t, c, p);
}
@@ -2835,31 +2872,38 @@ void bic_lookup(cpu_set_t *ret_set, char *name_list, enum show_hide_mode mode)
static inline int print_name(int width, int *printed, char *delim, char *name, enum counter_type type, enum counter_format format)
{
UNUSED(type);
+ char *sep = (*printed)++ ? delim : "";
if (format == FORMAT_RAW && width >= 64)
- return (sprintf(outp, "%s%-8s", ((*printed)++ ? delim : ""), name));
+ return sprintf(outp, "%s%-8s", sep, name);
else
- return (sprintf(outp, "%s%s", ((*printed)++ ? delim : ""), name));
+ return sprintf(outp, "%s%s", sep, name);
}
static inline int print_hex_value(int width, int *printed, char *delim, unsigned long long value)
{
+ char *sep = (*printed)++ ? delim : "";
+
if (width <= 32)
- return (sprintf(outp, "%s%08x", ((*printed)++ ? delim : ""), (unsigned int)value));
+ return sprintf(outp, "%s%08llx", sep, value);
else
- return (sprintf(outp, "%s%016llx", ((*printed)++ ? delim : ""), value));
+ return sprintf(outp, "%s%016llx", sep, value);
}
static inline int print_decimal_value(int width, int *printed, char *delim, unsigned long long value)
{
+ char *sep = (*printed)++ ? delim : "";
+
UNUSED(width);
- return (sprintf(outp, "%s%lld", ((*printed)++ ? delim : ""), value));
+ return sprintf(outp, "%s%lld", sep, value);
}
static inline int print_float_value(int *printed, char *delim, double value)
{
- return (sprintf(outp, "%s%0.2f", ((*printed)++ ? delim : ""), value));
+ char *sep = (*printed)++ ? delim : "";
+
+ return sprintf(outp, "%s%0.2f", sep, value);
}
void print_header(char *delim)
@@ -2881,6 +2925,8 @@ void print_header(char *delim)
outp += sprintf(outp, "%sL3", (printed++ ? delim : ""));
if (DO_BIC(BIC_Node))
outp += sprintf(outp, "%sNode", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_Module))
+ outp += sprintf(outp, "%sModule", (printed++ ? delim : ""));
if (DO_BIC(BIC_Core))
outp += sprintf(outp, "%sCore", (printed++ ? delim : ""));
if (DO_BIC(BIC_CPU))
@@ -3176,7 +3222,7 @@ int dump_counters(PER_THREAD_PARAMS)
}
if (c && is_cpu_first_thread_in_core(t, c)) {
- outp += sprintf(outp, "core: %d\n", cpus[t->cpu_id].core_id);
+ outp += sprintf(outp, "core: 0x%x\n", cpus[t->cpu_id].core_id);
outp += sprintf(outp, "c3: %016llX\n", c->c3);
outp += sprintf(outp, "c6: %016llX\n", c->c6);
outp += sprintf(outp, "c7: %016llX\n", c->c7);
@@ -3350,6 +3396,8 @@ int format_counters(PER_THREAD_PARAMS)
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
if (DO_BIC(BIC_Node))
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_Module))
+ outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
if (DO_BIC(BIC_Core))
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
if (DO_BIC(BIC_CPU))
@@ -3383,18 +3431,24 @@ int format_counters(PER_THREAD_PARAMS)
else
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
}
+ if (DO_BIC(BIC_Module)) {
+ if (c)
+ outp += sprintf(outp, "%s0x%x", (printed++ ? delim : ""), cpus[t->cpu_id].module_id);
+ else
+ outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+ }
if (DO_BIC(BIC_Core)) {
if (c)
- outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].core_id);
+ outp += sprintf(outp, "%s0x%x", (printed++ ? delim : ""), cpus[t->cpu_id].core_id);
else
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
}
if (DO_BIC(BIC_CPU))
outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id);
if (DO_BIC(BIC_APIC))
- outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->apic_id);
+ outp += sprintf(outp, "%s0x%x", (printed++ ? delim : ""), t->apic_id);
if (DO_BIC(BIC_X2APIC))
- outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->x2apic_id);
+ outp += sprintf(outp, "%s0x%x", (printed++ ? delim : ""), t->x2apic_id);
}
if (DO_BIC(BIC_Avg_MHz))
@@ -5155,7 +5209,7 @@ static inline int get_rapl_num_domains(void)
if (!platform->has_per_core_rapl)
return topo.num_packages;
- return topo.num_cores;
+ return GLOBAL_CORE_ID(topo.max_core_id, topo.num_packages) + 1;
}
static inline int get_rapl_domain_id(int cpu)
@@ -6041,6 +6095,11 @@ int get_l3_id(int cpu)
return parse_int_file("/sys/devices/system/cpu/cpu%d/cache/index3/id", cpu);
}
+int get_module_id(int cpu)
+{
+ return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/cluster_id", cpu);
+}
+
int get_core_id(int cpu)
{
return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
@@ -6160,59 +6219,6 @@ static int parse_cpu_str(char *cpu_str, cpu_set_t *cpu_set, int cpu_set_size)
return 0;
}
-int set_thread_siblings(struct cpu_topology *thiscpu)
-{
- char path[80], character;
- FILE *filep;
- unsigned long map;
- int so, shift, sib_core;
- int cpu = thiscpu->cpu_id;
- int offset = topo.max_cpu_num + 1;
- size_t size;
- int thread_id = 0;
-
- thiscpu->put_ids = CPU_ALLOC((topo.max_cpu_num + 1));
- if (thiscpu->ht_id < 0)
- thiscpu->ht_id = thread_id++;
- if (!thiscpu->put_ids)
- return -1;
-
- size = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
- CPU_ZERO_S(size, thiscpu->put_ids);
-
- sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings", cpu);
- filep = fopen(path, "r");
-
- if (!filep) {
- warnx("%s: open failed", path);
- return -1;
- }
- do {
- offset -= BITMASK_SIZE;
- if (fscanf(filep, "%lx%c", &map, &character) != 2)
- err(1, "%s: failed to parse file", path);
- for (shift = 0; shift < BITMASK_SIZE; shift++) {
- if ((map >> shift) & 0x1) {
- so = shift + offset;
- sib_core = get_core_id(so);
- if (sib_core == thiscpu->core_id) {
- CPU_SET_S(so, size, thiscpu->put_ids);
- if ((so != cpu) && (cpus[so].ht_id < 0)) {
- cpus[so].ht_id = thread_id;
- cpus[cpu].ht_sibling_cpu_id[thread_id] = so;
- if (debug)
- fprintf(stderr, "%s: cpu%d.ht_sibling_cpu_id[%d] = %d\n", __func__, cpu, thread_id, so);
- thread_id += 1;
- }
- }
- }
- }
- } while (character == ',');
- fclose(filep);
-
- return CPU_COUNT_S(size, thiscpu->put_ids);
-}
-
/*
* run func(thread, core, package) in topology order
* skip non-present cpus
@@ -6236,7 +6242,7 @@ int for_all_cpus_2(int (func) (struct thread_data *, struct core_data *,
if (cpu_is_not_allowed(cpu))
continue;
- if (cpus[cpu].ht_id > 0) /* skip HT sibling */
+ if (has_allowed_lower_ht_sibling(cpu)) /* skip HT sibling */
continue;
t = &thread_base[cpu];
@@ -6251,11 +6257,20 @@ int for_all_cpus_2(int (func) (struct thread_data *, struct core_data *,
/* Handle HT sibling now */
int i;
- for (i = MAX_HT_ID; i > 0; --i) { /* ht_id 0 is self */
- if (cpus[cpu].ht_sibling_cpu_id[i] <= 0)
+ for (i = 0; i <= MAX_HT_ID; ++i) {
+ int sibling_cpu_id = cpus[cpu].ht_sibling_cpu_id[i];
+
+ if (sibling_cpu_id < 0)
+ break;
+
+ if (sibling_cpu_id == cpu)
+ continue;
+
+ if (cpu_is_not_allowed(sibling_cpu_id))
continue;
- t = &thread_base[cpus[cpu].ht_sibling_cpu_id[i]];
- t2 = &thread_base2[cpus[cpu].ht_sibling_cpu_id[i]];
+
+ t = &thread_base[sibling_cpu_id];
+ t2 = &thread_base2[sibling_cpu_id];
retval |= func(t, c, p, t2, c2, p2);
}
@@ -9475,6 +9490,37 @@ int dir_filter(const struct dirent *dirp)
return 0;
}
+int set_thread_siblings(struct cpu_topology *thiscpu)
+{
+ char path[80];
+ int cpu = thiscpu->cpu_id;
+ size_t size;
+ int ht_id = 0;
+ int i;
+
+ thiscpu->put_ids = CPU_ALLOC((topo.max_cpu_num + 1));
+ if (thiscpu->ht_id < 0)
+ thiscpu->ht_id = 0; /* first CPU in core */
+ if (!thiscpu->put_ids)
+ return -1;
+
+ size = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
+ CPU_ZERO_S(size, thiscpu->put_ids);
+
+ sprintf(path, "/sys/devices/system/cpu/cpu%d/topology", cpu);
+
+ initialize_cpu_set_from_sysfs(thiscpu->put_ids, path, "thread_siblings_list");
+
+ for (i = 0; i <= topo.max_cpu_num; ++i)
+ if (CPU_ISSET_S(i, size, thiscpu->put_ids)) {
+ cpus[i].ht_id = ht_id;
+ cpus[cpu].ht_sibling_cpu_id[ht_id] = i;
+ ht_id += 1;
+ }
+
+ return (ht_id - 1);
+}
+
void topology_probe(bool startup)
{
int i;
@@ -9505,6 +9551,8 @@ void topology_probe(bool startup)
cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
for_all_proc_cpus(mark_cpu_present);
+ if (debug)
+ print_cpu_set("present set", cpu_present_set);
/*
* Allocate and initialize cpu_possible_set
@@ -9515,6 +9563,8 @@ void topology_probe(bool startup)
cpu_possible_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
CPU_ZERO_S(cpu_possible_setsize, cpu_possible_set);
initialize_cpu_set_from_sysfs(cpu_possible_set, "/sys/devices/system/cpu", "possible");
+ if (debug)
+ print_cpu_set("possible set", cpu_possible_set);
/*
* Allocate and initialize cpu_effective_set
@@ -9525,6 +9575,8 @@ void topology_probe(bool startup)
cpu_effective_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
CPU_ZERO_S(cpu_effective_setsize, cpu_effective_set);
update_effective_set(startup);
+ if (debug)
+ print_cpu_set("effective set", cpu_effective_set);
/*
* Allocate and initialize cpu_allowed_set
@@ -9568,6 +9620,8 @@ void topology_probe(bool startup)
CPU_SET_S(i, cpu_allowed_setsize, cpu_allowed_set);
}
+ if (debug)
+ print_cpu_set("allowed set", cpu_allowed_set);
if (!CPU_COUNT_S(cpu_allowed_setsize, cpu_allowed_set))
err(-ENODEV, "No valid cpus found");
@@ -9590,6 +9644,7 @@ void topology_probe(bool startup)
* For online cpus
* find max_core_id, max_package_id, num_cores (per system)
*/
+ topo.min_module_id = 0x7FFFFFFF;
for (i = 0; i <= topo.max_cpu_num; ++i) {
int siblings;
@@ -9621,6 +9676,13 @@ void topology_probe(bool startup)
if (cpus[i].physical_node_id > topo.max_node_num)
topo.max_node_num = cpus[i].physical_node_id;
+ /* get module information */
+ cpus[i].module_id = get_module_id(i);
+ if (cpus[i].module_id > topo.max_module_id)
+ topo.max_module_id = cpus[i].module_id;
+ if (cpus[i].module_id < topo.min_module_id)
+ topo.min_module_id = cpus[i].module_id;
+
/* get core information */
cpus[i].core_id = get_core_id(i);
if (cpus[i].core_id > max_core_id)
@@ -9642,6 +9704,11 @@ void topology_probe(bool startup)
if (!summary_only)
BIC_PRESENT(BIC_Core);
+ if (debug > 1)
+ fprintf(outf, "min_module_id %d max_module_id %d\n", topo.min_module_id, topo.max_module_id);
+ if (!summary_only && (topo.min_module_id != topo.max_module_id))
+ BIC_PRESENT(BIC_Module);
+
topo.num_die = topo.max_die_id + 1;
if (debug > 1)
fprintf(outf, "max_die_id %d, sizing for %d die\n", topo.max_die_id, topo.num_die);
@@ -9671,12 +9738,18 @@ void topology_probe(bool startup)
return;
for (i = 0; i <= topo.max_cpu_num; ++i) {
+ int ht_id;
+
if (cpu_is_not_present(i))
continue;
fprintf(outf,
- "cpu %d pkg %d die %d l3 %d node %d lnode %d core %d thread %d\n",
+ "cpu %d pkg %d die %d l3 %d node %d lnode %d module 0x%x core %d ht_id %d",
i, cpus[i].package_id, cpus[i].die_id, cpus[i].l3_id,
- cpus[i].physical_node_id, cpus[i].logical_node_id, cpus[i].core_id, cpus[i].ht_id);
+ cpus[i].physical_node_id, cpus[i].logical_node_id, cpus[i].module_id, cpus[i].core_id, cpus[i].ht_id);
+ fprintf(outf, " siblings");
+ for (ht_id = 0; ht_id <= MAX_HT_ID; ++ht_id)
+ fprintf(outf, " %d", cpus[i].ht_sibling_cpu_id[ht_id]);
+ fprintf(outf, "\n");
}
}
@@ -9817,6 +9890,8 @@ void topology_update(void)
topo.allowed_cores = 0;
topo.allowed_packages = 0;
for_all_cpus(update_topo, ODD_COUNTERS);
+ if (debug)
+ fprintf(stderr, "allowed_cpus %d allowed_cores %d allowed_packages %d\n", topo.allowed_cpus, topo.allowed_cores, topo.allowed_packages);
}
void setup_all_buffers(bool startup)
@@ -10533,7 +10608,7 @@ int get_and_dump_counters(void)
void print_version()
{
- fprintf(outf, "turbostat version 2026.02.14 - Len Brown <lenb@kernel.org>\n");
+ fprintf(outf, "turbostat version 2026.04.21 - Len Brown <lenb@kernel.org>\n");
}
#define COMMAND_LINE_SIZE 2048
@@ -11449,7 +11524,7 @@ void cmdline(int argc, char **argv)
}
optind = 0;
- while ((opt = getopt_long_only(argc, argv, "+C:c:Dde:hi:Jn:N:o:qMST:v", long_options, &option_index)) != -1) {
+ while ((opt = getopt_long_only(argc, argv, "+C:c:Dde:hi:Jn:N:o:qMPST:v", long_options, &option_index)) != -1) {
switch (opt) {
case 'a':
parse_add_command(optarg);
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
index 0aa981c18e56..836553e9a92c 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
@@ -15,6 +15,8 @@ x86_energy_perf_policy \- Manage Energy vs. Performance Policy
.br
.RB "other: (\-\-force | \-\-hwp-enable | \-\-turbo-enable) value)"
.br
+.RB "soc-slider: --soc-slider-balance # | --soc-slider-offset # | --platform-profile <name>"
+.br
.RB "value: # | default | performance | balance-performance | balance-power | power"
.SH DESCRIPTION
\fBx86_energy_perf_policy\fP
@@ -154,6 +156,26 @@ level on this processor, specified in multiples of 100 MHz.
in the sliding window that HWP uses to maintain average frequency.
This parameter is meaningful only when the "desired" field above is non-zero.
Default is 0, allowing the HW to choose.
+.SH SOC SLIDER OPTIONS
+.PP
+Note that the Platform Profile Name must be "SoC Slider", and the
+Platform Profile must be "balanced" for the --soc-slider-balance
+and --soc-slider-offset options to take effect.
+.PP
+\fB--soc-slider-balance #\fP write numeric value to the SoC Slider.
+Values range from 0 to 6.
+Lower values result in higher performance,
+and higher values improve energy efficiency.
+Actual values are model specific.
+.PP
+\fB--soc-slider-offset #\fP write the numeric value to the SoC Slider Offset.
+The slider offset is the maximum value that software allows the SoC to
+autonomously add to the SoC Slider to improve energy efficiency.
+The value 0 prohibits the SoC from autonomously changing the slider.
+.PP
+\fB--platform-profile <name>\fP set the platform profile to <name>.
+Available choices are in platform-profile-0/choices. The SoC Slider
+driver currently supports "low-power", "balanced", and "performance".
.SH OTHER OPTIONS
.PP
\fB-f, --force\fP writes the specified values without bounds checking.
@@ -208,6 +230,10 @@ runs only as root.
EPB: /sys/devices/system/cpu/cpu*/power/energy_perf_bias
EPP: /sys/devices/system/cpu/cpu*/cpufreq/energy_performance_preference
MSR: /dev/cpu/*/msr
+Platform Profile Name: /sys/class/platform-profile/platform-profile-0/name
+Platform Profile: /sys/class/platform-profile/platform-profile-0/profile
+SOC Slider Balanced: /sys/module/processor_thermal_soc_slider/parameters/slider_balance
+SOC Slider Balanced Offset: /sys/module/processor_thermal_soc_slider/parameters/slider_offset
.fi
.SH "SEE ALSO"
.nf
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
index ac37132207a4..0dc959e30076 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -4,7 +4,7 @@
* policy preference bias on recent X86 processors.
*/
/*
- * Copyright (c) 2010 - 2025 Intel Corporation.
+ * Copyright (c) 2010 - 2026 Intel Corporation.
* Len Brown <len.brown@intel.com>
*/
@@ -82,21 +82,36 @@ size_t cpu_setsize;
char *proc_stat = "/proc/stat";
-unsigned int has_epb; /* MSR_IA32_ENERGY_PERF_BIAS */
-unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
+unsigned int has_epb; /* MSR_IA32_ENERGY_PERF_BIAS */
+unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
/* IA32_HWP_REQUEST, IA32_HWP_STATUS */
-unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */
+unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */
unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */
unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */
unsigned int has_hwp_request_pkg; /* IA32_HWP_REQUEST_PKG */
unsigned int bdx_highest_ratio;
+unsigned char update_soc_slider_balance;
+unsigned char update_soc_slider_offset;
+unsigned char update_platform_profile;
+int soc_slider_balance;
+int soc_slider_offset;
+char platform_profile[64];
+
#define PATH_TO_CPU "/sys/devices/system/cpu/"
#define SYSFS_PATH_MAX 255
+#define PATH_SOC_SLIDER_BALANCE "/sys/module/processor_thermal_soc_slider/parameters/slider_balance"
+#define PATH_SOC_SLIDER_OFFSET "/sys/module/processor_thermal_soc_slider/parameters/slider_offset"
+#define PATH_PLATFORM_PROFILE "/sys/class/platform-profile/platform-profile-0/profile"
+#define PATH_PLATFORM_PROFILE_NAME "/sys/class/platform-profile/platform-profile-0/name"
+#define POWER_SLIDER_NAME "SoC Power Slider"
static int use_android_msr_path;
+static unsigned int read_sysfs(const char *, char *, size_t);
+static int sysfs_read_string(const char *, char *, size_t);
+
/*
* maintain compatibility with original implementation, but don't document it:
*/
@@ -106,8 +121,8 @@ void usage(void)
fprintf(stderr, "scope: --cpu cpu-list [--hwp-use-pkg #] | --pkg pkg-list\n");
fprintf(stderr, "field: --all | --epb | --hwp-epp | --hwp-min | --hwp-max | --hwp-desired\n");
fprintf(stderr, "other: --hwp-enable | --turbo-enable (0 | 1) | --help | --force\n");
- fprintf(stderr,
- "value: ( # | \"normal\" | \"performance\" | \"balance-performance\" | \"balance-power\"| \"power\")\n");
+ fprintf(stderr, "soc-slider: --soc-slider-balance # | --soc-slider-offset # | --platform-profile <name>\n");
+ fprintf(stderr, "value: ( # | \"normal\" | \"performance\" | \"balance-performance\" | \"balance-power\"| \"power\")\n");
fprintf(stderr, "--hwp-window usec\n");
fprintf(stderr, "Specify only Energy Performance BIAS (legacy usage):\n");
@@ -135,6 +150,7 @@ int ratio_2_msr_perf(int ratio)
return msr_perf;
}
+
int msr_perf_2_ratio(int msr_perf)
{
int ratio;
@@ -143,8 +159,8 @@ int msr_perf_2_ratio(int msr_perf)
if (!bdx_highest_ratio)
return msr_perf;
- d = (double)msr_perf * (double) bdx_highest_ratio / 255.0;
- d = d + 0.5; /* round */
+ d = (double)msr_perf * (double)bdx_highest_ratio / 255.0;
+ d = d + 0.5; /* round */
ratio = (int)d;
if (debug)
@@ -152,6 +168,7 @@ int msr_perf_2_ratio(int msr_perf)
return ratio;
}
+
int parse_cmdline_epb(int i)
{
if (!has_epb)
@@ -198,6 +215,7 @@ int parse_cmdline_hwp_min(int i)
}
return i;
}
+
/*
* "power" changes hwp_max to cap.lowest
* All others leave it at cap.highest
@@ -217,6 +235,7 @@ int parse_cmdline_hwp_max(int i)
}
return i;
}
+
/*
* for --hwp-des, all strings leave it in autonomous mode
* If you want to change it, you need to explicitly pick a value
@@ -254,7 +273,7 @@ int parse_cmdline_hwp_window(int i)
fprintf(stderr, "--hwp-window: 0 for auto; 1 - 1270000000 usec for window duration\n");
usage();
}
- for (exponent = 0; ; ++exponent) {
+ for (exponent = 0;; ++exponent) {
if (debug)
printf("%d 10^%d\n", i, exponent);
@@ -268,6 +287,7 @@ int parse_cmdline_hwp_window(int i)
return (exponent << 7) | i;
}
+
int parse_cmdline_hwp_epp(int i)
{
update_hwp_epp = 1;
@@ -289,6 +309,7 @@ int parse_cmdline_hwp_epp(int i)
}
return i;
}
+
int parse_cmdline_turbo(int i)
{
update_turbo = 1;
@@ -508,7 +529,7 @@ void parse_cmdline_pkg(char *s)
}
}
-void for_packages(unsigned long long pkg_set, int (func)(int))
+void for_packages(unsigned long long pkg_set, int (func) (int))
{
int pkg_num;
@@ -518,9 +539,79 @@ void for_packages(unsigned long long pkg_set, int (func)(int))
}
}
+static int parse_cmdline_int(const char *s, int *out)
+{
+ char *endp;
+ long val;
+
+ val = strtol(s, &endp, 0);
+ if (endp == s || errno == ERANGE)
+ return -1;
+ if (*endp != '\0')
+ return -1;
+ if (val < INT_MIN || val > INT_MAX)
+ return -1;
+
+ *out = (int)val;
+ return 0;
+}
+
void print_version(void)
{
- printf("x86_energy_perf_policy 2025.11.22 Len Brown <lenb@kernel.org>\n");
+ printf("x86_energy_perf_policy 2026.04.25 Len Brown <lenb@kernel.org>\n");
+}
+
+static int platform_profile_access(int mode)
+{
+ if (access(PATH_PLATFORM_PROFILE, mode)) {
+ if (debug)
+ fprintf(stderr, "Can not access %s\n", PATH_PLATFORM_PROFILE);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int platform_profile_name_is(char *name)
+{
+ char buf[64];
+
+ if (sysfs_read_string(PATH_PLATFORM_PROFILE_NAME, buf, sizeof(buf)) != 0) {
+ if (debug)
+ fprintf(stderr, "Can not read %s\n", PATH_PLATFORM_PROFILE_NAME);
+ return 0;
+ }
+
+ if (strncmp(buf, name, 16)) {
+ if (debug)
+ fprintf(stderr, "%s does not match '%s'\n", PATH_PLATFORM_PROFILE_NAME, name);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int soc_slider_access(int mode)
+{
+ if (!platform_profile_access(R_OK))
+ return 0;
+
+ if (!platform_profile_name_is(POWER_SLIDER_NAME))
+ return 0;
+
+ if (access(PATH_SOC_SLIDER_BALANCE, mode)) {
+ if (debug)
+ fprintf(stderr, "Can not access %s\n", PATH_SOC_SLIDER_BALANCE);
+ return 0;
+ }
+
+ if (access(PATH_SOC_SLIDER_OFFSET, mode)) {
+ if (debug)
+ fprintf(stderr, "Can not access %s\n", PATH_SOC_SLIDER_OFFSET);
+ return 0;
+ }
+
+ return 1;
}
void cmdline(int argc, char **argv)
@@ -529,30 +620,32 @@ void cmdline(int argc, char **argv)
int option_index = 0;
static struct option long_options[] = {
- {"all", required_argument, 0, 'a'},
- {"cpu", required_argument, 0, 'c'},
- {"pkg", required_argument, 0, 'p'},
- {"debug", no_argument, 0, 'd'},
- {"hwp-desired", required_argument, 0, 'D'},
- {"epb", required_argument, 0, 'B'},
- {"force", no_argument, 0, 'f'},
- {"hwp-enable", no_argument, 0, 'e'},
- {"help", no_argument, 0, 'h'},
- {"hwp-epp", required_argument, 0, 'P'},
- {"hwp-min", required_argument, 0, 'm'},
- {"hwp-max", required_argument, 0, 'M'},
- {"read", no_argument, 0, 'r'},
- {"turbo-enable", required_argument, 0, 't'},
- {"hwp-use-pkg", required_argument, 0, 'u'},
- {"version", no_argument, 0, 'v'},
- {"hwp-window", required_argument, 0, 'w'},
- {0, 0, 0, 0 }
+ { "all", required_argument, 0, 'a' },
+ { "cpu", required_argument, 0, 'c' },
+ { "pkg", required_argument, 0, 'p' },
+ { "debug", no_argument, 0, 'd' },
+ { "hwp-desired", required_argument, 0, 'D' },
+ { "epb", required_argument, 0, 'B' },
+ { "force", no_argument, 0, 'f' },
+ { "hwp-enable", no_argument, 0, 'e' },
+ { "help", no_argument, 0, 'h' },
+ { "hwp-epp", required_argument, 0, 'P' },
+ { "hwp-min", required_argument, 0, 'm' },
+ { "hwp-max", required_argument, 0, 'M' },
+ { "read", no_argument, 0, 'r' },
+ { "turbo-enable", required_argument, 0, 't' },
+ { "hwp-use-pkg", required_argument, 0, 'u' },
+ { "version", no_argument, 0, 'v' },
+ { "hwp-window", required_argument, 0, 'w' },
+ { "soc-slider-balance", required_argument, 0, 'S' },
+ { "soc-slider-offset", required_argument, 0, 'O' },
+ { "platform-profile", required_argument, 0, 'F' },
+ { 0, 0, 0, 0 }
};
progname = argv[0];
- while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw:",
- long_options, &option_index)) != -1) {
+ while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw::S:O:F:", long_options, &option_index)) != -1) {
switch (opt) {
case 'a':
parse_cmdline_all(optarg);
@@ -579,12 +672,27 @@ void cmdline(int argc, char **argv)
case 'D':
req_update.hwp_desired = parse_cmdline_hwp_desired(parse_optarg_string(optarg));
break;
+ case 'F':
+ if (strlen(optarg) >= sizeof(platform_profile))
+ errx(1, "--platform-profile: value too long");
+ if (!platform_profile_access(W_OK))
+ errx(1, "Can not update platform-profile in '%s'", PATH_PLATFORM_PROFILE);
+ strcpy(platform_profile, optarg);
+ update_platform_profile = 1;
+ break;
case 'm':
req_update.hwp_min = parse_cmdline_hwp_min(parse_optarg_string(optarg));
break;
case 'M':
req_update.hwp_max = parse_cmdline_hwp_max(parse_optarg_string(optarg));
break;
+ case 'O':
+ if (parse_cmdline_int(optarg, &soc_slider_offset))
+ errx(1, "--soc-slider-offset: invalid value");
+ if (!soc_slider_access(W_OK))
+ errx(1, "Unable to write SOC Slider Offset");
+ update_soc_slider_offset = 1;
+ break;
case 'p':
parse_cmdline_pkg(optarg);
break;
@@ -594,6 +702,13 @@ void cmdline(int argc, char **argv)
case 'r':
/* v1 used -r to specify read-only mode, now the default */
break;
+ case 'S':
+ if (parse_cmdline_int(optarg, &soc_slider_balance))
+ errx(1, "--soc-slider-balance: invalid value");
+ if (!soc_slider_access(W_OK))
+ errx(1, "Unable to write SOC Slider-Balance in '%s'", PATH_SOC_SLIDER_BALANCE);
+ update_soc_slider_balance = 1;
+ break;
case 't':
turbo_update_value = parse_cmdline_turbo(parse_optarg_string(optarg));
break;
@@ -681,8 +796,7 @@ void err_on_hypervisor(void)
free(buffer);
if (hypervisor)
- err(-1,
- "not supported on this virtual machine");
+ err(-1, "not supported on this virtual machine");
}
int get_msr(int cpu, int offset, unsigned long long *msr)
@@ -694,9 +808,7 @@ int get_msr(int cpu, int offset, unsigned long long *msr)
sprintf(pathname, use_android_msr_path ? "/dev/msr%d" : "/dev/cpu/%d/msr", cpu);
fd = open(pathname, O_RDONLY);
if (fd < 0)
- err(-1, "%s open failed, try chown or chmod +r %s, or run as root",
- pathname, use_android_msr_path ? "/dev/msr*" : "/dev/cpu/*/msr");
-
+ err(-1, "%s open failed, try chown or chmod +r %s, or run as root", pathname, use_android_msr_path ? "/dev/msr*" : "/dev/cpu/*/msr");
retval = pread(fd, msr, sizeof(*msr), offset);
if (retval != sizeof(*msr)) {
@@ -720,8 +832,7 @@ int put_msr(int cpu, int offset, unsigned long long new_msr)
sprintf(pathname, use_android_msr_path ? "/dev/msr%d" : "/dev/cpu/%d/msr", cpu);
fd = open(pathname, O_RDWR);
if (fd < 0)
- err(-1, "%s open failed, try chown or chmod +r %s, or run as root",
- pathname, use_android_msr_path ? "/dev/msr*" : "/dev/cpu/*/msr");
+ err(-1, "%s open failed, try chown or chmod +r %s, or run as root", pathname, use_android_msr_path ? "/dev/msr*" : "/dev/cpu/*/msr");
retval = pwrite(fd, &new_msr, sizeof(new_msr), offset);
if (retval != sizeof(new_msr))
@@ -753,7 +864,7 @@ static unsigned int read_sysfs(const char *path, char *buf, size_t buflen)
buf[numread] = '\0';
close(fd);
- return (unsigned int) numread;
+ return (unsigned int)numread;
}
static unsigned int write_sysfs(const char *path, char *buf, size_t buflen)
@@ -767,14 +878,40 @@ static unsigned int write_sysfs(const char *path, char *buf, size_t buflen)
numwritten = write(fd, buf, buflen - 1);
if (numwritten < 1) {
- perror("write failed\n");
+ buf[strcspn(buf, "\n")] = '\0';
+ warn("Write '%s' to '%s' failed", buf, path);
close(fd);
return -1;
}
close(fd);
- return (unsigned int) numwritten;
+ return (unsigned int)numwritten;
+}
+
+static int sysfs_read_string(const char *path, char *buf, size_t buflen)
+{
+ unsigned int len;
+ size_t n;
+
+ len = read_sysfs(path, buf, buflen);
+ if (!len)
+ return -1;
+
+ n = strcspn(buf, "\n");
+ buf[n] = '\0';
+ return 0;
+}
+
+static int sysfs_write_string(const char *path, const char *buf)
+{
+ char tmp[128];
+ int len;
+
+ len = snprintf(tmp, sizeof(tmp), "%s\n", buf);
+ if (len < 0 || len >= (int)sizeof(tmp))
+ return -1;
+ return write_sysfs(path, tmp, (size_t)len + 1) ? 0 : -1;
}
void print_hwp_cap(int cpu, struct msr_hwp_cap *cap, char *str)
@@ -782,9 +919,9 @@ void print_hwp_cap(int cpu, struct msr_hwp_cap *cap, char *str)
if (cpu != -1)
printf("cpu%d: ", cpu);
- printf("HWP_CAP: low %d eff %d guar %d high %d\n",
- cap->lowest, cap->efficient, cap->guaranteed, cap->highest);
+ printf("HWP_CAP: low %d eff %d guar %d high %d\n", cap->lowest, cap->efficient, cap->guaranteed, cap->highest);
}
+
void read_hwp_cap(int cpu, struct msr_hwp_cap *cap, unsigned int msr_offset)
{
unsigned long long msr;
@@ -806,9 +943,9 @@ void print_hwp_request(int cpu, struct msr_hwp_request *h, char *str)
printf("%s", str);
printf("HWP_REQ: min %d max %d des %d epp %d window 0x%x (%d*10^%dus) use_pkg %d\n",
- h->hwp_min, h->hwp_max, h->hwp_desired, h->hwp_epp,
- h->hwp_window, h->hwp_window & 0x7F, (h->hwp_window >> 7) & 0x7, h->hwp_use_pkg);
+ h->hwp_min, h->hwp_max, h->hwp_desired, h->hwp_epp, h->hwp_window, h->hwp_window & 0x7F, (h->hwp_window >> 7) & 0x7, h->hwp_use_pkg);
}
+
void print_hwp_request_pkg(int pkg, struct msr_hwp_request *h, char *str)
{
printf("pkg%d: ", pkg);
@@ -817,9 +954,9 @@ void print_hwp_request_pkg(int pkg, struct msr_hwp_request *h, char *str)
printf("%s", str);
printf("HWP_REQ_PKG: min %d max %d des %d epp %d window 0x%x (%d*10^%dus)\n",
- h->hwp_min, h->hwp_max, h->hwp_desired, h->hwp_epp,
- h->hwp_window, h->hwp_window & 0x7F, (h->hwp_window >> 7) & 0x7);
+ h->hwp_min, h->hwp_max, h->hwp_desired, h->hwp_epp, h->hwp_window, h->hwp_window & 0x7F, (h->hwp_window >> 7) & 0x7);
}
+
void read_hwp_request_msr(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
{
unsigned long long msr;
@@ -840,9 +977,7 @@ void write_hwp_request_msr(int cpu, struct msr_hwp_request *hwp_req, unsigned in
if (debug > 1)
printf("cpu%d: requesting min %d max %d des %d epp %d window 0x%0x use_pkg %d\n",
- cpu, hwp_req->hwp_min, hwp_req->hwp_max,
- hwp_req->hwp_desired, hwp_req->hwp_epp,
- hwp_req->hwp_window, hwp_req->hwp_use_pkg);
+ cpu, hwp_req->hwp_min, hwp_req->hwp_max, hwp_req->hwp_desired, hwp_req->hwp_epp, hwp_req->hwp_window, hwp_req->hwp_use_pkg);
msr |= HWP_MIN_PERF(ratio_2_msr_perf(hwp_req->hwp_min));
msr |= HWP_MAX_PERF(ratio_2_msr_perf(hwp_req->hwp_max));
@@ -900,6 +1035,58 @@ static int set_epb_sysfs(int cpu, int val)
return (int)val;
}
+static void print_soc_slider(void)
+{
+ char buf[64];
+
+ if (!soc_slider_access(R_OK))
+ return;
+
+ if (sysfs_read_string(PATH_SOC_SLIDER_BALANCE, buf, sizeof(buf)) == 0)
+ printf("soc-slider-balance: %s\n", buf);
+
+ if (sysfs_read_string(PATH_SOC_SLIDER_OFFSET, buf, sizeof(buf)) == 0)
+ printf("soc-slider-offset: %s\n", buf);
+}
+
+static void print_platform_profile(void)
+{
+ char buf[64];
+
+ if (!platform_profile_access(R_OK))
+ return;
+
+ if (sysfs_read_string(PATH_PLATFORM_PROFILE_NAME, buf, sizeof(buf)) == 0)
+ printf("platform-profile-name: %s\n", buf);
+
+ if (sysfs_read_string(PATH_PLATFORM_PROFILE, buf, sizeof(buf)) == 0)
+ printf("platform-profile: %s\n", buf);
+}
+
+static int update_soc_slider(void)
+{
+ char tmp[32];
+
+ if (update_soc_slider_balance) {
+ snprintf(tmp, sizeof(tmp), "%d", soc_slider_balance);
+ if (sysfs_write_string(PATH_SOC_SLIDER_BALANCE, tmp))
+ err(1, "soc-slider-balance write failed");
+ }
+
+ if (update_soc_slider_offset) {
+ snprintf(tmp, sizeof(tmp), "%d", soc_slider_offset);
+ if (sysfs_write_string(PATH_SOC_SLIDER_OFFSET, tmp))
+ err(1, "soc-slider-offset write failed");
+ }
+
+ if (update_platform_profile) {
+ if (sysfs_write_string(PATH_PLATFORM_PROFILE, platform_profile))
+ err(1, "platform-profile write failed");
+ }
+
+ return 0;
+}
+
int print_cpu_msrs(int cpu)
{
struct msr_hwp_request req;
@@ -908,7 +1095,7 @@ int print_cpu_msrs(int cpu)
epb = get_epb_sysfs(cpu);
if (epb >= 0)
- printf("cpu%d: EPB %u\n", cpu, (unsigned int) epb);
+ printf("cpu%d: EPB %u\n", cpu, (unsigned int)epb);
if (!has_hwp)
return 0;
@@ -936,17 +1123,13 @@ int print_pkg_msrs(int pkg)
if (has_hwp_notify) {
get_msr(first_cpu_in_pkg[pkg], MSR_HWP_INTERRUPT, &msr);
fprintf(stderr,
- "pkg%d: MSR_HWP_INTERRUPT: 0x%08llx (Excursion_Min-%sabled, Guaranteed_Perf_Change-%sabled)\n",
- pkg, msr,
- ((msr) & 0x2) ? "EN" : "Dis",
- ((msr) & 0x1) ? "EN" : "Dis");
+ "pkg%d: MSR_HWP_INTERRUPT: 0x%08llx (Excursion_Min-%sabled, Guaranteed_Perf_Change-%sabled)\n",
+ pkg, msr, ((msr) & 0x2) ? "EN" : "Dis", ((msr) & 0x1) ? "EN" : "Dis");
}
get_msr(first_cpu_in_pkg[pkg], MSR_HWP_STATUS, &msr);
fprintf(stderr,
"pkg%d: MSR_HWP_STATUS: 0x%08llx (%sExcursion_Min, %sGuaranteed_Perf_Change)\n",
- pkg, msr,
- ((msr) & 0x4) ? "" : "No-",
- ((msr) & 0x1) ? "" : "No-");
+ pkg, msr, ((msr) & 0x4) ? "" : "No-", ((msr) & 0x1) ? "" : "No-");
return 0;
}
@@ -960,6 +1143,7 @@ int ratio_2_sysfs_khz(int ratio)
return ratio * bclk_khz;
}
+
/*
* If HWP is enabled and cpufreq sysfs attribtes are present,
* then update via sysfs. The intel_pstate driver may modify (clip)
@@ -976,8 +1160,7 @@ void update_cpufreq_scaling_freq(int is_max, int cpu, unsigned int ratio)
int retval;
int khz;
- sprintf(pathname, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_%s_freq",
- cpu, is_max ? "max" : "min");
+ sprintf(pathname, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_%s_freq", cpu, is_max ? "max" : "min");
fp = fopen(pathname, "w");
if (!fp) {
@@ -1029,19 +1212,16 @@ int verify_hwp_req_self_consistency(int cpu, struct msr_hwp_request *req)
{
/* fail if min > max requested */
if (req->hwp_min > req->hwp_max) {
- errx(1, "cpu%d: requested hwp-min %d > hwp_max %d",
- cpu, req->hwp_min, req->hwp_max);
+ errx(1, "cpu%d: requested hwp-min %d > hwp_max %d", cpu, req->hwp_min, req->hwp_max);
}
/* fail if desired > max requestd */
if (req->hwp_desired && (req->hwp_desired > req->hwp_max)) {
- errx(1, "cpu%d: requested hwp-desired %d > hwp_max %d",
- cpu, req->hwp_desired, req->hwp_max);
+ errx(1, "cpu%d: requested hwp-desired %d > hwp_max %d", cpu, req->hwp_desired, req->hwp_max);
}
/* fail if desired < min requestd */
if (req->hwp_desired && (req->hwp_desired < req->hwp_min)) {
- errx(1, "cpu%d: requested hwp-desired %d < requested hwp_min %d",
- cpu, req->hwp_desired, req->hwp_min);
+ errx(1, "cpu%d: requested hwp-desired %d < requested hwp_min %d", cpu, req->hwp_desired, req->hwp_min);
}
return 0;
@@ -1051,39 +1231,30 @@ int check_hwp_request_v_hwp_capabilities(int cpu, struct msr_hwp_request *req, s
{
if (update_hwp_max) {
if (req->hwp_max > cap->highest)
- errx(1, "cpu%d: requested max %d > capabilities highest %d, use --force?",
- cpu, req->hwp_max, cap->highest);
+ errx(1, "cpu%d: requested max %d > capabilities highest %d, use --force?", cpu, req->hwp_max, cap->highest);
if (req->hwp_max < cap->lowest)
- errx(1, "cpu%d: requested max %d < capabilities lowest %d, use --force?",
- cpu, req->hwp_max, cap->lowest);
+ errx(1, "cpu%d: requested max %d < capabilities lowest %d, use --force?", cpu, req->hwp_max, cap->lowest);
}
if (update_hwp_min) {
if (req->hwp_min > cap->highest)
- errx(1, "cpu%d: requested min %d > capabilities highest %d, use --force?",
- cpu, req->hwp_min, cap->highest);
+ errx(1, "cpu%d: requested min %d > capabilities highest %d, use --force?", cpu, req->hwp_min, cap->highest);
if (req->hwp_min < cap->lowest)
- errx(1, "cpu%d: requested min %d < capabilities lowest %d, use --force?",
- cpu, req->hwp_min, cap->lowest);
+ errx(1, "cpu%d: requested min %d < capabilities lowest %d, use --force?", cpu, req->hwp_min, cap->lowest);
}
if (update_hwp_min && update_hwp_max && (req->hwp_min > req->hwp_max))
- errx(1, "cpu%d: requested min %d > requested max %d",
- cpu, req->hwp_min, req->hwp_max);
+ errx(1, "cpu%d: requested min %d > requested max %d", cpu, req->hwp_min, req->hwp_max);
if (update_hwp_desired && req->hwp_desired) {
if (req->hwp_desired > req->hwp_max)
- errx(1, "cpu%d: requested desired %d > requested max %d, use --force?",
- cpu, req->hwp_desired, req->hwp_max);
+ errx(1, "cpu%d: requested desired %d > requested max %d, use --force?", cpu, req->hwp_desired, req->hwp_max);
if (req->hwp_desired < req->hwp_min)
- errx(1, "cpu%d: requested desired %d < requested min %d, use --force?",
- cpu, req->hwp_desired, req->hwp_min);
+ errx(1, "cpu%d: requested desired %d < requested min %d, use --force?", cpu, req->hwp_desired, req->hwp_min);
if (req->hwp_desired < cap->lowest)
- errx(1, "cpu%d: requested desired %d < capabilities lowest %d, use --force?",
- cpu, req->hwp_desired, cap->lowest);
+ errx(1, "cpu%d: requested desired %d < capabilities lowest %d, use --force?", cpu, req->hwp_desired, cap->lowest);
if (req->hwp_desired > cap->highest)
- errx(1, "cpu%d: requested desired %d > capabilities highest %d, use --force?",
- cpu, req->hwp_desired, cap->highest);
+ errx(1, "cpu%d: requested desired %d > capabilities highest %d, use --force?", cpu, req->hwp_desired, cap->highest);
}
return 0;
@@ -1134,6 +1305,7 @@ int update_hwp_request_msr(int cpu)
}
return 0;
}
+
int update_hwp_request_pkg_msr(int pkg)
{
struct msr_hwp_request req;
@@ -1205,8 +1377,7 @@ int update_cpu_epb_sysfs(int cpu)
set_epb_sysfs(cpu, new_epb);
if (verbose)
- printf("cpu%d: ENERGY_PERF_BIAS old: %d new: %d\n",
- cpu, epb, (unsigned int) new_epb);
+ printf("cpu%d: ENERGY_PERF_BIAS old: %d new: %d\n", cpu, epb, (unsigned int)new_epb);
return 0;
}
@@ -1222,7 +1393,7 @@ int update_cpu_msrs(int cpu)
turbo_is_present_and_disabled = ((msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE) != 0);
- if (turbo_update_value == 1) {
+ if (turbo_update_value == 1) {
if (turbo_is_present_and_disabled) {
msr &= ~MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
put_msr(cpu, MSR_IA32_MISC_ENABLE, msr);
@@ -1291,6 +1462,7 @@ int set_max_cpu_pkg_num(int cpu)
return 0;
}
+
int mark_cpu_present(int cpu)
{
CPU_SET_S(cpu, cpu_setsize, cpu_present_set);
@@ -1301,7 +1473,7 @@ int mark_cpu_present(int cpu)
* run func(cpu) on every cpu in /proc/stat
* return max_cpu number
*/
-int for_all_proc_cpus(int (func)(int))
+int for_all_proc_cpus(int (func) (int))
{
FILE *fp;
int cpu_num;
@@ -1328,7 +1500,7 @@ int for_all_proc_cpus(int (func)(int))
return 0;
}
-void for_all_cpus_in_set(size_t set_size, cpu_set_t *cpu_set, int (func)(int))
+void for_all_cpus_in_set(size_t set_size, cpu_set_t *cpu_set, int (func) (int))
{
int cpu_num;
@@ -1336,7 +1508,8 @@ void for_all_cpus_in_set(size_t set_size, cpu_set_t *cpu_set, int (func)(int))
if (CPU_ISSET_S(cpu_num, set_size, cpu_set))
func(cpu_num);
}
-int for_all_cpus_in_set_and(size_t set_size, cpu_set_t *cpu_set, int (func)(int))
+
+int for_all_cpus_in_set_and(size_t set_size, cpu_set_t *cpu_set, int (func) (int))
{
int cpu_num;
int retval = 1;
@@ -1385,7 +1558,7 @@ void verify_hwp_is_enabled(void)
{
int retval;
- if (!has_hwp) /* set in early_cpuid() */
+ if (!has_hwp) /* set in early_cpuid() */
return;
retval = for_all_cpus_in_set_and(cpu_setsize, cpu_selected_set, is_hwp_enabled_on_cpu);
@@ -1402,21 +1575,18 @@ int req_update_bounds_check(void)
return 0;
/* fail if min > max requested */
- if ((update_hwp_max && update_hwp_min) &&
- (req_update.hwp_min > req_update.hwp_max)) {
+ if ((update_hwp_max && update_hwp_min) && (req_update.hwp_min > req_update.hwp_max)) {
printf("hwp-min %d > hwp_max %d\n", req_update.hwp_min, req_update.hwp_max);
return -EINVAL;
}
/* fail if desired > max requestd */
- if (req_update.hwp_desired && update_hwp_max &&
- (req_update.hwp_desired > req_update.hwp_max)) {
+ if (req_update.hwp_desired && update_hwp_max && (req_update.hwp_desired > req_update.hwp_max)) {
printf("hwp-desired cannot be greater than hwp_max\n");
return -EINVAL;
}
/* fail if desired < min requestd */
- if (req_update.hwp_desired && update_hwp_min &&
- (req_update.hwp_desired < req_update.hwp_min)) {
+ if (req_update.hwp_desired && update_hwp_min && (req_update.hwp_desired < req_update.hwp_min)) {
printf("hwp-desired cannot be less than hwp_min\n");
return -EINVAL;
}
@@ -1459,9 +1629,7 @@ void probe_dev_msr(void)
}
}
-static void get_cpuid_or_exit(unsigned int leaf,
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
+static void get_cpuid_or_exit(unsigned int leaf, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
{
if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
errx(1, "Processor not supported\n");
@@ -1515,8 +1683,7 @@ void parse_cpuid(void)
genuine_intel = 1;
if (debug)
- fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
- (char *)&ebx, (char *)&edx, (char *)&ecx);
+ fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ", (char *)&ebx, (char *)&edx, (char *)&ecx);
get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
family = (fms >> 8) & 0xf;
@@ -1526,23 +1693,18 @@ void parse_cpuid(void)
model += ((fms >> 16) & 0xf) << 4;
if (debug) {
- fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
- max_level, family, model, stepping, family, model, stepping);
+ fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n", max_level, family, model, stepping, family, model, stepping);
fprintf(stderr, "CPUID(1): %s %s %s %s %s %s %s %s\n",
ecx & (1 << 0) ? "SSE3" : "-",
ecx & (1 << 3) ? "MONITOR" : "-",
ecx & (1 << 7) ? "EIST" : "-",
ecx & (1 << 8) ? "TM2" : "-",
- edx & (1 << 4) ? "TSC" : "-",
- edx & (1 << 5) ? "MSR" : "-",
- edx & (1 << 22) ? "ACPI-TM" : "-",
- edx & (1 << 29) ? "TM" : "-");
+ edx & (1 << 4) ? "TSC" : "-", edx & (1 << 5) ? "MSR" : "-", edx & (1 << 22) ? "ACPI-TM" : "-", edx & (1 << 29) ? "TM" : "-");
}
if (!(edx & (1 << 5)))
errx(1, "CPUID: no MSR");
-
get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
/* turbo_is_enabled already set */
/* has_hwp already set */
@@ -1562,12 +1724,9 @@ void parse_cpuid(void)
turbo_is_enabled ? "" : "No-",
has_hwp ? "" : "No-",
has_hwp_notify ? "" : "No-",
- has_hwp_activity_window ? "" : "No-",
- has_hwp_epp ? "" : "No-",
- has_hwp_request_pkg ? "" : "No-",
- has_epb ? "" : "No-");
+ has_hwp_activity_window ? "" : "No-", has_hwp_epp ? "" : "No-", has_hwp_request_pkg ? "" : "No-", has_epb ? "" : "No-");
- return; /* success */
+ return; /* success */
}
int main(int argc, char **argv)
@@ -1577,7 +1736,7 @@ int main(int argc, char **argv)
probe_dev_msr();
init_data_structures();
- early_cpuid(); /* initial cpuid parse before cmdline */
+ early_cpuid(); /* initial cpuid parse before cmdline */
cmdline(argc, argv);
@@ -1586,7 +1745,7 @@ int main(int argc, char **argv)
parse_cpuid();
- /* If CPU-set and PKG-set are not initialized, default to all CPUs */
+ /* If CPU-set and PKG-set are not initialized, default to all CPUs */
if ((cpu_selected_set == 0) && (pkg_selected_set == 0))
cpu_selected_set = cpu_present_set;
@@ -1604,10 +1763,13 @@ int main(int argc, char **argv)
return -EINVAL;
/* display information only, no updates to settings */
- if (!update_epb && !update_turbo && !hwp_update_enabled()) {
+ if (!update_epb && !update_turbo && !hwp_update_enabled() && !update_soc_slider_balance && !update_soc_slider_offset && !update_platform_profile) {
if (cpu_selected_set)
for_all_cpus_in_set(cpu_setsize, cpu_selected_set, print_cpu_msrs);
+ print_soc_slider();
+ print_platform_profile();
+
if (has_hwp_request_pkg) {
if (pkg_selected_set == 0)
pkg_selected_set = pkg_present_set;
@@ -1628,5 +1790,8 @@ int main(int argc, char **argv)
} else if (pkg_selected_set)
for_packages(pkg_selected_set, update_hwp_request_pkg_msr);
+ if (update_soc_slider_balance || update_soc_slider_offset || update_platform_profile)
+ update_soc_slider();
+
return 0;
}
diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c
index b68abb9e760b..aad698fe294b 100644
--- a/tools/sched_ext/scx_qmap.bpf.c
+++ b/tools/sched_ext/scx_qmap.bpf.c
@@ -159,13 +159,7 @@ static s32 pick_direct_dispatch_cpu(struct task_struct *p, s32 prev_cpu)
static struct task_ctx *lookup_task_ctx(struct task_struct *p)
{
- struct task_ctx *tctx;
-
- if (!(tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0))) {
- scx_bpf_error("task_ctx lookup failed");
- return NULL;
- }
- return tctx;
+ return bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
}
s32 BPF_STRUCT_OPS(qmap_select_cpu, struct task_struct *p,
@@ -175,7 +169,7 @@ s32 BPF_STRUCT_OPS(qmap_select_cpu, struct task_struct *p,
s32 cpu;
if (!(tctx = lookup_task_ctx(p)))
- return -ESRCH;
+ return prev_cpu;
if (p->scx.weight < 2 && !(p->flags & PF_KTHREAD))
return prev_cpu;
@@ -540,13 +534,9 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev)
*/
if (prev) {
tctx = bpf_task_storage_get(&task_ctx_stor, prev, 0, 0);
- if (!tctx) {
- scx_bpf_error("task_ctx lookup failed");
- return;
- }
-
- tctx->core_sched_seq =
- core_sched_tail_seqs[weight_to_idx(prev->scx.weight)]++;
+ if (tctx)
+ tctx->core_sched_seq =
+ core_sched_tail_seqs[weight_to_idx(prev->scx.weight)]++;
}
}
@@ -584,10 +574,8 @@ static s64 task_qdist(struct task_struct *p)
s64 qdist;
tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
- if (!tctx) {
- scx_bpf_error("task_ctx lookup failed");
+ if (!tctx)
return 0;
- }
qdist = tctx->core_sched_seq - core_sched_head_seqs[idx];
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index 53d84a6874b7..2be1df80fcc9 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -11,8 +11,12 @@ ldflags-y += --wrap=devm_cxl_endpoint_decoders_setup
ldflags-y += --wrap=hmat_get_extended_linear_cache_size
ldflags-y += --wrap=devm_cxl_add_dport_by_dev
ldflags-y += --wrap=devm_cxl_switch_port_decoders_setup
+ldflags-y += --wrap=walk_hmem_resources
+ldflags-y += --wrap=region_intersects
+ldflags-y += --wrap=region_intersects_soft_reserve
DRIVERS := ../../../drivers
+DAX_HMEM_SRC := $(DRIVERS)/dax/hmem
CXL_SRC := $(DRIVERS)/cxl
CXL_CORE_SRC := $(DRIVERS)/cxl/core
ccflags-y := -I$(srctree)/drivers/cxl/
@@ -59,7 +63,7 @@ cxl_core-y += $(CXL_CORE_SRC)/hdm.o
cxl_core-y += $(CXL_CORE_SRC)/pmu.o
cxl_core-y += $(CXL_CORE_SRC)/cdat.o
cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
-cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
+cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o $(CXL_CORE_SRC)/region_pmem.o $(CXL_CORE_SRC)/region_dax.o
cxl_core-$(CONFIG_CXL_MCE) += $(CXL_CORE_SRC)/mce.o
cxl_core-$(CONFIG_CXL_FEATURES) += $(CXL_CORE_SRC)/features.o
cxl_core-$(CONFIG_CXL_EDAC_MEM_FEATURES) += $(CXL_CORE_SRC)/edac.o
@@ -70,6 +74,9 @@ cxl_core-y += config_check.o
cxl_core-y += cxl_core_test.o
cxl_core-y += cxl_core_exports.o
+obj-m += dax_hmem.o
+dax_hmem-y := $(DAX_HMEM_SRC)/hmem.o
+
KBUILD_CFLAGS := $(filter-out -Wmissing-prototypes -Wmissing-declarations, $(KBUILD_CFLAGS))
obj-m += test/
diff --git a/tools/testing/cxl/test/Kbuild b/tools/testing/cxl/test/Kbuild
index af50972c8b6d..c168e3c998a7 100644
--- a/tools/testing/cxl/test/Kbuild
+++ b/tools/testing/cxl/test/Kbuild
@@ -7,6 +7,7 @@ obj-m += cxl_mock_mem.o
obj-m += cxl_translate.o
cxl_test-y := cxl.o
+cxl_test-y += hmem_test.o
cxl_mock-y := mock.o
cxl_mock_mem-y := mem.o
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index 81e2aef3627a..418669927fb0 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -16,6 +16,7 @@
static int interleave_arithmetic;
static bool extended_linear_cache;
+static bool fail_autoassemble;
#define FAKE_QTG_ID 42
@@ -51,6 +52,31 @@ struct platform_device *cxl_mem_single[NR_MEM_SINGLE];
static struct platform_device *cxl_rch[NR_CXL_RCH];
static struct platform_device *cxl_rcd[NR_CXL_RCH];
+/*
+ * Decoder registry
+ *
+ * Record decoder programming so that the topology can be reconstructed
+ * after cxl_acpi unbind/bind. This allows a user-created region config
+ * to be replayed as if firmware had provided the region at enumeration
+ * time.
+ *
+ * Entries are keyed by a stable port identity (port->uport_dev) combined
+ * with the decoder id. Decoder state is saved at initialization and
+ * updated on commit and reset.
+ *
+ * On re-enumeration mock_init_hdm_decoder() consults this registry to
+ * restore enabled decoders. Disabled decoders are reinitialized to a
+ * clean default state rather than replaying stale programming.
+ */
+static DEFINE_XARRAY(decoder_registry);
+
+/*
+ * When set, decoder reset will not update the registry. This allows
+ * region destroy operations to reset live decoders without erasing
+ * the saved programming needed for replay after re-enumeration.
+ */
+static bool decoder_reset_preserve_registry;
+
static inline bool is_multi_bridge(struct device *dev)
{
int i;
@@ -704,6 +730,194 @@ static int map_targets(struct device *dev, void *data)
return 0;
}
+/*
+ * Build a stable registry key from the decoder's upstream port identity
+ * and decoder id.
+ *
+ * Decoder objects and cxl_port objects are reallocated on each enumeration,
+ * so their addresses cannot be used directly as replay keys. However,
+ * port->uport_dev is stable for a given topology across cxl_acpi unbind/bind
+ * in cxl_test, so use that as the port identity and pack the local decoder
+ * id into the low bits.
+ *
+ * The key is formed as:
+ * ((unsigned long)port->uport_dev << 4) | cxld->id
+ *
+ * The low bits hold the decoder id (which must fit in 4 bits) while
+ * the remaining bits identify the upstream port. This key is only used
+ * within cxl_test to locate saved decoder state during replay.
+ */
+static unsigned long cxld_registry_index(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+
+ dev_WARN_ONCE(&port->dev, cxld->id >= 16,
+ "decoder id:%d out of range\n", cxld->id);
+ return (((unsigned long)port->uport_dev) << 4) | cxld->id;
+}
+
+struct cxl_test_decoder {
+ union {
+ struct cxl_switch_decoder cxlsd;
+ struct cxl_endpoint_decoder cxled;
+ };
+ struct range dpa_range;
+};
+
+static struct cxl_test_decoder *cxld_registry_find(struct cxl_decoder *cxld)
+{
+ return xa_load(&decoder_registry, cxld_registry_index(cxld));
+}
+
+#define dbg_cxld(port, msg, cxld) \
+ do { \
+ struct cxl_decoder *___d = (cxld); \
+ dev_dbg((port)->uport_dev, \
+ "decoder%d: %s range: %#llx-%#llx iw: %d ig: %d flags: %#lx\n", \
+ ___d->id, msg, ___d->hpa_range.start, \
+ ___d->hpa_range.end + 1, ___d->interleave_ways, \
+ ___d->interleave_granularity, ___d->flags); \
+ } while (0)
+
+static int mock_decoder_commit(struct cxl_decoder *cxld);
+static void mock_decoder_reset(struct cxl_decoder *cxld);
+static void init_disabled_mock_decoder(struct cxl_decoder *cxld);
+
+static void cxld_copy(struct cxl_decoder *a, struct cxl_decoder *b)
+{
+ a->id = b->id;
+ a->hpa_range = b->hpa_range;
+ a->interleave_ways = b->interleave_ways;
+ a->interleave_granularity = b->interleave_granularity;
+ a->target_type = b->target_type;
+ a->flags = b->flags;
+ a->commit = mock_decoder_commit;
+ a->reset = mock_decoder_reset;
+}
+
+/*
+ * Restore decoder programming saved in the registry.
+ *
+ * Only decoders that were saved enabled are restored. Disabled decoders
+ * are left in their default inactive state so that stale programming is
+ * not resurrected after topology replay.
+ *
+ * For endpoint decoders this also restores the DPA reservation needed
+ * to reconstruct committed mappings.
+ */
+static int cxld_registry_restore(struct cxl_decoder *cxld,
+ struct cxl_test_decoder *td)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ int rc;
+
+ if (is_switch_decoder(&cxld->dev)) {
+ struct cxl_switch_decoder *cxlsd =
+ to_cxl_switch_decoder(&cxld->dev);
+
+ if (!(td->cxlsd.cxld.flags & CXL_DECODER_F_ENABLE))
+ return 0;
+
+ dbg_cxld(port, "restore", &td->cxlsd.cxld);
+ cxld_copy(cxld, &td->cxlsd.cxld);
+ WARN_ON(cxlsd->nr_targets != td->cxlsd.nr_targets);
+
+ /* Restore saved target intent; live dport binding happens later */
+ for (int i = 0; i < cxlsd->nr_targets; i++) {
+ cxlsd->target[i] = NULL;
+ cxld->target_map[i] = td->cxlsd.cxld.target_map[i];
+ }
+
+ port->commit_end = cxld->id;
+
+ } else {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+
+ if (!(td->cxled.cxld.flags & CXL_DECODER_F_ENABLE))
+ return 0;
+
+ dbg_cxld(port, "restore", &td->cxled.cxld);
+ cxld_copy(cxld, &td->cxled.cxld);
+ cxled->state = td->cxled.state;
+ cxled->skip = td->cxled.skip;
+ if (range_len(&td->dpa_range)) {
+ rc = devm_cxl_dpa_reserve(cxled, td->dpa_range.start,
+ range_len(&td->dpa_range),
+ td->cxled.skip);
+ if (rc) {
+ init_disabled_mock_decoder(cxld);
+ return rc;
+ }
+ }
+ port->commit_end = cxld->id;
+ }
+
+ return 0;
+}
+
+static void __cxld_registry_save(struct cxl_test_decoder *td,
+ struct cxl_decoder *cxld)
+{
+ if (is_switch_decoder(&cxld->dev)) {
+ struct cxl_switch_decoder *cxlsd =
+ to_cxl_switch_decoder(&cxld->dev);
+
+ cxld_copy(&td->cxlsd.cxld, cxld);
+ td->cxlsd.nr_targets = cxlsd->nr_targets;
+
+		/* Save target port_id as a stable identity for the dport */
+ for (int i = 0; i < cxlsd->nr_targets; i++) {
+ struct cxl_dport *dport;
+
+ if (!cxlsd->target[i])
+ continue;
+
+ dport = cxlsd->target[i];
+ td->cxlsd.cxld.target_map[i] = dport->port_id;
+ }
+ } else {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+
+ cxld_copy(&td->cxled.cxld, cxld);
+ td->cxled.state = cxled->state;
+ td->cxled.skip = cxled->skip;
+
+ if (!(cxld->flags & CXL_DECODER_F_ENABLE)) {
+ td->dpa_range.start = 0;
+ td->dpa_range.end = -1;
+ } else if (cxled->dpa_res) {
+ td->dpa_range.start = cxled->dpa_res->start;
+ td->dpa_range.end = cxled->dpa_res->end;
+ } else {
+ td->dpa_range.start = 0;
+ td->dpa_range.end = -1;
+ }
+ }
+}
+
+static void cxld_registry_save(struct cxl_test_decoder *td,
+ struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+
+ dbg_cxld(port, "save", cxld);
+ __cxld_registry_save(td, cxld);
+}
+
+static void cxld_registry_update(struct cxl_decoder *cxld)
+{
+ struct cxl_test_decoder *td = cxld_registry_find(cxld);
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+
+ if (WARN_ON_ONCE(!td))
+ return;
+
+ dbg_cxld(port, "update", cxld);
+ __cxld_registry_save(td, cxld);
+}
+
static int mock_decoder_commit(struct cxl_decoder *cxld)
{
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
@@ -723,6 +937,13 @@ static int mock_decoder_commit(struct cxl_decoder *cxld)
port->commit_end++;
cxld->flags |= CXL_DECODER_F_ENABLE;
+ if (is_endpoint_decoder(&cxld->dev)) {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+
+ cxled->state = CXL_DECODER_STATE_AUTO;
+ }
+ cxld_registry_update(cxld);
return 0;
}
@@ -743,6 +964,65 @@ static void mock_decoder_reset(struct cxl_decoder *cxld)
"%s: out of order reset, expected decoder%d.%d\n",
dev_name(&cxld->dev), port->id, port->commit_end);
cxld->flags &= ~CXL_DECODER_F_ENABLE;
+
+ if (is_endpoint_decoder(&cxld->dev)) {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+
+ cxled->state = CXL_DECODER_STATE_MANUAL;
+ cxled->skip = 0;
+ }
+ if (decoder_reset_preserve_registry)
+ dev_dbg(port->uport_dev, "decoder%d: skip registry update\n",
+ cxld->id);
+ else
+ cxld_registry_update(cxld);
+}
+
+static struct cxl_test_decoder *cxld_registry_new(struct cxl_decoder *cxld)
+{
+ struct cxl_test_decoder *td __free(kfree) =
+ kzalloc(sizeof(*td), GFP_KERNEL);
+ unsigned long key = cxld_registry_index(cxld);
+
+ if (!td)
+ return NULL;
+
+ if (xa_insert(&decoder_registry, key, td, GFP_KERNEL)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ cxld_registry_save(td, cxld);
+ return no_free_ptr(td);
+}
+
+static void init_disabled_mock_decoder(struct cxl_decoder *cxld)
+{
+ cxld->hpa_range.start = 0;
+ cxld->hpa_range.end = -1;
+ cxld->interleave_ways = 1;
+ cxld->interleave_granularity = 0;
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
+ cxld->flags = 0;
+ cxld->commit = mock_decoder_commit;
+ cxld->reset = mock_decoder_reset;
+
+ if (is_switch_decoder(&cxld->dev)) {
+ struct cxl_switch_decoder *cxlsd =
+ to_cxl_switch_decoder(&cxld->dev);
+
+ for (int i = 0; i < cxlsd->nr_targets; i++) {
+ cxlsd->target[i] = NULL;
+ cxld->target_map[i] = 0;
+ }
+ } else {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+
+ cxled->state = CXL_DECODER_STATE_MANUAL;
+ cxled->skip = 0;
+ }
}
static void default_mock_decoder(struct cxl_decoder *cxld)
@@ -757,6 +1037,8 @@ static void default_mock_decoder(struct cxl_decoder *cxld)
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->commit = mock_decoder_commit;
cxld->reset = mock_decoder_reset;
+
+ WARN_ON_ONCE(!cxld_registry_new(cxld));
}
static int first_decoder(struct device *dev, const void *data)
@@ -771,13 +1053,29 @@ static int first_decoder(struct device *dev, const void *data)
return 0;
}
-static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
+/*
+ * Initialize a decoder during HDM enumeration.
+ *
+ * If a saved registry entry exists:
+ * - enabled decoders are restored from the saved programming
+ * - disabled decoders are initialized in a clean disabled state
+ *
+ * If no registry entry exists the decoder follows the normal mock
+ * initialization path, including the special auto-region setup for
+ * the first endpoints under host-bridge0.
+ *
+ * Returns true if decoder state was restored from the registry. In
+ * that case the saved decode configuration (including target mapping)
+ * has already been applied and map_targets() is skipped.
+ */
+static bool mock_init_hdm_decoder(struct cxl_decoder *cxld)
{
struct acpi_cedt_cfmws *window = mock_cfmws[0];
struct platform_device *pdev = NULL;
struct cxl_endpoint_decoder *cxled;
struct cxl_switch_decoder *cxlsd;
struct cxl_port *port, *iter;
+ struct cxl_test_decoder *td;
struct cxl_memdev *cxlmd;
struct cxl_dport *dport;
struct device *dev;
@@ -804,6 +1102,24 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
port = NULL;
} while (port);
port = cxled_to_port(cxled);
+ } else {
+ port = to_cxl_port(cxld->dev.parent);
+ }
+
+ td = cxld_registry_find(cxld);
+ if (td) {
+ bool enabled;
+
+ if (is_switch_decoder(&cxld->dev))
+ enabled = td->cxlsd.cxld.flags & CXL_DECODER_F_ENABLE;
+ else
+ enabled = td->cxled.cxld.flags & CXL_DECODER_F_ENABLE;
+
+ if (enabled)
+ return !cxld_registry_restore(cxld, td);
+
+ init_disabled_mock_decoder(cxld);
+ return false;
}
/*
@@ -814,9 +1130,16 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
*
* See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4'
*/
- if (!hb0 || pdev->id % 4 || pdev->id > 4 || cxld->id > 0) {
+ if (!is_endpoint_decoder(&cxld->dev) || !hb0 || pdev->id % 4 ||
+ pdev->id > 4 || cxld->id > 0) {
default_mock_decoder(cxld);
- return;
+ return false;
+ }
+
+ /* Simulate missing cxl_mem.4 configuration */
+ if (hb0 && pdev->id == 4 && cxld->id == 0 && fail_autoassemble) {
+ default_mock_decoder(cxld);
+ return false;
}
base = window->base_hpa;
@@ -838,6 +1161,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
cxld->commit = mock_decoder_commit;
cxld->reset = mock_decoder_reset;
+ WARN_ON_ONCE(!cxld_registry_new(cxld));
/*
* Now that endpoint decoder is set up, walk up the hierarchy
* and setup the switch and root port decoders targeting @cxlmd.
@@ -859,14 +1183,14 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
/* put cxl_mem.4 second in the decode order */
if (pdev->id == 4) {
cxlsd->target[1] = dport;
- cxld->target_map[1] = dport->port_id;
+ cxlsd->cxld.target_map[1] = dport->port_id;
} else {
cxlsd->target[0] = dport;
- cxld->target_map[0] = dport->port_id;
+ cxlsd->cxld.target_map[0] = dport->port_id;
}
} else {
cxlsd->target[0] = dport;
- cxld->target_map[0] = dport->port_id;
+ cxlsd->cxld.target_map[0] = dport->port_id;
}
cxld = &cxlsd->cxld;
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
@@ -885,8 +1209,14 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
.start = base,
.end = base + mock_auto_region_size - 1,
};
+ cxld->commit = mock_decoder_commit;
+ cxld->reset = mock_decoder_reset;
+
+ cxld_registry_update(cxld);
put_device(dev);
}
+
+ return false;
}
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
@@ -895,6 +1225,7 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
struct cxl_port *port = cxlhdm->port;
struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
int target_count, i;
+ bool restored;
if (is_cxl_endpoint(port))
target_count = 0;
@@ -934,10 +1265,8 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
}
ctx.target_map = cxld->target_map;
-
- mock_init_hdm_decoder(cxld);
-
- if (target_count) {
+ restored = mock_init_hdm_decoder(cxld);
+ if (target_count && !restored) {
rc = device_for_each_child(port->uport_dev, &ctx,
map_targets);
if (rc) {
@@ -1114,6 +1443,53 @@ static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
cxl_endpoint_get_perf_coordinates(port, ep_c);
}
+/*
+ * Simulate that the first half of mock CXL Window 0 is "Soft Reserve" capacity
+ */
+static int mock_walk_hmem_resources(struct device *host, walk_hmem_fn fn)
+{
+ struct acpi_cedt_cfmws *cfmws = mock_cfmws[0];
+ struct resource window =
+ DEFINE_RES_MEM(cfmws->base_hpa, cfmws->window_size / 2);
+
+ dev_dbg(host, "walk cxl_test resource: %pr\n", &window);
+ return fn(host, 0, &window);
+}
+
+/*
+ * This should only be called by the dax_hmem case, treat mismatches (negative
+ * result) as "fallback to base region_intersects()". Simulate that the first
+ * half of mock CXL Window 0 is IORES_DESC_CXL capacity.
+ */
+static int mock_region_intersects(resource_size_t start, size_t size,
+ unsigned long flags, unsigned long desc)
+{
+ struct resource res = DEFINE_RES_MEM(start, size);
+ struct acpi_cedt_cfmws *cfmws = mock_cfmws[0];
+ struct resource window =
+ DEFINE_RES_MEM(cfmws->base_hpa, cfmws->window_size / 2);
+
+ if (resource_overlaps(&res, &window))
+ return REGION_INTERSECTS;
+ pr_debug("warning: no cxl_test CXL intersection for %pr\n", &res);
+ return -1;
+}
+
+
+static int
+mock_region_intersects_soft_reserve(resource_size_t start, size_t size)
+{
+ struct resource res = DEFINE_RES_MEM(start, size);
+ struct acpi_cedt_cfmws *cfmws = mock_cfmws[0];
+ struct resource window =
+ DEFINE_RES_MEM(cfmws->base_hpa, cfmws->window_size / 2);
+
+ if (resource_overlaps(&res, &window))
+ return REGION_INTERSECTS;
+ pr_debug("warning: no cxl_test soft reserve intersection for %pr\n", &res);
+ return -1;
+}
+
static struct cxl_mock_ops cxl_mock_ops = {
.is_mock_adev = is_mock_adev,
.is_mock_bridge = is_mock_bridge,
@@ -1129,6 +1505,9 @@ static struct cxl_mock_ops cxl_mock_ops = {
.devm_cxl_add_dport_by_dev = mock_cxl_add_dport_by_dev,
.hmat_get_extended_linear_cache_size =
mock_hmat_get_extended_linear_cache_size,
+ .walk_hmem_resources = mock_walk_hmem_resources,
+ .region_intersects = mock_region_intersects,
+ .region_intersects_soft_reserve = mock_region_intersects_soft_reserve,
.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
@@ -1415,6 +1794,33 @@ err_mem:
return rc;
}
+static ssize_t
+decoder_reset_preserve_registry_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", decoder_reset_preserve_registry);
+}
+
+static ssize_t
+decoder_reset_preserve_registry_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+
+ rc = kstrtobool(buf, &decoder_reset_preserve_registry);
+ if (rc)
+ return rc;
+ return count;
+}
+
+static DEVICE_ATTR_RW(decoder_reset_preserve_registry);
+
+static struct attribute *cxl_acpi_attrs[] = {
+ &dev_attr_decoder_reset_preserve_registry.attr, NULL
+};
+ATTRIBUTE_GROUPS(cxl_acpi);
+
static __init int cxl_test_init(void)
{
int rc, i;
@@ -1545,6 +1951,7 @@ static __init int cxl_test_init(void)
mock_companion(&acpi0017_mock, &cxl_acpi->dev);
acpi0017_mock.dev.bus = &platform_bus_type;
+ cxl_acpi->dev.groups = cxl_acpi_groups;
rc = platform_device_add(cxl_acpi);
if (rc)
@@ -1554,8 +1961,14 @@ static __init int cxl_test_init(void)
if (rc)
goto err_root;
+ rc = hmem_test_init();
+ if (rc)
+ goto err_mem;
+
return 0;
+err_mem:
+ cxl_mem_exit();
err_root:
platform_device_put(cxl_acpi);
err_rch:
@@ -1589,10 +2002,22 @@ err_gen_pool_create:
return rc;
}
+static void free_decoder_registry(void)
+{
+ unsigned long index;
+ void *entry;
+
+ xa_for_each(&decoder_registry, index, entry) {
+ xa_erase(&decoder_registry, index);
+ kfree(entry);
+ }
+}
+
static __exit void cxl_test_exit(void)
{
int i;
+ hmem_test_exit();
cxl_mem_exit();
platform_device_unregister(cxl_acpi);
cxl_rch_topo_exit();
@@ -1614,12 +2039,16 @@ static __exit void cxl_test_exit(void)
depopulate_all_mock_resources();
gen_pool_destroy(cxl_mock_pool);
unregister_cxl_mock_ops(&cxl_mock_ops);
+ free_decoder_registry();
+ xa_destroy(&decoder_registry);
}
module_param(interleave_arithmetic, int, 0444);
MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1");
module_param(extended_linear_cache, bool, 0444);
MODULE_PARM_DESC(extended_linear_cache, "Enable extended linear cache support");
+module_param(fail_autoassemble, bool, 0444);
+MODULE_PARM_DESC(fail_autoassemble, "Simulate missing member of an auto-region");
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
diff --git a/tools/testing/cxl/test/hmem_test.c b/tools/testing/cxl/test/hmem_test.c
new file mode 100644
index 000000000000..3a1a089e1721
--- /dev/null
+++ b/tools/testing/cxl/test/hmem_test.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2026 Intel Corporation */
+#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include "../../../drivers/dax/bus.h"
+
+static bool hmem_test;
+
+static void hmem_test_work(struct work_struct *work)
+{
+}
+
+static void hmem_test_release(struct device *dev)
+{
+ struct hmem_platform_device *hpdev =
+ container_of(dev, typeof(*hpdev), pdev.dev);
+
+ memset(hpdev, 0, sizeof(*hpdev));
+}
+
+static struct hmem_platform_device hmem_test_device = {
+ .pdev = {
+ .name = "hmem_platform",
+ .id = 1,
+ .dev = {
+ .release = hmem_test_release,
+ },
+ },
+ .work = __WORK_INITIALIZER(hmem_test_device.work, hmem_test_work),
+};
+
+int hmem_test_init(void)
+{
+ if (!hmem_test)
+ return 0;
+
+ return platform_device_register(&hmem_test_device.pdev);
+}
+
+void hmem_test_exit(void)
+{
+ if (hmem_test)
+ platform_device_unregister(&hmem_test_device.pdev);
+}
+
+module_param(hmem_test, bool, 0444);
+MODULE_PARM_DESC(hmem_test, "Enable/disable the dax_hmem test platform device");
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index cb87e8c0e63c..271c7ad8cc32 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -1695,6 +1695,9 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
struct cxl_dpa_info range_info = { 0 };
int rc;
+ /* Increase async probe race window */
+ usleep_range(500*1000, 1000*1000);
+
mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
if (!mdata)
return -ENOMEM;
@@ -1716,7 +1719,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
if (rc)
return rc;
- mds = cxl_memdev_state_create(dev);
+ mds = cxl_memdev_state_create(dev, pdev->id + 1, 0);
if (IS_ERR(mds))
return PTR_ERR(mds);
@@ -1732,7 +1735,6 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);
- cxlds->serial = pdev->id + 1;
if (is_rcd(pdev))
cxlds->rcd = true;
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index b8fcb50c1027..6454b868b122 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -251,6 +251,56 @@ struct cxl_dport *__wrap_devm_cxl_add_dport_by_dev(struct cxl_port *port,
}
EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_add_dport_by_dev, "CXL");
+int __wrap_region_intersects(resource_size_t start, size_t size,
+ unsigned long flags, unsigned long desc)
+{
+ int rc = -1;
+ int index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops)
+ rc = ops->region_intersects(start, size, flags, desc);
+ if (rc < 0)
+ rc = region_intersects(start, size, flags, desc);
+ put_cxl_mock_ops(index);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(__wrap_region_intersects);
+
+int __wrap_region_intersects_soft_reserve(resource_size_t start, size_t size)
+{
+ int rc = -1;
+ int index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops)
+ rc = ops->region_intersects_soft_reserve(start, size);
+ if (rc < 0)
+ rc = region_intersects_soft_reserve(start, size);
+ put_cxl_mock_ops(index);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(__wrap_region_intersects_soft_reserve);
+
+int __wrap_walk_hmem_resources(struct device *host, walk_hmem_fn fn)
+{
+ int index, rc = 0;
+ bool is_mock = strcmp(dev_name(host), "hmem_platform.1") == 0;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (is_mock) {
+ if (ops)
+ rc = ops->walk_hmem_resources(host, fn);
+ } else {
+ rc = walk_hmem_resources(host, fn);
+ }
+ put_cxl_mock_ops(index);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(__wrap_walk_hmem_resources);
+
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("cxl_test: emulation module");
MODULE_IMPORT_NS("ACPI");
diff --git a/tools/testing/cxl/test/mock.h b/tools/testing/cxl/test/mock.h
index 2684b89c8aa2..4f57dc80ae7d 100644
--- a/tools/testing/cxl/test/mock.h
+++ b/tools/testing/cxl/test/mock.h
@@ -2,6 +2,7 @@
#include <linux/list.h>
#include <linux/acpi.h>
+#include <linux/dax.h>
#include <cxl.h>
struct cxl_mock_ops {
@@ -27,8 +28,15 @@ struct cxl_mock_ops {
int (*hmat_get_extended_linear_cache_size)(struct resource *backing_res,
int nid,
resource_size_t *cache_size);
+ int (*walk_hmem_resources)(struct device *host, walk_hmem_fn fn);
+ int (*region_intersects)(resource_size_t start, size_t size,
+ unsigned long flags, unsigned long desc);
+ int (*region_intersects_soft_reserve)(resource_size_t start,
+ size_t size);
};
+int hmem_test_init(void);
+void hmem_test_exit(void);
void register_cxl_mock_ops(struct cxl_mock_ops *ops);
void unregister_cxl_mock_ops(struct cxl_mock_ops *ops);
struct cxl_mock_ops *get_cxl_mock_ops(int *index);
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 112f9ca2444b..f94ed2e98887 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -1855,7 +1855,7 @@ sub save_logs {
my ($result, $basedir) = @_;
my @t = localtime;
my $date = sprintf "%04d%02d%02d%02d%02d%02d",
- 1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0];
+ 1900+$t[5],$t[4]+1,$t[3],$t[2],$t[1],$t[0];
my $type = $build_type;
if ($type =~ /useconfig/) {
@@ -1878,6 +1878,12 @@ sub save_logs {
"testlog" => $testlog,
);
+ if (defined($opt{"LOG_FILE"})) {
+ if (-f $opt{"LOG_FILE"}) {
+ cp $opt{"LOG_FILE"}, "$dir/logfile";
+ }
+ }
+
while (my ($name, $source) = each(%files)) {
if (-f "$source") {
cp "$source", "$dir/$name" or
diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
index 009b97bbdd22..b6b1d147fd75 100644
--- a/tools/testing/memblock/internal.h
+++ b/tools/testing/memblock/internal.h
@@ -11,9 +11,22 @@ static int memblock_debug = 1;
#define pr_warn_ratelimited(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#define K(x) ((x) << (PAGE_SHIFT-10))
+
bool mirrored_kernelcore = false;
struct page {};
+static inline void *page_address(struct page *page)
+{
+ BUG();
+ return page;
+}
+
+static inline struct page *virt_to_page(void *virt)
+{
+ BUG();
+ return virt;
+}
void memblock_free_pages(unsigned long pfn, unsigned int order)
{
@@ -23,10 +36,34 @@ static inline void accept_memory(phys_addr_t start, unsigned long size)
{
}
-static inline unsigned long free_reserved_area(void *start, void *end,
- int poison, const char *s)
+unsigned long free_reserved_area(void *start, void *end, int poison, const char *s);
+void free_reserved_page(struct page *page);
+
+static inline bool deferred_pages_enabled(void)
{
- return 0;
+ return false;
}
+#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \
+ for ((pfn) = (start_pfn); (pfn) < (end_pfn); (pfn)++)
+
+static inline void *kasan_reset_tag(const void *addr)
+{
+ return (void *)addr;
+}
+
+static inline bool __is_kernel(unsigned long addr)
+{
+ return false;
+}
+
+#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \
+ for ((pfn) = (start_pfn); (pfn) < (end_pfn); (pfn)++)
+
+static inline void init_deferred_page(unsigned long pfn, int nid)
+{
+}
+
+#define __SetPageReserved(p) ((void)(p))
+
#endif
diff --git a/tools/testing/memblock/linux/string_helpers.h b/tools/testing/memblock/linux/string_helpers.h
new file mode 100644
index 000000000000..dbf015cfff31
--- /dev/null
+++ b/tools/testing/memblock/linux/string_helpers.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_STRING_HELPERS_H_
+#define _LINUX_STRING_HELPERS_H_
+
+/*
+ * Header stub to avoid test build breakage; we don't need to
+ * actually implement string_get_size() as it's not used in the tests.
+ */
+
+#endif
diff --git a/tools/testing/memblock/mmzone.c b/tools/testing/memblock/mmzone.c
index d3d58851864e..e719450f81cb 100644
--- a/tools/testing/memblock/mmzone.c
+++ b/tools/testing/memblock/mmzone.c
@@ -11,10 +11,6 @@ struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
return NULL;
}
-void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid)
-{
-}
-
void atomic_long_set(atomic_long_t *v, long i)
{
}
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index feedd5ab7058..0607913a3022 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -2,7 +2,7 @@
/*
* maple_tree.c: Userspace testing for maple tree test-suite
* Copyright (c) 2018-2022 Oracle Corporation
- * Author: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ * Author: Liam R. Howlett <liam@infradead.org>
*
* Any tests that require internal knowledge of the tree or threads and other
* difficult to handle in kernel tests.
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 984abb6d42ab..6e59b8f63e41 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -98,6 +98,7 @@ TARGETS += proc
TARGETS += pstore
TARGETS += ptrace
TARGETS += openat2
+TARGETS += rdma
TARGETS += resctrl
TARGETS += riscv
TARGETS += rlimits
@@ -213,14 +214,14 @@ export KHDR_INCLUDES
.DEFAULT_GOAL := all
all:
- @ret=0; \
+ @ret=1; \
for TARGET in $(TARGETS) $(INSTALL_DEP_TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
mkdir $$BUILD_TARGET -p; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET \
O=$(abs_objtree) \
$(if $(FORCE_TARGETS),|| exit); \
- [ $$? -eq 0 ] || ret=1; \
+ ret=$$((ret * $$?)); \
done; exit $$ret;
run_tests: all
@@ -278,7 +279,7 @@ ifdef INSTALL_PATH
install -m 744 kselftest/ksft.py $(INSTALL_PATH)/kselftest/
install -m 744 run_kselftest.sh $(INSTALL_PATH)/
rm -f $(TEST_LIST)
- @ret=0; \
+ @ret=1; \
for TARGET in $(TARGETS) $(INSTALL_DEP_TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install \
@@ -287,7 +288,7 @@ ifdef INSTALL_PATH
OBJ_PATH=$(INSTALL_PATH) \
O=$(abs_objtree) \
$(if $(FORCE_TARGETS),|| exit); \
- [ $$? -eq 0 ] || ret=1; \
+ ret=$$((ret * $$?)); \
done; exit $$ret;
diff --git a/tools/testing/selftests/arm64/gcs/gcs-util.h b/tools/testing/selftests/arm64/gcs/gcs-util.h
index c99a6b39ac14..7a81bb07ed4b 100644
--- a/tools/testing/selftests/arm64/gcs/gcs-util.h
+++ b/tools/testing/selftests/arm64/gcs/gcs-util.h
@@ -18,12 +18,6 @@
#ifndef NT_ARM_GCS
#define NT_ARM_GCS 0x410
-
-struct user_gcs {
- __u64 features_enabled;
- __u64 features_locked;
- __u64 gcspr_el0;
-};
#endif
/* Shadow Stack/Guarded Control Stack interface */
diff --git a/tools/testing/selftests/arm64/gcs/libc-gcs.c b/tools/testing/selftests/arm64/gcs/libc-gcs.c
index 17b2fabfec38..72e82bfbecc9 100644
--- a/tools/testing/selftests/arm64/gcs/libc-gcs.c
+++ b/tools/testing/selftests/arm64/gcs/libc-gcs.c
@@ -16,6 +16,7 @@
#include <asm/hwcap.h>
#include <asm/mman.h>
+#include <asm/ptrace.h>
#include <linux/compiler.h>
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 78e60040811e..6ef6872adbc3 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -751,6 +751,7 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \
btf_helpers.c \
cap_helpers.c \
unpriv_helpers.c \
+ sysctl_helpers.c \
netlink_helpers.c \
jit_disasm_helpers.c \
io_helpers.c \
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index f829b6f09bc9..fe30181e6336 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -112,6 +112,10 @@ static void test_cubic(void)
ASSERT_EQ(cubic_skel->bss->bpf_cubic_acked_called, 1, "pkts_acked called");
+ ASSERT_TRUE(cubic_skel->bss->nodelay_init_reject, "init reject nodelay option");
+ ASSERT_TRUE(cubic_skel->bss->nodelay_cwnd_event_tx_start_reject,
+ "cwnd_event_tx_start reject nodelay option");
+
bpf_link__destroy(link);
bpf_cubic__destroy(cubic_skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
index 653b0a20fab9..c62907732c19 100644
--- a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
+++ b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
@@ -7,24 +7,29 @@
* 3. call listen() for 1 server socket. (migration target)
* 4. update a map to migrate all child sockets
* to the last server socket (migrate_map[cookie] = 4)
- * 5. call shutdown() for first 4 server sockets
+ * 5. for TCP_ESTABLISHED and TCP_SYN_RECV cases, verify via epoll
+ * that the last server socket is not ready before migration.
+ * 6. call shutdown() for first 4 server sockets
* and migrate the requests in the accept queue
* to the last server socket.
- * 6. call listen() for the second server socket.
- * 7. call shutdown() for the last server
+ * 7. for TCP_ESTABLISHED and TCP_SYN_RECV cases, verify via epoll
+ * that the last server socket is ready after migration.
+ * 8. call listen() for the second server socket.
+ * 9. call shutdown() for the last server
* and migrate the requests in the accept queue
* to the second server socket.
- * 8. call listen() for the last server.
- * 9. call shutdown() for the second server
+ * 10. call listen() for the last server.
+ * 11. call shutdown() for the second server
* and migrate the requests in the accept queue
* to the last server socket.
- * 10. call accept() for the last server socket.
+ * 12. call accept() for the last server socket.
*
* Author: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
*/
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
+#include <sys/epoll.h>
#include "test_progs.h"
#include "test_migrate_reuseport.skel.h"
@@ -350,21 +355,51 @@ static int update_maps(struct migrate_reuseport_test_case *test_case,
static int migrate_dance(struct migrate_reuseport_test_case *test_case)
{
+ struct epoll_event ev = {
+ .events = EPOLLIN,
+ };
+ int epoll = -1, nfds;
int i, err;
+ if (test_case->state != BPF_TCP_NEW_SYN_RECV) {
+ epoll = epoll_create1(0);
+ if (!ASSERT_NEQ(epoll, -1, "epoll_create1"))
+ return -1;
+
+ ev.data.fd = test_case->servers[MIGRATED_TO];
+ if (!ASSERT_OK(epoll_ctl(epoll, EPOLL_CTL_ADD,
+ test_case->servers[MIGRATED_TO], &ev),
+ "epoll_ctl"))
+ goto close_epoll;
+
+ nfds = epoll_wait(epoll, &ev, 1, 0);
+ if (!ASSERT_EQ(nfds, 0, "epoll_wait 1"))
+ goto close_epoll;
+ }
+
/* Migrate TCP_ESTABLISHED and TCP_SYN_RECV requests
* to the last listener based on eBPF.
*/
for (i = 0; i < MIGRATED_TO; i++) {
err = shutdown(test_case->servers[i], SHUT_RDWR);
if (!ASSERT_OK(err, "shutdown"))
- return -1;
+ goto close_epoll;
}
/* No dance for TCP_NEW_SYN_RECV to migrate based on eBPF */
if (test_case->state == BPF_TCP_NEW_SYN_RECV)
return 0;
+ nfds = epoll_wait(epoll, &ev, 1, 0);
+ if (!ASSERT_EQ(nfds, 1, "epoll_wait 2")) {
+close_epoll:
+ if (epoll >= 0)
+ close(epoll);
+ return -1;
+ }
+
+ close(epoll);
+
/* Note that we use the second listener instead of the
* first one here.
*
diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c
index 594441acb707..4e4a82d54f79 100644
--- a/tools/testing/selftests/bpf/prog_tests/snprintf.c
+++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c
@@ -114,7 +114,8 @@ static void test_snprintf_negative(void)
ASSERT_ERR(load_single_snprintf("%--------"), "invalid specifier 5");
ASSERT_ERR(load_single_snprintf("%lc"), "invalid specifier 6");
ASSERT_ERR(load_single_snprintf("%llc"), "invalid specifier 7");
- ASSERT_ERR(load_single_snprintf("\x80"), "non ascii character");
+ ASSERT_OK(load_single_snprintf("\x80"), "non ascii plain text");
+ ASSERT_ERR(load_single_snprintf("%\x80"), "non ascii in specifier");
ASSERT_ERR(load_single_snprintf("\x1"), "non printable character");
ASSERT_ERR(load_single_snprintf("%p%"), "invalid specifier 8");
ASSERT_ERR(load_single_snprintf("%s%"), "invalid specifier 9");
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
index 53637431ec5d..3a41c517b918 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
@@ -190,7 +190,7 @@ static int getsetsockopt(void)
fd = socket(AF_NETLINK, SOCK_RAW, 0);
if (fd < 0) {
log_err("Failed to create AF_NETLINK socket");
- return -1;
+ goto err;
}
buf.u32 = 1;
@@ -211,6 +211,21 @@ static int getsetsockopt(void)
}
ASSERT_EQ(optlen, 8, "Unexpected NETLINK_LIST_MEMBERSHIPS value");
+ /* Trick bpf_tcp_sock() with IPPROTO_TCP */
+ close(fd);
+ fd = socket(AF_INET, SOCK_RAW, IPPROTO_TCP);
+ if (!ASSERT_OK_FD(fd, "socket"))
+ goto err;
+
+ /* The BPF prog intercepts this before the kernel sees it, any
+ * optlen works. Go with 4 bytes for simplicity.
+ */
+ buf.u32 = 1;
+ optlen = sizeof(buf.u32);
+ err = setsockopt(fd, SOL_TCP, TCP_SAVED_SYN, &buf, optlen);
+ if (!ASSERT_ERR(err, "setsockopt(TCP_SAVED_SYN)"))
+ goto err;
+
free(big_buf);
close(fd);
return 0;
diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_data.h b/tools/testing/selftests/bpf/prog_tests/task_local_data.h
index 1e5c67c78ffb..8ae4fb2027f7 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_local_data.h
+++ b/tools/testing/selftests/bpf/prog_tests/task_local_data.h
@@ -99,14 +99,20 @@ struct tld_meta_u {
struct tld_metadata metadata[];
};
+/*
+ * The unused field ensures map_val.start > 0. On the BPF side, __tld_fetch_key()
+ * calculates off by summing map_val.start and tld_key_t.off and treats off == 0
+ * as key not cached.
+ */
struct tld_data_u {
- __u64 start; /* offset of tld_data_u->data in a page */
+ __u64 unused;
char data[] __attribute__((aligned(8)));
};
struct tld_map_value {
void *data;
struct tld_meta_u *meta;
+ __u16 start; /* offset of tld_data_u->data in a page */
};
struct tld_meta_u * _Atomic tld_meta_p __attribute__((weak));
@@ -182,7 +188,7 @@ static int __tld_init_data_p(int map_fd)
* is a page in BTF.
*/
map_val.data = (void *)(TLD_PAGE_MASK & (intptr_t)data);
- data->start = (~TLD_PAGE_MASK & (intptr_t)data) + sizeof(struct tld_data_u);
+ map_val.start = (~TLD_PAGE_MASK & (intptr_t)data) + sizeof(struct tld_data_u);
map_val.meta = tld_meta_p;
err = bpf_map_update_elem(map_fd, &tid_fd, &map_val, 0);
@@ -241,7 +247,8 @@ retry:
* TLD_DYN_DATA_SIZE is allocated for tld_create_key()
*/
if (dyn_data) {
- if (off + TLD_ROUND_UP(size, 8) > tld_meta_p->size)
+ if (off + TLD_ROUND_UP(size, 8) > tld_meta_p->size ||
+ tld_meta_p->size > TLD_PAGE_SIZE - sizeof(struct tld_data_u))
return (tld_key_t){-E2BIG};
} else {
if (off + TLD_ROUND_UP(size, 8) > TLD_PAGE_SIZE - sizeof(struct tld_data_u))
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
index 56685fc03c7e..80e6315da2a5 100644
--- a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
@@ -507,6 +507,10 @@ static void misc(void)
ASSERT_EQ(misc_skel->bss->nr_hwtstamp, 0, "nr_hwtstamp");
+ ASSERT_TRUE(misc_skel->bss->nodelay_est_ok, "nodelay_est_ok");
+ ASSERT_TRUE(misc_skel->bss->nodelay_hdr_len_reject, "nodelay_hdr_len_reject");
+ ASSERT_TRUE(misc_skel->bss->nodelay_write_hdr_reject, "nodelay_write_hdr_reject");
+
check_linum:
ASSERT_FALSE(check_error_linum(&sk_fds), "check_error_linum");
sk_fds_close(&sk_fds);
diff --git a/tools/testing/selftests/bpf/prog_tests/test_task_local_data.c b/tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
index e219ff506b56..6a5806b36113 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
@@ -3,8 +3,14 @@
#include <bpf/btf.h>
#include <test_progs.h>
+/*
+ * Only a page is pinned to kernel, so the maximum amount of dynamic data
+ * allowed is page_size - sizeof(struct tld_data_u) - static TLD fields.
+ */
+#define TLD_DYN_DATA_SIZE_MAX (getpagesize() - sizeof(struct tld_data_u) - 8)
+
#define TLD_FREE_DATA_ON_THREAD_EXIT
-#define TLD_DYN_DATA_SIZE (getpagesize() - 8)
+#define TLD_DYN_DATA_SIZE TLD_DYN_DATA_SIZE_MAX
#include "task_local_data.h"
struct test_tld_struct {
@@ -24,12 +30,12 @@ TLD_DEFINE_KEY(value0_key, "value0", sizeof(int));
* sequentially. Users of task local data library should not touch
* library internal.
*/
-static void reset_tld(void)
+static void reset_tld(__u16 dyn_data_size)
{
if (tld_meta_p) {
/* Remove TLDs created by tld_create_key() */
tld_meta_p->cnt = 1;
- tld_meta_p->size = TLD_DYN_DATA_SIZE;
+ tld_meta_p->size = dyn_data_size + 8;
memset(&tld_meta_p->metadata[1], 0,
(TLD_MAX_DATA_CNT - 1) * sizeof(struct tld_metadata));
}
@@ -127,7 +133,7 @@ static void test_task_local_data_basic(void)
tld_key_t key;
int i, err;
- reset_tld();
+ reset_tld(TLD_DYN_DATA_SIZE_MAX);
ASSERT_OK(pthread_mutex_init(&global_mutex, NULL), "pthread_mutex_init");
@@ -147,11 +153,13 @@ static void test_task_local_data_basic(void)
/*
* Shouldn't be able to store data exceed a page. Create a TLD just big
- * enough to exceed a page. TLDs already created are int value0, int
- * value1, and struct test_tld_struct value2.
+ * enough to exceed a page. Data already contains struct tld_data_u,
+ * value0 and value1 of int type, and value 2 of struct test_tld_struct.
*/
- key = tld_create_key("value_not_exist",
- TLD_PAGE_SIZE - 2 * sizeof(int) - sizeof(struct test_tld_struct) + 1);
+ key = tld_create_key("value_not_exist", TLD_PAGE_SIZE + 1 -
+ sizeof(struct tld_data_u) -
+ TLD_ROUND_UP(sizeof(int), 8) * 2 -
+ TLD_ROUND_UP(sizeof(struct test_tld_struct), 8));
ASSERT_EQ(tld_key_err_or_zero(key), -E2BIG, "tld_create_key");
key = tld_create_key("value2", sizeof(struct test_tld_struct));
@@ -239,7 +247,7 @@ static void test_task_local_data_race(void)
tld_keys[0] = value0_key;
for (j = 0; j < 100; j++) {
- reset_tld();
+ reset_tld(TLD_DYN_DATA_SIZE_MAX);
for (i = 0; i < TEST_RACE_THREAD_NUM; i++) {
/*
@@ -288,10 +296,80 @@ out:
test_task_local_data__destroy(skel);
}
+static void test_task_local_data_dyn_size(__u16 dyn_data_size)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct test_task_local_data *skel;
+ int max_keys, i, err, fd, *data;
+ char name[TLD_NAME_LEN];
+ tld_key_t key;
+
+ reset_tld(dyn_data_size);
+
+ skel = test_task_local_data__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ tld_keys = calloc(TLD_MAX_DATA_CNT, sizeof(tld_key_t));
+ if (!ASSERT_OK_PTR(tld_keys, "calloc tld_keys"))
+ goto out;
+
+ fd = bpf_map__fd(skel->maps.tld_data_map);
+
+ /* Create as many int-sized TLDs as the dynamic data size allows */
+ max_keys = dyn_data_size / TLD_ROUND_UP(sizeof(int), 8);
+ for (i = 0; i < max_keys; i++) {
+ snprintf(name, TLD_NAME_LEN, "value_%d", i);
+ tld_keys[i] = tld_create_key(name, sizeof(int));
+ if (!ASSERT_FALSE(tld_key_is_err(tld_keys[i]), "tld_create_key"))
+ goto out;
+
+ data = tld_get_data(fd, tld_keys[i]);
+ if (!ASSERT_OK_PTR(data, "tld_get_data"))
+ goto out;
+ *data = i;
+ }
+
+ /* The next key should fail with E2BIG */
+ key = tld_create_key("overflow", sizeof(int));
+ ASSERT_EQ(tld_key_err_or_zero(key), -E2BIG, "tld_create_key overflow");
+
+ /* Verify data for value_i do not overlap */
+ for (i = 0; i < max_keys; i++) {
+ data = tld_get_data(fd, tld_keys[i]);
+ if (!ASSERT_OK_PTR(data, "tld_get_data"))
+ goto out;
+
+ ASSERT_EQ(*data, i, "tld_get_data value_i");
+ }
+
+ /* Verify BPF side can still read the static key */
+ data = tld_get_data(fd, value0_key);
+ if (!ASSERT_OK_PTR(data, "tld_get_data value0"))
+ goto out;
+ *data = 0xdeadbeef;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.task_main), &opts);
+ ASSERT_OK(err, "run task_main");
+ ASSERT_EQ(skel->bss->test_value0, 0xdeadbeef, "tld_get_data value0");
+
+out:
+ if (tld_keys) {
+ free(tld_keys);
+ tld_keys = NULL;
+ }
+ tld_free();
+ test_task_local_data__destroy(skel);
+}
+
void test_task_local_data(void)
{
if (test__start_subtest("task_local_data_basic"))
test_task_local_data_basic();
if (test__start_subtest("task_local_data_race"))
test_task_local_data_race();
+ if (test__start_subtest("task_local_data_dyn_size_small"))
+ test_task_local_data_dyn_size(64);
+ if (test__start_subtest("task_local_data_dyn_size_zero"))
+ test_task_local_data_dyn_size(0);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c
index b81dde283052..f2f5d36ae00a 100644
--- a/tools/testing/selftests/bpf/prog_tests/token.c
+++ b/tools/testing/selftests/bpf/prog_tests/token.c
@@ -1,9 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#define _GNU_SOURCE
-#include <test_progs.h>
#include <bpf/btf.h>
-#include "cap_helpers.h"
#include <fcntl.h>
#include <sched.h>
#include <signal.h>
@@ -15,9 +13,17 @@
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/un.h>
+
+#include "bpf_util.h"
+#include "cap_helpers.h"
+#include "sysctl_helpers.h"
+#include "test_progs.h"
+#include "trace_helpers.h"
+
#include "priv_map.skel.h"
#include "priv_prog.skel.h"
#include "dummy_st_ops_success.skel.h"
+#include "token_kallsyms.skel.h"
#include "token_lsm.skel.h"
#include "priv_freplace_prog.skel.h"
@@ -1045,6 +1051,58 @@ err_out:
return -EINVAL;
}
+static bool kallsyms_has_bpf_func(struct ksyms *ksyms, const char *func_name)
+{
+ char name[256];
+ int i;
+
+ for (i = 0; i < ksyms->sym_cnt; i++) {
+ if (sscanf(ksyms->syms[i].name, "bpf_prog_%*[^_]_%255s", name) == 1 &&
+ strcmp(name, func_name) == 0)
+ return true;
+ }
+ return false;
+}
+
+static int userns_obj_priv_prog_kallsyms(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ const char *func_names[] = { "xdp_main", "token_ksym_subprog" };
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct token_kallsyms *skel;
+ struct ksyms *ksyms = NULL;
+ char buf[256];
+ int i, err;
+
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+ opts.bpf_token_path = buf;
+ skel = token_kallsyms__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "token_kallsyms__open_opts"))
+ return -EINVAL;
+
+ err = token_kallsyms__load(skel);
+ if (!ASSERT_OK(err, "token_kallsyms__load"))
+ goto cleanup;
+
+ ksyms = load_kallsyms_local();
+ if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(func_names); i++) {
+ if (!ASSERT_TRUE(kallsyms_has_bpf_func(ksyms, func_names[i]),
+ func_names[i])) {
+ err = -EINVAL;
+ break;
+ }
+ }
+
+cleanup:
+ free_kallsyms_local(ksyms);
+ token_kallsyms__destroy(skel);
+ return err;
+}
+
#define bit(n) (1ULL << (n))
static int userns_bpf_token_info(int mnt_fd, struct token_lsm *lsm_skel)
@@ -1082,7 +1140,7 @@ cleanup:
return err;
}
-void test_token(void)
+void serial_test_token(void)
{
if (test__start_subtest("map_token")) {
struct bpffs_opts opts = {
@@ -1194,4 +1252,26 @@ void test_token(void)
subtest_userns(&opts, userns_bpf_token_info);
}
+ if (test__start_subtest("obj_priv_prog_kallsyms")) {
+ char perf_paranoid_orig[32] = {};
+ char kptr_restrict_orig[32] = {};
+ struct bpffs_opts opts = {
+ .cmds = bit(BPF_BTF_LOAD) | bit(BPF_PROG_LOAD),
+ .progs = bit(BPF_PROG_TYPE_XDP),
+ .attachs = ~0ULL,
+ };
+
+ if (sysctl_set_or_fail("/proc/sys/kernel/perf_event_paranoid", perf_paranoid_orig, "0"))
+ goto cleanup;
+ if (sysctl_set_or_fail("/proc/sys/kernel/kptr_restrict", kptr_restrict_orig, "0"))
+ goto cleanup;
+
+ subtest_userns(&opts, userns_obj_priv_prog_kallsyms);
+
+cleanup:
+ if (perf_paranoid_orig[0])
+ sysctl_set_or_fail("/proc/sys/kernel/perf_event_paranoid", NULL, perf_paranoid_orig);
+ if (kptr_restrict_orig[0])
+ sysctl_set_or_fail("/proc/sys/kernel/kptr_restrict", NULL, kptr_restrict_orig);
+ }
}
diff --git a/tools/testing/selftests/bpf/prog_tests/trace_printk.c b/tools/testing/selftests/bpf/prog_tests/trace_printk.c
index e56e88596d64..a5a8104c1ddd 100644
--- a/tools/testing/selftests/bpf/prog_tests/trace_printk.c
+++ b/tools/testing/selftests/bpf/prog_tests/trace_printk.c
@@ -6,18 +6,21 @@
#include "trace_printk.lskel.h"
#define SEARCHMSG "testing,testing"
+#define SEARCHMSG_UTF8 "中文,测试"
static void trace_pipe_cb(const char *str, void *data)
{
if (strstr(str, SEARCHMSG) != NULL)
- (*(int *)data)++;
+ ((int *)data)[0]++;
+ if (strstr(str, SEARCHMSG_UTF8))
+ ((int *)data)[1]++;
}
void serial_test_trace_printk(void)
{
struct trace_printk_lskel__bss *bss;
struct trace_printk_lskel *skel;
- int err = 0, found = 0;
+ int err = 0, found[2] = {};
skel = trace_printk_lskel__open();
if (!ASSERT_OK_PTR(skel, "trace_printk__open"))
@@ -46,11 +49,24 @@ void serial_test_trace_printk(void)
if (!ASSERT_GT(bss->trace_printk_ret, 0, "bss->trace_printk_ret"))
goto cleanup;
- /* verify our search string is in the trace buffer */
- ASSERT_OK(read_trace_pipe_iter(trace_pipe_cb, &found, 1000),
- "read_trace_pipe_iter");
+ if (!ASSERT_GT(bss->trace_printk_utf8_ran, 0, "bss->trace_printk_utf8_ran"))
+ goto cleanup;
+
+ if (!ASSERT_GT(bss->trace_printk_utf8_ret, 0, "bss->trace_printk_utf8_ret"))
+ goto cleanup;
+
+ if (!ASSERT_LT(bss->trace_printk_invalid_spec_ret, 0,
+ "bss->trace_printk_invalid_spec_ret"))
+ goto cleanup;
+
+ /* verify our search strings are in the trace buffer */
+ ASSERT_OK(read_trace_pipe_iter(trace_pipe_cb, found, 1000),
+ "read_trace_pipe_iter");
+
+ if (!ASSERT_EQ(found[0], bss->trace_printk_ran, "found"))
+ goto cleanup;
- if (!ASSERT_EQ(found, bss->trace_printk_ran, "found"))
+ if (!ASSERT_EQ(found[1], bss->trace_printk_utf8_ran, "found_utf8"))
goto cleanup;
cleanup:
diff --git a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
index 472f4f9fa95f..64404602b9ab 100644
--- a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
+++ b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
@@ -8,6 +8,7 @@
#include "cap_helpers.h"
#include "bpf_util.h"
+#include "sysctl_helpers.h"
/* Using CAP_LAST_CAP is risky here, since it can get pulled in from
* an old /usr/include/linux/capability.h and be < CAP_BPF; as a result
@@ -36,26 +37,6 @@ static void process_perfbuf(void *ctx, int cpu, void *data, __u32 len)
got_perfbuf_val = *(__u32 *)data;
}
-static int sysctl_set(const char *sysctl_path, char *old_val, const char *new_val)
-{
- int ret = 0;
- FILE *fp;
-
- fp = fopen(sysctl_path, "r+");
- if (!fp)
- return -errno;
- if (old_val && fscanf(fp, "%s", old_val) <= 0) {
- ret = -ENOENT;
- } else if (!old_val || strcmp(old_val, new_val) != 0) {
- fseek(fp, 0, SEEK_SET);
- if (fprintf(fp, "%s", new_val) < 0)
- ret = -errno;
- }
- fclose(fp);
-
- return ret;
-}
-
static void test_unpriv_bpf_disabled_positive(struct test_unpriv_bpf_disabled *skel,
__u32 prog_id, int prog_fd, int perf_fd,
char **map_paths, int *map_fds)
diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c
index ce18a4db813f..ebd5a1e69f56 100644
--- a/tools/testing/selftests/bpf/progs/bpf_cubic.c
+++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c
@@ -16,6 +16,7 @@
#include "bpf_tracing_net.h"
#include <bpf/bpf_tracing.h>
+#include <errno.h>
char _license[] SEC("license") = "GPL";
@@ -170,10 +171,18 @@ static void bictcp_hystart_reset(struct sock *sk)
ca->sample_cnt = 0;
}
+bool nodelay_init_reject = false;
+bool nodelay_cwnd_event_tx_start_reject = false;
+
SEC("struct_ops")
void BPF_PROG(bpf_cubic_init, struct sock *sk)
{
struct bpf_bictcp *ca = inet_csk_ca(sk);
+ int true_val = 1, ret;
+
+ ret = bpf_setsockopt(sk, SOL_TCP, TCP_NODELAY, &true_val, sizeof(true_val));
+ if (ret == -EOPNOTSUPP)
+ nodelay_init_reject = true;
bictcp_reset(ca);
@@ -189,8 +198,13 @@ void BPF_PROG(bpf_cubic_cwnd_event_tx_start, struct sock *sk)
{
struct bpf_bictcp *ca = inet_csk_ca(sk);
__u32 now = tcp_jiffies32;
+ int true_val = 1, ret;
__s32 delta;
+ ret = bpf_setsockopt(sk, SOL_TCP, TCP_NODELAY, &true_val, sizeof(true_val));
+ if (ret == -EOPNOTSUPP)
+ nodelay_cwnd_event_tx_start_reject = true;
+
delta = now - tcp_sk(sk)->lsndtime;
/* We were application limited (idle) for a while.
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_unix.c b/tools/testing/selftests/bpf/progs/bpf_iter_unix.c
index fea275df9e22..a2652c8c3616 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_unix.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_unix.c
@@ -7,6 +7,13 @@
char _license[] SEC("license") = "GPL";
+SEC(".maps") struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} sockmap;
+
static long sock_i_ino(const struct sock *sk)
{
const struct socket *sk_socket = sk->sk_socket;
@@ -76,5 +83,8 @@ int dump_unix(struct bpf_iter__unix *ctx)
BPF_SEQ_PRINTF(seq, "\n");
+ /* Test for deadlock. */
+ bpf_map_update_elem(&sockmap, &(int){0}, sk, 0);
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index dcd78a3a9052..a0d7b15a24b1 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -263,8 +263,8 @@
#if __clang_major__ >= 18 && defined(ENABLE_ATOMICS_TESTS) && \
(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
- (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) || \
- (defined(__TARGET_ARCH_powerpc))
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_powerpc) || defined(__TARGET_ARCH_loongarch))
#define CAN_USE_LOAD_ACQ_STORE_REL
#endif
diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
index 6443b320c732..ee053b24e6ca 100644
--- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
@@ -385,4 +385,19 @@ int kptr_xchg_possibly_null(struct __sk_buff *ctx)
return 0;
}
+SEC("?tc")
+__failure __msg("invalid kptr access, R")
+int reject_scalar_store_to_kptr(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ *(volatile u64 *)&v->unref_ptr = 0xBADC0DE;
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c
index cb990a7d3d45..5e0b27e7855c 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_sk.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c
@@ -149,6 +149,20 @@ int _setsockopt(struct bpf_sockopt *ctx)
if (sk && sk->family == AF_NETLINK)
goto out;
+ if (sk && sk->family == AF_INET && sk->type == SOCK_RAW) {
+ struct bpf_tcp_sock *tp = bpf_tcp_sock(sk);
+
+ if (tp) {
+ char saved_syn[60];
+
+ bpf_getsockopt(sk, SOL_TCP, TCP_SAVED_SYN,
+ &saved_syn, sizeof(saved_syn));
+ goto consumed;
+ }
+
+ goto out;
+ }
+
/* Make sure bpf_get_netns_cookie is callable.
*/
if (bpf_get_netns_cookie(NULL) == 0)
@@ -224,6 +238,8 @@ int _setsockopt(struct bpf_sockopt *ctx)
return 0; /* couldn't get sk storage */
storage->val = optval[0];
+
+consumed:
ctx->optlen = -1; /* BPF has consumed this option, don't call kernel
* setsockopt handler.
*/
diff --git a/tools/testing/selftests/bpf/progs/task_local_data.bpf.h b/tools/testing/selftests/bpf/progs/task_local_data.bpf.h
index 1f396711f487..0df8a12fd61e 100644
--- a/tools/testing/selftests/bpf/progs/task_local_data.bpf.h
+++ b/tools/testing/selftests/bpf/progs/task_local_data.bpf.h
@@ -86,13 +86,14 @@ struct tld_meta_u {
};
struct tld_data_u {
- __u64 start; /* offset of tld_data_u->data in a page */
+ __u64 unused;
char data[__PAGE_SIZE - sizeof(__u64)] __attribute__((aligned(8)));
};
struct tld_map_value {
struct tld_data_u __uptr *data;
struct tld_meta_u __uptr *meta;
+ __u16 start; /* offset of tld_data_u->data in a page */
};
typedef struct tld_uptr_dummy {
@@ -176,7 +177,7 @@ static int __tld_fetch_key(struct tld_object *tld_obj, const char *name, int i_s
if (!tld_obj->data_map || !tld_obj->data_map->data || !tld_obj->data_map->meta)
return 0;
- start = tld_obj->data_map->data->start;
+ start = tld_obj->data_map->start;
cnt = tld_obj->data_map->meta->cnt;
metadata = tld_obj->data_map->meta->metadata;
diff --git a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
index d487153a839d..ed5a0011b863 100644
--- a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
+++ b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
@@ -29,6 +29,10 @@ unsigned int nr_syn = 0;
unsigned int nr_fin = 0;
unsigned int nr_hwtstamp = 0;
+bool nodelay_est_ok = false;
+bool nodelay_hdr_len_reject = false;
+bool nodelay_write_hdr_reject = false;
+
/* Check the header received from the active side */
static int __check_active_hdr_in(struct bpf_sock_ops *skops, bool check_syn)
{
@@ -300,7 +304,7 @@ static int handle_passive_estab(struct bpf_sock_ops *skops)
SEC("sockops")
int misc_estab(struct bpf_sock_ops *skops)
{
- int true_val = 1;
+ int true_val = 1, false_val = 0, ret;
switch (skops->op) {
case BPF_SOCK_OPS_TCP_LISTEN_CB:
@@ -316,10 +320,19 @@ int misc_estab(struct bpf_sock_ops *skops)
case BPF_SOCK_OPS_PARSE_HDR_OPT_CB:
return handle_parse_hdr(skops);
case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
+ ret = bpf_setsockopt(skops, SOL_TCP, TCP_NODELAY, &true_val, sizeof(true_val));
+ if (ret == -EOPNOTSUPP)
+ nodelay_hdr_len_reject = true;
return handle_hdr_opt_len(skops);
case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
+ ret = bpf_setsockopt(skops, SOL_TCP, TCP_NODELAY, &true_val, sizeof(true_val));
+ if (ret == -EOPNOTSUPP)
+ nodelay_write_hdr_reject = true;
return handle_write_hdr_opt(skops);
case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
+ ret = bpf_setsockopt(skops, SOL_TCP, TCP_NODELAY, &false_val, sizeof(false_val));
+ if (!ret)
+ nodelay_est_ok = true;
return handle_passive_estab(skops);
}
diff --git a/tools/testing/selftests/bpf/progs/timer_start_deadlock.c b/tools/testing/selftests/bpf/progs/timer_start_deadlock.c
index 019518ee18cd..afabd15bdac4 100644
--- a/tools/testing/selftests/bpf/progs/timer_start_deadlock.c
+++ b/tools/testing/selftests/bpf/progs/timer_start_deadlock.c
@@ -27,13 +27,13 @@ static int timer_cb(void *map, int *key, struct elem *value)
return 0;
}
-SEC("tp_btf/hrtimer_cancel")
-int BPF_PROG(tp_hrtimer_cancel, struct hrtimer *hrtimer)
+SEC("tp_btf/hrtimer_start")
+int BPF_PROG(tp_hrtimer_start, struct hrtimer *hrtimer, enum hrtimer_mode mode, bool was_armed)
{
struct bpf_timer *timer;
int key = 0;
- if (!in_timer_start)
+ if (!in_timer_start || !was_armed)
return 0;
tp_called = 1;
@@ -60,7 +60,7 @@ int start_timer(void *ctx)
/*
* call hrtimer_start() twice, so that 2nd call does
- * remove_hrtimer() and trace_hrtimer_cancel() tracepoint.
+ * trace_hrtimer_start(was_armed=1) tracepoint.
*/
in_timer_start = 1;
bpf_timer_start(timer, 1000000000, 0);
diff --git a/tools/testing/selftests/bpf/progs/token_kallsyms.c b/tools/testing/selftests/bpf/progs/token_kallsyms.c
new file mode 100644
index 000000000000..c9f9344f3eb2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/token_kallsyms.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+__weak
+int token_ksym_subprog(void)
+{
+ return 0;
+}
+
+SEC("xdp")
+int xdp_main(struct xdp_md *xdp)
+{
+ return token_ksym_subprog();
+}
diff --git a/tools/testing/selftests/bpf/progs/trace_printk.c b/tools/testing/selftests/bpf/progs/trace_printk.c
index 6695478c2b25..f4c538ec3ebd 100644
--- a/tools/testing/selftests/bpf/progs/trace_printk.c
+++ b/tools/testing/selftests/bpf/progs/trace_printk.c
@@ -10,13 +10,23 @@ char _license[] SEC("license") = "GPL";
int trace_printk_ret = 0;
int trace_printk_ran = 0;
+int trace_printk_invalid_spec_ret = 0;
+int trace_printk_utf8_ret = 0;
+int trace_printk_utf8_ran = 0;
const char fmt[] = "Testing,testing %d\n";
+static const char utf8_fmt[] = "中文,测试 %d\n";
+/* Non-ASCII bytes after '%' must still be rejected. */
+static const char invalid_spec_fmt[] = "%\x80\n";
SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int sys_enter(void *ctx)
{
trace_printk_ret = bpf_trace_printk(fmt, sizeof(fmt),
++trace_printk_ran);
+ trace_printk_utf8_ret = bpf_trace_printk(utf8_fmt, sizeof(utf8_fmt),
+ ++trace_printk_utf8_ran);
+ trace_printk_invalid_spec_ret = bpf_trace_printk(invalid_spec_fmt,
+ sizeof(invalid_spec_fmt));
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/verifier_live_stack.c b/tools/testing/selftests/bpf/progs/verifier_live_stack.c
index b7a9fa10e84d..401152b2b64f 100644
--- a/tools/testing/selftests/bpf/progs/verifier_live_stack.c
+++ b/tools/testing/selftests/bpf/progs/verifier_live_stack.c
@@ -2647,3 +2647,196 @@ __naked void spill_join_with_imprecise_off(void)
"exit;"
::: __clobber_all);
}
+
+/*
+ * Same as spill_join_with_multi_off but the write is BPF_ST (store
+ * immediate) instead of BPF_STX. BPF_ST goes through
+ * clear_stack_for_all_offs() rather than spill_to_stack(), and that
+ * path also needs to join instead of overwriting.
+ *
+ * fp-8 = &fp-24
+ * fp-16 = &fp-32
+ * r1 = fp-8 or fp-16 (two offsets from branch)
+ * *(u64 *)(r1 + 0) = 0 -- BPF_ST with immediate
+ * r0 = *(u64 *)(r10 - 16) -- fill from fp-16
+ * r0 = *(u64 *)(r0 + 0) -- deref: should produce use
+ */
+SEC("socket")
+__log_level(2)
+__failure
+__msg("15: (7a) *(u64 *)(r1 +0) = 0 fp-8: fp0-24 -> fp0-24|fp0+0 fp-16: fp0-32 -> fp0-32|fp0+0")
+__msg("17: (79) r0 = *(u64 *)(r0 +0) ; use: fp0-32")
+__naked void st_imm_join_with_multi_off(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 24) = 0;"
+ "*(u64 *)(r10 - 32) = 0;"
+ "r1 = r10;"
+ "r1 += -24;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r1 = r10;"
+ "r1 += -32;"
+ "*(u64 *)(r10 - 16) = r1;"
+ /* create r1 with two candidate offsets: fp-8 or fp-16 */
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 0 goto 1f;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "goto 2f;"
+"1:"
+ "r1 = r10;"
+ "r1 += -16;"
+"2:"
+ /* BPF_ST: store immediate through multi-offset r1 */
+ "*(u64 *)(r1 + 0) = 0;"
+ /* read back fp-16 and deref */
+ "r0 = *(u64 *)(r10 - 16);"
+ "r0 = *(u64 *)(r0 + 0);"
+ "r0 = 0;"
+ "exit;"
+ :: __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/*
+ * Check that BPF_ST with a known offset fully overwrites stack slot
+ * from the arg tracking point of view.
+ */
+SEC("socket")
+__log_level(2)
+__success
+__msg("5: (7a) *(u64 *)(r1 +0) = 0 fp-8: fp0-16 -> _{{$}}")
+__naked void st_imm_join_with_single_off(void)
+{
+ asm volatile (
+ "r2 = r10;"
+ "r2 += -16;"
+ "*(u64 *)(r10 - 8) = r2;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "*(u64 *)(r1 + 0) = 0;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * Same as spill_join_with_imprecise_off but the write is BPF_ST.
+ * Use "r2 = -8; r1 += r2" to make arg tracking lose offset
+ * precision while the main verifier keeps r1 as fixed-offset.
+ *
+ * fp-8 = &fp-24
+ * fp-16 = &fp-32
+ * r1 = fp-8 (imprecise to arg tracking)
+ * *(u64 *)(r1 + 0) = 0 -- BPF_ST with immediate
+ * r0 = *(u64 *)(r10 - 16) -- fill from fp-16
+ * r0 = *(u64 *)(r0 + 0) -- deref: should produce use
+ */
+SEC("socket")
+__log_level(2)
+__success
+__msg("13: (79) r0 = *(u64 *)(r0 +0) ; use: fp0-32")
+__naked void st_imm_join_with_imprecise_off(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 24) = 0;"
+ "*(u64 *)(r10 - 32) = 0;"
+ "r1 = r10;"
+ "r1 += -24;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r1 = r10;"
+ "r1 += -32;"
+ "*(u64 *)(r10 - 16) = r1;"
+ /* r1 = fp-8 but arg tracking sees off_cnt == 0 */
+ "r1 = r10;"
+ "r2 = -8;"
+ "r1 += r2;"
+ /* store immediate through imprecise r1 */
+ "*(u64 *)(r1 + 0) = 0;"
+ /* read back fp-16 */
+ "r0 = *(u64 *)(r10 - 16);"
+ /* deref: should produce use */
+ "r0 = *(u64 *)(r0 + 0);"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/*
+ * Test that spilling through an ARG_IMPRECISE pointer joins with
+ * existing at_stack values. Subprog receives r1 = fp0-24 and
+ * r2 = map_value, creates an ARG_IMPRECISE pointer by joining caller
+ * and callee FP on two branches.
+ *
+ * Setup: callee spills &fp1-16 to fp1-8 (precise, tracked).
+ * Then writes map_value through ARG_IMPRECISE r1 — on path A
+ * this hits fp1-8, on path B it hits caller stack.
+ * Since spill_to_stack is skipped for ARG_IMPRECISE dst,
+ * fp1-8 tracking isn't joined with none.
+ *
+ * Expected after the imprecise write:
+ * - arg tracking should show fp1-8 = fp1-16|fp1+0 (joined with none)
+ * - read from fp1-8 and deref should produce use for fp1-16
+ * - write through it should NOT produce def for fp1-16
+ */
+SEC("socket")
+__log_level(2)
+__success
+__msg("26: (79) r0 = *(u64 *)(r10 -8) // r1=IMP3 r6=fp0-24 r7=fp1-16 fp-8=fp1-16|fp1+0")
+__naked void imprecise_dst_spill_join(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 24) = 0;"
+ /* map lookup for a valid non-FP pointer */
+ "*(u32 *)(r10 - 32) = 0;"
+ "r1 = %[map] ll;"
+ "r2 = r10;"
+ "r2 += -32;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 == 0 goto 1f;"
+ /* r1 = &caller_fp-24, r2 = map_value */
+ "r1 = r10;"
+ "r1 += -24;"
+ "r2 = r0;"
+ "call imprecise_dst_spill_join_sub;"
+"1:"
+ "r0 = 0;"
+ "exit;"
+ :: __imm_addr(map),
+ __imm(bpf_map_lookup_elem)
+ : __clobber_all);
+}
+
+static __used __naked void imprecise_dst_spill_join_sub(void)
+{
+ asm volatile (
+ /* r6 = &caller_fp-24 (frame=0), r8 = map_value */
+ "r6 = r1;"
+ "r8 = r2;"
+ /* spill &fp1-16 to fp1-8: at_stack[0] = fp1-16 */
+ "*(u64 *)(r10 - 16) = 0;"
+ "r7 = r10;"
+ "r7 += -16;"
+ "*(u64 *)(r10 - 8) = r7;"
+ /* branch to create ARG_IMPRECISE pointer */
+ "call %[bpf_get_prandom_u32];"
+ /* path B: r1 = caller fp-24 (frame=0) */
+ "r1 = r6;"
+ "if r0 == 0 goto 1f;"
+ /* path A: r1 = callee fp-8 (frame=1) */
+ "r1 = r10;"
+ "r1 += -8;"
+"1:"
+ /* r1 = ARG_IMPRECISE{mask=BIT(0)|BIT(1)}.
+ * Write map_value (non-FP) through r1. On path A this overwrites fp1-8.
+ * Should join at_stack[0] with none: fp1-16|fp1+0.
+ */
+ "*(u64 *)(r1 + 0) = r8;"
+ /* read fp1-8: should be fp1-16|fp1+0 (joined) */
+ "r0 = *(u64 *)(r10 - 8);"
+ "*(u64 *)(r0 + 0) = 42;"
+ "r0 = 0;"
+ "exit;"
+ :: __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c
index 4794903aec8e..6f325876efdd 100644
--- a/tools/testing/selftests/bpf/progs/verifier_precision.c
+++ b/tools/testing/selftests/bpf/progs/verifier_precision.c
@@ -75,8 +75,8 @@ __naked int bpf_end_to_be(void)
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
- defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
- __clang_major__ >= 18
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+ defined(__TARGET_ARCH_loongarch)) && __clang_major__ >= 18
SEC("?raw_tp")
__success __log_level(2)
diff --git a/tools/testing/selftests/bpf/sysctl_helpers.c b/tools/testing/selftests/bpf/sysctl_helpers.c
new file mode 100644
index 000000000000..e2bd824f12d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/sysctl_helpers.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+
+#include "sysctl_helpers.h"
+#include "test_progs.h"
+
+int sysctl_set(const char *sysctl_path, char *old_val, const char *new_val)
+{
+ int ret = 0;
+ FILE *fp;
+
+ fp = fopen(sysctl_path, "r+");
+ if (!fp)
+ return -errno;
+ if (old_val && fscanf(fp, "%s", old_val) <= 0) {
+ ret = -ENOENT;
+ } else if (!old_val || strcmp(old_val, new_val) != 0) {
+ fseek(fp, 0, SEEK_SET);
+ if (fprintf(fp, "%s", new_val) < 0)
+ ret = -errno;
+ }
+ fclose(fp);
+
+ return ret;
+}
+
+int sysctl_set_or_fail(const char *sysctl_path, char *old_val, const char *new_val)
+{
+ int err;
+
+ err = sysctl_set(sysctl_path, old_val, new_val);
+ if (err)
+ PRINT_FAIL("failed to set %s to %s: %s\n", sysctl_path, new_val, strerror(-err));
+ return err;
+}
diff --git a/tools/testing/selftests/bpf/sysctl_helpers.h b/tools/testing/selftests/bpf/sysctl_helpers.h
new file mode 100644
index 000000000000..35e37bfe1b3b
--- /dev/null
+++ b/tools/testing/selftests/bpf/sysctl_helpers.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SYSCTL_HELPERS_H
+#define __SYSCTL_HELPERS_H
+
+int sysctl_set(const char *sysctl_path, char *old_val, const char *new_val);
+int sysctl_set_or_fail(const char *sysctl_path, char *old_val, const char *new_val);
+
+#endif
diff --git a/tools/testing/selftests/cgroup/lib/cgroup_util.c b/tools/testing/selftests/cgroup/lib/cgroup_util.c
index 6a7295347e90..42f54936f4bb 100644
--- a/tools/testing/selftests/cgroup/lib/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/lib/cgroup_util.c
@@ -106,8 +106,9 @@ int cg_read_strcmp(const char *cgroup, const char *control,
/* Handle the case of comparing against empty string */
if (!expected)
return -1;
- else
- size = strlen(expected) + 1;
+
+ /* needs size > 1, otherwise cg_read() reads 0 bytes */
+ size = (expected[0] == '\0') ? 2 : strlen(expected) + 1;
buf = malloc(size);
if (!buf)
diff --git a/tools/testing/selftests/cgroup/test_cpuset_v1_base.sh b/tools/testing/selftests/cgroup/test_cpuset_v1_base.sh
index 42a6628fb8bc..1c0444729e70 100755
--- a/tools/testing/selftests/cgroup/test_cpuset_v1_base.sh
+++ b/tools/testing/selftests/cgroup/test_cpuset_v1_base.sh
@@ -18,7 +18,7 @@ write_test() {
echo "testing $interface $value"
echo $value > $dir/$interface
new=$(cat $dir/$interface)
- [[ $value -ne $(cat $dir/$interface) ]] && {
+ [[ "$value" != "$new" ]] && {
echo "$interface write $value failed: new:$new"
exit 1
}
diff --git a/tools/testing/selftests/cgroup/test_kmem.c b/tools/testing/selftests/cgroup/test_kmem.c
index eeabd34bf083..12f59925500b 100644
--- a/tools/testing/selftests/cgroup/test_kmem.c
+++ b/tools/testing/selftests/cgroup/test_kmem.c
@@ -368,11 +368,15 @@ static int test_percpu_basic(const char *root)
for (i = 0; i < 1000; i++) {
child = cg_name_indexed(parent, "child", i);
- if (!child)
- return -1;
+ if (!child) {
+ ret = -1;
+ goto cleanup_children;
+ }
- if (cg_create(child))
+ if (cg_create(child)) {
+ free(child);
goto cleanup_children;
+ }
free(child);
}
diff --git a/tools/testing/selftests/drivers/net/README.rst b/tools/testing/selftests/drivers/net/README.rst
index c8588436c224..c6bed9a985bc 100644
--- a/tools/testing/selftests/drivers/net/README.rst
+++ b/tools/testing/selftests/drivers/net/README.rst
@@ -211,8 +211,8 @@ Avoid libraries and frameworks
Test files should be relatively self contained. The libraries should
only include very core or non-trivial code.
-It may be tempting to "factor out" the common code, but fight that urge.
-Library code increases the barrier of entry, and complexity in general.
+It may be tempting to "factor out" the common code to lib/py/, but fight that
+urge. Library code increases the barrier of entry, and complexity in general.
Avoid mixing test code and boilerplate
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -290,6 +290,12 @@ or::
def test(cfg, mode, protocol):
pass
+Linters
+~~~~~~~
+
+We expect clean ``ruff check`` and ``pylint --disable=R``.
+The code should be clean, avoid disabling pylint warnings explicitly!
+
Running tests CI-style
======================
diff --git a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
index bf9bcd1b5ec0..f2e43b6c4c81 100644
--- a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
+++ b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
@@ -23,20 +23,9 @@ test_LAG_cleanup()
ip link set dev dummy2 master "$name"
elif [ "$driver" = "team" ]; then
name="team0"
- teamd -d -c '
- {
- "device": "'"$name"'",
- "runner": {
- "name": "'"$mode"'"
- },
- "ports": {
- "dummy1":
- {},
- "dummy2":
- {}
- }
- }
- '
+ ip link add "$name" type team
+ ip link set dev dummy1 master "$name"
+ ip link set dev dummy2 master "$name"
ip link set dev "$name" up
else
check_err 1
diff --git a/tools/testing/selftests/drivers/net/hw/Makefile b/tools/testing/selftests/drivers/net/hw/Makefile
index 85ca4d1ecf9e..82809d5b2478 100644
--- a/tools/testing/selftests/drivers/net/hw/Makefile
+++ b/tools/testing/selftests/drivers/net/hw/Makefile
@@ -31,6 +31,7 @@ TEST_PROGS = \
hw_stats_l3.sh \
hw_stats_l3_gre.sh \
iou-zcrx.py \
+ ipsec_vxlan.py \
irq.py \
loopback.sh \
nic_timestamp.py \
diff --git a/tools/testing/selftests/drivers/net/hw/config b/tools/testing/selftests/drivers/net/hw/config
index dd50cb8a7911..8c132ace2b8d 100644
--- a/tools/testing/selftests/drivers/net/hw/config
+++ b/tools/testing/selftests/drivers/net/hw/config
@@ -3,6 +3,10 @@ CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
CONFIG_FUNCTION_ERROR_INJECTION=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_ESP_OFFLOAD=y
+CONFIG_INET_ESP=y
+CONFIG_INET_ESP_OFFLOAD=y
CONFIG_IO_URING=y
CONFIG_IPV6=y
CONFIG_IPV6_GRE=y
@@ -14,3 +18,4 @@ CONFIG_NETKIT=y
CONFIG_NET_SCH_INGRESS=y
CONFIG_UDMABUF=y
CONFIG_VXLAN=y
+CONFIG_XFRM_USER=y
diff --git a/tools/testing/selftests/drivers/net/hw/ipsec_vxlan.py b/tools/testing/selftests/drivers/net/hw/ipsec_vxlan.py
new file mode 100755
index 000000000000..0740a4d85240
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/ipsec_vxlan.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+"""Traffic test for VXLAN + IPsec crypto-offload."""
+
+import os
+
+from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_ge
+from lib.py import ksft_variants, KsftNamedVariant, KsftSkipEx
+from lib.py import CmdExitFailure, NetDrvEpEnv, cmd, defer, ethtool, ip
+from lib.py import Iperf3Runner
+
+# Inner tunnel addresses - TEST-NET-2 (RFC 5737) / doc prefix (RFC 3849)
+INNER_V4_LOCAL = "198.51.100.1"
+INNER_V4_REMOTE = "198.51.100.2"
+INNER_V6_LOCAL = "2001:db8:100::1"
+INNER_V6_REMOTE = "2001:db8:100::2"
+
+# ESP parameters
+SPI_OUT = "0x1000"
+SPI_IN = "0x1001"
+# 128-bit key + 32-bit salt = 20 bytes hex, 128-bit ICV
+ESP_AEAD = "aead 'rfc4106(gcm(aes))' 0x" + "01" * 20 + " 128"
+
+
+def xfrm(args, host=None):
+ """Runs 'ip xfrm' via shell to preserve parentheses in algo names."""
+ cmd(f"ip xfrm {args}", shell=True, host=host)
+
+
+def check_xfrm_offload_support():
+ """Skips if iproute2 lacks xfrm offload support."""
+ out = cmd("ip xfrm state help", fail=False)
+ if "offload" not in out.stdout + out.stderr:
+ raise KsftSkipEx("iproute2 too old, missing xfrm offload")
+
+
+def check_esp_hw_offload(cfg):
+ """Skips if device lacks esp-hw-offload support."""
+ check_xfrm_offload_support()
+ try:
+ feat = ethtool(f"-k {cfg.ifname}", json=True)[0]
+ except (CmdExitFailure, IndexError) as e:
+ raise KsftSkipEx(f"can't query features: {e}") from e
+ if not feat.get("esp-hw-offload", {}).get("active"):
+ raise KsftSkipEx("Device does not support esp-hw-offload")
+
+
+def get_tx_drops(cfg):
+ """Returns TX dropped counter from the physical device."""
+ stats = ip("-s -s link show dev " + cfg.ifname, json=True)[0]
+ return stats["stats64"]["tx"]["dropped"]
+
+
+def setup_vxlan_ipsec(cfg, outer_ipver, inner_ipver):
+ """Sets up VXLAN tunnel with IPsec transport-mode crypto-offload."""
+ vxlan_name = f"vx{os.getpid()}"
+ local_addr = cfg.addr_v[outer_ipver]
+ remote_addr = cfg.remote_addr_v[outer_ipver]
+
+ if inner_ipver == "4":
+ inner_local = f"{INNER_V4_LOCAL}/24"
+ inner_remote = f"{INNER_V4_REMOTE}/24"
+ addr_extra = ""
+ else:
+ inner_local = f"{INNER_V6_LOCAL}/64"
+ inner_remote = f"{INNER_V6_REMOTE}/64"
+ addr_extra = " nodad"
+
+ if outer_ipver == "6":
+ vxlan_opts = "udp6zerocsumtx udp6zerocsumrx"
+ else:
+ vxlan_opts = "noudpcsum"
+
+ # VXLAN tunnel - local side
+ ip(f"link add {vxlan_name} type vxlan id 100 dstport 4789 {vxlan_opts} "
+ f"local {local_addr} remote {remote_addr} dev {cfg.ifname}")
+ defer(ip, f"link del {vxlan_name}")
+ ip(f"addr add {inner_local} dev {vxlan_name}{addr_extra}")
+ ip(f"link set {vxlan_name} up")
+
+ # VXLAN tunnel - remote side
+ ip(f"link add {vxlan_name} type vxlan id 100 dstport 4789 {vxlan_opts} "
+ f"local {remote_addr} remote {local_addr} dev {cfg.remote_ifname}",
+ host=cfg.remote)
+ defer(ip, f"link del {vxlan_name}", host=cfg.remote)
+ ip(f"addr add {inner_remote} dev {vxlan_name}{addr_extra}",
+ host=cfg.remote)
+ ip(f"link set {vxlan_name} up", host=cfg.remote)
+
+ # xfrm state - local outbound SA
+ xfrm(f"state add src {local_addr} dst {remote_addr} "
+ f"proto esp spi {SPI_OUT} "
+ f"{ESP_AEAD} "
+ f"mode transport offload crypto dev {cfg.ifname} dir out")
+ defer(xfrm, f"state del src {local_addr} dst {remote_addr} "
+ f"proto esp spi {SPI_OUT}")
+
+ # xfrm state - local inbound SA
+ xfrm(f"state add src {remote_addr} dst {local_addr} "
+ f"proto esp spi {SPI_IN} "
+ f"{ESP_AEAD} "
+ f"mode transport offload crypto dev {cfg.ifname} dir in")
+ defer(xfrm, f"state del src {remote_addr} dst {local_addr} "
+ f"proto esp spi {SPI_IN}")
+
+ # xfrm state - remote outbound SA (mirror, software crypto)
+ xfrm(f"state add src {remote_addr} dst {local_addr} "
+ f"proto esp spi {SPI_IN} "
+ f"{ESP_AEAD} "
+ f"mode transport",
+ host=cfg.remote)
+ defer(xfrm, f"state del src {remote_addr} dst {local_addr} "
+ f"proto esp spi {SPI_IN}", host=cfg.remote)
+
+ # xfrm state - remote inbound SA (mirror, software crypto)
+ xfrm(f"state add src {local_addr} dst {remote_addr} "
+ f"proto esp spi {SPI_OUT} "
+ f"{ESP_AEAD} "
+ f"mode transport",
+ host=cfg.remote)
+ defer(xfrm, f"state del src {local_addr} dst {remote_addr} "
+ f"proto esp spi {SPI_OUT}", host=cfg.remote)
+
+ # xfrm policy - local out
+ xfrm(f"policy add src {local_addr} dst {remote_addr} "
+ f"proto udp dport 4789 dir out "
+ f"tmpl src {local_addr} dst {remote_addr} proto esp mode transport")
+ defer(xfrm, f"policy del src {local_addr} dst {remote_addr} "
+ f"proto udp dport 4789 dir out")
+
+ # xfrm policy - local in
+ xfrm(f"policy add src {remote_addr} dst {local_addr} "
+ f"proto udp dport 4789 dir in "
+ f"tmpl src {remote_addr} dst {local_addr} proto esp mode transport")
+ defer(xfrm, f"policy del src {remote_addr} dst {local_addr} "
+ f"proto udp dport 4789 dir in")
+
+ # xfrm policy - remote out
+ xfrm(f"policy add src {remote_addr} dst {local_addr} "
+ f"proto udp dport 4789 dir out "
+ f"tmpl src {remote_addr} dst {local_addr} proto esp mode transport",
+ host=cfg.remote)
+ defer(xfrm, f"policy del src {remote_addr} dst {local_addr} "
+ f"proto udp dport 4789 dir out", host=cfg.remote)
+
+ # xfrm policy - remote in
+ xfrm(f"policy add src {local_addr} dst {remote_addr} "
+ f"proto udp dport 4789 dir in "
+ f"tmpl src {local_addr} dst {remote_addr} proto esp mode transport",
+ host=cfg.remote)
+ defer(xfrm, f"policy del src {local_addr} dst {remote_addr} "
+ f"proto udp dport 4789 dir in", host=cfg.remote)
+
+
+def _vxlan_ipsec_variants():
+ """Generates outer/inner IP version variants."""
+ for outer in ["4", "6"]:
+ for inner in ["4", "6"]:
+ yield KsftNamedVariant(f"outer_v{outer}_inner_v{inner}", outer, inner)
+
+
+@ksft_variants(_vxlan_ipsec_variants())
+def test_vxlan_ipsec_crypto_offload(cfg, outer_ipver, inner_ipver):
+ """Tests VXLAN+IPsec crypto-offload has no TX drops."""
+ cfg.require_ipver(outer_ipver)
+ check_esp_hw_offload(cfg)
+
+ setup_vxlan_ipsec(cfg, outer_ipver, inner_ipver)
+
+ if inner_ipver == "4":
+ inner_local = INNER_V4_LOCAL
+ inner_remote = INNER_V4_REMOTE
+ ping = "ping"
+ else:
+ inner_local = INNER_V6_LOCAL
+ inner_remote = INNER_V6_REMOTE
+ ping = "ping -6"
+
+ cmd(f"{ping} -c 1 -W 2 {inner_remote}")
+
+ drops_before = get_tx_drops(cfg)
+
+ runner = Iperf3Runner(cfg, server_ip=inner_local,
+ client_ip=inner_remote)
+ bw_gbps = runner.measure_bandwidth(reverse=True)
+
+ cfg.wait_hw_stats_settle()
+ drops_after = get_tx_drops(cfg)
+
+ ksft_eq(drops_after - drops_before, 0,
+ comment="TX drops during VXLAN+IPsec")
+ ksft_ge(bw_gbps, 0.1,
+ comment="Minimum 100Mbps over VXLAN+IPsec")
+
+
+def main():
+ """Runs VXLAN+IPsec crypto-offload GSO selftest."""
+ with NetDrvEpEnv(__file__, nsim_test=False) as cfg:
+ ksft_run([test_vxlan_ipsec_crypto_offload], args=(cfg,))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/lib/py/load.py b/tools/testing/selftests/drivers/net/lib/py/load.py
index f181fa2d38fc..e24660e5c27f 100644
--- a/tools/testing/selftests/drivers/net/lib/py/load.py
+++ b/tools/testing/selftests/drivers/net/lib/py/load.py
@@ -48,7 +48,10 @@ class Iperf3Runner:
Starts the iperf3 client with the configured options.
"""
cmdline = self._build_client(streams, duration, reverse)
- return cmd(cmdline, background=background, host=self.env.remote)
+ kwargs = {"background": background, "host": self.env.remote}
+ if not background:
+ kwargs["timeout"] = duration + 5
+ return cmd(cmdline, **kwargs)
def measure_bandwidth(self, reverse=False):
"""
diff --git a/tools/testing/selftests/drivers/net/shaper.py b/tools/testing/selftests/drivers/net/shaper.py
index 11310f19bfa0..e39d270e688d 100755
--- a/tools/testing/selftests/drivers/net/shaper.py
+++ b/tools/testing/selftests/drivers/net/shaper.py
@@ -1,7 +1,10 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
-from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_true, KsftSkipEx
+import errno
+
+from lib.py import ksft_run, ksft_exit
+from lib.py import ksft_eq, ksft_raises, ksft_true, KsftSkipEx
from lib.py import EthtoolFamily, NetshaperFamily
from lib.py import NetDrvEnv
from lib.py import NlError
@@ -438,6 +441,21 @@ def queue_update(cfg, nl_shaper) -> None:
nl_shaper.delete({'ifindex': cfg.ifindex,
'handle': {'scope': 'queue', 'id': i}})
+def dup_leaves(cfg, nl_shaper) -> None:
+ """ Ensure that the kernel rejects duplicate leaves. """
+ if not cfg.groups:
+ raise KsftSkipEx("device does not support node scope")
+
+ with ksft_raises(NlError) as cm:
+ nl_shaper.group({
+ 'ifindex': cfg.ifindex,
+ 'leaves':[{'handle': {'scope': 'queue', 'id': 0}},
+ {'handle': {'scope': 'queue', 'id': 0}}],
+ 'handle': {'scope':'node'},
+ 'metric': 'bps',
+ 'bw-max': 10000})
+ ksft_eq(cm.exception.error, errno.EINVAL)
+
def main() -> None:
with NetDrvEnv(__file__, queue_count=4) as cfg:
cfg.queues = False
@@ -453,7 +471,9 @@ def main() -> None:
basic_groups,
qgroups,
delegation,
- queue_update], args=(cfg, NetshaperFamily()))
+ dup_leaves,
+ queue_update],
+ args=(cfg, NetshaperFamily()))
ksft_exit()
diff --git a/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh b/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
index b1ec7755b783..26469f3be022 100755
--- a/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
+++ b/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
@@ -42,8 +42,6 @@ team_cleanup()
}
-require_command teamd
-
trap cleanup EXIT
tests_run
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe_module.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe_module.tc
new file mode 100644
index 000000000000..2915206777b6
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe_module.tc
@@ -0,0 +1,87 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Generic dynamic event - add/remove fprobe events on module
+# requires: dynamic_events "f[:[<group>/][<event>]] <func-name>[%return] [<args>]":README enabled_functions
+
+rmmod trace-events-sample ||:
+if ! modprobe trace-events-sample ; then
+ echo "No trace-events sample module - please make CONFIG_SAMPLE_TRACE_EVENTS=m"
+ exit_unresolved;
+fi
+trap "lsmod | grep -q trace_events_sample && rmmod trace-events-sample" EXIT
+
+echo 0 > events/enable
+echo > dynamic_events
+
+FUNC1='foo_bar*'
+FUNC2='vfs_read'
+
+:;: "Add an event on the test module" ;:
+echo "f:test1 $FUNC1" >> dynamic_events
+echo 1 > events/fprobes/test1/enable
+
+:;: "Ensure it is enabled" ;:
+funcs=`cat enabled_functions | wc -l`
+test $funcs -ne 0
+
+:;: "Check the enabled_functions is cleared on unloading" ;:
+rmmod trace-events-sample
+funcs=`cat enabled_functions | wc -l`
+test $funcs -eq 0
+
+:;: "Check it is kept clean" ;:
+modprobe trace-events-sample
+echo 1 > events/fprobes/test1/enable || echo "OK"
+funcs=`cat enabled_functions | wc -l`
+test $funcs -eq 0
+
+:;: "Add another event not on the test module" ;:
+echo "f:test2 $FUNC2" >> dynamic_events
+echo 1 > events/fprobes/test2/enable
+
+:;: "Ensure it is enabled" ;:
+ofuncs=`cat enabled_functions | wc -l`
+test $ofuncs -ne 0
+
+:;: "Disable and remove the first event"
+echo 0 > events/fprobes/test1/enable
+echo "-:fprobes/test1" >> dynamic_events
+funcs=`cat enabled_functions | wc -l`
+test $ofuncs -eq $funcs
+
+:;: "Disable and remove other events" ;:
+echo 0 > events/fprobes/enable
+echo > dynamic_events
+funcs=`cat enabled_functions | wc -l`
+test $funcs -eq 0
+
+rmmod trace-events-sample
+
+:;: "Add events on kernel and test module" ;:
+modprobe trace-events-sample
+echo "f:test1 $FUNC1" >> dynamic_events
+echo 1 > events/fprobes/test1/enable
+echo "f:test2 $FUNC2" >> dynamic_events
+echo 1 > events/fprobes/test2/enable
+ofuncs=`cat enabled_functions | wc -l`
+test $ofuncs -ne 0
+
+:;: "Unload module (ftrace entry should be removed)" ;:
+rmmod trace-events-sample
+funcs=`cat enabled_functions | wc -l`
+test $funcs -ne 0
+test $ofuncs -ne $funcs
+
+:;: "Disable and remove core-kernel fprobe event" ;:
+echo 0 > events/fprobes/test2/enable
+echo "-:fprobes/test2" >> dynamic_events
+
+:;: "Ensure ftrace is disabled." ;:
+funcs=`cat enabled_functions | wc -l`
+test $funcs -eq 0
+
+echo 0 > events/fprobes/enable
+echo > dynamic_events
+
+trap "" EXIT
+clear_trace
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_multiple_fprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_multiple_fprobe.tc
new file mode 100644
index 000000000000..f2cbf2ffd29b
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_multiple_fprobe.tc
@@ -0,0 +1,69 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Generic dynamic event - add/remove multiple fprobe events on the same function
+# requires: dynamic_events "f[:[<group>/][<event>]] <func-name>[%return] [<args>]":README enabled_functions
+
+echo 0 > events/enable
+echo > dynamic_events
+
+PLACE=vfs_read
+PLACE2=vfs_open
+
+:;: 'Ensure no other ftrace user' ;:
+test `cat enabled_functions | wc -l` -eq 0 || exit_unresolved
+
+:;: 'Test case 1: leave entry event' ;:
+:;: 'Add entry and exit events on the same place' ;:
+echo "f:event1 ${PLACE}" >> dynamic_events
+echo "f:event2 ${PLACE}%return" >> dynamic_events
+
+:;: 'Enable both of them' ;:
+echo 1 > events/fprobes/enable
+test `cat enabled_functions | wc -l` -eq 1
+
+:;: 'Disable and remove exit event' ;:
+echo 0 > events/fprobes/event2/enable
+echo -:event2 >> dynamic_events
+
+:;: 'Disable and remove all events' ;:
+echo 0 > events/fprobes/enable
+echo > dynamic_events
+
+:;: 'Add another event' ;:
+echo "f:event3 ${PLACE2}%return" > dynamic_events
+echo 1 > events/fprobes/enable
+test `cat enabled_functions | wc -l` -eq 1
+
+:;: 'No other ftrace user' ;:
+echo 0 > events/fprobes/enable
+echo > dynamic_events
+test `cat enabled_functions | wc -l` -eq 0
+
+:;: 'Test case 2: leave exit event' ;:
+:;: 'Add entry and exit events on the same place' ;:
+echo "f:event1 ${PLACE}" >> dynamic_events
+echo "f:event2 ${PLACE}%return" >> dynamic_events
+
+:;: 'Enable both of them' ;:
+echo 1 > events/fprobes/enable
+test `cat enabled_functions | wc -l` -eq 1
+
+:;: 'Disable and remove entry event' ;:
+echo 0 > events/fprobes/event1/enable
+echo -:event1 >> dynamic_events
+
+:;: 'Disable and remove all events' ;:
+echo 0 > events/fprobes/enable
+echo > dynamic_events
+
+:;: 'Add another event' ;:
+echo "f:event3 ${PLACE2}" > dynamic_events
+echo 1 > events/fprobes/enable
+test `cat enabled_functions | wc -l` -eq 1
+
+:;: 'No other ftrace user' ;:
+echo 0 > events/fprobes/enable
+echo > dynamic_events
+test `cat enabled_functions | wc -l` -eq 0
+
+clear_trace
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index 6d809f08ab7b..60838b61a2da 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -450,7 +450,7 @@ static inline __noreturn __printf(1, 2) void ksft_exit_skip(const char *msg, ...
*/
if (ksft_plan || ksft_test_num()) {
ksft_cnt.ksft_xskip++;
- printf("ok %u # SKIP ", 1 + ksft_test_num());
+ printf("ok %u # SKIP ", ksft_test_num());
} else {
printf("1..0 # SKIP ");
}
diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
index 6da3390825fe..311811dc55a0 100644
--- a/tools/testing/selftests/kselftest/runner.sh
+++ b/tools/testing/selftests/kselftest/runner.sh
@@ -1,8 +1,17 @@
-#!/bin/bash
+#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
#
# Runs a set of tests in a given subdirectory.
-. $(dirname "$(readlink -e "${BASH_SOURCE[0]}")")/ktap_helpers.sh
+
+# There isn't a shell-agnostic way to find the path of a sourced file,
+# so we must rely on BASE_DIR being set to find other tools.
+if [ -z "$BASE_DIR" ]; then
+ echo "Error: BASE_DIR must be set before sourcing." >&2
+ exit 1
+fi
+
+. ${BASE_DIR}/kselftest/ktap_helpers.sh
+
export timeout_rc=124
export logfile=/dev/stdout
export per_test_logging=
@@ -14,13 +23,6 @@ export RUN_IN_NETNS=
# over our soft timeout limit.
export kselftest_default_timeout=45
-# There isn't a shell-agnostic way to find the path of a sourced file,
-# so we must rely on BASE_DIR being set to find other tools.
-if [ -z "$BASE_DIR" ]; then
- echo "Error: BASE_DIR must be set before sourcing." >&2
- exit 1
-fi
-
TR_CMD=$(command -v tr)
# If Perl is unavailable, we must fall back to line-at-a-time prefixing
@@ -49,7 +51,6 @@ run_one()
{
DIR="$1"
TEST="$2"
- local rc test_num="$3"
BASENAME_TEST=$(basename $TEST)
@@ -106,7 +107,7 @@ run_one()
echo "# $TEST_HDR_MSG"
if [ ! -e "$TEST" ]; then
ktap_print_msg "Warning: file $TEST is missing!"
- ktap_test_fail "$test_num $TEST_HDR_MSG"
+ ktap_test_fail "$TEST_HDR_MSG"
rc=$KSFT_FAIL
else
if [ -x /usr/bin/stdbuf ]; then
@@ -125,7 +126,7 @@ run_one()
interpreter=$(head -n 1 "$TEST" | cut -c 3-)
cmd="$stdbuf $interpreter ./$BASENAME_TEST"
else
- ktap_test_fail "$test_num $TEST_HDR_MSG"
+ ktap_test_fail "$TEST_HDR_MSG"
return $KSFT_FAIL
fi
fi
@@ -136,15 +137,15 @@ run_one()
rc=$?
case "$rc" in
"$KSFT_PASS")
- ktap_test_pass "$test_num $TEST_HDR_MSG";;
+ ktap_test_pass "$TEST_HDR_MSG";;
"$KSFT_SKIP")
- ktap_test_skip "$test_num $TEST_HDR_MSG";;
+ ktap_test_skip "$TEST_HDR_MSG";;
"$KSFT_XFAIL")
- ktap_test_xfail "$test_num $TEST_HDR_MSG";;
+ ktap_test_xfail "$TEST_HDR_MSG";;
"$timeout_rc")
- ktap_test_fail "$test_num $TEST_HDR_MSG # TIMEOUT $kselftest_timeout seconds";;
+ ktap_test_fail "$TEST_HDR_MSG # TIMEOUT $kselftest_timeout seconds";;
*)
- ktap_test_fail "$test_num $TEST_HDR_MSG # exit=$rc";;
+ ktap_test_fail "$TEST_HDR_MSG # exit=$rc";;
esac
cd - >/dev/null
fi
@@ -159,7 +160,7 @@ in_netns()
BASE_DIR=$BASE_DIR
source $BASE_DIR/kselftest/runner.sh
logfile=$logfile
- run_one $DIR $TEST $test_num
+ run_one $DIR $TEST
EOF
}
@@ -172,7 +173,7 @@ run_in_netns()
ip netns add $netns
if [ $? -ne 0 ]; then
ktap_print_msg "Warning: Create namespace failed for $BASENAME_TEST"
- ktap_test_fail "$test_num selftests: $DIR: $BASENAME_TEST # Create NS failed"
+ ktap_test_fail "selftests: $DIR: $BASENAME_TEST # Create NS failed"
fi
ip -n $netns link set lo up
@@ -189,28 +190,26 @@ run_in_netns()
run_many()
{
DIR="${PWD#${BASE_DIR}/}"
- test_num=0
local rc
- pids=()
+ pids=
for TEST in "$@"; do
BASENAME_TEST=$(basename $TEST)
- test_num=$(( test_num + 1 ))
if [ -n "$per_test_logging" ]; then
logfile="$per_test_log_dir/$BASENAME_TEST"
cat /dev/null > "$logfile"
fi
if [ -n "$RUN_IN_NETNS" ]; then
run_in_netns &
- pids+=($!)
+ pids="$pids $!"
else
- run_one "$DIR" "$TEST" "$test_num"
+ run_one "$DIR" "$TEST"
fi
done
# These variables are outputs of ktap_helpers.sh but since we've
# run the test in a subprocess we need to update them manually
- for pid in "${pids[@]}"; do
+ for pid in $pids; do
wait "$pid"
rc=$?
case "$rc" in
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index 75fb016cd190..cfdce9cd252e 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -76,7 +76,7 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n)
memset(s, c, n);
}
-#define KSELFTEST_PRIO_TEST_F 20000
+#define KSELFTEST_PRIO_TEST 20000
#define KSELFTEST_PRIO_XFAIL 20001
#define TEST_TIMEOUT_DEFAULT 30
@@ -194,7 +194,7 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n)
.fixture = &_fixture_global, \
.termsig = _signal, \
.timeout = TEST_TIMEOUT_DEFAULT, }; \
- static void __attribute__((constructor)) _register_##test_name(void) \
+ static void __attribute__((constructor(KSELFTEST_PRIO_TEST))) _register_##test_name(void) \
{ \
__register_test(&_##test_name##_object); \
} \
@@ -238,7 +238,7 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n)
FIXTURE_VARIANT(fixture_name); \
static struct __fixture_metadata _##fixture_name##_fixture_object = \
{ .name = #fixture_name, }; \
- static void __attribute__((constructor)) \
+ static void __attribute__((constructor(KSELFTEST_PRIO_TEST))) \
_register_##fixture_name##_data(void) \
{ \
__register_fixture(&_##fixture_name##_fixture_object); \
@@ -364,7 +364,7 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n)
_##fixture_name##_##variant_name##_object = \
{ .name = #variant_name, \
.data = &_##fixture_name##_##variant_name##_variant}; \
- static void __attribute__((constructor)) \
+ static void __attribute__((constructor(KSELFTEST_PRIO_TEST))) \
_register_##fixture_name##_##variant_name(void) \
{ \
__register_fixture_variant(&_##fixture_name##_fixture_object, \
@@ -468,7 +468,7 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n)
fixture_name##_teardown(_metadata, self, variant); \
} \
static struct __test_metadata *_##fixture_name##_##test_name##_object; \
- static void __attribute__((constructor(KSELFTEST_PRIO_TEST_F))) \
+ static void __attribute__((constructor(KSELFTEST_PRIO_TEST))) \
_register_##fixture_name##_##test_name(void) \
{ \
struct __test_metadata *object = mmap(NULL, sizeof(*object), \
@@ -1323,7 +1323,7 @@ static int test_harness_run(int argc, char **argv)
return KSFT_FAIL;
}
-static void __attribute__((constructor)) __constructor_order_first(void)
+static void __attribute__((constructor(KSELFTEST_PRIO_TEST))) __constructor_order_first(void)
{
__constructor_order_forward = true;
}
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
index b058f27b2141..e5bbdb5bbdc3 100644
--- a/tools/testing/selftests/kvm/access_tracking_perf_test.c
+++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -101,15 +101,15 @@ struct test_params {
enum vm_mem_backing_src_type backing_src;
/* The amount of memory to allocate for each vCPU. */
- uint64_t vcpu_memory_bytes;
+ u64 vcpu_memory_bytes;
/* The number of vCPUs to create in the VM. */
int nr_vcpus;
};
-static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
+static u64 pread_u64(int fd, const char *filename, u64 index)
{
- uint64_t value;
+ u64 value;
off_t offset = index * sizeof(value);
TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value),
@@ -123,13 +123,13 @@ static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
#define PAGEMAP_PRESENT (1ULL << 63)
#define PAGEMAP_PFN_MASK ((1ULL << 55) - 1)
-static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
+static u64 lookup_pfn(int pagemap_fd, struct kvm_vm *vm, gva_t gva)
{
- uint64_t hva = (uint64_t) addr_gva2hva(vm, gva);
- uint64_t entry;
- uint64_t pfn;
+ u64 hva = (u64)addr_gva2hva(vm, gva);
+ u64 entry;
+ u64 pfn;
- entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize());
+ entry = pread_u64(pagemap_fd, "pagemap", hva / getpagesize());
if (!(entry & PAGEMAP_PRESENT))
return 0;
@@ -139,16 +139,16 @@ static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
return pfn;
}
-static bool is_page_idle(int page_idle_fd, uint64_t pfn)
+static bool is_page_idle(int page_idle_fd, u64 pfn)
{
- uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64);
+ u64 bits = pread_u64(page_idle_fd, "page_idle", pfn / 64);
return !!((bits >> (pfn % 64)) & 1);
}
-static void mark_page_idle(int page_idle_fd, uint64_t pfn)
+static void mark_page_idle(int page_idle_fd, u64 pfn)
{
- uint64_t bits = 1ULL << (pfn % 64);
+ u64 bits = 1ULL << (pfn % 64);
TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8,
"Set page_idle bits for PFN 0x%" PRIx64, pfn);
@@ -174,11 +174,11 @@ static void pageidle_mark_vcpu_memory_idle(struct kvm_vm *vm,
struct memstress_vcpu_args *vcpu_args)
{
int vcpu_idx = vcpu_args->vcpu_idx;
- uint64_t base_gva = vcpu_args->gva;
- uint64_t pages = vcpu_args->pages;
- uint64_t page;
- uint64_t still_idle = 0;
- uint64_t no_pfn = 0;
+ gva_t base_gva = vcpu_args->gva;
+ u64 pages = vcpu_args->pages;
+ u64 page;
+ u64 still_idle = 0;
+ u64 no_pfn = 0;
int page_idle_fd;
int pagemap_fd;
@@ -193,8 +193,8 @@ static void pageidle_mark_vcpu_memory_idle(struct kvm_vm *vm,
TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");
for (page = 0; page < pages; page++) {
- uint64_t gva = base_gva + page * memstress_args.guest_page_size;
- uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);
+ gva_t gva = base_gva + page * memstress_args.guest_page_size;
+ u64 pfn = lookup_pfn(pagemap_fd, vm, gva);
if (!pfn) {
no_pfn++;
@@ -297,10 +297,10 @@ static void lru_gen_mark_memory_idle(struct kvm_vm *vm)
lru_gen_last_gen = new_gen;
}
-static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall)
+static void assert_ucall(struct kvm_vcpu *vcpu, u64 expected_ucall)
{
struct ucall uc;
- uint64_t actual_ucall = get_ucall(vcpu, &uc);
+ u64 actual_ucall = get_ucall(vcpu, &uc);
TEST_ASSERT(expected_ucall == actual_ucall,
"Guest exited unexpectedly (expected ucall %" PRIu64
@@ -417,7 +417,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
*/
test_pages = params->nr_vcpus * params->vcpu_memory_bytes /
max(memstress_args.guest_page_size,
- (uint64_t)getpagesize());
+ (u64)getpagesize());
memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
diff --git a/tools/testing/selftests/kvm/arch_timer.c b/tools/testing/selftests/kvm/arch_timer.c
index cf8fb67104f1..90c475a61b22 100644
--- a/tools/testing/selftests/kvm/arch_timer.c
+++ b/tools/testing/selftests/kvm/arch_timer.c
@@ -78,9 +78,9 @@ static void *test_vcpu_run(void *arg)
return NULL;
}
-static uint32_t test_get_pcpu(void)
+static u32 test_get_pcpu(void)
{
- uint32_t pcpu;
+ u32 pcpu;
unsigned int nproc_conf;
cpu_set_t online_cpuset;
@@ -98,7 +98,7 @@ static uint32_t test_get_pcpu(void)
static int test_migrate_vcpu(unsigned int vcpu_idx)
{
int ret;
- uint32_t new_pcpu = test_get_pcpu();
+ u32 new_pcpu = test_get_pcpu();
pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu);
diff --git a/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c b/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c
index 713005b6f508..8a019cbaf4c4 100644
--- a/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c
+++ b/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c
@@ -66,7 +66,7 @@ static void test_guest_raz(struct kvm_vcpu *vcpu)
}
}
-static uint64_t raz_wi_reg_ids[] = {
+static u64 raz_wi_reg_ids[] = {
KVM_ARM64_SYS_REG(SYS_ID_PFR0_EL1),
KVM_ARM64_SYS_REG(SYS_ID_PFR1_EL1),
KVM_ARM64_SYS_REG(SYS_ID_DFR0_EL1),
@@ -94,8 +94,8 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu)
int i;
for (i = 0; i < ARRAY_SIZE(raz_wi_reg_ids); i++) {
- uint64_t reg_id = raz_wi_reg_ids[i];
- uint64_t val;
+ u64 reg_id = raz_wi_reg_ids[i];
+ u64 val;
val = vcpu_get_reg(vcpu, reg_id);
TEST_ASSERT_EQ(val, 0);
@@ -111,7 +111,7 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu)
}
}
-static uint64_t raz_invariant_reg_ids[] = {
+static u64 raz_invariant_reg_ids[] = {
KVM_ARM64_SYS_REG(SYS_ID_AFR0_EL1),
KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 3)),
KVM_ARM64_SYS_REG(SYS_ID_DFR1_EL1),
@@ -123,8 +123,8 @@ static void test_user_raz_invariant(struct kvm_vcpu *vcpu)
int i, r;
for (i = 0; i < ARRAY_SIZE(raz_invariant_reg_ids); i++) {
- uint64_t reg_id = raz_invariant_reg_ids[i];
- uint64_t val;
+ u64 reg_id = raz_invariant_reg_ids[i];
+ u64 val;
val = vcpu_get_reg(vcpu, reg_id);
TEST_ASSERT_EQ(val, 0);
@@ -142,7 +142,7 @@ static void test_user_raz_invariant(struct kvm_vcpu *vcpu)
static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
{
- uint64_t val, el0;
+ u64 val, el0;
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
diff --git a/tools/testing/selftests/kvm/arm64/arch_timer.c b/tools/testing/selftests/kvm/arm64/arch_timer.c
index d592a4515399..5fa5c0ec2b3e 100644
--- a/tools/testing/selftests/kvm/arm64/arch_timer.c
+++ b/tools/testing/selftests/kvm/arm64/arch_timer.c
@@ -56,7 +56,7 @@ static void guest_validate_irq(unsigned int intid,
struct test_vcpu_shared_data *shared_data)
{
enum guest_stage stage = shared_data->guest_stage;
- uint64_t xcnt = 0, xcnt_diff_us, cval = 0;
+ u64 xcnt = 0, xcnt_diff_us, cval = 0;
unsigned long xctl = 0;
unsigned int timer_irq = 0;
unsigned int accessor;
@@ -105,7 +105,7 @@ static void guest_validate_irq(unsigned int intid,
static void guest_irq_handler(struct ex_regs *regs)
{
unsigned int intid = gic_get_and_ack_irq();
- uint32_t cpu = guest_get_vcpuid();
+ u32 cpu = guest_get_vcpuid();
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
guest_validate_irq(intid, shared_data);
@@ -116,7 +116,7 @@ static void guest_irq_handler(struct ex_regs *regs)
static void guest_run_stage(struct test_vcpu_shared_data *shared_data,
enum guest_stage stage)
{
- uint32_t irq_iter, config_iter;
+ u32 irq_iter, config_iter;
shared_data->guest_stage = stage;
shared_data->nr_iter = 0;
@@ -140,7 +140,7 @@ static void guest_run_stage(struct test_vcpu_shared_data *shared_data,
static void guest_code(void)
{
- uint32_t cpu = guest_get_vcpuid();
+ u32 cpu = guest_get_vcpuid();
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
local_irq_disable();
diff --git a/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c b/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c
index 993c9e38e729..f7625eb711d6 100644
--- a/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c
+++ b/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c
@@ -23,25 +23,25 @@
#include "vgic.h"
/* Depends on counter width. */
-static uint64_t CVAL_MAX;
+static u64 CVAL_MAX;
/* tval is a signed 32-bit int. */
-static const int32_t TVAL_MAX = INT32_MAX;
-static const int32_t TVAL_MIN = INT32_MIN;
+static const s32 TVAL_MAX = INT32_MAX;
+static const s32 TVAL_MIN = INT32_MIN;
/* After how much time we say there is no IRQ. */
-static const uint32_t TIMEOUT_NO_IRQ_US = 50000;
+static const u32 TIMEOUT_NO_IRQ_US = 50000;
/* Counter value to use as the starting one for most tests. Set to CVAL_MAX/2 */
-static uint64_t DEF_CNT;
+static u64 DEF_CNT;
/* Number of runs. */
-static const uint32_t NR_TEST_ITERS_DEF = 5;
+static const u32 NR_TEST_ITERS_DEF = 5;
/* Default wait test time in ms. */
-static const uint32_t WAIT_TEST_MS = 10;
+static const u32 WAIT_TEST_MS = 10;
/* Default "long" wait test time in ms. */
-static const uint32_t LONG_WAIT_TEST_MS = 100;
+static const u32 LONG_WAIT_TEST_MS = 100;
/* Shared with IRQ handler. */
struct test_vcpu_shared_data {
@@ -53,9 +53,9 @@ struct test_args {
/* Virtual or physical timer and counter tests. */
enum arch_timer timer;
/* Delay used for most timer tests. */
- uint64_t wait_ms;
+ u64 wait_ms;
/* Delay used in the test_long_timer_delays test. */
- uint64_t long_wait_ms;
+ u64 long_wait_ms;
/* Number of iterations. */
int iterations;
/* Whether to test the physical timer. */
@@ -82,12 +82,12 @@ enum sync_cmd {
NO_USERSPACE_CMD,
};
-typedef void (*sleep_method_t)(enum arch_timer timer, uint64_t usec);
+typedef void (*sleep_method_t)(enum arch_timer timer, u64 usec);
-static void sleep_poll(enum arch_timer timer, uint64_t usec);
-static void sleep_sched_poll(enum arch_timer timer, uint64_t usec);
-static void sleep_in_userspace(enum arch_timer timer, uint64_t usec);
-static void sleep_migrate(enum arch_timer timer, uint64_t usec);
+static void sleep_poll(enum arch_timer timer, u64 usec);
+static void sleep_sched_poll(enum arch_timer timer, u64 usec);
+static void sleep_in_userspace(enum arch_timer timer, u64 usec);
+static void sleep_migrate(enum arch_timer timer, u64 usec);
sleep_method_t sleep_method[] = {
sleep_poll,
@@ -115,14 +115,14 @@ enum timer_view {
TIMER_TVAL,
};
-static void assert_irqs_handled(uint32_t n)
+static void assert_irqs_handled(u32 n)
{
int h = atomic_read(&shared_data.handled);
__GUEST_ASSERT(h == n, "Handled %d IRQS but expected %d", h, n);
}
-static void userspace_cmd(uint64_t cmd)
+static void userspace_cmd(u64 cmd)
{
GUEST_SYNC_ARGS(cmd, 0, 0, 0, 0);
}
@@ -132,12 +132,12 @@ static void userspace_migrate_vcpu(void)
userspace_cmd(USERSPACE_MIGRATE_SELF);
}
-static void userspace_sleep(uint64_t usecs)
+static void userspace_sleep(u64 usecs)
{
GUEST_SYNC_ARGS(USERSPACE_USLEEP, usecs, 0, 0, 0);
}
-static void set_counter(enum arch_timer timer, uint64_t counter)
+static void set_counter(enum arch_timer timer, u64 counter)
{
GUEST_SYNC_ARGS(SET_COUNTER_VALUE, counter, timer, 0, 0);
}
@@ -146,8 +146,8 @@ static void guest_irq_handler(struct ex_regs *regs)
{
unsigned int intid = gic_get_and_ack_irq();
enum arch_timer timer;
- uint64_t cnt, cval;
- uint32_t ctl;
+ u64 cnt, cval;
+ u32 ctl;
bool timer_condition, istatus;
if (intid == IAR_SPURIOUS) {
@@ -178,8 +178,8 @@ out:
gic_set_eoi(intid);
}
-static void set_cval_irq(enum arch_timer timer, uint64_t cval_cycles,
- uint32_t ctl)
+static void set_cval_irq(enum arch_timer timer, u64 cval_cycles,
+ u32 ctl)
{
atomic_set(&shared_data.handled, 0);
atomic_set(&shared_data.spurious, 0);
@@ -187,8 +187,8 @@ static void set_cval_irq(enum arch_timer timer, uint64_t cval_cycles,
timer_set_ctl(timer, ctl);
}
-static void set_tval_irq(enum arch_timer timer, uint64_t tval_cycles,
- uint32_t ctl)
+static void set_tval_irq(enum arch_timer timer, u64 tval_cycles,
+ u32 ctl)
{
atomic_set(&shared_data.handled, 0);
atomic_set(&shared_data.spurious, 0);
@@ -196,7 +196,7 @@ static void set_tval_irq(enum arch_timer timer, uint64_t tval_cycles,
timer_set_ctl(timer, ctl);
}
-static void set_xval_irq(enum arch_timer timer, uint64_t xval, uint32_t ctl,
+static void set_xval_irq(enum arch_timer timer, u64 xval, u32 ctl,
enum timer_view tv)
{
switch (tv) {
@@ -275,13 +275,13 @@ static void wait_migrate_poll_for_irq(void)
* Sleep for usec microseconds by polling in the guest or in
* userspace (e.g. userspace_cmd=USERSPACE_SCHEDULE).
*/
-static void guest_poll(enum arch_timer test_timer, uint64_t usec,
+static void guest_poll(enum arch_timer test_timer, u64 usec,
enum sync_cmd usp_cmd)
{
- uint64_t cycles = usec_to_cycles(usec);
+ u64 cycles = usec_to_cycles(usec);
/* Whichever timer we are testing with, sleep with the other. */
enum arch_timer sleep_timer = 1 - test_timer;
- uint64_t start = timer_get_cntct(sleep_timer);
+ u64 start = timer_get_cntct(sleep_timer);
while ((timer_get_cntct(sleep_timer) - start) < cycles) {
if (usp_cmd == NO_USERSPACE_CMD)
@@ -291,22 +291,22 @@ static void guest_poll(enum arch_timer test_timer, uint64_t usec,
}
}
-static void sleep_poll(enum arch_timer timer, uint64_t usec)
+static void sleep_poll(enum arch_timer timer, u64 usec)
{
guest_poll(timer, usec, NO_USERSPACE_CMD);
}
-static void sleep_sched_poll(enum arch_timer timer, uint64_t usec)
+static void sleep_sched_poll(enum arch_timer timer, u64 usec)
{
guest_poll(timer, usec, USERSPACE_SCHED_YIELD);
}
-static void sleep_migrate(enum arch_timer timer, uint64_t usec)
+static void sleep_migrate(enum arch_timer timer, u64 usec)
{
guest_poll(timer, usec, USERSPACE_MIGRATE_SELF);
}
-static void sleep_in_userspace(enum arch_timer timer, uint64_t usec)
+static void sleep_in_userspace(enum arch_timer timer, u64 usec)
{
userspace_sleep(usec);
}
@@ -315,15 +315,15 @@ static void sleep_in_userspace(enum arch_timer timer, uint64_t usec)
* Reset the timer state to some nice values like the counter not being close
* to the edge, and the control register masked and disabled.
*/
-static void reset_timer_state(enum arch_timer timer, uint64_t cnt)
+static void reset_timer_state(enum arch_timer timer, u64 cnt)
{
set_counter(timer, cnt);
timer_set_ctl(timer, CTL_IMASK);
}
-static void test_timer_xval(enum arch_timer timer, uint64_t xval,
+static void test_timer_xval(enum arch_timer timer, u64 xval,
enum timer_view tv, irq_wait_method_t wm, bool reset_state,
- uint64_t reset_cnt)
+ u64 reset_cnt)
{
local_irq_disable();
@@ -348,23 +348,23 @@ static void test_timer_xval(enum arch_timer timer, uint64_t xval,
* the "runner", like: tools/testing/selftests/kselftest/runner.sh.
*/
-static void test_timer_cval(enum arch_timer timer, uint64_t cval,
+static void test_timer_cval(enum arch_timer timer, u64 cval,
irq_wait_method_t wm, bool reset_state,
- uint64_t reset_cnt)
+ u64 reset_cnt)
{
test_timer_xval(timer, cval, TIMER_CVAL, wm, reset_state, reset_cnt);
}
-static void test_timer_tval(enum arch_timer timer, int32_t tval,
+static void test_timer_tval(enum arch_timer timer, s32 tval,
irq_wait_method_t wm, bool reset_state,
- uint64_t reset_cnt)
+ u64 reset_cnt)
{
- test_timer_xval(timer, (uint64_t) tval, TIMER_TVAL, wm, reset_state,
+ test_timer_xval(timer, (u64)tval, TIMER_TVAL, wm, reset_state,
reset_cnt);
}
-static void test_xval_check_no_irq(enum arch_timer timer, uint64_t xval,
- uint64_t usec, enum timer_view timer_view,
+static void test_xval_check_no_irq(enum arch_timer timer, u64 xval,
+ u64 usec, enum timer_view timer_view,
sleep_method_t guest_sleep)
{
local_irq_disable();
@@ -379,17 +379,17 @@ static void test_xval_check_no_irq(enum arch_timer timer, uint64_t xval,
assert_irqs_handled(0);
}
-static void test_cval_no_irq(enum arch_timer timer, uint64_t cval,
- uint64_t usec, sleep_method_t wm)
+static void test_cval_no_irq(enum arch_timer timer, u64 cval,
+ u64 usec, sleep_method_t wm)
{
test_xval_check_no_irq(timer, cval, usec, TIMER_CVAL, wm);
}
-static void test_tval_no_irq(enum arch_timer timer, int32_t tval, uint64_t usec,
+static void test_tval_no_irq(enum arch_timer timer, s32 tval, u64 usec,
sleep_method_t wm)
{
- /* tval will be cast to an int32_t in test_xval_check_no_irq */
- test_xval_check_no_irq(timer, (uint64_t) tval, usec, TIMER_TVAL, wm);
+ /* tval will be cast to an s32 in test_xval_check_no_irq */
+ test_xval_check_no_irq(timer, (u64)tval, usec, TIMER_TVAL, wm);
}
/* Test masking/unmasking a timer using the timer mask (not the IRQ mask). */
@@ -463,7 +463,7 @@ static void test_timers_fired_multiple_times(enum arch_timer timer)
* timeout for the wait: we use the wfi instruction.
*/
static void test_reprogramming_timer(enum arch_timer timer, irq_wait_method_t wm,
- int32_t delta_1_ms, int32_t delta_2_ms)
+ s32 delta_1_ms, s32 delta_2_ms)
{
local_irq_disable();
reset_timer_state(timer, DEF_CNT);
@@ -488,7 +488,7 @@ static void test_reprogramming_timer(enum arch_timer timer, irq_wait_method_t wm
static void test_reprogram_timers(enum arch_timer timer)
{
int i;
- uint64_t base_wait = test_args.wait_ms;
+ u64 base_wait = test_args.wait_ms;
for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
/*
@@ -504,8 +504,8 @@ static void test_reprogram_timers(enum arch_timer timer)
static void test_basic_functionality(enum arch_timer timer)
{
- int32_t tval = (int32_t) msec_to_cycles(test_args.wait_ms);
- uint64_t cval = DEF_CNT + msec_to_cycles(test_args.wait_ms);
+ s32 tval = (s32)msec_to_cycles(test_args.wait_ms);
+ u64 cval = DEF_CNT + msec_to_cycles(test_args.wait_ms);
int i;
for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
@@ -593,7 +593,7 @@ static void test_set_cnt_after_tval_max(enum arch_timer timer, irq_wait_method_t
reset_timer_state(timer, DEF_CNT);
set_cval_irq(timer,
- (uint64_t) TVAL_MAX +
+ (u64)TVAL_MAX +
msec_to_cycles(test_args.wait_ms) / 2, CTL_ENABLE);
set_counter(timer, TVAL_MAX);
@@ -608,7 +608,7 @@ static void test_set_cnt_after_tval_max(enum arch_timer timer, irq_wait_method_t
/* Test timers set for: cval = now + TVAL_MAX + wait_ms / 2 */
static void test_timers_above_tval_max(enum arch_timer timer)
{
- uint64_t cval;
+ u64 cval;
int i;
/*
@@ -638,8 +638,8 @@ static void test_timers_above_tval_max(enum arch_timer timer)
* sets the counter to cnt_1, the [c|t]val, the counter to cnt_2, and
* then waits for an IRQ.
*/
-static void test_set_cnt_after_xval(enum arch_timer timer, uint64_t cnt_1,
- uint64_t xval, uint64_t cnt_2,
+static void test_set_cnt_after_xval(enum arch_timer timer, u64 cnt_1,
+ u64 xval, u64 cnt_2,
irq_wait_method_t wm, enum timer_view tv)
{
local_irq_disable();
@@ -662,8 +662,8 @@ static void test_set_cnt_after_xval(enum arch_timer timer, uint64_t cnt_1,
* then waits for an IRQ.
*/
static void test_set_cnt_after_xval_no_irq(enum arch_timer timer,
- uint64_t cnt_1, uint64_t xval,
- uint64_t cnt_2,
+ u64 cnt_1, u64 xval,
+ u64 cnt_2,
sleep_method_t guest_sleep,
enum timer_view tv)
{
@@ -684,31 +684,31 @@ static void test_set_cnt_after_xval_no_irq(enum arch_timer timer,
timer_set_ctl(timer, CTL_IMASK);
}
-static void test_set_cnt_after_tval(enum arch_timer timer, uint64_t cnt_1,
- int32_t tval, uint64_t cnt_2,
+static void test_set_cnt_after_tval(enum arch_timer timer, u64 cnt_1,
+ s32 tval, u64 cnt_2,
irq_wait_method_t wm)
{
test_set_cnt_after_xval(timer, cnt_1, tval, cnt_2, wm, TIMER_TVAL);
}
-static void test_set_cnt_after_cval(enum arch_timer timer, uint64_t cnt_1,
- uint64_t cval, uint64_t cnt_2,
+static void test_set_cnt_after_cval(enum arch_timer timer, u64 cnt_1,
+ u64 cval, u64 cnt_2,
irq_wait_method_t wm)
{
test_set_cnt_after_xval(timer, cnt_1, cval, cnt_2, wm, TIMER_CVAL);
}
static void test_set_cnt_after_tval_no_irq(enum arch_timer timer,
- uint64_t cnt_1, int32_t tval,
- uint64_t cnt_2, sleep_method_t wm)
+ u64 cnt_1, s32 tval,
+ u64 cnt_2, sleep_method_t wm)
{
test_set_cnt_after_xval_no_irq(timer, cnt_1, tval, cnt_2, wm,
TIMER_TVAL);
}
static void test_set_cnt_after_cval_no_irq(enum arch_timer timer,
- uint64_t cnt_1, uint64_t cval,
- uint64_t cnt_2, sleep_method_t wm)
+ u64 cnt_1, u64 cval,
+ u64 cnt_2, sleep_method_t wm)
{
test_set_cnt_after_xval_no_irq(timer, cnt_1, cval, cnt_2, wm,
TIMER_CVAL);
@@ -718,7 +718,7 @@ static void test_set_cnt_after_cval_no_irq(enum arch_timer timer,
static void test_move_counters_ahead_of_timers(enum arch_timer timer)
{
int i;
- int32_t tval;
+ s32 tval;
for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
irq_wait_method_t wm = irq_wait_method[i];
@@ -730,8 +730,7 @@ static void test_move_counters_ahead_of_timers(enum arch_timer timer)
test_set_cnt_after_tval(timer, 0, -1, DEF_CNT + 1, wm);
test_set_cnt_after_tval(timer, 0, -1, TVAL_MAX, wm);
tval = TVAL_MAX;
- test_set_cnt_after_tval(timer, 0, tval, (uint64_t) tval + 1,
- wm);
+ test_set_cnt_after_tval(timer, 0, tval, (u64)tval + 1, wm);
}
}
@@ -754,8 +753,8 @@ static void test_move_counters_behind_timers(enum arch_timer timer)
static void test_timers_in_the_past(enum arch_timer timer)
{
- int32_t tval = -1 * (int32_t) msec_to_cycles(test_args.wait_ms);
- uint64_t cval;
+ s32 tval = -1 * (s32)msec_to_cycles(test_args.wait_ms);
+ u64 cval;
int i;
for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
@@ -790,8 +789,8 @@ static void test_timers_in_the_past(enum arch_timer timer)
static void test_long_timer_delays(enum arch_timer timer)
{
- int32_t tval = (int32_t) msec_to_cycles(test_args.long_wait_ms);
- uint64_t cval = DEF_CNT + msec_to_cycles(test_args.long_wait_ms);
+ s32 tval = (s32)msec_to_cycles(test_args.long_wait_ms);
+ u64 cval = DEF_CNT + msec_to_cycles(test_args.long_wait_ms);
int i;
for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
@@ -846,11 +845,11 @@ static void guest_code(enum arch_timer timer)
static cpu_set_t default_cpuset;
-static uint32_t next_pcpu(void)
+static u32 next_pcpu(void)
{
- uint32_t max = get_nprocs();
- uint32_t cur = sched_getcpu();
- uint32_t next = cur;
+ u32 max = get_nprocs();
+ u32 cur = sched_getcpu();
+ u32 next = cur;
cpu_set_t cpuset = default_cpuset;
TEST_ASSERT(max > 1, "Need at least two physical cpus");
@@ -862,7 +861,7 @@ static uint32_t next_pcpu(void)
return next;
}
-static void kvm_set_cntxct(struct kvm_vcpu *vcpu, uint64_t cnt,
+static void kvm_set_cntxct(struct kvm_vcpu *vcpu, u64 cnt,
enum arch_timer timer)
{
if (timer == PHYSICAL)
@@ -874,7 +873,7 @@ static void kvm_set_cntxct(struct kvm_vcpu *vcpu, uint64_t cnt,
static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc)
{
enum sync_cmd cmd = uc->args[1];
- uint64_t val = uc->args[2];
+ u64 val = uc->args[2];
enum arch_timer timer = uc->args[3];
switch (cmd) {
@@ -1018,8 +1017,8 @@ static bool parse_args(int argc, char *argv[])
static void set_counter_defaults(void)
{
- const uint64_t MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600;
- uint64_t freq = read_sysreg(CNTFRQ_EL0);
+ const u64 MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600;
+ u64 freq = read_sysreg(CNTFRQ_EL0);
int width = ilog2(MIN_ROLLOVER_SECS * freq);
width = clamp(width, 56, 64);
diff --git a/tools/testing/selftests/kvm/arm64/debug-exceptions.c b/tools/testing/selftests/kvm/arm64/debug-exceptions.c
index 1d431de8729c..3eb4b1b6682d 100644
--- a/tools/testing/selftests/kvm/arm64/debug-exceptions.c
+++ b/tools/testing/selftests/kvm/arm64/debug-exceptions.c
@@ -31,14 +31,14 @@
extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start, hw_bp_ctx;
extern unsigned char iter_ss_begin, iter_ss_end;
-static volatile uint64_t sw_bp_addr, hw_bp_addr;
-static volatile uint64_t wp_addr, wp_data_addr;
-static volatile uint64_t svc_addr;
-static volatile uint64_t ss_addr[4], ss_idx;
-#define PC(v) ((uint64_t)&(v))
+static volatile u64 sw_bp_addr, hw_bp_addr;
+static volatile u64 wp_addr, wp_data_addr;
+static volatile u64 svc_addr;
+static volatile u64 ss_addr[4], ss_idx;
+#define PC(v) ((u64)&(v))
#define GEN_DEBUG_WRITE_REG(reg_name) \
-static void write_##reg_name(int num, uint64_t val) \
+static void write_##reg_name(int num, u64 val) \
{ \
switch (num) { \
case 0: \
@@ -102,8 +102,8 @@ GEN_DEBUG_WRITE_REG(dbgwvr)
static void reset_debug_state(void)
{
- uint8_t brps, wrps, i;
- uint64_t dfr0;
+ u8 brps, wrps, i;
+ u64 dfr0;
asm volatile("msr daifset, #8");
@@ -140,7 +140,7 @@ static void enable_os_lock(void)
static void enable_monitor_debug_exceptions(void)
{
- uint64_t mdscr;
+ u64 mdscr;
asm volatile("msr daifclr, #8");
@@ -149,9 +149,9 @@ static void enable_monitor_debug_exceptions(void)
isb();
}
-static void install_wp(uint8_t wpn, uint64_t addr)
+static void install_wp(u8 wpn, u64 addr)
{
- uint32_t wcr;
+ u32 wcr;
wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E;
write_dbgwcr(wpn, wcr);
@@ -162,9 +162,9 @@ static void install_wp(uint8_t wpn, uint64_t addr)
enable_monitor_debug_exceptions();
}
-static void install_hw_bp(uint8_t bpn, uint64_t addr)
+static void install_hw_bp(u8 bpn, u64 addr)
{
- uint32_t bcr;
+ u32 bcr;
bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E;
write_dbgbcr(bpn, bcr);
@@ -174,11 +174,10 @@ static void install_hw_bp(uint8_t bpn, uint64_t addr)
enable_monitor_debug_exceptions();
}
-static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr,
- uint64_t ctx)
+static void install_wp_ctx(u8 addr_wp, u8 ctx_bp, u64 addr, u64 ctx)
{
- uint32_t wcr;
- uint64_t ctx_bcr;
+ u32 wcr;
+ u64 ctx_bcr;
/* Setup a context-aware breakpoint for Linked Context ID Match */
ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
@@ -188,7 +187,7 @@ static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr,
/* Setup a linked watchpoint (linked to the context-aware breakpoint) */
wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E |
- DBGWCR_WT_LINK | ((uint32_t)ctx_bp << DBGWCR_LBN_SHIFT);
+ DBGWCR_WT_LINK | ((u32)ctx_bp << DBGWCR_LBN_SHIFT);
write_dbgwcr(addr_wp, wcr);
write_dbgwvr(addr_wp, addr);
isb();
@@ -196,10 +195,9 @@ static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr,
enable_monitor_debug_exceptions();
}
-void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr,
- uint64_t ctx)
+void install_hw_bp_ctx(u8 addr_bp, u8 ctx_bp, u64 addr, u64 ctx)
{
- uint32_t addr_bcr, ctx_bcr;
+ u32 addr_bcr, ctx_bcr;
/* Setup a context-aware breakpoint for Linked Context ID Match */
ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
@@ -213,7 +211,7 @@ void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr,
*/
addr_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
DBGBCR_BT_ADDR_LINK_CTX |
- ((uint32_t)ctx_bp << DBGBCR_LBN_SHIFT);
+ ((u32)ctx_bp << DBGBCR_LBN_SHIFT);
write_dbgbcr(addr_bp, addr_bcr);
write_dbgbvr(addr_bp, addr);
isb();
@@ -223,7 +221,7 @@ void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr,
static void install_ss(void)
{
- uint64_t mdscr;
+ u64 mdscr;
asm volatile("msr daifclr, #8");
@@ -234,9 +232,9 @@ static void install_ss(void)
static volatile char write_data;
-static void guest_code(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
+static void guest_code(u8 bpn, u8 wpn, u8 ctx_bpn)
{
- uint64_t ctx = 0xabcdef; /* a random context number */
+ u64 ctx = 0xabcdef; /* a random context number */
/* Software-breakpoint */
reset_debug_state();
@@ -377,8 +375,8 @@ static void guest_svc_handler(struct ex_regs *regs)
static void guest_code_ss(int test_cnt)
{
- uint64_t i;
- uint64_t bvr, wvr, w_bvr, w_wvr;
+ u64 i;
+ u64 bvr, wvr, w_bvr, w_wvr;
for (i = 0; i < test_cnt; i++) {
/* Bits [1:0] of dbg{b,w}vr are RES0 */
@@ -416,12 +414,12 @@ static void guest_code_ss(int test_cnt)
GUEST_DONE();
}
-static int debug_version(uint64_t id_aa64dfr0)
+static int debug_version(u64 id_aa64dfr0)
{
return FIELD_GET(ID_AA64DFR0_EL1_DebugVer, id_aa64dfr0);
}
-static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
+static void test_guest_debug_exceptions(u8 bpn, u8 wpn, u8 ctx_bpn)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
@@ -468,8 +466,8 @@ void test_single_step_from_userspace(int test_cnt)
struct kvm_vm *vm;
struct ucall uc;
struct kvm_run *run;
- uint64_t pc, cmd;
- uint64_t test_pc = 0;
+ u64 pc, cmd;
+ u64 test_pc = 0;
bool ss_enable = false;
struct kvm_guest_debug debug = {};
@@ -506,7 +504,7 @@ void test_single_step_from_userspace(int test_cnt)
"Unexpected pc 0x%lx (expected 0x%lx)",
pc, test_pc);
- if ((pc + 4) == (uint64_t)&iter_ss_end) {
+ if ((pc + 4) == (u64)&iter_ss_end) {
test_pc = 0;
debug.control = KVM_GUESTDBG_ENABLE;
ss_enable = false;
@@ -519,8 +517,8 @@ void test_single_step_from_userspace(int test_cnt)
* iter_ss_end, the pc for the next KVM_EXIT_DEBUG should
* be the current pc + 4.
*/
- if ((pc >= (uint64_t)&iter_ss_begin) &&
- (pc < (uint64_t)&iter_ss_end))
+ if ((pc >= (u64)&iter_ss_begin) &&
+ (pc < (u64)&iter_ss_end))
test_pc = pc + 4;
else
test_pc = 0;
@@ -533,9 +531,9 @@ void test_single_step_from_userspace(int test_cnt)
* Run debug testing using the various breakpoint#, watchpoint# and
* context-aware breakpoint# with the given ID_AA64DFR0_EL1 configuration.
*/
-void test_guest_debug_exceptions_all(uint64_t aa64dfr0)
+void test_guest_debug_exceptions_all(u64 aa64dfr0)
{
- uint8_t brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base;
+ u8 brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base;
int b, w, c;
/* Number of breakpoints */
@@ -580,7 +578,7 @@ int main(int argc, char *argv[])
struct kvm_vm *vm;
int opt;
int ss_iteration = 10000;
- uint64_t aa64dfr0;
+ u64 aa64dfr0;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
aa64dfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1));
diff --git a/tools/testing/selftests/kvm/arm64/hypercalls.c b/tools/testing/selftests/kvm/arm64/hypercalls.c
index bf038a0371f4..5d96cdf382c4 100644
--- a/tools/testing/selftests/kvm/arm64/hypercalls.c
+++ b/tools/testing/selftests/kvm/arm64/hypercalls.c
@@ -29,9 +29,9 @@
#define KVM_REG_ARM_VENDOR_HYP_BMAP_2_RESET_VAL 0
struct kvm_fw_reg_info {
- uint64_t reg; /* Register definition */
- uint64_t max_feat_bit; /* Bit that represents the upper limit of the feature-map */
- uint64_t reset_val; /* Reset value for the register */
+ u64 reg; /* Register definition */
+ u64 max_feat_bit; /* Bit that represents the upper limit of the feature-map */
+ u64 reset_val; /* Reset value for the register */
};
#define FW_REG_INFO(r) \
@@ -59,8 +59,8 @@ enum test_stage {
static int stage = TEST_STAGE_REG_IFACE;
struct test_hvc_info {
- uint32_t func_id;
- uint64_t arg1;
+ u32 func_id;
+ u64 arg1;
};
#define TEST_HVC_INFO(f, a1) \
@@ -152,9 +152,9 @@ static void guest_code(void)
}
struct st_time {
- uint32_t rev;
- uint32_t attr;
- uint64_t st_time;
+ u32 rev;
+ u32 attr;
+ u64 st_time;
};
#define STEAL_TIME_SIZE ((sizeof(struct st_time) + 63) & ~63)
@@ -162,7 +162,7 @@ struct st_time {
static void steal_time_init(struct kvm_vcpu *vcpu)
{
- uint64_t st_ipa = (ulong)ST_GPA_BASE;
+ u64 st_ipa = (ulong)ST_GPA_BASE;
unsigned int gpages;
gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE);
@@ -174,13 +174,13 @@ static void steal_time_init(struct kvm_vcpu *vcpu)
static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
{
- uint64_t val;
+ u64 val;
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
- uint64_t set_val;
+ u64 set_val;
/* First 'read' should be the reset value for the reg */
val = vcpu_get_reg(vcpu, reg_info->reg);
@@ -229,7 +229,7 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
{
- uint64_t val;
+ u64 val;
unsigned int i;
int ret;
diff --git a/tools/testing/selftests/kvm/arm64/idreg-idst.c b/tools/testing/selftests/kvm/arm64/idreg-idst.c
index 9ca9f125abdb..a3e84701d814 100644
--- a/tools/testing/selftests/kvm/arm64/idreg-idst.c
+++ b/tools/testing/selftests/kvm/arm64/idreg-idst.c
@@ -13,7 +13,7 @@ static volatile bool sys64, undef;
#define __check_sr_read(r) \
({ \
- uint64_t val; \
+ u64 val; \
\
sys64 = false; \
undef = false; \
@@ -101,7 +101,7 @@ int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- uint64_t mmfr2;
+ u64 mmfr2;
test_disable_default_vgic();
diff --git a/tools/testing/selftests/kvm/arm64/no-vgic.c b/tools/testing/selftests/kvm/arm64/no-vgic.c
index b14686ef17d1..25b2e3222f68 100644
--- a/tools/testing/selftests/kvm/arm64/no-vgic.c
+++ b/tools/testing/selftests/kvm/arm64/no-vgic.c
@@ -15,7 +15,7 @@ static volatile bool handled;
#define __check_sr_read(r) \
({ \
- uint64_t val; \
+ u64 val; \
\
handled = false; \
dsb(sy); \
@@ -33,7 +33,7 @@ static volatile bool handled;
#define __check_gicv5_gicr_op(r) \
({ \
- uint64_t val; \
+ u64 val; \
\
handled = false; \
dsb(sy); \
@@ -82,7 +82,7 @@ static volatile bool handled;
static void guest_code_gicv3(void)
{
- uint64_t val;
+ u64 val;
/*
* Check that we advertise that ID_AA64PFR0_EL1.GIC == 0, having
@@ -262,7 +262,7 @@ int main(int argc, char *argv[])
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
bool has_v3, has_v5;
- uint64_t pfr;
+ u64 pfr;
test_disable_default_vgic();
diff --git a/tools/testing/selftests/kvm/arm64/page_fault_test.c b/tools/testing/selftests/kvm/arm64/page_fault_test.c
index 4ccbd389d133..6bb3d82906b2 100644
--- a/tools/testing/selftests/kvm/arm64/page_fault_test.c
+++ b/tools/testing/selftests/kvm/arm64/page_fault_test.c
@@ -23,7 +23,7 @@
#define TEST_PTE_GVA 0xb0000000
#define TEST_DATA 0x0123456789ABCDEF
-static uint64_t *guest_test_memory = (uint64_t *)TEST_GVA;
+static u64 *guest_test_memory = (u64 *)TEST_GVA;
#define CMD_NONE (0)
#define CMD_SKIP_TEST (1ULL << 1)
@@ -48,7 +48,7 @@ static struct event_cnt {
struct test_desc {
const char *name;
- uint64_t mem_mark_cmd;
+ u64 mem_mark_cmd;
/* Skip the test if any prepare function returns false */
bool (*guest_prepare[PREPARE_FN_NR])(void);
void (*guest_test)(void);
@@ -59,8 +59,8 @@ struct test_desc {
void (*iabt_handler)(struct ex_regs *regs);
void (*mmio_handler)(struct kvm_vm *vm, struct kvm_run *run);
void (*fail_vcpu_run_handler)(int ret);
- uint32_t pt_memslot_flags;
- uint32_t data_memslot_flags;
+ u32 pt_memslot_flags;
+ u32 data_memslot_flags;
bool skip;
struct event_cnt expected_events;
};
@@ -70,9 +70,9 @@ struct test_params {
struct test_desc *test_desc;
};
-static inline void flush_tlb_page(uint64_t vaddr)
+static inline void flush_tlb_page(gva_t gva)
{
- uint64_t page = vaddr >> 12;
+ gva_t page = gva >> 12;
dsb(ishst);
asm volatile("tlbi vaae1is, %0" :: "r" (page));
@@ -82,7 +82,7 @@ static inline void flush_tlb_page(uint64_t vaddr)
static void guest_write64(void)
{
- uint64_t val;
+ u64 val;
WRITE_ONCE(*guest_test_memory, TEST_DATA);
val = READ_ONCE(*guest_test_memory);
@@ -92,8 +92,8 @@ static void guest_write64(void)
/* Check the system for atomic instructions. */
static bool guest_check_lse(void)
{
- uint64_t isar0 = read_sysreg(id_aa64isar0_el1);
- uint64_t atomic;
+ u64 isar0 = read_sysreg(id_aa64isar0_el1);
+ u64 atomic;
atomic = FIELD_GET(ID_AA64ISAR0_EL1_ATOMIC, isar0);
return atomic >= 2;
@@ -101,8 +101,8 @@ static bool guest_check_lse(void)
static bool guest_check_dc_zva(void)
{
- uint64_t dczid = read_sysreg(dczid_el0);
- uint64_t dzp = FIELD_GET(DCZID_EL0_DZP, dczid);
+ u64 dczid = read_sysreg(dczid_el0);
+ u64 dzp = FIELD_GET(DCZID_EL0_DZP, dczid);
return dzp == 0;
}
@@ -110,7 +110,7 @@ static bool guest_check_dc_zva(void)
/* Compare and swap instruction. */
static void guest_cas(void)
{
- uint64_t val;
+ u64 val;
GUEST_ASSERT(guest_check_lse());
asm volatile(".arch_extension lse\n"
@@ -122,7 +122,7 @@ static void guest_cas(void)
static void guest_read64(void)
{
- uint64_t val;
+ u64 val;
val = READ_ONCE(*guest_test_memory);
GUEST_ASSERT_EQ(val, 0);
@@ -131,7 +131,7 @@ static void guest_read64(void)
/* Address translation instruction */
static void guest_at(void)
{
- uint64_t par;
+ u64 par;
asm volatile("at s1e1r, %0" :: "r" (guest_test_memory));
isb();
@@ -148,7 +148,7 @@ static void guest_at(void)
*/
static void guest_dc_zva(void)
{
- uint16_t val;
+ u16 val;
asm volatile("dc zva, %0" :: "r" (guest_test_memory));
dsb(ish);
@@ -164,8 +164,8 @@ static void guest_dc_zva(void)
*/
static void guest_ld_preidx(void)
{
- uint64_t val;
- uint64_t addr = TEST_GVA - 8;
+ u64 val;
+ u64 addr = TEST_GVA - 8;
/*
* This ends up accessing "TEST_GVA + 8 - 8", where "TEST_GVA - 8" is
@@ -179,8 +179,8 @@ static void guest_ld_preidx(void)
static void guest_st_preidx(void)
{
- uint64_t val = TEST_DATA;
- uint64_t addr = TEST_GVA - 8;
+ u64 val = TEST_DATA;
+ u64 addr = TEST_GVA - 8;
asm volatile("str %0, [%1, #8]!"
: "+r" (val), "+r" (addr));
@@ -191,8 +191,8 @@ static void guest_st_preidx(void)
static bool guest_set_ha(void)
{
- uint64_t mmfr1 = read_sysreg(id_aa64mmfr1_el1);
- uint64_t hadbs, tcr;
+ u64 mmfr1 = read_sysreg(id_aa64mmfr1_el1);
+ u64 hadbs, tcr;
/* Skip if HA is not supported. */
hadbs = FIELD_GET(ID_AA64MMFR1_EL1_HAFDBS, mmfr1);
@@ -208,7 +208,7 @@ static bool guest_set_ha(void)
static bool guest_clear_pte_af(void)
{
- *((uint64_t *)TEST_PTE_GVA) &= ~PTE_AF;
+ *((u64 *)TEST_PTE_GVA) &= ~PTE_AF;
flush_tlb_page(TEST_GVA);
return true;
@@ -217,7 +217,7 @@ static bool guest_clear_pte_af(void)
static void guest_check_pte_af(void)
{
dsb(ish);
- GUEST_ASSERT_EQ(*((uint64_t *)TEST_PTE_GVA) & PTE_AF, PTE_AF);
+ GUEST_ASSERT_EQ(*((u64 *)TEST_PTE_GVA) & PTE_AF, PTE_AF);
}
static void guest_check_write_in_dirty_log(void)
@@ -302,26 +302,26 @@ static void no_iabt_handler(struct ex_regs *regs)
static struct uffd_args {
char *copy;
void *hva;
- uint64_t paging_size;
+ u64 paging_size;
} pt_args, data_args;
/* Returns true to continue the test, and false if it should be skipped. */
static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
struct uffd_args *args)
{
- uint64_t addr = msg->arg.pagefault.address;
- uint64_t flags = msg->arg.pagefault.flags;
+ u64 addr = msg->arg.pagefault.address;
+ u64 flags = msg->arg.pagefault.flags;
struct uffdio_copy copy;
int ret;
TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING,
"The only expected UFFD mode is MISSING");
- TEST_ASSERT_EQ(addr, (uint64_t)args->hva);
+ TEST_ASSERT_EQ(addr, (u64)args->hva);
pr_debug("uffd fault: addr=%p write=%d\n",
(void *)addr, !!(flags & UFFD_PAGEFAULT_FLAG_WRITE));
- copy.src = (uint64_t)args->copy;
+ copy.src = (u64)args->copy;
copy.dst = addr;
copy.len = args->paging_size;
copy.mode = 0;
@@ -407,7 +407,7 @@ static bool punch_hole_in_backing_store(struct kvm_vm *vm,
struct userspace_mem_region *region)
{
void *hva = (void *)region->region.userspace_addr;
- uint64_t paging_size = region->region.memory_size;
+ u64 paging_size = region->region.memory_size;
int ret, fd = region->fd;
if (fd != -1) {
@@ -438,7 +438,7 @@ static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run)
static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run *run)
{
- uint64_t data;
+ u64 data;
memcpy(&data, run->mmio.data, sizeof(data));
pr_debug("addr=%lld len=%d w=%d data=%lx\n",
@@ -449,11 +449,11 @@ static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run *run)
static bool check_write_in_dirty_log(struct kvm_vm *vm,
struct userspace_mem_region *region,
- uint64_t host_pg_nr)
+ u64 host_pg_nr)
{
unsigned long *bmap;
bool first_page_dirty;
- uint64_t size = region->region.memory_size;
+ u64 size = region->region.memory_size;
/* getpage_size() is not always equal to vm->page_size */
bmap = bitmap_zalloc(size / getpagesize());
@@ -468,7 +468,7 @@ static bool handle_cmd(struct kvm_vm *vm, int cmd)
{
struct userspace_mem_region *data_region, *pt_region;
bool continue_test = true;
- uint64_t pte_gpa, pte_pg;
+ u64 pte_gpa, pte_pg;
data_region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
pt_region = vm_get_mem_region(vm, MEM_REGION_PT);
@@ -510,7 +510,7 @@ void fail_vcpu_run_mmio_no_syndrome_handler(int ret)
events.fail_vcpu_runs += 1;
}
-typedef uint32_t aarch64_insn_t;
+typedef u32 aarch64_insn_t;
extern aarch64_insn_t __exec_test[2];
noinline void __return_0x77(void)
@@ -525,7 +525,7 @@ noinline void __return_0x77(void)
*/
static void load_exec_code_for_test(struct kvm_vm *vm)
{
- uint64_t *code;
+ u64 *code;
struct userspace_mem_region *region;
void *hva;
@@ -552,7 +552,7 @@ static void setup_abort_handlers(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
static void setup_gva_maps(struct kvm_vm *vm)
{
struct userspace_mem_region *region;
- uint64_t pte_gpa;
+ u64 pte_gpa;
region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
/* Map TEST_GVA first. This will install a new PTE. */
@@ -574,12 +574,12 @@ enum pf_test_memslots {
*/
static void setup_memslots(struct kvm_vm *vm, struct test_params *p)
{
- uint64_t backing_src_pagesz = get_backing_src_pagesz(p->src_type);
- uint64_t guest_page_size = vm->page_size;
- uint64_t max_gfn = vm_compute_max_gfn(vm);
+ u64 backing_src_pagesz = get_backing_src_pagesz(p->src_type);
+ u64 guest_page_size = vm->page_size;
+ u64 max_gfn = vm_compute_max_gfn(vm);
/* Enough for 2M of code when using 4K guest pages. */
- uint64_t code_npages = 512;
- uint64_t pt_size, data_size, data_gpa;
+ u64 code_npages = 512;
+ u64 pt_size, data_size, data_gpa;
/*
* This test requires 1 pgd, 2 pud, 4 pmd, and 6 pte pages when using
diff --git a/tools/testing/selftests/kvm/arm64/psci_test.c b/tools/testing/selftests/kvm/arm64/psci_test.c
index 98e49f710aef..e775faf20868 100644
--- a/tools/testing/selftests/kvm/arm64/psci_test.c
+++ b/tools/testing/selftests/kvm/arm64/psci_test.c
@@ -22,8 +22,7 @@
#define CPU_ON_ENTRY_ADDR 0xfeedf00dul
#define CPU_ON_CONTEXT_ID 0xdeadc0deul
-static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr,
- uint64_t context_id)
+static u64 psci_cpu_on(u64 target_cpu, u64 entry_addr, u64 context_id)
{
struct arm_smccc_res res;
@@ -33,8 +32,7 @@ static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr,
return res.a0;
}
-static uint64_t psci_affinity_info(uint64_t target_affinity,
- uint64_t lowest_affinity_level)
+static u64 psci_affinity_info(u64 target_affinity, u64 lowest_affinity_level)
{
struct arm_smccc_res res;
@@ -44,7 +42,7 @@ static uint64_t psci_affinity_info(uint64_t target_affinity,
return res.a0;
}
-static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id)
+static u64 psci_system_suspend(u64 entry_addr, u64 context_id)
{
struct arm_smccc_res res;
@@ -54,7 +52,7 @@ static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id)
return res.a0;
}
-static uint64_t psci_system_off2(uint64_t type, uint64_t cookie)
+static u64 psci_system_off2(u64 type, u64 cookie)
{
struct arm_smccc_res res;
@@ -63,7 +61,7 @@ static uint64_t psci_system_off2(uint64_t type, uint64_t cookie)
return res.a0;
}
-static uint64_t psci_features(uint32_t func_id)
+static u64 psci_features(u32 func_id)
{
struct arm_smccc_res res;
@@ -110,7 +108,7 @@ static void enter_guest(struct kvm_vcpu *vcpu)
static void assert_vcpu_reset(struct kvm_vcpu *vcpu)
{
- uint64_t obs_pc, obs_x0;
+ u64 obs_pc, obs_x0;
obs_pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));
obs_x0 = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]));
@@ -123,9 +121,9 @@ static void assert_vcpu_reset(struct kvm_vcpu *vcpu)
obs_x0, CPU_ON_CONTEXT_ID);
}
-static void guest_test_cpu_on(uint64_t target_cpu)
+static void guest_test_cpu_on(u64 target_cpu)
{
- uint64_t target_state;
+ u64 target_state;
GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID));
@@ -142,7 +140,7 @@ static void guest_test_cpu_on(uint64_t target_cpu)
static void host_test_cpu_on(void)
{
struct kvm_vcpu *source, *target;
- uint64_t target_mpidr;
+ u64 target_mpidr;
struct kvm_vm *vm;
struct ucall uc;
@@ -166,7 +164,7 @@ static void host_test_cpu_on(void)
static void guest_test_system_suspend(void)
{
- uint64_t ret;
+ u64 ret;
/* assert that SYSTEM_SUSPEND is discoverable */
GUEST_ASSERT(!psci_features(PSCI_1_0_FN_SYSTEM_SUSPEND));
@@ -200,7 +198,7 @@ static void host_test_system_suspend(void)
static void guest_test_system_off2(void)
{
- uint64_t ret;
+ u64 ret;
/* assert that SYSTEM_OFF2 is discoverable */
GUEST_ASSERT(psci_features(PSCI_1_3_FN_SYSTEM_OFF2) &
@@ -238,7 +236,7 @@ static void host_test_system_off2(void)
{
struct kvm_vcpu *source, *target;
struct kvm_mp_state mps;
- uint64_t psci_version = 0;
+ u64 psci_version = 0;
int nr_shutdowns = 0;
struct kvm_run *run;
struct ucall uc;
diff --git a/tools/testing/selftests/kvm/arm64/sea_to_user.c b/tools/testing/selftests/kvm/arm64/sea_to_user.c
index 573dd790aeb8..e96d8982c28b 100644
--- a/tools/testing/selftests/kvm/arm64/sea_to_user.c
+++ b/tools/testing/selftests/kvm/arm64/sea_to_user.c
@@ -51,18 +51,16 @@
#define EINJ_OFFSET 0x01234badUL
#define EINJ_GVA ((START_GVA) + (EINJ_OFFSET))
-static vm_paddr_t einj_gpa;
+static gpa_t einj_gpa;
static void *einj_hva;
-static uint64_t einj_hpa;
+static u64 einj_hpa;
static bool far_invalid;
-static uint64_t translate_to_host_paddr(unsigned long vaddr)
+static u64 translate_hva_to_hpa(unsigned long hva)
{
- uint64_t pinfo;
- int64_t offset = vaddr / getpagesize() * sizeof(pinfo);
+ u64 pinfo;
+ s64 offset = hva / getpagesize() * sizeof(pinfo);
int fd;
- uint64_t page_addr;
- uint64_t paddr;
fd = open("/proc/self/pagemap", O_RDONLY);
if (fd < 0)
@@ -77,12 +75,11 @@ static uint64_t translate_to_host_paddr(unsigned long vaddr)
if ((pinfo & PAGE_PRESENT) == 0)
ksft_exit_fail_perror("Page not present");
- page_addr = (pinfo & PAGE_PHYSICAL) << MIN_PAGE_SHIFT;
- paddr = page_addr + (vaddr & (getpagesize() - 1));
- return paddr;
+ return ((pinfo & PAGE_PHYSICAL) << MIN_PAGE_SHIFT) +
+ (hva & (getpagesize() - 1));
}
-static void write_einj_entry(const char *einj_path, uint64_t val)
+static void write_einj_entry(const char *einj_path, u64 val)
{
char cmd[256] = {0};
FILE *cmdfile = NULL;
@@ -96,7 +93,7 @@ static void write_einj_entry(const char *einj_path, uint64_t val)
ksft_exit_fail_perror("Failed to write EINJ entry");
}
-static void inject_uer(uint64_t paddr)
+static void inject_uer(u64 hpa)
{
if (access("/sys/firmware/acpi/tables/EINJ", R_OK) == -1)
ksft_test_result_skip("EINJ table no available in firmware");
@@ -106,7 +103,7 @@ static void inject_uer(uint64_t paddr)
write_einj_entry(EINJ_ETYPE, ERROR_TYPE_MEMORY_UER);
write_einj_entry(EINJ_FLAGS, MASK_MEMORY_UER);
- write_einj_entry(EINJ_ADDR, paddr);
+ write_einj_entry(EINJ_ADDR, hpa);
write_einj_entry(EINJ_MASK, ~0x0UL);
write_einj_entry(EINJ_NOTRIGGER, 1);
write_einj_entry(EINJ_DOIT, 1);
@@ -145,10 +142,10 @@ static void setup_sigbus_handler(void)
static void guest_code(void)
{
- uint64_t guest_data;
+ u64 guest_data;
/* Consumes error will cause a SEA. */
- guest_data = *(uint64_t *)EINJ_GVA;
+ guest_data = *(u64 *)EINJ_GVA;
GUEST_FAIL("Poison not protected by SEA: gva=%#lx, guest_data=%#lx\n",
EINJ_GVA, guest_data);
@@ -253,8 +250,8 @@ static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu)
size_t backing_page_size;
size_t guest_page_size;
size_t alignment;
- uint64_t num_guest_pages;
- vm_paddr_t start_gpa;
+ u64 num_guest_pages;
+ gpa_t start_gpa;
enum vm_mem_backing_src_type src_type = VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB;
struct kvm_vm *vm;
@@ -278,7 +275,7 @@ static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu)
vm_userspace_mem_region_add(
/*vm=*/vm,
/*src_type=*/src_type,
- /*guest_paddr=*/start_gpa,
+ /*gpa=*/start_gpa,
/*slot=*/1,
/*npages=*/num_guest_pages,
/*flags=*/0);
@@ -292,18 +289,18 @@ static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu)
static void vm_inject_memory_uer(struct kvm_vm *vm)
{
- uint64_t guest_data;
+ u64 guest_data;
einj_gpa = addr_gva2gpa(vm, EINJ_GVA);
einj_hva = addr_gva2hva(vm, EINJ_GVA);
/* Populate certain data before injecting UER. */
- *(uint64_t *)einj_hva = 0xBAADCAFE;
- guest_data = *(uint64_t *)einj_hva;
+ *(u64 *)einj_hva = 0xBAADCAFE;
+ guest_data = *(u64 *)einj_hva;
ksft_print_msg("Before EINJect: data=%#lx\n",
guest_data);
- einj_hpa = translate_to_host_paddr((unsigned long)einj_hva);
+ einj_hpa = translate_hva_to_hpa((unsigned long)einj_hva);
ksft_print_msg("EINJ_GVA=%#lx, einj_gpa=%#lx, einj_hva=%p, einj_hpa=%#lx\n",
EINJ_GVA, einj_gpa, einj_hva, einj_hpa);
diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c
index 3a7e5fe9ae7a..7429a1055df5 100644
--- a/tools/testing/selftests/kvm/arm64/set_id_regs.c
+++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c
@@ -30,20 +30,20 @@ struct reg_ftr_bits {
char *name;
bool sign;
enum ftr_type type;
- uint8_t shift;
- uint64_t mask;
+ u8 shift;
+ u64 mask;
/*
* For FTR_EXACT, safe_val is used as the exact safe value.
* For FTR_LOWER_SAFE, safe_val is used as the minimal safe value.
*/
- int64_t safe_val;
+ s64 safe_val;
/* Allowed to be changed by the host after run */
bool mutable;
};
struct test_feature_reg {
- uint32_t reg;
+ u32 reg;
const struct reg_ftr_bits *ftr_bits;
};
@@ -275,9 +275,9 @@ static void guest_code(void)
}
/* Return a safe value to a given ftr_bits an ftr value */
-uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
+u64 get_safe_value(const struct reg_ftr_bits *ftr_bits, u64 ftr)
{
- uint64_t ftr_max = ftr_bits->mask >> ftr_bits->shift;
+ u64 ftr_max = ftr_bits->mask >> ftr_bits->shift;
TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features");
@@ -329,16 +329,16 @@ uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
}
/* Return an invalid value to a given ftr_bits an ftr value */
-uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
+u64 get_invalid_value(const struct reg_ftr_bits *ftr_bits, u64 ftr)
{
- uint64_t ftr_max = ftr_bits->mask >> ftr_bits->shift;
+ u64 ftr_max = ftr_bits->mask >> ftr_bits->shift;
TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features");
if (ftr_bits->sign == FTR_UNSIGNED) {
switch (ftr_bits->type) {
case FTR_EXACT:
- ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
+ ftr = max((u64)ftr_bits->safe_val + 1, ftr + 1);
break;
case FTR_LOWER_SAFE:
ftr++;
@@ -358,7 +358,7 @@ uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
} else if (ftr != ftr_max) {
switch (ftr_bits->type) {
case FTR_EXACT:
- ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
+ ftr = max((u64)ftr_bits->safe_val + 1, ftr + 1);
break;
case FTR_LOWER_SAFE:
ftr++;
@@ -382,12 +382,12 @@ uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
return ftr;
}
-static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
- const struct reg_ftr_bits *ftr_bits)
+static u64 test_reg_set_success(struct kvm_vcpu *vcpu, u64 reg,
+ const struct reg_ftr_bits *ftr_bits)
{
- uint8_t shift = ftr_bits->shift;
- uint64_t mask = ftr_bits->mask;
- uint64_t val, new_val, ftr;
+ u8 shift = ftr_bits->shift;
+ u64 mask = ftr_bits->mask;
+ u64 val, new_val, ftr;
val = vcpu_get_reg(vcpu, reg);
ftr = (val & mask) >> shift;
@@ -405,12 +405,12 @@ static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
return new_val;
}
-static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
+static void test_reg_set_fail(struct kvm_vcpu *vcpu, u64 reg,
const struct reg_ftr_bits *ftr_bits)
{
- uint8_t shift = ftr_bits->shift;
- uint64_t mask = ftr_bits->mask;
- uint64_t val, old_val, ftr;
+ u8 shift = ftr_bits->shift;
+ u64 mask = ftr_bits->mask;
+ u64 val, old_val, ftr;
int r;
val = vcpu_get_reg(vcpu, reg);
@@ -431,7 +431,7 @@ static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
TEST_ASSERT_EQ(val, old_val);
}
-static uint64_t test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE];
+static u64 test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE];
#define encoding_to_range_idx(encoding) \
KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(encoding), sys_reg_Op1(encoding), \
@@ -441,7 +441,7 @@ static uint64_t test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE];
static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
{
- uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
+ u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
struct reg_mask_range range = {
.addr = (__u64)masks,
};
@@ -458,8 +458,8 @@ static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits;
- uint32_t reg_id = test_regs[i].reg;
- uint64_t reg = KVM_ARM64_SYS_REG(reg_id);
+ u32 reg_id = test_regs[i].reg;
+ u64 reg = KVM_ARM64_SYS_REG(reg_id);
int idx;
/* Get the index to masks array for the idreg */
@@ -489,11 +489,11 @@ static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
#define MPAM_IDREG_TEST 6
static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu)
{
- uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
+ u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
struct reg_mask_range range = {
.addr = (__u64)masks,
};
- uint64_t val;
+ u64 val;
int idx, err;
/*
@@ -584,13 +584,13 @@ static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu)
#define MTE_IDREG_TEST 1
static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
{
- uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
+ u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
struct reg_mask_range range = {
.addr = (__u64)masks,
};
- uint64_t val;
- uint64_t mte;
- uint64_t mte_frac;
+ u64 val;
+ u64 mte;
+ u64 mte_frac;
int idx, err;
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
@@ -644,7 +644,7 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac no longer 0xF\n");
}
-static uint64_t reset_mutable_bits(uint32_t id, uint64_t val)
+static u64 reset_mutable_bits(u32 id, u64 val)
{
struct test_feature_reg *reg = NULL;
@@ -674,7 +674,7 @@ static void test_guest_reg_read(struct kvm_vcpu *vcpu)
struct ucall uc;
while (!done) {
- uint64_t val;
+ u64 val;
vcpu_run(vcpu);
@@ -707,7 +707,7 @@ static void test_guest_reg_read(struct kvm_vcpu *vcpu)
static void test_clidr(struct kvm_vcpu *vcpu)
{
- uint64_t clidr;
+ u64 clidr;
int level;
clidr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1));
@@ -772,10 +772,10 @@ static void test_vcpu_non_ftr_id_regs(struct kvm_vcpu *vcpu)
ksft_test_result_pass("%s\n", __func__);
}
-static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encoding)
+static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, u32 encoding)
{
size_t idx = encoding_to_range_idx(encoding);
- uint64_t observed;
+ u64 observed;
observed = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding));
TEST_ASSERT_EQ(reset_mutable_bits(encoding, test_reg_vals[idx]),
@@ -808,7 +808,7 @@ int main(void)
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
bool aarch64_only;
- uint64_t val, el0;
+ u64 val, el0;
int test_cnt, i, j;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));
diff --git a/tools/testing/selftests/kvm/arm64/smccc_filter.c b/tools/testing/selftests/kvm/arm64/smccc_filter.c
index 1763b9d45400..21e41880261b 100644
--- a/tools/testing/selftests/kvm/arm64/smccc_filter.c
+++ b/tools/testing/selftests/kvm/arm64/smccc_filter.c
@@ -37,7 +37,7 @@ static bool test_runs_at_el2(void)
for (conduit = test_runs_at_el2() ? SMC_INSN : HVC_INSN; \
conduit <= SMC_INSN; conduit++)
-static void guest_main(uint32_t func_id, enum smccc_conduit conduit)
+static void guest_main(u32 func_id, enum smccc_conduit conduit)
{
struct arm_smccc_res res;
@@ -49,7 +49,7 @@ static void guest_main(uint32_t func_id, enum smccc_conduit conduit)
GUEST_SYNC(res.a0);
}
-static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
+static int __set_smccc_filter(struct kvm_vm *vm, u32 start, u32 nr_functions,
enum kvm_smccc_filter_action action)
{
struct kvm_smccc_filter filter = {
@@ -62,7 +62,7 @@ static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_fun
KVM_ARM_VM_SMCCC_FILTER, &filter);
}
-static void set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
+static void set_smccc_filter(struct kvm_vm *vm, u32 start, u32 nr_functions,
enum kvm_smccc_filter_action action)
{
int ret = __set_smccc_filter(vm, start, nr_functions, action);
@@ -112,7 +112,7 @@ static void test_filter_reserved_range(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = setup_vm(&vcpu);
- uint32_t smc64_fn;
+ u32 smc64_fn;
int r;
r = __set_smccc_filter(vm, ARM_SMCCC_ARCH_WORKAROUND_1,
@@ -217,7 +217,7 @@ static void test_filter_denied(void)
}
}
-static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, uint32_t func_id,
+static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, u32 func_id,
enum smccc_conduit conduit)
{
struct kvm_run *run = vcpu->run;
diff --git a/tools/testing/selftests/kvm/arm64/vgic_init.c b/tools/testing/selftests/kvm/arm64/vgic_init.c
index 8d6d3a4ae4db..47e34b43afb2 100644
--- a/tools/testing/selftests/kvm/arm64/vgic_init.c
+++ b/tools/testing/selftests/kvm/arm64/vgic_init.c
@@ -19,7 +19,7 @@
#define NR_VCPUS 4
-#define REG_OFFSET(vcpu, offset) (((uint64_t)vcpu << 32) | offset)
+#define REG_OFFSET(vcpu, offset) (((u64)vcpu << 32) | offset)
#define VGIC_DEV_IS_V2(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V2)
#define VGIC_DEV_IS_V3(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V3)
@@ -27,10 +27,10 @@
struct vm_gic {
struct kvm_vm *vm;
int gic_fd;
- uint32_t gic_dev_type;
+ u32 gic_dev_type;
};
-static uint64_t max_phys_size;
+static u64 max_phys_size;
/*
* Helpers to access a redistributor register and verify the ioctl() failed or
@@ -39,17 +39,17 @@ static uint64_t max_phys_size;
static void v3_redist_reg_get_errno(int gicv3_fd, int vcpu, int offset,
int want, const char *msg)
{
- uint32_t ignored_val;
+ u32 ignored_val;
int ret = __kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
REG_OFFSET(vcpu, offset), &ignored_val);
TEST_ASSERT(ret && errno == want, "%s; want errno = %d", msg, want);
}
-static void v3_redist_reg_get(int gicv3_fd, int vcpu, int offset, uint32_t want,
+static void v3_redist_reg_get(int gicv3_fd, int vcpu, int offset, u32 want,
const char *msg)
{
- uint32_t val;
+ u32 val;
kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
REG_OFFSET(vcpu, offset), &val);
@@ -71,8 +71,8 @@ static int run_vcpu(struct kvm_vcpu *vcpu)
return __vcpu_run(vcpu) ? -errno : 0;
}
-static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
- uint32_t nr_vcpus,
+static struct vm_gic vm_gic_create_with_vcpus(u32 gic_dev_type,
+ u32 nr_vcpus,
struct kvm_vcpu *vcpus[])
{
struct vm_gic v;
@@ -84,7 +84,7 @@ static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
return v;
}
-static struct vm_gic vm_gic_create_barebones(uint32_t gic_dev_type)
+static struct vm_gic vm_gic_create_barebones(u32 gic_dev_type)
{
struct vm_gic v;
@@ -103,9 +103,9 @@ static void vm_gic_destroy(struct vm_gic *v)
}
struct vgic_region_attr {
- uint64_t attr;
- uint64_t size;
- uint64_t alignment;
+ u64 attr;
+ u64 size;
+ u64 alignment;
};
struct vgic_region_attr gic_v3_dist_region = {
@@ -143,7 +143,7 @@ struct vgic_region_attr gic_v2_cpu_region = {
static void subtest_dist_rdist(struct vm_gic *v)
{
int ret;
- uint64_t addr;
+ u64 addr;
struct vgic_region_attr rdist; /* CPU interface in GICv2*/
struct vgic_region_attr dist;
@@ -223,7 +223,7 @@ static void subtest_dist_rdist(struct vm_gic *v)
/* Test the new REDIST region API */
static void subtest_v3_redist_regions(struct vm_gic *v)
{
- uint64_t addr, expected_addr;
+ u64 addr, expected_addr;
int ret;
ret = __kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
@@ -332,7 +332,7 @@ static void subtest_v3_redist_regions(struct vm_gic *v)
* VGIC KVM device is created and initialized before the secondary CPUs
* get created
*/
-static void test_vgic_then_vcpus(uint32_t gic_dev_type)
+static void test_vgic_then_vcpus(u32 gic_dev_type)
{
struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
@@ -353,7 +353,7 @@ static void test_vgic_then_vcpus(uint32_t gic_dev_type)
}
/* All the VCPUs are created before the VGIC KVM device gets initialized */
-static void test_vcpus_then_vgic(uint32_t gic_dev_type)
+static void test_vcpus_then_vgic(u32 gic_dev_type)
{
struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
@@ -408,7 +408,7 @@ static void test_v3_new_redist_regions(void)
struct kvm_vcpu *vcpus[NR_VCPUS];
void *dummy = NULL;
struct vm_gic v;
- uint64_t addr;
+ u64 addr;
int ret;
v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
@@ -460,7 +460,7 @@ static void test_v3_new_redist_regions(void)
static void test_v3_typer_accesses(void)
{
struct vm_gic v;
- uint64_t addr;
+ u64 addr;
int ret, i;
v.vm = vm_create(NR_VCPUS);
@@ -518,7 +518,7 @@ static void test_v3_typer_accesses(void)
}
static struct vm_gic vm_gic_v3_create_with_vcpuids(int nr_vcpus,
- uint32_t vcpuids[])
+ u32 vcpuids[])
{
struct vm_gic v;
int i;
@@ -544,9 +544,9 @@ static struct vm_gic vm_gic_v3_create_with_vcpuids(int nr_vcpus,
*/
static void test_v3_last_bit_redist_regions(void)
{
- uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
+ u32 vcpuids[] = { 0, 3, 5, 4, 1, 2 };
struct vm_gic v;
- uint64_t addr;
+ u64 addr;
v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids);
@@ -578,9 +578,9 @@ static void test_v3_last_bit_redist_regions(void)
/* Test last bit with legacy region */
static void test_v3_last_bit_single_rdist(void)
{
- uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
+ u32 vcpuids[] = { 0, 3, 5, 4, 1, 2 };
struct vm_gic v;
- uint64_t addr;
+ u64 addr;
v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids);
@@ -606,7 +606,7 @@ static void test_v3_redist_ipa_range_check_at_vcpu_run(void)
struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
int ret, i;
- uint64_t addr;
+ u64 addr;
v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, 1, vcpus);
@@ -638,7 +638,7 @@ static void test_v3_its_region(void)
{
struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
- uint64_t addr;
+ u64 addr;
int its_fd, ret;
v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
@@ -717,11 +717,11 @@ static void test_v3_nassgicap(void)
/*
* Returns 0 if it's possible to create GIC device of a given type (V2 or V3).
*/
-int test_kvm_device(uint32_t gic_dev_type)
+int test_kvm_device(u32 gic_dev_type)
{
struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
- uint32_t other;
+ u32 other;
int ret;
v.vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
@@ -968,7 +968,7 @@ static void test_v3_sysregs(void)
kvm_vm_free(vm);
}
-void run_tests(uint32_t gic_dev_type)
+void run_tests(u32 gic_dev_type)
{
test_vcpus_then_vgic(gic_dev_type);
test_vgic_then_vcpus(gic_dev_type);
diff --git a/tools/testing/selftests/kvm/arm64/vgic_irq.c b/tools/testing/selftests/kvm/arm64/vgic_irq.c
index 2fb2c7939fe9..5e231998617e 100644
--- a/tools/testing/selftests/kvm/arm64/vgic_irq.c
+++ b/tools/testing/selftests/kvm/arm64/vgic_irq.c
@@ -24,12 +24,12 @@
* function.
*/
struct test_args {
- uint32_t nr_irqs; /* number of KVM supported IRQs. */
+ u32 nr_irqs; /* number of KVM supported IRQs. */
bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */
bool level_sensitive; /* 1 is level, 0 is edge */
int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */
bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */
- uint32_t shared_data;
+ u32 shared_data;
};
/*
@@ -64,15 +64,15 @@ typedef enum {
struct kvm_inject_args {
kvm_inject_cmd cmd;
- uint32_t first_intid;
- uint32_t num;
+ u32 first_intid;
+ u32 num;
int level;
bool expect_failure;
};
/* Used on the guest side to perform the hypercall. */
-static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
- uint32_t num, int level, bool expect_failure);
+static void kvm_inject_call(kvm_inject_cmd cmd, u32 first_intid,
+ u32 num, int level, bool expect_failure);
/* Used on the host side to get the hypercall info. */
static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
@@ -133,8 +133,8 @@ static struct kvm_inject_desc set_active_fns[] = {
for_each_supported_inject_fn((args), (t), (f))
/* Shared between the guest main thread and the IRQ handlers. */
-volatile uint64_t irq_handled;
-volatile uint32_t irqnr_received[MAX_SPI + 1];
+volatile u64 irq_handled;
+volatile u32 irqnr_received[MAX_SPI + 1];
static void reset_stats(void)
{
@@ -145,25 +145,25 @@ static void reset_stats(void)
irqnr_received[i] = 0;
}
-static uint64_t gic_read_ap1r0(void)
+static u64 gic_read_ap1r0(void)
{
- uint64_t reg = read_sysreg_s(SYS_ICC_AP1R0_EL1);
+ u64 reg = read_sysreg_s(SYS_ICC_AP1R0_EL1);
dsb(sy);
return reg;
}
-static void gic_write_ap1r0(uint64_t val)
+static void gic_write_ap1r0(u64 val)
{
write_sysreg_s(val, SYS_ICC_AP1R0_EL1);
isb();
}
-static void guest_set_irq_line(uint32_t intid, uint32_t level);
+static void guest_set_irq_line(u32 intid, u32 level);
static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
{
- uint32_t intid = gic_get_and_ack_irq();
+ u32 intid = gic_get_and_ack_irq();
if (intid == IAR_SPURIOUS)
return;
@@ -189,8 +189,8 @@ static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
GUEST_ASSERT(!gic_irq_get_pending(intid));
}
-static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
- uint32_t num, int level, bool expect_failure)
+static void kvm_inject_call(kvm_inject_cmd cmd, u32 first_intid,
+ u32 num, int level, bool expect_failure)
{
struct kvm_inject_args args = {
.cmd = cmd,
@@ -204,7 +204,7 @@ static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
#define GUEST_ASSERT_IAR_EMPTY() \
do { \
- uint32_t _intid; \
+ u32 _intid; \
_intid = gic_get_and_ack_irq(); \
GUEST_ASSERT(_intid == IAR_SPURIOUS); \
} while (0)
@@ -237,13 +237,13 @@ static void reset_priorities(struct test_args *args)
gic_set_priority(i, IRQ_DEFAULT_PRIO_REG);
}
-static void guest_set_irq_line(uint32_t intid, uint32_t level)
+static void guest_set_irq_line(u32 intid, u32 level)
{
kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false);
}
static void test_inject_fail(struct test_args *args,
- uint32_t intid, kvm_inject_cmd cmd)
+ u32 intid, kvm_inject_cmd cmd)
{
reset_stats();
@@ -255,10 +255,10 @@ static void test_inject_fail(struct test_args *args,
}
static void guest_inject(struct test_args *args,
- uint32_t first_intid, uint32_t num,
- kvm_inject_cmd cmd)
+ u32 first_intid, u32 num,
+ kvm_inject_cmd cmd)
{
- uint32_t i;
+ u32 i;
reset_stats();
@@ -292,10 +292,10 @@ static void guest_inject(struct test_args *args,
* deactivated yet.
*/
static void guest_restore_active(struct test_args *args,
- uint32_t first_intid, uint32_t num,
- kvm_inject_cmd cmd)
+ u32 first_intid, u32 num,
+ kvm_inject_cmd cmd)
{
- uint32_t prio, intid, ap1r;
+ u32 prio, intid, ap1r;
int i;
/*
@@ -342,9 +342,9 @@ static void guest_restore_active(struct test_args *args,
* This function should only be used in test_inject_preemption (with IRQs
* masked).
*/
-static uint32_t wait_for_and_activate_irq(void)
+static u32 wait_for_and_activate_irq(void)
{
- uint32_t intid;
+ u32 intid;
do {
asm volatile("wfi" : : : "memory");
@@ -360,11 +360,11 @@ static uint32_t wait_for_and_activate_irq(void)
* interrupts for the whole test.
*/
static void test_inject_preemption(struct test_args *args,
- uint32_t first_intid, int num,
+ u32 first_intid, int num,
const unsigned long *exclude,
kvm_inject_cmd cmd)
{
- uint32_t intid, prio, step = KVM_PRIO_STEPS;
+ u32 intid, prio, step = KVM_PRIO_STEPS;
int i;
/* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
@@ -379,7 +379,7 @@ static void test_inject_preemption(struct test_args *args,
local_irq_disable();
for (i = 0; i < num; i++) {
- uint32_t tmp;
+ u32 tmp;
intid = i + first_intid;
if (exclude && test_bit(i, exclude))
@@ -431,7 +431,7 @@ static void test_inject_preemption(struct test_args *args,
static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
{
- uint32_t nr_irqs = args->nr_irqs;
+ u32 nr_irqs = args->nr_irqs;
if (f->sgi) {
guest_inject(args, MIN_SGI, 1, f->cmd);
@@ -451,7 +451,7 @@ static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
static void test_injection_failure(struct test_args *args,
struct kvm_inject_desc *f)
{
- uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
+ u32 bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
int i;
for (i = 0; i < ARRAY_SIZE(bad_intid); i++)
@@ -490,7 +490,7 @@ static void test_restore_active(struct test_args *args, struct kvm_inject_desc *
static void guest_code(struct test_args *args)
{
- uint32_t i, nr_irqs = args->nr_irqs;
+ u32 i, nr_irqs = args->nr_irqs;
bool level_sensitive = args->level_sensitive;
struct kvm_inject_desc *f, *inject_fns;
@@ -529,8 +529,8 @@ static void guest_code(struct test_args *args)
GUEST_DONE();
}
-static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
- struct test_args *test_args, bool expect_failure)
+static void kvm_irq_line_check(struct kvm_vm *vm, u32 intid, int level,
+ struct test_args *test_args, bool expect_failure)
{
int ret;
@@ -548,8 +548,8 @@ static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
}
}
-void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
- bool expect_failure)
+void kvm_irq_set_level_info_check(int gic_fd, u32 intid, int level,
+ bool expect_failure)
{
if (!expect_failure) {
kvm_irq_set_level_info(gic_fd, intid, level);
@@ -573,17 +573,18 @@ void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
}
static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
- uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
- bool expect_failure)
+ u32 intid, u32 num,
+ u32 kvm_max_routes,
+ bool expect_failure)
{
struct kvm_irq_routing *routing;
int ret;
- uint64_t i;
+ u64 i;
assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES);
routing = kvm_gsi_routing_create();
- for (i = intid; i < (uint64_t)intid + num; i++)
+ for (i = intid; i < (u64)intid + num; i++)
kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI);
if (!expect_failure) {
@@ -591,7 +592,7 @@ static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
} else {
ret = _kvm_gsi_routing_write(vm, routing);
/* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */
- if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS)
+ if (((u64)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS)
TEST_ASSERT(ret != 0 && errno == EINVAL,
"Bad intid %u did not cause KVM_SET_GSI_ROUTING "
"error: rc: %i errno: %i", intid, ret, errno);
@@ -602,7 +603,7 @@ static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
}
}
-static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
+static void kvm_irq_write_ispendr_check(int gic_fd, u32 intid,
struct kvm_vcpu *vcpu,
bool expect_failure)
{
@@ -618,13 +619,13 @@ static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
}
static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
- uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
- bool expect_failure)
+ u32 intid, u32 num, u32 kvm_max_routes,
+ bool expect_failure)
{
int fd[MAX_SPI];
- uint64_t val;
+ u64 val;
int ret, f;
- uint64_t i;
+ u64 i;
/*
* There is no way to try injecting an SGI or PPI as the interface
@@ -643,29 +644,29 @@ static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
* that no actual interrupt was injected for those cases.
*/
- for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
+ for (f = 0, i = intid; i < (u64)intid + num; i++, f++)
fd[f] = kvm_new_eventfd();
- for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
- assert(i <= (uint64_t)UINT_MAX);
+ for (f = 0, i = intid; i < (u64)intid + num; i++, f++) {
+ assert(i <= (u64)UINT_MAX);
kvm_assign_irqfd(vm, i - MIN_SPI, fd[f]);
}
- for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
+ for (f = 0, i = intid; i < (u64)intid + num; i++, f++) {
val = 1;
- ret = write(fd[f], &val, sizeof(uint64_t));
- TEST_ASSERT(ret == sizeof(uint64_t),
+ ret = write(fd[f], &val, sizeof(u64));
+ TEST_ASSERT(ret == sizeof(u64),
__KVM_SYSCALL_ERROR("write()", ret));
}
- for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
+ for (f = 0, i = intid; i < (u64)intid + num; i++, f++)
kvm_close(fd[f]);
}
/* handles the valid case: intid=0xffffffff num=1 */
#define for_each_intid(first, num, tmp, i) \
for ((tmp) = (i) = (first); \
- (tmp) < (uint64_t)(first) + (uint64_t)(num); \
+ (tmp) < (u64)(first) + (u64)(num); \
(tmp)++, (i)++)
static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd,
@@ -673,13 +674,13 @@ static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd,
struct test_args *test_args)
{
kvm_inject_cmd cmd = inject_args->cmd;
- uint32_t intid = inject_args->first_intid;
- uint32_t num = inject_args->num;
+ u32 intid = inject_args->first_intid;
+ u32 num = inject_args->num;
int level = inject_args->level;
bool expect_failure = inject_args->expect_failure;
struct kvm_vm *vm = vcpu->vm;
- uint64_t tmp;
- uint32_t i;
+ u64 tmp;
+ u32 i;
/* handles the valid case: intid=0xffffffff num=1 */
assert(intid < UINT_MAX - num || num == 1);
@@ -731,7 +732,7 @@ static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
struct kvm_inject_args *args)
{
struct kvm_inject_args *kvm_args_hva;
- vm_vaddr_t kvm_args_gva;
+ gva_t kvm_args_gva;
kvm_args_gva = uc->args[1];
kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva);
@@ -745,14 +746,14 @@ static void print_args(struct test_args *args)
args->eoi_split);
}
-static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
+static void test_vgic(u32 nr_irqs, bool level_sensitive, bool eoi_split)
{
struct ucall uc;
int gic_fd;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_inject_args inject_args;
- vm_vaddr_t args_gva;
+ gva_t args_gva;
struct test_args args = {
.nr_irqs = nr_irqs,
@@ -770,7 +771,7 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
vcpu_init_descriptor_tables(vcpu);
/* Setup the guest args page (so it gets the args). */
- args_gva = vm_vaddr_alloc_page(vm);
+ args_gva = vm_alloc_page(vm);
memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
vcpu_args_set(vcpu, 1, args_gva);
@@ -810,7 +811,7 @@ static void guest_code_asym_dir(struct test_args *args, int cpuid)
gic_set_priority_mask(CPU_PRIO_MASK);
if (cpuid == 0) {
- uint32_t intid;
+ u32 intid;
local_irq_disable();
@@ -848,7 +849,7 @@ static void guest_code_asym_dir(struct test_args *args, int cpuid)
static void guest_code_group_en(struct test_args *args, int cpuid)
{
- uint32_t intid;
+ u32 intid;
gic_init(GIC_V3, 2);
@@ -896,7 +897,7 @@ static void guest_code_group_en(struct test_args *args, int cpuid)
static void guest_code_timer_spi(struct test_args *args, int cpuid)
{
- uint32_t intid;
+ u32 intid;
u64 val;
gic_init(GIC_V3, 2);
@@ -986,7 +987,7 @@ static void test_vgic_two_cpus(void *gcode)
struct kvm_vcpu *vcpus[2];
struct test_args args = {};
struct kvm_vm *vm;
- vm_vaddr_t args_gva;
+ gva_t args_gva;
int gic_fd, ret;
vm = vm_create_with_vcpus(2, gcode, vcpus);
@@ -996,7 +997,7 @@ static void test_vgic_two_cpus(void *gcode)
vcpu_init_descriptor_tables(vcpus[1]);
/* Setup the guest args page (so it gets the args). */
- args_gva = vm_vaddr_alloc_page(vm);
+ args_gva = vm_alloc_page(vm);
memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
vcpu_args_set(vcpus[0], 2, args_gva, 0);
vcpu_args_set(vcpus[1], 2, args_gva, 1);
@@ -1033,7 +1034,7 @@ static void help(const char *name)
int main(int argc, char **argv)
{
- uint32_t nr_irqs = 64;
+ u32 nr_irqs = 64;
bool default_args = true;
bool level_sensitive = false;
int opt;
diff --git a/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c b/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c
index e857a605f577..d64d434d3f06 100644
--- a/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c
+++ b/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c
@@ -23,7 +23,7 @@
#define GIC_LPI_OFFSET 8192
static size_t nr_iterations = 1000;
-static vm_paddr_t gpa_base;
+static gpa_t gpa_base;
static struct kvm_vm *vm;
static struct kvm_vcpu **vcpus;
@@ -35,14 +35,14 @@ static struct test_data {
u32 nr_devices;
u32 nr_event_ids;
- vm_paddr_t device_table;
- vm_paddr_t collection_table;
- vm_paddr_t cmdq_base;
+ gpa_t device_table;
+ gpa_t collection_table;
+ gpa_t cmdq_base;
void *cmdq_base_va;
- vm_paddr_t itt_tables;
+ gpa_t itt_tables;
- vm_paddr_t lpi_prop_table;
- vm_paddr_t lpi_pend_tables;
+ gpa_t lpi_prop_table;
+ gpa_t lpi_pend_tables;
} test_data = {
.nr_cpus = 1,
.nr_devices = 1,
@@ -73,7 +73,7 @@ static void guest_setup_its_mappings(void)
/* Round-robin the LPIs to all of the vCPUs in the VM */
coll_id = 0;
for (device_id = 0; device_id < nr_devices; device_id++) {
- vm_paddr_t itt_base = test_data.itt_tables + (device_id * SZ_64K);
+ gpa_t itt_base = test_data.itt_tables + (device_id * SZ_64K);
its_send_mapd_cmd(test_data.cmdq_base_va, device_id,
itt_base, SZ_64K, true);
@@ -188,7 +188,7 @@ static void setup_test_data(void)
size_t pages_per_64k = vm_calc_num_guest_pages(vm->mode, SZ_64K);
u32 nr_devices = test_data.nr_devices;
u32 nr_cpus = test_data.nr_cpus;
- vm_paddr_t cmdq_base;
+ gpa_t cmdq_base;
test_data.device_table = vm_phy_pages_alloc(vm, pages_per_64k,
gpa_base,
@@ -224,7 +224,7 @@ static void setup_gic(void)
static void signal_lpi(u32 device_id, u32 event_id)
{
- vm_paddr_t db_addr = GITS_BASE_GPA + GITS_TRANSLATER;
+ gpa_t db_addr = GITS_BASE_GPA + GITS_TRANSLATER;
struct kvm_msi msi = {
.address_lo = db_addr,
diff --git a/tools/testing/selftests/kvm/arm64/vgic_v5.c b/tools/testing/selftests/kvm/arm64/vgic_v5.c
index 3ce6cf37a629..d785b660d847 100644
--- a/tools/testing/selftests/kvm/arm64/vgic_v5.c
+++ b/tools/testing/selftests/kvm/arm64/vgic_v5.c
@@ -17,10 +17,10 @@
struct vm_gic {
struct kvm_vm *vm;
int gic_fd;
- uint32_t gic_dev_type;
+ u32 gic_dev_type;
};
-static uint64_t max_phys_size;
+static u64 max_phys_size;
#define GUEST_CMD_IRQ_CDIA 10
#define GUEST_CMD_IRQ_DIEOI 11
@@ -96,7 +96,7 @@ static void vm_gic_destroy(struct vm_gic *v)
kvm_vm_free(v->vm);
}
-static void test_vgic_v5_ppis(uint32_t gic_dev_type)
+static void test_vgic_v5_ppis(u32 gic_dev_type)
{
struct kvm_vcpu *vcpus[NR_VCPUS];
struct ucall uc;
@@ -173,7 +173,7 @@ done:
/*
* Returns 0 if it's possible to create GIC device of a given type (V5).
*/
-int test_kvm_device(uint32_t gic_dev_type)
+int test_kvm_device(u32 gic_dev_type)
{
struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
@@ -199,7 +199,7 @@ int test_kvm_device(uint32_t gic_dev_type)
return 0;
}
-void run_tests(uint32_t gic_dev_type)
+void run_tests(u32 gic_dev_type)
{
pr_info("Test VGICv5 PPIs\n");
test_vgic_v5_ppis(gic_dev_type);
diff --git a/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c b/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c
index ae36325c022f..22223395969e 100644
--- a/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c
+++ b/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c
@@ -33,20 +33,20 @@ struct vpmu_vm {
static struct vpmu_vm vpmu_vm;
struct pmreg_sets {
- uint64_t set_reg_id;
- uint64_t clr_reg_id;
+ u64 set_reg_id;
+ u64 clr_reg_id;
};
#define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
-static uint64_t get_pmcr_n(uint64_t pmcr)
+static u64 get_pmcr_n(u64 pmcr)
{
return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
}
-static uint64_t get_counters_mask(uint64_t n)
+static u64 get_counters_mask(u64 n)
{
- uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
+ u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
if (n)
mask |= GENMASK(n - 1, 0);
@@ -89,7 +89,7 @@ static inline void write_sel_evtyper(int sel, unsigned long val)
static void pmu_disable_reset(void)
{
- uint64_t pmcr = read_sysreg(pmcr_el0);
+ u64 pmcr = read_sysreg(pmcr_el0);
/* Reset all counters, disabling them */
pmcr &= ~ARMV8_PMU_PMCR_E;
@@ -169,7 +169,7 @@ struct pmc_accessor pmc_accessors[] = {
#define GUEST_ASSERT_BITMAP_REG(regname, mask, set_expected) \
{ \
- uint64_t _tval = read_sysreg(regname); \
+ u64 _tval = read_sysreg(regname); \
\
if (set_expected) \
__GUEST_ASSERT((_tval & mask), \
@@ -185,7 +185,7 @@ struct pmc_accessor pmc_accessors[] = {
* Check if @mask bits in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers
* are set or cleared as specified in @set_expected.
*/
-static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected)
+static void check_bitmap_pmu_regs(u64 mask, bool set_expected)
{
GUEST_ASSERT_BITMAP_REG(pmcntenset_el0, mask, set_expected);
GUEST_ASSERT_BITMAP_REG(pmcntenclr_el0, mask, set_expected);
@@ -207,7 +207,7 @@ static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected)
*/
static void test_bitmap_pmu_regs(int pmc_idx, bool set_op)
{
- uint64_t pmcr_n, test_bit = BIT(pmc_idx);
+ u64 pmcr_n, test_bit = BIT(pmc_idx);
bool set_expected = false;
if (set_op) {
@@ -232,7 +232,7 @@ static void test_bitmap_pmu_regs(int pmc_idx, bool set_op)
*/
static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
{
- uint64_t write_data, read_data;
+ u64 write_data, read_data;
/* Disable all PMCs and reset all PMCs to zero. */
pmu_disable_reset();
@@ -287,11 +287,11 @@ static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
}
#define INVALID_EC (-1ul)
-uint64_t expected_ec = INVALID_EC;
+u64 expected_ec = INVALID_EC;
static void guest_sync_handler(struct ex_regs *regs)
{
- uint64_t esr, ec;
+ u64 esr, ec;
esr = read_sysreg(esr_el1);
ec = ESR_ELx_EC(esr);
@@ -351,9 +351,9 @@ static void test_access_invalid_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
* if reading/writing PMU registers for implemented or unimplemented
* counters works as expected.
*/
-static void guest_code(uint64_t expected_pmcr_n)
+static void guest_code(u64 expected_pmcr_n)
{
- uint64_t pmcr, pmcr_n, unimp_mask;
+ u64 pmcr, pmcr_n, unimp_mask;
int i, pmc;
__GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
@@ -402,12 +402,12 @@ static void guest_code(uint64_t expected_pmcr_n)
static void create_vpmu_vm(void *guest_code)
{
struct kvm_vcpu_init init;
- uint8_t pmuver, ec;
- uint64_t dfr0, irq = 23;
+ u8 pmuver, ec;
+ u64 dfr0, irq = 23;
struct kvm_device_attr irq_attr = {
.group = KVM_ARM_VCPU_PMU_V3_CTRL,
.attr = KVM_ARM_VCPU_PMU_V3_IRQ,
- .addr = (uint64_t)&irq,
+ .addr = (u64)&irq,
};
/* The test creates the vpmu_vm multiple times. Ensure a clean state */
@@ -443,7 +443,7 @@ static void destroy_vpmu_vm(void)
kvm_vm_free(vpmu_vm.vm);
}
-static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
+static void run_vcpu(struct kvm_vcpu *vcpu, u64 pmcr_n)
{
struct ucall uc;
@@ -489,9 +489,9 @@ static void test_create_vpmu_vm_with_nr_counters(unsigned int nr_counters, bool
* Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n,
* and run the test.
*/
-static void run_access_test(uint64_t pmcr_n)
+static void run_access_test(u64 pmcr_n)
{
- uint64_t sp;
+ u64 sp;
struct kvm_vcpu *vcpu;
struct kvm_vcpu_init init;
@@ -514,7 +514,7 @@ static void run_access_test(uint64_t pmcr_n)
aarch64_vcpu_setup(vcpu, &init);
vcpu_init_descriptor_tables(vcpu);
vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), sp);
- vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+ vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (u64)guest_code);
run_vcpu(vcpu, pmcr_n);
@@ -531,12 +531,12 @@ static struct pmreg_sets validity_check_reg_sets[] = {
* Create a VM, and check if KVM handles the userspace accesses of
* the PMU register sets in @validity_check_reg_sets[] correctly.
*/
-static void run_pmregs_validity_test(uint64_t pmcr_n)
+static void run_pmregs_validity_test(u64 pmcr_n)
{
int i;
struct kvm_vcpu *vcpu;
- uint64_t set_reg_id, clr_reg_id, reg_val;
- uint64_t valid_counters_mask, max_counters_mask;
+ u64 set_reg_id, clr_reg_id, reg_val;
+ u64 valid_counters_mask, max_counters_mask;
test_create_vpmu_vm_with_nr_counters(pmcr_n, false);
vcpu = vpmu_vm.vcpu;
@@ -588,7 +588,7 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)
* the vCPU to @pmcr_n, which is larger than the host value.
* The attempt should fail as @pmcr_n is too big to set for the vCPU.
*/
-static void run_error_test(uint64_t pmcr_n)
+static void run_error_test(u64 pmcr_n)
{
pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);
@@ -600,9 +600,9 @@ static void run_error_test(uint64_t pmcr_n)
* Return the default number of implemented PMU event counters excluding
* the cycle counter (i.e. PMCR_EL0.N value) for the guest.
*/
-static uint64_t get_pmcr_n_limit(void)
+static u64 get_pmcr_n_limit(void)
{
- uint64_t pmcr;
+ u64 pmcr;
create_vpmu_vm(guest_code);
pmcr = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0));
@@ -624,7 +624,7 @@ static bool kvm_supports_nr_counters_attr(void)
int main(void)
{
- uint64_t i, pmcr_n;
+ u64 i, pmcr_n;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
TEST_REQUIRE(kvm_supports_vgic_v3());
diff --git a/tools/testing/selftests/kvm/coalesced_io_test.c b/tools/testing/selftests/kvm/coalesced_io_test.c
index 60cb25454899..df4ed5e3877c 100644
--- a/tools/testing/selftests/kvm/coalesced_io_test.c
+++ b/tools/testing/selftests/kvm/coalesced_io_test.c
@@ -14,16 +14,16 @@
struct kvm_coalesced_io {
struct kvm_coalesced_mmio_ring *ring;
- uint32_t ring_size;
- uint64_t mmio_gpa;
- uint64_t *mmio;
+ u32 ring_size;
+ u64 mmio_gpa;
+ u64 *mmio;
/*
* x86-only, but define pio_port for all architectures to minimize the
* amount of #ifdeffery and complexity, without having to sacrifice
* verbose error messages.
*/
- uint8_t pio_port;
+ u8 pio_port;
};
static struct kvm_coalesced_io kvm_builtin_io_ring;
@@ -70,13 +70,13 @@ static void guest_code(struct kvm_coalesced_io *io)
static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu,
struct kvm_coalesced_io *io,
- uint32_t ring_start,
- uint32_t expected_exit)
+ u32 ring_start,
+ u32 expected_exit)
{
const bool want_pio = expected_exit == KVM_EXIT_IO;
struct kvm_coalesced_mmio_ring *ring = io->ring;
struct kvm_run *run = vcpu->run;
- uint32_t pio_value;
+ u32 pio_value;
WRITE_ONCE(ring->first, ring_start);
WRITE_ONCE(ring->last, ring_start);
@@ -88,13 +88,13 @@ static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu,
* data_offset is garbage, e.g. an MMIO gpa.
*/
if (run->exit_reason == KVM_EXIT_IO)
- pio_value = *(uint32_t *)((void *)run + run->io.data_offset);
+ pio_value = *(u32 *)((void *)run + run->io.data_offset);
else
pio_value = 0;
TEST_ASSERT((!want_pio && (run->exit_reason == KVM_EXIT_MMIO && run->mmio.is_write &&
run->mmio.phys_addr == io->mmio_gpa && run->mmio.len == 8 &&
- *(uint64_t *)run->mmio.data == io->mmio_gpa + io->ring_size - 1)) ||
+ *(u64 *)run->mmio.data == io->mmio_gpa + io->ring_size - 1)) ||
(want_pio && (run->exit_reason == KVM_EXIT_IO && run->io.port == io->pio_port &&
run->io.direction == KVM_EXIT_IO_OUT && run->io.count == 1 &&
pio_value == io->pio_port + io->ring_size - 1)),
@@ -105,14 +105,14 @@ static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu,
want_pio ? (unsigned long long)io->pio_port : io->mmio_gpa,
(want_pio ? io->pio_port : io->mmio_gpa) + io->ring_size - 1, run->exit_reason,
run->exit_reason == KVM_EXIT_MMIO ? "MMIO" : run->exit_reason == KVM_EXIT_IO ? "PIO" : "other",
- run->mmio.phys_addr, run->mmio.is_write, run->mmio.len, *(uint64_t *)run->mmio.data,
+ run->mmio.phys_addr, run->mmio.is_write, run->mmio.len, *(u64 *)run->mmio.data,
run->io.port, run->io.direction, run->io.size, run->io.count, pio_value);
}
static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu,
struct kvm_coalesced_io *io,
- uint32_t ring_start,
- uint32_t expected_exit)
+ u32 ring_start,
+ u32 expected_exit)
{
struct kvm_coalesced_mmio_ring *ring = io->ring;
int i;
@@ -124,18 +124,18 @@ static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu,
ring->first, ring->last, io->ring_size, ring_start);
for (i = 0; i < io->ring_size - 1; i++) {
- uint32_t idx = (ring->first + i) % io->ring_size;
+ u32 idx = (ring->first + i) % io->ring_size;
struct kvm_coalesced_mmio *entry = &ring->coalesced_mmio[idx];
#ifdef __x86_64__
if (i & 1)
TEST_ASSERT(entry->phys_addr == io->pio_port &&
entry->len == 4 && entry->pio &&
- *(uint32_t *)entry->data == io->pio_port + i,
+ *(u32 *)entry->data == io->pio_port + i,
"Wanted 4-byte port I/O 0x%x = 0x%x in entry %u, got %u-byte %s 0x%llx = 0x%x",
io->pio_port, io->pio_port + i, i,
entry->len, entry->pio ? "PIO" : "MMIO",
- entry->phys_addr, *(uint32_t *)entry->data);
+ entry->phys_addr, *(u32 *)entry->data);
else
#endif
TEST_ASSERT(entry->phys_addr == io->mmio_gpa &&
@@ -143,12 +143,12 @@ static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu,
"Wanted 8-byte MMIO to 0x%lx = %lx in entry %u, got %u-byte %s 0x%llx = 0x%lx",
io->mmio_gpa, io->mmio_gpa + i, i,
entry->len, entry->pio ? "PIO" : "MMIO",
- entry->phys_addr, *(uint64_t *)entry->data);
+ entry->phys_addr, *(u64 *)entry->data);
}
}
static void test_coalesced_io(struct kvm_vcpu *vcpu,
- struct kvm_coalesced_io *io, uint32_t ring_start)
+ struct kvm_coalesced_io *io, u32 ring_start)
{
struct kvm_coalesced_mmio_ring *ring = io->ring;
@@ -219,11 +219,11 @@ int main(int argc, char *argv[])
* the MMIO GPA identity mapped in the guest.
*/
.mmio_gpa = 4ull * SZ_1G,
- .mmio = (uint64_t *)(4ull * SZ_1G),
+ .mmio = (u64 *)(4ull * SZ_1G),
.pio_port = 0x80,
};
- virt_map(vm, (uint64_t)kvm_builtin_io_ring.mmio, kvm_builtin_io_ring.mmio_gpa, 1);
+ virt_map(vm, (u64)kvm_builtin_io_ring.mmio, kvm_builtin_io_ring.mmio_gpa, 1);
sync_global_to_guest(vm, kvm_builtin_io_ring);
vcpu_args_set(vcpu, 1, &kvm_builtin_io_ring);
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index 0202b78f8680..302c4923d093 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -24,7 +24,7 @@
#ifdef __NR_userfaultfd
static int nr_vcpus = 1;
-static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static size_t demand_paging_size;
static char *guest_data_prototype;
@@ -58,7 +58,7 @@ static int handle_uffd_page_request(int uffd_mode, int uffd,
struct uffd_msg *msg)
{
pid_t tid = syscall(__NR_gettid);
- uint64_t addr = msg->arg.pagefault.address;
+ u64 addr = msg->arg.pagefault.address;
struct timespec start;
struct timespec ts_diff;
int r;
@@ -68,7 +68,7 @@ static int handle_uffd_page_request(int uffd_mode, int uffd,
if (uffd_mode == UFFDIO_REGISTER_MODE_MISSING) {
struct uffdio_copy copy;
- copy.src = (uint64_t)guest_data_prototype;
+ copy.src = (u64)guest_data_prototype;
copy.dst = addr;
copy.len = demand_paging_size;
copy.mode = 0;
@@ -138,7 +138,7 @@ struct test_params {
bool partition_vcpu_memory_access;
};
-static void prefault_mem(void *alias, uint64_t len)
+static void prefault_mem(void *alias, u64 len)
{
size_t p;
@@ -154,7 +154,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct memstress_vcpu_args *vcpu_args;
struct test_params *p = arg;
struct uffd_desc **uffd_descs = NULL;
- uint64_t uffd_region_size;
+ u64 uffd_region_size;
struct timespec start;
struct timespec ts_diff;
double vcpu_paging_rate;
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 0a1ea1d1e2d8..ef779fa91827 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -24,7 +24,7 @@
#define TEST_HOST_LOOP_N 2UL
static int nr_vcpus = 1;
-static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static bool run_vcpus_while_disabling_dirty_logging;
/* Host variables */
@@ -37,7 +37,7 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
int vcpu_idx = vcpu_args->vcpu_idx;
- uint64_t pages_count = 0;
+ u64 pages_count = 0;
struct kvm_run *run;
struct timespec start;
struct timespec ts_diff;
@@ -93,11 +93,11 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
struct test_params {
unsigned long iterations;
- uint64_t phys_offset;
+ u64 phys_offset;
bool partition_vcpu_memory_access;
enum vm_mem_backing_src_type backing_src;
int slots;
- uint32_t write_percent;
+ u32 write_percent;
bool random_access;
};
@@ -106,9 +106,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct test_params *p = arg;
struct kvm_vm *vm;
unsigned long **bitmaps;
- uint64_t guest_num_pages;
- uint64_t host_num_pages;
- uint64_t pages_per_slot;
+ u64 guest_num_pages;
+ u64 host_num_pages;
+ u64 pages_per_slot;
struct timespec start;
struct timespec ts_diff;
struct timespec get_dirty_log_total = (struct timespec){0};
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 7627b328f18a..12446a4b6e8d 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -74,11 +74,11 @@
* the host. READ/WRITE_ONCE() should also be used with anything
* that may change.
*/
-static uint64_t host_page_size;
-static uint64_t guest_page_size;
-static uint64_t guest_num_pages;
-static uint64_t iteration;
-static uint64_t nr_writes;
+static u64 host_page_size;
+static u64 guest_page_size;
+static u64 guest_num_pages;
+static u64 iteration;
+static u64 nr_writes;
static bool vcpu_stop;
/*
@@ -86,13 +86,13 @@ static bool vcpu_stop;
* This will be set to the topmost valid physical address minus
* the test memory size.
*/
-static uint64_t guest_test_phys_mem;
+static u64 guest_test_phys_mem;
/*
* Guest virtual memory offset of the testing memory slot.
* Must not conflict with identity mapped test code.
*/
-static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
/*
* Continuously write to the first 8 bytes of a random pages within
@@ -100,10 +100,10 @@ static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
*/
static void guest_code(void)
{
- uint64_t addr;
+ u64 addr;
#ifdef __s390x__
- uint64_t i;
+ u64 i;
/*
* On s390x, all pages of a 1M segment are initially marked as dirty
@@ -113,7 +113,7 @@ static void guest_code(void)
*/
for (i = 0; i < guest_num_pages; i++) {
addr = guest_test_virt_mem + i * guest_page_size;
- vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration));
+ vcpu_arch_put_guest(*(u64 *)addr, READ_ONCE(iteration));
nr_writes++;
}
#endif
@@ -125,7 +125,7 @@ static void guest_code(void)
* guest_page_size;
addr = align_down(addr, host_page_size);
- vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration));
+ vcpu_arch_put_guest(*(u64 *)addr, READ_ONCE(iteration));
nr_writes++;
}
@@ -138,11 +138,11 @@ static bool host_quit;
/* Points to the test VM memory region on which we track dirty logs */
static void *host_test_mem;
-static uint64_t host_num_pages;
+static u64 host_num_pages;
/* For statistics only */
-static uint64_t host_dirty_count;
-static uint64_t host_clear_count;
+static u64 host_dirty_count;
+static u64 host_clear_count;
/* Whether dirty ring reset is requested, or finished */
static sem_t sem_vcpu_stop;
@@ -169,7 +169,7 @@ static bool dirty_ring_vcpu_ring_full;
* dirty gfn we've collected, so that if a mismatch of data found later in the
* verifying process, we let it pass.
*/
-static uint64_t dirty_ring_last_page = -1ULL;
+static u64 dirty_ring_last_page = -1ULL;
/*
* In addition to the above, it is possible (especially if this
@@ -213,7 +213,7 @@ static uint64_t dirty_ring_last_page = -1ULL;
* and also don't fail when it is reported in the next iteration, together with
* an outdated iteration count.
*/
-static uint64_t dirty_ring_prev_iteration_last_page;
+static u64 dirty_ring_prev_iteration_last_page;
enum log_mode_t {
/* Only use KVM_GET_DIRTY_LOG for logging */
@@ -236,7 +236,7 @@ static enum log_mode_t host_log_mode_option = LOG_MODE_ALL;
/* Logging mode for current run */
static enum log_mode_t host_log_mode;
static pthread_t vcpu_thread;
-static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;
+static u32 test_dirty_ring_count = TEST_DIRTY_RING_COUNT;
static bool clear_log_supported(void)
{
@@ -255,15 +255,15 @@ static void clear_log_create_vm_done(struct kvm_vm *vm)
}
static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
- void *bitmap, uint32_t num_pages,
- uint32_t *unused)
+ void *bitmap, u32 num_pages,
+ u32 *unused)
{
kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
}
static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
- void *bitmap, uint32_t num_pages,
- uint32_t *unused)
+ void *bitmap, u32 num_pages,
+ u32 *unused)
{
kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
@@ -297,8 +297,8 @@ static bool dirty_ring_supported(void)
static void dirty_ring_create_vm_done(struct kvm_vm *vm)
{
- uint64_t pages;
- uint32_t limit;
+ u64 pages;
+ u32 limit;
/*
* We rely on vcpu exit due to full dirty ring state. Adjust
@@ -333,12 +333,12 @@ static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
smp_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}
-static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
- int slot, void *bitmap,
- uint32_t num_pages, uint32_t *fetch_index)
+static u32 dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
+ int slot, void *bitmap,
+ u32 num_pages, u32 *fetch_index)
{
struct kvm_dirty_gfn *cur;
- uint32_t count = 0;
+ u32 count = 0;
while (true) {
cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
@@ -359,10 +359,10 @@ static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
}
static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
- void *bitmap, uint32_t num_pages,
- uint32_t *ring_buf_idx)
+ void *bitmap, u32 num_pages,
+ u32 *ring_buf_idx)
{
- uint32_t count, cleared;
+ u32 count, cleared;
/* Only have one vcpu */
count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
@@ -404,8 +404,8 @@ struct log_mode {
void (*create_vm_done)(struct kvm_vm *vm);
/* Hook to collect the dirty pages into the bitmap provided */
void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot,
- void *bitmap, uint32_t num_pages,
- uint32_t *ring_buf_idx);
+ void *bitmap, u32 num_pages,
+ u32 *ring_buf_idx);
/* Hook to call when after each vcpu run */
void (*after_vcpu_run)(struct kvm_vcpu *vcpu);
} log_modes[LOG_MODE_NUM] = {
@@ -459,8 +459,8 @@ static void log_mode_create_vm_done(struct kvm_vm *vm)
}
static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
- void *bitmap, uint32_t num_pages,
- uint32_t *ring_buf_idx)
+ void *bitmap, u32 num_pages,
+ u32 *ring_buf_idx)
{
struct log_mode *mode = &log_modes[host_log_mode];
@@ -494,11 +494,11 @@ static void *vcpu_worker(void *data)
static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long **bmap)
{
- uint64_t page, nr_dirty_pages = 0, nr_clean_pages = 0;
- uint64_t step = vm_num_host_pages(mode, 1);
+ u64 page, nr_dirty_pages = 0, nr_clean_pages = 0;
+ u64 step = vm_num_host_pages(mode, 1);
for (page = 0; page < host_num_pages; page += step) {
- uint64_t val = *(uint64_t *)(host_test_mem + page * host_page_size);
+ u64 val = *(u64 *)(host_test_mem + page * host_page_size);
bool bmap0_dirty = __test_and_clear_bit_le(page, bmap[0]);
/*
@@ -575,7 +575,7 @@ static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long **bmap)
}
static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
- uint64_t extra_mem_pages, void *guest_code)
+ u64 extra_mem_pages, void *guest_code)
{
struct kvm_vm *vm;
@@ -592,7 +592,7 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
struct test_params {
unsigned long iterations;
unsigned long interval;
- uint64_t phys_offset;
+ u64 phys_offset;
};
static void run_test(enum vm_guest_mode mode, void *arg)
@@ -601,7 +601,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
unsigned long *bmap[2];
- uint32_t ring_buf_idx = 0;
+ u32 ring_buf_idx = 0;
int sem_val;
if (!log_mode_supported()) {
@@ -667,7 +667,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
/* Cache the HVA pointer of the region */
- host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
+ host_test_mem = addr_gpa2hva(vm, (gpa_t)guest_test_phys_mem);
/* Export the shared variables to the guest */
sync_global_to_guest(vm, host_page_size);
diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
index f4644c9d2d3b..216f10644c1a 100644
--- a/tools/testing/selftests/kvm/get-reg-list.c
+++ b/tools/testing/selftests/kvm/get-reg-list.c
@@ -216,7 +216,7 @@ static void run_test(struct vcpu_reg_list *c)
* since we don't know the capabilities of any new registers.
*/
for_each_present_blessed_reg(i) {
- uint8_t addr[2048 / 8];
+ u8 addr[2048 / 8];
struct kvm_one_reg reg = {
.id = reg_list->reg[i],
.addr = (__u64)&addr,
diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index ec7644aae999..253e748c1d4a 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -171,7 +171,7 @@ static void test_numa_allocation(int fd, size_t total_size)
kvm_munmap(mem, total_size);
}
-static void test_collapse(int fd, uint64_t flags)
+static void test_collapse(int fd, u64 flags)
{
const size_t pmd_size = get_trans_hugepagesz();
void *reserved_addr;
@@ -346,7 +346,7 @@ static void test_invalid_punch_hole(int fd, size_t total_size)
}
static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm,
- uint64_t guest_memfd_flags)
+ u64 guest_memfd_flags)
{
size_t size;
int fd;
@@ -389,8 +389,8 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
static void test_guest_memfd_flags(struct kvm_vm *vm)
{
- uint64_t valid_flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS);
- uint64_t flag;
+ u64 valid_flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS);
+ u64 flag;
int fd;
for (flag = BIT(0); flag; flag <<= 1) {
@@ -419,7 +419,7 @@ do { \
#define gmem_test(__test, __vm, __flags) \
__gmem_test(__test, __vm, __flags, page_size * 4)
-static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags)
+static void __test_guest_memfd(struct kvm_vm *vm, u64 flags)
{
test_create_guest_memfd_multiple(vm);
test_create_guest_memfd_invalid_sizes(vm, flags);
@@ -452,7 +452,7 @@ static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags)
static void test_guest_memfd(unsigned long vm_type)
{
struct kvm_vm *vm = vm_create_barebones_type(vm_type);
- uint64_t flags;
+ u64 flags;
test_guest_memfd_flags(vm);
@@ -470,7 +470,7 @@ static void test_guest_memfd(unsigned long vm_type)
kvm_vm_free(vm);
}
-static void guest_code(uint8_t *mem, uint64_t size)
+static void guest_code(u8 *mem, u64 size)
{
size_t i;
@@ -489,12 +489,12 @@ static void test_guest_memfd_guest(void)
* the guest's code, stack, and page tables, and low memory contains
* the PCI hole and other MMIO regions that need to be avoided.
*/
- const uint64_t gpa = SZ_4G;
+ const gpa_t gpa = SZ_4G;
const int slot = 1;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- uint8_t *mem;
+ u8 *mem;
size_t size;
int fd, i;
@@ -510,7 +510,12 @@ static void test_guest_memfd_guest(void)
"Default VM type should support INIT_SHARED, supported flags = 0x%x",
vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS));
- size = vm->page_size;
+ /*
+ * Use the max of the host or guest page size for all operations, as
+ * KVM requires guest_memfd files and memslots to be sized to multiples
+ * of the host page size.
+ */
+ size = max_t(size_t, vm->page_size, page_size);
fd = vm_create_guest_memfd(vm, size, GUEST_MEMFD_FLAG_MMAP |
GUEST_MEMFD_FLAG_INIT_SHARED);
vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD, gpa, size, NULL, fd, 0);
@@ -519,7 +524,7 @@ static void test_guest_memfd_guest(void)
memset(mem, 0xaa, size);
kvm_munmap(mem, size);
- virt_pg_map(vm, gpa, gpa);
+ virt_map(vm, gpa, gpa, size / vm->page_size);
vcpu_args_set(vcpu, 2, gpa, size);
vcpu_run(vcpu);
diff --git a/tools/testing/selftests/kvm/guest_print_test.c b/tools/testing/selftests/kvm/guest_print_test.c
index bcf582852db9..79d3fc326e91 100644
--- a/tools/testing/selftests/kvm/guest_print_test.c
+++ b/tools/testing/selftests/kvm/guest_print_test.c
@@ -16,22 +16,22 @@
#include "ucall_common.h"
struct guest_vals {
- uint64_t a;
- uint64_t b;
- uint64_t type;
+ u64 a;
+ u64 b;
+ u64 type;
};
static struct guest_vals vals;
/* GUEST_PRINTF()/GUEST_ASSERT_FMT() does not support float or double. */
#define TYPE_LIST \
-TYPE(test_type_i64, I64, "%ld", int64_t) \
-TYPE(test_type_u64, U64u, "%lu", uint64_t) \
-TYPE(test_type_x64, U64x, "0x%lx", uint64_t) \
-TYPE(test_type_X64, U64X, "0x%lX", uint64_t) \
-TYPE(test_type_u32, U32u, "%u", uint32_t) \
-TYPE(test_type_x32, U32x, "0x%x", uint32_t) \
-TYPE(test_type_X32, U32X, "0x%X", uint32_t) \
+TYPE(test_type_i64, I64, "%ld", s64) \
+TYPE(test_type_u64, U64u, "%lu", u64) \
+TYPE(test_type_x64, U64x, "0x%lx", u64) \
+TYPE(test_type_X64, U64X, "0x%lX", u64) \
+TYPE(test_type_u32, U32u, "%u", u32) \
+TYPE(test_type_x32, U32x, "0x%x", u32) \
+TYPE(test_type_X32, U32X, "0x%X", u32) \
TYPE(test_type_int, INT, "%d", int) \
TYPE(test_type_char, CHAR, "%c", char) \
TYPE(test_type_str, STR, "'%s'", const char *) \
@@ -56,7 +56,7 @@ static void fn(struct kvm_vcpu *vcpu, T a, T b) \
\
snprintf(expected_printf, UCALL_BUFFER_LEN, PRINTF_FMT_##ext, a, b); \
snprintf(expected_assert, UCALL_BUFFER_LEN, ASSERT_FMT_##ext, a, b); \
- vals = (struct guest_vals){ (uint64_t)a, (uint64_t)b, TYPE_##ext }; \
+ vals = (struct guest_vals){ (u64)a, (u64)b, TYPE_##ext }; \
sync_global_to_guest(vcpu->vm, vals); \
run_test(vcpu, expected_printf, expected_assert); \
}
diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c b/tools/testing/selftests/kvm/hardware_disable_test.c
index 94bd6ed24cf3..3147f5c97e94 100644
--- a/tools/testing/selftests/kvm/hardware_disable_test.c
+++ b/tools/testing/selftests/kvm/hardware_disable_test.c
@@ -80,7 +80,7 @@ static inline void check_join(pthread_t thread, void **retval)
TEST_ASSERT(r == 0, "%s: failed to join thread", __func__);
}
-static void run_test(uint32_t run)
+static void run_test(u32 run)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
@@ -88,7 +88,7 @@ static void run_test(uint32_t run)
pthread_t threads[VCPU_NUM];
pthread_t throw_away;
void *b;
- uint32_t i, j;
+ u32 i, j;
CPU_ZERO(&cpu_set);
for (i = 0; i < VCPU_NUM; i++)
@@ -149,7 +149,7 @@ void wait_for_child_setup(pid_t pid)
int main(int argc, char **argv)
{
- uint32_t i;
+ u32 i;
int s, r;
pid_t pid;
diff --git a/tools/testing/selftests/kvm/include/arm64/arch_timer.h b/tools/testing/selftests/kvm/include/arm64/arch_timer.h
index e2c4e9f0010f..a5836d4ab7ee 100644
--- a/tools/testing/selftests/kvm/include/arm64/arch_timer.h
+++ b/tools/testing/selftests/kvm/include/arm64/arch_timer.h
@@ -18,20 +18,20 @@ enum arch_timer {
#define CTL_ISTATUS (1 << 2)
#define msec_to_cycles(msec) \
- (timer_get_cntfrq() * (uint64_t)(msec) / 1000)
+ (timer_get_cntfrq() * (u64)(msec) / 1000)
#define usec_to_cycles(usec) \
- (timer_get_cntfrq() * (uint64_t)(usec) / 1000000)
+ (timer_get_cntfrq() * (u64)(usec) / 1000000)
#define cycles_to_usec(cycles) \
- ((uint64_t)(cycles) * 1000000 / timer_get_cntfrq())
+ ((u64)(cycles) * 1000000 / timer_get_cntfrq())
-static inline uint32_t timer_get_cntfrq(void)
+static inline u32 timer_get_cntfrq(void)
{
return read_sysreg(cntfrq_el0);
}
-static inline uint64_t timer_get_cntct(enum arch_timer timer)
+static inline u64 timer_get_cntct(enum arch_timer timer)
{
isb();
@@ -48,7 +48,7 @@ static inline uint64_t timer_get_cntct(enum arch_timer timer)
return 0;
}
-static inline void timer_set_cval(enum arch_timer timer, uint64_t cval)
+static inline void timer_set_cval(enum arch_timer timer, u64 cval)
{
switch (timer) {
case VIRTUAL:
@@ -64,7 +64,7 @@ static inline void timer_set_cval(enum arch_timer timer, uint64_t cval)
isb();
}
-static inline uint64_t timer_get_cval(enum arch_timer timer)
+static inline u64 timer_get_cval(enum arch_timer timer)
{
switch (timer) {
case VIRTUAL:
@@ -79,7 +79,7 @@ static inline uint64_t timer_get_cval(enum arch_timer timer)
return 0;
}
-static inline void timer_set_tval(enum arch_timer timer, int32_t tval)
+static inline void timer_set_tval(enum arch_timer timer, s32 tval)
{
switch (timer) {
case VIRTUAL:
@@ -95,7 +95,7 @@ static inline void timer_set_tval(enum arch_timer timer, int32_t tval)
isb();
}
-static inline int32_t timer_get_tval(enum arch_timer timer)
+static inline s32 timer_get_tval(enum arch_timer timer)
{
isb();
switch (timer) {
@@ -111,7 +111,7 @@ static inline int32_t timer_get_tval(enum arch_timer timer)
return 0;
}
-static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl)
+static inline void timer_set_ctl(enum arch_timer timer, u32 ctl)
{
switch (timer) {
case VIRTUAL:
@@ -127,7 +127,7 @@ static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl)
isb();
}
-static inline uint32_t timer_get_ctl(enum arch_timer timer)
+static inline u32 timer_get_ctl(enum arch_timer timer)
{
switch (timer) {
case VIRTUAL:
@@ -142,15 +142,15 @@ static inline uint32_t timer_get_ctl(enum arch_timer timer)
return 0;
}
-static inline void timer_set_next_cval_ms(enum arch_timer timer, uint32_t msec)
+static inline void timer_set_next_cval_ms(enum arch_timer timer, u32 msec)
{
- uint64_t now_ct = timer_get_cntct(timer);
- uint64_t next_ct = now_ct + msec_to_cycles(msec);
+ u64 now_ct = timer_get_cntct(timer);
+ u64 next_ct = now_ct + msec_to_cycles(msec);
timer_set_cval(timer, next_ct);
}
-static inline void timer_set_next_tval_ms(enum arch_timer timer, uint32_t msec)
+static inline void timer_set_next_tval_ms(enum arch_timer timer, u32 msec)
{
timer_set_tval(timer, msec_to_cycles(msec));
}
diff --git a/tools/testing/selftests/kvm/include/arm64/delay.h b/tools/testing/selftests/kvm/include/arm64/delay.h
index 329e4f5079ea..6a5d4634af2c 100644
--- a/tools/testing/selftests/kvm/include/arm64/delay.h
+++ b/tools/testing/selftests/kvm/include/arm64/delay.h
@@ -8,10 +8,10 @@
#include "arch_timer.h"
-static inline void __delay(uint64_t cycles)
+static inline void __delay(u64 cycles)
{
enum arch_timer timer = VIRTUAL;
- uint64_t start = timer_get_cntct(timer);
+ u64 start = timer_get_cntct(timer);
while ((timer_get_cntct(timer) - start) < cycles)
cpu_relax();
diff --git a/tools/testing/selftests/kvm/include/arm64/gic.h b/tools/testing/selftests/kvm/include/arm64/gic.h
index cc7a7f34ed37..615745093c98 100644
--- a/tools/testing/selftests/kvm/include/arm64/gic.h
+++ b/tools/testing/selftests/kvm/include/arm64/gic.h
@@ -48,8 +48,8 @@ void gic_set_dir(unsigned int intid);
* split is true, EOI drops the priority and deactivates the interrupt.
*/
void gic_set_eoi_split(bool split);
-void gic_set_priority_mask(uint64_t mask);
-void gic_set_priority(uint32_t intid, uint32_t prio);
+void gic_set_priority_mask(u64 mask);
+void gic_set_priority(u32 intid, u32 prio);
void gic_irq_set_active(unsigned int intid);
void gic_irq_clear_active(unsigned int intid);
bool gic_irq_get_active(unsigned int intid);
@@ -59,7 +59,7 @@ bool gic_irq_get_pending(unsigned int intid);
void gic_irq_set_config(unsigned int intid, bool is_edge);
void gic_irq_set_group(unsigned int intid, bool group);
-void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size,
- vm_paddr_t pend_table);
+void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size,
+ gpa_t pend_table);
#endif /* SELFTEST_KVM_GIC_H */
diff --git a/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h b/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h
index 58feef3eb386..a43a407e2d5c 100644
--- a/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h
+++ b/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h
@@ -5,11 +5,10 @@
#include <linux/sizes.h>
-void its_init(vm_paddr_t coll_tbl, size_t coll_tbl_sz,
- vm_paddr_t device_tbl, size_t device_tbl_sz,
- vm_paddr_t cmdq, size_t cmdq_size);
+void its_init(gpa_t coll_tbl, size_t coll_tbl_sz, gpa_t device_tbl,
+ size_t device_tbl_sz, gpa_t cmdq, size_t cmdq_size);
-void its_send_mapd_cmd(void *cmdq_base, u32 device_id, vm_paddr_t itt_base,
+void its_send_mapd_cmd(void *cmdq_base, u32 device_id, gpa_t itt_base,
size_t itt_size, bool valid);
void its_send_mapc_cmd(void *cmdq_base, u32 vcpu_id, u32 collection_id, bool valid);
void its_send_mapti_cmd(void *cmdq_base, u32 device_id, u32 event_id,
diff --git a/tools/testing/selftests/kvm/include/arm64/processor.h b/tools/testing/selftests/kvm/include/arm64/processor.h
index ac97a1c436fc..b8a902ba8573 100644
--- a/tools/testing/selftests/kvm/include/arm64/processor.h
+++ b/tools/testing/selftests/kvm/include/arm64/processor.h
@@ -128,7 +128,7 @@
#define PTE_ADDR_51_50_LPA2_SHIFT 8
void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
-struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
struct kvm_vcpu_init *init, void *guest_code);
struct ex_regs {
@@ -167,8 +167,8 @@ enum {
(v) == VECTOR_SYNC_LOWER_64 || \
(v) == VECTOR_SYNC_LOWER_32)
-void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
- uint32_t *ipa16k, uint32_t *ipa64k);
+void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k,
+ u32 *ipa16k, u32 *ipa64k);
void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
@@ -179,8 +179,8 @@ void vm_install_exception_handler(struct kvm_vm *vm,
void vm_install_sync_handler(struct kvm_vm *vm,
int vector, int ec, handler_fn handler);
-uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level);
-uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva);
+u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level);
+u64 *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva);
static inline void cpu_relax(void)
{
@@ -287,9 +287,9 @@ struct arm_smccc_res {
* @res: pointer to write the return values from registers x0-x3
*
*/
-void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
- uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
- uint64_t arg6, struct arm_smccc_res *res);
+void smccc_hvc(u32 function_id, u64 arg0, u64 arg1,
+ u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+ u64 arg6, struct arm_smccc_res *res);
/**
* smccc_smc - Invoke a SMCCC function using the smc conduit
@@ -298,9 +298,9 @@ void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
* @res: pointer to write the return values from registers x0-x3
*
*/
-void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
- uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
- uint64_t arg6, struct arm_smccc_res *res);
+void smccc_smc(u32 function_id, u64 arg0, u64 arg1,
+ u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+ u64 arg6, struct arm_smccc_res *res);
/* Execute a Wait For Interrupt instruction. */
void wfi(void);
diff --git a/tools/testing/selftests/kvm/include/arm64/ucall.h b/tools/testing/selftests/kvm/include/arm64/ucall.h
index 4ec801f37f00..2210d3d94c40 100644
--- a/tools/testing/selftests/kvm/include/arm64/ucall.h
+++ b/tools/testing/selftests/kvm/include/arm64/ucall.h
@@ -10,9 +10,9 @@
* ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
* VM), it must not be accessed from host code.
*/
-extern vm_vaddr_t *ucall_exit_mmio_addr;
+extern gva_t *ucall_exit_mmio_addr;
-static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+static inline void ucall_arch_do_ucall(gva_t uc)
{
WRITE_ONCE(*ucall_exit_mmio_addr, uc);
}
diff --git a/tools/testing/selftests/kvm/include/arm64/vgic.h b/tools/testing/selftests/kvm/include/arm64/vgic.h
index 688beccc9436..1f8b04373987 100644
--- a/tools/testing/selftests/kvm/include/arm64/vgic.h
+++ b/tools/testing/selftests/kvm/include/arm64/vgic.h
@@ -11,27 +11,27 @@
#include "kvm_util.h"
#define REDIST_REGION_ATTR_ADDR(count, base, flags, index) \
- (((uint64_t)(count) << 52) | \
- ((uint64_t)((base) >> 16) << 16) | \
- ((uint64_t)(flags) << 12) | \
+ (((u64)(count) << 52) | \
+ ((u64)((base) >> 16) << 16) | \
+ ((u64)(flags) << 12) | \
index)
bool kvm_supports_vgic_v3(void);
-int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs);
+int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs);
void __vgic_v3_init(int fd);
-int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs);
+int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs);
#define VGIC_MAX_RESERVED 1023
-void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
-int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
+void kvm_irq_set_level_info(int gic_fd, u32 intid, int level);
+int _kvm_irq_set_level_info(int gic_fd, u32 intid, int level);
-void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
-int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
+void kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level);
+int _kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level);
/* The vcpu arg only applies to private interrupts. */
-void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu);
-void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu);
+void kvm_irq_write_ispendr(int gic_fd, u32 intid, struct kvm_vcpu *vcpu);
+void kvm_irq_write_isactiver(int gic_fd, u32 intid, struct kvm_vcpu *vcpu);
#define KVM_IRQCHIP_NUM_PINS (1020 - 32)
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index f861242b4ae8..2ecaaa0e9965 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -58,7 +58,7 @@ struct kvm_binary_stats {
struct kvm_vcpu {
struct list_head list;
- uint32_t id;
+ u32 id;
int fd;
struct kvm_vm *vm;
struct kvm_run *run;
@@ -70,8 +70,8 @@ struct kvm_vcpu {
#endif
struct kvm_binary_stats stats;
struct kvm_dirty_gfn *dirty_gfns;
- uint32_t fetch_index;
- uint32_t dirty_gfns_count;
+ u32 fetch_index;
+ u32 dirty_gfns_count;
};
struct userspace_mem_regions {
@@ -90,7 +90,7 @@ enum kvm_mem_region_type {
struct kvm_mmu {
bool pgd_created;
- uint64_t pgd;
+ u64 pgd;
int pgtable_levels;
struct kvm_mmu_arch arch;
@@ -105,16 +105,16 @@ struct kvm_vm {
unsigned int page_shift;
unsigned int pa_bits;
unsigned int va_bits;
- uint64_t max_gfn;
+ u64 max_gfn;
struct list_head vcpus;
struct userspace_mem_regions regions;
struct sparsebit *vpages_valid;
struct sparsebit *vpages_mapped;
bool has_irqchip;
- vm_paddr_t ucall_mmio_addr;
- vm_vaddr_t handlers;
- uint32_t dirty_ring_size;
- uint64_t gpa_tag_mask;
+ gpa_t ucall_mmio_addr;
+ gva_t handlers;
+ u32 dirty_ring_size;
+ gpa_t gpa_tag_mask;
/*
* "mmu" is the guest's stage-1, with a short name because the vast
@@ -132,7 +132,7 @@ struct kvm_vm {
* allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
* memslot.
*/
- uint32_t memslots[NR_MEM_REGIONS];
+ u32 memslots[NR_MEM_REGIONS];
};
struct vcpu_reg_sublist {
@@ -164,7 +164,7 @@ struct vcpu_reg_list {
else
struct userspace_mem_region *
-memslot2region(struct kvm_vm *vm, uint32_t memslot);
+memslot2region(struct kvm_vm *vm, u32 memslot);
static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
enum kvm_mem_region_type type)
@@ -213,13 +213,13 @@ enum vm_guest_mode {
};
struct vm_shape {
- uint32_t type;
- uint8_t mode;
- uint8_t pad0;
- uint16_t pad1;
+ u32 type;
+ u8 mode;
+ u8 pad0;
+ u16 pad1;
};
-kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));
+kvm_static_assert(sizeof(struct vm_shape) == sizeof(u64));
#define VM_TYPE_DEFAULT 0
@@ -404,21 +404,22 @@ static inline int vm_check_cap(struct kvm_vm *vm, long cap)
return ret;
}
-static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
+static inline int __vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0)
{
struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
-static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
+
+static inline void vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0)
{
struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
-static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
- uint64_t size, uint64_t attributes)
+static inline void vm_set_memory_attributes(struct kvm_vm *vm, gpa_t gpa,
+ u64 size, u64 attributes)
{
struct kvm_memory_attributes attr = {
.attributes = attributes,
@@ -438,35 +439,35 @@ static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
}
-static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
- uint64_t size)
+static inline void vm_mem_set_private(struct kvm_vm *vm, gpa_t gpa,
+ u64 size)
{
vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}
-static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
- uint64_t size)
+static inline void vm_mem_set_shared(struct kvm_vm *vm, gpa_t gpa,
+ u64 size)
{
vm_set_memory_attributes(vm, gpa, size, 0);
}
-void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
+void vm_guest_mem_fallocate(struct kvm_vm *vm, gpa_t gpa, u64 size,
bool punch_hole);
-static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
- uint64_t size)
+static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, gpa_t gpa,
+ u64 size)
{
vm_guest_mem_fallocate(vm, gpa, size, true);
}
-static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
- uint64_t size)
+static inline void vm_guest_mem_allocate(struct kvm_vm *vm, gpa_t gpa,
+ u64 size)
{
vm_guest_mem_fallocate(vm, gpa, size, false);
}
-void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
-const char *vm_guest_mode_string(uint32_t i);
+void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size);
+const char *vm_guest_mode_string(u32 i);
void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
@@ -474,7 +475,7 @@ void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);
-void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
+void vm_dump(FILE *stream, struct kvm_vm *vm, u8 indent);
static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
@@ -484,7 +485,7 @@ static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
}
static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
- uint64_t first_page, uint32_t num_pages)
+ u64 first_page, u32 num_pages)
{
struct kvm_clear_dirty_log args = {
.dirty_bitmap = log,
@@ -496,14 +497,14 @@ static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log
vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}
-static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
+static inline u32 kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}
static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
- uint64_t address,
- uint64_t size, bool pio)
+ u64 address,
+ u64 size, bool pio)
{
struct kvm_coalesced_mmio_zone zone = {
.addr = address,
@@ -515,8 +516,8 @@ static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
}
static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
- uint64_t address,
- uint64_t size, bool pio)
+ u64 address,
+ u64 size, bool pio)
{
struct kvm_coalesced_mmio_zone zone = {
.addr = address,
@@ -535,8 +536,8 @@ static inline int vm_get_stats_fd(struct kvm_vm *vm)
return fd;
}
-static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
- uint32_t flags)
+static inline int __kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd,
+ u32 flags)
{
struct kvm_irqfd irqfd = {
.fd = eventfd,
@@ -548,20 +549,19 @@ static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
return __vm_ioctl(vm, KVM_IRQFD, &irqfd);
}
-static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
- uint32_t flags)
+static inline void kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd, u32 flags)
{
int ret = __kvm_irqfd(vm, gsi, eventfd, flags);
TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm);
}
-static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
+static inline void kvm_assign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd)
{
kvm_irqfd(vm, gsi, eventfd, 0);
}
-static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
+static inline void kvm_deassign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd)
{
kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
}
@@ -610,15 +610,15 @@ static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc
}
void read_stat_data(int stats_fd, struct kvm_stats_header *header,
- struct kvm_stats_desc *desc, uint64_t *data,
+ struct kvm_stats_desc *desc, u64 *data,
size_t max_elements);
void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
- uint64_t *data, size_t max_elements);
+ u64 *data, size_t max_elements);
#define __get_stat(stats, stat) \
({ \
- uint64_t data; \
+ u64 data; \
\
kvm_get_stat(stats, #stat, &data, 1); \
data; \
@@ -664,8 +664,8 @@ static inline bool is_smt_on(void)
void vm_create_irqchip(struct kvm_vm *vm);
-static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
- uint64_t flags)
+static inline int __vm_create_guest_memfd(struct kvm_vm *vm, u64 size,
+ u64 flags)
{
struct kvm_create_guest_memfd guest_memfd = {
.size = size,
@@ -675,8 +675,8 @@ static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
}
-static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
- uint64_t flags)
+static inline int vm_create_guest_memfd(struct kvm_vm *vm, u64 size,
+ u64 flags)
{
int fd = __vm_create_guest_memfd(vm, size, flags);
@@ -684,24 +684,23 @@ static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
return fd;
}
-void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva);
-int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva);
-void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva,
- uint32_t guest_memfd, uint64_t guest_memfd_offset);
-int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva,
- uint32_t guest_memfd, uint64_t guest_memfd_offset);
+void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva);
+int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva);
+void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva,
+ u32 guest_memfd, u64 guest_memfd_offset);
+int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva,
+ u32 guest_memfd, u64 guest_memfd_offset);
void vm_userspace_mem_region_add(struct kvm_vm *vm,
enum vm_mem_backing_src_type src_type,
- uint64_t gpa, uint32_t slot, uint64_t npages,
- uint32_t flags);
+ gpa_t gpa, u32 slot, u64 npages, u32 flags);
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
- uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags,
- int guest_memfd_fd, uint64_t guest_memfd_offset);
+ gpa_t gpa, u32 slot, u64 npages, u32 flags,
+ int guest_memfd_fd, u64 guest_memfd_offset);
#ifndef vm_arch_has_protected_memory
static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
@@ -710,36 +709,34 @@ static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
}
#endif
-void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
-void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot);
-void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
-void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
-struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
-void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
-vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
-vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
-vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
- enum kvm_mem_region_type type);
-vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
- vm_vaddr_t vaddr_min,
- enum kvm_mem_region_type type);
-vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
-vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
- enum kvm_mem_region_type type);
-vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
-
-void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags);
+void vm_mem_region_reload(struct kvm_vm *vm, u32 slot);
+void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa);
+void vm_mem_region_delete(struct kvm_vm *vm, u32 slot);
+struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id);
+void vm_populate_gva_bitmap(struct kvm_vm *vm);
+gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva);
+gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva);
+gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+ enum kvm_mem_region_type type);
+gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+ enum kvm_mem_region_type type);
+gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages);
+gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type);
+gva_t vm_alloc_page(struct kvm_vm *vm);
+
+void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
unsigned int npages);
-void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
-void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
-vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
-void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
+void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa);
+void *addr_gva2hva(struct kvm_vm *vm, gva_t gva);
+gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
+void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa);
#ifndef vcpu_arch_put_guest
#define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0)
#endif
-static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
+static inline gpa_t vm_untag_gpa(struct kvm_vm *vm, gpa_t gpa)
{
return gpa & ~vm->gpa_tag_mask;
}
@@ -755,8 +752,8 @@ static inline int __vcpu_run(struct kvm_vcpu *vcpu)
void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);
-static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
- uint64_t arg0)
+static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, u32 cap,
+ u64 arg0)
{
struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
@@ -811,31 +808,34 @@ static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}
-static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
+static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id, void *addr)
{
- struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };
+ struct kvm_one_reg reg = { .id = id, .addr = (u64)addr };
return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
-static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+
+static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
- struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
+ struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };
return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
-static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id)
+
+static inline u64 vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id)
{
- uint64_t val;
- struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
+ u64 val;
+ struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };
TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);
vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
return val;
}
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
- struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
+ struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };
TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);
@@ -880,75 +880,75 @@ static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
return fd;
}
-int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);
+int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr);
-static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
+static inline void kvm_has_device_attr(int dev_fd, u32 group, u64 attr)
{
int ret = __kvm_has_device_attr(dev_fd, group, attr);
TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}
-int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);
+int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val);
-static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
- uint64_t attr, void *val)
+static inline void kvm_device_attr_get(int dev_fd, u32 group,
+ u64 attr, void *val)
{
int ret = __kvm_device_attr_get(dev_fd, group, attr, val);
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}
-int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);
+int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val);
-static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
- uint64_t attr, void *val)
+static inline void kvm_device_attr_set(int dev_fd, u32 group,
+ u64 attr, void *val)
{
int ret = __kvm_device_attr_set(dev_fd, group, attr, val);
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}
-static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
- uint64_t attr)
+static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group,
+ u64 attr)
{
return __kvm_has_device_attr(vcpu->fd, group, attr);
}
-static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
- uint64_t attr)
+static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group,
+ u64 attr)
{
kvm_has_device_attr(vcpu->fd, group, attr);
}
-static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
- uint64_t attr, void *val)
+static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group,
+ u64 attr, void *val)
{
return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}
-static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
- uint64_t attr, void *val)
+static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group,
+ u64 attr, void *val)
{
kvm_device_attr_get(vcpu->fd, group, attr, val);
}
-static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
- uint64_t attr, void *val)
+static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group,
+ u64 attr, void *val)
{
return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}
-static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
- uint64_t attr, void *val)
+static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group,
+ u64 attr, void *val)
{
kvm_device_attr_set(vcpu->fd, group, attr, val);
}
-int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
-int __kvm_create_device(struct kvm_vm *vm, uint64_t type);
+int __kvm_test_create_device(struct kvm_vm *vm, u64 type);
+int __kvm_create_device(struct kvm_vm *vm, u64 type);
-static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
+static inline int kvm_create_device(struct kvm_vm *vm, u64 type)
{
int fd = __kvm_create_device(vm, type);
@@ -964,7 +964,7 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);
* Input Args:
* vcpu - vCPU
* num - number of arguments
- * ... - arguments, each of type uint64_t
+ * ... - arguments, each of type u64
*
* Output Args: None
*
@@ -972,40 +972,38 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);
*
* Sets the first @num input parameters for the function at @vcpu's entry point,
* per the C calling convention of the architecture, to the values given as
- * variable args. Each of the variable args is expected to be of type uint64_t.
+ * variable args. Each of the variable args is expected to be of type u64.
* The maximum @num can be is specific to the architecture.
*/
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
-void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
-int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
+void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level);
+int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level);
#define KVM_MAX_IRQ_ROUTES 4096
struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
- uint32_t gsi, uint32_t pin);
+ u32 gsi, u32 pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
const char *exit_reason_str(unsigned int exit_reason);
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
- uint32_t memslot);
-vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
- vm_paddr_t paddr_min, uint32_t memslot,
- bool protected);
-vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
+gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot);
+gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, gpa_t min_gpa,
+ u32 memslot, bool protected);
+gpa_t vm_alloc_page_table(struct kvm_vm *vm);
-static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
- vm_paddr_t paddr_min, uint32_t memslot)
+static inline gpa_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+ gpa_t min_gpa, u32 memslot)
{
/*
* By default, allocate memory as protected for VMs that support
* protected memory, as the majority of memory for such VMs is
* protected, i.e. using shared memory is effectively opt-in.
*/
- return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
+ return __vm_phy_pages_alloc(vm, num, min_gpa, memslot,
vm_arch_has_protected_memory(vm));
}
@@ -1016,8 +1014,8 @@ static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
* calculate the amount of memory needed for per-vCPU data, e.g. stacks.
*/
struct kvm_vm *____vm_create(struct vm_shape shape);
-struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
- uint64_t nr_extra_pages);
+struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus,
+ u64 nr_extra_pages);
static inline struct kvm_vm *vm_create_barebones(void)
{
@@ -1034,16 +1032,16 @@ static inline struct kvm_vm *vm_create_barebones_type(unsigned long type)
return ____vm_create(shape);
}
-static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
+static inline struct kvm_vm *vm_create(u32 nr_runnable_vcpus)
{
return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
}
-struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
- uint64_t extra_mem_pages,
+struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus,
+ u64 extra_mem_pages,
void *guest_code, struct kvm_vcpu *vcpus[]);
-static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
+static inline struct kvm_vm *vm_create_with_vcpus(u32 nr_vcpus,
void *guest_code,
struct kvm_vcpu *vcpus[])
{
@@ -1054,7 +1052,7 @@ static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
struct kvm_vcpu **vcpu,
- uint64_t extra_mem_pages,
+ u64 extra_mem_pages,
void *guest_code);
/*
@@ -1062,7 +1060,7 @@ struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
* additional pages of guest memory. Returns the VM and vCPU (via out param).
*/
static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
- uint64_t extra_mem_pages,
+ u64 extra_mem_pages,
void *guest_code)
{
return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
@@ -1084,7 +1082,7 @@ static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape
struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
-void kvm_set_files_rlimit(uint32_t nr_vcpus);
+void kvm_set_files_rlimit(u32 nr_vcpus);
int __pin_task_to_cpu(pthread_t task, int cpu);
@@ -1115,7 +1113,7 @@ static inline int pin_self_to_any_cpu(void)
}
void kvm_print_vcpu_pinning_help(void);
-void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
+void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[],
int nr_vcpus);
unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
@@ -1131,12 +1129,12 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
}
#define sync_global_to_guest(vm, g) ({ \
- typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
+ typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \
memcpy(_p, &(g), sizeof(g)); \
})
#define sync_global_from_guest(vm, g) ({ \
- typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
+ typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \
memcpy(&(g), _p, sizeof(g)); \
})
@@ -1147,7 +1145,7 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
* undesirable to change the host's copy of the global.
*/
#define write_guest_global(vm, g, val) ({ \
- typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
+ typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \
typeof(g) _val = val; \
\
memcpy(_p, &(_val), sizeof(g)); \
@@ -1156,10 +1154,10 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);
void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
- uint8_t indent);
+ u8 indent);
static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
- uint8_t indent)
+ u8 indent)
{
vcpu_arch_dump(stream, vcpu, indent);
}
@@ -1171,10 +1169,10 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
* vm - Virtual Machine
* vcpu_id - The id of the VCPU to add to the VM.
*/
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id);
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);
-static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
void *guest_code)
{
struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);
@@ -1185,10 +1183,10 @@ static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
}
/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
-struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);
+struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id);
static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
- uint32_t vcpu_id)
+ u32 vcpu_id)
{
return vm_arch_vcpu_recreate(vm, vcpu_id);
}
@@ -1203,27 +1201,15 @@ static inline void virt_pgd_alloc(struct kvm_vm *vm)
}
/*
- * VM Virtual Page Map
- *
- * Input Args:
- * vm - Virtual Machine
- * vaddr - VM Virtual Address
- * paddr - VM Physical Address
- * memslot - Memory region slot for new virtual translation tables
- *
- * Output Args: None
- *
- * Return: None
- *
* Within @vm, creates a virtual translation for the page starting
- * at @vaddr to the page starting at @paddr.
+ * at @gva to the page starting at @gpa.
*/
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa);
-static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+static inline void virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
- virt_arch_pg_map(vm, vaddr, paddr);
- sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
+ virt_arch_pg_map(vm, gva, gpa);
+ sparsebit_set(vm->vpages_mapped, gva >> vm->page_shift);
}
@@ -1242,9 +1228,9 @@ static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr
* Returns the VM physical address of the translated VM virtual
* address given by @gva.
*/
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva);
-static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+static inline gpa_t addr_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
return addr_arch_gva2gpa(vm, gva);
}
@@ -1264,9 +1250,9 @@ static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
* Dumps to the FILE stream given by @stream, the contents of all the
* virtual translation tables for the VM given by @vm.
*/
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent);
-static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+static inline void virt_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
virt_arch_dump(stream, vm, indent);
}
@@ -1277,7 +1263,7 @@ static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}
-static inline uint64_t vm_page_align(struct kvm_vm *vm, uint64_t v)
+static inline u64 vm_page_align(struct kvm_vm *vm, u64 v)
{
return (v + vm->page_size - 1) & ~(vm->page_size - 1);
}
@@ -1293,9 +1279,9 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus);
void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm);
void kvm_arch_vm_release(struct kvm_vm *vm);
-bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);
+bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa);
-uint32_t guest_get_vcpuid(void);
+u32 guest_get_vcpuid(void);
bool kvm_arch_has_default_irqchip(void);
diff --git a/tools/testing/selftests/kvm/include/kvm_util_types.h b/tools/testing/selftests/kvm/include/kvm_util_types.h
index 0366e9bce7f9..ed0087e31674 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_types.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_types.h
@@ -2,6 +2,8 @@
#ifndef SELFTEST_KVM_UTIL_TYPES_H
#define SELFTEST_KVM_UTIL_TYPES_H
+#include <linux/types.h>
+
/*
* Provide a version of static_assert() that is guaranteed to have an optional
* message param. _GNU_SOURCE is defined for all KVM selftests, _GNU_SOURCE
@@ -14,9 +16,9 @@
#define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg)
#define kvm_static_assert(expr, ...) __kvm_static_assert(expr, ##__VA_ARGS__, #expr)
-typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
-typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
+typedef u64 gpa_t; /* Virtual Machine (Guest) physical address */
+typedef u64 gva_t; /* Virtual Machine (Guest) virtual address */
-#define INVALID_GPA (~(uint64_t)0)
+#define INVALID_GPA (~(u64)0)
#endif /* SELFTEST_KVM_UTIL_TYPES_H */
diff --git a/tools/testing/selftests/kvm/include/loongarch/arch_timer.h b/tools/testing/selftests/kvm/include/loongarch/arch_timer.h
index 2ed106b32c81..3888aeeb3524 100644
--- a/tools/testing/selftests/kvm/include/loongarch/arch_timer.h
+++ b/tools/testing/selftests/kvm/include/loongarch/arch_timer.h
@@ -70,9 +70,9 @@ static inline void timer_set_next_cmp_ms(unsigned int msec, bool period)
csr_write(val, LOONGARCH_CSR_TCFG);
}
-static inline void __delay(uint64_t cycles)
+static inline void __delay(u64 cycles)
{
- uint64_t start = timer_get_cycles();
+ u64 start = timer_get_cycles();
while ((timer_get_cycles() - start) < cycles)
cpu_relax();
diff --git a/tools/testing/selftests/kvm/include/loongarch/ucall.h b/tools/testing/selftests/kvm/include/loongarch/ucall.h
index 4ec801f37f00..2210d3d94c40 100644
--- a/tools/testing/selftests/kvm/include/loongarch/ucall.h
+++ b/tools/testing/selftests/kvm/include/loongarch/ucall.h
@@ -10,9 +10,9 @@
* ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
* VM), it must not be accessed from host code.
*/
-extern vm_vaddr_t *ucall_exit_mmio_addr;
+extern gva_t *ucall_exit_mmio_addr;
-static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+static inline void ucall_arch_do_ucall(gva_t uc)
{
WRITE_ONCE(*ucall_exit_mmio_addr, uc);
}
diff --git a/tools/testing/selftests/kvm/include/memstress.h b/tools/testing/selftests/kvm/include/memstress.h
index 9071eb6dea60..0d1d6230cc05 100644
--- a/tools/testing/selftests/kvm/include/memstress.h
+++ b/tools/testing/selftests/kvm/include/memstress.h
@@ -20,9 +20,9 @@
#define MEMSTRESS_MEM_SLOT_INDEX 1
struct memstress_vcpu_args {
- uint64_t gpa;
- uint64_t gva;
- uint64_t pages;
+ gpa_t gpa;
+ gva_t gva;
+ u64 pages;
/* Only used by the host userspace part of the vCPU thread */
struct kvm_vcpu *vcpu;
@@ -32,11 +32,11 @@ struct memstress_vcpu_args {
struct memstress_args {
struct kvm_vm *vm;
/* The starting address and size of the guest test region. */
- uint64_t gpa;
- uint64_t size;
- uint64_t guest_page_size;
- uint32_t random_seed;
- uint32_t write_percent;
+ gpa_t gpa;
+ u64 size;
+ u64 guest_page_size;
+ u32 random_seed;
+ u32 write_percent;
/* Run vCPUs in L2 instead of L1, if the architecture supports it. */
bool nested;
@@ -45,7 +45,7 @@ struct memstress_args {
/* True if all vCPUs are pinned to pCPUs */
bool pin_vcpus;
/* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */
- uint32_t vcpu_to_pcpu[KVM_MAX_VCPUS];
+ u32 vcpu_to_pcpu[KVM_MAX_VCPUS];
/* Test is done, stop running vCPUs. */
bool stop_vcpus;
@@ -56,27 +56,27 @@ struct memstress_args {
extern struct memstress_args memstress_args;
struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
- uint64_t vcpu_memory_bytes, int slots,
+ u64 vcpu_memory_bytes, int slots,
enum vm_mem_backing_src_type backing_src,
bool partition_vcpu_memory_access);
void memstress_destroy_vm(struct kvm_vm *vm);
-void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
+void memstress_set_write_percent(struct kvm_vm *vm, u32 write_percent);
void memstress_set_random_access(struct kvm_vm *vm, bool random_access);
void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *));
void memstress_join_vcpu_threads(int vcpus);
-void memstress_guest_code(uint32_t vcpu_id);
+void memstress_guest_code(u32 vcpu_id);
-uint64_t memstress_nested_pages(int nr_vcpus);
+u64 memstress_nested_pages(int nr_vcpus);
void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots);
void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots);
void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots);
void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
- int slots, uint64_t pages_per_slot);
-unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot);
+ int slots, u64 pages_per_slot);
+unsigned long **memstress_alloc_bitmaps(int slots, u64 pages_per_slot);
void memstress_free_bitmaps(unsigned long *bitmaps[], int slots);
#endif /* SELFTEST_KVM_MEMSTRESS_H */
diff --git a/tools/testing/selftests/kvm/include/riscv/arch_timer.h b/tools/testing/selftests/kvm/include/riscv/arch_timer.h
index 225d81dad064..28ffc014da2a 100644
--- a/tools/testing/selftests/kvm/include/riscv/arch_timer.h
+++ b/tools/testing/selftests/kvm/include/riscv/arch_timer.h
@@ -14,25 +14,25 @@
static unsigned long timer_freq;
#define msec_to_cycles(msec) \
- ((timer_freq) * (uint64_t)(msec) / 1000)
+ ((timer_freq) * (u64)(msec) / 1000)
#define usec_to_cycles(usec) \
- ((timer_freq) * (uint64_t)(usec) / 1000000)
+ ((timer_freq) * (u64)(usec) / 1000000)
#define cycles_to_usec(cycles) \
- ((uint64_t)(cycles) * 1000000 / (timer_freq))
+ ((u64)(cycles) * 1000000 / (timer_freq))
-static inline uint64_t timer_get_cycles(void)
+static inline u64 timer_get_cycles(void)
{
return csr_read(CSR_TIME);
}
-static inline void timer_set_cmp(uint64_t cval)
+static inline void timer_set_cmp(u64 cval)
{
csr_write(CSR_STIMECMP, cval);
}
-static inline uint64_t timer_get_cmp(void)
+static inline u64 timer_get_cmp(void)
{
return csr_read(CSR_STIMECMP);
}
@@ -47,17 +47,17 @@ static inline void timer_irq_disable(void)
csr_clear(CSR_SIE, IE_TIE);
}
-static inline void timer_set_next_cmp_ms(uint32_t msec)
+static inline void timer_set_next_cmp_ms(u32 msec)
{
- uint64_t now_ct = timer_get_cycles();
- uint64_t next_ct = now_ct + msec_to_cycles(msec);
+ u64 now_ct = timer_get_cycles();
+ u64 next_ct = now_ct + msec_to_cycles(msec);
timer_set_cmp(next_ct);
}
-static inline void __delay(uint64_t cycles)
+static inline void __delay(u64 cycles)
{
- uint64_t start = timer_get_cycles();
+ u64 start = timer_get_cycles();
while ((timer_get_cycles() - start) < cycles)
cpu_relax();
diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h
index 4dade8c4d18e..e3acf2ae9881 100644
--- a/tools/testing/selftests/kvm/include/riscv/processor.h
+++ b/tools/testing/selftests/kvm/include/riscv/processor.h
@@ -25,8 +25,7 @@
#define GET_RM(insn) (((insn) & INSN_MASK_FUNCT3) >> INSN_SHIFT_FUNCT3)
#define GET_CSR_NUM(insn) (((insn) & INSN_CSR_MASK) >> INSN_CSR_SHIFT)
-static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
- uint64_t idx, uint64_t size)
+static inline u64 __kvm_reg_id(u64 type, u64 subtype, u64 idx, u64 size)
{
return KVM_REG_RISCV | type | subtype | idx | size;
}
@@ -62,14 +61,14 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
KVM_REG_RISCV_SBI_SINGLE, \
idx, KVM_REG_SIZE_ULONG)
-bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext);
+bool __vcpu_has_ext(struct kvm_vcpu *vcpu, u64 ext);
-static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, uint64_t isa_ext)
+static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, u64 isa_ext)
{
return __vcpu_has_ext(vcpu, RISCV_ISA_EXT_REG(isa_ext));
}
-static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, uint64_t sbi_ext)
+static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, u64 sbi_ext)
{
return __vcpu_has_ext(vcpu, RISCV_SBI_EXT_REG(sbi_ext));
}
diff --git a/tools/testing/selftests/kvm/include/riscv/ucall.h b/tools/testing/selftests/kvm/include/riscv/ucall.h
index a695ae36f3e0..2de7c6a36096 100644
--- a/tools/testing/selftests/kvm/include/riscv/ucall.h
+++ b/tools/testing/selftests/kvm/include/riscv/ucall.h
@@ -7,11 +7,11 @@
#define UCALL_EXIT_REASON KVM_EXIT_RISCV_SBI
-static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
}
-static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+static inline void ucall_arch_do_ucall(gva_t uc)
{
sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
KVM_RISCV_SELFTESTS_SBI_UCALL,
diff --git a/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h b/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h
index b0ed71302722..6deaf18fec22 100644
--- a/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h
+++ b/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h
@@ -8,6 +8,6 @@
#ifndef SELFTEST_KVM_DIAG318_TEST_HANDLER
#define SELFTEST_KVM_DIAG318_TEST_HANDLER
-uint64_t get_diag318_info(void);
+u64 get_diag318_info(void);
#endif
diff --git a/tools/testing/selftests/kvm/include/s390/facility.h b/tools/testing/selftests/kvm/include/s390/facility.h
index 00a1ced6538b..41a265742666 100644
--- a/tools/testing/selftests/kvm/include/s390/facility.h
+++ b/tools/testing/selftests/kvm/include/s390/facility.h
@@ -16,7 +16,7 @@
/* alt_stfle_fac_list[16] + stfle_fac_list[16] */
#define NB_STFL_DOUBLEWORDS 32
-extern uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS];
+extern u64 stfl_doublewords[NB_STFL_DOUBLEWORDS];
extern bool stfle_flag;
static inline bool test_bit_inv(unsigned long nr, const unsigned long *ptr)
@@ -24,7 +24,7 @@ static inline bool test_bit_inv(unsigned long nr, const unsigned long *ptr)
return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-static inline void stfle(uint64_t *fac, unsigned int nb_doublewords)
+static inline void stfle(u64 *fac, unsigned int nb_doublewords)
{
register unsigned long r0 asm("0") = nb_doublewords - 1;
diff --git a/tools/testing/selftests/kvm/include/s390/ucall.h b/tools/testing/selftests/kvm/include/s390/ucall.h
index 8035a872a351..3907d629304f 100644
--- a/tools/testing/selftests/kvm/include/s390/ucall.h
+++ b/tools/testing/selftests/kvm/include/s390/ucall.h
@@ -6,11 +6,11 @@
#define UCALL_EXIT_REASON KVM_EXIT_S390_SIEIC
-static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
}
-static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+static inline void ucall_arch_do_ucall(gva_t uc)
{
/* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */
asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory");
diff --git a/tools/testing/selftests/kvm/include/sparsebit.h b/tools/testing/selftests/kvm/include/sparsebit.h
index bc760761e1a3..e027e5790946 100644
--- a/tools/testing/selftests/kvm/include/sparsebit.h
+++ b/tools/testing/selftests/kvm/include/sparsebit.h
@@ -6,7 +6,7 @@
*
* Header file that describes API to the sparsebit library.
* This library provides a memory efficient means of storing
- * the settings of bits indexed via a uint64_t. Memory usage
+ * the settings of bits indexed via a u64. Memory usage
* is reasonable, significantly less than (2^64 / 8) bytes, as
* long as bits that are mostly set or mostly cleared are close
* to each other. This library is efficient in memory usage
@@ -25,8 +25,8 @@ extern "C" {
#endif
struct sparsebit;
-typedef uint64_t sparsebit_idx_t;
-typedef uint64_t sparsebit_num_t;
+typedef u64 sparsebit_idx_t;
+typedef u64 sparsebit_num_t;
struct sparsebit *sparsebit_alloc(void);
void sparsebit_free(struct sparsebit **sbitp);
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index b4872ba8ed12..d9b433b834f1 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -22,6 +22,8 @@
#include <sys/mman.h>
#include "kselftest.h"
+#include <linux/types.h>
+
#define msecs_to_usecs(msec) ((msec) * 1000ULL)
static inline __printf(1, 2) int _no_printf(const char *format, ...) { return 0; }
@@ -99,25 +101,25 @@ do { \
size_t parse_size(const char *size);
-int64_t timespec_to_ns(struct timespec ts);
-struct timespec timespec_add_ns(struct timespec ts, int64_t ns);
+s64 timespec_to_ns(struct timespec ts);
+struct timespec timespec_add_ns(struct timespec ts, s64 ns);
struct timespec timespec_add(struct timespec ts1, struct timespec ts2);
struct timespec timespec_sub(struct timespec ts1, struct timespec ts2);
struct timespec timespec_elapsed(struct timespec start);
struct timespec timespec_div(struct timespec ts, int divisor);
struct guest_random_state {
- uint32_t seed;
+ u32 seed;
};
-extern uint32_t guest_random_seed;
+extern u32 guest_random_seed;
extern struct guest_random_state guest_rng;
-struct guest_random_state new_guest_random_state(uint32_t seed);
-uint32_t guest_random_u32(struct guest_random_state *state);
+struct guest_random_state new_guest_random_state(u32 seed);
+u32 guest_random_u32(struct guest_random_state *state);
static inline bool __guest_random_bool(struct guest_random_state *state,
- uint8_t percent)
+ u8 percent)
{
return (guest_random_u32(state) % 100) < percent;
}
@@ -127,9 +129,9 @@ static inline bool guest_random_bool(struct guest_random_state *state)
return __guest_random_bool(state, 50);
}
-static inline uint64_t guest_random_u64(struct guest_random_state *state)
+static inline u64 guest_random_u64(struct guest_random_state *state)
{
- return ((uint64_t)guest_random_u32(state) << 32) | guest_random_u32(state);
+ return ((u64)guest_random_u32(state) << 32) | guest_random_u32(state);
}
enum vm_mem_backing_src_type {
@@ -158,7 +160,7 @@ enum vm_mem_backing_src_type {
struct vm_mem_backing_src_alias {
const char *name;
- uint32_t flag;
+ u32 flag;
};
#define MIN_RUN_DELAY_NS 200000UL
@@ -166,9 +168,9 @@ struct vm_mem_backing_src_alias {
bool thp_configured(void);
size_t get_trans_hugepagesz(void);
size_t get_def_hugetlb_pagesz(void);
-const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i);
-size_t get_backing_src_pagesz(uint32_t i);
-bool is_backing_src_hugetlb(uint32_t i);
+const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(u32 i);
+size_t get_backing_src_pagesz(u32 i);
+bool is_backing_src_hugetlb(u32 i);
void backing_src_help(const char *flag);
enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
long get_run_delay(void);
@@ -189,18 +191,18 @@ static inline bool backing_src_can_be_huge(enum vm_mem_backing_src_type t)
}
/* Aligns x up to the next multiple of size. Size must be a power of 2. */
-static inline uint64_t align_up(uint64_t x, uint64_t size)
+static inline u64 align_up(u64 x, u64 size)
{
- uint64_t mask = size - 1;
+ u64 mask = size - 1;
TEST_ASSERT(size != 0 && !(size & (size - 1)),
"size not a power of 2: %lu", size);
return ((x + mask) & ~mask);
}
-static inline uint64_t align_down(uint64_t x, uint64_t size)
+static inline u64 align_down(u64 x, u64 size)
{
- uint64_t x_aligned_up = align_up(x, size);
+ u64 x_aligned_up = align_up(x, size);
if (x == x_aligned_up)
return x;
@@ -215,7 +217,7 @@ static inline void *align_ptr_up(void *x, size_t size)
int atoi_paranoid(const char *num_str);
-static inline uint32_t atoi_positive(const char *name, const char *num_str)
+static inline u32 atoi_positive(const char *name, const char *num_str)
{
int num = atoi_paranoid(num_str);
@@ -223,7 +225,7 @@ static inline uint32_t atoi_positive(const char *name, const char *num_str)
return num;
}
-static inline uint32_t atoi_non_negative(const char *name, const char *num_str)
+static inline u32 atoi_non_negative(const char *name, const char *num_str)
{
int num = atoi_paranoid(num_str);
diff --git a/tools/testing/selftests/kvm/include/timer_test.h b/tools/testing/selftests/kvm/include/timer_test.h
index 9b6edaafe6d4..b7d5d2c84701 100644
--- a/tools/testing/selftests/kvm/include/timer_test.h
+++ b/tools/testing/selftests/kvm/include/timer_test.h
@@ -18,21 +18,21 @@
/* Timer test cmdline parameters */
struct test_args {
- uint32_t nr_vcpus;
- uint32_t nr_iter;
- uint32_t timer_period_ms;
- uint32_t migration_freq_ms;
- uint32_t timer_err_margin_us;
+ u32 nr_vcpus;
+ u32 nr_iter;
+ u32 timer_period_ms;
+ u32 migration_freq_ms;
+ u32 timer_err_margin_us;
/* Members of struct kvm_arm_counter_offset */
- uint64_t counter_offset;
- uint64_t reserved;
+ u64 counter_offset;
+ u64 reserved;
};
/* Shared variables between host and guest */
struct test_vcpu_shared_data {
- uint32_t nr_iter;
+ u32 nr_iter;
int guest_stage;
- uint64_t xcnt;
+ u64 xcnt;
};
extern struct test_args test_args;
diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h
index d9d6581b8d4f..cbdcb0a50c4f 100644
--- a/tools/testing/selftests/kvm/include/ucall_common.h
+++ b/tools/testing/selftests/kvm/include/ucall_common.h
@@ -21,26 +21,26 @@ enum {
#define UCALL_BUFFER_LEN 1024
struct ucall {
- uint64_t cmd;
- uint64_t args[UCALL_MAX_ARGS];
+ u64 cmd;
+ u64 args[UCALL_MAX_ARGS];
char buffer[UCALL_BUFFER_LEN];
/* Host virtual address of this struct. */
struct ucall *hva;
};
-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
-void ucall_arch_do_ucall(vm_vaddr_t uc);
+void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa);
+void ucall_arch_do_ucall(gva_t uc);
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu);
-void ucall(uint64_t cmd, int nargs, ...);
-__printf(2, 3) void ucall_fmt(uint64_t cmd, const char *fmt, ...);
-__printf(5, 6) void ucall_assert(uint64_t cmd, const char *exp,
+void ucall(u64 cmd, int nargs, ...);
+__printf(2, 3) void ucall_fmt(u64 cmd, const char *fmt, ...);
+__printf(5, 6) void ucall_assert(u64 cmd, const char *exp,
const char *file, unsigned int line,
const char *fmt, ...);
-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
-void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
-int ucall_nr_pages_required(uint64_t page_size);
+u64 get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
+void ucall_init(struct kvm_vm *vm, gpa_t mmio_gpa);
+int ucall_nr_pages_required(u64 page_size);
/*
* Perform userspace call without any associated data. This bare call avoids
@@ -48,7 +48,7 @@ int ucall_nr_pages_required(uint64_t page_size);
* the full ucall() are problematic and/or unwanted. Note, this will come out
* as UCALL_NONE on the backend.
*/
-#define GUEST_UCALL_NONE() ucall_arch_do_ucall((vm_vaddr_t)NULL)
+#define GUEST_UCALL_NONE() ucall_arch_do_ucall((gva_t)NULL)
#define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \
ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
diff --git a/tools/testing/selftests/kvm/include/userfaultfd_util.h b/tools/testing/selftests/kvm/include/userfaultfd_util.h
index 60f7f9d435dc..0bc1dc16600e 100644
--- a/tools/testing/selftests/kvm/include/userfaultfd_util.h
+++ b/tools/testing/selftests/kvm/include/userfaultfd_util.h
@@ -25,7 +25,7 @@ struct uffd_reader_args {
struct uffd_desc {
int uffd;
- uint64_t num_readers;
+ u64 num_readers;
/* Holds the write ends of the pipes for killing the readers. */
int *pipefds;
pthread_t *readers;
@@ -33,8 +33,8 @@ struct uffd_desc {
};
struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
- void *hva, uint64_t len,
- uint64_t num_readers,
+ void *hva, u64 len,
+ u64 num_readers,
uffd_handler_t handler);
void uffd_stop_demand_paging(struct uffd_desc *uffd);
diff --git a/tools/testing/selftests/kvm/include/x86/apic.h b/tools/testing/selftests/kvm/include/x86/apic.h
index 5ca6bacbd70e..31887bdc3d6c 100644
--- a/tools/testing/selftests/kvm/include/x86/apic.h
+++ b/tools/testing/selftests/kvm/include/x86/apic.h
@@ -79,42 +79,42 @@ void apic_disable(void);
void xapic_enable(void);
void x2apic_enable(void);
-static inline uint32_t get_bsp_flag(void)
+static inline u32 get_bsp_flag(void)
{
return rdmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_BSP;
}
-static inline uint32_t xapic_read_reg(unsigned int reg)
+static inline u32 xapic_read_reg(unsigned int reg)
{
- return ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2];
+ return ((volatile u32 *)APIC_DEFAULT_GPA)[reg >> 2];
}
-static inline void xapic_write_reg(unsigned int reg, uint32_t val)
+static inline void xapic_write_reg(unsigned int reg, u32 val)
{
- ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2] = val;
+ ((volatile u32 *)APIC_DEFAULT_GPA)[reg >> 2] = val;
}
-static inline uint64_t x2apic_read_reg(unsigned int reg)
+static inline u64 x2apic_read_reg(unsigned int reg)
{
return rdmsr(APIC_BASE_MSR + (reg >> 4));
}
-static inline uint8_t x2apic_write_reg_safe(unsigned int reg, uint64_t value)
+static inline u8 x2apic_write_reg_safe(unsigned int reg, u64 value)
{
return wrmsr_safe(APIC_BASE_MSR + (reg >> 4), value);
}
-static inline void x2apic_write_reg(unsigned int reg, uint64_t value)
+static inline void x2apic_write_reg(unsigned int reg, u64 value)
{
- uint8_t fault = x2apic_write_reg_safe(reg, value);
+ u8 fault = x2apic_write_reg_safe(reg, value);
__GUEST_ASSERT(!fault, "Unexpected fault 0x%x on WRMSR(%x) = %lx\n",
fault, APIC_BASE_MSR + (reg >> 4), value);
}
-static inline void x2apic_write_reg_fault(unsigned int reg, uint64_t value)
+static inline void x2apic_write_reg_fault(unsigned int reg, u64 value)
{
- uint8_t fault = x2apic_write_reg_safe(reg, value);
+ u8 fault = x2apic_write_reg_safe(reg, value);
__GUEST_ASSERT(fault == GP_VECTOR,
"Wanted #GP on WRMSR(%x) = %lx, got 0x%x\n",
diff --git a/tools/testing/selftests/kvm/include/x86/evmcs.h b/tools/testing/selftests/kvm/include/x86/evmcs.h
index 5a74bb30e2f8..be79bda024bf 100644
--- a/tools/testing/selftests/kvm/include/x86/evmcs.h
+++ b/tools/testing/selftests/kvm/include/x86/evmcs.h
@@ -10,9 +10,9 @@
#include "hyperv.h"
#include "vmx.h"
-#define u16 uint16_t
-#define u32 uint32_t
-#define u64 uint64_t
+#define u16 u16
+#define u32 u32
+#define u64 u64
#define EVMCS_VERSION 1
@@ -245,7 +245,7 @@ static inline void evmcs_enable(void)
enable_evmcs = true;
}
-static inline int evmcs_vmptrld(uint64_t vmcs_pa, void *vmcs)
+static inline int evmcs_vmptrld(u64 vmcs_pa, void *vmcs)
{
current_vp_assist->current_nested_vmcs = vmcs_pa;
current_vp_assist->enlighten_vmentry = 1;
@@ -265,7 +265,7 @@ static inline bool load_evmcs(struct hyperv_test_pages *hv)
return true;
}
-static inline int evmcs_vmptrst(uint64_t *value)
+static inline int evmcs_vmptrst(u64 *value)
{
*value = current_vp_assist->current_nested_vmcs &
~HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
@@ -273,7 +273,7 @@ static inline int evmcs_vmptrst(uint64_t *value)
return 0;
}
-static inline int evmcs_vmread(uint64_t encoding, uint64_t *value)
+static inline int evmcs_vmread(u64 encoding, u64 *value)
{
switch (encoding) {
case GUEST_RIP:
@@ -672,7 +672,7 @@ static inline int evmcs_vmread(uint64_t encoding, uint64_t *value)
return 0;
}
-static inline int evmcs_vmwrite(uint64_t encoding, uint64_t value)
+static inline int evmcs_vmwrite(u64 encoding, u64 value)
{
switch (encoding) {
case GUEST_RIP:
@@ -1226,9 +1226,9 @@ static inline int evmcs_vmlaunch(void)
"pop %%rbp;"
: [ret]"=&a"(ret)
: [host_rsp]"r"
- ((uint64_t)&current_evmcs->host_rsp),
+ ((u64)&current_evmcs->host_rsp),
[host_rip]"r"
- ((uint64_t)&current_evmcs->host_rip)
+ ((u64)&current_evmcs->host_rip)
: "memory", "cc", "rbx", "r8", "r9", "r10",
"r11", "r12", "r13", "r14", "r15");
return ret;
@@ -1265,9 +1265,9 @@ static inline int evmcs_vmresume(void)
"pop %%rbp;"
: [ret]"=&a"(ret)
: [host_rsp]"r"
- ((uint64_t)&current_evmcs->host_rsp),
+ ((u64)&current_evmcs->host_rsp),
[host_rip]"r"
- ((uint64_t)&current_evmcs->host_rip)
+ ((u64)&current_evmcs->host_rip)
: "memory", "cc", "rbx", "r8", "r9", "r10",
"r11", "r12", "r13", "r14", "r15");
return ret;
diff --git a/tools/testing/selftests/kvm/include/x86/hyperv.h b/tools/testing/selftests/kvm/include/x86/hyperv.h
index f13e532be240..78003f5a22f3 100644
--- a/tools/testing/selftests/kvm/include/x86/hyperv.h
+++ b/tools/testing/selftests/kvm/include/x86/hyperv.h
@@ -254,12 +254,12 @@
* Issue a Hyper-V hypercall. Returns exception vector raised or 0, 'hv_status'
* is set to the hypercall status (if no exception occurred).
*/
-static inline uint8_t __hyperv_hypercall(u64 control, vm_vaddr_t input_address,
- vm_vaddr_t output_address,
- uint64_t *hv_status)
+static inline u8 __hyperv_hypercall(u64 control, gva_t input_address,
+ gva_t output_address,
+ u64 *hv_status)
{
- uint64_t error_code;
- uint8_t vector;
+ u64 error_code;
+ u8 vector;
/* Note both the hypercall and the "asm safe" clobber r9-r11. */
asm volatile("mov %[output_address], %%r8\n\t"
@@ -274,11 +274,11 @@ static inline uint8_t __hyperv_hypercall(u64 control, vm_vaddr_t input_address,
}
/* Issue a Hyper-V hypercall and assert that it succeeded. */
-static inline void hyperv_hypercall(u64 control, vm_vaddr_t input_address,
- vm_vaddr_t output_address)
+static inline void hyperv_hypercall(u64 control, gva_t input_address,
+ gva_t output_address)
{
- uint64_t hv_status;
- uint8_t vector;
+ u64 hv_status;
+ u8 vector;
vector = __hyperv_hypercall(control, input_address, output_address, &hv_status);
@@ -327,27 +327,27 @@ struct hv_vp_assist_page {
extern struct hv_vp_assist_page *current_vp_assist;
-int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist);
+int enable_vp_assist(u64 vp_assist_pa, void *vp_assist);
struct hyperv_test_pages {
/* VP assist page */
void *vp_assist_hva;
- uint64_t vp_assist_gpa;
+ u64 vp_assist_gpa;
void *vp_assist;
/* Partition assist page */
void *partition_assist_hva;
- uint64_t partition_assist_gpa;
+ u64 partition_assist_gpa;
void *partition_assist;
/* Enlightened VMCS */
void *enlightened_vmcs_hva;
- uint64_t enlightened_vmcs_gpa;
+ u64 enlightened_vmcs_gpa;
void *enlightened_vmcs;
};
struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
- vm_vaddr_t *p_hv_pages_gva);
+ gva_t *p_hv_pages_gva);
/* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */
#define HV_INVARIANT_TSC_EXPOSED BIT_ULL(0)
diff --git a/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h b/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
index be35d26bb320..c33ab6e04171 100644
--- a/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
+++ b/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
@@ -11,19 +11,19 @@
extern bool is_forced_emulation_enabled;
struct pte_masks {
- uint64_t present;
- uint64_t writable;
- uint64_t user;
- uint64_t readable;
- uint64_t executable;
- uint64_t accessed;
- uint64_t dirty;
- uint64_t huge;
- uint64_t nx;
- uint64_t c;
- uint64_t s;
+ u64 present;
+ u64 writable;
+ u64 user;
+ u64 readable;
+ u64 executable;
+ u64 accessed;
+ u64 dirty;
+ u64 huge;
+ u64 nx;
+ u64 c;
+ u64 s;
- uint64_t always_set;
+ u64 always_set;
};
struct kvm_mmu_arch {
@@ -33,12 +33,12 @@ struct kvm_mmu_arch {
struct kvm_mmu;
struct kvm_vm_arch {
- vm_vaddr_t gdt;
- vm_vaddr_t tss;
- vm_vaddr_t idt;
+ gva_t gdt;
+ gva_t tss;
+ gva_t idt;
- uint64_t c_bit;
- uint64_t s_bit;
+ u64 c_bit;
+ u64 s_bit;
int sev_fd;
bool is_pt_protected;
};
@@ -62,7 +62,7 @@ do { \
: "+m" (mem) \
: "r" (val) : "memory"); \
} else { \
- uint64_t __old = READ_ONCE(mem); \
+ u64 __old = READ_ONCE(mem); \
\
__asm__ __volatile__(KVM_FEP LOCK_PREFIX "cmpxchg %[new], %[ptr]" \
: [ptr] "+m" (mem), [old] "+a" (__old) \
diff --git a/tools/testing/selftests/kvm/include/x86/pmu.h b/tools/testing/selftests/kvm/include/x86/pmu.h
index 72575eadb63a..98537cc8840d 100644
--- a/tools/testing/selftests/kvm/include/x86/pmu.h
+++ b/tools/testing/selftests/kvm/include/x86/pmu.h
@@ -6,8 +6,8 @@
#define SELFTEST_KVM_PMU_H
#include <stdbool.h>
-#include <stdint.h>
+#include <linux/types.h>
#include <linux/bits.h>
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
@@ -104,14 +104,15 @@ enum amd_pmu_zen_events {
NR_AMD_ZEN_EVENTS,
};
-extern const uint64_t intel_pmu_arch_events[];
-extern const uint64_t amd_pmu_zen_events[];
+extern const u64 intel_pmu_arch_events[];
+extern const u64 amd_pmu_zen_events[];
enum pmu_errata {
INSTRUCTIONS_RETIRED_OVERCOUNT,
BRANCHES_RETIRED_OVERCOUNT,
};
-extern uint64_t pmu_errata_mask;
+
+extern u64 pmu_errata_mask;
void kvm_init_pmu_errata(void);
diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h
index d8634a760a60..77f576ee7789 100644
--- a/tools/testing/selftests/kvm/include/x86/processor.h
+++ b/tools/testing/selftests/kvm/include/x86/processor.h
@@ -23,7 +23,7 @@ extern bool host_cpu_is_intel;
extern bool host_cpu_is_amd;
extern bool host_cpu_is_hygon;
extern bool host_cpu_is_amd_compatible;
-extern uint64_t guest_tsc_khz;
+extern u64 guest_tsc_khz;
#ifndef MAX_NR_CPUID_ENTRIES
#define MAX_NR_CPUID_ENTRIES 100
@@ -399,17 +399,17 @@ struct gpr64_regs {
};
struct desc64 {
- uint16_t limit0;
- uint16_t base0;
+ u16 limit0;
+ u16 base0;
unsigned base1:8, type:4, s:1, dpl:2, p:1;
unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
- uint32_t base3;
- uint32_t zero1;
+ u32 base3;
+ u32 zero1;
} __attribute__((packed));
struct desc_ptr {
- uint16_t size;
- uint64_t address;
+ u16 size;
+ u64 address;
} __attribute__((packed));
struct kvm_x86_state {
@@ -427,18 +427,18 @@ struct kvm_x86_state {
struct kvm_msrs msrs;
};
-static inline uint64_t get_desc64_base(const struct desc64 *desc)
+static inline u64 get_desc64_base(const struct desc64 *desc)
{
- return (uint64_t)desc->base3 << 32 |
- (uint64_t)desc->base2 << 24 |
- (uint64_t)desc->base1 << 16 |
- (uint64_t)desc->base0;
+ return (u64)desc->base3 << 32 |
+ (u64)desc->base2 << 24 |
+ (u64)desc->base1 << 16 |
+ (u64)desc->base0;
}
-static inline uint64_t rdtsc(void)
+static inline u64 rdtsc(void)
{
- uint32_t eax, edx;
- uint64_t tsc_val;
+ u32 eax, edx;
+ u64 tsc_val;
/*
* The lfence is to wait (on Intel CPUs) until all previous
* instructions have been executed. If software requires RDTSC to be
@@ -446,39 +446,39 @@ static inline uint64_t rdtsc(void)
* execute LFENCE immediately after RDTSC
*/
__asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
- tsc_val = ((uint64_t)edx) << 32 | eax;
+ tsc_val = ((u64)edx) << 32 | eax;
return tsc_val;
}
-static inline uint64_t rdtscp(uint32_t *aux)
+static inline u64 rdtscp(u32 *aux)
{
- uint32_t eax, edx;
+ u32 eax, edx;
__asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
- return ((uint64_t)edx) << 32 | eax;
+ return ((u64)edx) << 32 | eax;
}
-static inline uint64_t rdmsr(uint32_t msr)
+static inline u64 rdmsr(u32 msr)
{
- uint32_t a, d;
+ u32 a, d;
__asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");
- return a | ((uint64_t) d << 32);
+ return a | ((u64)d << 32);
}
-static inline void wrmsr(uint32_t msr, uint64_t value)
+static inline void wrmsr(u32 msr, u64 value)
{
- uint32_t a = value;
- uint32_t d = value >> 32;
+ u32 a = value;
+ u32 d = value >> 32;
__asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
}
-static inline uint16_t inw(uint16_t port)
+static inline u16 inw(u16 port)
{
- uint16_t tmp;
+ u16 tmp;
__asm__ __volatile__("in %%dx, %%ax"
: /* output */ "=a" (tmp)
@@ -487,120 +487,120 @@ static inline uint16_t inw(uint16_t port)
return tmp;
}
-static inline uint16_t get_es(void)
+static inline u16 get_es(void)
{
- uint16_t es;
+ u16 es;
__asm__ __volatile__("mov %%es, %[es]"
: /* output */ [es]"=rm"(es));
return es;
}
-static inline uint16_t get_cs(void)
+static inline u16 get_cs(void)
{
- uint16_t cs;
+ u16 cs;
__asm__ __volatile__("mov %%cs, %[cs]"
: /* output */ [cs]"=rm"(cs));
return cs;
}
-static inline uint16_t get_ss(void)
+static inline u16 get_ss(void)
{
- uint16_t ss;
+ u16 ss;
__asm__ __volatile__("mov %%ss, %[ss]"
: /* output */ [ss]"=rm"(ss));
return ss;
}
-static inline uint16_t get_ds(void)
+static inline u16 get_ds(void)
{
- uint16_t ds;
+ u16 ds;
__asm__ __volatile__("mov %%ds, %[ds]"
: /* output */ [ds]"=rm"(ds));
return ds;
}
-static inline uint16_t get_fs(void)
+static inline u16 get_fs(void)
{
- uint16_t fs;
+ u16 fs;
__asm__ __volatile__("mov %%fs, %[fs]"
: /* output */ [fs]"=rm"(fs));
return fs;
}
-static inline uint16_t get_gs(void)
+static inline u16 get_gs(void)
{
- uint16_t gs;
+ u16 gs;
__asm__ __volatile__("mov %%gs, %[gs]"
: /* output */ [gs]"=rm"(gs));
return gs;
}
-static inline uint16_t get_tr(void)
+static inline u16 get_tr(void)
{
- uint16_t tr;
+ u16 tr;
__asm__ __volatile__("str %[tr]"
: /* output */ [tr]"=rm"(tr));
return tr;
}
-static inline uint64_t get_cr0(void)
+static inline u64 get_cr0(void)
{
- uint64_t cr0;
+ u64 cr0;
__asm__ __volatile__("mov %%cr0, %[cr0]"
: /* output */ [cr0]"=r"(cr0));
return cr0;
}
-static inline void set_cr0(uint64_t val)
+static inline void set_cr0(u64 val)
{
__asm__ __volatile__("mov %0, %%cr0" : : "r" (val) : "memory");
}
-static inline uint64_t get_cr3(void)
+static inline u64 get_cr3(void)
{
- uint64_t cr3;
+ u64 cr3;
__asm__ __volatile__("mov %%cr3, %[cr3]"
: /* output */ [cr3]"=r"(cr3));
return cr3;
}
-static inline void set_cr3(uint64_t val)
+static inline void set_cr3(u64 val)
{
__asm__ __volatile__("mov %0, %%cr3" : : "r" (val) : "memory");
}
-static inline uint64_t get_cr4(void)
+static inline u64 get_cr4(void)
{
- uint64_t cr4;
+ u64 cr4;
__asm__ __volatile__("mov %%cr4, %[cr4]"
: /* output */ [cr4]"=r"(cr4));
return cr4;
}
-static inline void set_cr4(uint64_t val)
+static inline void set_cr4(u64 val)
{
__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}
-static inline uint64_t get_cr8(void)
+static inline u64 get_cr8(void)
{
- uint64_t cr8;
+ u64 cr8;
__asm__ __volatile__("mov %%cr8, %[cr8]" : [cr8]"=r"(cr8));
return cr8;
}
-static inline void set_cr8(uint64_t val)
+static inline void set_cr8(u64 val)
{
__asm__ __volatile__("mov %0, %%cr8" : : "r" (val) : "memory");
}
@@ -651,14 +651,14 @@ static inline struct desc_ptr get_idt(void)
return idt;
}
-static inline void outl(uint16_t port, uint32_t value)
+static inline void outl(u16 port, u32 value)
{
__asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
}
-static inline void __cpuid(uint32_t function, uint32_t index,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx)
+static inline void __cpuid(u32 function, u32 index,
+ u32 *eax, u32 *ebx,
+ u32 *ecx, u32 *edx)
{
*eax = function;
*ecx = index;
@@ -672,35 +672,35 @@ static inline void __cpuid(uint32_t function, uint32_t index,
: "memory");
}
-static inline void cpuid(uint32_t function,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx)
+static inline void cpuid(u32 function,
+ u32 *eax, u32 *ebx,
+ u32 *ecx, u32 *edx)
{
return __cpuid(function, 0, eax, ebx, ecx, edx);
}
-static inline uint32_t this_cpu_fms(void)
+static inline u32 this_cpu_fms(void)
{
- uint32_t eax, ebx, ecx, edx;
+ u32 eax, ebx, ecx, edx;
cpuid(1, &eax, &ebx, &ecx, &edx);
return eax;
}
-static inline uint32_t this_cpu_family(void)
+static inline u32 this_cpu_family(void)
{
return x86_family(this_cpu_fms());
}
-static inline uint32_t this_cpu_model(void)
+static inline u32 this_cpu_model(void)
{
return x86_model(this_cpu_fms());
}
static inline bool this_cpu_vendor_string_is(const char *vendor)
{
- const uint32_t *chunk = (const uint32_t *)vendor;
- uint32_t eax, ebx, ecx, edx;
+ const u32 *chunk = (const u32 *)vendor;
+ u32 eax, ebx, ecx, edx;
cpuid(0, &eax, &ebx, &ecx, &edx);
return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
@@ -724,10 +724,9 @@ static inline bool this_cpu_is_hygon(void)
return this_cpu_vendor_string_is("HygonGenuine");
}
-static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
- uint8_t reg, uint8_t lo, uint8_t hi)
+static inline u32 __this_cpu_has(u32 function, u32 index, u8 reg, u8 lo, u8 hi)
{
- uint32_t gprs[4];
+ u32 gprs[4];
__cpuid(function, index,
&gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX],
@@ -742,7 +741,7 @@ static inline bool this_cpu_has(struct kvm_x86_cpu_feature feature)
feature.reg, feature.bit, feature.bit);
}
-static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
+static inline u32 this_cpu_property(struct kvm_x86_cpu_property property)
{
return __this_cpu_has(property.function, property.index,
property.reg, property.lo_bit, property.hi_bit);
@@ -750,7 +749,7 @@ static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
{
- uint32_t max_leaf;
+ u32 max_leaf;
switch (property.function & 0xc0000000) {
case 0:
@@ -770,7 +769,7 @@ static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
{
- uint32_t nr_bits;
+ u32 nr_bits;
if (feature.f.reg == KVM_CPUID_EBX) {
nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
@@ -782,13 +781,13 @@ static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
return nr_bits > feature.f.bit || this_cpu_has(feature.f);
}
-static __always_inline uint64_t this_cpu_supported_xcr0(void)
+static __always_inline u64 this_cpu_supported_xcr0(void)
{
if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
return 0;
return this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
- ((uint64_t)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
+ ((u64)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}
typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -867,7 +866,7 @@ static inline void cpu_relax(void)
static inline void udelay(unsigned long usec)
{
- uint64_t start, now, cycles;
+ u64 start, now, cycles;
GUEST_ASSERT(guest_tsc_khz);
cycles = guest_tsc_khz / 1000 * usec;
@@ -898,8 +897,8 @@ void kvm_x86_state_cleanup(struct kvm_x86_state *state);
const struct kvm_msr_list *kvm_get_msr_index_list(void);
const struct kvm_msr_list *kvm_get_feature_msr_index_list(void);
-bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
-uint64_t kvm_get_feature_msr(uint64_t msr_index);
+bool kvm_msr_is_in_save_restore_list(u32 msr_index);
+u64 kvm_get_feature_msr(u64 msr_index);
static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
struct kvm_msrs *msrs)
@@ -954,20 +953,20 @@ static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
}
const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
- uint32_t function, uint32_t index);
+ u32 function, u32 index);
const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
-static inline uint32_t kvm_cpu_fms(void)
+static inline u32 kvm_cpu_fms(void)
{
return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax;
}
-static inline uint32_t kvm_cpu_family(void)
+static inline u32 kvm_cpu_family(void)
{
return x86_family(kvm_cpu_fms());
}
-static inline uint32_t kvm_cpu_model(void)
+static inline u32 kvm_cpu_model(void)
{
return x86_model(kvm_cpu_fms());
}
@@ -980,17 +979,17 @@ static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature)
return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
}
-uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
- struct kvm_x86_cpu_property property);
+u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
+ struct kvm_x86_cpu_property property);
-static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property)
+static inline u32 kvm_cpu_property(struct kvm_x86_cpu_property property)
{
return kvm_cpuid_property(kvm_get_supported_cpuid(), property);
}
static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
{
- uint32_t max_leaf;
+ u32 max_leaf;
switch (property.function & 0xc0000000) {
case 0:
@@ -1010,7 +1009,7 @@ static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
{
- uint32_t nr_bits;
+ u32 nr_bits;
if (feature.f.reg == KVM_CPUID_EBX) {
nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
@@ -1022,13 +1021,13 @@ static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
return nr_bits > feature.f.bit || kvm_cpu_has(feature.f);
}
-static __always_inline uint64_t kvm_cpu_supported_xcr0(void)
+static __always_inline u64 kvm_cpu_supported_xcr0(void)
{
if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
return 0;
return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
- ((uint64_t)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
+ ((u64)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}
static inline size_t kvm_cpuid2_size(int nr_entries)
@@ -1062,8 +1061,8 @@ static inline void vcpu_get_cpuid(struct kvm_vcpu *vcpu)
}
static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
- uint32_t function,
- uint32_t index)
+ u32 function,
+ u32 index)
{
TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first (or equivalent)");
@@ -1074,7 +1073,7 @@ static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *v
}
static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
- uint32_t function)
+ u32 function)
{
return __vcpu_get_cpuid_entry(vcpu, function, 0);
}
@@ -1104,10 +1103,10 @@ static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
struct kvm_x86_cpu_property property,
- uint32_t value);
-void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr);
+ u32 value);
+void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, u8 maxphyaddr);
-void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function);
+void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function);
static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu,
struct kvm_x86_cpu_feature feature)
@@ -1135,8 +1134,8 @@ static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
vcpu_set_or_clear_cpuid_feature(vcpu, feature, false);
}
-uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
-int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);
+u64 vcpu_get_msr(struct kvm_vcpu *vcpu, u64 msr_index);
+int _vcpu_set_msr(struct kvm_vcpu *vcpu, u64 msr_index, u64 msr_value);
/*
* Assert on an MSR access(es) and pretty print the MSR name when possible.
@@ -1161,14 +1160,14 @@ do { \
* is changing, etc. This is NOT an exhaustive list! The intent is to filter
* out MSRs that are not durable _and_ that a selftest wants to write.
*/
-static inline bool is_durable_msr(uint32_t msr)
+static inline bool is_durable_msr(u32 msr)
{
return msr != MSR_IA32_TSC;
}
#define vcpu_set_msr(vcpu, msr, val) \
do { \
- uint64_t r, v = val; \
+ u64 r, v = val; \
\
TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1, \
"KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v); \
@@ -1182,28 +1181,28 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
void kvm_init_vm_address_properties(struct kvm_vm *vm);
struct ex_regs {
- uint64_t rax, rcx, rdx, rbx;
- uint64_t rbp, rsi, rdi;
- uint64_t r8, r9, r10, r11;
- uint64_t r12, r13, r14, r15;
- uint64_t vector;
- uint64_t error_code;
- uint64_t rip;
- uint64_t cs;
- uint64_t rflags;
+ u64 rax, rcx, rdx, rbx;
+ u64 rbp, rsi, rdi;
+ u64 r8, r9, r10, r11;
+ u64 r12, r13, r14, r15;
+ u64 vector;
+ u64 error_code;
+ u64 rip;
+ u64 cs;
+ u64 rflags;
};
struct idt_entry {
- uint16_t offset0;
- uint16_t selector;
- uint16_t ist : 3;
- uint16_t : 5;
- uint16_t type : 4;
- uint16_t : 1;
- uint16_t dpl : 2;
- uint16_t p : 1;
- uint16_t offset1;
- uint32_t offset2; uint32_t reserved;
+ u16 offset0;
+ u16 selector;
+ u16 ist : 3;
+ u16 : 5;
+ u16 type : 4;
+ u16 : 1;
+ u16 dpl : 2;
+ u16 p : 1;
+ u16 offset1;
+ u32 offset2; u32 reserved;
};
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
@@ -1262,8 +1261,8 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
#define kvm_asm_safe(insn, inputs...) \
({ \
- uint64_t ign_error_code; \
- uint8_t vector; \
+ u64 ign_error_code; \
+ u8 vector; \
\
asm volatile(KVM_ASM_SAFE(insn) \
: KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \
@@ -1274,7 +1273,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
#define kvm_asm_safe_ec(insn, error_code, inputs...) \
({ \
- uint8_t vector; \
+ u8 vector; \
\
asm volatile(KVM_ASM_SAFE(insn) \
: KVM_ASM_SAFE_OUTPUTS(vector, error_code) \
@@ -1285,8 +1284,8 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
#define kvm_asm_safe_fep(insn, inputs...) \
({ \
- uint64_t ign_error_code; \
- uint8_t vector; \
+ u64 ign_error_code; \
+ u8 vector; \
\
asm volatile(KVM_ASM_SAFE_FEP(insn) \
: KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \
@@ -1297,7 +1296,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
#define kvm_asm_safe_ec_fep(insn, error_code, inputs...) \
({ \
- uint8_t vector; \
+ u8 vector; \
\
asm volatile(KVM_ASM_SAFE_FEP(insn) \
: KVM_ASM_SAFE_OUTPUTS(vector, error_code) \
@@ -1307,11 +1306,11 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
})
#define BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP) \
-static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val) \
+static inline u8 insn##_safe ##_fep(u32 idx, u64 *val) \
{ \
- uint64_t error_code; \
- uint8_t vector; \
- uint32_t a, d; \
+ u64 error_code; \
+ u8 vector; \
+ u32 a, d; \
\
asm volatile(KVM_ASM_SAFE##_FEP(#insn) \
: "=a"(a), "=d"(d), \
@@ -1319,7 +1318,7 @@ static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val) \
: "c"(idx) \
: KVM_ASM_SAFE_CLOBBERS); \
\
- *val = (uint64_t)a | ((uint64_t)d << 32); \
+ *val = (u64)a | ((u64)d << 32); \
return vector; \
}
@@ -1335,12 +1334,12 @@ BUILD_READ_U64_SAFE_HELPERS(rdmsr)
BUILD_READ_U64_SAFE_HELPERS(rdpmc)
BUILD_READ_U64_SAFE_HELPERS(xgetbv)
-static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
+static inline u8 wrmsr_safe(u32 msr, u64 val)
{
return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
}
-static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value)
+static inline u8 xsetbv_safe(u32 index, u64 value)
{
u32 eax = value;
u32 edx = value >> 32;
@@ -1395,23 +1394,20 @@ static inline bool kvm_is_lbrv_enabled(void)
return !!get_kvm_amd_param_integer("lbrv");
}
-uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr);
+u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva);
-uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
- uint64_t a3);
-uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
-void xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
+u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3);
+u64 __xen_hypercall(u64 nr, u64 a0, void *a1);
+void xen_hypercall(u64 nr, u64 a0, void *a1);
-static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa,
- uint64_t size, uint64_t flags)
+static inline u64 __kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags)
{
return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
}
-static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
- uint64_t flags)
+static inline void kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags)
{
- uint64_t ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);
+ u64 ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);
GUEST_ASSERT(!ret);
}
@@ -1456,7 +1452,7 @@ static inline void cli(void)
asm volatile ("cli");
}
-void __vm_xsave_require_permission(uint64_t xfeature, const char *name);
+void __vm_xsave_require_permission(u64 xfeature, const char *name);
#define vm_xsave_require_permission(xfeature) \
__vm_xsave_require_permission(xfeature, #xfeature)
@@ -1511,17 +1507,17 @@ enum pg_level {
void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels,
struct pte_masks *pte_masks);
-void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
- uint64_t paddr, int level);
-void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- uint64_t nr_bytes, int level);
+void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,
+ gpa_t gpa, int level);
+void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
+ u64 nr_bytes, int level);
void vm_enable_tdp(struct kvm_vm *vm);
bool kvm_cpu_has_tdp(void);
-void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size);
+void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size);
void tdp_identity_map_default_memslots(struct kvm_vm *vm);
-void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size);
-uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa);
+void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size);
+u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa);
/*
* Basic CPU control in CR0
diff --git a/tools/testing/selftests/kvm/include/x86/sev.h b/tools/testing/selftests/kvm/include/x86/sev.h
index 008b4169f5e2..1af44c151d60 100644
--- a/tools/testing/selftests/kvm/include/x86/sev.h
+++ b/tools/testing/selftests/kvm/include/x86/sev.h
@@ -46,16 +46,16 @@ static inline bool is_sev_vm(struct kvm_vm *vm)
return is_sev_es_vm(vm) || vm->type == KVM_X86_SEV_VM;
}
-void sev_vm_launch(struct kvm_vm *vm, uint32_t policy);
-void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement);
+void sev_vm_launch(struct kvm_vm *vm, u32 policy);
+void sev_vm_launch_measure(struct kvm_vm *vm, u8 *measurement);
void sev_vm_launch_finish(struct kvm_vm *vm);
-void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy);
+void snp_vm_launch_start(struct kvm_vm *vm, u64 policy);
void snp_vm_launch_update(struct kvm_vm *vm);
void snp_vm_launch_finish(struct kvm_vm *vm);
-struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+struct kvm_vm *vm_sev_create_with_one_vcpu(u32 type, void *guest_code,
struct kvm_vcpu **cpu);
-void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement);
+void vm_sev_launch(struct kvm_vm *vm, u64 policy, u8 *measurement);
kvm_static_assert(SEV_RET_SUCCESS == 0);
@@ -85,7 +85,7 @@ static inline u64 snp_default_policy(void)
unsigned long raw; \
} sev_cmd = { .c = { \
.id = (cmd), \
- .data = (uint64_t)(arg), \
+ .data = (u64)(arg), \
.sev_fd = (vm)->arch.sev_fd, \
} }; \
\
@@ -120,8 +120,8 @@ static inline void sev_register_encrypted_memory(struct kvm_vm *vm,
vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
}
-static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
- uint64_t size)
+static inline void sev_launch_update_data(struct kvm_vm *vm, gpa_t gpa,
+ u64 size)
{
struct kvm_sev_launch_update_data update_data = {
.uaddr = (unsigned long)addr_gpa2hva(vm, gpa),
@@ -131,8 +131,8 @@ static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data);
}
-static inline void snp_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
- uint64_t hva, uint64_t size, uint8_t type)
+static inline void snp_launch_update_data(struct kvm_vm *vm, gpa_t gpa,
+ u64 hva, u64 size, u8 type)
{
struct kvm_sev_snp_launch_update update_data = {
.uaddr = hva,
diff --git a/tools/testing/selftests/kvm/include/x86/smm.h b/tools/testing/selftests/kvm/include/x86/smm.h
index 19337c34f13e..2d1afa09819b 100644
--- a/tools/testing/selftests/kvm/include/x86/smm.h
+++ b/tools/testing/selftests/kvm/include/x86/smm.h
@@ -8,8 +8,7 @@
#define SMRAM_MEMSLOT ((1 << 16) | 1)
#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
-void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
- uint64_t smram_gpa,
+void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, u64 smram_gpa,
const void *smi_handler, size_t handler_size);
void inject_smi(struct kvm_vcpu *vcpu);
diff --git a/tools/testing/selftests/kvm/include/x86/svm_util.h b/tools/testing/selftests/kvm/include/x86/svm_util.h
index 5d7c42534bc4..6c013eb838be 100644
--- a/tools/testing/selftests/kvm/include/x86/svm_util.h
+++ b/tools/testing/selftests/kvm/include/x86/svm_util.h
@@ -16,20 +16,20 @@ struct svm_test_data {
/* VMCB */
struct vmcb *vmcb; /* gva */
void *vmcb_hva;
- uint64_t vmcb_gpa;
+ u64 vmcb_gpa;
/* host state-save area */
struct vmcb_save_area *save_area; /* gva */
void *save_area_hva;
- uint64_t save_area_gpa;
+ u64 save_area_gpa;
/* MSR-Bitmap */
void *msr; /* gva */
void *msr_hva;
- uint64_t msr_gpa;
+ u64 msr_gpa;
/* NPT */
- uint64_t ncr3_gpa;
+ u64 ncr3_gpa;
};
static inline void vmmcall(void)
@@ -56,9 +56,9 @@ static inline void vmmcall(void)
"clgi\n" \
)
-struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
+struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, gva_t *p_svm_gva);
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
-void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
+void run_guest(struct vmcb *vmcb, u64 vmcb_gpa);
static inline bool kvm_cpu_has_npt(void)
{
diff --git a/tools/testing/selftests/kvm/include/x86/ucall.h b/tools/testing/selftests/kvm/include/x86/ucall.h
index d3825dcc3cd9..0e4950041e3e 100644
--- a/tools/testing/selftests/kvm/include/x86/ucall.h
+++ b/tools/testing/selftests/kvm/include/x86/ucall.h
@@ -6,7 +6,7 @@
#define UCALL_EXIT_REASON KVM_EXIT_IO
-static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
}
diff --git a/tools/testing/selftests/kvm/include/x86/vmx.h b/tools/testing/selftests/kvm/include/x86/vmx.h
index 92b918700d24..90fffaf91595 100644
--- a/tools/testing/selftests/kvm/include/x86/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86/vmx.h
@@ -285,16 +285,16 @@ enum vmcs_field {
};
struct vmx_msr_entry {
- uint32_t index;
- uint32_t reserved;
- uint64_t value;
+ u32 index;
+ u32 reserved;
+ u64 value;
} __attribute__ ((aligned(16)));
#include "evmcs.h"
-static inline int vmxon(uint64_t phys)
+static inline int vmxon(u64 phys)
{
- uint8_t ret;
+ u8 ret;
__asm__ __volatile__ ("vmxon %[pa]; setna %[ret]"
: [ret]"=rm"(ret)
@@ -309,9 +309,9 @@ static inline void vmxoff(void)
__asm__ __volatile__("vmxoff");
}
-static inline int vmclear(uint64_t vmcs_pa)
+static inline int vmclear(u64 vmcs_pa)
{
- uint8_t ret;
+ u8 ret;
__asm__ __volatile__ ("vmclear %[pa]; setna %[ret]"
: [ret]"=rm"(ret)
@@ -321,9 +321,9 @@ static inline int vmclear(uint64_t vmcs_pa)
return ret;
}
-static inline int vmptrld(uint64_t vmcs_pa)
+static inline int vmptrld(u64 vmcs_pa)
{
- uint8_t ret;
+ u8 ret;
if (enable_evmcs)
return -1;
@@ -336,10 +336,10 @@ static inline int vmptrld(uint64_t vmcs_pa)
return ret;
}
-static inline int vmptrst(uint64_t *value)
+static inline int vmptrst(u64 *value)
{
- uint64_t tmp;
- uint8_t ret;
+ u64 tmp;
+ u8 ret;
if (enable_evmcs)
return evmcs_vmptrst(value);
@@ -356,9 +356,9 @@ static inline int vmptrst(uint64_t *value)
* A wrapper around vmptrst that ignores errors and returns zero if the
* vmptrst instruction fails.
*/
-static inline uint64_t vmptrstz(void)
+static inline u64 vmptrstz(void)
{
- uint64_t value = 0;
+ u64 value = 0;
vmptrst(&value);
return value;
}
@@ -391,8 +391,8 @@ static inline int vmlaunch(void)
"pop %%rcx;"
"pop %%rbp;"
: [ret]"=&a"(ret)
- : [host_rsp]"r"((uint64_t)HOST_RSP),
- [host_rip]"r"((uint64_t)HOST_RIP)
+ : [host_rsp]"r"((u64)HOST_RSP),
+ [host_rip]"r"((u64)HOST_RIP)
: "memory", "cc", "rbx", "r8", "r9", "r10",
"r11", "r12", "r13", "r14", "r15");
return ret;
@@ -426,8 +426,8 @@ static inline int vmresume(void)
"pop %%rcx;"
"pop %%rbp;"
: [ret]"=&a"(ret)
- : [host_rsp]"r"((uint64_t)HOST_RSP),
- [host_rip]"r"((uint64_t)HOST_RIP)
+ : [host_rsp]"r"((u64)HOST_RSP),
+ [host_rip]"r"((u64)HOST_RIP)
: "memory", "cc", "rbx", "r8", "r9", "r10",
"r11", "r12", "r13", "r14", "r15");
return ret;
@@ -447,10 +447,10 @@ static inline void vmcall(void)
"r10", "r11", "r12", "r13", "r14", "r15");
}
-static inline int vmread(uint64_t encoding, uint64_t *value)
+static inline int vmread(u64 encoding, u64 *value)
{
- uint64_t tmp;
- uint8_t ret;
+ u64 tmp;
+ u8 ret;
if (enable_evmcs)
return evmcs_vmread(encoding, value);
@@ -468,16 +468,16 @@ static inline int vmread(uint64_t encoding, uint64_t *value)
* A wrapper around vmread that ignores errors and returns zero if the
* vmread instruction fails.
*/
-static inline uint64_t vmreadz(uint64_t encoding)
+static inline u64 vmreadz(u64 encoding)
{
- uint64_t value = 0;
+ u64 value = 0;
vmread(encoding, &value);
return value;
}
-static inline int vmwrite(uint64_t encoding, uint64_t value)
+static inline int vmwrite(u64 encoding, u64 value)
{
- uint8_t ret;
+ u8 ret;
if (enable_evmcs)
return evmcs_vmwrite(encoding, value);
@@ -490,41 +490,41 @@ static inline int vmwrite(uint64_t encoding, uint64_t value)
return ret;
}
-static inline uint32_t vmcs_revision(void)
+static inline u32 vmcs_revision(void)
{
return rdmsr(MSR_IA32_VMX_BASIC);
}
struct vmx_pages {
void *vmxon_hva;
- uint64_t vmxon_gpa;
+ u64 vmxon_gpa;
void *vmxon;
void *vmcs_hva;
- uint64_t vmcs_gpa;
+ u64 vmcs_gpa;
void *vmcs;
void *msr_hva;
- uint64_t msr_gpa;
+ u64 msr_gpa;
void *msr;
void *shadow_vmcs_hva;
- uint64_t shadow_vmcs_gpa;
+ u64 shadow_vmcs_gpa;
void *shadow_vmcs;
void *vmread_hva;
- uint64_t vmread_gpa;
+ u64 vmread_gpa;
void *vmread;
void *vmwrite_hva;
- uint64_t vmwrite_gpa;
+ u64 vmwrite_gpa;
void *vmwrite;
void *apic_access_hva;
- uint64_t apic_access_gpa;
+ u64 apic_access_gpa;
void *apic_access;
- uint64_t eptp_gpa;
+ u64 eptp_gpa;
};
union vmx_basic {
@@ -550,7 +550,7 @@ union vmx_ctrl_msr {
};
};
-struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
+struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, gva_t *p_vmx_gva);
bool prepare_for_vmx_operation(struct vmx_pages *vmx);
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
bool load_vmcs(struct vmx_pages *vmx);
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index c60a24a92829..fc5242fb956f 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -46,12 +46,12 @@ static const char * const test_stage_string[] = {
struct test_args {
struct kvm_vm *vm;
- uint64_t guest_test_virt_mem;
- uint64_t host_page_size;
- uint64_t host_num_pages;
- uint64_t large_page_size;
- uint64_t large_num_pages;
- uint64_t host_pages_per_lpage;
+ u64 guest_test_virt_mem;
+ u64 host_page_size;
+ u64 host_num_pages;
+ u64 large_page_size;
+ u64 large_num_pages;
+ u64 host_pages_per_lpage;
enum vm_mem_backing_src_type src_type;
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
};
@@ -63,7 +63,7 @@ struct test_args {
static enum test_stage guest_test_stage;
/* Host variables */
-static uint32_t nr_vcpus = 1;
+static u32 nr_vcpus = 1;
static struct test_args test_args;
static enum test_stage *current_stage;
static bool host_quit;
@@ -77,19 +77,19 @@ static sem_t test_stage_completed;
* This will be set to the topmost valid physical address minus
* the test memory size.
*/
-static uint64_t guest_test_phys_mem;
+static u64 guest_test_phys_mem;
/*
* Guest virtual memory offset of the testing memory slot.
* Must not conflict with identity mapped test code.
*/
-static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
static void guest_code(bool do_write)
{
struct test_args *p = &test_args;
enum test_stage *current_stage = &guest_test_stage;
- uint64_t addr;
+ u64 addr;
int i, j;
while (true) {
@@ -113,9 +113,9 @@ static void guest_code(bool do_write)
case KVM_CREATE_MAPPINGS:
for (i = 0; i < p->large_num_pages; i++) {
if (do_write)
- *(uint64_t *)addr = 0x0123456789ABCDEF;
+ *(u64 *)addr = 0x0123456789ABCDEF;
else
- READ_ONCE(*(uint64_t *)addr);
+ READ_ONCE(*(u64 *)addr);
addr += p->large_page_size;
}
@@ -131,7 +131,7 @@ static void guest_code(bool do_write)
case KVM_UPDATE_MAPPINGS:
if (p->src_type == VM_MEM_SRC_ANONYMOUS) {
for (i = 0; i < p->host_num_pages; i++) {
- *(uint64_t *)addr = 0x0123456789ABCDEF;
+ *(u64 *)addr = 0x0123456789ABCDEF;
addr += p->host_page_size;
}
break;
@@ -142,7 +142,7 @@ static void guest_code(bool do_write)
* Write to the first host page in each large
* page region, and triger break of large pages.
*/
- *(uint64_t *)addr = 0x0123456789ABCDEF;
+ *(u64 *)addr = 0x0123456789ABCDEF;
/*
* Access the middle host pages in each large
@@ -152,7 +152,7 @@ static void guest_code(bool do_write)
*/
addr += p->large_page_size / 2;
for (j = 0; j < p->host_pages_per_lpage / 2; j++) {
- READ_ONCE(*(uint64_t *)addr);
+ READ_ONCE(*(u64 *)addr);
addr += p->host_page_size;
}
}
@@ -167,7 +167,7 @@ static void guest_code(bool do_write)
*/
case KVM_ADJUST_MAPPINGS:
for (i = 0; i < p->host_num_pages; i++) {
- READ_ONCE(*(uint64_t *)addr);
+ READ_ONCE(*(u64 *)addr);
addr += p->host_page_size;
}
break;
@@ -227,8 +227,8 @@ static void *vcpu_worker(void *data)
}
struct test_params {
- uint64_t phys_offset;
- uint64_t test_mem_size;
+ u64 phys_offset;
+ u64 test_mem_size;
enum vm_mem_backing_src_type src_type;
};
@@ -237,12 +237,12 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
int ret;
struct test_params *p = arg;
enum vm_mem_backing_src_type src_type = p->src_type;
- uint64_t large_page_size = get_backing_src_pagesz(src_type);
- uint64_t guest_page_size = vm_guest_mode_params[mode].page_size;
- uint64_t host_page_size = getpagesize();
- uint64_t test_mem_size = p->test_mem_size;
- uint64_t guest_num_pages;
- uint64_t alignment;
+ u64 large_page_size = get_backing_src_pagesz(src_type);
+ u64 guest_page_size = vm_guest_mode_params[mode].page_size;
+ u64 host_page_size = getpagesize();
+ u64 test_mem_size = p->test_mem_size;
+ u64 guest_num_pages;
+ u64 alignment;
void *host_test_mem;
struct kvm_vm *vm;
@@ -281,7 +281,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
/* Cache the HVA pointer of the region */
- host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
+ host_test_mem = addr_gpa2hva(vm, (gpa_t)guest_test_phys_mem);
/* Export shared structure test_args to guest */
sync_global_to_guest(vm, test_args);
@@ -292,7 +292,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
ret = sem_init(&test_stage_completed, 0, 0);
TEST_ASSERT(ret == 0, "Error in sem_init");
- current_stage = addr_gva2hva(vm, (vm_vaddr_t)(&guest_test_stage));
+ current_stage = addr_gva2hva(vm, (gva_t)(&guest_test_stage));
*current_stage = NUM_TEST_STAGES;
pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
@@ -304,7 +304,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
pr_info("Guest physical test memory offset: 0x%lx\n",
guest_test_phys_mem);
pr_info("Host virtual test memory offset: 0x%lx\n",
- (uint64_t)host_test_mem);
+ (u64)host_test_mem);
pr_info("Number of testing vCPUs: %d\n", nr_vcpus);
return vm;
diff --git a/tools/testing/selftests/kvm/lib/arm64/gic.c b/tools/testing/selftests/kvm/lib/arm64/gic.c
index b023868fe0b8..011dfe1dfcb3 100644
--- a/tools/testing/selftests/kvm/lib/arm64/gic.c
+++ b/tools/testing/selftests/kvm/lib/arm64/gic.c
@@ -50,7 +50,7 @@ static void gic_dist_init(enum gic_type type, unsigned int nr_cpus)
void gic_init(enum gic_type type, unsigned int nr_cpus)
{
- uint32_t cpu = guest_get_vcpuid();
+ u32 cpu = guest_get_vcpuid();
GUEST_ASSERT(type < GIC_TYPE_MAX);
GUEST_ASSERT(nr_cpus);
@@ -73,7 +73,7 @@ void gic_irq_disable(unsigned int intid)
unsigned int gic_get_and_ack_irq(void)
{
- uint64_t irqstat;
+ u64 irqstat;
unsigned int intid;
GUEST_ASSERT(gic_common_ops);
@@ -102,7 +102,7 @@ void gic_set_eoi_split(bool split)
gic_common_ops->gic_set_eoi_split(split);
}
-void gic_set_priority_mask(uint64_t pmr)
+void gic_set_priority_mask(u64 pmr)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_set_priority_mask(pmr);
diff --git a/tools/testing/selftests/kvm/lib/arm64/gic_private.h b/tools/testing/selftests/kvm/lib/arm64/gic_private.h
index b6a7e30c3eb1..6d393f5c5685 100644
--- a/tools/testing/selftests/kvm/lib/arm64/gic_private.h
+++ b/tools/testing/selftests/kvm/lib/arm64/gic_private.h
@@ -12,20 +12,20 @@ struct gic_common_ops {
void (*gic_cpu_init)(unsigned int cpu);
void (*gic_irq_enable)(unsigned int intid);
void (*gic_irq_disable)(unsigned int intid);
- uint64_t (*gic_read_iar)(void);
- void (*gic_write_eoir)(uint32_t irq);
- void (*gic_write_dir)(uint32_t irq);
+ u64 (*gic_read_iar)(void);
+ void (*gic_write_eoir)(u32 irq);
+ void (*gic_write_dir)(u32 irq);
void (*gic_set_eoi_split)(bool split);
- void (*gic_set_priority_mask)(uint64_t mask);
- void (*gic_set_priority)(uint32_t intid, uint32_t prio);
- void (*gic_irq_set_active)(uint32_t intid);
- void (*gic_irq_clear_active)(uint32_t intid);
- bool (*gic_irq_get_active)(uint32_t intid);
- void (*gic_irq_set_pending)(uint32_t intid);
- void (*gic_irq_clear_pending)(uint32_t intid);
- bool (*gic_irq_get_pending)(uint32_t intid);
- void (*gic_irq_set_config)(uint32_t intid, bool is_edge);
- void (*gic_irq_set_group)(uint32_t intid, bool group);
+ void (*gic_set_priority_mask)(u64 mask);
+ void (*gic_set_priority)(u32 intid, u32 prio);
+ void (*gic_irq_set_active)(u32 intid);
+ void (*gic_irq_clear_active)(u32 intid);
+ bool (*gic_irq_get_active)(u32 intid);
+ void (*gic_irq_set_pending)(u32 intid);
+ void (*gic_irq_clear_pending)(u32 intid);
+ bool (*gic_irq_get_pending)(u32 intid);
+ void (*gic_irq_set_config)(u32 intid, bool is_edge);
+ void (*gic_irq_set_group)(u32 intid, bool group);
};
extern const struct gic_common_ops gicv3_ops;
diff --git a/tools/testing/selftests/kvm/lib/arm64/gic_v3.c b/tools/testing/selftests/kvm/lib/arm64/gic_v3.c
index 50754a27f493..a99a53accfe9 100644
--- a/tools/testing/selftests/kvm/lib/arm64/gic_v3.c
+++ b/tools/testing/selftests/kvm/lib/arm64/gic_v3.c
@@ -50,13 +50,13 @@ static void gicv3_gicd_wait_for_rwp(void)
}
}
-static inline volatile void *gicr_base_cpu(uint32_t cpu)
+static inline volatile void *gicr_base_cpu(u32 cpu)
{
/* Align all the redistributors sequentially */
return GICR_BASE_GVA + cpu * SZ_64K * 2;
}
-static void gicv3_gicr_wait_for_rwp(uint32_t cpu)
+static void gicv3_gicr_wait_for_rwp(u32 cpu)
{
unsigned int count = 100000; /* 1s */
@@ -66,7 +66,7 @@ static void gicv3_gicr_wait_for_rwp(uint32_t cpu)
}
}
-static void gicv3_wait_for_rwp(uint32_t cpu_or_dist)
+static void gicv3_wait_for_rwp(u32 cpu_or_dist)
{
if (cpu_or_dist & DIST_BIT)
gicv3_gicd_wait_for_rwp();
@@ -91,34 +91,34 @@ static enum gicv3_intid_range get_intid_range(unsigned int intid)
return INVALID_RANGE;
}
-static uint64_t gicv3_read_iar(void)
+static u64 gicv3_read_iar(void)
{
- uint64_t irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);
+ u64 irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);
dsb(sy);
return irqstat;
}
-static void gicv3_write_eoir(uint32_t irq)
+static void gicv3_write_eoir(u32 irq)
{
write_sysreg_s(irq, SYS_ICC_EOIR1_EL1);
isb();
}
-static void gicv3_write_dir(uint32_t irq)
+static void gicv3_write_dir(u32 irq)
{
write_sysreg_s(irq, SYS_ICC_DIR_EL1);
isb();
}
-static void gicv3_set_priority_mask(uint64_t mask)
+static void gicv3_set_priority_mask(u64 mask)
{
write_sysreg_s(mask, SYS_ICC_PMR_EL1);
}
static void gicv3_set_eoi_split(bool split)
{
- uint32_t val;
+ u32 val;
/*
* All other fields are read-only, so no need to read CTLR first. In
@@ -129,29 +129,29 @@ static void gicv3_set_eoi_split(bool split)
isb();
}
-uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset)
+u32 gicv3_reg_readl(u32 cpu_or_dist, u64 offset)
{
volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA
: sgi_base_from_redist(gicr_base_cpu(cpu_or_dist));
return readl(base + offset);
}
-void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val)
+void gicv3_reg_writel(u32 cpu_or_dist, u64 offset, u32 reg_val)
{
volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA
: sgi_base_from_redist(gicr_base_cpu(cpu_or_dist));
writel(reg_val, base + offset);
}
-uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask)
+u32 gicv3_getl_fields(u32 cpu_or_dist, u64 offset, u32 mask)
{
return gicv3_reg_readl(cpu_or_dist, offset) & mask;
}
-void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
- uint32_t mask, uint32_t reg_val)
+void gicv3_setl_fields(u32 cpu_or_dist, u64 offset,
+ u32 mask, u32 reg_val)
{
- uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;
+ u32 tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;
tmp |= (reg_val & mask);
gicv3_reg_writel(cpu_or_dist, offset, tmp);
@@ -165,14 +165,14 @@ void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
* map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being
* marked as "Reserved" in the Distributor map.
*/
-static void gicv3_access_reg(uint32_t intid, uint64_t offset,
- uint32_t reg_bits, uint32_t bits_per_field,
- bool write, uint32_t *val)
+static void gicv3_access_reg(u32 intid, u64 offset,
+ u32 reg_bits, u32 bits_per_field,
+ bool write, u32 *val)
{
- uint32_t cpu = guest_get_vcpuid();
+ u32 cpu = guest_get_vcpuid();
enum gicv3_intid_range intid_range = get_intid_range(intid);
- uint32_t fields_per_reg, index, mask, shift;
- uint32_t cpu_or_dist;
+ u32 fields_per_reg, index, mask, shift;
+ u32 cpu_or_dist;
GUEST_ASSERT(bits_per_field <= reg_bits);
GUEST_ASSERT(!write || *val < (1U << bits_per_field));
@@ -197,32 +197,32 @@ static void gicv3_access_reg(uint32_t intid, uint64_t offset,
*val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift;
}
-static void gicv3_write_reg(uint32_t intid, uint64_t offset,
- uint32_t reg_bits, uint32_t bits_per_field, uint32_t val)
+static void gicv3_write_reg(u32 intid, u64 offset,
+ u32 reg_bits, u32 bits_per_field, u32 val)
{
gicv3_access_reg(intid, offset, reg_bits,
bits_per_field, true, &val);
}
-static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset,
- uint32_t reg_bits, uint32_t bits_per_field)
+static u32 gicv3_read_reg(u32 intid, u64 offset,
+ u32 reg_bits, u32 bits_per_field)
{
- uint32_t val;
+ u32 val;
gicv3_access_reg(intid, offset, reg_bits,
bits_per_field, false, &val);
return val;
}
-static void gicv3_set_priority(uint32_t intid, uint32_t prio)
+static void gicv3_set_priority(u32 intid, u32 prio)
{
gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio);
}
/* Sets the intid to be level-sensitive or edge-triggered. */
-static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
+static void gicv3_irq_set_config(u32 intid, bool is_edge)
{
- uint32_t val;
+ u32 val;
/* N/A for private interrupts. */
GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE);
@@ -230,57 +230,57 @@ static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val);
}
-static void gicv3_irq_enable(uint32_t intid)
+static void gicv3_irq_enable(u32 intid)
{
bool is_spi = get_intid_range(intid) == SPI_RANGE;
- uint32_t cpu = guest_get_vcpuid();
+ u32 cpu = guest_get_vcpuid();
gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1);
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}
-static void gicv3_irq_disable(uint32_t intid)
+static void gicv3_irq_disable(u32 intid)
{
bool is_spi = get_intid_range(intid) == SPI_RANGE;
- uint32_t cpu = guest_get_vcpuid();
+ u32 cpu = guest_get_vcpuid();
gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1);
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}
-static void gicv3_irq_set_active(uint32_t intid)
+static void gicv3_irq_set_active(u32 intid)
{
gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1);
}
-static void gicv3_irq_clear_active(uint32_t intid)
+static void gicv3_irq_clear_active(u32 intid)
{
gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1);
}
-static bool gicv3_irq_get_active(uint32_t intid)
+static bool gicv3_irq_get_active(u32 intid)
{
return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1);
}
-static void gicv3_irq_set_pending(uint32_t intid)
+static void gicv3_irq_set_pending(u32 intid)
{
gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1);
}
-static void gicv3_irq_clear_pending(uint32_t intid)
+static void gicv3_irq_clear_pending(u32 intid)
{
gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1);
}
-static bool gicv3_irq_get_pending(uint32_t intid)
+static bool gicv3_irq_get_pending(u32 intid)
{
return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1);
}
static void gicv3_enable_redist(volatile void *redist_base)
{
- uint32_t val = readl(redist_base + GICR_WAKER);
+ u32 val = readl(redist_base + GICR_WAKER);
unsigned int count = 100000; /* 1s */
val &= ~GICR_WAKER_ProcessorSleep;
@@ -293,10 +293,10 @@ static void gicv3_enable_redist(volatile void *redist_base)
}
}
-static void gicv3_set_group(uint32_t intid, bool grp)
+static void gicv3_set_group(u32 intid, bool grp)
{
- uint32_t cpu_or_dist;
- uint32_t val;
+ u32 cpu_or_dist;
+ u32 val;
cpu_or_dist = (get_intid_range(intid) == SPI_RANGE) ? DIST_BIT : guest_get_vcpuid();
val = gicv3_reg_readl(cpu_or_dist, GICD_IGROUPR + (intid / 32) * 4);
@@ -424,8 +424,8 @@ const struct gic_common_ops gicv3_ops = {
.gic_irq_set_group = gicv3_set_group,
};
-void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size,
- vm_paddr_t pend_table)
+void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size,
+ gpa_t pend_table)
{
volatile void *rdist_base = gicr_base_cpu(guest_get_vcpuid());
diff --git a/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c b/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c
index 7f9fdcf42ae6..1188b578121d 100644
--- a/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c
+++ b/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c
@@ -54,7 +54,7 @@ static unsigned long its_find_baser(unsigned int type)
return -1;
}
-static void its_install_table(unsigned int type, vm_paddr_t base, size_t size)
+static void its_install_table(unsigned int type, gpa_t base, size_t size)
{
unsigned long offset = its_find_baser(type);
u64 baser;
@@ -69,7 +69,7 @@ static void its_install_table(unsigned int type, vm_paddr_t base, size_t size)
its_write_u64(offset, baser);
}
-static void its_install_cmdq(vm_paddr_t base, size_t size)
+static void its_install_cmdq(gpa_t base, size_t size)
{
u64 cbaser;
@@ -82,9 +82,8 @@ static void its_install_cmdq(vm_paddr_t base, size_t size)
its_write_u64(GITS_CBASER, cbaser);
}
-void its_init(vm_paddr_t coll_tbl, size_t coll_tbl_sz,
- vm_paddr_t device_tbl, size_t device_tbl_sz,
- vm_paddr_t cmdq, size_t cmdq_size)
+void its_init(gpa_t coll_tbl, size_t coll_tbl_sz, gpa_t device_tbl,
+ size_t device_tbl_sz, gpa_t cmdq, size_t cmdq_size)
{
u32 ctlr;
@@ -204,7 +203,7 @@ static void its_send_cmd(void *cmdq_base, struct its_cmd_block *cmd)
}
}
-void its_send_mapd_cmd(void *cmdq_base, u32 device_id, vm_paddr_t itt_base,
+void its_send_mapd_cmd(void *cmdq_base, u32 device_id, gpa_t itt_base,
size_t itt_size, bool valid)
{
struct its_cmd_block cmd = {};
diff --git a/tools/testing/selftests/kvm/lib/arm64/processor.c b/tools/testing/selftests/kvm/lib/arm64/processor.c
index 43ea40edc533..01325bf4d36f 100644
--- a/tools/testing/selftests/kvm/lib/arm64/processor.c
+++ b/tools/testing/selftests/kvm/lib/arm64/processor.c
@@ -19,20 +19,20 @@
#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000
-static vm_vaddr_t exception_handlers;
+static gva_t exception_handlers;
-static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
+static u64 pgd_index(struct kvm_vm *vm, gva_t gva)
{
unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
- uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;
+ u64 mask = (1UL << (vm->va_bits - shift)) - 1;
return (gva >> shift) & mask;
}
-static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
+static u64 pud_index(struct kvm_vm *vm, gva_t gva)
{
unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
- uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
+ u64 mask = (1UL << (vm->page_shift - 3)) - 1;
TEST_ASSERT(vm->mmu.pgtable_levels == 4,
"Mode %d does not have 4 page table levels", vm->mode);
@@ -40,10 +40,10 @@ static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
return (gva >> shift) & mask;
}
-static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
+static u64 pmd_index(struct kvm_vm *vm, gva_t gva)
{
unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
- uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
+ u64 mask = (1UL << (vm->page_shift - 3)) - 1;
TEST_ASSERT(vm->mmu.pgtable_levels >= 3,
"Mode %d does not have >= 3 page table levels", vm->mode);
@@ -51,9 +51,9 @@ static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
return (gva >> shift) & mask;
}
-static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
+static u64 pte_index(struct kvm_vm *vm, gva_t gva)
{
- uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
+ u64 mask = (1UL << (vm->page_shift - 3)) - 1;
return (gva >> vm->page_shift) & mask;
}
@@ -63,9 +63,9 @@ static inline bool use_lpa2_pte_format(struct kvm_vm *vm)
(vm->pa_bits > 48 || vm->va_bits > 48);
}
-static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
+static u64 addr_pte(struct kvm_vm *vm, u64 pa, u64 attrs)
{
- uint64_t pte;
+ u64 pte;
if (use_lpa2_pte_format(vm)) {
pte = pa & PTE_ADDR_MASK_LPA2(vm->page_shift);
@@ -81,9 +81,9 @@ static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
return pte;
}
-static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
+static u64 pte_addr(struct kvm_vm *vm, u64 pte)
{
- uint64_t pa;
+ u64 pa;
if (use_lpa2_pte_format(vm)) {
pa = pte & PTE_ADDR_MASK_LPA2(vm->page_shift);
@@ -97,13 +97,13 @@ static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
return pa;
}
-static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
+static u64 ptrs_per_pgd(struct kvm_vm *vm)
{
unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
return 1 << (vm->va_bits - shift);
}
-static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
+static u64 __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
return 1 << (vm->page_shift - 3);
}
@@ -121,47 +121,46 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
vm->mmu.pgd_created = true;
}
-static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- uint64_t flags)
+static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
+ u64 flags)
{
- uint8_t attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT);
- uint64_t pg_attr;
- uint64_t *ptep;
+ u8 attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT);
+ u64 pg_attr;
+ u64 *ptep;
- TEST_ASSERT((vaddr % vm->page_size) == 0,
+ TEST_ASSERT((gva % vm->page_size) == 0,
"Virtual address not on page boundary,\n"
- " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
- TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
- (vaddr >> vm->page_shift)),
- "Invalid virtual address, vaddr: 0x%lx", vaddr);
- TEST_ASSERT((paddr % vm->page_size) == 0,
- "Physical address not on page boundary,\n"
- " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
- TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
- "Physical address beyond beyond maximum supported,\n"
- " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
- paddr, vm->max_gfn, vm->page_size);
-
- ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, vaddr) * 8;
+ " gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
+ TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+ "Invalid virtual address, gva: 0x%lx", gva);
+ TEST_ASSERT((gpa % vm->page_size) == 0,
+ "Physical address not on page boundary,\n"
+ " gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
+ TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
+ "Physical address beyond maximum supported,\n"
+ " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ gpa, vm->max_gfn, vm->page_size);
+
+ ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, gva) * 8;
if (!*ptep)
*ptep = addr_pte(vm, vm_alloc_page_table(vm),
PGD_TYPE_TABLE | PTE_VALID);
switch (vm->mmu.pgtable_levels) {
case 4:
- ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
if (!*ptep)
*ptep = addr_pte(vm, vm_alloc_page_table(vm),
PUD_TYPE_TABLE | PTE_VALID);
/* fall through */
case 3:
- ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
if (!*ptep)
*ptep = addr_pte(vm, vm_alloc_page_table(vm),
PMD_TYPE_TABLE | PTE_VALID);
/* fall through */
case 2:
- ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
break;
default:
TEST_FAIL("Page table levels must be 2, 3, or 4");
@@ -171,19 +170,19 @@ static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
if (!use_lpa2_pte_format(vm))
pg_attr |= PTE_SHARED;
- *ptep = addr_pte(vm, paddr, pg_attr);
+ *ptep = addr_pte(vm, gpa, pg_attr);
}
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
- uint64_t attr_idx = MT_NORMAL;
+ u64 attr_idx = MT_NORMAL;
- _virt_pg_map(vm, vaddr, paddr, attr_idx);
+ _virt_pg_map(vm, gva, gpa, attr_idx);
}
-uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level)
+u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level)
{
- uint64_t *ptep;
+ u64 *ptep;
if (!vm->mmu.pgd_created)
goto unmapped_gva;
@@ -225,23 +224,23 @@ unmapped_gva:
exit(EXIT_FAILURE);
}
-uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
+u64 *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva)
{
return virt_get_pte_hva_at_level(vm, gva, 3);
}
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
- uint64_t *ptep = virt_get_pte_hva(vm, gva);
+ u64 *ptep = virt_get_pte_hva(vm, gva);
return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}
-static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
+static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level)
{
#ifdef DEBUG
static const char * const type[] = { "", "pud", "pmd", "pte" };
- uint64_t pte, *ptep;
+ u64 pte, *ptep;
if (level == 4)
return;
@@ -256,10 +255,10 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t p
#endif
}
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
int level = 4 - (vm->mmu.pgtable_levels - 1);
- uint64_t pgd, *ptep;
+ u64 pgd, *ptep;
if (!vm->mmu.pgd_created)
return;
@@ -298,7 +297,7 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
{
struct kvm_vcpu_init default_init = { .target = -1, };
struct kvm_vm *vm = vcpu->vm;
- uint64_t sctlr_el1, tcr_el1, ttbr0_el1;
+ u64 sctlr_el1, tcr_el1, ttbr0_el1;
if (!init) {
kvm_get_default_vcpu_target(vm, &default_init);
@@ -397,9 +396,9 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
HCR_EL2_RW | HCR_EL2_TGE | HCR_EL2_E2H);
}
-void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
{
- uint64_t pstate, pc;
+ u64 pstate, pc;
pstate = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate));
pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));
@@ -410,29 +409,29 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
- vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+ vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (u64)guest_code);
}
-static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
struct kvm_vcpu_init *init)
{
size_t stack_size;
- uint64_t stack_vaddr;
+ gva_t stack_gva;
struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
vm->page_size;
- stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
- DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
- MEM_REGION_DATA);
+ stack_gva = __vm_alloc(vm, stack_size,
+ DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
+ MEM_REGION_DATA);
aarch64_vcpu_setup(vcpu, init);
- vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_vaddr + stack_size);
+ vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_gva + stack_size);
return vcpu;
}
-struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
struct kvm_vcpu_init *init, void *guest_code)
{
struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init);
@@ -442,7 +441,7 @@ struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
return vcpu;
}
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
return __aarch64_vcpu_add(vm, vcpu_id, NULL);
}
@@ -459,13 +458,13 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
for (i = 0; i < num; i++) {
vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]),
- va_arg(ap, uint64_t));
+ va_arg(ap, u64));
}
va_end(ap);
}
-void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
+void kvm_exit_unexpected_exception(int vector, u64 ec, bool valid_ec)
{
ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);
while (1)
@@ -498,7 +497,7 @@ void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
extern char vectors;
- vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_VBAR_EL1), (uint64_t)&vectors);
+ vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_VBAR_EL1), (u64)&vectors);
}
void route_exception(struct ex_regs *regs, int vector)
@@ -536,10 +535,10 @@ unexpected_exception:
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
- vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
- vm->page_size, MEM_REGION_DATA);
+ vm->handlers = __vm_alloc(vm, sizeof(struct handlers), vm->page_size,
+ MEM_REGION_DATA);
- *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
+ *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers;
}
void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
@@ -563,13 +562,13 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
handlers->exception_handlers[vector][0] = handler;
}
-uint32_t guest_get_vcpuid(void)
+u32 guest_get_vcpuid(void)
{
return read_sysreg(tpidr_el1);
}
-static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran,
- uint32_t not_sup_val, uint32_t ipa52_min_val)
+static u32 max_ipa_for_page_size(u32 vm_ipa, u32 gran,
+ u32 not_sup_val, u32 ipa52_min_val)
{
if (gran == not_sup_val)
return 0;
@@ -579,16 +578,16 @@ static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran,
return min(vm_ipa, 48U);
}
-void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
- uint32_t *ipa16k, uint32_t *ipa64k)
+void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k,
+ u32 *ipa16k, u32 *ipa64k)
{
struct kvm_vcpu_init preferred_init;
int kvm_fd, vm_fd, vcpu_fd, err;
- uint64_t val;
- uint32_t gran;
+ u64 val;
+ u32 gran;
struct kvm_one_reg reg = {
.id = KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
- .addr = (uint64_t)&val,
+ .addr = (u64)&val,
};
kvm_fd = open_kvm_dev_path_or_exit();
@@ -646,17 +645,17 @@ void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
: "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7")
-void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
- uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
- uint64_t arg6, struct arm_smccc_res *res)
+void smccc_hvc(u32 function_id, u64 arg0, u64 arg1,
+ u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+ u64 arg6, struct arm_smccc_res *res)
{
__smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
arg6, res);
}
-void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
- uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
- uint64_t arg6, struct arm_smccc_res *res)
+void smccc_smc(u32 function_id, u64 arg0, u64 arg1,
+ u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+ u64 arg6, struct arm_smccc_res *res)
{
__smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
arg6, res);
@@ -671,7 +670,7 @@ void kvm_selftest_arch_init(void)
guest_modes_append_default();
}
-void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
+void vm_populate_gva_bitmap(struct kvm_vm *vm)
{
/*
* arm64 selftests use only TTBR0_EL1, meaning that the valid VA space
diff --git a/tools/testing/selftests/kvm/lib/arm64/ucall.c b/tools/testing/selftests/kvm/lib/arm64/ucall.c
index ddab0ce89d4d..e0550ad5aa75 100644
--- a/tools/testing/selftests/kvm/lib/arm64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/arm64/ucall.c
@@ -6,17 +6,17 @@
*/
#include "kvm_util.h"
-vm_vaddr_t *ucall_exit_mmio_addr;
+gva_t *ucall_exit_mmio_addr;
-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
- vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
+ gva_t mmio_gva = vm_unused_gva_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
virt_map(vm, mmio_gva, mmio_gpa, 1);
vm->ucall_mmio_addr = mmio_gpa;
- write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva);
+ write_guest_global(vm, ucall_exit_mmio_addr, (gva_t *)mmio_gva);
}
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
@@ -25,9 +25,9 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_MMIO &&
run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) {
- TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t),
+ TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(u64),
"Unexpected ucall exit mmio address access");
- return (void *)(*((uint64_t *)run->mmio.data));
+ return (void *)(*((u64 *)run->mmio.data));
}
return NULL;
diff --git a/tools/testing/selftests/kvm/lib/arm64/vgic.c b/tools/testing/selftests/kvm/lib/arm64/vgic.c
index d0f7bd0984b8..4ecebf3146a2 100644
--- a/tools/testing/selftests/kvm/lib/arm64/vgic.c
+++ b/tools/testing/selftests/kvm/lib/arm64/vgic.c
@@ -41,10 +41,10 @@ bool kvm_supports_vgic_v3(void)
* redistributor regions of the guest. Since it depends on the number of
* vCPUs for the VM, it must be called after all the vCPUs have been created.
*/
-int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs)
+int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs)
{
int gic_fd;
- uint64_t attr;
+ u64 attr;
unsigned int nr_gic_pages;
/* Distributor setup */
@@ -77,7 +77,7 @@ void __vgic_v3_init(int fd)
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
}
-int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs)
+int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs)
{
unsigned int nr_vcpus_created = 0;
struct list_head *iter;
@@ -104,11 +104,11 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs)
}
/* should only work for level sensitive interrupts */
-int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
+int _kvm_irq_set_level_info(int gic_fd, u32 intid, int level)
{
- uint64_t attr = 32 * (intid / 32);
- uint64_t index = intid % 32;
- uint64_t val;
+ u64 attr = 32 * (intid / 32);
+ u64 index = intid % 32;
+ u64 val;
int ret;
ret = __kvm_device_attr_get(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
@@ -122,16 +122,16 @@ int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
return ret;
}
-void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
+void kvm_irq_set_level_info(int gic_fd, u32 intid, int level)
{
int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, ret));
}
-int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
+int _kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level)
{
- uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK;
+ u32 irq = intid & KVM_ARM_IRQ_NUM_MASK;
TEST_ASSERT(!INTID_IS_SGI(intid), "KVM_IRQ_LINE's interface itself "
"doesn't allow injecting SGIs. There's no mask for it.");
@@ -144,23 +144,23 @@ int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
return _kvm_irq_line(vm, irq, level);
}
-void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
+void kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level)
{
int ret = _kvm_arm_irq_line(vm, intid, level);
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
}
-static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu,
- uint64_t reg_off)
+static void vgic_poke_irq(int gic_fd, u32 intid, struct kvm_vcpu *vcpu,
+ u64 reg_off)
{
- uint64_t reg = intid / 32;
- uint64_t index = intid % 32;
- uint64_t attr = reg_off + reg * 4;
- uint64_t val;
+ u64 reg = intid / 32;
+ u64 index = intid % 32;
+ u64 attr = reg_off + reg * 4;
+ u64 val;
bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid);
- uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
+ u32 group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
: KVM_DEV_ARM_VGIC_GRP_DIST_REGS;
if (intid_is_private) {
@@ -183,12 +183,12 @@ static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu,
kvm_device_attr_set(gic_fd, group, attr, &val);
}
-void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu)
+void kvm_irq_write_ispendr(int gic_fd, u32 intid, struct kvm_vcpu *vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR);
}
-void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu)
+void kvm_irq_write_isactiver(int gic_fd, u32 intid, struct kvm_vcpu *vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER);
}
diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c
index f34d926d9735..b689c4df4a01 100644
--- a/tools/testing/selftests/kvm/lib/elf.c
+++ b/tools/testing/selftests/kvm/lib/elf.c
@@ -156,21 +156,20 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
TEST_ASSERT(phdr.p_memsz > 0, "Unexpected loadable segment "
"memsize of 0,\n"
" phdr index: %u p_memsz: 0x%" PRIx64,
- n1, (uint64_t) phdr.p_memsz);
- vm_vaddr_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size);
- vm_vaddr_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1;
+ n1, (u64)phdr.p_memsz);
+ gva_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size);
+ gva_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1;
seg_vend |= vm->page_size - 1;
size_t seg_size = seg_vend - seg_vstart + 1;
- vm_vaddr_t vaddr = __vm_vaddr_alloc(vm, seg_size, seg_vstart,
- MEM_REGION_CODE);
- TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate "
+ gva_t gva = __vm_alloc(vm, seg_size, seg_vstart, MEM_REGION_CODE);
+ TEST_ASSERT(gva == seg_vstart, "Unable to allocate "
"virtual memory for segment at requested min addr,\n"
" segment idx: %u\n"
" seg_vstart: 0x%lx\n"
- " vaddr: 0x%lx",
- n1, seg_vstart, vaddr);
- memset(addr_gva2hva(vm, vaddr), 0, seg_size);
+ " gva: 0x%lx",
+ n1, seg_vstart, gva);
+ memset(addr_gva2hva(vm, gva), 0, seg_size);
/* TODO(lhuemill): Set permissions of each memory segment
* based on the least-significant 3 bits of phdr.p_flags.
*/
diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c
index ce3099630397..7a96c43b5704 100644
--- a/tools/testing/selftests/kvm/lib/guest_modes.c
+++ b/tools/testing/selftests/kvm/lib/guest_modes.c
@@ -20,7 +20,7 @@ void guest_modes_append_default(void)
#ifdef __aarch64__
{
unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
- uint32_t ipa4k, ipa16k, ipa64k;
+ u32 ipa4k, ipa16k, ipa64k;
int i;
aarch64_get_supported_page_sizes(limit, &ipa4k, &ipa16k, &ipa64k);
diff --git a/tools/testing/selftests/kvm/lib/guest_sprintf.c b/tools/testing/selftests/kvm/lib/guest_sprintf.c
index 74627514c4d4..7a33965349a7 100644
--- a/tools/testing/selftests/kvm/lib/guest_sprintf.c
+++ b/tools/testing/selftests/kvm/lib/guest_sprintf.c
@@ -35,8 +35,8 @@ static int skip_atoi(const char **s)
({ \
int __res; \
\
- __res = ((uint64_t) n) % (uint32_t) base; \
- n = ((uint64_t) n) / (uint32_t) base; \
+ __res = ((u64)n) % (u32)base; \
+ n = ((u64)n) / (u32)base; \
__res; \
})
@@ -119,7 +119,7 @@ int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args)
{
char *str, *end;
const char *s;
- uint64_t num;
+ u64 num;
int i, base;
int len;
@@ -216,7 +216,7 @@ repeat:
while (--field_width > 0)
APPEND_BUFFER_SAFE(str, end, ' ');
APPEND_BUFFER_SAFE(str, end,
- (uint8_t)va_arg(args, int));
+ (u8)va_arg(args, int));
while (--field_width > 0)
APPEND_BUFFER_SAFE(str, end, ' ');
continue;
@@ -240,7 +240,7 @@ repeat:
flags |= SPECIAL | SMALL | ZEROPAD;
}
str = number(str, end,
- (uint64_t)va_arg(args, void *), 16,
+ (u64)va_arg(args, void *), 16,
field_width, precision, flags);
continue;
@@ -284,15 +284,15 @@ repeat:
continue;
}
if (qualifier == 'l')
- num = va_arg(args, uint64_t);
+ num = va_arg(args, u64);
else if (qualifier == 'h') {
- num = (uint16_t)va_arg(args, int);
+ num = (u16)va_arg(args, int);
if (flags & SIGN)
- num = (int16_t)num;
+ num = (s16)num;
} else if (flags & SIGN)
num = va_arg(args, int);
else
- num = va_arg(args, uint32_t);
+ num = va_arg(args, u32);
str = number(str, end, num, base, field_width, precision, flags);
}
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index f5e076591c64..2a76eca7029d 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -20,9 +20,9 @@
#define KVM_UTIL_MIN_PFN 2
-uint32_t guest_random_seed;
+u32 guest_random_seed;
struct guest_random_state guest_rng;
-static uint32_t last_guest_seed;
+static u32 last_guest_seed;
static size_t vcpu_mmap_sz(void);
@@ -165,7 +165,7 @@ unsigned int kvm_check_cap(long cap)
return (unsigned int)ret;
}
-void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
+void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size)
{
if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
@@ -189,7 +189,7 @@ static void vm_open(struct kvm_vm *vm)
vm->stats.fd = -1;
}
-const char *vm_guest_mode_string(uint32_t i)
+const char *vm_guest_mode_string(u32 i)
{
static const char * const strings[] = {
[VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages",
@@ -267,7 +267,7 @@ _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params)
* based on the MSB of the VA. On architectures with this behavior
* the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1), -1].
*/
-__weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
+__weak void vm_populate_gva_bitmap(struct kvm_vm *vm)
{
sparsebit_set_num(vm->vpages_valid,
0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
@@ -385,7 +385,7 @@ struct kvm_vm *____vm_create(struct vm_shape shape)
/* Limit to VA-bit canonical virtual addresses. */
vm->vpages_valid = sparsebit_alloc();
- vm_vaddr_populate_bitmap(vm);
+ vm_populate_gva_bitmap(vm);
/* Limit physical addresses to PA-bits. */
vm->max_gfn = vm_compute_max_gfn(vm);
@@ -396,12 +396,12 @@ struct kvm_vm *____vm_create(struct vm_shape shape)
return vm;
}
-static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
- uint32_t nr_runnable_vcpus,
- uint64_t extra_mem_pages)
+static u64 vm_nr_pages_required(enum vm_guest_mode mode,
+ u32 nr_runnable_vcpus,
+ u64 extra_mem_pages)
{
- uint64_t page_size = vm_guest_mode_params[mode].page_size;
- uint64_t nr_pages;
+ u64 page_size = vm_guest_mode_params[mode].page_size;
+ u64 nr_pages;
TEST_ASSERT(nr_runnable_vcpus,
"Use vm_create_barebones() for VMs that _never_ have vCPUs");
@@ -435,7 +435,7 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
return vm_adjust_num_guest_pages(mode, nr_pages);
}
-void kvm_set_files_rlimit(uint32_t nr_vcpus)
+void kvm_set_files_rlimit(u32 nr_vcpus)
{
/*
* Each vCPU will open two file descriptors: the vCPU itself and the
@@ -476,10 +476,10 @@ static bool is_guest_memfd_required(struct vm_shape shape)
#endif
}
-struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
- uint64_t nr_extra_pages)
+struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus,
+ u64 nr_extra_pages)
{
- uint64_t nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus,
+ u64 nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus,
nr_extra_pages);
struct userspace_mem_region *slot0;
struct kvm_vm *vm;
@@ -546,8 +546,8 @@ struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
* extra_mem_pages is only used to calculate the maximum page table size,
* no real memory allocation for non-slot0 memory in this function.
*/
-struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
- uint64_t extra_mem_pages,
+struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus,
+ u64 extra_mem_pages,
void *guest_code, struct kvm_vcpu *vcpus[])
{
struct kvm_vm *vm;
@@ -566,7 +566,7 @@ struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
struct kvm_vcpu **vcpu,
- uint64_t extra_mem_pages,
+ u64 extra_mem_pages,
void *guest_code)
{
struct kvm_vcpu *vcpus[1];
@@ -614,7 +614,7 @@ void kvm_vm_restart(struct kvm_vm *vmp)
}
__weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm,
- uint32_t vcpu_id)
+ u32 vcpu_id)
{
return __vm_vcpu_add(vm, vcpu_id);
}
@@ -636,9 +636,9 @@ int __pin_task_to_cpu(pthread_t task, int cpu)
return pthread_setaffinity_np(task, sizeof(cpuset), &cpuset);
}
-static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
+static u32 parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
{
- uint32_t pcpu = atoi_non_negative("CPU number", cpu_str);
+ u32 pcpu = atoi_non_negative("CPU number", cpu_str);
TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask),
"Not allowed to run on pCPU '%d', check cgroups?", pcpu);
@@ -662,7 +662,7 @@ void kvm_print_vcpu_pinning_help(void)
" (default: no pinning)\n", name, name);
}
-void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
+void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[],
int nr_vcpus)
{
cpu_set_t allowed_mask;
@@ -715,15 +715,15 @@ void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
* region exists.
*/
static struct userspace_mem_region *
-userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
+userspace_mem_region_find(struct kvm_vm *vm, u64 start, u64 end)
{
struct rb_node *node;
for (node = vm->regions.gpa_tree.rb_node; node; ) {
struct userspace_mem_region *region =
container_of(node, struct userspace_mem_region, gpa_node);
- uint64_t existing_start = region->region.guest_phys_addr;
- uint64_t existing_end = region->region.guest_phys_addr
+ u64 existing_start = region->region.guest_phys_addr;
+ u64 existing_end = region->region.guest_phys_addr
+ region->region.memory_size - 1;
if (start <= existing_end && end >= existing_start)
return region;
@@ -918,8 +918,8 @@ static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
}
-int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva)
+int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva)
{
struct kvm_userspace_memory_region region = {
.slot = slot,
@@ -932,8 +932,8 @@ int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags
return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region);
}
-void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva)
+void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva)
{
int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);
@@ -945,9 +945,9 @@ void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
__TEST_REQUIRE(kvm_has_cap(KVM_CAP_USER_MEMORY2), \
"KVM selftests now require KVM_SET_USER_MEMORY_REGION2 (introduced in v6.8)")
-int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva,
- uint32_t guest_memfd, uint64_t guest_memfd_offset)
+int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva,
+ u32 guest_memfd, u64 guest_memfd_offset)
{
struct kvm_userspace_memory_region2 region = {
.slot = slot,
@@ -964,9 +964,9 @@ int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flag
return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region);
}
-void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva,
- uint32_t guest_memfd, uint64_t guest_memfd_offset)
+void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva,
+ u32 guest_memfd, u64 guest_memfd_offset)
{
int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva,
guest_memfd, guest_memfd_offset);
@@ -978,8 +978,8 @@ void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags
/* FIXME: This thing needs to be ripped apart and rewritten. */
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
- uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags,
- int guest_memfd, uint64_t guest_memfd_offset)
+ gpa_t gpa, u32 slot, u64 npages, u32 flags,
+ int guest_memfd, u64 guest_memfd_offset)
{
int ret;
struct userspace_mem_region *region;
@@ -1016,8 +1016,8 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
" requested gpa: 0x%lx npages: 0x%lx page_size: 0x%x\n"
" existing gpa: 0x%lx size: 0x%lx",
gpa, npages, vm->page_size,
- (uint64_t) region->region.guest_phys_addr,
- (uint64_t) region->region.memory_size);
+ (u64)region->region.guest_phys_addr,
+ (u64)region->region.memory_size);
/* Confirm no region with the requested slot already exists. */
hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
@@ -1027,11 +1027,11 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
TEST_FAIL("A mem region with the requested slot "
"already exists.\n"
- " requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
- " existing slot: %u paddr: 0x%lx size: 0x%lx",
+ " requested slot: %u gpa: 0x%lx npages: 0x%lx\n"
+ " existing slot: %u gpa: 0x%lx size: 0x%lx",
slot, gpa, npages, region->region.slot,
- (uint64_t) region->region.guest_phys_addr,
- (uint64_t) region->region.memory_size);
+ (u64)region->region.guest_phys_addr,
+ (u64)region->region.memory_size);
}
/* Allocate and initialize new mem region structure. */
@@ -1085,7 +1085,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
if (flags & KVM_MEM_GUEST_MEMFD) {
if (guest_memfd < 0) {
- uint32_t guest_memfd_flags = 0;
+ u32 guest_memfd_flags = 0;
TEST_ASSERT(!guest_memfd_offset,
"Offset must be zero when creating new guest_memfd");
guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags);
@@ -1141,8 +1141,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
void vm_userspace_mem_region_add(struct kvm_vm *vm,
enum vm_mem_backing_src_type src_type,
- uint64_t gpa, uint32_t slot, uint64_t npages,
- uint32_t flags)
+ gpa_t gpa, u32 slot, u64 npages, u32 flags)
{
vm_mem_add(vm, src_type, gpa, slot, npages, flags, -1, 0);
}
@@ -1163,7 +1162,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
* memory slot ID).
*/
struct userspace_mem_region *
-memslot2region(struct kvm_vm *vm, uint32_t memslot)
+memslot2region(struct kvm_vm *vm, u32 memslot)
{
struct userspace_mem_region *region;
@@ -1194,7 +1193,7 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot)
* Sets the flags of the memory region specified by the value of slot,
* to the values given by flags.
*/
-void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
+void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags)
{
int ret;
struct userspace_mem_region *region;
@@ -1210,7 +1209,7 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
ret, errno, slot, flags);
}
-void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot)
+void vm_mem_region_reload(struct kvm_vm *vm, u32 slot)
{
struct userspace_mem_region *region = memslot2region(vm, slot);
struct kvm_userspace_memory_region2 tmp = region->region;
@@ -1234,7 +1233,7 @@ void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot)
*
* Change the gpa of a memory region.
*/
-void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
+void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa)
{
struct userspace_mem_region *region;
int ret;
@@ -1263,7 +1262,7 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
*
* Delete a memory region.
*/
-void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
+void vm_mem_region_delete(struct kvm_vm *vm, u32 slot)
{
struct userspace_mem_region *region = memslot2region(vm, slot);
@@ -1273,18 +1272,18 @@ void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
__vm_mem_region_delete(vm, region);
}
-void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
+void vm_guest_mem_fallocate(struct kvm_vm *vm, u64 base, u64 size,
bool punch_hole)
{
const int mode = FALLOC_FL_KEEP_SIZE | (punch_hole ? FALLOC_FL_PUNCH_HOLE : 0);
struct userspace_mem_region *region;
- uint64_t end = base + size;
- uint64_t gpa, len;
+ u64 end = base + size;
+ gpa_t gpa, len;
off_t fd_offset;
int ret;
for (gpa = base; gpa < end; gpa += len) {
- uint64_t offset;
+ u64 offset;
region = userspace_mem_region_find(vm, gpa, gpa);
TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD,
@@ -1292,7 +1291,7 @@ void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
offset = gpa - region->region.guest_phys_addr;
fd_offset = region->region.guest_memfd_offset + offset;
- len = min_t(uint64_t, end - gpa, region->region.memory_size - offset);
+ len = min_t(u64, end - gpa, region->region.memory_size - offset);
ret = fallocate(region->region.guest_memfd, mode, fd_offset, len);
TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx",
@@ -1317,7 +1316,7 @@ static size_t vcpu_mmap_sz(void)
return ret;
}
-static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
+static bool vcpu_exists(struct kvm_vm *vm, u32 vcpu_id)
{
struct kvm_vcpu *vcpu;
@@ -1333,7 +1332,7 @@ static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
* Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
* No additional vCPU setup is done. Returns the vCPU.
*/
-struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
struct kvm_vcpu *vcpu;
@@ -1367,33 +1366,18 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
}
/*
- * VM Virtual Address Unused Gap
- *
- * Input Args:
- * vm - Virtual Machine
- * sz - Size (bytes)
- * vaddr_min - Minimum Virtual Address
- *
- * Output Args: None
- *
- * Return:
- * Lowest virtual address at or above vaddr_min, with at least
- * sz unused bytes. TEST_ASSERT failure if no area of at least
- * size sz is available.
- *
- * Within the VM specified by vm, locates the lowest starting virtual
- * address >= vaddr_min, that has at least sz unallocated bytes. A
+ * Within the VM specified by @vm, locates the lowest starting guest virtual
+ * address >= @min_gva, that has at least @sz unallocated bytes. A
* TEST_ASSERT failure occurs for invalid input or no area of at least
- * sz unallocated bytes >= vaddr_min is available.
+ * @sz unallocated bytes >= @min_gva is available.
*/
-vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
- vm_vaddr_t vaddr_min)
+gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva)
{
- uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;
+ u64 pages = (sz + vm->page_size - 1) >> vm->page_shift;
/* Determine lowest permitted virtual page index. */
- uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
- if ((pgidx_start * vm->page_size) < vaddr_min)
+ u64 pgidx_start = (min_gva + vm->page_size - 1) >> vm->page_shift;
+ if ((pgidx_start * vm->page_size) < min_gva)
goto no_va_found;
/* Loop over section with enough valid virtual page indexes. */
@@ -1430,7 +1414,7 @@ vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
} while (pgidx_start != 0);
no_va_found:
- TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);
+ TEST_FAIL("No gva of specified pages available, pages: 0x%lx", pages);
/* NOT REACHED */
return -1;
@@ -1452,145 +1436,91 @@ va_found:
return pgidx_start * vm->page_size;
}
-static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz,
- vm_vaddr_t vaddr_min,
- enum kvm_mem_region_type type,
- bool protected)
+static gva_t ____vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+ enum kvm_mem_region_type type, bool protected)
{
- uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
+ u64 pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
virt_pgd_alloc(vm);
- vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages,
- KVM_UTIL_MIN_PFN * vm->page_size,
- vm->memslots[type], protected);
+ gpa_t gpa = __vm_phy_pages_alloc(vm, pages,
+ KVM_UTIL_MIN_PFN * vm->page_size,
+ vm->memslots[type], protected);
/*
* Find an unused range of virtual page addresses of at least
* pages in length.
*/
- vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);
+ gva_t gva_start = vm_unused_gva_gap(vm, sz, min_gva);
/* Map the virtual pages. */
- for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
- pages--, vaddr += vm->page_size, paddr += vm->page_size) {
+ for (gva_t gva = gva_start; pages > 0;
+ pages--, gva += vm->page_size, gpa += vm->page_size) {
- virt_pg_map(vm, vaddr, paddr);
+ virt_pg_map(vm, gva, gpa);
}
- return vaddr_start;
+ return gva_start;
}
-vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
- enum kvm_mem_region_type type)
+gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+ enum kvm_mem_region_type type)
{
- return ____vm_vaddr_alloc(vm, sz, vaddr_min, type,
- vm_arch_has_protected_memory(vm));
+ return ____vm_alloc(vm, sz, min_gva, type,
+ vm_arch_has_protected_memory(vm));
}
-vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
- vm_vaddr_t vaddr_min,
- enum kvm_mem_region_type type)
+gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+ enum kvm_mem_region_type type)
{
- return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false);
+ return ____vm_alloc(vm, sz, min_gva, type, false);
}
/*
- * VM Virtual Address Allocate
- *
- * Input Args:
- * vm - Virtual Machine
- * sz - Size in bytes
- * vaddr_min - Minimum starting virtual address
- *
- * Output Args: None
- *
- * Return:
- * Starting guest virtual address
- *
- * Allocates at least sz bytes within the virtual address space of the vm
- * given by vm. The allocated bytes are mapped to a virtual address >=
- * the address given by vaddr_min. Note that each allocation uses a
- * a unique set of pages, with the minimum real allocation being at least
- * a page. The allocated physical space comes from the TEST_DATA memory region.
+ * Allocates at least sz bytes within the virtual address space of the VM
+ * given by @vm. The allocated bytes are mapped to a virtual address >= the
+ * address given by @min_gva. Note that each allocation uses a a unique set
+ * of pages, with the minimum real allocation being at least a page. The
+ * allocated physical space comes from the TEST_DATA memory region.
*/
-vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
+gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva)
{
- return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA);
+ return __vm_alloc(vm, sz, min_gva, MEM_REGION_TEST_DATA);
}
-/*
- * VM Virtual Address Allocate Pages
- *
- * Input Args:
- * vm - Virtual Machine
- *
- * Output Args: None
- *
- * Return:
- * Starting guest virtual address
- *
- * Allocates at least N system pages worth of bytes within the virtual address
- * space of the vm.
- */
-vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
+gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages)
{
- return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
+ return vm_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
}
-vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
+gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
{
- return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
+ return __vm_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
}
-/*
- * VM Virtual Address Allocate Page
- *
- * Input Args:
- * vm - Virtual Machine
- *
- * Output Args: None
- *
- * Return:
- * Starting guest virtual address
- *
- * Allocates at least one system page worth of bytes within the virtual address
- * space of the vm.
- */
-vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
+gva_t vm_alloc_page(struct kvm_vm *vm)
{
- return vm_vaddr_alloc_pages(vm, 1);
+ return vm_alloc_pages(vm, 1);
}
/*
- * Map a range of VM virtual address to the VM's physical address
- *
- * Input Args:
- * vm - Virtual Machine
- * vaddr - Virtuall address to map
- * paddr - VM Physical Address
- * npages - The number of pages to map
+ * Map a range of VM virtual address to the VM's physical address.
*
- * Output Args: None
- *
- * Return: None
- *
- * Within the VM given by @vm, creates a virtual translation for
- * @npages starting at @vaddr to the page range starting at @paddr.
+ * Within the VM given by @vm, creates a virtual translation for @npages
+ * starting at @gva to the page range starting at @gpa.
*/
-void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- unsigned int npages)
+void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, unsigned int npages)
{
size_t page_size = vm->page_size;
size_t size = npages * page_size;
- TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
- TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
+ TEST_ASSERT(gva + size > gva, "Vaddr overflow");
+ TEST_ASSERT(gpa + size > gpa, "Paddr overflow");
while (npages--) {
- virt_pg_map(vm, vaddr, paddr);
+ virt_pg_map(vm, gva, gpa);
- vaddr += page_size;
- paddr += page_size;
+ gva += page_size;
+ gpa += page_size;
}
}
@@ -1611,7 +1541,7 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
* address providing the memory to the vm physical address is returned.
* A TEST_ASSERT failure occurs if no region containing gpa exists.
*/
-void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
+void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa)
{
struct userspace_mem_region *region;
@@ -1644,7 +1574,7 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
* VM physical address is returned. A TEST_ASSERT failure occurs if no
* region containing hva exists.
*/
-vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
+gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
{
struct rb_node *node;
@@ -1655,7 +1585,7 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
if (hva >= region->host_mem) {
if (hva <= (region->host_mem
+ region->region.memory_size - 1))
- return (vm_paddr_t)((uintptr_t)
+ return (gpa_t)((uintptr_t)
region->region.guest_phys_addr
+ (hva - (uintptr_t)region->host_mem));
@@ -1687,7 +1617,7 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
* memory without mapping said memory in the guest's address space. And, for
* userfaultfd-based demand paging, to do so without triggering userfaults.
*/
-void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
+void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa)
{
struct userspace_mem_region *region;
uintptr_t offset;
@@ -1781,8 +1711,8 @@ struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
{
- uint32_t page_size = getpagesize();
- uint32_t size = vcpu->vm->dirty_ring_size;
+ u32 page_size = getpagesize();
+ u32 size = vcpu->vm->dirty_ring_size;
TEST_ASSERT(size > 0, "Should enable dirty ring first");
@@ -1811,7 +1741,7 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
* Device Ioctl
*/
-int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
+int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr)
{
struct kvm_device_attr attribute = {
.group = group,
@@ -1822,7 +1752,7 @@ int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
}
-int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
+int __kvm_test_create_device(struct kvm_vm *vm, u64 type)
{
struct kvm_create_device create_dev = {
.type = type,
@@ -1832,7 +1762,7 @@ int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
}
-int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
+int __kvm_create_device(struct kvm_vm *vm, u64 type)
{
struct kvm_create_device create_dev = {
.type = type,
@@ -1846,7 +1776,7 @@ int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
return err ? : create_dev.fd;
}
-int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val)
+int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val)
{
struct kvm_device_attr kvmattr = {
.group = group,
@@ -1858,7 +1788,7 @@ int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val)
return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr);
}
-int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
+int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val)
{
struct kvm_device_attr kvmattr = {
.group = group,
@@ -1874,7 +1804,7 @@ int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
* IRQ related functions.
*/
-int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
+int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level)
{
struct kvm_irq_level irq_level = {
.irq = irq,
@@ -1884,7 +1814,7 @@ int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
}
-void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
+void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level)
{
int ret = _kvm_irq_line(vm, irq, level);
@@ -1906,7 +1836,7 @@ struct kvm_irq_routing *kvm_gsi_routing_create(void)
}
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
- uint32_t gsi, uint32_t pin)
+ u32 gsi, u32 pin)
{
int i;
@@ -1956,7 +1886,7 @@ void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
* Dumps the current state of the VM given by vm, to the FILE stream
* given by stream.
*/
-void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void vm_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
int ctr;
struct userspace_mem_region *region;
@@ -1969,8 +1899,8 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
"host_virt: %p\n", indent + 2, "",
- (uint64_t) region->region.guest_phys_addr,
- (uint64_t) region->region.memory_size,
+ (u64)region->region.guest_phys_addr,
+ (u64)region->region.memory_size,
region->host_mem);
fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
sparsebit_dump(stream, region->unused_phy_pages, 0);
@@ -2077,7 +2007,7 @@ const char *exit_reason_str(unsigned int exit_reason)
* Input Args:
* vm - Virtual Machine
* num - number of pages
- * paddr_min - Physical address minimum
+ * min_gpa - Physical address minimum
* memslot - Memory region to allocate page from
* protected - True if the pages will be used as protected/private memory
*
@@ -2087,29 +2017,29 @@ const char *exit_reason_str(unsigned int exit_reason)
* Starting physical address
*
* Within the VM specified by vm, locates a range of available physical
- * pages at or above paddr_min. If found, the pages are marked as in use
+ * pages at or above min_gpa. If found, the pages are marked as in use
* and their base address is returned. A TEST_ASSERT failure occurs if
- * not enough pages are available at or above paddr_min.
+ * not enough pages are available at or above min_gpa.
*/
-vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
- vm_paddr_t paddr_min, uint32_t memslot,
- bool protected)
+gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+ gpa_t min_gpa, u32 memslot,
+ bool protected)
{
struct userspace_mem_region *region;
sparsebit_idx_t pg, base;
TEST_ASSERT(num > 0, "Must allocate at least one page");
- TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
+ TEST_ASSERT((min_gpa % vm->page_size) == 0, "Min physical address "
"not divisible by page size.\n"
- " paddr_min: 0x%lx page_size: 0x%x",
- paddr_min, vm->page_size);
+ " min_gpa: 0x%lx page_size: 0x%x",
+ min_gpa, vm->page_size);
region = memslot2region(vm, memslot);
TEST_ASSERT(!protected || region->protected_phy_pages,
"Region doesn't support protected memory");
- base = pg = paddr_min >> vm->page_shift;
+ base = pg = min_gpa >> vm->page_shift;
do {
for (; pg < base + num; ++pg) {
if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
@@ -2121,8 +2051,8 @@ vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
if (pg == 0) {
fprintf(stderr, "No guest physical page available, "
- "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
- paddr_min, vm->page_size, memslot);
+ "min_gpa: 0x%lx page_size: 0x%x memslot: %u\n",
+ min_gpa, vm->page_size, memslot);
fputs("---- vm dump ----\n", stderr);
vm_dump(stderr, vm, 2);
abort();
@@ -2137,13 +2067,12 @@ vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
return base * vm->page_size;
}
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
- uint32_t memslot)
+gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot)
{
- return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
+ return vm_phy_pages_alloc(vm, 1, min_gpa, memslot);
}
-vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
+gpa_t vm_alloc_page_table(struct kvm_vm *vm)
{
return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
vm->memslots[MEM_REGION_PT]);
@@ -2161,7 +2090,7 @@ vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
* Return:
* Equivalent host virtual address
*/
-void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
+void *addr_gva2hva(struct kvm_vm *vm, gva_t gva)
{
return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}
@@ -2259,7 +2188,7 @@ struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
* Read the data values of a specified stat from the binary stats interface.
*/
void read_stat_data(int stats_fd, struct kvm_stats_header *header,
- struct kvm_stats_desc *desc, uint64_t *data,
+ struct kvm_stats_desc *desc, u64 *data,
size_t max_elements)
{
size_t nr_elements = min_t(ssize_t, desc->size, max_elements);
@@ -2280,7 +2209,7 @@ void read_stat_data(int stats_fd, struct kvm_stats_header *header,
}
void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
- uint64_t *data, size_t max_elements)
+ u64 *data, size_t max_elements)
{
struct kvm_stats_desc *desc;
size_t size_desc;
@@ -2357,7 +2286,7 @@ void __attribute((constructor)) kvm_selftest_init(void)
kvm_selftest_arch_init();
}
-bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
+bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa)
{
sparsebit_idx_t pg = 0;
struct userspace_mem_region *region;
@@ -2365,10 +2294,10 @@ bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
if (!vm_arch_has_protected_memory(vm))
return false;
- region = userspace_mem_region_find(vm, paddr, paddr);
- TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr);
+ region = userspace_mem_region_find(vm, gpa, gpa);
+ TEST_ASSERT(region, "No vm physical memory at 0x%lx", gpa);
- pg = paddr >> vm->page_shift;
+ pg = gpa >> vm->page_shift;
return sparsebit_is_set(region->protected_phy_pages, pg);
}
diff --git a/tools/testing/selftests/kvm/lib/loongarch/processor.c b/tools/testing/selftests/kvm/lib/loongarch/processor.c
index ee4ad3b1d2a4..64d91fb76522 100644
--- a/tools/testing/selftests/kvm/lib/loongarch/processor.c
+++ b/tools/testing/selftests/kvm/lib/loongarch/processor.c
@@ -12,32 +12,32 @@
#define LOONGARCH_PAGE_TABLE_PHYS_MIN 0x200000
#define LOONGARCH_GUEST_STACK_VADDR_MIN 0x200000
-static vm_paddr_t invalid_pgtable[4];
-static vm_vaddr_t exception_handlers;
+static gpa_t invalid_pgtable[4];
+static gva_t exception_handlers;
-static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
+static u64 virt_pte_index(struct kvm_vm *vm, gva_t gva, int level)
{
unsigned int shift;
- uint64_t mask;
+ u64 mask;
shift = level * (vm->page_shift - 3) + vm->page_shift;
mask = (1UL << (vm->page_shift - 3)) - 1;
return (gva >> shift) & mask;
}
-static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
+static u64 pte_addr(struct kvm_vm *vm, u64 entry)
{
return entry & ~((0x1UL << vm->page_shift) - 1);
}
-static uint64_t ptrs_per_pte(struct kvm_vm *vm)
+static u64 ptrs_per_pte(struct kvm_vm *vm)
{
return 1 << (vm->page_shift - 3);
}
-static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t child)
+static void virt_set_pgtable(struct kvm_vm *vm, gpa_t table, gpa_t child)
{
- uint64_t *ptep;
+ u64 *ptep;
int i, ptrs_per_pte;
ptep = addr_gpa2hva(vm, table);
@@ -49,7 +49,7 @@ static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t chi
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
int i;
- vm_paddr_t child, table;
+ gpa_t child, table;
if (vm->mmu.pgd_created)
return;
@@ -67,16 +67,16 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
vm->mmu.pgd_created = true;
}
-static int virt_pte_none(uint64_t *ptep, int level)
+static int virt_pte_none(u64 *ptep, int level)
{
return *ptep == invalid_pgtable[level];
}
-static uint64_t *virt_populate_pte(struct kvm_vm *vm, vm_vaddr_t gva, int alloc)
+static u64 *virt_populate_pte(struct kvm_vm *vm, gva_t gva, int alloc)
{
int level;
- uint64_t *ptep;
- vm_paddr_t child;
+ u64 *ptep;
+ gpa_t child;
if (!vm->mmu.pgd_created)
goto unmapped_gva;
@@ -106,43 +106,42 @@ unmapped_gva:
exit(EXIT_FAILURE);
}
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
- uint64_t *ptep;
+ u64 *ptep;
ptep = virt_populate_pte(vm, gva, 0);
- TEST_ASSERT(*ptep != 0, "Virtual address vaddr: 0x%lx not mapped\n", gva);
+ TEST_ASSERT(*ptep != 0, "Virtual address gva: 0x%lx not mapped\n", gva);
return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
- uint32_t prot_bits;
- uint64_t *ptep;
+ u32 prot_bits;
+ u64 *ptep;
- TEST_ASSERT((vaddr % vm->page_size) == 0,
+ TEST_ASSERT((gva % vm->page_size) == 0,
"Virtual address not on page boundary,\n"
- "vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
- TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
- (vaddr >> vm->page_shift)),
- "Invalid virtual address, vaddr: 0x%lx", vaddr);
- TEST_ASSERT((paddr % vm->page_size) == 0,
+ "gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
+ TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+ "Invalid virtual address, gva: 0x%lx", gva);
+ TEST_ASSERT((gpa % vm->page_size) == 0,
"Physical address not on page boundary,\n"
- "paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
- TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+ "gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
+ TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond maximum supported,\n"
- "paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
- paddr, vm->max_gfn, vm->page_size);
+ "gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ gpa, vm->max_gfn, vm->page_size);
- ptep = virt_populate_pte(vm, vaddr, 1);
+ ptep = virt_populate_pte(vm, gva, 1);
prot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC | _PAGE_USER;
- WRITE_ONCE(*ptep, paddr | prot_bits);
+ WRITE_ONCE(*ptep, gpa | prot_bits);
}
-static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
+static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level)
{
- uint64_t pte, *ptep;
+ u64 pte, *ptep;
static const char * const type[] = { "pte", "pmd", "pud", "pgd"};
if (level < 0)
@@ -158,7 +157,7 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t p
}
}
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
int level;
@@ -169,7 +168,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
pte_dump(stream, vm, indent, vm->mmu.pgd, level);
}
-void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
{
}
@@ -206,8 +205,9 @@ void vm_init_descriptor_tables(struct kvm_vm *vm)
{
void *addr;
- vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
- LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
+ vm->handlers = __vm_alloc(vm, sizeof(struct handlers),
+ LOONGARCH_GUEST_STACK_VADDR_MIN,
+ MEM_REGION_DATA);
addr = addr_gva2hva(vm, vm->handlers);
memset(addr, 0, vm->page_size);
@@ -223,7 +223,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn hand
handlers->exception_handlers[vector] = handler;
}
-uint32_t guest_get_vcpuid(void)
+u32 guest_get_vcpuid(void)
{
return csr_read(LOONGARCH_CSR_CPUID);
}
@@ -241,36 +241,36 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
va_start(ap, num);
for (i = 0; i < num; i++)
- regs.gpr[i + 4] = va_arg(ap, uint64_t);
+ regs.gpr[i + 4] = va_arg(ap, u64);
va_end(ap);
vcpu_regs_set(vcpu, &regs);
}
-static void loongarch_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+static void loongarch_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
__vcpu_set_reg(vcpu, id, val);
}
-static void loongarch_set_cpucfg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+static void loongarch_set_cpucfg(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
- uint64_t cfgid;
+ u64 cfgid;
cfgid = KVM_REG_LOONGARCH_CPUCFG | KVM_REG_SIZE_U64 | 8 * id;
__vcpu_set_reg(vcpu, cfgid, val);
}
-static void loongarch_get_csr(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
+static void loongarch_get_csr(struct kvm_vcpu *vcpu, u64 id, void *addr)
{
- uint64_t csrid;
+ u64 csrid;
csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
__vcpu_get_reg(vcpu, csrid, addr);
}
-static void loongarch_set_csr(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+static void loongarch_set_csr(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
- uint64_t csrid;
+ u64 csrid;
csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
__vcpu_set_reg(vcpu, csrid, val);
@@ -354,8 +354,8 @@ void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
loongarch_set_csr(vcpu, LOONGARCH_CSR_STLBPGSIZE, PS_DEFAULT_SIZE);
/* LOONGARCH_CSR_KS1 is used for exception stack */
- val = __vm_vaddr_alloc(vm, vm->page_size,
- LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
+ val = __vm_alloc(vm, vm->page_size, LOONGARCH_GUEST_STACK_VADDR_MIN,
+ MEM_REGION_DATA);
TEST_ASSERT(val != 0, "No memory for exception stack");
val = val + vm->page_size;
loongarch_set_csr(vcpu, LOONGARCH_CSR_KS1, val);
@@ -369,23 +369,23 @@ void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
loongarch_set_csr(vcpu, LOONGARCH_CSR_TMID, vcpu->id);
}
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
size_t stack_size;
- uint64_t stack_vaddr;
+ u64 stack_gva;
struct kvm_regs regs;
struct kvm_vcpu *vcpu;
vcpu = __vm_vcpu_add(vm, vcpu_id);
stack_size = vm->page_size;
- stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
- LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
- TEST_ASSERT(stack_vaddr != 0, "No memory for vm stack");
+ stack_gva = __vm_alloc(vm, stack_size,
+ LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
+ TEST_ASSERT(stack_gva != 0, "No memory for vm stack");
loongarch_vcpu_setup(vcpu);
/* Setup guest general purpose registers */
vcpu_regs_get(vcpu, &regs);
- regs.gpr[3] = stack_vaddr + stack_size;
+ regs.gpr[3] = stack_gva + stack_size;
vcpu_regs_set(vcpu, &regs);
return vcpu;
@@ -397,6 +397,6 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
/* Setup guest PC register */
vcpu_regs_get(vcpu, &regs);
- regs.pc = (uint64_t)guest_code;
+ regs.pc = (u64)guest_code;
vcpu_regs_set(vcpu, &regs);
}
diff --git a/tools/testing/selftests/kvm/lib/loongarch/ucall.c b/tools/testing/selftests/kvm/lib/loongarch/ucall.c
index fc6cbb50573f..cd49a3440ead 100644
--- a/tools/testing/selftests/kvm/lib/loongarch/ucall.c
+++ b/tools/testing/selftests/kvm/lib/loongarch/ucall.c
@@ -9,17 +9,17 @@
* ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
* VM), it must not be accessed from host code.
*/
-vm_vaddr_t *ucall_exit_mmio_addr;
+gva_t *ucall_exit_mmio_addr;
-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
- vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
+ gva_t mmio_gva = vm_unused_gva_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
virt_map(vm, mmio_gva, mmio_gpa, 1);
vm->ucall_mmio_addr = mmio_gpa;
- write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva);
+ write_guest_global(vm, ucall_exit_mmio_addr, (gva_t *)mmio_gva);
}
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
@@ -28,10 +28,10 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_MMIO &&
run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) {
- TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t),
+ TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(u64),
"Unexpected ucall exit mmio address access");
- return (void *)(*((uint64_t *)run->mmio.data));
+ return (void *)(*((u64 *)run->mmio.data));
}
return NULL;
diff --git a/tools/testing/selftests/kvm/lib/memstress.c b/tools/testing/selftests/kvm/lib/memstress.c
index 1ea735d66e15..6dcd15910a06 100644
--- a/tools/testing/selftests/kvm/lib/memstress.c
+++ b/tools/testing/selftests/kvm/lib/memstress.c
@@ -16,7 +16,7 @@ struct memstress_args memstress_args;
* Guest virtual memory offset of the testing memory slot.
* Must not conflict with identity mapped test code.
*/
-static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
struct vcpu_thread {
/* The index of the vCPU. */
@@ -44,15 +44,15 @@ static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
* Continuously write to the first 8 bytes of each page in the
* specified region.
*/
-void memstress_guest_code(uint32_t vcpu_idx)
+void memstress_guest_code(u32 vcpu_idx)
{
struct memstress_args *args = &memstress_args;
struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
struct guest_random_state rand_state;
- uint64_t gva;
- uint64_t pages;
- uint64_t addr;
- uint64_t page;
+ gva_t gva;
+ u64 pages;
+ u64 addr;
+ u64 page;
int i;
rand_state = new_guest_random_state(guest_random_seed + vcpu_idx);
@@ -76,9 +76,9 @@ void memstress_guest_code(uint32_t vcpu_idx)
addr = gva + (page * args->guest_page_size);
if (__guest_random_bool(&rand_state, args->write_percent))
- *(uint64_t *)addr = 0x0123456789ABCDEF;
+ *(u64 *)addr = 0x0123456789ABCDEF;
else
- READ_ONCE(*(uint64_t *)addr);
+ READ_ONCE(*(u64 *)addr);
}
GUEST_SYNC(1);
@@ -87,7 +87,7 @@ void memstress_guest_code(uint32_t vcpu_idx)
void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
struct kvm_vcpu *vcpus[],
- uint64_t vcpu_memory_bytes,
+ u64 vcpu_memory_bytes,
bool partition_vcpu_memory_access)
{
struct memstress_args *args = &memstress_args;
@@ -122,15 +122,15 @@ void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
}
struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
- uint64_t vcpu_memory_bytes, int slots,
+ u64 vcpu_memory_bytes, int slots,
enum vm_mem_backing_src_type backing_src,
bool partition_vcpu_memory_access)
{
struct memstress_args *args = &memstress_args;
struct kvm_vm *vm;
- uint64_t guest_num_pages, slot0_pages = 0;
- uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
- uint64_t region_end_gfn;
+ u64 guest_num_pages, slot0_pages = 0;
+ u64 backing_src_pagesz = get_backing_src_pagesz(backing_src);
+ u64 region_end_gfn;
int i;
pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
@@ -202,8 +202,8 @@ struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
/* Add extra memory slots for testing */
for (i = 0; i < slots; i++) {
- uint64_t region_pages = guest_num_pages / slots;
- vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i;
+ u64 region_pages = guest_num_pages / slots;
+ gpa_t region_start = args->gpa + region_pages * args->guest_page_size * i;
vm_userspace_mem_region_add(vm, backing_src, region_start,
MEMSTRESS_MEM_SLOT_INDEX + i,
@@ -232,7 +232,7 @@ void memstress_destroy_vm(struct kvm_vm *vm)
kvm_vm_free(vm);
}
-void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
+void memstress_set_write_percent(struct kvm_vm *vm, u32 write_percent)
{
memstress_args.write_percent = write_percent;
sync_global_to_guest(vm, memstress_args.write_percent);
@@ -244,7 +244,7 @@ void memstress_set_random_access(struct kvm_vm *vm, bool random_access)
sync_global_to_guest(vm, memstress_args.random_access);
}
-uint64_t __weak memstress_nested_pages(int nr_vcpus)
+u64 __weak memstress_nested_pages(int nr_vcpus)
{
return 0;
}
@@ -349,7 +349,7 @@ void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int sl
}
void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
- int slots, uint64_t pages_per_slot)
+ int slots, u64 pages_per_slot)
{
int i;
@@ -360,7 +360,7 @@ void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
}
}
-unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot)
+unsigned long **memstress_alloc_bitmaps(int slots, u64 pages_per_slot)
{
unsigned long **bitmaps;
int i;
diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c
index 067c6b2c15b0..ded5429f3448 100644
--- a/tools/testing/selftests/kvm/lib/riscv/processor.c
+++ b/tools/testing/selftests/kvm/lib/riscv/processor.c
@@ -15,9 +15,9 @@
#define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000
-static vm_vaddr_t exception_handlers;
+static gva_t exception_handlers;
-bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext)
+bool __vcpu_has_ext(struct kvm_vcpu *vcpu, u64 ext)
{
unsigned long value = 0;
int ret;
@@ -27,32 +27,32 @@ bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext)
return !ret && !!value;
}
-static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
+static u64 pte_addr(struct kvm_vm *vm, u64 entry)
{
return ((entry & PGTBL_PTE_ADDR_MASK) >> PGTBL_PTE_ADDR_SHIFT) <<
PGTBL_PAGE_SIZE_SHIFT;
}
-static uint64_t ptrs_per_pte(struct kvm_vm *vm)
+static u64 ptrs_per_pte(struct kvm_vm *vm)
{
- return PGTBL_PAGE_SIZE / sizeof(uint64_t);
+ return PGTBL_PAGE_SIZE / sizeof(u64);
}
-static uint64_t pte_index_mask[] = {
+static u64 pte_index_mask[] = {
PGTBL_L0_INDEX_MASK,
PGTBL_L1_INDEX_MASK,
PGTBL_L2_INDEX_MASK,
PGTBL_L3_INDEX_MASK,
};
-static uint32_t pte_index_shift[] = {
+static u32 pte_index_shift[] = {
PGTBL_L0_INDEX_SHIFT,
PGTBL_L1_INDEX_SHIFT,
PGTBL_L2_INDEX_SHIFT,
PGTBL_L3_INDEX_SHIFT,
};
-static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
+static u64 pte_index(struct kvm_vm *vm, gva_t gva, int level)
{
TEST_ASSERT(level > -1,
"Negative page table level (%d) not possible", level);
@@ -75,26 +75,25 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
vm->mmu.pgd_created = true;
}
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
- uint64_t *ptep, next_ppn;
+ u64 *ptep, next_ppn;
int level = vm->mmu.pgtable_levels - 1;
- TEST_ASSERT((vaddr % vm->page_size) == 0,
+ TEST_ASSERT((gva % vm->page_size) == 0,
"Virtual address not on page boundary,\n"
- " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
- TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
- (vaddr >> vm->page_shift)),
- "Invalid virtual address, vaddr: 0x%lx", vaddr);
- TEST_ASSERT((paddr % vm->page_size) == 0,
+ " gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
+ TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+ "Invalid virtual address, gva: 0x%lx", gva);
+ TEST_ASSERT((gpa % vm->page_size) == 0,
"Physical address not on page boundary,\n"
- " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
- TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+ " gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
+ TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond maximum supported,\n"
- " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
- paddr, vm->max_gfn, vm->page_size);
+ " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ gpa, vm->max_gfn, vm->page_size);
- ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, vaddr, level) * 8;
+ ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, gva, level) * 8;
if (!*ptep) {
next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT;
*ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
@@ -104,7 +103,7 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
while (level > -1) {
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
- pte_index(vm, vaddr, level) * 8;
+ pte_index(vm, gva, level) * 8;
if (!*ptep && level > 0) {
next_ppn = vm_alloc_page_table(vm) >>
PGTBL_PAGE_SIZE_SHIFT;
@@ -114,14 +113,14 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
level--;
}
- paddr = paddr >> PGTBL_PAGE_SIZE_SHIFT;
- *ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) |
+ gpa = gpa >> PGTBL_PAGE_SIZE_SHIFT;
+ *ptep = (gpa << PGTBL_PTE_ADDR_SHIFT) |
PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK;
}
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
- uint64_t *ptep;
+ u64 *ptep;
int level = vm->mmu.pgtable_levels - 1;
if (!vm->mmu.pgd_created)
@@ -148,12 +147,12 @@ unmapped_gva:
exit(1);
}
-static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
- uint64_t page, int level)
+static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent,
+ u64 page, int level)
{
#ifdef DEBUG
static const char *const type[] = { "pte", "pmd", "pud", "p4d"};
- uint64_t pte, *ptep;
+ u64 pte, *ptep;
if (level < 0)
return;
@@ -170,11 +169,11 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
#endif
}
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
struct kvm_mmu *mmu = &vm->mmu;
int level = mmu->pgtable_levels - 1;
- uint64_t pgd, *ptep;
+ u64 pgd, *ptep;
if (!mmu->pgd_created)
return;
@@ -233,7 +232,7 @@ void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu)
vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(satp), satp);
}
-void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
{
struct kvm_riscv_core core;
@@ -311,20 +310,20 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
}
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
int r;
size_t stack_size;
- unsigned long stack_vaddr;
+ unsigned long stack_gva;
unsigned long current_gp = 0;
struct kvm_mp_state mps;
struct kvm_vcpu *vcpu;
stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
vm->page_size;
- stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
- DEFAULT_RISCV_GUEST_STACK_VADDR_MIN,
- MEM_REGION_DATA);
+ stack_gva = __vm_alloc(vm, stack_size,
+ DEFAULT_RISCV_GUEST_STACK_VADDR_MIN,
+ MEM_REGION_DATA);
vcpu = __vm_vcpu_add(vm, vcpu_id);
riscv_vcpu_mmu_setup(vcpu);
@@ -344,7 +343,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.gp), current_gp);
/* Setup stack pointer and program counter of guest */
- vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size);
+ vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_gva + stack_size);
/* Setup sscratch for guest_get_vcpuid() */
vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(sscratch), vcpu_id);
@@ -358,7 +357,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
va_list ap;
- uint64_t id = RISCV_CORE_REG(regs.a0);
+ u64 id = RISCV_CORE_REG(regs.a0);
int i;
TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
@@ -393,7 +392,7 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
id = RISCV_CORE_REG(regs.a7);
break;
}
- vcpu_set_reg(vcpu, id, va_arg(ap, uint64_t));
+ vcpu_set_reg(vcpu, id, va_arg(ap, u64));
}
va_end(ap);
@@ -449,10 +448,10 @@ void vcpu_init_vector_tables(struct kvm_vcpu *vcpu)
void vm_init_vector_tables(struct kvm_vm *vm)
{
- vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
- vm->page_size, MEM_REGION_DATA);
+ vm->handlers = __vm_alloc(vm, sizeof(struct handlers), vm->page_size,
+ MEM_REGION_DATA);
- *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
+ *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers;
}
void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler)
@@ -470,7 +469,7 @@ void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handle
handlers->exception_handlers[1][0] = handler;
}
-uint32_t guest_get_vcpuid(void)
+u32 guest_get_vcpuid(void)
{
return csr_read(CSR_SSCRATCH);
}
@@ -544,10 +543,10 @@ void kvm_selftest_arch_init(void)
unsigned long riscv64_get_satp_mode(void)
{
int kvm_fd, vm_fd, vcpu_fd, err;
- uint64_t val;
+ u64 val;
struct kvm_one_reg reg = {
.id = RISCV_CONFIG_REG(satp_mode),
- .addr = (uint64_t)&val,
+ .addr = (u64)&val,
};
kvm_fd = open_kvm_dev_path_or_exit();
diff --git a/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c b/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c
index 2c432fa164f1..f5480473f192 100644
--- a/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c
+++ b/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c
@@ -13,7 +13,7 @@
static void guest_code(void)
{
- uint64_t diag318_info = 0x12345678;
+ u64 diag318_info = 0x12345678;
asm volatile ("diag %0,0,0x318\n" : : "d" (diag318_info));
}
@@ -23,13 +23,13 @@ static void guest_code(void)
* we create an ad-hoc VM here to handle the instruction then extract the
* necessary data. It is up to the caller to decide what to do with that data.
*/
-static uint64_t diag318_handler(void)
+static u64 diag318_handler(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
- uint64_t reg;
- uint64_t diag318_info;
+ u64 reg;
+ u64 diag318_info;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_run(vcpu);
@@ -51,9 +51,9 @@ static uint64_t diag318_handler(void)
return diag318_info;
}
-uint64_t get_diag318_info(void)
+u64 get_diag318_info(void)
{
- static uint64_t diag318_info;
+ static u64 diag318_info;
static bool printed_skip;
/*
diff --git a/tools/testing/selftests/kvm/lib/s390/facility.c b/tools/testing/selftests/kvm/lib/s390/facility.c
index d540812d911a..9a778054f07f 100644
--- a/tools/testing/selftests/kvm/lib/s390/facility.c
+++ b/tools/testing/selftests/kvm/lib/s390/facility.c
@@ -10,5 +10,5 @@
#include "facility.h"
-uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS];
+u64 stfl_doublewords[NB_STFL_DOUBLEWORDS];
bool stfle_flag;
diff --git a/tools/testing/selftests/kvm/lib/s390/processor.c b/tools/testing/selftests/kvm/lib/s390/processor.c
index 6a9a660413a7..a9adb3782b35 100644
--- a/tools/testing/selftests/kvm/lib/s390/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390/processor.c
@@ -12,7 +12,7 @@
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
- vm_paddr_t paddr;
+ gpa_t gpa;
TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
@@ -20,12 +20,12 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
if (vm->mmu.pgd_created)
return;
- paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
+ gpa = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
KVM_GUEST_PAGE_TABLE_MIN_PADDR,
vm->memslots[MEM_REGION_PT]);
- memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
+ memset(addr_gpa2hva(vm, gpa), 0xff, PAGES_PER_REGION * vm->page_size);
- vm->mmu.pgd = paddr;
+ vm->mmu.pgd = gpa;
vm->mmu.pgd_created = true;
}
@@ -34,9 +34,9 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
* a page table (ri == 4). Returns a suitable region/segment table entry
* which points to the freshly allocated pages.
*/
-static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
+static u64 virt_alloc_region(struct kvm_vm *vm, int ri)
{
- uint64_t taddr;
+ u64 taddr;
taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
@@ -47,26 +47,24 @@ static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
int ri, idx;
- uint64_t *entry;
+ u64 *entry;
TEST_ASSERT((gva % vm->page_size) == 0,
- "Virtual address not on page boundary,\n"
- " vaddr: 0x%lx vm->page_size: 0x%x",
- gva, vm->page_size);
- TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
- (gva >> vm->page_shift)),
- "Invalid virtual address, vaddr: 0x%lx",
- gva);
+ "Virtual address not on page boundary,\n"
+ " gva: 0x%lx vm->page_size: 0x%x",
+ gva, vm->page_size);
+ TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+ "Invalid virtual address, gva: 0x%lx", gva);
TEST_ASSERT((gpa % vm->page_size) == 0,
"Physical address not on page boundary,\n"
- " paddr: 0x%lx vm->page_size: 0x%x",
+ " gpa: 0x%lx vm->page_size: 0x%x",
gva, vm->page_size);
TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond beyond maximum supported,\n"
- " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
gva, vm->max_gfn, vm->page_size);
/* Walk through region and segment tables */
@@ -86,10 +84,10 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
entry[idx] = gpa;
}
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
int ri, idx;
- uint64_t *entry;
+ u64 *entry;
TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
@@ -111,10 +109,10 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
return (entry[idx] & ~0xffful) + (gva & 0xffful);
}
-static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
- uint64_t ptea_start)
+static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, u8 indent,
+ u64 ptea_start)
{
- uint64_t *pte, ptea;
+ u64 *pte, ptea;
for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
pte = addr_gpa2hva(vm, ptea);
@@ -125,10 +123,10 @@ static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
}
}
-static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
- uint64_t reg_tab_addr)
+static void virt_dump_region(FILE *stream, struct kvm_vm *vm, u8 indent,
+ u64 reg_tab_addr)
{
- uint64_t addr, *entry;
+ u64 addr, *entry;
for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) {
entry = addr_gpa2hva(vm, addr);
@@ -147,7 +145,7 @@ static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
}
}
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
if (!vm->mmu.pgd_created)
return;
@@ -160,10 +158,10 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
vcpu->run->psw_addr = (uintptr_t)guest_code;
}
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
- uint64_t stack_vaddr;
+ u64 stack_gva;
struct kvm_regs regs;
struct kvm_sregs sregs;
struct kvm_vcpu *vcpu;
@@ -171,15 +169,14 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
- stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
- DEFAULT_GUEST_STACK_VADDR_MIN,
- MEM_REGION_DATA);
+ stack_gva = __vm_alloc(vm, stack_size, DEFAULT_GUEST_STACK_VADDR_MIN,
+ MEM_REGION_DATA);
vcpu = __vm_vcpu_add(vm, vcpu_id);
/* Setup guest registers */
vcpu_regs_get(vcpu, &regs);
- regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160;
+ regs.gprs[15] = stack_gva + (DEFAULT_STACK_PGS * getpagesize()) - 160;
vcpu_regs_set(vcpu, &regs);
vcpu_sregs_get(vcpu, &sregs);
@@ -206,13 +203,13 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
vcpu_regs_get(vcpu, &regs);
for (i = 0; i < num; i++)
- regs.gprs[i + 2] = va_arg(ap, uint64_t);
+ regs.gprs[i + 2] = va_arg(ap, u64);
vcpu_regs_set(vcpu, &regs);
va_end(ap);
}
-void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
{
fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c
index a99188f87a38..4d845000de15 100644
--- a/tools/testing/selftests/kvm/lib/sparsebit.c
+++ b/tools/testing/selftests/kvm/lib/sparsebit.c
@@ -76,11 +76,11 @@
* the use of a binary-search tree, where each node contains at least
* the following members:
*
- * typedef uint64_t sparsebit_idx_t;
- * typedef uint64_t sparsebit_num_t;
+ * typedef u64 sparsebit_idx_t;
+ * typedef u64 sparsebit_num_t;
*
* sparsebit_idx_t idx;
- * uint32_t mask;
+ * u32 mask;
* sparsebit_num_t num_after;
*
* The idx member contains the bit index of the first bit described by this
@@ -162,7 +162,7 @@
#define DUMP_LINE_MAX 100 /* Does not include indent amount */
-typedef uint32_t mask_t;
+typedef u32 mask_t;
#define MASK_BITS (sizeof(mask_t) * CHAR_BIT)
struct node {
@@ -2056,9 +2056,9 @@ unsigned char get8(void)
return ch;
}
-uint64_t get64(void)
+u64 get64(void)
{
- uint64_t x;
+ u64 x;
x = get8();
x = (x << 8) | get8();
@@ -2074,9 +2074,9 @@ int main(void)
{
s = sparsebit_alloc();
for (;;) {
- uint8_t op = get8() & 0xf;
- uint64_t first = get64();
- uint64_t last = get64();
+ u8 op = get8() & 0xf;
+ u64 first = get64();
+ u64 last = get64();
operate(op, first, last);
}
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 8a1848586a85..bab1bd2b775b 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -30,15 +30,15 @@ void __attribute__((used)) expect_sigbus_handler(int signum)
* Park-Miller LCG using standard constants.
*/
-struct guest_random_state new_guest_random_state(uint32_t seed)
+struct guest_random_state new_guest_random_state(u32 seed)
{
struct guest_random_state s = {.seed = seed};
return s;
}
-uint32_t guest_random_u32(struct guest_random_state *state)
+u32 guest_random_u32(struct guest_random_state *state)
{
- state->seed = (uint64_t)state->seed * 48271 % ((uint32_t)(1 << 31) - 1);
+ state->seed = (u64)state->seed * 48271 % ((u32)(1 << 31) - 1);
return state->seed;
}
@@ -83,12 +83,12 @@ size_t parse_size(const char *size)
return base << shift;
}
-int64_t timespec_to_ns(struct timespec ts)
+s64 timespec_to_ns(struct timespec ts)
{
- return (int64_t)ts.tv_nsec + 1000000000LL * (int64_t)ts.tv_sec;
+ return (s64)ts.tv_nsec + 1000000000LL * (s64)ts.tv_sec;
}
-struct timespec timespec_add_ns(struct timespec ts, int64_t ns)
+struct timespec timespec_add_ns(struct timespec ts, s64 ns)
{
struct timespec res;
@@ -101,15 +101,15 @@ struct timespec timespec_add_ns(struct timespec ts, int64_t ns)
struct timespec timespec_add(struct timespec ts1, struct timespec ts2)
{
- int64_t ns1 = timespec_to_ns(ts1);
- int64_t ns2 = timespec_to_ns(ts2);
+ s64 ns1 = timespec_to_ns(ts1);
+ s64 ns2 = timespec_to_ns(ts2);
return timespec_add_ns((struct timespec){0}, ns1 + ns2);
}
struct timespec timespec_sub(struct timespec ts1, struct timespec ts2)
{
- int64_t ns1 = timespec_to_ns(ts1);
- int64_t ns2 = timespec_to_ns(ts2);
+ s64 ns1 = timespec_to_ns(ts1);
+ s64 ns2 = timespec_to_ns(ts2);
return timespec_add_ns((struct timespec){0}, ns1 - ns2);
}
@@ -123,7 +123,7 @@ struct timespec timespec_elapsed(struct timespec start)
struct timespec timespec_div(struct timespec ts, int divisor)
{
- int64_t ns = timespec_to_ns(ts) / divisor;
+ s64 ns = timespec_to_ns(ts) / divisor;
return timespec_add_ns((struct timespec){0}, ns);
}
@@ -225,7 +225,7 @@ size_t get_def_hugetlb_pagesz(void)
#define ANON_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS)
#define ANON_HUGE_FLAGS (ANON_FLAGS | MAP_HUGETLB)
-const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i)
+const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(u32 i)
{
static const struct vm_mem_backing_src_alias aliases[] = {
[VM_MEM_SRC_ANONYMOUS] = {
@@ -317,9 +317,9 @@ const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i)
#define MAP_HUGE_PAGE_SIZE(x) (1ULL << ((x >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK))
-size_t get_backing_src_pagesz(uint32_t i)
+size_t get_backing_src_pagesz(u32 i)
{
- uint32_t flag = vm_mem_backing_src_alias(i)->flag;
+ u32 flag = vm_mem_backing_src_alias(i)->flag;
switch (i) {
case VM_MEM_SRC_ANONYMOUS:
@@ -335,7 +335,7 @@ size_t get_backing_src_pagesz(uint32_t i)
}
}
-bool is_backing_src_hugetlb(uint32_t i)
+bool is_backing_src_hugetlb(u32 i)
{
return !!(vm_mem_backing_src_alias(i)->flag & MAP_HUGETLB);
}
diff --git a/tools/testing/selftests/kvm/lib/ucall_common.c b/tools/testing/selftests/kvm/lib/ucall_common.c
index 42151e571953..029ce21f9f2f 100644
--- a/tools/testing/selftests/kvm/lib/ucall_common.c
+++ b/tools/testing/selftests/kvm/lib/ucall_common.c
@@ -14,7 +14,7 @@ struct ucall_header {
struct ucall ucalls[KVM_MAX_VCPUS];
};
-int ucall_nr_pages_required(uint64_t page_size)
+int ucall_nr_pages_required(u64 page_size)
{
return align_up(sizeof(struct ucall_header), page_size) / page_size;
}
@@ -25,16 +25,16 @@ int ucall_nr_pages_required(uint64_t page_size)
*/
static struct ucall_header *ucall_pool;
-void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+void ucall_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
struct ucall_header *hdr;
struct ucall *uc;
- vm_vaddr_t vaddr;
+ gva_t gva;
int i;
- vaddr = vm_vaddr_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR,
- MEM_REGION_DATA);
- hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr);
+ gva = vm_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR,
+ MEM_REGION_DATA);
+ hdr = (struct ucall_header *)addr_gva2hva(vm, gva);
memset(hdr, 0, sizeof(*hdr));
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
@@ -42,7 +42,7 @@ void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
uc->hva = uc;
}
- write_guest_global(vm, ucall_pool, (struct ucall_header *)vaddr);
+ write_guest_global(vm, ucall_pool, (struct ucall_header *)gva);
ucall_arch_init(vm, mmio_gpa);
}
@@ -79,7 +79,7 @@ static void ucall_free(struct ucall *uc)
clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use);
}
-void ucall_assert(uint64_t cmd, const char *exp, const char *file,
+void ucall_assert(u64 cmd, const char *exp, const char *file,
unsigned int line, const char *fmt, ...)
{
struct ucall *uc;
@@ -88,20 +88,20 @@ void ucall_assert(uint64_t cmd, const char *exp, const char *file,
uc = ucall_alloc();
uc->cmd = cmd;
- WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (uint64_t)(exp));
- WRITE_ONCE(uc->args[GUEST_FILE], (uint64_t)(file));
+ WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (u64)(exp));
+ WRITE_ONCE(uc->args[GUEST_FILE], (u64)(file));
WRITE_ONCE(uc->args[GUEST_LINE], line);
va_start(va, fmt);
guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
va_end(va);
- ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
+ ucall_arch_do_ucall((gva_t)uc->hva);
ucall_free(uc);
}
-void ucall_fmt(uint64_t cmd, const char *fmt, ...)
+void ucall_fmt(u64 cmd, const char *fmt, ...)
{
struct ucall *uc;
va_list va;
@@ -113,12 +113,12 @@ void ucall_fmt(uint64_t cmd, const char *fmt, ...)
guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
va_end(va);
- ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
+ ucall_arch_do_ucall((gva_t)uc->hva);
ucall_free(uc);
}
-void ucall(uint64_t cmd, int nargs, ...)
+void ucall(u64 cmd, int nargs, ...)
{
struct ucall *uc;
va_list va;
@@ -132,15 +132,15 @@ void ucall(uint64_t cmd, int nargs, ...)
va_start(va, nargs);
for (i = 0; i < nargs; ++i)
- WRITE_ONCE(uc->args[i], va_arg(va, uint64_t));
+ WRITE_ONCE(uc->args[i], va_arg(va, u64));
va_end(va);
- ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
+ ucall_arch_do_ucall((gva_t)uc->hva);
ucall_free(uc);
}
-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
+u64 get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
struct ucall ucall;
void *addr;
diff --git a/tools/testing/selftests/kvm/lib/userfaultfd_util.c b/tools/testing/selftests/kvm/lib/userfaultfd_util.c
index 5bde176cedd5..ef8d76f71f83 100644
--- a/tools/testing/selftests/kvm/lib/userfaultfd_util.c
+++ b/tools/testing/selftests/kvm/lib/userfaultfd_util.c
@@ -27,7 +27,7 @@ static void *uffd_handler_thread_fn(void *arg)
{
struct uffd_reader_args *reader_args = (struct uffd_reader_args *)arg;
int uffd = reader_args->uffd;
- int64_t pages = 0;
+ s64 pages = 0;
struct timespec start;
struct timespec ts_diff;
struct epoll_event evt;
@@ -100,8 +100,8 @@ static void *uffd_handler_thread_fn(void *arg)
}
struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
- void *hva, uint64_t len,
- uint64_t num_readers,
+ void *hva, u64 len,
+ u64 num_readers,
uffd_handler_t handler)
{
struct uffd_desc *uffd_desc;
@@ -109,7 +109,7 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
int uffd;
struct uffdio_api uffdio_api;
struct uffdio_register uffdio_register;
- uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY;
+ u64 expected_ioctls = ((u64)1) << _UFFDIO_COPY;
int ret, i;
PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n",
@@ -132,7 +132,7 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
/* In order to get minor faults, prefault via the alias. */
if (is_minor)
- expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE;
+ expected_ioctls = ((u64)1) << _UFFDIO_CONTINUE;
uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno);
@@ -141,9 +141,9 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
uffdio_api.features = 0;
TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1,
"ioctl UFFDIO_API failed: %" PRIu64,
- (uint64_t)uffdio_api.api);
+ (u64)uffdio_api.api);
- uffdio_register.range.start = (uint64_t)hva;
+ uffdio_register.range.start = (u64)hva;
uffdio_register.range.len = len;
uffdio_register.mode = uffd_mode;
TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1,
diff --git a/tools/testing/selftests/kvm/lib/x86/apic.c b/tools/testing/selftests/kvm/lib/x86/apic.c
index 89153a333e83..5182fd0d6a76 100644
--- a/tools/testing/selftests/kvm/lib/x86/apic.c
+++ b/tools/testing/selftests/kvm/lib/x86/apic.c
@@ -14,7 +14,7 @@ void apic_disable(void)
void xapic_enable(void)
{
- uint64_t val = rdmsr(MSR_IA32_APICBASE);
+ u64 val = rdmsr(MSR_IA32_APICBASE);
/* Per SDM: to enable xAPIC when in x2APIC must first disable APIC */
if (val & MSR_IA32_APICBASE_EXTD) {
diff --git a/tools/testing/selftests/kvm/lib/x86/hyperv.c b/tools/testing/selftests/kvm/lib/x86/hyperv.c
index 15bc8cd583aa..d200c5c26e2e 100644
--- a/tools/testing/selftests/kvm/lib/x86/hyperv.c
+++ b/tools/testing/selftests/kvm/lib/x86/hyperv.c
@@ -76,23 +76,23 @@ bool kvm_hv_cpu_has(struct kvm_x86_cpu_feature feature)
}
struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
- vm_vaddr_t *p_hv_pages_gva)
+ gva_t *p_hv_pages_gva)
{
- vm_vaddr_t hv_pages_gva = vm_vaddr_alloc_page(vm);
+ gva_t hv_pages_gva = vm_alloc_page(vm);
struct hyperv_test_pages *hv = addr_gva2hva(vm, hv_pages_gva);
/* Setup of a region of guest memory for the VP Assist page. */
- hv->vp_assist = (void *)vm_vaddr_alloc_page(vm);
+ hv->vp_assist = (void *)vm_alloc_page(vm);
hv->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->vp_assist);
hv->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->vp_assist);
/* Setup of a region of guest memory for the partition assist page. */
- hv->partition_assist = (void *)vm_vaddr_alloc_page(vm);
+ hv->partition_assist = (void *)vm_alloc_page(vm);
hv->partition_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->partition_assist);
hv->partition_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->partition_assist);
/* Setup of a region of guest memory for the enlightened VMCS. */
- hv->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm);
+ hv->enlightened_vmcs = (void *)vm_alloc_page(vm);
hv->enlightened_vmcs_hva = addr_gva2hva(vm, (uintptr_t)hv->enlightened_vmcs);
hv->enlightened_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)hv->enlightened_vmcs);
@@ -100,9 +100,9 @@ struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
return hv;
}
-int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
+int enable_vp_assist(u64 vp_assist_pa, void *vp_assist)
{
- uint64_t val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
+ u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val);
diff --git a/tools/testing/selftests/kvm/lib/x86/memstress.c b/tools/testing/selftests/kvm/lib/x86/memstress.c
index f53414ba7103..61cf952cd2dc 100644
--- a/tools/testing/selftests/kvm/lib/x86/memstress.c
+++ b/tools/testing/selftests/kvm/lib/x86/memstress.c
@@ -16,7 +16,7 @@
#include "svm_util.h"
#include "vmx.h"
-void memstress_l2_guest_code(uint64_t vcpu_id)
+void memstress_l2_guest_code(u64 vcpu_id)
{
memstress_guest_code(vcpu_id);
vmcall();
@@ -32,7 +32,7 @@ __asm__(
#define L2_GUEST_STACK_SIZE 64
-static void l1_vmx_code(struct vmx_pages *vmx, uint64_t vcpu_id)
+static void l1_vmx_code(struct vmx_pages *vmx, u64 vcpu_id)
{
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
unsigned long *rsp;
@@ -51,7 +51,7 @@ static void l1_vmx_code(struct vmx_pages *vmx, uint64_t vcpu_id)
GUEST_DONE();
}
-static void l1_svm_code(struct svm_test_data *svm, uint64_t vcpu_id)
+static void l1_svm_code(struct svm_test_data *svm, u64 vcpu_id)
{
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
unsigned long *rsp;
@@ -67,7 +67,7 @@ static void l1_svm_code(struct svm_test_data *svm, uint64_t vcpu_id)
}
-static void memstress_l1_guest_code(void *data, uint64_t vcpu_id)
+static void memstress_l1_guest_code(void *data, u64 vcpu_id)
{
if (this_cpu_has(X86_FEATURE_VMX))
l1_vmx_code(data, vcpu_id);
@@ -75,7 +75,7 @@ static void memstress_l1_guest_code(void *data, uint64_t vcpu_id)
l1_svm_code(data, vcpu_id);
}
-uint64_t memstress_nested_pages(int nr_vcpus)
+u64 memstress_nested_pages(int nr_vcpus)
{
/*
* 513 page tables is enough to identity-map 256 TiB of L2 with 1G
@@ -87,7 +87,7 @@ uint64_t memstress_nested_pages(int nr_vcpus)
static void memstress_setup_ept_mappings(struct kvm_vm *vm)
{
- uint64_t start, end;
+ u64 start, end;
/*
* Identity map the first 4G and the test region with 1G pages so that
@@ -104,7 +104,7 @@ static void memstress_setup_ept_mappings(struct kvm_vm *vm)
void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
{
struct kvm_regs regs;
- vm_vaddr_t nested_gva;
+ gva_t nested_gva;
int vcpu_id;
TEST_REQUIRE(kvm_cpu_has_tdp());
diff --git a/tools/testing/selftests/kvm/lib/x86/pmu.c b/tools/testing/selftests/kvm/lib/x86/pmu.c
index 34cb57d1d671..0851b74b4e46 100644
--- a/tools/testing/selftests/kvm/lib/x86/pmu.c
+++ b/tools/testing/selftests/kvm/lib/x86/pmu.c
@@ -11,7 +11,7 @@
#include "processor.h"
#include "pmu.h"
-const uint64_t intel_pmu_arch_events[] = {
+const u64 intel_pmu_arch_events[] = {
INTEL_ARCH_CPU_CYCLES,
INTEL_ARCH_INSTRUCTIONS_RETIRED,
INTEL_ARCH_REFERENCE_CYCLES,
@@ -28,7 +28,7 @@ const uint64_t intel_pmu_arch_events[] = {
};
kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS);
-const uint64_t amd_pmu_zen_events[] = {
+const u64 amd_pmu_zen_events[] = {
AMD_ZEN_CORE_CYCLES,
AMD_ZEN_INSTRUCTIONS_RETIRED,
AMD_ZEN_BRANCHES_RETIRED,
@@ -50,7 +50,7 @@ kvm_static_assert(ARRAY_SIZE(amd_pmu_zen_events) == NR_AMD_ZEN_EVENTS);
* be overcounted on these certain instructions, but for Clearwater Forest
* only "Instruction Retired" event is overcounted on these instructions.
*/
-static uint64_t get_pmu_errata(void)
+static u64 get_pmu_errata(void)
{
if (!this_cpu_is_intel())
return 0;
@@ -72,7 +72,7 @@ static uint64_t get_pmu_errata(void)
}
}
-uint64_t pmu_errata_mask;
+u64 pmu_errata_mask;
void kvm_init_pmu_errata(void)
{
diff --git a/tools/testing/selftests/kvm/lib/x86/processor.c b/tools/testing/selftests/kvm/lib/x86/processor.c
index 01f0f97d4430..b51467d70f6e 100644
--- a/tools/testing/selftests/kvm/lib/x86/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86/processor.c
@@ -21,13 +21,13 @@
#define KERNEL_DS 0x10
#define KERNEL_TSS 0x18
-vm_vaddr_t exception_handlers;
+gva_t exception_handlers;
bool host_cpu_is_amd;
bool host_cpu_is_intel;
bool host_cpu_is_hygon;
bool host_cpu_is_amd_compatible;
bool is_forced_emulation_enabled;
-uint64_t guest_tsc_khz;
+u64 guest_tsc_khz;
const char *ex_str(int vector)
{
@@ -62,7 +62,7 @@ const char *ex_str(int vector)
}
}
-static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
+static void regs_dump(FILE *stream, struct kvm_regs *regs, u8 indent)
{
fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
"rcx: 0x%.16llx rdx: 0x%.16llx\n",
@@ -86,7 +86,7 @@ static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
}
static void segment_dump(FILE *stream, struct kvm_segment *segment,
- uint8_t indent)
+ u8 indent)
{
fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
"selector: 0x%.4x type: 0x%.2x\n",
@@ -103,7 +103,7 @@ static void segment_dump(FILE *stream, struct kvm_segment *segment,
}
static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
- uint8_t indent)
+ u8 indent)
{
fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
"padding: 0x%.4x 0x%.4x 0x%.4x\n",
@@ -111,7 +111,7 @@ static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
dtable->padding[0], dtable->padding[1], dtable->padding[2]);
}
-static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
+static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, u8 indent)
{
unsigned int i;
@@ -207,37 +207,37 @@ void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels,
}
static void *virt_get_pte(struct kvm_vm *vm, struct kvm_mmu *mmu,
- uint64_t *parent_pte, uint64_t vaddr, int level)
+ u64 *parent_pte, gva_t gva, int level)
{
- uint64_t pt_gpa = PTE_GET_PA(*parent_pte);
- uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
- int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
+ u64 pt_gpa = PTE_GET_PA(*parent_pte);
+ u64 *page_table = addr_gpa2hva(vm, pt_gpa);
+ int index = (gva >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
TEST_ASSERT((*parent_pte == mmu->pgd) || is_present_pte(mmu, parent_pte),
"Parent PTE (level %d) not PRESENT for gva: 0x%08lx",
- level + 1, vaddr);
+ level + 1, gva);
return &page_table[index];
}
-static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
- struct kvm_mmu *mmu,
- uint64_t *parent_pte,
- uint64_t vaddr,
- uint64_t paddr,
- int current_level,
- int target_level)
+static u64 *virt_create_upper_pte(struct kvm_vm *vm,
+ struct kvm_mmu *mmu,
+ u64 *parent_pte,
+ gva_t gva,
+ gpa_t gpa,
+ int current_level,
+ int target_level)
{
- uint64_t *pte = virt_get_pte(vm, mmu, parent_pte, vaddr, current_level);
+ u64 *pte = virt_get_pte(vm, mmu, parent_pte, gva, current_level);
- paddr = vm_untag_gpa(vm, paddr);
+ gpa = vm_untag_gpa(vm, gpa);
if (!is_present_pte(mmu, pte)) {
*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
PTE_ALWAYS_SET_MASK(mmu);
if (current_level == target_level)
- *pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
+ *pte |= PTE_HUGE_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK);
else
*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
} else {
@@ -247,39 +247,39 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
* this level.
*/
TEST_ASSERT(current_level != target_level,
- "Cannot create hugepage at level: %u, vaddr: 0x%lx",
- current_level, vaddr);
+ "Cannot create hugepage at level: %u, gva: 0x%lx",
+ current_level, gva);
TEST_ASSERT(!is_huge_pte(mmu, pte),
- "Cannot create page table at level: %u, vaddr: 0x%lx",
- current_level, vaddr);
+ "Cannot create page table at level: %u, gva: 0x%lx",
+ current_level, gva);
}
return pte;
}
-void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
- uint64_t paddr, int level)
+void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,
+ gpa_t gpa, int level)
{
- const uint64_t pg_size = PG_LEVEL_SIZE(level);
- uint64_t *pte = &mmu->pgd;
+ const u64 pg_size = PG_LEVEL_SIZE(level);
+ u64 *pte = &mmu->pgd;
int current_level;
TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
"Unknown or unsupported guest mode: 0x%x", vm->mode);
- TEST_ASSERT((vaddr % pg_size) == 0,
+ TEST_ASSERT((gva % pg_size) == 0,
"Virtual address not aligned,\n"
- "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
- TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
- "Invalid virtual address, vaddr: 0x%lx", vaddr);
- TEST_ASSERT((paddr % pg_size) == 0,
+ "gva: 0x%lx page size: 0x%lx", gva, pg_size);
+ TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+ "Invalid virtual address, gva: 0x%lx", gva);
+ TEST_ASSERT((gpa % pg_size) == 0,
"Physical address not aligned,\n"
- " paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
- TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+ " gpa: 0x%lx page size: 0x%lx", gpa, pg_size);
+ TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond maximum supported,\n"
- " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
- paddr, vm->max_gfn, vm->page_size);
- TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,
- "Unexpected bits in paddr: %lx", paddr);
+ " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ gpa, vm->max_gfn, vm->page_size);
+ TEST_ASSERT(vm_untag_gpa(vm, gpa) == gpa,
+ "Unexpected bits in gpa: %lx", gpa);
TEST_ASSERT(!PTE_EXECUTABLE_MASK(mmu) || !PTE_NX_MASK(mmu),
"X and NX bit masks cannot be used simultaneously");
@@ -291,40 +291,40 @@ void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
for (current_level = mmu->pgtable_levels;
current_level > PG_LEVEL_4K;
current_level--) {
- pte = virt_create_upper_pte(vm, mmu, pte, vaddr, paddr,
+ pte = virt_create_upper_pte(vm, mmu, pte, gva, gpa,
current_level, level);
if (is_huge_pte(mmu, pte))
return;
}
/* Fill in page table entry. */
- pte = virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
+ pte = virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K);
TEST_ASSERT(!is_present_pte(mmu, pte),
- "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
+ "PTE already present for 4k page at gva: 0x%lx", gva);
*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
- PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
+ PTE_ALWAYS_SET_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK);
/*
* Neither SEV nor TDX supports shared page tables, so only the final
* leaf PTE needs manually set the C/S-bit.
*/
- if (vm_is_gpa_protected(vm, paddr))
+ if (vm_is_gpa_protected(vm, gpa))
*pte |= PTE_C_BIT_MASK(mmu);
else
*pte |= PTE_S_BIT_MASK(mmu);
}
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
- __virt_pg_map(vm, &vm->mmu, vaddr, paddr, PG_LEVEL_4K);
+ __virt_pg_map(vm, &vm->mmu, gva, gpa, PG_LEVEL_4K);
}
-void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- uint64_t nr_bytes, int level)
+void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
+ u64 nr_bytes, int level)
{
- uint64_t pg_size = PG_LEVEL_SIZE(level);
- uint64_t nr_pages = nr_bytes / pg_size;
+ u64 pg_size = PG_LEVEL_SIZE(level);
+ u64 nr_pages = nr_bytes / pg_size;
int i;
TEST_ASSERT(nr_bytes % pg_size == 0,
@@ -332,16 +332,16 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
nr_bytes, pg_size);
for (i = 0; i < nr_pages; i++) {
- __virt_pg_map(vm, &vm->mmu, vaddr, paddr, level);
- sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift,
+ __virt_pg_map(vm, &vm->mmu, gva, gpa, level);
+ sparsebit_set_num(vm->vpages_mapped, gva >> vm->page_shift,
nr_bytes / PAGE_SIZE);
- vaddr += pg_size;
- paddr += pg_size;
+ gva += pg_size;
+ gpa += pg_size;
}
}
-static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte,
+static bool vm_is_target_pte(struct kvm_mmu *mmu, u64 *pte,
int *level, int current_level)
{
if (is_huge_pte(mmu, pte)) {
@@ -354,13 +354,13 @@ static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte,
return *level == current_level;
}
-static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm,
- struct kvm_mmu *mmu,
- uint64_t vaddr,
- int *level)
+static u64 *__vm_get_page_table_entry(struct kvm_vm *vm,
+ struct kvm_mmu *mmu,
+ gva_t gva,
+ int *level)
{
int va_width = 12 + (mmu->pgtable_levels) * 9;
- uint64_t *pte = &mmu->pgd;
+ u64 *pte = &mmu->pgd;
int current_level;
TEST_ASSERT(!vm->arch.is_pt_protected,
@@ -371,49 +371,46 @@ static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm,
TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
"Unknown or unsupported guest mode: 0x%x", vm->mode);
- TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
- (vaddr >> vm->page_shift)),
- "Invalid virtual address, vaddr: 0x%lx",
- vaddr);
+ TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+ "Invalid virtual address, gva: 0x%lx", gva);
/*
- * Check that the vaddr is a sign-extended va_width value.
+ * Check that the gva is a sign-extended va_width value.
*/
- TEST_ASSERT(vaddr ==
- (((int64_t)vaddr << (64 - va_width) >> (64 - va_width))),
+ TEST_ASSERT(gva == (((s64)gva << (64 - va_width) >> (64 - va_width))),
"Canonical check failed. The virtual address is invalid.");
for (current_level = mmu->pgtable_levels;
current_level > PG_LEVEL_4K;
current_level--) {
- pte = virt_get_pte(vm, mmu, pte, vaddr, current_level);
+ pte = virt_get_pte(vm, mmu, pte, gva, current_level);
if (vm_is_target_pte(mmu, pte, level, current_level))
return pte;
}
- return virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
+ return virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K);
}
-uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa)
+u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa)
{
int level = PG_LEVEL_4K;
return __vm_get_page_table_entry(vm, &vm->stage2_mmu, l2_gpa, &level);
}
-uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr)
+u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva)
{
int level = PG_LEVEL_4K;
- return __vm_get_page_table_entry(vm, &vm->mmu, vaddr, &level);
+ return __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
}
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
struct kvm_mmu *mmu = &vm->mmu;
- uint64_t *pml4e, *pml4e_start;
- uint64_t *pdpe, *pdpe_start;
- uint64_t *pde, *pde_start;
- uint64_t *pte, *pte_start;
+ u64 *pml4e, *pml4e_start;
+ u64 *pdpe, *pdpe_start;
+ u64 *pde, *pde_start;
+ u64 *pte, *pte_start;
if (!mmu->pgd_created)
return;
@@ -423,8 +420,8 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
fprintf(stream, "%*s index hvaddr gpaddr "
"addr w exec dirty\n",
indent, "");
- pml4e_start = (uint64_t *) addr_gpa2hva(vm, mmu->pgd);
- for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
+ pml4e_start = (u64 *)addr_gpa2hva(vm, mmu->pgd);
+ for (u16 n1 = 0; n1 <= 0x1ffu; n1++) {
pml4e = &pml4e_start[n1];
if (!is_present_pte(mmu, pml4e))
continue;
@@ -436,7 +433,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
is_writable_pte(mmu, pml4e), is_nx_pte(mmu, pml4e));
pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
- for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
+ for (u16 n2 = 0; n2 <= 0x1ffu; n2++) {
pdpe = &pdpe_start[n2];
if (!is_present_pte(mmu, pdpe))
continue;
@@ -449,7 +446,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
is_nx_pte(mmu, pdpe));
pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
- for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
+ for (u16 n3 = 0; n3 <= 0x1ffu; n3++) {
pde = &pde_start[n3];
if (!is_present_pte(mmu, pde))
continue;
@@ -461,7 +458,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
is_nx_pte(mmu, pde));
pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
- for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
+ for (u16 n4 = 0; n4 <= 0x1ffu; n4++) {
pte = &pte_start[n4];
if (!is_present_pte(mmu, pte))
continue;
@@ -475,10 +472,10 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
is_writable_pte(mmu, pte),
is_nx_pte(mmu, pte),
is_dirty_pte(mmu, pte),
- ((uint64_t) n1 << 27)
- | ((uint64_t) n2 << 18)
- | ((uint64_t) n3 << 9)
- | ((uint64_t) n4));
+ ((u64)n1 << 27)
+ | ((u64)n2 << 18)
+ | ((u64)n3 << 9)
+ | ((u64)n4));
}
}
}
@@ -498,26 +495,24 @@ bool kvm_cpu_has_tdp(void)
return kvm_cpu_has_ept() || kvm_cpu_has_npt();
}
-void __tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
- uint64_t size, int level)
+void __tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size, int level)
{
size_t page_size = PG_LEVEL_SIZE(level);
size_t npages = size / page_size;
- TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow");
- TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
+ TEST_ASSERT(l2_gpa + size > l2_gpa, "L2 GPA overflow");
+ TEST_ASSERT(gpa + size > gpa, "GPA overflow");
while (npages--) {
- __virt_pg_map(vm, &vm->stage2_mmu, nested_paddr, paddr, level);
- nested_paddr += page_size;
- paddr += page_size;
+ __virt_pg_map(vm, &vm->stage2_mmu, l2_gpa, gpa, level);
+ l2_gpa += page_size;
+ gpa += page_size;
}
}
-void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
- uint64_t size)
+void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size)
{
- __tdp_map(vm, nested_paddr, paddr, size, PG_LEVEL_4K);
+ __tdp_map(vm, l2_gpa, gpa, size, PG_LEVEL_4K);
}
/* Prepare an identity extended page table that maps all the
@@ -525,7 +520,7 @@ void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
*/
void tdp_identity_map_default_memslots(struct kvm_vm *vm)
{
- uint32_t s, memslot = 0;
+ u32 s, memslot = 0;
sparsebit_idx_t i, last;
struct userspace_mem_region *region = memslot2region(vm, memslot);
@@ -540,13 +535,13 @@ void tdp_identity_map_default_memslots(struct kvm_vm *vm)
if (i > last)
break;
- tdp_map(vm, (uint64_t)i << vm->page_shift,
- (uint64_t)i << vm->page_shift, 1 << vm->page_shift);
+ tdp_map(vm, (u64)i << vm->page_shift,
+ (u64)i << vm->page_shift, 1 << vm->page_shift);
}
}
/* Identity map a region with 1GiB Pages. */
-void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size)
+void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size)
{
__tdp_map(vm, addr, addr, size, PG_LEVEL_1G);
}
@@ -618,10 +613,10 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_segment *segp)
segp->present = true;
}
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
int level = PG_LEVEL_NONE;
- uint64_t *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
+ u64 *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
TEST_ASSERT(is_present_pte(&vm->mmu, pte),
"Leaf PTE not PRESENT for gva: 0x%08lx", gva);
@@ -633,7 +628,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level));
}
-static void kvm_seg_set_tss_64bit(vm_vaddr_t base, struct kvm_segment *segp)
+static void kvm_seg_set_tss_64bit(gva_t base, struct kvm_segment *segp)
{
memset(segp, 0, sizeof(*segp));
segp->base = base;
@@ -746,16 +741,16 @@ static void vm_init_descriptor_tables(struct kvm_vm *vm)
struct kvm_segment seg;
int i;
- vm->arch.gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
- vm->arch.idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
- vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
- vm->arch.tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
+ vm->arch.gdt = __vm_alloc_page(vm, MEM_REGION_DATA);
+ vm->arch.idt = __vm_alloc_page(vm, MEM_REGION_DATA);
+ vm->handlers = __vm_alloc_page(vm, MEM_REGION_DATA);
+ vm->arch.tss = __vm_alloc_page(vm, MEM_REGION_DATA);
/* Handlers have the same address in both address spaces.*/
for (i = 0; i < NUM_INTERRUPTS; i++)
set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, KERNEL_CS);
- *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
+ *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers;
kvm_seg_set_kernel_code_64bit(&seg);
kvm_seg_fill_gdt_64bit(vm, &seg);
@@ -770,9 +765,9 @@ static void vm_init_descriptor_tables(struct kvm_vm *vm)
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
void (*handler)(struct ex_regs *))
{
- vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);
+ gva_t *handlers = (gva_t *)addr_gva2hva(vm, vm->handlers);
- handlers[vector] = (vm_vaddr_t)handler;
+ handlers[vector] = (gva_t)handler;
}
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
@@ -821,18 +816,17 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
vcpu_regs_set(vcpu, &regs);
}
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
struct kvm_mp_state mp_state;
struct kvm_regs regs;
- vm_vaddr_t stack_vaddr;
+ gva_t stack_gva;
struct kvm_vcpu *vcpu;
- stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
- DEFAULT_GUEST_STACK_VADDR_MIN,
- MEM_REGION_DATA);
+ stack_gva = __vm_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
+ DEFAULT_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
- stack_vaddr += DEFAULT_STACK_PGS * getpagesize();
+ stack_gva += DEFAULT_STACK_PGS * getpagesize();
/*
* Align stack to match calling sequence requirements in section "The
@@ -843,9 +837,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
* If this code is ever used to launch a vCPU with 32-bit entry point it
* may need to subtract 4 bytes instead of 8 bytes.
*/
- TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE),
- "__vm_vaddr_alloc() did not provide a page-aligned address");
- stack_vaddr -= 8;
+ TEST_ASSERT(IS_ALIGNED(stack_gva, PAGE_SIZE),
+ "__vm_alloc() did not provide a page-aligned address");
+ stack_gva -= 8;
vcpu = __vm_vcpu_add(vm, vcpu_id);
vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
@@ -855,7 +849,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
/* Setup guest general purpose registers */
vcpu_regs_get(vcpu, &regs);
regs.rflags = regs.rflags | 0x2;
- regs.rsp = stack_vaddr;
+ regs.rsp = stack_gva;
vcpu_regs_set(vcpu, &regs);
/* Setup the MP state */
@@ -872,7 +866,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
return vcpu;
}
-struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id)
{
struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
@@ -907,9 +901,9 @@ const struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
return kvm_supported_cpuid;
}
-static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid,
- uint32_t function, uint32_t index,
- uint8_t reg, uint8_t lo, uint8_t hi)
+static u32 __kvm_cpu_has(const struct kvm_cpuid2 *cpuid,
+ u32 function, u32 index,
+ u8 reg, u8 lo, u8 hi)
{
const struct kvm_cpuid_entry2 *entry;
int i;
@@ -936,14 +930,14 @@ bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
feature.reg, feature.bit, feature.bit);
}
-uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
- struct kvm_x86_cpu_property property)
+u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
+ struct kvm_x86_cpu_property property)
{
return __kvm_cpu_has(cpuid, property.function, property.index,
property.reg, property.lo_bit, property.hi_bit);
}
-uint64_t kvm_get_feature_msr(uint64_t msr_index)
+u64 kvm_get_feature_msr(u64 msr_index)
{
struct {
struct kvm_msrs header;
@@ -962,7 +956,7 @@ uint64_t kvm_get_feature_msr(uint64_t msr_index)
return buffer.entry.data;
}
-void __vm_xsave_require_permission(uint64_t xfeature, const char *name)
+void __vm_xsave_require_permission(u64 xfeature, const char *name)
{
int kvm_fd;
u64 bitmask;
@@ -1019,7 +1013,7 @@ void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid)
void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
struct kvm_x86_cpu_property property,
- uint32_t value)
+ u32 value)
{
struct kvm_cpuid_entry2 *entry;
@@ -1034,7 +1028,7 @@ void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
TEST_ASSERT_EQ(kvm_cpuid_property(vcpu->cpuid, property), value);
}
-void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function)
+void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function)
{
struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function);
@@ -1063,7 +1057,7 @@ void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
vcpu_set_cpuid(vcpu);
}
-uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)
+u64 vcpu_get_msr(struct kvm_vcpu *vcpu, u64 msr_index)
{
struct {
struct kvm_msrs header;
@@ -1078,7 +1072,7 @@ uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)
return buffer.entry.data;
}
-int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value)
+int _vcpu_set_msr(struct kvm_vcpu *vcpu, u64 msr_index, u64 msr_value)
{
struct {
struct kvm_msrs header;
@@ -1106,28 +1100,28 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
vcpu_regs_get(vcpu, &regs);
if (num >= 1)
- regs.rdi = va_arg(ap, uint64_t);
+ regs.rdi = va_arg(ap, u64);
if (num >= 2)
- regs.rsi = va_arg(ap, uint64_t);
+ regs.rsi = va_arg(ap, u64);
if (num >= 3)
- regs.rdx = va_arg(ap, uint64_t);
+ regs.rdx = va_arg(ap, u64);
if (num >= 4)
- regs.rcx = va_arg(ap, uint64_t);
+ regs.rcx = va_arg(ap, u64);
if (num >= 5)
- regs.r8 = va_arg(ap, uint64_t);
+ regs.r8 = va_arg(ap, u64);
if (num >= 6)
- regs.r9 = va_arg(ap, uint64_t);
+ regs.r9 = va_arg(ap, u64);
vcpu_regs_set(vcpu, &regs);
va_end(ap);
}
-void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
{
struct kvm_regs regs;
struct kvm_sregs sregs;
@@ -1196,7 +1190,7 @@ const struct kvm_msr_list *kvm_get_feature_msr_index_list(void)
return list;
}
-bool kvm_msr_is_in_save_restore_list(uint32_t msr_index)
+bool kvm_msr_is_in_save_restore_list(u32 msr_index)
{
const struct kvm_msr_list *list = kvm_get_msr_index_list();
int i;
@@ -1327,7 +1321,7 @@ void kvm_init_vm_address_properties(struct kvm_vm *vm)
}
const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
- uint32_t function, uint32_t index)
+ u32 function, u32 index)
{
int i;
@@ -1344,7 +1338,7 @@ const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
#define X86_HYPERCALL(inputs...) \
({ \
- uint64_t r; \
+ u64 r; \
\
asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t" \
"jnz 1f\n\t" \
@@ -1359,18 +1353,17 @@ const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
r; \
})
-uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
- uint64_t a3)
+u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3)
{
return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
}
-uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
+u64 __xen_hypercall(u64 nr, u64 a0, void *a1)
{
return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1));
}
-void xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
+void xen_hypercall(u64 nr, u64 a0, void *a1)
{
GUEST_ASSERT(!__xen_hypercall(nr, a0, a1));
}
@@ -1379,7 +1372,7 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
{
const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
unsigned long ht_gfn, max_gfn, max_pfn;
- uint8_t maxphyaddr, guest_maxphyaddr;
+ u8 maxphyaddr, guest_maxphyaddr;
/*
* Use "guest MAXPHYADDR" from KVM if it's available. Guest MAXPHYADDR
@@ -1453,8 +1446,7 @@ bool kvm_arch_has_default_irqchip(void)
return true;
}
-void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
- uint64_t smram_gpa,
+void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, u64 smram_gpa,
const void *smi_handler, size_t handler_size)
{
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, smram_gpa,
diff --git a/tools/testing/selftests/kvm/lib/x86/sev.c b/tools/testing/selftests/kvm/lib/x86/sev.c
index c3a9838f4806..93f916903461 100644
--- a/tools/testing/selftests/kvm/lib/x86/sev.c
+++ b/tools/testing/selftests/kvm/lib/x86/sev.c
@@ -15,10 +15,10 @@
* expression would cause us to quit the loop.
*/
static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region,
- uint8_t page_type, bool private)
+ u8 page_type, bool private)
{
const struct sparsebit *protected_phy_pages = region->protected_phy_pages;
- const vm_paddr_t gpa_base = region->region.guest_phys_addr;
+ const gpa_t gpa_base = region->region.guest_phys_addr;
const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift;
sparsebit_idx_t i, j;
@@ -29,15 +29,15 @@ static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *regio
sev_register_encrypted_memory(vm, region);
sparsebit_for_each_set_range(protected_phy_pages, i, j) {
- const uint64_t size = (j - i + 1) * vm->page_size;
- const uint64_t offset = (i - lowest_page_in_region) * vm->page_size;
+ const u64 size = (j - i + 1) * vm->page_size;
+ const u64 offset = (i - lowest_page_in_region) * vm->page_size;
if (private)
vm_mem_set_private(vm, gpa_base + offset, size);
if (is_sev_snp_vm(vm))
snp_launch_update_data(vm, gpa_base + offset,
- (uint64_t)addr_gpa2hva(vm, gpa_base + offset),
+ (u64)addr_gpa2hva(vm, gpa_base + offset),
size, page_type);
else
sev_launch_update_data(vm, gpa_base + offset, size);
@@ -79,7 +79,7 @@ void snp_vm_init(struct kvm_vm *vm)
vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
}
-void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
+void sev_vm_launch(struct kvm_vm *vm, u32 policy)
{
struct kvm_sev_launch_start launch_start = {
.policy = policy,
@@ -103,7 +103,7 @@ void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
vm->arch.is_pt_protected = true;
}
-void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement)
+void sev_vm_launch_measure(struct kvm_vm *vm, u8 *measurement)
{
struct kvm_sev_launch_measure launch_measure;
struct kvm_sev_guest_status guest_status;
@@ -131,7 +131,7 @@ void sev_vm_launch_finish(struct kvm_vm *vm)
TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING);
}
-void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy)
+void snp_vm_launch_start(struct kvm_vm *vm, u64 policy)
{
struct kvm_sev_snp_launch_start launch_start = {
.policy = policy,
@@ -158,7 +158,7 @@ void snp_vm_launch_finish(struct kvm_vm *vm)
vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish);
}
-struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+struct kvm_vm *vm_sev_create_with_one_vcpu(u32 type, void *guest_code,
struct kvm_vcpu **cpu)
{
struct vm_shape shape = {
@@ -174,7 +174,7 @@ struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
return vm;
}
-void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement)
+void vm_sev_launch(struct kvm_vm *vm, u64 policy, u8 *measurement)
{
if (is_sev_snp_vm(vm)) {
vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, BIT(KVM_HC_MAP_GPA_RANGE));
diff --git a/tools/testing/selftests/kvm/lib/x86/svm.c b/tools/testing/selftests/kvm/lib/x86/svm.c
index eb20b00112c7..3b01605ab016 100644
--- a/tools/testing/selftests/kvm/lib/x86/svm.c
+++ b/tools/testing/selftests/kvm/lib/x86/svm.c
@@ -28,20 +28,20 @@ u64 rflags;
* Pointer to structure with the addresses of the SVM areas.
*/
struct svm_test_data *
-vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva)
+vcpu_alloc_svm(struct kvm_vm *vm, gva_t *p_svm_gva)
{
- vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm);
+ gva_t svm_gva = vm_alloc_page(vm);
struct svm_test_data *svm = addr_gva2hva(vm, svm_gva);
- svm->vmcb = (void *)vm_vaddr_alloc_page(vm);
+ svm->vmcb = (void *)vm_alloc_page(vm);
svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb);
svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb);
- svm->save_area = (void *)vm_vaddr_alloc_page(vm);
+ svm->save_area = (void *)vm_alloc_page(vm);
svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area);
svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area);
- svm->msr = (void *)vm_vaddr_alloc_page(vm);
+ svm->msr = (void *)vm_alloc_page(vm);
svm->msr_hva = addr_gva2hva(vm, (uintptr_t)svm->msr);
svm->msr_gpa = addr_gva2gpa(vm, (uintptr_t)svm->msr);
memset(svm->msr_hva, 0, getpagesize());
@@ -84,14 +84,14 @@ void vm_enable_npt(struct kvm_vm *vm)
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp)
{
struct vmcb *vmcb = svm->vmcb;
- uint64_t vmcb_gpa = svm->vmcb_gpa;
+ u64 vmcb_gpa = svm->vmcb_gpa;
struct vmcb_save_area *save = &vmcb->save;
struct vmcb_control_area *ctrl = &vmcb->control;
u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
| SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
| SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
- uint64_t efer;
+ u64 efer;
efer = rdmsr(MSR_EFER);
wrmsr(MSR_EFER, efer | EFER_SVME);
@@ -158,7 +158,7 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r
* for now. registers involved in LOAD/SAVE_GPR_C are eventually
* unmodified so they do not need to be in the clobber list.
*/
-void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
+void run_guest(struct vmcb *vmcb, u64 vmcb_gpa)
{
asm volatile (
"vmload %[vmcb_gpa]\n\t"
diff --git a/tools/testing/selftests/kvm/lib/x86/ucall.c b/tools/testing/selftests/kvm/lib/x86/ucall.c
index 1265cecc7dd1..e7dd5791959b 100644
--- a/tools/testing/selftests/kvm/lib/x86/ucall.c
+++ b/tools/testing/selftests/kvm/lib/x86/ucall.c
@@ -6,9 +6,9 @@
*/
#include "kvm_util.h"
-#define UCALL_PIO_PORT ((uint16_t)0x1000)
+#define UCALL_PIO_PORT ((u16)0x1000)
-void ucall_arch_do_ucall(vm_vaddr_t uc)
+void ucall_arch_do_ucall(gva_t uc)
{
/*
* FIXME: Revert this hack (the entire commit that added it) once nVMX
diff --git a/tools/testing/selftests/kvm/lib/x86/vmx.c b/tools/testing/selftests/kvm/lib/x86/vmx.c
index c87b340362a9..67642759e4a0 100644
--- a/tools/testing/selftests/kvm/lib/x86/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86/vmx.c
@@ -27,7 +27,7 @@ struct hv_vp_assist_page *current_vp_assist;
int vcpu_enable_evmcs(struct kvm_vcpu *vcpu)
{
- uint16_t evmcs_ver;
+ u16 evmcs_ver;
vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
(unsigned long)&evmcs_ver);
@@ -79,39 +79,39 @@ void vm_enable_ept(struct kvm_vm *vm)
* Pointer to structure with the addresses of the VMX areas.
*/
struct vmx_pages *
-vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
+vcpu_alloc_vmx(struct kvm_vm *vm, gva_t *p_vmx_gva)
{
- vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
+ gva_t vmx_gva = vm_alloc_page(vm);
struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);
/* Setup of a region of guest memory for the vmxon region. */
- vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
+ vmx->vmxon = (void *)vm_alloc_page(vm);
vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);
/* Setup of a region of guest memory for a vmcs. */
- vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
+ vmx->vmcs = (void *)vm_alloc_page(vm);
vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);
/* Setup of a region of guest memory for the MSR bitmap. */
- vmx->msr = (void *)vm_vaddr_alloc_page(vm);
+ vmx->msr = (void *)vm_alloc_page(vm);
vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
memset(vmx->msr_hva, 0, getpagesize());
/* Setup of a region of guest memory for the shadow VMCS. */
- vmx->shadow_vmcs = (void *)vm_vaddr_alloc_page(vm);
+ vmx->shadow_vmcs = (void *)vm_alloc_page(vm);
vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs);
vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs);
/* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */
- vmx->vmread = (void *)vm_vaddr_alloc_page(vm);
+ vmx->vmread = (void *)vm_alloc_page(vm);
vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);
vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);
memset(vmx->vmread_hva, 0, getpagesize());
- vmx->vmwrite = (void *)vm_vaddr_alloc_page(vm);
+ vmx->vmwrite = (void *)vm_alloc_page(vm);
vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite);
vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
memset(vmx->vmwrite_hva, 0, getpagesize());
@@ -125,8 +125,8 @@ vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
bool prepare_for_vmx_operation(struct vmx_pages *vmx)
{
- uint64_t feature_control;
- uint64_t required;
+ u64 feature_control;
+ u64 required;
unsigned long cr0;
unsigned long cr4;
@@ -160,7 +160,7 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx)
wrmsr(MSR_IA32_FEAT_CTL, feature_control | required);
/* Enter VMX root operation. */
- *(uint32_t *)(vmx->vmxon) = vmcs_revision();
+ *(u32 *)(vmx->vmxon) = vmcs_revision();
if (vmxon(vmx->vmxon_gpa))
return false;
@@ -170,7 +170,7 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx)
bool load_vmcs(struct vmx_pages *vmx)
{
/* Load a VMCS. */
- *(uint32_t *)(vmx->vmcs) = vmcs_revision();
+ *(u32 *)(vmx->vmcs) = vmcs_revision();
if (vmclear(vmx->vmcs_gpa))
return false;
@@ -178,14 +178,14 @@ bool load_vmcs(struct vmx_pages *vmx)
return false;
/* Setup shadow VMCS, do not load it yet. */
- *(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul;
+ *(u32 *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul;
if (vmclear(vmx->shadow_vmcs_gpa))
return false;
return true;
}
-static bool ept_vpid_cap_supported(uint64_t mask)
+static bool ept_vpid_cap_supported(u64 mask)
{
return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & mask;
}
@@ -200,7 +200,7 @@ bool ept_1g_pages_supported(void)
*/
static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
{
- uint32_t sec_exec_ctl = 0;
+ u32 sec_exec_ctl = 0;
vmwrite(VIRTUAL_PROCESSOR_ID, 0);
vmwrite(POSTED_INTR_NV, 0);
@@ -208,7 +208,7 @@ static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS));
if (vmx->eptp_gpa) {
- uint64_t eptp = vmx->eptp_gpa | EPTP_WB | EPTP_PWL_4;
+ u64 eptp = vmx->eptp_gpa | EPTP_WB | EPTP_PWL_4;
TEST_ASSERT((vmx->eptp_gpa & ~PHYSICAL_PAGE_MASK) == 0,
"Illegal bits set in vmx->eptp_gpa");
@@ -259,7 +259,7 @@ static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
*/
static inline void init_vmcs_host_state(void)
{
- uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS);
+ u32 exit_controls = vmreadz(VM_EXIT_CONTROLS);
vmwrite(HOST_ES_SELECTOR, get_es());
vmwrite(HOST_CS_SELECTOR, get_cs());
@@ -358,8 +358,8 @@ static inline void init_vmcs_guest_state(void *rip, void *rsp)
vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE));
vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE));
vmwrite(GUEST_DR7, 0x400);
- vmwrite(GUEST_RSP, (uint64_t)rsp);
- vmwrite(GUEST_RIP, (uint64_t)rip);
+ vmwrite(GUEST_RSP, (u64)rsp);
+ vmwrite(GUEST_RIP, (u64)rip);
vmwrite(GUEST_RFLAGS, 2);
vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0);
vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP));
@@ -375,7 +375,7 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
bool kvm_cpu_has_ept(void)
{
- uint64_t ctrl;
+ u64 ctrl;
if (!kvm_cpu_has(X86_FEATURE_VMX))
return false;
@@ -390,7 +390,7 @@ bool kvm_cpu_has_ept(void)
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm)
{
- vmx->apic_access = (void *)vm_vaddr_alloc_page(vm);
+ vmx->apic_access = (void *)vm_alloc_page(vm);
vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access);
vmx->apic_access_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->apic_access);
}
diff --git a/tools/testing/selftests/kvm/loongarch/arch_timer.c b/tools/testing/selftests/kvm/loongarch/arch_timer.c
index 355ecac30954..a7279ded8518 100644
--- a/tools/testing/selftests/kvm/loongarch/arch_timer.c
+++ b/tools/testing/selftests/kvm/loongarch/arch_timer.c
@@ -27,8 +27,8 @@ static void do_idle(void)
static void guest_irq_handler(struct ex_regs *regs)
{
unsigned int intid;
- uint32_t cpu = guest_get_vcpuid();
- uint64_t xcnt, val, cfg, xcnt_diff_us;
+ u32 cpu = guest_get_vcpuid();
+ u64 xcnt, val, cfg, xcnt_diff_us;
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
intid = !!(regs->estat & BIT(INT_TI));
@@ -62,10 +62,10 @@ static void guest_irq_handler(struct ex_regs *regs)
WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
}
-static void guest_test_period_timer(uint32_t cpu)
+static void guest_test_period_timer(u32 cpu)
{
- uint32_t irq_iter, config_iter;
- uint64_t us;
+ u32 irq_iter, config_iter;
+ u64 us;
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
shared_data->nr_iter = test_args.nr_iter;
@@ -86,10 +86,10 @@ static void guest_test_period_timer(uint32_t cpu)
irq_iter);
}
-static void guest_test_oneshot_timer(uint32_t cpu)
+static void guest_test_oneshot_timer(u32 cpu)
{
- uint32_t irq_iter, config_iter;
- uint64_t us;
+ u32 irq_iter, config_iter;
+ u64 us;
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
shared_data->nr_iter = 0;
@@ -112,10 +112,10 @@ static void guest_test_oneshot_timer(uint32_t cpu)
}
}
-static void guest_test_emulate_timer(uint32_t cpu)
+static void guest_test_emulate_timer(u32 cpu)
{
- uint32_t config_iter;
- uint64_t xcnt_diff_us, us;
+ u32 config_iter;
+ u64 xcnt_diff_us, us;
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
local_irq_disable();
@@ -136,9 +136,9 @@ static void guest_test_emulate_timer(uint32_t cpu)
local_irq_enable();
}
-static void guest_time_count_test(uint32_t cpu)
+static void guest_time_count_test(u32 cpu)
{
- uint32_t config_iter;
+ u32 config_iter;
unsigned long start, end, prev, us;
/* Assuming that test case starts to run in 1 second */
@@ -165,7 +165,7 @@ static void guest_time_count_test(uint32_t cpu)
static void guest_code(void)
{
- uint32_t cpu = guest_get_vcpuid();
+ u32 cpu = guest_get_vcpuid();
/* must run at first */
guest_time_count_test(cpu);
diff --git a/tools/testing/selftests/kvm/loongarch/pmu_test.c b/tools/testing/selftests/kvm/loongarch/pmu_test.c
index 88bb530e336e..ec3fefb9ea97 100644
--- a/tools/testing/selftests/kvm/loongarch/pmu_test.c
+++ b/tools/testing/selftests/kvm/loongarch/pmu_test.c
@@ -15,7 +15,7 @@ static int pmu_irq_count;
/* Check PMU support */
static bool has_pmu_support(void)
{
- uint32_t cfg6;
+ u32 cfg6;
/* Read CPUCFG6 to check PMU */
cfg6 = read_cpucfg(LOONGARCH_CPUCFG6);
@@ -34,7 +34,7 @@ static bool has_pmu_support(void)
/* Dump PMU capabilities */
static void dump_pmu_caps(void)
{
- uint32_t cfg6;
+ u32 cfg6;
int nr_counters, counter_bits;
cfg6 = read_cpucfg(LOONGARCH_CPUCFG6);
@@ -51,8 +51,8 @@ static void dump_pmu_caps(void)
static void guest_pmu_base_test(void)
{
int i;
- uint32_t cfg6, pmnum;
- uint64_t cnt[4];
+ u32 cfg6, pmnum;
+ u64 cnt[4];
cfg6 = read_cpucfg(LOONGARCH_CPUCFG6);
pmnum = (cfg6 >> 4) & 0xf;
@@ -114,7 +114,7 @@ static void guest_irq_handler(struct ex_regs *regs)
static void guest_pmu_interrupt_test(void)
{
- uint64_t cnt;
+ u64 cnt;
csr_write(PMU_OVERFLOW - 1, LOONGARCH_CSR_PERFCNTR0);
csr_write(PMU_ENVENT_ENABLED | CSR_PERFCTRL_PMIE | LOONGARCH_PMU_EVENT_CYCLES, LOONGARCH_CSR_PERFCTRL0);
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
index 3cdfa3b19b85..9c7578a098c3 100644
--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -30,7 +30,7 @@
static int nr_vcpus = 1;
-static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
@@ -55,10 +55,10 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
}
static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
- uint64_t nr_modifications)
+ u64 nr_modifications)
{
- uint64_t pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size;
- uint64_t gpa;
+ u64 pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size;
+ gpa_t gpa;
int i;
/*
@@ -78,7 +78,7 @@ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
struct test_params {
useconds_t delay;
- uint64_t nr_iterations;
+ u64 nr_iterations;
bool partition_vcpu_memory_access;
bool disable_slot_zap_quirk;
};
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index 5087d082c4b0..3d02db371422 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -85,17 +85,17 @@ struct vm_data {
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
pthread_t vcpu_thread;
- uint32_t nslots;
- uint64_t npages;
- uint64_t pages_per_slot;
+ u32 nslots;
+ u64 npages;
+ u64 pages_per_slot;
void **hva_slots;
bool mmio_ok;
- uint64_t mmio_gpa_min;
- uint64_t mmio_gpa_max;
+ u64 mmio_gpa_min;
+ u64 mmio_gpa_max;
};
struct sync_area {
- uint32_t guest_page_size;
+ u32 guest_page_size;
atomic_bool start_flag;
atomic_bool exit_flag;
atomic_bool sync_flag;
@@ -186,12 +186,12 @@ static void wait_for_vcpu(void)
"sem_timedwait() failed: %d", errno);
}
-static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
+static void *vm_gpa2hva(struct vm_data *data, gpa_t gpa, u64 *rempages)
{
- uint64_t gpage, pgoffs;
- uint32_t slot, slotoffs;
+ gpa_t gpage, pgoffs;
+ u32 slot, slotoffs;
void *base;
- uint32_t guest_page_size = data->vm->page_size;
+ u32 guest_page_size = data->vm->page_size;
TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate");
TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size,
@@ -200,11 +200,11 @@ static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
gpage = gpa / guest_page_size;
pgoffs = gpa % guest_page_size;
- slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1);
+ slot = min(gpage / data->pages_per_slot, (u64)data->nslots - 1);
slotoffs = gpage - (slot * data->pages_per_slot);
if (rempages) {
- uint64_t slotpages;
+ u64 slotpages;
if (slot == data->nslots - 1)
slotpages = data->npages - slot * data->pages_per_slot;
@@ -217,12 +217,12 @@ static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
}
base = data->hva_slots[slot];
- return (uint8_t *)base + slotoffs * guest_page_size + pgoffs;
+ return (u8 *)base + slotoffs * guest_page_size + pgoffs;
}
-static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot)
+static u64 vm_slot2gpa(struct vm_data *data, u32 slot)
{
- uint32_t guest_page_size = data->vm->page_size;
+ u32 guest_page_size = data->vm->page_size;
TEST_ASSERT(slot < data->nslots, "Too high slot number");
@@ -243,8 +243,8 @@ static struct vm_data *alloc_vm(void)
return data;
}
-static bool check_slot_pages(uint32_t host_page_size, uint32_t guest_page_size,
- uint64_t pages_per_slot, uint64_t rempages)
+static bool check_slot_pages(u32 host_page_size, u32 guest_page_size,
+ u64 pages_per_slot, u64 rempages)
{
if (!pages_per_slot)
return false;
@@ -259,11 +259,11 @@ static bool check_slot_pages(uint32_t host_page_size, uint32_t guest_page_size,
}
-static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size)
+static u64 get_max_slots(struct vm_data *data, u32 host_page_size)
{
- uint32_t guest_page_size = data->vm->page_size;
- uint64_t mempages, pages_per_slot, rempages;
- uint64_t slots;
+ u32 guest_page_size = data->vm->page_size;
+ u64 mempages, pages_per_slot, rempages;
+ u64 slots;
mempages = data->npages;
slots = data->nslots;
@@ -281,13 +281,13 @@ static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size)
return 0;
}
-static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
- void *guest_code, uint64_t mem_size,
+static bool prepare_vm(struct vm_data *data, int nslots, u64 *maxslots,
+ void *guest_code, u64 mem_size,
struct timespec *slot_runtime)
{
- uint64_t mempages, rempages;
- uint64_t guest_addr;
- uint32_t slot, host_page_size, guest_page_size;
+ u64 mempages, rempages;
+ u64 guest_addr;
+ u32 slot, host_page_size, guest_page_size;
struct timespec tstart;
struct sync_area *sync;
@@ -317,7 +317,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
clock_gettime(CLOCK_MONOTONIC, &tstart);
for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) {
- uint64_t npages;
+ u64 npages;
npages = data->pages_per_slot;
if (slot == data->nslots)
@@ -331,8 +331,8 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
*slot_runtime = timespec_elapsed(tstart);
for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) {
- uint64_t npages;
- uint64_t gpa;
+ u64 npages;
+ gpa_t gpa;
npages = data->pages_per_slot;
if (slot == data->nslots)
@@ -448,7 +448,7 @@ static bool guest_perform_sync(void)
static void guest_code_test_memslot_move(void)
{
struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
- uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
+ u32 page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr);
GUEST_SYNC(0);
@@ -460,7 +460,7 @@ static void guest_code_test_memslot_move(void)
for (ptr = base; ptr < base + MEM_TEST_MOVE_SIZE;
ptr += page_size)
- *(uint64_t *)ptr = MEM_TEST_VAL_1;
+ *(u64 *)ptr = MEM_TEST_VAL_1;
/*
* No host sync here since the MMIO exits are so expensive
@@ -477,7 +477,7 @@ static void guest_code_test_memslot_move(void)
static void guest_code_test_memslot_map(void)
{
struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
- uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
+ u32 page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
GUEST_SYNC(0);
@@ -489,7 +489,7 @@ static void guest_code_test_memslot_map(void)
for (ptr = MEM_TEST_GPA;
ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2;
ptr += page_size)
- *(uint64_t *)ptr = MEM_TEST_VAL_1;
+ *(u64 *)ptr = MEM_TEST_VAL_1;
if (!guest_perform_sync())
break;
@@ -497,7 +497,7 @@ static void guest_code_test_memslot_map(void)
for (ptr = MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2;
ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE;
ptr += page_size)
- *(uint64_t *)ptr = MEM_TEST_VAL_2;
+ *(u64 *)ptr = MEM_TEST_VAL_2;
if (!guest_perform_sync())
break;
@@ -526,13 +526,13 @@ static void guest_code_test_memslot_unmap(void)
*
* Just access a single page to be on the safe side.
*/
- *(uint64_t *)ptr = MEM_TEST_VAL_1;
+ *(u64 *)ptr = MEM_TEST_VAL_1;
if (!guest_perform_sync())
break;
ptr += MEM_TEST_UNMAP_SIZE / 2;
- *(uint64_t *)ptr = MEM_TEST_VAL_2;
+ *(u64 *)ptr = MEM_TEST_VAL_2;
if (!guest_perform_sync())
break;
@@ -544,7 +544,7 @@ static void guest_code_test_memslot_unmap(void)
static void guest_code_test_memslot_rw(void)
{
struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
- uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
+ u32 page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
GUEST_SYNC(0);
@@ -555,17 +555,17 @@ static void guest_code_test_memslot_rw(void)
for (ptr = MEM_TEST_GPA;
ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size)
- *(uint64_t *)ptr = MEM_TEST_VAL_1;
+ *(u64 *)ptr = MEM_TEST_VAL_1;
if (!guest_perform_sync())
break;
for (ptr = MEM_TEST_GPA + page_size / 2;
ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size) {
- uint64_t val = *(uint64_t *)ptr;
+ u64 val = *(u64 *)ptr;
GUEST_ASSERT_EQ(val, MEM_TEST_VAL_2);
- *(uint64_t *)ptr = 0;
+ *(u64 *)ptr = 0;
}
if (!guest_perform_sync())
@@ -577,10 +577,10 @@ static void guest_code_test_memslot_rw(void)
static bool test_memslot_move_prepare(struct vm_data *data,
struct sync_area *sync,
- uint64_t *maxslots, bool isactive)
+ u64 *maxslots, bool isactive)
{
- uint32_t guest_page_size = data->vm->page_size;
- uint64_t movesrcgpa, movetestgpa;
+ u32 guest_page_size = data->vm->page_size;
+ u64 movesrcgpa, movetestgpa;
#ifdef __x86_64__
if (disable_slot_zap_quirk)
@@ -590,7 +590,7 @@ static bool test_memslot_move_prepare(struct vm_data *data,
movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
if (isactive) {
- uint64_t lastpages;
+ u64 lastpages;
vm_gpa2hva(data, movesrcgpa, &lastpages);
if (lastpages * guest_page_size < MEM_TEST_MOVE_SIZE / 2) {
@@ -613,21 +613,21 @@ static bool test_memslot_move_prepare(struct vm_data *data,
static bool test_memslot_move_prepare_active(struct vm_data *data,
struct sync_area *sync,
- uint64_t *maxslots)
+ u64 *maxslots)
{
return test_memslot_move_prepare(data, sync, maxslots, true);
}
static bool test_memslot_move_prepare_inactive(struct vm_data *data,
struct sync_area *sync,
- uint64_t *maxslots)
+ u64 *maxslots)
{
return test_memslot_move_prepare(data, sync, maxslots, false);
}
static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync)
{
- uint64_t movesrcgpa;
+ u64 movesrcgpa;
movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
vm_mem_region_move(data->vm, data->nslots - 1 + 1,
@@ -636,13 +636,13 @@ static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync)
}
static void test_memslot_do_unmap(struct vm_data *data,
- uint64_t offsp, uint64_t count)
+ u64 offsp, u64 count)
{
- uint64_t gpa, ctr;
- uint32_t guest_page_size = data->vm->page_size;
+ gpa_t gpa, ctr;
+ u32 guest_page_size = data->vm->page_size;
for (gpa = MEM_TEST_GPA + offsp * guest_page_size, ctr = 0; ctr < count; ) {
- uint64_t npages;
+ u64 npages;
void *hva;
int ret;
@@ -661,11 +661,11 @@ static void test_memslot_do_unmap(struct vm_data *data,
}
static void test_memslot_map_unmap_check(struct vm_data *data,
- uint64_t offsp, uint64_t valexp)
+ u64 offsp, u64 valexp)
{
- uint64_t gpa;
- uint64_t *val;
- uint32_t guest_page_size = data->vm->page_size;
+ gpa_t gpa;
+ u64 *val;
+ u32 guest_page_size = data->vm->page_size;
if (!map_unmap_verify)
return;
@@ -680,8 +680,8 @@ static void test_memslot_map_unmap_check(struct vm_data *data,
static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
{
- uint32_t guest_page_size = data->vm->page_size;
- uint64_t guest_pages = MEM_TEST_MAP_SIZE / guest_page_size;
+ u32 guest_page_size = data->vm->page_size;
+ u64 guest_pages = MEM_TEST_MAP_SIZE / guest_page_size;
/*
* Unmap the second half of the test area while guest writes to (maps)
@@ -718,11 +718,11 @@ static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
static void test_memslot_unmap_loop_common(struct vm_data *data,
struct sync_area *sync,
- uint64_t chunk)
+ u64 chunk)
{
- uint32_t guest_page_size = data->vm->page_size;
- uint64_t guest_pages = MEM_TEST_UNMAP_SIZE / guest_page_size;
- uint64_t ctr;
+ u32 guest_page_size = data->vm->page_size;
+ u64 guest_pages = MEM_TEST_UNMAP_SIZE / guest_page_size;
+ u64 ctr;
/*
* Wait for the guest to finish mapping page(s) in the first half
@@ -746,9 +746,9 @@ static void test_memslot_unmap_loop_common(struct vm_data *data,
static void test_memslot_unmap_loop(struct vm_data *data,
struct sync_area *sync)
{
- uint32_t host_page_size = getpagesize();
- uint32_t guest_page_size = data->vm->page_size;
- uint64_t guest_chunk_pages = guest_page_size >= host_page_size ?
+ u32 host_page_size = getpagesize();
+ u32 guest_page_size = data->vm->page_size;
+ u64 guest_chunk_pages = guest_page_size >= host_page_size ?
1 : host_page_size / guest_page_size;
test_memslot_unmap_loop_common(data, sync, guest_chunk_pages);
@@ -757,27 +757,27 @@ static void test_memslot_unmap_loop(struct vm_data *data,
static void test_memslot_unmap_loop_chunked(struct vm_data *data,
struct sync_area *sync)
{
- uint32_t guest_page_size = data->vm->page_size;
- uint64_t guest_chunk_pages = MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size;
+ u32 guest_page_size = data->vm->page_size;
+ u64 guest_chunk_pages = MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size;
test_memslot_unmap_loop_common(data, sync, guest_chunk_pages);
}
static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync)
{
- uint64_t gptr;
- uint32_t guest_page_size = data->vm->page_size;
+ u64 gptr;
+ u32 guest_page_size = data->vm->page_size;
for (gptr = MEM_TEST_GPA + guest_page_size / 2;
gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size)
- *(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2;
+ *(u64 *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2;
host_perform_sync(sync);
for (gptr = MEM_TEST_GPA;
gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size) {
- uint64_t *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL);
- uint64_t val = *vptr;
+ u64 *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL);
+ u64 val = *vptr;
TEST_ASSERT(val == MEM_TEST_VAL_1,
"Guest written values should read back correctly (is %"PRIu64" @ %"PRIx64")",
@@ -790,21 +790,21 @@ static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync)
struct test_data {
const char *name;
- uint64_t mem_size;
+ u64 mem_size;
void (*guest_code)(void);
bool (*prepare)(struct vm_data *data, struct sync_area *sync,
- uint64_t *maxslots);
+ u64 *maxslots);
void (*loop)(struct vm_data *data, struct sync_area *sync);
};
-static bool test_execute(int nslots, uint64_t *maxslots,
+static bool test_execute(int nslots, u64 *maxslots,
unsigned int maxtime,
const struct test_data *tdata,
- uint64_t *nloops,
+ u64 *nloops,
struct timespec *slot_runtime,
struct timespec *guest_runtime)
{
- uint64_t mem_size = tdata->mem_size ? : MEM_SIZE;
+ u64 mem_size = tdata->mem_size ? : MEM_SIZE;
struct vm_data *data;
struct sync_area *sync;
struct timespec tstart;
@@ -924,8 +924,8 @@ static void help(char *name, struct test_args *targs)
static bool check_memory_sizes(void)
{
- uint32_t host_page_size = getpagesize();
- uint32_t guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
+ u32 host_page_size = getpagesize();
+ u32 guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
if (host_page_size > SZ_64K || guest_page_size > SZ_64K) {
pr_info("Unsupported page size on host (0x%x) or guest (0x%x)\n",
@@ -961,7 +961,7 @@ static bool check_memory_sizes(void)
static bool parse_args(int argc, char *argv[],
struct test_args *targs)
{
- uint32_t max_mem_slots;
+ u32 max_mem_slots;
int opt;
while ((opt = getopt(argc, argv, "hvdqs:f:e:l:r:")) != -1) {
@@ -1040,8 +1040,8 @@ static bool parse_args(int argc, char *argv[],
struct test_result {
struct timespec slot_runtime, guest_runtime, iter_runtime;
- int64_t slottimens, runtimens;
- uint64_t nloops;
+ s64 slottimens, runtimens;
+ u64 nloops;
};
static bool test_loop(const struct test_data *data,
@@ -1049,7 +1049,7 @@ static bool test_loop(const struct test_data *data,
struct test_result *rbestslottime,
struct test_result *rbestruntime)
{
- uint64_t maxslots;
+ u64 maxslots;
struct test_result result = {};
if (!test_execute(targs->nslots, &maxslots, targs->seconds, data,
diff --git a/tools/testing/selftests/kvm/mmu_stress_test.c b/tools/testing/selftests/kvm/mmu_stress_test.c
index 51c070556f3e..54d281419d31 100644
--- a/tools/testing/selftests/kvm/mmu_stress_test.c
+++ b/tools/testing/selftests/kvm/mmu_stress_test.c
@@ -20,19 +20,19 @@
static bool mprotect_ro_done;
static bool all_vcpus_hit_ro_fault;
-static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
+static void guest_code(u64 start_gpa, u64 end_gpa, u64 stride)
{
- uint64_t gpa;
+ gpa_t gpa;
int i;
for (i = 0; i < 2; i++) {
for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
- vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
+ vcpu_arch_put_guest(*((volatile u64 *)gpa), gpa);
GUEST_SYNC(i);
}
for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
- *((volatile uint64_t *)gpa);
+ *((volatile u64 *)gpa);
GUEST_SYNC(2);
/*
@@ -55,7 +55,7 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
#elif defined(__aarch64__)
asm volatile("str %0, [%0]" :: "r" (gpa) : "memory");
#else
- vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
+ vcpu_arch_put_guest(*((volatile u64 *)gpa), gpa);
#endif
} while (!READ_ONCE(mprotect_ro_done) || !READ_ONCE(all_vcpus_hit_ro_fault));
@@ -68,7 +68,7 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
#endif
for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
- vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
+ vcpu_arch_put_guest(*((volatile u64 *)gpa), gpa);
GUEST_SYNC(4);
GUEST_ASSERT(0);
@@ -76,8 +76,8 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
struct vcpu_info {
struct kvm_vcpu *vcpu;
- uint64_t start_gpa;
- uint64_t end_gpa;
+ u64 start_gpa;
+ u64 end_gpa;
};
static int nr_vcpus;
@@ -203,10 +203,10 @@ static void *vcpu_worker(void *data)
}
static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,
- uint64_t start_gpa, uint64_t end_gpa)
+ u64 start_gpa, u64 end_gpa)
{
struct vcpu_info *info;
- uint64_t gpa, nr_bytes;
+ gpa_t gpa, nr_bytes;
pthread_t *threads;
int i;
@@ -217,7 +217,7 @@ static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,
TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges");
nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) &
- ~((uint64_t)vm->page_size - 1);
+ ~((u64)vm->page_size - 1);
TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus);
for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {
@@ -278,11 +278,11 @@ int main(int argc, char *argv[])
* just below the 4gb boundary. This test could create memory at
* 1gb-3gb,but it's simpler to skip straight to 4gb.
*/
- const uint64_t start_gpa = SZ_4G;
+ const u64 start_gpa = SZ_4G;
const int first_slot = 1;
struct timespec time_start, time_run1, time_reset, time_run2, time_ro, time_rw;
- uint64_t max_gpa, gpa, slot_size, max_mem, i;
+ u64 max_gpa, gpa, slot_size, max_mem, i;
int max_slots, slot, opt, fd;
bool hugepages = false;
struct kvm_vcpu **vcpus;
@@ -347,7 +347,7 @@ int main(int argc, char *argv[])
/* Pre-fault the memory to avoid taking mmap_sem on guest page faults. */
for (i = 0; i < slot_size; i += vm->page_size)
- ((uint8_t *)mem)[i] = 0xaa;
+ ((u8 *)mem)[i] = 0xaa;
gpa = 0;
for (slot = first_slot; slot < max_slots; slot++) {
diff --git a/tools/testing/selftests/kvm/pre_fault_memory_test.c b/tools/testing/selftests/kvm/pre_fault_memory_test.c
index f3de0386ba7b..fcb57fd034e6 100644
--- a/tools/testing/selftests/kvm/pre_fault_memory_test.c
+++ b/tools/testing/selftests/kvm/pre_fault_memory_test.c
@@ -17,13 +17,13 @@
#define TEST_NPAGES (TEST_SIZE / PAGE_SIZE)
#define TEST_SLOT 10
-static void guest_code(uint64_t base_gva)
+static void guest_code(u64 base_gva)
{
- volatile uint64_t val __used;
+ volatile u64 val __used;
int i;
for (i = 0; i < TEST_NPAGES; i++) {
- uint64_t *src = (uint64_t *)(base_gva + i * PAGE_SIZE);
+ u64 *src = (u64 *)(base_gva + i * PAGE_SIZE);
val = *src;
}
@@ -33,8 +33,8 @@ static void guest_code(uint64_t base_gva)
struct slot_worker_data {
struct kvm_vm *vm;
- u64 gpa;
- uint32_t flags;
+ gpa_t gpa;
+ u32 flags;
bool worker_ready;
bool prefault_ready;
bool recreate_slot;
@@ -161,7 +161,7 @@ static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 base_gpa, u64 offset,
static void __test_pre_fault_memory(unsigned long vm_type, bool private)
{
- uint64_t gpa, gva, alignment, guest_page_size;
+ gpa_t gpa, gva, alignment, guest_page_size;
const struct vm_shape shape = {
.mode = VM_MODE_DEFAULT,
.type = vm_type,
diff --git a/tools/testing/selftests/kvm/riscv/arch_timer.c b/tools/testing/selftests/kvm/riscv/arch_timer.c
index f962fefc48fa..d67c918ee310 100644
--- a/tools/testing/selftests/kvm/riscv/arch_timer.c
+++ b/tools/testing/selftests/kvm/riscv/arch_timer.c
@@ -17,9 +17,9 @@ static int timer_irq = IRQ_S_TIMER;
static void guest_irq_handler(struct pt_regs *regs)
{
- uint64_t xcnt, xcnt_diff_us, cmp;
+ u64 xcnt, xcnt_diff_us, cmp;
unsigned int intid = regs->cause & ~CAUSE_IRQ_FLAG;
- uint32_t cpu = guest_get_vcpuid();
+ u32 cpu = guest_get_vcpuid();
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
timer_irq_disable();
@@ -40,7 +40,7 @@ static void guest_irq_handler(struct pt_regs *regs)
static void guest_run(struct test_vcpu_shared_data *shared_data)
{
- uint32_t irq_iter, config_iter;
+ u32 irq_iter, config_iter;
shared_data->nr_iter = 0;
shared_data->guest_stage = 0;
@@ -66,7 +66,7 @@ static void guest_run(struct test_vcpu_shared_data *shared_data)
static void guest_code(void)
{
- uint32_t cpu = guest_get_vcpuid();
+ u32 cpu = guest_get_vcpuid();
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
timer_irq_disable();
diff --git a/tools/testing/selftests/kvm/riscv/ebreak_test.c b/tools/testing/selftests/kvm/riscv/ebreak_test.c
index 739d17befb5a..3f44b045a22e 100644
--- a/tools/testing/selftests/kvm/riscv/ebreak_test.c
+++ b/tools/testing/selftests/kvm/riscv/ebreak_test.c
@@ -8,10 +8,10 @@
#include "kvm_util.h"
#include "ucall_common.h"
-#define LABEL_ADDRESS(v) ((uint64_t)&(v))
+#define LABEL_ADDRESS(v) ((u64)&(v))
extern unsigned char sw_bp_1, sw_bp_2;
-static uint64_t sw_bp_addr;
+static u64 sw_bp_addr;
static void guest_code(void)
{
@@ -37,7 +37,7 @@ int main(void)
{
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
- uint64_t pc;
+ u64 pc;
struct kvm_guest_debug debug = {
.control = KVM_GUESTDBG_ENABLE,
};
diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
index 8d6b951434eb..8d6fdb5d38b8 100644
--- a/tools/testing/selftests/kvm/riscv/get-reg-list.c
+++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
@@ -162,7 +162,7 @@ bool check_reject_set(int err)
}
static int override_vector_reg_size(struct kvm_vcpu *vcpu, struct vcpu_reg_sublist *s,
- uint64_t feature)
+ u64 feature)
{
unsigned long vlenb_reg = 0;
int rc;
@@ -197,7 +197,7 @@ void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 };
struct vcpu_reg_sublist *s;
- uint64_t feature;
+ u64 feature;
int rc;
for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
diff --git a/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c b/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
index cec1621ace23..e56a3dd6a51e 100644
--- a/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
+++ b/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
@@ -24,7 +24,7 @@ union sbi_pmu_ctr_info ctrinfo_arr[RISCV_MAX_PMU_COUNTERS];
/* Snapshot shared memory data */
#define PMU_SNAPSHOT_GPA_BASE BIT(30)
static void *snapshot_gva;
-static vm_paddr_t snapshot_gpa;
+static gpa_t snapshot_gpa;
static int vcpu_shared_irq_count;
static int counter_in_use;
@@ -86,7 +86,7 @@ unsigned long pmu_csr_read_num(int csr_num)
#undef switchcase_csr_read
}
-static inline void dummy_func_loop(uint64_t iter)
+static inline void dummy_func_loop(u64 iter)
{
int i = 0;
@@ -259,7 +259,7 @@ static inline void verify_sbi_requirement_assert(void)
__GUEST_ASSERT(0, "SBI implementation version doesn't support PMU Snapshot");
}
-static void snapshot_set_shmem(vm_paddr_t gpa, unsigned long flags)
+static void snapshot_set_shmem(gpa_t gpa, unsigned long flags)
{
unsigned long lo = (unsigned long)gpa;
#if __riscv_xlen == 32
@@ -610,7 +610,7 @@ static void test_vm_setup_snapshot_mem(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
virt_map(vm, PMU_SNAPSHOT_GPA_BASE, PMU_SNAPSHOT_GPA_BASE, 1);
snapshot_gva = (void *)(PMU_SNAPSHOT_GPA_BASE);
- snapshot_gpa = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)snapshot_gva);
+ snapshot_gpa = addr_gva2gpa(vcpu->vm, (gva_t)snapshot_gva);
sync_global_to_guest(vcpu->vm, snapshot_gva);
sync_global_to_guest(vcpu->vm, snapshot_gpa);
}
diff --git a/tools/testing/selftests/kvm/s390/debug_test.c b/tools/testing/selftests/kvm/s390/debug_test.c
index ad8095968601..751c61c0f056 100644
--- a/tools/testing/selftests/kvm/s390/debug_test.c
+++ b/tools/testing/selftests/kvm/s390/debug_test.c
@@ -17,7 +17,7 @@ asm("int_handler:\n"
"j .\n");
static struct kvm_vm *test_step_int_1(struct kvm_vcpu **vcpu, void *guest_code,
- size_t new_psw_off, uint64_t *new_psw)
+ size_t new_psw_off, u64 *new_psw)
{
struct kvm_guest_debug debug = {};
struct kvm_regs regs;
@@ -27,7 +27,7 @@ static struct kvm_vm *test_step_int_1(struct kvm_vcpu **vcpu, void *guest_code,
vm = vm_create_with_one_vcpu(vcpu, guest_code);
lowcore = addr_gpa2hva(vm, 0);
new_psw[0] = (*vcpu)->run->psw_mask;
- new_psw[1] = (uint64_t)int_handler;
+ new_psw[1] = (u64)int_handler;
memcpy(lowcore + new_psw_off, new_psw, 16);
vcpu_regs_get(*vcpu, &regs);
regs.gprs[2] = -1;
@@ -42,7 +42,7 @@ static struct kvm_vm *test_step_int_1(struct kvm_vcpu **vcpu, void *guest_code,
static void test_step_int(void *guest_code, size_t new_psw_off)
{
struct kvm_vcpu *vcpu;
- uint64_t new_psw[2];
+ u64 new_psw[2];
struct kvm_vm *vm;
vm = test_step_int_1(&vcpu, guest_code, new_psw_off, new_psw);
@@ -79,7 +79,7 @@ static void test_step_pgm_diag(void)
.u.pgm.code = PGM_SPECIFICATION,
};
struct kvm_vcpu *vcpu;
- uint64_t new_psw[2];
+ u64 new_psw[2];
struct kvm_vm *vm;
vm = test_step_int_1(&vcpu, test_step_pgm_diag_guest_code,
diff --git a/tools/testing/selftests/kvm/s390/irq_routing.c b/tools/testing/selftests/kvm/s390/irq_routing.c
index 7819a0af19a8..f3839284ac08 100644
--- a/tools/testing/selftests/kvm/s390/irq_routing.c
+++ b/tools/testing/selftests/kvm/s390/irq_routing.c
@@ -27,7 +27,7 @@ static void test(void)
struct kvm_irq_routing *routing;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- vm_paddr_t mem;
+ gpa_t mem;
int ret;
struct kvm_irq_routing_entry ue = {
diff --git a/tools/testing/selftests/kvm/s390/memop.c b/tools/testing/selftests/kvm/s390/memop.c
index 4374b4cd2a80..0244848621b3 100644
--- a/tools/testing/selftests/kvm/s390/memop.c
+++ b/tools/testing/selftests/kvm/s390/memop.c
@@ -34,7 +34,7 @@ enum mop_access_mode {
struct mop_desc {
uintptr_t gaddr;
uintptr_t gaddr_v;
- uint64_t set_flags;
+ u64 set_flags;
unsigned int f_check : 1;
unsigned int f_inject : 1;
unsigned int f_key : 1;
@@ -42,19 +42,19 @@ struct mop_desc {
unsigned int _set_flags : 1;
unsigned int _sida_offset : 1;
unsigned int _ar : 1;
- uint32_t size;
+ u32 size;
enum mop_target target;
enum mop_access_mode mode;
void *buf;
- uint32_t sida_offset;
+ u32 sida_offset;
void *old;
- uint8_t old_value[16];
+ u8 old_value[16];
bool *cmpxchg_success;
- uint8_t ar;
- uint8_t key;
+ u8 ar;
+ u8 key;
};
-const uint8_t NO_KEY = 0xff;
+const u8 NO_KEY = 0xff;
static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc *desc)
{
@@ -85,7 +85,7 @@ static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc *desc)
ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
if (desc->mode == CMPXCHG) {
ksmo.op = KVM_S390_MEMOP_ABSOLUTE_CMPXCHG;
- ksmo.old_addr = (uint64_t)desc->old;
+ ksmo.old_addr = (u64)desc->old;
memcpy(desc->old_value, desc->old, desc->size);
}
break;
@@ -230,8 +230,8 @@ static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))
-static uint8_t __aligned(PAGE_SIZE) mem1[65536];
-static uint8_t __aligned(PAGE_SIZE) mem2[65536];
+static u8 __aligned(PAGE_SIZE) mem1[65536];
+static u8 __aligned(PAGE_SIZE) mem2[65536];
struct test_default {
struct kvm_vm *kvm_vm;
@@ -296,7 +296,7 @@ static void prepare_mem12(void)
TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")
static void default_write_read(struct test_info copy_cpu, struct test_info mop_cpu,
- enum mop_target mop_target, uint32_t size, uint8_t key)
+ enum mop_target mop_target, u32 size, u8 key)
{
prepare_mem12();
CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size,
@@ -308,7 +308,7 @@ static void default_write_read(struct test_info copy_cpu, struct test_info mop_c
}
static void default_read(struct test_info copy_cpu, struct test_info mop_cpu,
- enum mop_target mop_target, uint32_t size, uint8_t key)
+ enum mop_target mop_target, u32 size, u8 key)
{
prepare_mem12();
CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, GADDR_V(mem1));
@@ -318,12 +318,12 @@ static void default_read(struct test_info copy_cpu, struct test_info mop_cpu,
ASSERT_MEM_EQ(mem1, mem2, size);
}
-static void default_cmpxchg(struct test_default *test, uint8_t key)
+static void default_cmpxchg(struct test_default *test, u8 key)
{
for (int size = 1; size <= 16; size *= 2) {
for (int offset = 0; offset < 16; offset += size) {
- uint8_t __aligned(16) new[16] = {};
- uint8_t __aligned(16) old[16];
+ u8 __aligned(16) new[16] = {};
+ u8 __aligned(16) old[16];
bool succ;
prepare_mem12();
@@ -400,7 +400,7 @@ static void test_copy_access_register(void)
kvm_vm_free(t.kvm_vm);
}
-static void set_storage_key_range(void *addr, size_t len, uint8_t key)
+static void set_storage_key_range(void *addr, size_t len, u8 key)
{
uintptr_t _addr, abs, i;
int not_mapped = 0;
@@ -483,13 +483,13 @@ static __uint128_t cut_to_size(int size, __uint128_t val)
{
switch (size) {
case 1:
- return (uint8_t)val;
+ return (u8)val;
case 2:
- return (uint16_t)val;
+ return (u16)val;
case 4:
- return (uint32_t)val;
+ return (u32)val;
case 8:
- return (uint64_t)val;
+ return (u64)val;
case 16:
return val;
}
@@ -501,10 +501,10 @@ static bool popcount_eq(__uint128_t a, __uint128_t b)
{
unsigned int count_a, count_b;
- count_a = __builtin_popcountl((uint64_t)(a >> 64)) +
- __builtin_popcountl((uint64_t)a);
- count_b = __builtin_popcountl((uint64_t)(b >> 64)) +
- __builtin_popcountl((uint64_t)b);
+ count_a = __builtin_popcountl((u64)(a >> 64)) +
+ __builtin_popcountl((u64)a);
+ count_b = __builtin_popcountl((u64)(b >> 64)) +
+ __builtin_popcountl((u64)b);
return count_a == count_b;
}
@@ -553,7 +553,7 @@ static __uint128_t permutate_bits(bool guest, int i, int size, __uint128_t old)
if (swap) {
int i, j;
__uint128_t new;
- uint8_t byte0, byte1;
+ u8 byte0, byte1;
rand = rand * 3 + 1;
i = rand % size;
@@ -585,28 +585,28 @@ static bool _cmpxchg(int size, void *target, __uint128_t *old_addr, __uint128_t
switch (size) {
case 4: {
- uint32_t old = *old_addr;
+ u32 old = *old_addr;
asm volatile ("cs %[old],%[new],%[address]"
: [old] "+d" (old),
- [address] "+Q" (*(uint32_t *)(target))
- : [new] "d" ((uint32_t)new)
+ [address] "+Q" (*(u32 *)(target))
+ : [new] "d" ((u32)new)
: "cc"
);
- ret = old == (uint32_t)*old_addr;
+ ret = old == (u32)*old_addr;
*old_addr = old;
return ret;
}
case 8: {
- uint64_t old = *old_addr;
+ u64 old = *old_addr;
asm volatile ("csg %[old],%[new],%[address]"
: [old] "+d" (old),
- [address] "+Q" (*(uint64_t *)(target))
- : [new] "d" ((uint64_t)new)
+ [address] "+Q" (*(u64 *)(target))
+ : [new] "d" ((u64)new)
: "cc"
);
- ret = old == (uint64_t)*old_addr;
+ ret = old == (u64)*old_addr;
*old_addr = old;
return ret;
}
@@ -811,10 +811,10 @@ static void test_errors_cmpxchg_key(void)
static void test_termination(void)
{
struct test_default t = test_default_init(guest_error_key);
- uint64_t prefix;
- uint64_t teid;
- uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61);
- uint64_t psw[2];
+ u64 prefix;
+ u64 teid;
+ u64 teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61);
+ u64 psw[2];
HOST_SYNC(t.vcpu, STAGE_INITED);
HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
@@ -855,7 +855,7 @@ static void test_errors_key_storage_prot_override(void)
kvm_vm_free(t.kvm_vm);
}
-const uint64_t last_page_addr = -PAGE_SIZE;
+const u64 last_page_addr = -PAGE_SIZE;
static void guest_copy_key_fetch_prot_override(void)
{
@@ -878,10 +878,10 @@ static void guest_copy_key_fetch_prot_override(void)
static void test_copy_key_fetch_prot_override(void)
{
struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
- vm_vaddr_t guest_0_page, guest_last_page;
+ gva_t guest_0_page, guest_last_page;
- guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
- guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
+ guest_0_page = vm_alloc(t.kvm_vm, PAGE_SIZE, 0);
+ guest_last_page = vm_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
if (guest_0_page != 0 || guest_last_page != last_page_addr) {
print_skip("did not allocate guest pages at required positions");
goto out;
@@ -917,10 +917,10 @@ out:
static void test_errors_key_fetch_prot_override_not_enabled(void)
{
struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
- vm_vaddr_t guest_0_page, guest_last_page;
+ gva_t guest_0_page, guest_last_page;
- guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
- guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
+ guest_0_page = vm_alloc(t.kvm_vm, PAGE_SIZE, 0);
+ guest_last_page = vm_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
if (guest_0_page != 0 || guest_last_page != last_page_addr) {
print_skip("did not allocate guest pages at required positions");
goto out;
@@ -938,10 +938,10 @@ out:
static void test_errors_key_fetch_prot_override_enabled(void)
{
struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
- vm_vaddr_t guest_0_page, guest_last_page;
+ gva_t guest_0_page, guest_last_page;
- guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
- guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
+ guest_0_page = vm_alloc(t.kvm_vm, PAGE_SIZE, 0);
+ guest_last_page = vm_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
if (guest_0_page != 0 || guest_last_page != last_page_addr) {
print_skip("did not allocate guest pages at required positions");
goto out;
diff --git a/tools/testing/selftests/kvm/s390/resets.c b/tools/testing/selftests/kvm/s390/resets.c
index b58f75b381e5..e3c7a2f148f9 100644
--- a/tools/testing/selftests/kvm/s390/resets.c
+++ b/tools/testing/selftests/kvm/s390/resets.c
@@ -20,7 +20,7 @@
struct kvm_s390_irq buf[ARBITRARY_NON_ZERO_VCPU_ID + LOCAL_IRQS];
-static uint8_t regs_null[512];
+static u8 regs_null[512];
static void guest_code_initial(void)
{
@@ -57,9 +57,9 @@ static void guest_code_initial(void)
);
}
-static void test_one_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t value)
+static void test_one_reg(struct kvm_vcpu *vcpu, u64 id, u64 value)
{
- uint64_t eval_reg;
+ u64 eval_reg;
eval_reg = vcpu_get_reg(vcpu, id);
TEST_ASSERT(eval_reg == value, "value == 0x%lx", value);
diff --git a/tools/testing/selftests/kvm/s390/shared_zeropage_test.c b/tools/testing/selftests/kvm/s390/shared_zeropage_test.c
index bba0d9a6dcc8..a9e5a01200b8 100644
--- a/tools/testing/selftests/kvm/s390/shared_zeropage_test.c
+++ b/tools/testing/selftests/kvm/s390/shared_zeropage_test.c
@@ -13,7 +13,7 @@
#include "kselftest.h"
#include "ucall_common.h"
-static void set_storage_key(void *addr, uint8_t skey)
+static void set_storage_key(void *addr, u8 skey)
{
asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}
diff --git a/tools/testing/selftests/kvm/s390/tprot.c b/tools/testing/selftests/kvm/s390/tprot.c
index 12d5e1cb62e3..8054d2b178f0 100644
--- a/tools/testing/selftests/kvm/s390/tprot.c
+++ b/tools/testing/selftests/kvm/s390/tprot.c
@@ -14,12 +14,12 @@
#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))
-static __aligned(PAGE_SIZE) uint8_t pages[2][PAGE_SIZE];
-static uint8_t *const page_store_prot = pages[0];
-static uint8_t *const page_fetch_prot = pages[1];
+static __aligned(PAGE_SIZE) u8 pages[2][PAGE_SIZE];
+static u8 *const page_store_prot = pages[0];
+static u8 *const page_fetch_prot = pages[1];
/* Nonzero return value indicates that address not mapped */
-static int set_storage_key(void *addr, uint8_t key)
+static int set_storage_key(void *addr, u8 key)
{
int not_mapped = 0;
@@ -44,9 +44,9 @@ enum permission {
TRANSL_UNAVAIL = 3,
};
-static enum permission test_protection(void *addr, uint8_t key)
+static enum permission test_protection(void *addr, u8 key)
{
- uint64_t mask;
+ u64 mask;
asm volatile (
"tprot %[addr], 0(%[key])\n"
@@ -72,7 +72,7 @@ enum stage {
struct test {
enum stage stage;
void *addr;
- uint8_t key;
+ u8 key;
enum permission expected;
} tests[] = {
/*
@@ -146,7 +146,7 @@ static enum stage perform_next_stage(int *i, bool mapped_0)
/*
* Some fetch protection override tests require that page 0
* be mapped, however, when the hosts tries to map that page via
- * vm_vaddr_alloc, it may happen that some other page gets mapped
+ * vm_alloc, it may happen that some other page gets mapped
* instead.
* In order to skip these tests we detect this inside the guest
*/
@@ -207,7 +207,7 @@ int main(int argc, char *argv[])
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
- vm_vaddr_t guest_0_page;
+ gva_t guest_0_page;
ksft_print_header();
ksft_set_plan(STAGE_END);
@@ -216,10 +216,10 @@ int main(int argc, char *argv[])
run = vcpu->run;
HOST_SYNC(vcpu, STAGE_INIT_SIMPLE);
- mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ);
+ mprotect(addr_gva2hva(vm, (gva_t)pages), PAGE_SIZE * 2, PROT_READ);
HOST_SYNC(vcpu, TEST_SIMPLE);
- guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0);
+ guest_0_page = vm_alloc(vm, PAGE_SIZE, 0);
if (guest_0_page != 0) {
/* Use NO_TAP so we don't get a PASS print */
HOST_SYNC_NO_TAP(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE);
@@ -229,7 +229,7 @@ int main(int argc, char *argv[])
HOST_SYNC(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE);
}
if (guest_0_page == 0)
- mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ);
+ mprotect(addr_gva2hva(vm, (gva_t)0), PAGE_SIZE, PROT_READ);
run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
run->kvm_dirty_regs = KVM_SYNC_CRS;
HOST_SYNC(vcpu, TEST_FETCH_PROT_OVERRIDE);
diff --git a/tools/testing/selftests/kvm/s390/ucontrol_test.c b/tools/testing/selftests/kvm/s390/ucontrol_test.c
index 50bc1c38225a..b8c6f37b53e0 100644
--- a/tools/testing/selftests/kvm/s390/ucontrol_test.c
+++ b/tools/testing/selftests/kvm/s390/ucontrol_test.c
@@ -111,7 +111,7 @@ FIXTURE(uc_kvm)
uintptr_t base_hva;
uintptr_t code_hva;
int kvm_run_size;
- vm_paddr_t pgd;
+ gpa_t pgd;
void *vm_mem;
int vcpu_fd;
int kvm_fd;
@@ -269,7 +269,7 @@ TEST(uc_cap_hpage)
}
/* calculate host virtual addr from guest physical addr */
-static void *gpa2hva(FIXTURE_DATA(uc_kvm) *self, u64 gpa)
+static void *gpa2hva(FIXTURE_DATA(uc_kvm) *self, gpa_t gpa)
{
return (void *)(self->base_hva - self->base_gpa + gpa);
}
@@ -571,7 +571,7 @@ TEST_F(uc_kvm, uc_skey)
{
struct kvm_s390_sie_block *sie_block = self->sie_block;
struct kvm_sync_regs *sync_regs = &self->run->s.regs;
- u64 test_vaddr = VM_MEM_SIZE - (SZ_1M / 2);
+ u64 test_gva = VM_MEM_SIZE - (SZ_1M / 2);
struct kvm_run *run = self->run;
const u8 skeyvalue = 0x34;
@@ -583,7 +583,7 @@ TEST_F(uc_kvm, uc_skey)
/* set register content for test_skey_asm to access not mapped memory */
sync_regs->gprs[1] = skeyvalue;
sync_regs->gprs[5] = self->base_gpa;
- sync_regs->gprs[6] = test_vaddr;
+ sync_regs->gprs[6] = test_gva;
run->kvm_dirty_regs |= KVM_SYNC_GPRS;
/* DAT disabled + 64 bit mode */
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index a398dc3a8c4b..9b919a231c93 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -30,19 +30,19 @@
#define MEM_REGION_GPA 0xc0000000
#define MEM_REGION_SLOT 10
-static const uint64_t MMIO_VAL = 0xbeefull;
+static const u64 MMIO_VAL = 0xbeefull;
-extern const uint64_t final_rip_start;
-extern const uint64_t final_rip_end;
+extern const u64 final_rip_start;
+extern const u64 final_rip_end;
static sem_t vcpu_ready;
-static inline uint64_t guest_spin_on_val(uint64_t spin_val)
+static inline u64 guest_spin_on_val(u64 spin_val)
{
- uint64_t val;
+ u64 val;
do {
- val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
+ val = READ_ONCE(*((u64 *)MEM_REGION_GPA));
} while (val == spin_val);
GUEST_SYNC(0);
@@ -54,7 +54,7 @@ static void *vcpu_worker(void *data)
struct kvm_vcpu *vcpu = data;
struct kvm_run *run = vcpu->run;
struct ucall uc;
- uint64_t cmd;
+ u64 cmd;
/*
* Loop until the guest is done. Re-enter the guest on all MMIO exits,
@@ -111,8 +111,8 @@ static struct kvm_vm *spawn_vm(struct kvm_vcpu **vcpu, pthread_t *vcpu_thread,
void *guest_code)
{
struct kvm_vm *vm;
- uint64_t *hva;
- uint64_t gpa;
+ u64 *hva;
+ gpa_t gpa;
vm = vm_create_with_one_vcpu(vcpu, guest_code);
@@ -144,7 +144,7 @@ static struct kvm_vm *spawn_vm(struct kvm_vcpu **vcpu, pthread_t *vcpu_thread,
static void guest_code_move_memory_region(void)
{
- uint64_t val;
+ u64 val;
GUEST_SYNC(0);
@@ -180,7 +180,7 @@ static void test_move_memory_region(bool disable_slot_zap_quirk)
pthread_t vcpu_thread;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- uint64_t *hva;
+ u64 *hva;
vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region);
@@ -224,7 +224,7 @@ static void test_move_memory_region(bool disable_slot_zap_quirk)
static void guest_code_delete_memory_region(void)
{
struct desc_ptr idt;
- uint64_t val;
+ u64 val;
/*
* Clobber the IDT so that a #PF due to the memory region being deleted
@@ -345,8 +345,8 @@ static void test_zero_memory_regions(void)
static void test_invalid_memory_region_flags(void)
{
- uint32_t supported_flags = KVM_MEM_LOG_DIRTY_PAGES;
- const uint32_t v2_only_flags = KVM_MEM_GUEST_MEMFD;
+ u32 supported_flags = KVM_MEM_LOG_DIRTY_PAGES;
+ const u32 v2_only_flags = KVM_MEM_GUEST_MEMFD;
struct kvm_vm *vm;
int r, i;
@@ -410,8 +410,8 @@ static void test_add_max_memory_regions(void)
{
int ret;
struct kvm_vm *vm;
- uint32_t max_mem_slots;
- uint32_t slot;
+ u32 max_mem_slots;
+ u32 slot;
void *mem, *mem_aligned, *mem_extra;
size_t alignment = 1;
@@ -434,16 +434,16 @@ static void test_add_max_memory_regions(void)
for (slot = 0; slot < max_mem_slots; slot++)
vm_set_user_memory_region(vm, slot, 0,
- ((uint64_t)slot * MEM_REGION_SIZE),
+ ((u64)slot * MEM_REGION_SIZE),
MEM_REGION_SIZE,
- mem_aligned + (uint64_t)slot * MEM_REGION_SIZE);
+ mem_aligned + (u64)slot * MEM_REGION_SIZE);
/* Check it cannot be added memory slots beyond the limit */
mem_extra = kvm_mmap(MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1);
ret = __vm_set_user_memory_region(vm, max_mem_slots, 0,
- (uint64_t)max_mem_slots * MEM_REGION_SIZE,
+ (u64)max_mem_slots * MEM_REGION_SIZE,
MEM_REGION_SIZE, mem_extra);
TEST_ASSERT(ret == -1 && errno == EINVAL,
"Adding one more memory slot should fail with EINVAL");
@@ -556,7 +556,7 @@ static void guest_code_mmio_during_vectoring(void)
set_idt(&idt_desc);
/* Generate a #GP by dereferencing a non-canonical address */
- *((uint8_t *)NONCANONICAL) = 0x1;
+ *((u8 *)NONCANONICAL) = 0x1;
GUEST_ASSERT(0);
}
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index efe56a10d13e..76fcdd1fd3cb 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -25,7 +25,7 @@
#define ST_GPA_BASE (1 << 30)
static void *st_gva[NR_VCPUS];
-static uint64_t guest_stolen_time[NR_VCPUS];
+static u64 guest_stolen_time[NR_VCPUS];
#if defined(__x86_64__)
@@ -42,9 +42,9 @@ static void check_status(struct kvm_steal_time *st)
static void guest_code(int cpu)
{
struct kvm_steal_time *st = st_gva[cpu];
- uint32_t version;
+ u32 version;
- GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED));
+ GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((u64)st_gva[cpu] | KVM_MSR_ENABLED));
memset(st, 0, sizeof(*st));
GUEST_SYNC(0);
@@ -67,7 +67,7 @@ static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
return kvm_cpu_has(X86_FEATURE_KVM_STEAL_TIME);
}
-static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
+static void steal_time_init(struct kvm_vcpu *vcpu, u32 i)
{
/* ST_GPA_BASE is identity mapped */
st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
@@ -76,7 +76,7 @@ static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
}
-static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
+static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx)
{
struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
@@ -118,12 +118,12 @@ static void check_steal_time_uapi(void)
#define PV_TIME_ST 0xc5000021
struct st_time {
- uint32_t rev;
- uint32_t attr;
- uint64_t st_time;
+ u32 rev;
+ u32 attr;
+ u64 st_time;
};
-static int64_t smccc(uint32_t func, uint64_t arg)
+static s64 smccc(u32 func, u64 arg)
{
struct arm_smccc_res res;
@@ -140,7 +140,7 @@ static void check_status(struct st_time *st)
static void guest_code(int cpu)
{
struct st_time *st;
- int64_t status;
+ s64 status;
status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES);
GUEST_ASSERT_EQ(status, 0);
@@ -175,15 +175,15 @@ static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
}
-static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
+static void steal_time_init(struct kvm_vcpu *vcpu, u32 i)
{
struct kvm_vm *vm = vcpu->vm;
- uint64_t st_ipa;
+ u64 st_ipa;
struct kvm_device_attr dev = {
.group = KVM_ARM_VCPU_PVTIME_CTRL,
.attr = KVM_ARM_VCPU_PVTIME_IPA,
- .addr = (uint64_t)&st_ipa,
+ .addr = (u64)&st_ipa,
};
/* ST_GPA_BASE is identity mapped */
@@ -194,7 +194,7 @@ static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
}
-static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
+static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx)
{
struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
@@ -208,7 +208,7 @@ static void check_steal_time_uapi(void)
{
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
- uint64_t st_ipa;
+ u64 st_ipa;
int ret;
vm = vm_create_with_one_vcpu(&vcpu, NULL);
@@ -216,10 +216,12 @@ static void check_steal_time_uapi(void)
struct kvm_device_attr dev = {
.group = KVM_ARM_VCPU_PVTIME_CTRL,
.attr = KVM_ARM_VCPU_PVTIME_IPA,
- .addr = (uint64_t)&st_ipa,
+ .addr = (u64)&st_ipa,
};
vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, 1, 0);
+ virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, 1);
st_ipa = (ulong)ST_GPA_BASE | 1;
ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
@@ -239,17 +241,17 @@ static void check_steal_time_uapi(void)
/* SBI STA shmem must have 64-byte alignment */
#define STEAL_TIME_SIZE ((sizeof(struct sta_struct) + 63) & ~63)
-static vm_paddr_t st_gpa[NR_VCPUS];
+static gpa_t st_gpa[NR_VCPUS];
struct sta_struct {
- uint32_t sequence;
- uint32_t flags;
- uint64_t steal;
- uint8_t preempted;
- uint8_t pad[47];
+ u32 sequence;
+ u32 flags;
+ u64 steal;
+ u8 preempted;
+ u8 pad[47];
} __packed;
-static void sta_set_shmem(vm_paddr_t gpa, unsigned long flags)
+static void sta_set_shmem(gpa_t gpa, unsigned long flags)
{
unsigned long lo = (unsigned long)gpa;
#if __riscv_xlen == 32
@@ -272,7 +274,7 @@ static void check_status(struct sta_struct *st)
static void guest_code(int cpu)
{
struct sta_struct *st = st_gva[cpu];
- uint32_t sequence;
+ u32 sequence;
long out_val = 0;
bool probe;
@@ -297,7 +299,7 @@ static void guest_code(int cpu)
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
- uint64_t id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA);
+ u64 id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA);
unsigned long enabled = vcpu_get_reg(vcpu, id);
TEST_ASSERT(enabled == 0 || enabled == 1, "Expected boolean result");
@@ -305,16 +307,16 @@ static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
return enabled;
}
-static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
+static void steal_time_init(struct kvm_vcpu *vcpu, u32 i)
{
/* ST_GPA_BASE is identity mapped */
st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
- st_gpa[i] = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)st_gva[i]);
+ st_gpa[i] = addr_gva2gpa(vcpu->vm, (gva_t)st_gva[i]);
sync_global_to_guest(vcpu->vm, st_gva[i]);
sync_global_to_guest(vcpu->vm, st_gpa[i]);
}
-static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
+static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx)
{
struct sta_struct *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
int i;
@@ -335,7 +337,7 @@ static void check_steal_time_uapi(void)
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
struct kvm_one_reg reg;
- uint64_t shmem;
+ u64 shmem;
int ret;
vm = vm_create_with_one_vcpu(&vcpu, NULL);
@@ -345,7 +347,7 @@ static void check_steal_time_uapi(void)
KVM_REG_RISCV_SBI_STATE |
KVM_REG_RISCV_SBI_STA |
KVM_REG_RISCV_SBI_STA_REG(shmem_lo);
- reg.addr = (uint64_t)&shmem;
+ reg.addr = (u64)&shmem;
shmem = ST_GPA_BASE + 1;
ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
@@ -388,7 +390,7 @@ static void check_status(struct kvm_steal_time *st)
static void guest_code(int cpu)
{
- uint32_t version;
+ u32 version;
struct kvm_steal_time *st = st_gva[cpu];
memset(st, 0, sizeof(*st));
@@ -410,11 +412,11 @@ static void guest_code(int cpu)
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
int err;
- uint64_t val;
+ u64 val;
struct kvm_device_attr attr = {
.group = KVM_LOONGARCH_VCPU_CPUCFG,
.attr = CPUCFG_KVM_FEATURE,
- .addr = (uint64_t)&val,
+ .addr = (u64)&val,
};
err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr);
@@ -428,15 +430,15 @@ static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
return val & BIT(KVM_FEATURE_STEAL_TIME);
}
-static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
+static void steal_time_init(struct kvm_vcpu *vcpu, u32 i)
{
int err;
- uint64_t st_gpa;
+ u64 st_gpa;
struct kvm_vm *vm = vcpu->vm;
struct kvm_device_attr attr = {
.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
.attr = KVM_LOONGARCH_VCPU_PVTIME_GPA,
- .addr = (uint64_t)&st_gpa,
+ .addr = (u64)&st_gpa,
};
/* ST_GPA_BASE is identity mapped */
@@ -451,7 +453,7 @@ static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
TEST_ASSERT(err == 0, "Fail to set PV stealtime GPA");
}
-static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
+static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx)
{
struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
@@ -461,6 +463,11 @@ static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
ksft_print_msg(" version: %d\n", st->version);
ksft_print_msg(" preempted: %d\n", st->preempted);
}
+
+static void check_steal_time_uapi(void)
+{
+
+}
#endif
static void *do_steal_time(void *arg)
diff --git a/tools/testing/selftests/kvm/system_counter_offset_test.c b/tools/testing/selftests/kvm/system_counter_offset_test.c
index 513d421a9bff..dc5e30b7b77f 100644
--- a/tools/testing/selftests/kvm/system_counter_offset_test.c
+++ b/tools/testing/selftests/kvm/system_counter_offset_test.c
@@ -17,7 +17,7 @@
#ifdef __x86_64__
struct test_case {
- uint64_t tsc_offset;
+ u64 tsc_offset;
};
static struct test_case test_cases[] = {
@@ -39,12 +39,12 @@ static void setup_system_counter(struct kvm_vcpu *vcpu, struct test_case *test)
&test->tsc_offset);
}
-static uint64_t guest_read_system_counter(struct test_case *test)
+static u64 guest_read_system_counter(struct test_case *test)
{
return rdtsc();
}
-static uint64_t host_read_guest_system_counter(struct test_case *test)
+static u64 host_read_guest_system_counter(struct test_case *test)
{
return rdtsc() + test->tsc_offset;
}
@@ -69,9 +69,9 @@ static void guest_main(void)
}
}
-static void handle_sync(struct ucall *uc, uint64_t start, uint64_t end)
+static void handle_sync(struct ucall *uc, u64 start, u64 end)
{
- uint64_t obs = uc->args[2];
+ u64 obs = uc->args[2];
TEST_ASSERT(start <= obs && obs <= end,
"unexpected system counter value: %"PRIu64" expected range: [%"PRIu64", %"PRIu64"]",
@@ -88,7 +88,7 @@ static void handle_abort(struct ucall *uc)
static void enter_guest(struct kvm_vcpu *vcpu)
{
- uint64_t start, end;
+ u64 start, end;
struct ucall uc;
int i;
diff --git a/tools/testing/selftests/kvm/x86/amx_test.c b/tools/testing/selftests/kvm/x86/amx_test.c
index 37b166260ee3..4e63da2b1889 100644
--- a/tools/testing/selftests/kvm/x86/amx_test.c
+++ b/tools/testing/selftests/kvm/x86/amx_test.c
@@ -80,10 +80,10 @@ static inline void __tilerelease(void)
asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0" ::);
}
-static inline void __xsavec(struct xstate *xstate, uint64_t rfbm)
+static inline void __xsavec(struct xstate *xstate, u64 rfbm)
{
- uint32_t rfbm_lo = rfbm;
- uint32_t rfbm_hi = rfbm >> 32;
+ u32 rfbm_lo = rfbm;
+ u32 rfbm_hi = rfbm >> 32;
asm volatile("xsavec (%%rdi)"
: : "D" (xstate), "a" (rfbm_lo), "d" (rfbm_hi)
@@ -236,7 +236,7 @@ int main(int argc, char *argv[])
struct kvm_x86_state *state;
struct kvm_x86_state *tile_state = NULL;
int xsave_restore_size;
- vm_vaddr_t amx_cfg, tiledata, xstate;
+ gva_t amx_cfg, tiledata, xstate;
struct ucall uc;
int ret;
@@ -263,15 +263,15 @@ int main(int argc, char *argv[])
vcpu_regs_get(vcpu, &regs1);
/* amx cfg for guest_code */
- amx_cfg = vm_vaddr_alloc_page(vm);
+ amx_cfg = vm_alloc_page(vm);
memset(addr_gva2hva(vm, amx_cfg), 0x0, getpagesize());
/* amx tiledata for guest_code */
- tiledata = vm_vaddr_alloc_pages(vm, 2);
+ tiledata = vm_alloc_pages(vm, 2);
memset(addr_gva2hva(vm, tiledata), rand() | 1, 2 * getpagesize());
/* XSAVE state for guest_code */
- xstate = vm_vaddr_alloc_pages(vm, DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
+ xstate = vm_alloc_pages(vm, DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
memset(addr_gva2hva(vm, xstate), 0, PAGE_SIZE * DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xstate);
diff --git a/tools/testing/selftests/kvm/x86/aperfmperf_test.c b/tools/testing/selftests/kvm/x86/aperfmperf_test.c
index 8b15a13df939..c91660103137 100644
--- a/tools/testing/selftests/kvm/x86/aperfmperf_test.c
+++ b/tools/testing/selftests/kvm/x86/aperfmperf_test.c
@@ -35,9 +35,9 @@ static int open_dev_msr(int cpu)
return open_path_or_exit(path, O_RDONLY);
}
-static uint64_t read_dev_msr(int msr_fd, uint32_t msr)
+static u64 read_dev_msr(int msr_fd, u32 msr)
{
- uint64_t data;
+ u64 data;
ssize_t rc;
rc = pread(msr_fd, &data, sizeof(data), msr);
@@ -107,8 +107,8 @@ static void guest_code(void *nested_test_data)
static void guest_no_aperfmperf(void)
{
- uint64_t msr_val;
- uint8_t vector;
+ u64 msr_val;
+ u8 vector;
vector = rdmsr_safe(MSR_IA32_APERF, &msr_val);
GUEST_ASSERT(vector == GP_VECTOR);
@@ -122,8 +122,8 @@ static void guest_no_aperfmperf(void)
int main(int argc, char *argv[])
{
const bool has_nested = kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX);
- uint64_t host_aperf_before, host_mperf_before;
- vm_vaddr_t nested_test_data_gva;
+ u64 host_aperf_before, host_mperf_before;
+ gva_t nested_test_data_gva;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
int msr_fd, cpu, i;
@@ -166,8 +166,8 @@ int main(int argc, char *argv[])
host_mperf_before = read_dev_msr(msr_fd, MSR_IA32_MPERF);
for (i = 0; i <= NUM_ITERATIONS * (1 + has_nested); i++) {
- uint64_t host_aperf_after, host_mperf_after;
- uint64_t guest_aperf, guest_mperf;
+ u64 host_aperf_after, host_mperf_after;
+ u64 guest_aperf, guest_mperf;
struct ucall uc;
vcpu_run(vcpu);
diff --git a/tools/testing/selftests/kvm/x86/apic_bus_clock_test.c b/tools/testing/selftests/kvm/x86/apic_bus_clock_test.c
index f8916bb34405..404f0028e110 100644
--- a/tools/testing/selftests/kvm/x86/apic_bus_clock_test.c
+++ b/tools/testing/selftests/kvm/x86/apic_bus_clock_test.c
@@ -19,8 +19,8 @@
* timer frequency.
*/
static const struct {
- const uint32_t tdcr;
- const uint32_t divide_count;
+ const u32 tdcr;
+ const u32 divide_count;
} tdcrs[] = {
{0x0, 2},
{0x1, 4},
@@ -42,12 +42,12 @@ static void apic_enable(void)
xapic_enable();
}
-static uint32_t apic_read_reg(unsigned int reg)
+static u32 apic_read_reg(unsigned int reg)
{
return is_x2apic ? x2apic_read_reg(reg) : xapic_read_reg(reg);
}
-static void apic_write_reg(unsigned int reg, uint32_t val)
+static void apic_write_reg(unsigned int reg, u32 val)
{
if (is_x2apic)
x2apic_write_reg(reg, val);
@@ -55,12 +55,12 @@ static void apic_write_reg(unsigned int reg, uint32_t val)
xapic_write_reg(reg, val);
}
-static void apic_guest_code(uint64_t apic_hz, uint64_t delay_ms)
+static void apic_guest_code(u64 apic_hz, u64 delay_ms)
{
- uint64_t tsc_hz = guest_tsc_khz * 1000;
- const uint32_t tmict = ~0u;
- uint64_t tsc0, tsc1, freq;
- uint32_t tmcct;
+ u64 tsc_hz = guest_tsc_khz * 1000;
+ const u32 tmict = ~0u;
+ u64 tsc0, tsc1, freq;
+ u32 tmcct;
int i;
apic_enable();
@@ -121,7 +121,7 @@ static void test_apic_bus_clock(struct kvm_vcpu *vcpu)
}
}
-static void run_apic_bus_clock_test(uint64_t apic_hz, uint64_t delay_ms,
+static void run_apic_bus_clock_test(u64 apic_hz, u64 delay_ms,
bool x2apic)
{
struct kvm_vcpu *vcpu;
@@ -168,8 +168,8 @@ int main(int argc, char *argv[])
* Arbitrarilty default to 25MHz for the APIC bus frequency, which is
* different enough from the default 1GHz to be interesting.
*/
- uint64_t apic_hz = 25 * 1000 * 1000;
- uint64_t delay_ms = 100;
+ u64 apic_hz = 25 * 1000 * 1000;
+ u64 delay_ms = 100;
int opt;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_APIC_BUS_CYCLES_NS));
diff --git a/tools/testing/selftests/kvm/x86/cpuid_test.c b/tools/testing/selftests/kvm/x86/cpuid_test.c
index f9ed14996977..ef0ddd240887 100644
--- a/tools/testing/selftests/kvm/x86/cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86/cpuid_test.c
@@ -140,10 +140,10 @@ static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
}
}
-struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid)
+struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, gva_t *p_gva, struct kvm_cpuid2 *cpuid)
{
int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]);
- vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR);
+ gva_t gva = vm_alloc(vm, size, KVM_UTIL_MIN_VADDR);
struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva);
memcpy(guest_cpuids, cpuid, size);
@@ -217,7 +217,7 @@ static void test_get_cpuid2(struct kvm_vcpu *vcpu)
int main(void)
{
struct kvm_vcpu *vcpu;
- vm_vaddr_t cpuid_gva;
+ gva_t cpuid_gva;
struct kvm_vm *vm;
int stage;
diff --git a/tools/testing/selftests/kvm/x86/debug_regs.c b/tools/testing/selftests/kvm/x86/debug_regs.c
index 2d814c1d1dc4..0dfaf03cd0a0 100644
--- a/tools/testing/selftests/kvm/x86/debug_regs.c
+++ b/tools/testing/selftests/kvm/x86/debug_regs.c
@@ -16,7 +16,7 @@
#define IRQ_VECTOR 0xAA
/* For testing data access debug BP */
-uint32_t guest_value;
+u32 guest_value;
extern unsigned char sw_bp, hw_bp, write_data, ss_start, bd_start;
@@ -86,7 +86,7 @@ int main(void)
struct kvm_run *run;
struct kvm_vm *vm;
struct ucall uc;
- uint64_t cmd;
+ u64 cmd;
int i;
/* Instruction lengths starting at ss_start */
int ss_size[6] = {
diff --git a/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c b/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c
index b0d2b04a7ff2..388ba4101f97 100644
--- a/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c
+++ b/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c
@@ -23,7 +23,7 @@
#define SLOTS 2
#define ITERATIONS 2
-static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static enum vm_mem_backing_src_type backing_src = VM_MEM_SRC_ANONYMOUS_HUGETLB;
@@ -33,10 +33,10 @@ static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
struct kvm_page_stats {
- uint64_t pages_4k;
- uint64_t pages_2m;
- uint64_t pages_1g;
- uint64_t hugepages;
+ u64 pages_4k;
+ u64 pages_2m;
+ u64 pages_1g;
+ u64 hugepages;
};
static void get_page_stats(struct kvm_vm *vm, struct kvm_page_stats *stats, const char *stage)
@@ -89,9 +89,9 @@ static void run_test(enum vm_guest_mode mode, void *unused)
{
struct kvm_vm *vm;
unsigned long **bitmaps;
- uint64_t guest_num_pages;
- uint64_t host_num_pages;
- uint64_t pages_per_slot;
+ u64 guest_num_pages;
+ u64 host_num_pages;
+ u64 pages_per_slot;
int i;
struct kvm_page_stats stats_populated;
struct kvm_page_stats stats_dirty_logging_enabled;
diff --git a/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c b/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c
index af7c90103396..5b3aef109cfc 100644
--- a/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c
+++ b/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c
@@ -29,13 +29,13 @@
* SMI handler: runs in real-address mode.
* Reports SMRAM_STAGE via port IO, then does RSM.
*/
-static uint8_t smi_handler[] = {
+static u8 smi_handler[] = {
0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */
0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */
0x0f, 0xaa, /* rsm */
};
-static inline void sync_with_host(uint64_t phase)
+static inline void sync_with_host(u64 phase)
{
asm volatile("in $" XSTR(SYNC_PORT) ", %%al \n"
: "+a" (phase));
@@ -73,7 +73,7 @@ static void guest_code(struct vmx_pages *vmx_pages,
int main(int argc, char *argv[])
{
- vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0;
+ gva_t vmx_pages_gva = 0, hv_pages_gva = 0;
struct hyperv_test_pages *hv;
struct hv_enlightened_vmcs *evmcs;
struct kvm_vcpu *vcpu;
diff --git a/tools/testing/selftests/kvm/x86/fastops_test.c b/tools/testing/selftests/kvm/x86/fastops_test.c
index 8926cfe0e209..c0d30ccd8767 100644
--- a/tools/testing/selftests/kvm/x86/fastops_test.c
+++ b/tools/testing/selftests/kvm/x86/fastops_test.c
@@ -15,7 +15,7 @@
"pop %[flags]\n\t"
#define flags_constraint(flags_val) [flags]"=r"(flags_val)
-#define bt_constraint(__bt_val) [bt_val]"rm"((uint32_t)__bt_val)
+#define bt_constraint(__bt_val) [bt_val]"rm"((u32)__bt_val)
#define guest_execute_fastop_1(FEP, insn, __val, __flags) \
({ \
@@ -28,17 +28,17 @@
#define guest_test_fastop_1(insn, type_t, __val) \
({ \
type_t val = __val, ex_val = __val, input = __val; \
- uint64_t flags, ex_flags; \
+ u64 flags, ex_flags; \
\
guest_execute_fastop_1("", insn, ex_val, ex_flags); \
guest_execute_fastop_1(KVM_FEP, insn, val, flags); \
\
__GUEST_ASSERT(val == ex_val, \
"Wanted 0x%lx for '%s 0x%lx', got 0x%lx", \
- (uint64_t)ex_val, insn, (uint64_t)input, (uint64_t)val); \
+ (u64)ex_val, insn, (u64)input, (u64)val); \
__GUEST_ASSERT(flags == ex_flags, \
"Wanted flags 0x%lx for '%s 0x%lx', got 0x%lx", \
- ex_flags, insn, (uint64_t)input, flags); \
+ ex_flags, insn, (u64)input, flags); \
})
#define guest_execute_fastop_2(FEP, insn, __input, __output, __flags) \
@@ -52,18 +52,18 @@
#define guest_test_fastop_2(insn, type_t, __val1, __val2) \
({ \
type_t input = __val1, input2 = __val2, output = __val2, ex_output = __val2; \
- uint64_t flags, ex_flags; \
+ u64 flags, ex_flags; \
\
guest_execute_fastop_2("", insn, input, ex_output, ex_flags); \
guest_execute_fastop_2(KVM_FEP, insn, input, output, flags); \
\
__GUEST_ASSERT(output == ex_output, \
"Wanted 0x%lx for '%s 0x%lx 0x%lx', got 0x%lx", \
- (uint64_t)ex_output, insn, (uint64_t)input, \
- (uint64_t)input2, (uint64_t)output); \
+ (u64)ex_output, insn, (u64)input, \
+ (u64)input2, (u64)output); \
__GUEST_ASSERT(flags == ex_flags, \
"Wanted flags 0x%lx for '%s 0x%lx, 0x%lx', got 0x%lx", \
- ex_flags, insn, (uint64_t)input, (uint64_t)input2, flags); \
+ ex_flags, insn, (u64)input, (u64)input2, flags); \
})
#define guest_execute_fastop_cl(FEP, insn, __shift, __output, __flags) \
@@ -77,25 +77,25 @@
#define guest_test_fastop_cl(insn, type_t, __val1, __val2) \
({ \
type_t output = __val2, ex_output = __val2, input = __val2; \
- uint8_t shift = __val1; \
- uint64_t flags, ex_flags; \
+ u8 shift = __val1; \
+ u64 flags, ex_flags; \
\
guest_execute_fastop_cl("", insn, shift, ex_output, ex_flags); \
guest_execute_fastop_cl(KVM_FEP, insn, shift, output, flags); \
\
__GUEST_ASSERT(output == ex_output, \
"Wanted 0x%lx for '%s 0x%x, 0x%lx', got 0x%lx", \
- (uint64_t)ex_output, insn, shift, (uint64_t)input, \
- (uint64_t)output); \
+ (u64)ex_output, insn, shift, (u64)input, \
+ (u64)output); \
__GUEST_ASSERT(flags == ex_flags, \
"Wanted flags 0x%lx for '%s 0x%x, 0x%lx', got 0x%lx", \
- ex_flags, insn, shift, (uint64_t)input, flags); \
+ ex_flags, insn, shift, (u64)input, flags); \
})
#define guest_execute_fastop_div(__KVM_ASM_SAFE, insn, __a, __d, __rm, __flags) \
({ \
- uint64_t ign_error_code; \
- uint8_t vector; \
+ u64 ign_error_code; \
+ u8 vector; \
\
__asm__ __volatile__(fastop(__KVM_ASM_SAFE(insn " %[denom]")) \
: "+a"(__a), "+d"(__d), flags_constraint(__flags), \
@@ -109,8 +109,8 @@
({ \
type_t _a = __val1, _d = __val1, rm = __val2; \
type_t a = _a, d = _d, ex_a = _a, ex_d = _d; \
- uint64_t flags, ex_flags; \
- uint8_t v, ex_v; \
+ u64 flags, ex_flags; \
+ u8 v, ex_v; \
\
ex_v = guest_execute_fastop_div(KVM_ASM_SAFE, insn, ex_a, ex_d, rm, ex_flags); \
v = guest_execute_fastop_div(KVM_ASM_SAFE_FEP, insn, a, d, rm, flags); \
@@ -118,17 +118,17 @@
GUEST_ASSERT_EQ(v, ex_v); \
__GUEST_ASSERT(v == ex_v, \
"Wanted vector 0x%x for '%s 0x%lx:0x%lx/0x%lx', got 0x%x", \
- ex_v, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, v); \
+ ex_v, insn, (u64)_a, (u64)_d, (u64)rm, v); \
__GUEST_ASSERT(a == ex_a && d == ex_d, \
"Wanted 0x%lx:0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx:0x%lx",\
- (uint64_t)ex_a, (uint64_t)ex_d, insn, (uint64_t)_a, \
- (uint64_t)_d, (uint64_t)rm, (uint64_t)a, (uint64_t)d); \
+ (u64)ex_a, (u64)ex_d, insn, (u64)_a, \
+ (u64)_d, (u64)rm, (u64)a, (u64)d); \
__GUEST_ASSERT(v || ex_v || (flags == ex_flags), \
"Wanted flags 0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx", \
- ex_flags, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, flags);\
+ ex_flags, insn, (u64)_a, (u64)_d, (u64)rm, flags);\
})
-static const uint64_t vals[] = {
+static const u64 vals[] = {
0,
1,
2,
@@ -185,10 +185,10 @@ if (sizeof(type_t) != 1) { \
static void guest_code(void)
{
- guest_test_fastops(uint8_t, "b");
- guest_test_fastops(uint16_t, "w");
- guest_test_fastops(uint32_t, "l");
- guest_test_fastops(uint64_t, "q");
+ guest_test_fastops(u8, "b");
+ guest_test_fastops(u16, "w");
+ guest_test_fastops(u32, "l");
+ guest_test_fastops(u64, "q");
GUEST_DONE();
}
diff --git a/tools/testing/selftests/kvm/x86/feature_msrs_test.c b/tools/testing/selftests/kvm/x86/feature_msrs_test.c
index a72f13ae2edb..158550701771 100644
--- a/tools/testing/selftests/kvm/x86/feature_msrs_test.c
+++ b/tools/testing/selftests/kvm/x86/feature_msrs_test.c
@@ -12,7 +12,7 @@
#include "kvm_util.h"
#include "processor.h"
-static bool is_kvm_controlled_msr(uint32_t msr)
+static bool is_kvm_controlled_msr(u32 msr)
{
return msr == MSR_IA32_VMX_CR0_FIXED1 || msr == MSR_IA32_VMX_CR4_FIXED1;
}
@@ -21,7 +21,7 @@ static bool is_kvm_controlled_msr(uint32_t msr)
* For VMX MSRs with a "true" variant, KVM requires userspace to set the "true"
* MSR, and doesn't allow setting the hidden version.
*/
-static bool is_hidden_vmx_msr(uint32_t msr)
+static bool is_hidden_vmx_msr(u32 msr)
{
switch (msr) {
case MSR_IA32_VMX_PINBASED_CTLS:
@@ -34,15 +34,15 @@ static bool is_hidden_vmx_msr(uint32_t msr)
}
}
-static bool is_quirked_msr(uint32_t msr)
+static bool is_quirked_msr(u32 msr)
{
return msr != MSR_AMD64_DE_CFG;
}
-static void test_feature_msr(uint32_t msr)
+static void test_feature_msr(u32 msr)
{
- const uint64_t supported_mask = kvm_get_feature_msr(msr);
- uint64_t reset_value = is_quirked_msr(msr) ? supported_mask : 0;
+ const u64 supported_mask = kvm_get_feature_msr(msr);
+ u64 reset_value = is_quirked_msr(msr) ? supported_mask : 0;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
diff --git a/tools/testing/selftests/kvm/x86/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86/fix_hypercall_test.c
index 00b6e85735dd..753a0e730ea8 100644
--- a/tools/testing/selftests/kvm/x86/fix_hypercall_test.c
+++ b/tools/testing/selftests/kvm/x86/fix_hypercall_test.c
@@ -26,18 +26,18 @@ static void guest_ud_handler(struct ex_regs *regs)
regs->rip += HYPERCALL_INSN_SIZE;
}
-static const uint8_t vmx_vmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xc1 };
-static const uint8_t svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 };
+static const u8 vmx_vmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xc1 };
+static const u8 svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 };
-extern uint8_t hypercall_insn[HYPERCALL_INSN_SIZE];
-static uint64_t do_sched_yield(uint8_t apic_id)
+extern u8 hypercall_insn[HYPERCALL_INSN_SIZE];
+static u64 do_sched_yield(u8 apic_id)
{
- uint64_t ret;
+ u64 ret;
asm volatile("hypercall_insn:\n\t"
".byte 0xcc,0xcc,0xcc\n\t"
: "=a"(ret)
- : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id)
+ : "a"((u64)KVM_HC_SCHED_YIELD), "b"((u64)apic_id)
: "memory");
return ret;
@@ -45,9 +45,9 @@ static uint64_t do_sched_yield(uint8_t apic_id)
static void guest_main(void)
{
- const uint8_t *native_hypercall_insn;
- const uint8_t *other_hypercall_insn;
- uint64_t ret;
+ const u8 *native_hypercall_insn;
+ const u8 *other_hypercall_insn;
+ u64 ret;
if (host_cpu_is_intel) {
native_hypercall_insn = vmx_vmcall;
@@ -72,7 +72,7 @@ static void guest_main(void)
* the "right" hypercall.
*/
if (quirk_disabled) {
- GUEST_ASSERT(ret == (uint64_t)-EFAULT);
+ GUEST_ASSERT(ret == (u64)-EFAULT);
GUEST_ASSERT(!memcmp(other_hypercall_insn, hypercall_insn,
HYPERCALL_INSN_SIZE));
} else {
diff --git a/tools/testing/selftests/kvm/x86/flds_emulation.h b/tools/testing/selftests/kvm/x86/flds_emulation.h
index 37b1a9f52864..fd6b6c67199a 100644
--- a/tools/testing/selftests/kvm/x86/flds_emulation.h
+++ b/tools/testing/selftests/kvm/x86/flds_emulation.h
@@ -12,7 +12,7 @@
* KVM to emulate the instruction (e.g. by providing an MMIO address) to
* exercise emulation failures.
*/
-static inline void flds(uint64_t address)
+static inline void flds(u64 address)
{
__asm__ __volatile__(FLDS_MEM_EAX :: "a"(address));
}
@@ -21,8 +21,8 @@ static inline void handle_flds_emulation_failure_exit(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
struct kvm_regs regs;
- uint8_t *insn_bytes;
- uint64_t flags;
+ u8 *insn_bytes;
+ u64 flags;
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
diff --git a/tools/testing/selftests/kvm/x86/hwcr_msr_test.c b/tools/testing/selftests/kvm/x86/hwcr_msr_test.c
index 10b1b0ba374e..8e20a03b3329 100644
--- a/tools/testing/selftests/kvm/x86/hwcr_msr_test.c
+++ b/tools/testing/selftests/kvm/x86/hwcr_msr_test.c
@@ -10,11 +10,11 @@
void test_hwcr_bit(struct kvm_vcpu *vcpu, unsigned int bit)
{
- const uint64_t ignored = BIT_ULL(3) | BIT_ULL(6) | BIT_ULL(8);
- const uint64_t valid = BIT_ULL(18) | BIT_ULL(24);
- const uint64_t legal = ignored | valid;
- uint64_t val = BIT_ULL(bit);
- uint64_t actual;
+ const u64 ignored = BIT_ULL(3) | BIT_ULL(6) | BIT_ULL(8);
+ const u64 valid = BIT_ULL(18) | BIT_ULL(24);
+ const u64 legal = ignored | valid;
+ u64 val = BIT_ULL(bit);
+ u64 actual;
int r;
r = _vcpu_set_msr(vcpu, MSR_K7_HWCR, val);
diff --git a/tools/testing/selftests/kvm/x86/hyperv_clock.c b/tools/testing/selftests/kvm/x86/hyperv_clock.c
index e058bc676cd6..c083cea546dc 100644
--- a/tools/testing/selftests/kvm/x86/hyperv_clock.c
+++ b/tools/testing/selftests/kvm/x86/hyperv_clock.c
@@ -98,7 +98,7 @@ static inline void check_tsc_msr_tsc_page(struct ms_hyperv_tsc_page *tsc_page)
GUEST_ASSERT(r2 >= t1 && r2 - t2 < 100000);
}
-static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_gpa)
+static void guest_main(struct ms_hyperv_tsc_page *tsc_page, gpa_t tsc_page_gpa)
{
u64 tsc_scale, tsc_offset;
@@ -208,7 +208,7 @@ int main(void)
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
- vm_vaddr_t tsc_page_gva;
+ gva_t tsc_page_gva;
int stage;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_TIME));
@@ -218,7 +218,7 @@ int main(void)
vcpu_set_hv_cpuid(vcpu);
- tsc_page_gva = vm_vaddr_alloc_page(vm);
+ tsc_page_gva = vm_alloc_page(vm);
memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize());
TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
"TSC page has to be page aligned");
diff --git a/tools/testing/selftests/kvm/x86/hyperv_evmcs.c b/tools/testing/selftests/kvm/x86/hyperv_evmcs.c
index 74cf19661309..c7fa114aee20 100644
--- a/tools/testing/selftests/kvm/x86/hyperv_evmcs.c
+++ b/tools/testing/selftests/kvm/x86/hyperv_evmcs.c
@@ -30,7 +30,7 @@ static void guest_nmi_handler(struct ex_regs *regs)
{
}
-static inline void rdmsr_from_l2(uint32_t msr)
+static inline void rdmsr_from_l2(u32 msr)
{
/* Currently, L1 doesn't preserve GPRs during vmexits. */
__asm__ __volatile__ ("rdmsr" : : "c"(msr) :
@@ -76,7 +76,7 @@ void l2_guest_code(void)
}
void guest_code(struct vmx_pages *vmx_pages, struct hyperv_test_pages *hv_pages,
- vm_vaddr_t hv_hcall_page_gpa)
+ gpa_t hv_hcall_page_gpa)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
@@ -231,8 +231,8 @@ static struct kvm_vcpu *save_restore_vm(struct kvm_vm *vm,
int main(int argc, char *argv[])
{
- vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0;
- vm_vaddr_t hcall_page;
+ gva_t vmx_pages_gva = 0, hv_pages_gva = 0;
+ gva_t hcall_page;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
@@ -246,7 +246,7 @@ int main(int argc, char *argv[])
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- hcall_page = vm_vaddr_alloc_pages(vm, 1);
+ hcall_page = vm_alloc_pages(vm, 1);
memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize());
vcpu_set_hv_cpuid(vcpu);
diff --git a/tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c b/tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c
index 949e08e98f31..ae047db7b1be 100644
--- a/tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c
+++ b/tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c
@@ -15,19 +15,19 @@
/* Any value is fine */
#define EXT_CAPABILITIES 0xbull
-static void guest_code(vm_paddr_t in_pg_gpa, vm_paddr_t out_pg_gpa,
- vm_vaddr_t out_pg_gva)
+static void guest_code(gpa_t in_pg_gpa, gpa_t out_pg_gpa,
+ gva_t out_pg_gva)
{
- uint64_t *output_gva;
+ u64 *output_gva;
wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
wrmsr(HV_X64_MSR_HYPERCALL, in_pg_gpa);
- output_gva = (uint64_t *)out_pg_gva;
+ output_gva = (u64 *)out_pg_gva;
hyperv_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, in_pg_gpa, out_pg_gpa);
- /* TLFS states output will be a uint64_t value */
+ /* TLFS states output will be a u64 value */
GUEST_ASSERT_EQ(*output_gva, EXT_CAPABILITIES);
GUEST_DONE();
@@ -35,12 +35,12 @@ static void guest_code(vm_paddr_t in_pg_gpa, vm_paddr_t out_pg_gpa,
int main(void)
{
- vm_vaddr_t hcall_out_page;
- vm_vaddr_t hcall_in_page;
+ gva_t hcall_out_page;
+ gva_t hcall_in_page;
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
- uint64_t *outval;
+ u64 *outval;
struct ucall uc;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_CPUID));
@@ -57,11 +57,11 @@ int main(void)
vcpu_set_hv_cpuid(vcpu);
/* Hypercall input */
- hcall_in_page = vm_vaddr_alloc_pages(vm, 1);
+ hcall_in_page = vm_alloc_pages(vm, 1);
memset(addr_gva2hva(vm, hcall_in_page), 0x0, vm->page_size);
/* Hypercall output */
- hcall_out_page = vm_vaddr_alloc_pages(vm, 1);
+ hcall_out_page = vm_alloc_pages(vm, 1);
memset(addr_gva2hva(vm, hcall_out_page), 0x0, vm->page_size);
vcpu_args_set(vcpu, 3, addr_gva2gpa(vm, hcall_in_page),
diff --git a/tools/testing/selftests/kvm/x86/hyperv_features.c b/tools/testing/selftests/kvm/x86/hyperv_features.c
index 130b9ce7e5dd..7347f1fe5157 100644
--- a/tools/testing/selftests/kvm/x86/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86/hyperv_features.c
@@ -22,27 +22,27 @@
KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0)
struct msr_data {
- uint32_t idx;
+ u32 idx;
bool fault_expected;
bool write;
u64 write_val;
};
struct hcall_data {
- uint64_t control;
- uint64_t expect;
+ u64 control;
+ u64 expect;
bool ud_expected;
};
-static bool is_write_only_msr(uint32_t msr)
+static bool is_write_only_msr(u32 msr)
{
return msr == HV_X64_MSR_EOI;
}
static void guest_msr(struct msr_data *msr)
{
- uint8_t vector = 0;
- uint64_t msr_val = 0;
+ u8 vector = 0;
+ u64 msr_val = 0;
GUEST_ASSERT(msr->idx);
@@ -82,10 +82,10 @@ done:
GUEST_DONE();
}
-static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
+static void guest_hcall(gpa_t pgs_gpa, struct hcall_data *hcall)
{
u64 res, input, output;
- uint8_t vector;
+ u8 vector;
GUEST_ASSERT_NE(hcall->control, 0);
@@ -134,14 +134,14 @@ static void guest_test_msrs_access(void)
struct kvm_vm *vm;
struct ucall uc;
int stage = 0;
- vm_vaddr_t msr_gva;
+ gva_t msr_gva;
struct msr_data *msr;
bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC);
while (true) {
vm = vm_create_with_one_vcpu(&vcpu, guest_msr);
- msr_gva = vm_vaddr_alloc_page(vm);
+ msr_gva = vm_alloc_page(vm);
memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
msr = addr_gva2hva(vm, msr_gva);
@@ -523,17 +523,17 @@ static void guest_test_hcalls_access(void)
struct kvm_vm *vm;
struct ucall uc;
int stage = 0;
- vm_vaddr_t hcall_page, hcall_params;
+ gva_t hcall_page, hcall_params;
struct hcall_data *hcall;
while (true) {
vm = vm_create_with_one_vcpu(&vcpu, guest_hcall);
/* Hypercall input/output */
- hcall_page = vm_vaddr_alloc_pages(vm, 2);
+ hcall_page = vm_alloc_pages(vm, 2);
memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
- hcall_params = vm_vaddr_alloc_page(vm);
+ hcall_params = vm_alloc_page(vm);
memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
hcall = addr_gva2hva(vm, hcall_params);
diff --git a/tools/testing/selftests/kvm/x86/hyperv_ipi.c b/tools/testing/selftests/kvm/x86/hyperv_ipi.c
index ca61836c4e32..771535f9aad3 100644
--- a/tools/testing/selftests/kvm/x86/hyperv_ipi.c
+++ b/tools/testing/selftests/kvm/x86/hyperv_ipi.c
@@ -18,7 +18,7 @@
#define IPI_VECTOR 0xfe
-static volatile uint64_t ipis_rcvd[RECEIVER_VCPU_ID_2 + 1];
+static volatile u64 ipis_rcvd[RECEIVER_VCPU_ID_2 + 1];
struct hv_vpset {
u64 format;
@@ -45,13 +45,13 @@ struct hv_send_ipi_ex {
struct hv_vpset vp_set;
};
-static inline void hv_init(vm_vaddr_t pgs_gpa)
+static inline void hv_init(gpa_t pgs_gpa)
{
wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
}
-static void receiver_code(void *hcall_page, vm_vaddr_t pgs_gpa)
+static void receiver_code(void *hcall_page, gpa_t pgs_gpa)
{
u32 vcpu_id;
@@ -85,7 +85,7 @@ static inline void nop_loop(void)
asm volatile("nop");
}
-static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa)
+static void sender_guest_code(void *hcall_page, gpa_t pgs_gpa)
{
struct hv_send_ipi *ipi = (struct hv_send_ipi *)hcall_page;
struct hv_send_ipi_ex *ipi_ex = (struct hv_send_ipi_ex *)hcall_page;
@@ -243,7 +243,7 @@ int main(int argc, char *argv[])
{
struct kvm_vm *vm;
struct kvm_vcpu *vcpu[3];
- vm_vaddr_t hcall_page;
+ gva_t hcall_page;
pthread_t threads[2];
int stage = 1, r;
struct ucall uc;
@@ -253,7 +253,7 @@ int main(int argc, char *argv[])
vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);
/* Hypercall input/output */
- hcall_page = vm_vaddr_alloc_pages(vm, 2);
+ hcall_page = vm_alloc_pages(vm, 2);
memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
diff --git a/tools/testing/selftests/kvm/x86/hyperv_svm_test.c b/tools/testing/selftests/kvm/x86/hyperv_svm_test.c
index 0ddb63229bcb..7a62f6a9d606 100644
--- a/tools/testing/selftests/kvm/x86/hyperv_svm_test.c
+++ b/tools/testing/selftests/kvm/x86/hyperv_svm_test.c
@@ -21,7 +21,7 @@
#define L2_GUEST_STACK_SIZE 256
/* Exit to L1 from L2 with RDMSR instruction */
-static inline void rdmsr_from_l2(uint32_t msr)
+static inline void rdmsr_from_l2(u32 msr)
{
/* Currently, L1 doesn't preserve GPRs during vmexits. */
__asm__ __volatile__ ("rdmsr" : : "c"(msr) :
@@ -67,7 +67,7 @@ void l2_guest_code(void)
static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm,
struct hyperv_test_pages *hv_pages,
- vm_vaddr_t pgs_gpa)
+ gpa_t pgs_gpa)
{
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
struct vmcb *vmcb = svm->vmcb;
@@ -149,8 +149,8 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm,
int main(int argc, char *argv[])
{
- vm_vaddr_t nested_gva = 0, hv_pages_gva = 0;
- vm_vaddr_t hcall_page;
+ gva_t nested_gva = 0, hv_pages_gva = 0;
+ gva_t hcall_page;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
@@ -165,7 +165,7 @@ int main(int argc, char *argv[])
vcpu_alloc_svm(vm, &nested_gva);
vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
- hcall_page = vm_vaddr_alloc_pages(vm, 1);
+ hcall_page = vm_alloc_pages(vm, 1);
memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize());
vcpu_args_set(vcpu, 3, nested_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_page));
diff --git a/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c b/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
index c542cc4762b1..15ee8b7bfc11 100644
--- a/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
+++ b/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
@@ -61,14 +61,14 @@ struct hv_tlb_flush_ex {
* - GVAs of the test pages' PTEs
*/
struct test_data {
- vm_vaddr_t hcall_gva;
- vm_paddr_t hcall_gpa;
- vm_vaddr_t test_pages;
- vm_vaddr_t test_pages_pte[NTEST_PAGES];
+ gva_t hcall_gva;
+ gpa_t hcall_gpa;
+ gva_t test_pages;
+ gva_t test_pages_pte[NTEST_PAGES];
};
/* 'Worker' vCPU code checking the contents of the test page */
-static void worker_guest_code(vm_vaddr_t test_data)
+static void worker_guest_code(gva_t test_data)
{
struct test_data *data = (struct test_data *)test_data;
u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);
@@ -133,12 +133,12 @@ static void set_expected_val(void *addr, u64 val, int vcpu_id)
* Update PTEs swapping two test pages.
* TODO: use swap()/xchg() when these are provided.
*/
-static void swap_two_test_pages(vm_paddr_t pte_gva1, vm_paddr_t pte_gva2)
+static void swap_two_test_pages(gpa_t pte_gva1, gpa_t pte_gva2)
{
- uint64_t tmp = *(uint64_t *)pte_gva1;
+ u64 tmp = *(u64 *)pte_gva1;
- *(uint64_t *)pte_gva1 = *(uint64_t *)pte_gva2;
- *(uint64_t *)pte_gva2 = tmp;
+ *(u64 *)pte_gva1 = *(u64 *)pte_gva2;
+ *(u64 *)pte_gva2 = tmp;
}
/*
@@ -196,12 +196,12 @@ static inline void post_test(struct test_data *data, u64 exp1, u64 exp2)
#define TESTVAL2 0x0202020202020202
/* Main vCPU doing the test */
-static void sender_guest_code(vm_vaddr_t test_data)
+static void sender_guest_code(gva_t test_data)
{
struct test_data *data = (struct test_data *)test_data;
struct hv_tlb_flush *flush = (struct hv_tlb_flush *)data->hcall_gva;
struct hv_tlb_flush_ex *flush_ex = (struct hv_tlb_flush_ex *)data->hcall_gva;
- vm_paddr_t hcall_gpa = data->hcall_gpa;
+ gpa_t hcall_gpa = data->hcall_gpa;
int i, stage = 1;
wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
@@ -581,9 +581,9 @@ int main(int argc, char *argv[])
struct kvm_vm *vm;
struct kvm_vcpu *vcpu[3];
pthread_t threads[2];
- vm_vaddr_t test_data_page, gva;
- vm_paddr_t gpa;
- uint64_t *pte;
+ gva_t test_data_page, gva;
+ gpa_t gpa;
+ u64 *pte;
struct test_data *data;
struct ucall uc;
int stage = 1, r, i;
@@ -593,11 +593,11 @@ int main(int argc, char *argv[])
vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);
/* Test data page */
- test_data_page = vm_vaddr_alloc_page(vm);
+ test_data_page = vm_alloc_page(vm);
data = (struct test_data *)addr_gva2hva(vm, test_data_page);
/* Hypercall input/output */
- data->hcall_gva = vm_vaddr_alloc_pages(vm, 2);
+ data->hcall_gva = vm_alloc_pages(vm, 2);
data->hcall_gpa = addr_gva2gpa(vm, data->hcall_gva);
memset(addr_gva2hva(vm, data->hcall_gva), 0x0, 2 * PAGE_SIZE);
@@ -606,7 +606,7 @@ int main(int argc, char *argv[])
* and the test will swap their mappings. The third page keeps the indication
* about the current state of mappings.
*/
- data->test_pages = vm_vaddr_alloc_pages(vm, NTEST_PAGES + 1);
+ data->test_pages = vm_alloc_pages(vm, NTEST_PAGES + 1);
for (i = 0; i < NTEST_PAGES; i++)
memset(addr_gva2hva(vm, data->test_pages + PAGE_SIZE * i),
(u8)(i + 1), PAGE_SIZE);
@@ -617,7 +617,7 @@ int main(int argc, char *argv[])
* Get PTE pointers for test pages and map them inside the guest.
* Use separate page for each PTE for simplicity.
*/
- gva = vm_vaddr_unused_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR);
+ gva = vm_unused_gva_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR);
for (i = 0; i < NTEST_PAGES; i++) {
pte = vm_get_pte(vm, data->test_pages + i * PAGE_SIZE);
gpa = addr_hva2gpa(vm, pte);
diff --git a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c
index d88500c118eb..52014a3210c8 100644
--- a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c
+++ b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c
@@ -73,7 +73,7 @@ static void guest_code(void *test_data)
int main(int argc, char *argv[])
{
const bool has_nested = kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX);
- vm_vaddr_t nested_test_data_gva;
+ gva_t nested_test_data_gva;
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
diff --git a/tools/testing/selftests/kvm/x86/kvm_clock_test.c b/tools/testing/selftests/kvm/x86/kvm_clock_test.c
index 5bc12222d87a..5ad4aeb8e373 100644
--- a/tools/testing/selftests/kvm/x86/kvm_clock_test.c
+++ b/tools/testing/selftests/kvm/x86/kvm_clock_test.c
@@ -17,8 +17,8 @@
#include "processor.h"
struct test_case {
- uint64_t kvmclock_base;
- int64_t realtime_offset;
+ u64 kvmclock_base;
+ s64 realtime_offset;
};
static struct test_case test_cases[] = {
@@ -31,7 +31,7 @@ static struct test_case test_cases[] = {
#define GUEST_SYNC_CLOCK(__stage, __val) \
GUEST_SYNC_ARGS(__stage, __val, 0, 0, 0)
-static void guest_main(vm_paddr_t pvti_pa, struct pvclock_vcpu_time_info *pvti)
+static void guest_main(gpa_t pvti_pa, struct pvclock_vcpu_time_info *pvti)
{
int i;
@@ -52,7 +52,7 @@ static inline void assert_flags(struct kvm_clock_data *data)
static void handle_sync(struct ucall *uc, struct kvm_clock_data *start,
struct kvm_clock_data *end)
{
- uint64_t obs, exp_lo, exp_hi;
+ u64 obs, exp_lo, exp_hi;
obs = uc->args[2];
exp_lo = start->clock;
@@ -135,8 +135,8 @@ static void enter_guest(struct kvm_vcpu *vcpu)
int main(void)
{
struct kvm_vcpu *vcpu;
- vm_vaddr_t pvti_gva;
- vm_paddr_t pvti_gpa;
+ gva_t pvti_gva;
+ gpa_t pvti_gpa;
struct kvm_vm *vm;
int flags;
@@ -147,7 +147,7 @@ int main(void)
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
- pvti_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000);
+ pvti_gva = vm_alloc(vm, getpagesize(), 0x10000);
pvti_gpa = addr_gva2gpa(vm, pvti_gva);
vcpu_args_set(vcpu, 2, pvti_gpa, pvti_gva);
diff --git a/tools/testing/selftests/kvm/x86/kvm_pv_test.c b/tools/testing/selftests/kvm/x86/kvm_pv_test.c
index 1b805cbdb47b..8ed5fa635021 100644
--- a/tools/testing/selftests/kvm/x86/kvm_pv_test.c
+++ b/tools/testing/selftests/kvm/x86/kvm_pv_test.c
@@ -13,7 +13,7 @@
#include "processor.h"
struct msr_data {
- uint32_t idx;
+ u32 idx;
const char *name;
};
@@ -40,8 +40,8 @@ static struct msr_data msrs_to_test[] = {
static void test_msr(struct msr_data *msr)
{
- uint64_t ignored;
- uint8_t vector;
+ u64 ignored;
+ u8 vector;
PR_MSR(msr);
@@ -53,7 +53,7 @@ static void test_msr(struct msr_data *msr)
}
struct hcall_data {
- uint64_t nr;
+ u64 nr;
const char *name;
};
@@ -73,7 +73,7 @@ static struct hcall_data hcalls_to_test[] = {
static void test_hcall(struct hcall_data *hc)
{
- uint64_t r;
+ u64 r;
PR_HCALL(hc);
r = kvm_hypercall(hc->nr, 0, 0, 0, 0);
diff --git a/tools/testing/selftests/kvm/x86/monitor_mwait_test.c b/tools/testing/selftests/kvm/x86/monitor_mwait_test.c
index e45c028d2a7e..9c156cf7db0e 100644
--- a/tools/testing/selftests/kvm/x86/monitor_mwait_test.c
+++ b/tools/testing/selftests/kvm/x86/monitor_mwait_test.c
@@ -67,7 +67,7 @@ static void guest_monitor_wait(void *arg)
int main(int argc, char *argv[])
{
- uint64_t disabled_quirks;
+ u64 disabled_quirks;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
diff --git a/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c b/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c
index f001cb836bfa..761fec293408 100644
--- a/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c
+++ b/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c
@@ -67,7 +67,7 @@ static void l1_guest_code(void *data)
int main(int argc, char *argv[])
{
- vm_vaddr_t guest_gva;
+ gva_t guest_gva;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
diff --git a/tools/testing/selftests/kvm/x86/nested_dirty_log_test.c b/tools/testing/selftests/kvm/x86/nested_dirty_log_test.c
index 619229bbd693..0e67cce83570 100644
--- a/tools/testing/selftests/kvm/x86/nested_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86/nested_dirty_log_test.c
@@ -47,10 +47,10 @@
#define TEST_SYNC_WRITE_FAULT BIT(1)
#define TEST_SYNC_NO_FAULT BIT(2)
-static void l2_guest_code(vm_vaddr_t base)
+static void l2_guest_code(gva_t base)
{
- vm_vaddr_t page0 = TEST_GUEST_ADDR(base, 0);
- vm_vaddr_t page1 = TEST_GUEST_ADDR(base, 1);
+ gva_t page0 = TEST_GUEST_ADDR(base, 0);
+ gva_t page1 = TEST_GUEST_ADDR(base, 1);
READ_ONCE(*(u64 *)page0);
GUEST_SYNC(page0 | TEST_SYNC_READ_FAULT);
@@ -143,7 +143,7 @@ static void l1_guest_code(void *data)
static void test_handle_ucall_sync(struct kvm_vm *vm, u64 arg,
unsigned long *bmap)
{
- vm_vaddr_t gva = arg & ~(PAGE_SIZE - 1);
+ gva_t gva = arg & ~(PAGE_SIZE - 1);
int page_nr, i;
/*
@@ -198,7 +198,7 @@ static void test_handle_ucall_sync(struct kvm_vm *vm, u64 arg,
static void test_dirty_log(bool nested_tdp)
{
- vm_vaddr_t nested_gva = 0;
+ gva_t nested_gva = 0;
unsigned long *bmap;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
diff --git a/tools/testing/selftests/kvm/x86/nested_emulation_test.c b/tools/testing/selftests/kvm/x86/nested_emulation_test.c
index abc824dba04f..fb7dcbe53ac7 100644
--- a/tools/testing/selftests/kvm/x86/nested_emulation_test.c
+++ b/tools/testing/selftests/kvm/x86/nested_emulation_test.c
@@ -13,8 +13,8 @@ enum {
struct emulated_instruction {
const char name[32];
- uint8_t opcode[15];
- uint32_t exit_reason[NR_VIRTUALIZATION_FLAVORS];
+ u8 opcode[15];
+ u32 exit_reason[NR_VIRTUALIZATION_FLAVORS];
};
static struct emulated_instruction instructions[] = {
@@ -32,13 +32,13 @@ static struct emulated_instruction instructions[] = {
},
};
-static uint8_t kvm_fep[] = { 0x0f, 0x0b, 0x6b, 0x76, 0x6d }; /* ud2 ; .ascii "kvm" */
-static uint8_t l2_guest_code[sizeof(kvm_fep) + 15];
-static uint8_t *l2_instruction = &l2_guest_code[sizeof(kvm_fep)];
+static u8 kvm_fep[] = { 0x0f, 0x0b, 0x6b, 0x76, 0x6d }; /* ud2 ; .ascii "kvm" */
+static u8 l2_guest_code[sizeof(kvm_fep) + 15];
+static u8 *l2_instruction = &l2_guest_code[sizeof(kvm_fep)];
-static uint32_t get_instruction_length(struct emulated_instruction *insn)
+static u32 get_instruction_length(struct emulated_instruction *insn)
{
- uint32_t i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(insn->opcode) && insn->opcode[i]; i++)
;
@@ -81,8 +81,8 @@ static void guest_code(void *test_data)
for (i = 0; i < ARRAY_SIZE(instructions); i++) {
struct emulated_instruction *insn = &instructions[i];
- uint32_t insn_len = get_instruction_length(insn);
- uint32_t exit_insn_len;
+ u32 insn_len = get_instruction_length(insn);
+ u32 exit_insn_len;
u32 exit_reason;
/*
@@ -122,7 +122,7 @@ static void guest_code(void *test_data)
int main(int argc, char *argv[])
{
- vm_vaddr_t nested_test_data_gva;
+ gva_t nested_test_data_gva;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
diff --git a/tools/testing/selftests/kvm/x86/nested_exceptions_test.c b/tools/testing/selftests/kvm/x86/nested_exceptions_test.c
index 3641a42934ac..186e980aa8ee 100644
--- a/tools/testing/selftests/kvm/x86/nested_exceptions_test.c
+++ b/tools/testing/selftests/kvm/x86/nested_exceptions_test.c
@@ -72,7 +72,7 @@ static void l2_ss_injected_tf_test(void)
}
static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector,
- uint32_t error_code)
+ u32 error_code)
{
struct vmcb *vmcb = svm->vmcb;
struct vmcb_control_area *ctrl = &vmcb->control;
@@ -111,7 +111,7 @@ static void l1_svm_code(struct svm_test_data *svm)
GUEST_DONE();
}
-static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code)
+static void vmx_run_l2(void *l2_code, int vector, u32 error_code)
{
GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_code));
@@ -216,7 +216,7 @@ static void queue_ss_exception(struct kvm_vcpu *vcpu, bool inject)
*/
int main(int argc, char *argv[])
{
- vm_vaddr_t nested_test_data_gva;
+ gva_t nested_test_data_gva;
struct kvm_vcpu_events events;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
diff --git a/tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c b/tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c
index a6b6da9cf7fe..11fd2467d823 100644
--- a/tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c
+++ b/tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c
@@ -78,7 +78,7 @@ int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- vm_vaddr_t guest_gva = 0;
+ gva_t guest_gva = 0;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) ||
kvm_cpu_has(X86_FEATURE_SVM));
diff --git a/tools/testing/selftests/kvm/x86/nested_set_state_test.c b/tools/testing/selftests/kvm/x86/nested_set_state_test.c
index 0f2102b43629..831380732671 100644
--- a/tools/testing/selftests/kvm/x86/nested_set_state_test.c
+++ b/tools/testing/selftests/kvm/x86/nested_set_state_test.c
@@ -250,14 +250,14 @@ void test_vmx_nested_state(struct kvm_vcpu *vcpu)
static void vcpu_efer_enable_svm(struct kvm_vcpu *vcpu)
{
- uint64_t old_efer = vcpu_get_msr(vcpu, MSR_EFER);
+ u64 old_efer = vcpu_get_msr(vcpu, MSR_EFER);
vcpu_set_msr(vcpu, MSR_EFER, old_efer | EFER_SVME);
}
static void vcpu_efer_disable_svm(struct kvm_vcpu *vcpu)
{
- uint64_t old_efer = vcpu_get_msr(vcpu, MSR_EFER);
+ u64 old_efer = vcpu_get_msr(vcpu, MSR_EFER);
vcpu_set_msr(vcpu, MSR_EFER, old_efer & ~EFER_SVME);
}
diff --git a/tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c
index 2839f650e5c9..f0e4adac4751 100644
--- a/tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c
@@ -53,9 +53,9 @@ enum {
/* The virtual machine object. */
static struct kvm_vm *vm;
-static void check_ia32_tsc_adjust(int64_t max)
+static void check_ia32_tsc_adjust(s64 max)
{
- int64_t adjust;
+ s64 adjust;
adjust = rdmsr(MSR_IA32_TSC_ADJUST);
GUEST_SYNC(adjust);
@@ -64,7 +64,7 @@ static void check_ia32_tsc_adjust(int64_t max)
static void l2_guest_code(void)
{
- uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
+ u64 l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
@@ -88,7 +88,7 @@ static void l1_guest_code(void *data)
*/
if (this_cpu_has(X86_FEATURE_VMX)) {
struct vmx_pages *vmx_pages = data;
- uint32_t control;
+ u32 control;
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
GUEST_ASSERT(load_vmcs(vmx_pages));
@@ -117,7 +117,7 @@ static void l1_guest_code(void *data)
GUEST_DONE();
}
-static void report(int64_t val)
+static void report(s64 val)
{
pr_info("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
@@ -125,7 +125,7 @@ static void report(int64_t val)
int main(int argc, char *argv[])
{
- vm_vaddr_t nested_gva;
+ gva_t nested_gva;
struct kvm_vcpu *vcpu;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) ||
diff --git a/tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c b/tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c
index 4260c9e4f489..190e93af20a1 100644
--- a/tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c
+++ b/tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c
@@ -19,7 +19,7 @@
/* L2 is scaled up (from L1's perspective) by this factor */
#define L2_SCALE_FACTOR 4ULL
-#define TSC_OFFSET_L2 ((uint64_t) -33125236320908)
+#define TSC_OFFSET_L2 ((u64)-33125236320908)
#define TSC_MULTIPLIER_L2 (L2_SCALE_FACTOR << 48)
#define L2_GUEST_STACK_SIZE 64
@@ -35,9 +35,9 @@ enum { USLEEP, UCHECK_L1, UCHECK_L2 };
* measurements, a difference of 1% between the actual and the expected value
* is tolerated.
*/
-static void compare_tsc_freq(uint64_t actual, uint64_t expected)
+static void compare_tsc_freq(u64 actual, u64 expected)
{
- uint64_t tolerance, thresh_low, thresh_high;
+ u64 tolerance, thresh_low, thresh_high;
tolerance = expected / 100;
thresh_low = expected - tolerance;
@@ -55,7 +55,7 @@ static void compare_tsc_freq(uint64_t actual, uint64_t expected)
static void check_tsc_freq(int level)
{
- uint64_t tsc_start, tsc_end, tsc_freq;
+ u64 tsc_start, tsc_end, tsc_freq;
/*
* Reading the TSC twice with about a second's difference should give
@@ -106,7 +106,7 @@ static void l1_svm_code(struct svm_test_data *svm)
static void l1_vmx_code(struct vmx_pages *vmx_pages)
{
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
- uint32_t control;
+ u32 control;
/* check that L1's frequency looks alright before launching L2 */
check_tsc_freq(UCHECK_L1);
@@ -152,14 +152,14 @@ int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- vm_vaddr_t guest_gva = 0;
-
- uint64_t tsc_start, tsc_end;
- uint64_t tsc_khz;
- uint64_t l1_scale_factor;
- uint64_t l0_tsc_freq = 0;
- uint64_t l1_tsc_freq = 0;
- uint64_t l2_tsc_freq = 0;
+ gva_t guest_gva = 0;
+
+ u64 tsc_start, tsc_end;
+ u64 tsc_khz;
+ u64 l1_scale_factor;
+ u64 l0_tsc_freq = 0;
+ u64 l1_tsc_freq = 0;
+ u64 l2_tsc_freq = 0;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) ||
kvm_cpu_has(X86_FEATURE_SVM));
diff --git a/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c b/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c
index 71717118d692..85d3f4cc76f3 100644
--- a/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c
+++ b/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c
@@ -128,7 +128,7 @@ static void l1_guest_code(struct svm_test_data *svm)
int main(int argc, char *argv[])
{
- vm_vaddr_t nested_gva = 0;
+ gva_t nested_gva = 0;
struct vmcb *test_vmcb[2];
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
diff --git a/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c
index c0d84827f736..70950067b989 100644
--- a/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c
+++ b/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c
@@ -32,7 +32,7 @@
#define RETURN_OPCODE 0xC3
/* Call the specified memory address. */
-static void guest_do_CALL(uint64_t target)
+static void guest_do_CALL(u64 target)
{
((void (*)(void)) target)();
}
@@ -46,14 +46,14 @@ static void guest_do_CALL(uint64_t target)
*/
void guest_code(void)
{
- uint64_t hpage_1 = HPAGE_GVA;
- uint64_t hpage_2 = hpage_1 + (PAGE_SIZE * 512);
- uint64_t hpage_3 = hpage_2 + (PAGE_SIZE * 512);
+ u64 hpage_1 = HPAGE_GVA;
+ u64 hpage_2 = hpage_1 + (PAGE_SIZE * 512);
+ u64 hpage_3 = hpage_2 + (PAGE_SIZE * 512);
- READ_ONCE(*(uint64_t *)hpage_1);
+ READ_ONCE(*(u64 *)hpage_1);
GUEST_SYNC(1);
- READ_ONCE(*(uint64_t *)hpage_2);
+ READ_ONCE(*(u64 *)hpage_2);
GUEST_SYNC(2);
guest_do_CALL(hpage_1);
@@ -62,10 +62,10 @@ void guest_code(void)
guest_do_CALL(hpage_3);
GUEST_SYNC(4);
- READ_ONCE(*(uint64_t *)hpage_1);
+ READ_ONCE(*(u64 *)hpage_1);
GUEST_SYNC(5);
- READ_ONCE(*(uint64_t *)hpage_3);
+ READ_ONCE(*(u64 *)hpage_3);
GUEST_SYNC(6);
}
@@ -107,7 +107,7 @@ void run_test(int reclaim_period_ms, bool disable_nx_huge_pages,
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- uint64_t nr_bytes;
+ u64 nr_bytes;
void *hva;
int r;
diff --git a/tools/testing/selftests/kvm/x86/platform_info_test.c b/tools/testing/selftests/kvm/x86/platform_info_test.c
index 9cbf283ebc55..80bb07e6531c 100644
--- a/tools/testing/selftests/kvm/x86/platform_info_test.c
+++ b/tools/testing/selftests/kvm/x86/platform_info_test.c
@@ -23,8 +23,8 @@
static void guest_code(void)
{
- uint64_t msr_platform_info;
- uint8_t vector;
+ u64 msr_platform_info;
+ u8 vector;
GUEST_SYNC(true);
msr_platform_info = rdmsr(MSR_PLATFORM_INFO);
@@ -42,7 +42,7 @@ int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- uint64_t msr_platform_info;
+ u64 msr_platform_info;
struct ucall uc;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_MSR_PLATFORM_INFO));
diff --git a/tools/testing/selftests/kvm/x86/pmu_counters_test.c b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
index 3eaa216b96c0..dc6afac3aa91 100644
--- a/tools/testing/selftests/kvm/x86/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
@@ -30,9 +30,9 @@
#define NUM_INSNS_RETIRED (NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS)
/* Track which architectural events are supported by hardware. */
-static uint32_t hardware_pmu_arch_events;
+static u32 hardware_pmu_arch_events;
-static uint8_t kvm_pmu_version;
+static u8 kvm_pmu_version;
static bool kvm_has_perf_caps;
#define X86_PMU_FEATURE_NULL \
@@ -57,7 +57,7 @@ struct kvm_intel_pmu_event {
* kvm_x86_pmu_feature use syntax that's only valid in function scope, and the
* compiler often thinks the feature definitions aren't compile-time constants.
*/
-static struct kvm_intel_pmu_event intel_event_to_feature(uint8_t idx)
+static struct kvm_intel_pmu_event intel_event_to_feature(u8 idx)
{
const struct kvm_intel_pmu_event __intel_event_to_feature[] = {
[INTEL_ARCH_CPU_CYCLES_INDEX] = { X86_PMU_FEATURE_CPU_CYCLES, X86_PMU_FEATURE_CPU_CYCLES_FIXED },
@@ -89,8 +89,8 @@ static struct kvm_intel_pmu_event intel_event_to_feature(uint8_t idx)
static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
void *guest_code,
- uint8_t pmu_version,
- uint64_t perf_capabilities)
+ u8 pmu_version,
+ u64 perf_capabilities)
{
struct kvm_vm *vm;
@@ -132,7 +132,7 @@ static void run_vcpu(struct kvm_vcpu *vcpu)
} while (uc.cmd != UCALL_DONE);
}
-static uint8_t guest_get_pmu_version(void)
+static u8 guest_get_pmu_version(void)
{
/*
* Return the effective PMU version, i.e. the minimum between what KVM
@@ -141,7 +141,7 @@ static uint8_t guest_get_pmu_version(void)
* supported by KVM to verify KVM doesn't freak out and do something
* bizarre with an architecturally valid, but unsupported, version.
*/
- return min_t(uint8_t, kvm_pmu_version, this_cpu_property(X86_PROPERTY_PMU_VERSION));
+ return min_t(u8, kvm_pmu_version, this_cpu_property(X86_PROPERTY_PMU_VERSION));
}
/*
@@ -153,9 +153,9 @@ static uint8_t guest_get_pmu_version(void)
* Sanity check that in all cases, the event doesn't count when it's disabled,
* and that KVM correctly emulates the write of an arbitrary value.
*/
-static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr)
+static void guest_assert_event_count(u8 idx, u32 pmc, u32 pmc_msr)
{
- uint64_t count;
+ u64 count;
count = _rdpmc(pmc);
if (!(hardware_pmu_arch_events & BIT(idx)))
@@ -236,7 +236,7 @@ do { \
FEP "xor %%eax, %%eax\n\t" \
FEP "xor %%edx, %%edx\n\t" \
"wrmsr\n\t" \
- :: "a"((uint32_t)_value), "d"(_value >> 32), \
+ :: "a"((u32)_value), "d"(_value >> 32), \
"c"(_msr), "D"(_msr), [m]"m"(kvm_pmu_version) \
); \
} while (0)
@@ -255,8 +255,8 @@ do { \
guest_assert_event_count(_idx, _pmc, _pmc_msr); \
} while (0)
-static void __guest_test_arch_event(uint8_t idx, uint32_t pmc, uint32_t pmc_msr,
- uint32_t ctrl_msr, uint64_t ctrl_msr_value)
+static void __guest_test_arch_event(u8 idx, u32 pmc, u32 pmc_msr,
+ u32 ctrl_msr, u64 ctrl_msr_value)
{
GUEST_TEST_EVENT(idx, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, "");
@@ -264,14 +264,14 @@ static void __guest_test_arch_event(uint8_t idx, uint32_t pmc, uint32_t pmc_msr,
GUEST_TEST_EVENT(idx, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP);
}
-static void guest_test_arch_event(uint8_t idx)
+static void guest_test_arch_event(u8 idx)
{
- uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
- uint32_t pmu_version = guest_get_pmu_version();
+ u32 nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
+ u32 pmu_version = guest_get_pmu_version();
/* PERF_GLOBAL_CTRL exists only for Architectural PMU Version 2+. */
bool guest_has_perf_global_ctrl = pmu_version >= 2;
struct kvm_x86_pmu_feature gp_event, fixed_event;
- uint32_t base_pmc_msr;
+ u32 base_pmc_msr;
unsigned int i;
/* The host side shouldn't invoke this without a guest PMU. */
@@ -289,7 +289,7 @@ static void guest_test_arch_event(uint8_t idx)
GUEST_ASSERT(nr_gp_counters);
for (i = 0; i < nr_gp_counters; i++) {
- uint64_t eventsel = ARCH_PERFMON_EVENTSEL_OS |
+ u64 eventsel = ARCH_PERFMON_EVENTSEL_OS |
ARCH_PERFMON_EVENTSEL_ENABLE |
intel_pmu_arch_events[idx];
@@ -320,7 +320,7 @@ static void guest_test_arch_event(uint8_t idx)
static void guest_test_arch_events(void)
{
- uint8_t i;
+ u8 i;
for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++)
guest_test_arch_event(i);
@@ -328,8 +328,8 @@ static void guest_test_arch_events(void)
GUEST_DONE();
}
-static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
- uint8_t length, uint32_t unavailable_mask)
+static void test_arch_events(u8 pmu_version, u64 perf_capabilities,
+ u8 length, u32 unavailable_mask)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
@@ -373,11 +373,11 @@ __GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector, \
"Expected " #insn "(0x%x) to yield 0x%lx, got 0x%lx", \
msr, expected, val);
-static void guest_test_rdpmc(uint32_t rdpmc_idx, bool expect_success,
- uint64_t expected_val)
+static void guest_test_rdpmc(u32 rdpmc_idx, bool expect_success,
+ u64 expected_val)
{
- uint8_t vector;
- uint64_t val;
+ u8 vector;
+ u64 val;
vector = rdpmc_safe(rdpmc_idx, &val);
GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, !expect_success, vector);
@@ -393,19 +393,19 @@ static void guest_test_rdpmc(uint32_t rdpmc_idx, bool expect_success,
GUEST_ASSERT_PMC_VALUE(RDPMC, rdpmc_idx, val, expected_val);
}
-static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters,
- uint8_t nr_counters, uint32_t or_mask)
+static void guest_rd_wr_counters(u32 base_msr, u8 nr_possible_counters,
+ u8 nr_counters, u32 or_mask)
{
const bool pmu_has_fast_mode = !guest_get_pmu_version();
- uint8_t i;
+ u8 i;
for (i = 0; i < nr_possible_counters; i++) {
/*
* TODO: Test a value that validates full-width writes and the
* width of the counters.
*/
- const uint64_t test_val = 0xffff;
- const uint32_t msr = base_msr + i;
+ const u64 test_val = 0xffff;
+ const u32 msr = base_msr + i;
/*
* Fixed counters are supported if the counter is less than the
@@ -418,12 +418,12 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters
* KVM drops writes to MSR_P6_PERFCTR[0|1] if the counters are
* unsupported, i.e. doesn't #GP and reads back '0'.
*/
- const uint64_t expected_val = expect_success ? test_val : 0;
+ const u64 expected_val = expect_success ? test_val : 0;
const bool expect_gp = !expect_success && msr != MSR_P6_PERFCTR0 &&
msr != MSR_P6_PERFCTR1;
- uint32_t rdpmc_idx;
- uint8_t vector;
- uint64_t val;
+ u32 rdpmc_idx;
+ u8 vector;
+ u64 val;
vector = wrmsr_safe(msr, test_val);
GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector);
@@ -461,9 +461,9 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters
static void guest_test_gp_counters(void)
{
- uint8_t pmu_version = guest_get_pmu_version();
- uint8_t nr_gp_counters = 0;
- uint32_t base_msr;
+ u8 pmu_version = guest_get_pmu_version();
+ u8 nr_gp_counters = 0;
+ u32 base_msr;
if (pmu_version)
nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
@@ -477,7 +477,7 @@ static void guest_test_gp_counters(void)
* counters, of which there are none.
*/
if (pmu_version > 1) {
- uint64_t global_ctrl = rdmsr(MSR_CORE_PERF_GLOBAL_CTRL);
+ u64 global_ctrl = rdmsr(MSR_CORE_PERF_GLOBAL_CTRL);
if (nr_gp_counters)
GUEST_ASSERT_EQ(global_ctrl, GENMASK_ULL(nr_gp_counters - 1, 0));
@@ -495,8 +495,8 @@ static void guest_test_gp_counters(void)
GUEST_DONE();
}
-static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities,
- uint8_t nr_gp_counters)
+static void test_gp_counters(u8 pmu_version, u64 perf_capabilities,
+ u8 nr_gp_counters)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
@@ -514,9 +514,9 @@ static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities,
static void guest_test_fixed_counters(void)
{
- uint64_t supported_bitmask = 0;
- uint8_t nr_fixed_counters = 0;
- uint8_t i;
+ u64 supported_bitmask = 0;
+ u8 nr_fixed_counters = 0;
+ u8 i;
/* Fixed counters require Architectural vPMU Version 2+. */
if (guest_get_pmu_version() >= 2)
@@ -533,8 +533,8 @@ static void guest_test_fixed_counters(void)
nr_fixed_counters, supported_bitmask);
for (i = 0; i < MAX_NR_FIXED_COUNTERS; i++) {
- uint8_t vector;
- uint64_t val;
+ u8 vector;
+ u64 val;
if (i >= nr_fixed_counters && !(supported_bitmask & BIT_ULL(i))) {
vector = wrmsr_safe(MSR_CORE_PERF_FIXED_CTR_CTRL,
@@ -561,9 +561,8 @@ static void guest_test_fixed_counters(void)
GUEST_DONE();
}
-static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities,
- uint8_t nr_fixed_counters,
- uint32_t supported_bitmask)
+static void test_fixed_counters(u8 pmu_version, u64 perf_capabilities,
+ u8 nr_fixed_counters, u32 supported_bitmask)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
@@ -583,14 +582,14 @@ static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities,
static void test_intel_counters(void)
{
- uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
- uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
- uint8_t pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
+ u8 nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+ u8 nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
+ u8 pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
unsigned int i;
- uint8_t v, j;
- uint32_t k;
+ u8 v, j;
+ u32 k;
- const uint64_t perf_caps[] = {
+ const u64 perf_caps[] = {
0,
PMU_CAP_FW_WRITES,
};
@@ -602,7 +601,7 @@ static void test_intel_counters(void)
* as alternating bit sequencues, e.g. to detect if KVM is checking the
* wrong bit(s).
*/
- const uint32_t unavailable_masks[] = {
+ const u32 unavailable_masks[] = {
0x0,
0xffffffffu,
0xaaaaaaaau,
@@ -620,7 +619,7 @@ static void test_intel_counters(void)
* Intel, i.e. is the last version that is guaranteed to be backwards
* compatible with KVM's existing behavior.
*/
- uint8_t max_pmu_version = max_t(typeof(pmu_version), pmu_version, 5);
+ u8 max_pmu_version = max_t(typeof(pmu_version), pmu_version, 5);
/*
* Detect the existence of events that aren't supported by selftests.
diff --git a/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c
index 93b61c077991..c1232344fda8 100644
--- a/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c
@@ -53,11 +53,11 @@ static const struct __kvm_pmu_event_filter base_event_filter = {
};
struct {
- uint64_t loads;
- uint64_t stores;
- uint64_t loads_stores;
- uint64_t branches_retired;
- uint64_t instructions_retired;
+ u64 loads;
+ u64 stores;
+ u64 loads_stores;
+ u64 branches_retired;
+ u64 instructions_retired;
} pmc_results;
/*
@@ -75,9 +75,9 @@ static void guest_gp_handler(struct ex_regs *regs)
*
* Return on success. GUEST_SYNC(0) on error.
*/
-static void check_msr(uint32_t msr, uint64_t bits_to_flip)
+static void check_msr(u32 msr, u64 bits_to_flip)
{
- uint64_t v = rdmsr(msr) ^ bits_to_flip;
+ u64 v = rdmsr(msr) ^ bits_to_flip;
wrmsr(msr, v);
if (rdmsr(msr) != v)
@@ -89,10 +89,10 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip)
GUEST_SYNC(-EIO);
}
-static void run_and_measure_loop(uint32_t msr_base)
+static void run_and_measure_loop(u32 msr_base)
{
- const uint64_t branches_retired = rdmsr(msr_base + 0);
- const uint64_t insn_retired = rdmsr(msr_base + 1);
+ const u64 branches_retired = rdmsr(msr_base + 0);
+ const u64 insn_retired = rdmsr(msr_base + 1);
__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
@@ -147,7 +147,7 @@ static void amd_guest_code(void)
* Run the VM to the next GUEST_SYNC(value), and return the value passed
* to the sync. Any other exit from the guest is fatal.
*/
-static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu)
+static u64 run_vcpu_to_sync(struct kvm_vcpu *vcpu)
{
struct ucall uc;
@@ -161,7 +161,7 @@ static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu)
static void run_vcpu_and_sync_pmc_results(struct kvm_vcpu *vcpu)
{
- uint64_t r;
+ u64 r;
memset(&pmc_results, 0, sizeof(pmc_results));
sync_global_to_guest(vcpu->vm, pmc_results);
@@ -182,7 +182,7 @@ static void run_vcpu_and_sync_pmc_results(struct kvm_vcpu *vcpu)
*/
static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
{
- uint64_t r;
+ u64 r;
vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
r = run_vcpu_to_sync(vcpu);
@@ -195,7 +195,7 @@ static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
* Remove the first occurrence of 'event' (if any) from the filter's
* event list.
*/
-static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event)
+static void remove_event(struct __kvm_pmu_event_filter *f, u64 event)
{
bool found = false;
int i;
@@ -212,8 +212,8 @@ static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event)
#define ASSERT_PMC_COUNTING_INSTRUCTIONS() \
do { \
- uint64_t br = pmc_results.branches_retired; \
- uint64_t ir = pmc_results.instructions_retired; \
+ u64 br = pmc_results.branches_retired; \
+ u64 ir = pmc_results.instructions_retired; \
bool br_matched = this_pmu_has_errata(BRANCHES_RETIRED_OVERCOUNT) ? \
br >= NUM_BRANCHES : br == NUM_BRANCHES; \
\
@@ -228,8 +228,8 @@ do { \
#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS() \
do { \
- uint64_t br = pmc_results.branches_retired; \
- uint64_t ir = pmc_results.instructions_retired; \
+ u64 br = pmc_results.branches_retired; \
+ u64 ir = pmc_results.instructions_retired; \
\
TEST_ASSERT(!br, "%s: Branch instructions retired = %lu (expected 0)", \
__func__, br); \
@@ -378,7 +378,7 @@ static bool use_amd_pmu(void)
static bool supports_event_mem_inst_retired(void)
{
- uint32_t eax, ebx, ecx, edx;
+ u32 eax, ebx, ecx, edx;
cpuid(1, &eax, &ebx, &ecx, &edx);
if (x86_family(eax) == 0x6) {
@@ -415,15 +415,15 @@ static bool supports_event_mem_inst_retired(void)
#define EXCLUDE_MASKED_ENTRY(event_select, mask, match) \
KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, true)
-static void masked_events_guest_test(uint32_t msr_base)
+static void masked_events_guest_test(u32 msr_base)
{
/*
* The actual value of the counters don't determine the outcome of
* the test. Only that they are zero or non-zero.
*/
- const uint64_t loads = rdmsr(msr_base + 0);
- const uint64_t stores = rdmsr(msr_base + 1);
- const uint64_t loads_stores = rdmsr(msr_base + 2);
+ const u64 loads = rdmsr(msr_base + 0);
+ const u64 stores = rdmsr(msr_base + 1);
+ const u64 loads_stores = rdmsr(msr_base + 2);
int val;
@@ -476,7 +476,7 @@ static void amd_masked_events_guest_code(void)
}
static void run_masked_events_test(struct kvm_vcpu *vcpu,
- const uint64_t masked_events[],
+ const u64 masked_events[],
const int nmasked_events)
{
struct __kvm_pmu_event_filter f = {
@@ -485,7 +485,7 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu,
.flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
};
- memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events);
+ memcpy(f.events, masked_events, sizeof(u64) * nmasked_events);
test_with_filter(vcpu, &f);
}
@@ -494,12 +494,12 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu,
#define ALLOW_LOADS_STORES BIT(2)
struct masked_events_test {
- uint64_t intel_events[MAX_TEST_EVENTS];
- uint64_t intel_event_end;
- uint64_t amd_events[MAX_TEST_EVENTS];
- uint64_t amd_event_end;
+ u64 intel_events[MAX_TEST_EVENTS];
+ u64 intel_event_end;
+ u64 amd_events[MAX_TEST_EVENTS];
+ u64 amd_event_end;
const char *msg;
- uint32_t flags;
+ u32 flags;
};
/*
@@ -582,9 +582,9 @@ const struct masked_events_test test_cases[] = {
};
static int append_test_events(const struct masked_events_test *test,
- uint64_t *events, int nevents)
+ u64 *events, int nevents)
{
- const uint64_t *evts;
+ const u64 *evts;
int i;
evts = use_intel_pmu() ? test->intel_events : test->amd_events;
@@ -603,7 +603,7 @@ static bool bool_eq(bool a, bool b)
return a == b;
}
-static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events,
+static void run_masked_events_tests(struct kvm_vcpu *vcpu, u64 *events,
int nevents)
{
int ntests = ARRAY_SIZE(test_cases);
@@ -630,7 +630,7 @@ static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events,
}
}
-static void add_dummy_events(uint64_t *events, int nevents)
+static void add_dummy_events(u64 *events, int nevents)
{
int i;
@@ -650,7 +650,7 @@ static void add_dummy_events(uint64_t *events, int nevents)
static void test_masked_events(struct kvm_vcpu *vcpu)
{
int nevents = KVM_PMU_EVENT_FILTER_MAX_EVENTS - MAX_TEST_EVENTS;
- uint64_t events[KVM_PMU_EVENT_FILTER_MAX_EVENTS];
+ u64 events[KVM_PMU_EVENT_FILTER_MAX_EVENTS];
/* Run the test cases against a sparse PMU event filter. */
run_masked_events_tests(vcpu, events, 0);
@@ -668,8 +668,8 @@ static int set_pmu_event_filter(struct kvm_vcpu *vcpu,
return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
}
-static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,
- uint32_t flags, uint32_t action)
+static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, u64 event,
+ u32 flags, u32 action)
{
struct __kvm_pmu_event_filter f = {
.nevents = 1,
@@ -685,9 +685,9 @@ static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,
static void test_filter_ioctl(struct kvm_vcpu *vcpu)
{
- uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+ u8 nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
struct __kvm_pmu_event_filter f;
- uint64_t e = ~0ul;
+ u64 e = ~0ul;
int r;
/*
@@ -729,7 +729,7 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
}
-static void intel_run_fixed_counter_guest_code(uint8_t idx)
+static void intel_run_fixed_counter_guest_code(u8 idx)
{
for (;;) {
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
@@ -745,8 +745,8 @@ static void intel_run_fixed_counter_guest_code(uint8_t idx)
}
}
-static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu,
- uint32_t action, uint32_t bitmap)
+static u64 test_with_fixed_counter_filter(struct kvm_vcpu *vcpu,
+ u32 action, u32 bitmap)
{
struct __kvm_pmu_event_filter f = {
.action = action,
@@ -757,9 +757,9 @@ static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu,
return run_vcpu_to_sync(vcpu);
}
-static uint64_t test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu,
- uint32_t action,
- uint32_t bitmap)
+static u64 test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu,
+ u32 action,
+ u32 bitmap)
{
struct __kvm_pmu_event_filter f = base_event_filter;
@@ -770,12 +770,12 @@ static uint64_t test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu,
return run_vcpu_to_sync(vcpu);
}
-static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
- uint8_t nr_fixed_counters)
+static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, u8 idx,
+ u8 nr_fixed_counters)
{
unsigned int i;
- uint32_t bitmap;
- uint64_t count;
+ u32 bitmap;
+ u64 count;
TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * 8,
"Invalid nr_fixed_counters");
@@ -815,10 +815,10 @@ static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
static void test_fixed_counter_bitmap(void)
{
- uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+ u8 nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
- uint8_t idx;
+ u8 idx;
/*
* Check that pmu_event_filter works as expected when it's applied to
diff --git a/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c b/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
index 1969f4ab9b28..1d2f5d4fd45d 100644
--- a/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
+++ b/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
@@ -23,13 +23,13 @@
#include <processor.h>
#define BASE_DATA_SLOT 10
-#define BASE_DATA_GPA ((uint64_t)(1ull << 32))
-#define PER_CPU_DATA_SIZE ((uint64_t)(SZ_2M + PAGE_SIZE))
+#define BASE_DATA_GPA ((u64)(1ull << 32))
+#define PER_CPU_DATA_SIZE ((u64)(SZ_2M + PAGE_SIZE))
/* Horrific macro so that the line info is captured accurately :-( */
#define memcmp_g(gpa, pattern, size) \
do { \
- uint8_t *mem = (uint8_t *)gpa; \
+ u8 *mem = (u8 *)gpa; \
size_t i; \
\
for (i = 0; i < size; i++) \
@@ -38,7 +38,7 @@ do { \
pattern, i, gpa + i, mem[i]); \
} while (0)
-static void memcmp_h(uint8_t *mem, uint64_t gpa, uint8_t pattern, size_t size)
+static void memcmp_h(u8 *mem, gpa_t gpa, u8 pattern, size_t size)
{
size_t i;
@@ -70,13 +70,13 @@ enum ucall_syncs {
SYNC_PRIVATE,
};
-static void guest_sync_shared(uint64_t gpa, uint64_t size,
- uint8_t current_pattern, uint8_t new_pattern)
+static void guest_sync_shared(gpa_t gpa, u64 size,
+ u8 current_pattern, u8 new_pattern)
{
GUEST_SYNC5(SYNC_SHARED, gpa, size, current_pattern, new_pattern);
}
-static void guest_sync_private(uint64_t gpa, uint64_t size, uint8_t pattern)
+static void guest_sync_private(gpa_t gpa, u64 size, u8 pattern)
{
GUEST_SYNC4(SYNC_PRIVATE, gpa, size, pattern);
}
@@ -86,10 +86,10 @@ static void guest_sync_private(uint64_t gpa, uint64_t size, uint8_t pattern)
#define MAP_GPA_SHARED BIT(1)
#define MAP_GPA_DO_FALLOCATE BIT(2)
-static void guest_map_mem(uint64_t gpa, uint64_t size, bool map_shared,
+static void guest_map_mem(gpa_t gpa, u64 size, bool map_shared,
bool do_fallocate)
{
- uint64_t flags = MAP_GPA_SET_ATTRIBUTES;
+ u64 flags = MAP_GPA_SET_ATTRIBUTES;
if (map_shared)
flags |= MAP_GPA_SHARED;
@@ -98,19 +98,19 @@ static void guest_map_mem(uint64_t gpa, uint64_t size, bool map_shared,
kvm_hypercall_map_gpa_range(gpa, size, flags);
}
-static void guest_map_shared(uint64_t gpa, uint64_t size, bool do_fallocate)
+static void guest_map_shared(gpa_t gpa, u64 size, bool do_fallocate)
{
guest_map_mem(gpa, size, true, do_fallocate);
}
-static void guest_map_private(uint64_t gpa, uint64_t size, bool do_fallocate)
+static void guest_map_private(gpa_t gpa, u64 size, bool do_fallocate)
{
guest_map_mem(gpa, size, false, do_fallocate);
}
struct {
- uint64_t offset;
- uint64_t size;
+ u64 offset;
+ u64 size;
} static const test_ranges[] = {
GUEST_STAGE(0, PAGE_SIZE),
GUEST_STAGE(0, SZ_2M),
@@ -119,11 +119,11 @@ struct {
GUEST_STAGE(SZ_2M, PAGE_SIZE),
};
-static void guest_test_explicit_conversion(uint64_t base_gpa, bool do_fallocate)
+static void guest_test_explicit_conversion(u64 base_gpa, bool do_fallocate)
{
- const uint8_t def_p = 0xaa;
- const uint8_t init_p = 0xcc;
- uint64_t j;
+ const u8 def_p = 0xaa;
+ const u8 init_p = 0xcc;
+ u64 j;
int i;
/* Memory should be shared by default. */
@@ -134,12 +134,12 @@ static void guest_test_explicit_conversion(uint64_t base_gpa, bool do_fallocate)
memcmp_g(base_gpa, init_p, PER_CPU_DATA_SIZE);
for (i = 0; i < ARRAY_SIZE(test_ranges); i++) {
- uint64_t gpa = base_gpa + test_ranges[i].offset;
- uint64_t size = test_ranges[i].size;
- uint8_t p1 = 0x11;
- uint8_t p2 = 0x22;
- uint8_t p3 = 0x33;
- uint8_t p4 = 0x44;
+ gpa_t gpa = base_gpa + test_ranges[i].offset;
+ u64 size = test_ranges[i].size;
+ u8 p1 = 0x11;
+ u8 p2 = 0x22;
+ u8 p3 = 0x33;
+ u8 p4 = 0x44;
/*
* Set the test region to pattern one to differentiate it from
@@ -214,10 +214,10 @@ skip:
}
}
-static void guest_punch_hole(uint64_t gpa, uint64_t size)
+static void guest_punch_hole(gpa_t gpa, u64 size)
{
/* "Mapping" memory shared via fallocate() is done via PUNCH_HOLE. */
- uint64_t flags = MAP_GPA_SHARED | MAP_GPA_DO_FALLOCATE;
+ u64 flags = MAP_GPA_SHARED | MAP_GPA_DO_FALLOCATE;
kvm_hypercall_map_gpa_range(gpa, size, flags);
}
@@ -227,9 +227,9 @@ static void guest_punch_hole(uint64_t gpa, uint64_t size)
* proper conversion. Freeing (PUNCH_HOLE) should zap SPTEs, and reallocating
* (subsequent fault) should zero memory.
*/
-static void guest_test_punch_hole(uint64_t base_gpa, bool precise)
+static void guest_test_punch_hole(u64 base_gpa, bool precise)
{
- const uint8_t init_p = 0xcc;
+ const u8 init_p = 0xcc;
int i;
/*
@@ -239,8 +239,8 @@ static void guest_test_punch_hole(uint64_t base_gpa, bool precise)
guest_map_private(base_gpa, PER_CPU_DATA_SIZE, false);
for (i = 0; i < ARRAY_SIZE(test_ranges); i++) {
- uint64_t gpa = base_gpa + test_ranges[i].offset;
- uint64_t size = test_ranges[i].size;
+ gpa_t gpa = base_gpa + test_ranges[i].offset;
+ u64 size = test_ranges[i].size;
/*
* Free all memory before each iteration, even for the !precise
@@ -268,7 +268,7 @@ static void guest_test_punch_hole(uint64_t base_gpa, bool precise)
}
}
-static void guest_code(uint64_t base_gpa)
+static void guest_code(u64 base_gpa)
{
/*
* Run the conversion test twice, with and without doing fallocate() on
@@ -289,8 +289,8 @@ static void guest_code(uint64_t base_gpa)
static void handle_exit_hypercall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint64_t gpa = run->hypercall.args[0];
- uint64_t size = run->hypercall.args[1] * PAGE_SIZE;
+ gpa_t gpa = run->hypercall.args[0];
+ u64 size = run->hypercall.args[1] * PAGE_SIZE;
bool set_attributes = run->hypercall.args[2] & MAP_GPA_SET_ATTRIBUTES;
bool map_shared = run->hypercall.args[2] & MAP_GPA_SHARED;
bool do_fallocate = run->hypercall.args[2] & MAP_GPA_DO_FALLOCATE;
@@ -337,7 +337,7 @@ static void *__test_mem_conversions(void *__vcpu)
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
case UCALL_SYNC: {
- uint64_t gpa = uc.args[1];
+ gpa_t gpa = uc.args[1];
size_t size = uc.args[2];
size_t i;
@@ -347,7 +347,7 @@ static void *__test_mem_conversions(void *__vcpu)
for (i = 0; i < size; i += vm->page_size) {
size_t nr_bytes = min_t(size_t, vm->page_size, size - i);
- uint8_t *hva = addr_gpa2hva(vm, gpa + i);
+ u8 *hva = addr_gpa2hva(vm, gpa + i);
/* In all cases, the host should observe the shared data. */
memcmp_h(hva, gpa + i, uc.args[3], nr_bytes);
@@ -366,8 +366,8 @@ static void *__test_mem_conversions(void *__vcpu)
}
}
-static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t nr_vcpus,
- uint32_t nr_memslots)
+static void test_mem_conversions(enum vm_mem_backing_src_type src_type, u32 nr_vcpus,
+ u32 nr_memslots)
{
/*
* Allocate enough memory so that each vCPU's chunk of memory can be
@@ -402,7 +402,7 @@ static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t
KVM_MEM_GUEST_MEMFD, memfd, slot_size * i);
for (i = 0; i < nr_vcpus; i++) {
- uint64_t gpa = BASE_DATA_GPA + i * per_cpu_size;
+ gpa_t gpa = BASE_DATA_GPA + i * per_cpu_size;
vcpu_args_set(vcpus[i], 1, gpa);
@@ -450,8 +450,8 @@ static void usage(const char *cmd)
int main(int argc, char *argv[])
{
enum vm_mem_backing_src_type src_type = DEFAULT_VM_MEM_SRC;
- uint32_t nr_memslots = 1;
- uint32_t nr_vcpus = 1;
+ u32 nr_memslots = 1;
+ u32 nr_vcpus = 1;
int opt;
TEST_REQUIRE(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM));
diff --git a/tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c b/tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c
index 13e72fcec8dd..10db9fe6d906 100644
--- a/tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c
+++ b/tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c
@@ -17,17 +17,17 @@
#define EXITS_TEST_SIZE (EXITS_TEST_NPAGES * PAGE_SIZE)
#define EXITS_TEST_SLOT 10
-static uint64_t guest_repeatedly_read(void)
+static u64 guest_repeatedly_read(void)
{
- volatile uint64_t value;
+ volatile u64 value;
while (true)
- value = *((uint64_t *) EXITS_TEST_GVA);
+ value = *((u64 *)EXITS_TEST_GVA);
return value;
}
-static uint32_t run_vcpu_get_exit_reason(struct kvm_vcpu *vcpu)
+static u32 run_vcpu_get_exit_reason(struct kvm_vcpu *vcpu)
{
int r;
@@ -50,7 +50,7 @@ static void test_private_access_memslot_deleted(void)
struct kvm_vcpu *vcpu;
pthread_t vm_thread;
void *thread_return;
- uint32_t exit_reason;
+ u32 exit_reason;
vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu,
guest_repeatedly_read);
@@ -72,7 +72,7 @@ static void test_private_access_memslot_deleted(void)
vm_mem_region_delete(vm, EXITS_TEST_SLOT);
pthread_join(vm_thread, &thread_return);
- exit_reason = (uint32_t)(uint64_t)thread_return;
+ exit_reason = (u32)(u64)thread_return;
TEST_ASSERT_EQ(exit_reason, KVM_EXIT_MEMORY_FAULT);
TEST_ASSERT_EQ(vcpu->run->memory_fault.flags, KVM_MEMORY_EXIT_FLAG_PRIVATE);
@@ -86,7 +86,7 @@ static void test_private_access_memslot_not_private(void)
{
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
- uint32_t exit_reason;
+ u32 exit_reason;
vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu,
guest_repeatedly_read);
diff --git a/tools/testing/selftests/kvm/x86/set_boot_cpu_id.c b/tools/testing/selftests/kvm/x86/set_boot_cpu_id.c
index 49913784bc82..8e3898646c69 100644
--- a/tools/testing/selftests/kvm/x86/set_boot_cpu_id.c
+++ b/tools/testing/selftests/kvm/x86/set_boot_cpu_id.c
@@ -86,11 +86,11 @@ static void run_vcpu(struct kvm_vcpu *vcpu)
}
}
-static struct kvm_vm *create_vm(uint32_t nr_vcpus, uint32_t bsp_vcpu_id,
+static struct kvm_vm *create_vm(u32 nr_vcpus, u32 bsp_vcpu_id,
struct kvm_vcpu *vcpus[])
{
struct kvm_vm *vm;
- uint32_t i;
+ u32 i;
vm = vm_create(nr_vcpus);
@@ -104,7 +104,7 @@ static struct kvm_vm *create_vm(uint32_t nr_vcpus, uint32_t bsp_vcpu_id,
return vm;
}
-static void run_vm_bsp(uint32_t bsp_vcpu_id)
+static void run_vm_bsp(u32 bsp_vcpu_id)
{
struct kvm_vcpu *vcpus[2];
struct kvm_vm *vm;
diff --git a/tools/testing/selftests/kvm/x86/set_sregs_test.c b/tools/testing/selftests/kvm/x86/set_sregs_test.c
index f4095a3d1278..8e654cc9ab16 100644
--- a/tools/testing/selftests/kvm/x86/set_sregs_test.c
+++ b/tools/testing/selftests/kvm/x86/set_sregs_test.c
@@ -46,9 +46,9 @@ do { \
X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE | \
X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT)
-static uint64_t calc_supported_cr4_feature_bits(void)
+static u64 calc_supported_cr4_feature_bits(void)
{
- uint64_t cr4 = KVM_ALWAYS_ALLOWED_CR4;
+ u64 cr4 = KVM_ALWAYS_ALLOWED_CR4;
if (kvm_cpu_has(X86_FEATURE_UMIP))
cr4 |= X86_CR4_UMIP;
@@ -74,7 +74,7 @@ static uint64_t calc_supported_cr4_feature_bits(void)
return cr4;
}
-static void test_cr_bits(struct kvm_vcpu *vcpu, uint64_t cr4)
+static void test_cr_bits(struct kvm_vcpu *vcpu, u64 cr4)
{
struct kvm_sregs sregs;
int rc, i;
diff --git a/tools/testing/selftests/kvm/x86/sev_init2_tests.c b/tools/testing/selftests/kvm/x86/sev_init2_tests.c
index b238615196ad..8eeba2327c7c 100644
--- a/tools/testing/selftests/kvm/x86/sev_init2_tests.c
+++ b/tools/testing/selftests/kvm/x86/sev_init2_tests.c
@@ -34,7 +34,7 @@ static int __sev_ioctl(int vm_fd, int cmd_id, void *data)
{
struct kvm_sev_cmd cmd = {
.id = cmd_id,
- .data = (uint64_t)data,
+ .data = (u64)data,
.sev_fd = open_sev_dev_path_or_exit(),
};
int ret;
@@ -94,7 +94,7 @@ void test_vm_types(void)
"VM type is KVM_X86_SW_PROTECTED_VM");
}
-void test_flags(uint32_t vm_type)
+void test_flags(u32 vm_type)
{
int i;
@@ -104,7 +104,7 @@ void test_flags(uint32_t vm_type)
"invalid flag");
}
-void test_features(uint32_t vm_type, uint64_t supported_features)
+void test_features(u32 vm_type, u64 supported_features)
{
int i;
diff --git a/tools/testing/selftests/kvm/x86/sev_smoke_test.c b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
index 8bd37a476f15..1a49ee391586 100644
--- a/tools/testing/selftests/kvm/x86/sev_smoke_test.c
+++ b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
@@ -13,9 +13,9 @@
#include "linux/psp-sev.h"
#include "sev.h"
-static void guest_sev_test_msr(uint32_t msr)
+static void guest_sev_test_msr(u32 msr)
{
- uint64_t val = rdmsr(msr);
+ u64 val = rdmsr(msr);
wrmsr(msr, val);
GUEST_ASSERT(val == rdmsr(msr));
@@ -23,7 +23,7 @@ static void guest_sev_test_msr(uint32_t msr)
#define guest_sev_test_reg(reg) \
do { \
- uint64_t val = get_##reg(); \
+ u64 val = get_##reg(); \
\
set_##reg(val); \
GUEST_ASSERT(val == get_##reg()); \
@@ -42,7 +42,7 @@ static void guest_sev_test_regs(void)
static void guest_snp_code(void)
{
- uint64_t sev_msr = rdmsr(MSR_AMD64_SEV);
+ u64 sev_msr = rdmsr(MSR_AMD64_SEV);
GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ENABLED);
GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ES_ENABLED);
@@ -104,19 +104,19 @@ static void compare_xsave(u8 *from_host, u8 *from_guest)
abort();
}
-static void test_sync_vmsa(uint32_t type, uint64_t policy)
+static void test_sync_vmsa(u32 type, u64 policy)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- vm_vaddr_t gva;
+ gva_t gva;
void *hva;
double x87val = M_PI;
struct kvm_xsave __attribute__((aligned(64))) xsave = { 0 };
vm = vm_sev_create_with_one_vcpu(type, guest_code_xsave, &vcpu);
- gva = vm_vaddr_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR,
- MEM_REGION_TEST_DATA);
+ gva = vm_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR,
+ MEM_REGION_TEST_DATA);
hva = addr_gva2hva(vm, gva);
vcpu_args_set(vcpu, 1, gva);
@@ -150,7 +150,7 @@ static void test_sync_vmsa(uint32_t type, uint64_t policy)
kvm_vm_free(vm);
}
-static void test_sev(void *guest_code, uint32_t type, uint64_t policy)
+static void test_sev(void *guest_code, u32 type, u64 policy)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
@@ -201,7 +201,7 @@ static void guest_shutdown_code(void)
__asm__ __volatile__("ud2");
}
-static void test_sev_shutdown(uint32_t type, uint64_t policy)
+static void test_sev_shutdown(u32 type, u64 policy)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
@@ -218,7 +218,7 @@ static void test_sev_shutdown(uint32_t type, uint64_t policy)
kvm_vm_free(vm);
}
-static void test_sev_smoke(void *guest, uint32_t type, uint64_t policy)
+static void test_sev_smoke(void *guest, u32 type, u64 policy)
{
const u64 xf_mask = XFEATURE_MASK_X87_AVX;
diff --git a/tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c b/tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c
index 0e8aec568010..3dca85e95478 100644
--- a/tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c
+++ b/tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c
@@ -20,8 +20,8 @@
static void guest_code(bool tdp_enabled)
{
- uint64_t error_code;
- uint64_t vector;
+ u64 error_code;
+ u64 vector;
vector = kvm_asm_safe_ec(FLDS_MEM_EAX, error_code, "a"(MEM_REGION_GVA));
@@ -47,8 +47,8 @@ int main(int argc, char *argv[])
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
- uint64_t *hva;
- uint64_t gpa;
+ u64 *hva;
+ gpa_t gpa;
int rc;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR));
diff --git a/tools/testing/selftests/kvm/x86/smm_test.c b/tools/testing/selftests/kvm/x86/smm_test.c
index ade8412bf94a..740051167dbd 100644
--- a/tools/testing/selftests/kvm/x86/smm_test.c
+++ b/tools/testing/selftests/kvm/x86/smm_test.c
@@ -34,13 +34,13 @@
* independent subset of asm here.
* SMI handler always report back fixed stage SMRAM_STAGE.
*/
-uint8_t smi_handler[] = {
+u8 smi_handler[] = {
0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */
0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */
0x0f, 0xaa, /* rsm */
};
-static inline void sync_with_host(uint64_t phase)
+static inline void sync_with_host(u64 phase)
{
asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
: "+a" (phase));
@@ -65,7 +65,7 @@ static void guest_code(void *arg)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
- uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
+ u64 apicbase = rdmsr(MSR_IA32_APICBASE);
struct svm_test_data *svm = arg;
struct vmx_pages *vmx_pages = arg;
@@ -113,7 +113,7 @@ static void guest_code(void *arg)
int main(int argc, char *argv[])
{
- vm_vaddr_t nested_gva = 0;
+ gva_t nested_gva = 0;
struct kvm_vcpu *vcpu;
struct kvm_regs regs;
diff --git a/tools/testing/selftests/kvm/x86/state_test.c b/tools/testing/selftests/kvm/x86/state_test.c
index 992a52504a4a..409c6cc9f921 100644
--- a/tools/testing/selftests/kvm/x86/state_test.c
+++ b/tools/testing/selftests/kvm/x86/state_test.c
@@ -144,8 +144,8 @@ static void __attribute__((__flatten__)) guest_code(void *arg)
GUEST_SYNC(1);
if (this_cpu_has(X86_FEATURE_XSAVE)) {
- uint64_t supported_xcr0 = this_cpu_supported_xcr0();
- uint8_t buffer[PAGE_SIZE];
+ u64 supported_xcr0 = this_cpu_supported_xcr0();
+ u8 buffer[PAGE_SIZE];
memset(buffer, 0xcc, sizeof(buffer));
@@ -172,8 +172,8 @@ static void __attribute__((__flatten__)) guest_code(void *arg)
}
if (this_cpu_has(X86_FEATURE_MPX)) {
- uint64_t bounds[2] = { 10, 0xffffffffull };
- uint64_t output[2] = { };
+ u64 bounds[2] = { 10, 0xffffffffull };
+ u64 output[2] = { };
GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_BNDREGS);
GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_BNDCSR);
@@ -257,8 +257,8 @@ void check_nested_state(int stage, struct kvm_x86_state *state)
int main(int argc, char *argv[])
{
- uint64_t *xstate_bv, saved_xstate_bv;
- vm_vaddr_t nested_gva = 0;
+ u64 *xstate_bv, saved_xstate_bv;
+ gva_t nested_gva = 0;
struct kvm_cpuid2 empty_cpuid = {};
struct kvm_regs regs1, regs2;
struct kvm_vcpu *vcpu, *vcpuN;
@@ -331,7 +331,7 @@ int main(int argc, char *argv[])
* supported features, even if something goes awry in saving
* the original snapshot.
*/
- xstate_bv = (void *)&((uint8_t *)state->xsave->region)[512];
+ xstate_bv = (void *)&((u8 *)state->xsave->region)[512];
saved_xstate_bv = *xstate_bv;
vcpuN = __vm_vcpu_add(vm, vcpu->id + 1);
diff --git a/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c b/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c
index 917b6066cfc1..d3cc5e4f7883 100644
--- a/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c
+++ b/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c
@@ -82,7 +82,7 @@ static void l1_guest_code(struct svm_test_data *svm)
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
- vm_vaddr_t svm_gva;
+ gva_t svm_gva;
struct kvm_vm *vm;
struct ucall uc;
diff --git a/tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c b/tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c
index ff99438824d3..7fbfaa054c95 100644
--- a/tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c
+++ b/tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c
@@ -97,9 +97,9 @@ void test_lbrv_nested_state(bool nested_lbrv)
{
struct kvm_x86_state *state = NULL;
struct kvm_vcpu *vcpu;
- vm_vaddr_t svm_gva;
struct kvm_vm *vm;
struct ucall uc;
+ gva_t svm_gva;
pr_info("Testing with nested LBRV %s\n", nested_lbrv ? "enabled" : "disabled");
diff --git a/tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c b/tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c
index a521a9eed061..6a89eaffc657 100644
--- a/tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c
+++ b/tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c
@@ -38,7 +38,7 @@ int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- vm_vaddr_t nested_gva = 0;
+ gva_t nested_gva = 0;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
diff --git a/tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c b/tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c
index 00135cbba35e..c6ea3d609a62 100644
--- a/tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c
+++ b/tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c
@@ -42,7 +42,7 @@ static void l1_guest_code(struct svm_test_data *svm, struct idt_entry *idt)
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
- vm_vaddr_t svm_gva;
+ gva_t svm_gva;
struct kvm_vm *vm;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
diff --git a/tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c b/tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c
index 4bd1655f9e6d..f72f11d4c4f8 100644
--- a/tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c
+++ b/tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c
@@ -76,7 +76,7 @@ static void l2_guest_code_nmi(void)
ud2();
}
-static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t idt_alt)
+static void l1_guest_code(struct svm_test_data *svm, u64 is_nmi, u64 idt_alt)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
@@ -144,8 +144,8 @@ static void run_test(bool is_nmi)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- vm_vaddr_t svm_gva;
- vm_vaddr_t idt_alt_vm;
+ gva_t svm_gva;
+ gva_t idt_alt_vm;
struct kvm_guest_debug debug;
pr_info("Running %s test\n", is_nmi ? "NMI" : "soft int");
@@ -161,14 +161,14 @@ static void run_test(bool is_nmi)
if (!is_nmi) {
void *idt, *idt_alt;
- idt_alt_vm = vm_vaddr_alloc_page(vm);
+ idt_alt_vm = vm_alloc_page(vm);
idt_alt = addr_gva2hva(vm, idt_alt_vm);
idt = addr_gva2hva(vm, vm->arch.idt);
memcpy(idt_alt, idt, getpagesize());
} else {
idt_alt_vm = 0;
}
- vcpu_args_set(vcpu, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm);
+ vcpu_args_set(vcpu, 3, svm_gva, (u64)is_nmi, (u64)idt_alt_vm);
memset(&debug, 0, sizeof(debug));
vcpu_guest_debug_set(vcpu, &debug);
diff --git a/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c b/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c
index 569869bed20b..a4935ce2fb99 100644
--- a/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c
+++ b/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c
@@ -28,28 +28,28 @@ static void l2_code(void)
vmcall();
}
-static void l1_vmrun(struct svm_test_data *svm, u64 gpa)
+static void l1_vmrun(struct svm_test_data *svm, gpa_t gpa)
{
generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
asm volatile ("vmrun %[gpa]" : : [gpa] "a" (gpa) : "memory");
}
-static void l1_vmload(struct svm_test_data *svm, u64 gpa)
+static void l1_vmload(struct svm_test_data *svm, gpa_t gpa)
{
generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
asm volatile ("vmload %[gpa]" : : [gpa] "a" (gpa) : "memory");
}
-static void l1_vmsave(struct svm_test_data *svm, u64 gpa)
+static void l1_vmsave(struct svm_test_data *svm, gpa_t gpa)
{
generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
asm volatile ("vmsave %[gpa]" : : [gpa] "a" (gpa) : "memory");
}
-static void l1_vmexit(struct svm_test_data *svm, u64 gpa)
+static void l1_vmexit(struct svm_test_data *svm, gpa_t gpa)
{
generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
@@ -74,7 +74,7 @@ static u64 unmappable_gpa(struct kvm_vcpu *vcpu)
static void test_invalid_vmcb12(struct kvm_vcpu *vcpu)
{
- vm_vaddr_t nested_gva = 0;
+ gva_t nested_gva = 0;
struct ucall uc;
@@ -90,7 +90,7 @@ static void test_invalid_vmcb12(struct kvm_vcpu *vcpu)
static void test_unmappable_vmcb12(struct kvm_vcpu *vcpu)
{
- vm_vaddr_t nested_gva = 0;
+ gva_t nested_gva = 0;
vcpu_alloc_svm(vcpu->vm, &nested_gva);
vcpu_args_set(vcpu, 2, nested_gva, unmappable_gpa(vcpu));
@@ -103,7 +103,7 @@ static void test_unmappable_vmcb12(struct kvm_vcpu *vcpu)
static void test_unmappable_vmcb12_vmexit(struct kvm_vcpu *vcpu)
{
struct kvm_x86_state *state;
- vm_vaddr_t nested_gva = 0;
+ gva_t nested_gva = 0;
struct ucall uc;
/*
diff --git a/tools/testing/selftests/kvm/x86/svm_vmcall_test.c b/tools/testing/selftests/kvm/x86/svm_vmcall_test.c
index 8a62cca28cfb..b1887242f3b8 100644
--- a/tools/testing/selftests/kvm/x86/svm_vmcall_test.c
+++ b/tools/testing/selftests/kvm/x86/svm_vmcall_test.c
@@ -36,7 +36,7 @@ static void l1_guest_code(struct svm_test_data *svm)
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
- vm_vaddr_t svm_gva;
+ gva_t svm_gva;
struct kvm_vm *vm;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
diff --git a/tools/testing/selftests/kvm/x86/sync_regs_test.c b/tools/testing/selftests/kvm/x86/sync_regs_test.c
index 8fa3948b0170..e0c52321f87c 100644
--- a/tools/testing/selftests/kvm/x86/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/x86/sync_regs_test.c
@@ -20,7 +20,7 @@
#include "kvm_util.h"
#include "processor.h"
-#define UCALL_PIO_PORT ((uint16_t)0x1000)
+#define UCALL_PIO_PORT ((u16)0x1000)
struct ucall uc_none = {
.cmd = UCALL_NONE,
diff --git a/tools/testing/selftests/kvm/x86/triple_fault_event_test.c b/tools/testing/selftests/kvm/x86/triple_fault_event_test.c
index 56306a19144a..f1c488e0d497 100644
--- a/tools/testing/selftests/kvm/x86/triple_fault_event_test.c
+++ b/tools/testing/selftests/kvm/x86/triple_fault_event_test.c
@@ -72,13 +72,13 @@ int main(void)
if (has_vmx) {
- vm_vaddr_t vmx_pages_gva;
+ gva_t vmx_pages_gva;
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_vmx);
vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vcpu, 1, vmx_pages_gva);
} else {
- vm_vaddr_t svm_gva;
+ gva_t svm_gva;
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_svm);
vcpu_alloc_svm(vm, &svm_gva);
diff --git a/tools/testing/selftests/kvm/x86/tsc_msrs_test.c b/tools/testing/selftests/kvm/x86/tsc_msrs_test.c
index 12b0964f4f13..91583969a14f 100644
--- a/tools/testing/selftests/kvm/x86/tsc_msrs_test.c
+++ b/tools/testing/selftests/kvm/x86/tsc_msrs_test.c
@@ -95,7 +95,7 @@ int main(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- uint64_t val;
+ u64 val;
ksft_print_header();
ksft_set_plan(5);
diff --git a/tools/testing/selftests/kvm/x86/tsc_scaling_sync.c b/tools/testing/selftests/kvm/x86/tsc_scaling_sync.c
index 59c7304f805e..59da8d4da607 100644
--- a/tools/testing/selftests/kvm/x86/tsc_scaling_sync.c
+++ b/tools/testing/selftests/kvm/x86/tsc_scaling_sync.c
@@ -21,10 +21,10 @@ pthread_spinlock_t create_lock;
#define TEST_TSC_KHZ 2345678UL
#define TEST_TSC_OFFSET 200000000
-uint64_t tsc_sync;
+u64 tsc_sync;
static void guest_code(void)
{
- uint64_t start_tsc, local_tsc, tmp;
+ u64 start_tsc, local_tsc, tmp;
start_tsc = rdtsc();
do {
diff --git a/tools/testing/selftests/kvm/x86/ucna_injection_test.c b/tools/testing/selftests/kvm/x86/ucna_injection_test.c
index 1e5e564523b3..df1ec8209c76 100644
--- a/tools/testing/selftests/kvm/x86/ucna_injection_test.c
+++ b/tools/testing/selftests/kvm/x86/ucna_injection_test.c
@@ -45,7 +45,7 @@
#define MCI_CTL2_RESERVED_BIT BIT_ULL(29)
-static uint64_t supported_mcg_caps;
+static u64 supported_mcg_caps;
/*
* Record states about the injected UCNA.
@@ -53,30 +53,30 @@ static uint64_t supported_mcg_caps;
* handler. Variables without the 'i_' prefixes are recorded in guest main
* execution thread.
*/
-static volatile uint64_t i_ucna_rcvd;
-static volatile uint64_t i_ucna_addr;
-static volatile uint64_t ucna_addr;
-static volatile uint64_t ucna_addr2;
+static volatile u64 i_ucna_rcvd;
+static volatile u64 i_ucna_addr;
+static volatile u64 ucna_addr;
+static volatile u64 ucna_addr2;
struct thread_params {
struct kvm_vcpu *vcpu;
- uint64_t *p_i_ucna_rcvd;
- uint64_t *p_i_ucna_addr;
- uint64_t *p_ucna_addr;
- uint64_t *p_ucna_addr2;
+ u64 *p_i_ucna_rcvd;
+ u64 *p_i_ucna_addr;
+ u64 *p_ucna_addr;
+ u64 *p_ucna_addr2;
};
static void verify_apic_base_addr(void)
{
- uint64_t msr = rdmsr(MSR_IA32_APICBASE);
- uint64_t base = GET_APIC_BASE(msr);
+ u64 msr = rdmsr(MSR_IA32_APICBASE);
+ u64 base = GET_APIC_BASE(msr);
GUEST_ASSERT(base == APIC_DEFAULT_GPA);
}
static void ucna_injection_guest_code(void)
{
- uint64_t ctl2;
+ u64 ctl2;
verify_apic_base_addr();
xapic_enable();
@@ -106,7 +106,7 @@ static void ucna_injection_guest_code(void)
static void cmci_disabled_guest_code(void)
{
- uint64_t ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK));
+ u64 ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK));
wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_CMCI_EN);
GUEST_DONE();
@@ -114,7 +114,7 @@ static void cmci_disabled_guest_code(void)
static void cmci_enabled_guest_code(void)
{
- uint64_t ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK));
+ u64 ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK));
wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_RESERVED_BIT);
GUEST_DONE();
@@ -145,14 +145,15 @@ static void run_vcpu_expect_gp(struct kvm_vcpu *vcpu)
printf("vCPU received GP in guest.\n");
}
-static void inject_ucna(struct kvm_vcpu *vcpu, uint64_t addr) {
+static void inject_ucna(struct kvm_vcpu *vcpu, u64 addr)
+{
/*
* A UCNA error is indicated with VAL=1, UC=1, PCC=0, S=0 and AR=0 in
* the IA32_MCi_STATUS register.
* MSCOD=1 (BIT[16] - MscodDataRdErr).
* MCACOD=0x0090 (Memory controller error format, channel 0)
*/
- uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
+ u64 status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
MCI_STATUS_MISCV | MCI_STATUS_ADDRV | 0x10090;
struct kvm_x86_mce mce = {};
mce.status = status;
@@ -216,10 +217,10 @@ static void test_ucna_injection(struct kvm_vcpu *vcpu, struct thread_params *par
{
struct kvm_vm *vm = vcpu->vm;
params->vcpu = vcpu;
- params->p_i_ucna_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&i_ucna_rcvd);
- params->p_i_ucna_addr = (uint64_t *)addr_gva2hva(vm, (uint64_t)&i_ucna_addr);
- params->p_ucna_addr = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ucna_addr);
- params->p_ucna_addr2 = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ucna_addr2);
+ params->p_i_ucna_rcvd = (u64 *)addr_gva2hva(vm, (u64)&i_ucna_rcvd);
+ params->p_i_ucna_addr = (u64 *)addr_gva2hva(vm, (u64)&i_ucna_addr);
+ params->p_ucna_addr = (u64 *)addr_gva2hva(vm, (u64)&ucna_addr);
+ params->p_ucna_addr2 = (u64 *)addr_gva2hva(vm, (u64)&ucna_addr2);
run_ucna_injection(params);
@@ -242,7 +243,7 @@ static void test_ucna_injection(struct kvm_vcpu *vcpu, struct thread_params *par
static void setup_mce_cap(struct kvm_vcpu *vcpu, bool enable_cmci_p)
{
- uint64_t mcg_caps = MCG_CTL_P | MCG_SER_P | MCG_LMCE_P | KVM_MAX_MCE_BANKS;
+ u64 mcg_caps = MCG_CTL_P | MCG_SER_P | MCG_LMCE_P | KVM_MAX_MCE_BANKS;
if (enable_cmci_p)
mcg_caps |= MCG_CMCI_P;
@@ -250,7 +251,7 @@ static void setup_mce_cap(struct kvm_vcpu *vcpu, bool enable_cmci_p)
vcpu_ioctl(vcpu, KVM_X86_SETUP_MCE, &mcg_caps);
}
-static struct kvm_vcpu *create_vcpu_with_mce_cap(struct kvm_vm *vm, uint32_t vcpuid,
+static struct kvm_vcpu *create_vcpu_with_mce_cap(struct kvm_vm *vm, u32 vcpuid,
bool enable_cmci_p, void *guest_code)
{
struct kvm_vcpu *vcpu = vm_vcpu_add(vm, vcpuid, guest_code);
diff --git a/tools/testing/selftests/kvm/x86/userspace_io_test.c b/tools/testing/selftests/kvm/x86/userspace_io_test.c
index be7d72f3c029..9c5a87576c2e 100644
--- a/tools/testing/selftests/kvm/x86/userspace_io_test.c
+++ b/tools/testing/selftests/kvm/x86/userspace_io_test.c
@@ -10,7 +10,7 @@
#include "kvm_util.h"
#include "processor.h"
-static void guest_ins_port80(uint8_t *buffer, unsigned int count)
+static void guest_ins_port80(u8 *buffer, unsigned int count)
{
unsigned long end;
@@ -26,7 +26,7 @@ static void guest_ins_port80(uint8_t *buffer, unsigned int count)
static void guest_code(void)
{
- uint8_t buffer[8192];
+ u8 buffer[8192];
int i;
/*
diff --git a/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c b/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c
index 8463a9956410..2808ce727e5f 100644
--- a/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c
+++ b/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c
@@ -23,21 +23,21 @@ struct kvm_msr_filter filter_allow = {
.nmsrs = 1,
/* Test an MSR the kernel knows about. */
.base = MSR_IA32_XSS,
- .bitmap = (uint8_t*)&deny_bits,
+ .bitmap = (u8 *)&deny_bits,
}, {
.flags = KVM_MSR_FILTER_READ |
KVM_MSR_FILTER_WRITE,
.nmsrs = 1,
/* Test an MSR the kernel doesn't know about. */
.base = MSR_IA32_FLUSH_CMD,
- .bitmap = (uint8_t*)&deny_bits,
+ .bitmap = (u8 *)&deny_bits,
}, {
.flags = KVM_MSR_FILTER_READ |
KVM_MSR_FILTER_WRITE,
.nmsrs = 1,
/* Test a fabricated MSR that no one knows about. */
.base = MSR_NON_EXISTENT,
- .bitmap = (uint8_t*)&deny_bits,
+ .bitmap = (u8 *)&deny_bits,
},
},
};
@@ -49,7 +49,7 @@ struct kvm_msr_filter filter_fs = {
.flags = KVM_MSR_FILTER_READ,
.nmsrs = 1,
.base = MSR_FS_BASE,
- .bitmap = (uint8_t*)&deny_bits,
+ .bitmap = (u8 *)&deny_bits,
},
},
};
@@ -61,12 +61,12 @@ struct kvm_msr_filter filter_gs = {
.flags = KVM_MSR_FILTER_READ,
.nmsrs = 1,
.base = MSR_GS_BASE,
- .bitmap = (uint8_t*)&deny_bits,
+ .bitmap = (u8 *)&deny_bits,
},
},
};
-static uint64_t msr_non_existent_data;
+static u64 msr_non_existent_data;
static int guest_exception_count;
static u32 msr_reads, msr_writes;
@@ -77,7 +77,7 @@ static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_deadbeef[1] = { 0x1 };
-static void deny_msr(uint8_t *bitmap, u32 msr)
+static void deny_msr(u8 *bitmap, u32 msr)
{
u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);
@@ -142,26 +142,26 @@ struct kvm_msr_filter no_filter_deny = {
* Note: Force test_rdmsr() to not be inlined to prevent the labels,
* rdmsr_start and rdmsr_end, from being defined multiple times.
*/
-static noinline uint64_t test_rdmsr(uint32_t msr)
+static noinline u64 test_rdmsr(u32 msr)
{
- uint32_t a, d;
+ u32 a, d;
guest_exception_count = 0;
__asm__ __volatile__("rdmsr_start: rdmsr; rdmsr_end:" :
"=a"(a), "=d"(d) : "c"(msr) : "memory");
- return a | ((uint64_t) d << 32);
+ return a | ((u64)d << 32);
}
/*
* Note: Force test_wrmsr() to not be inlined to prevent the labels,
* wrmsr_start and wrmsr_end, from being defined multiple times.
*/
-static noinline void test_wrmsr(uint32_t msr, uint64_t value)
+static noinline void test_wrmsr(u32 msr, u64 value)
{
- uint32_t a = value;
- uint32_t d = value >> 32;
+ u32 a = value;
+ u32 d = value >> 32;
guest_exception_count = 0;
@@ -176,26 +176,26 @@ extern char wrmsr_start, wrmsr_end;
* Note: Force test_em_rdmsr() to not be inlined to prevent the labels,
* rdmsr_start and rdmsr_end, from being defined multiple times.
*/
-static noinline uint64_t test_em_rdmsr(uint32_t msr)
+static noinline u64 test_em_rdmsr(u32 msr)
{
- uint32_t a, d;
+ u32 a, d;
guest_exception_count = 0;
__asm__ __volatile__(KVM_FEP "em_rdmsr_start: rdmsr; em_rdmsr_end:" :
"=a"(a), "=d"(d) : "c"(msr) : "memory");
- return a | ((uint64_t) d << 32);
+ return a | ((u64)d << 32);
}
/*
* Note: Force test_em_wrmsr() to not be inlined to prevent the labels,
* wrmsr_start and wrmsr_end, from being defined multiple times.
*/
-static noinline void test_em_wrmsr(uint32_t msr, uint64_t value)
+static noinline void test_em_wrmsr(u32 msr, u64 value)
{
- uint32_t a = value;
- uint32_t d = value >> 32;
+ u32 a = value;
+ u32 d = value >> 32;
guest_exception_count = 0;
@@ -208,7 +208,7 @@ extern char em_wrmsr_start, em_wrmsr_end;
static void guest_code_filter_allow(void)
{
- uint64_t data;
+ u64 data;
/*
* Test userspace intercepting rdmsr / wrmsr for MSR_IA32_XSS.
@@ -328,7 +328,7 @@ static void guest_code_filter_deny(void)
static void guest_code_permission_bitmap(void)
{
- uint64_t data;
+ u64 data;
data = test_rdmsr(MSR_FS_BASE);
GUEST_ASSERT(data == MSR_FS_BASE);
@@ -391,7 +391,7 @@ static void check_for_guest_assert(struct kvm_vcpu *vcpu)
}
}
-static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
+static void process_rdmsr(struct kvm_vcpu *vcpu, u32 msr_index)
{
struct kvm_run *run = vcpu->run;
@@ -423,7 +423,7 @@ static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
}
}
-static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
+static void process_wrmsr(struct kvm_vcpu *vcpu, u32 msr_index)
{
struct kvm_run *run = vcpu->run;
@@ -464,7 +464,7 @@ static void process_ucall_done(struct kvm_vcpu *vcpu)
uc.cmd, UCALL_DONE);
}
-static uint64_t process_ucall(struct kvm_vcpu *vcpu)
+static u64 process_ucall(struct kvm_vcpu *vcpu)
{
struct ucall uc = {};
@@ -489,20 +489,20 @@ static uint64_t process_ucall(struct kvm_vcpu *vcpu)
}
static void run_guest_then_process_rdmsr(struct kvm_vcpu *vcpu,
- uint32_t msr_index)
+ u32 msr_index)
{
vcpu_run(vcpu);
process_rdmsr(vcpu, msr_index);
}
static void run_guest_then_process_wrmsr(struct kvm_vcpu *vcpu,
- uint32_t msr_index)
+ u32 msr_index)
{
vcpu_run(vcpu);
process_wrmsr(vcpu, msr_index);
}
-static uint64_t run_guest_then_process_ucall(struct kvm_vcpu *vcpu)
+static u64 run_guest_then_process_ucall(struct kvm_vcpu *vcpu)
{
vcpu_run(vcpu);
return process_ucall(vcpu);
@@ -519,7 +519,7 @@ KVM_ONE_VCPU_TEST_SUITE(user_msr);
KVM_ONE_VCPU_TEST(user_msr, msr_filter_allow, guest_code_filter_allow)
{
struct kvm_vm *vm = vcpu->vm;
- uint64_t cmd;
+ u64 cmd;
int rc;
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
@@ -732,7 +732,7 @@ static void run_msr_filter_flag_test(struct kvm_vm *vm)
.flags = KVM_MSR_FILTER_READ,
.nmsrs = 1,
.base = 0,
- .bitmap = (uint8_t *)&deny_bits,
+ .bitmap = (u8 *)&deny_bits,
},
},
};
diff --git a/tools/testing/selftests/kvm/x86/vmx_apic_access_test.c b/tools/testing/selftests/kvm/x86/vmx_apic_access_test.c
index a81a24761aac..1720113eae79 100644
--- a/tools/testing/selftests/kvm/x86/vmx_apic_access_test.c
+++ b/tools/testing/selftests/kvm/x86/vmx_apic_access_test.c
@@ -38,7 +38,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
- uint32_t control;
+ u32 control;
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
GUEST_ASSERT(load_vmcs(vmx_pages));
@@ -72,7 +72,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa)
int main(int argc, char *argv[])
{
unsigned long apic_access_addr = ~0ul;
- vm_vaddr_t vmx_pages_gva;
+ gva_t vmx_pages_gva;
unsigned long high_gpa;
struct vmx_pages *vmx;
bool done = false;
diff --git a/tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c b/tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c
index 337c53fddeff..80a4fd1e5bbb 100644
--- a/tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c
+++ b/tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c
@@ -33,7 +33,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
- uint32_t control;
+ u32 control;
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
GUEST_ASSERT(load_vmcs(vmx_pages));
@@ -110,7 +110,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
int main(int argc, char *argv[])
{
- vm_vaddr_t vmx_pages_gva;
+ gva_t vmx_pages_gva;
struct vmx_pages *vmx;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
diff --git a/tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c b/tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c
index a100ee5f0009..a2eaceed9ad5 100644
--- a/tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c
+++ b/tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c
@@ -52,7 +52,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
int main(int argc, char *argv[])
{
- vm_vaddr_t vmx_pages_gva;
+ gva_t vmx_pages_gva;
struct kvm_sregs sregs;
struct kvm_vcpu *vcpu;
struct kvm_run *run;
diff --git a/tools/testing/selftests/kvm/x86/vmx_msrs_test.c b/tools/testing/selftests/kvm/x86/vmx_msrs_test.c
index 90720b6205f4..c1e8632a1bb6 100644
--- a/tools/testing/selftests/kvm/x86/vmx_msrs_test.c
+++ b/tools/testing/selftests/kvm/x86/vmx_msrs_test.c
@@ -12,11 +12,10 @@
#include "kvm_util.h"
#include "vmx.h"
-static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index,
- uint64_t mask)
+static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, u32 msr_index, u64 mask)
{
- uint64_t val = vcpu_get_msr(vcpu, msr_index);
- uint64_t bit;
+ u64 val = vcpu_get_msr(vcpu, msr_index);
+ u64 bit;
mask &= val;
@@ -26,11 +25,10 @@ static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index,
}
}
-static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index,
- uint64_t mask)
+static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, u32 msr_index, u64 mask)
{
- uint64_t val = vcpu_get_msr(vcpu, msr_index);
- uint64_t bit;
+ u64 val = vcpu_get_msr(vcpu, msr_index);
+ u64 bit;
mask = ~mask | val;
@@ -40,7 +38,7 @@ static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index,
}
}
-static void vmx_fixed0and1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index)
+static void vmx_fixed0and1_msr_test(struct kvm_vcpu *vcpu, u32 msr_index)
{
vmx_fixed0_msr_test(vcpu, msr_index, GENMASK_ULL(31, 0));
vmx_fixed1_msr_test(vcpu, msr_index, GENMASK_ULL(63, 32));
@@ -68,10 +66,10 @@ static void vmx_save_restore_msrs_test(struct kvm_vcpu *vcpu)
}
static void __ia32_feature_control_msr_test(struct kvm_vcpu *vcpu,
- uint64_t msr_bit,
+ u64 msr_bit,
struct kvm_x86_cpu_feature feature)
{
- uint64_t val;
+ u64 val;
vcpu_clear_cpuid_feature(vcpu, feature);
@@ -90,7 +88,7 @@ static void __ia32_feature_control_msr_test(struct kvm_vcpu *vcpu,
static void ia32_feature_control_msr_test(struct kvm_vcpu *vcpu)
{
- uint64_t supported_bits = FEAT_CTL_LOCKED |
+ u64 supported_bits = FEAT_CTL_LOCKED |
FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX |
FEAT_CTL_SGX_LC_ENABLED |
diff --git a/tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c b/tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c
index 915c42001dba..f13dee317383 100644
--- a/tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c
+++ b/tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c
@@ -30,7 +30,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
u64 guest_cr4;
- vm_paddr_t pml5_pa, pml4_pa;
+ gpa_t pml5_pa, pml4_pa;
u64 *pml5;
u64 exit_reason;
@@ -73,7 +73,7 @@ void guest_code(struct vmx_pages *vmx_pages)
int main(int argc, char *argv[])
{
- vm_vaddr_t vmx_pages_gva = 0;
+ gva_t vmx_pages_gva = 0;
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
struct kvm_x86_state *state;
diff --git a/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c
index 7ff6f62e20a3..d004108dbdc6 100644
--- a/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c
+++ b/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c
@@ -52,16 +52,16 @@ static const union perf_capabilities format_caps = {
.pebs_format = -1,
};
-static void guest_test_perf_capabilities_gp(uint64_t val)
+static void guest_test_perf_capabilities_gp(u64 val)
{
- uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val);
+ u8 vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val);
__GUEST_ASSERT(vector == GP_VECTOR,
"Expected #GP for value '0x%lx', got %s",
val, ex_str(vector));
}
-static void guest_code(uint64_t current_val)
+static void guest_code(u64 current_val)
{
int i;
@@ -129,7 +129,7 @@ KVM_ONE_VCPU_TEST(vmx_pmu_caps, basic_perf_capabilities, guest_code)
KVM_ONE_VCPU_TEST(vmx_pmu_caps, fungible_perf_capabilities, guest_code)
{
- const uint64_t fungible_caps = host_cap.capabilities & ~immutable_caps.capabilities;
+ const u64 fungible_caps = host_cap.capabilities & ~immutable_caps.capabilities;
int bit;
for_each_set_bit(bit, &fungible_caps, 64) {
@@ -148,7 +148,7 @@ KVM_ONE_VCPU_TEST(vmx_pmu_caps, fungible_perf_capabilities, guest_code)
*/
KVM_ONE_VCPU_TEST(vmx_pmu_caps, immutable_perf_capabilities, guest_code)
{
- const uint64_t reserved_caps = (~host_cap.capabilities |
+ const u64 reserved_caps = (~host_cap.capabilities |
immutable_caps.capabilities) &
~format_caps.capabilities;
union perf_capabilities val = host_cap;
@@ -210,7 +210,7 @@ KVM_ONE_VCPU_TEST(vmx_pmu_caps, lbr_perf_capabilities, guest_code)
KVM_ONE_VCPU_TEST(vmx_pmu_caps, perf_capabilities_unsupported, guest_code)
{
- uint64_t val;
+ u64 val;
int i, r;
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
diff --git a/tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c b/tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c
index 00dd2ac07a61..1b7b6ba23de7 100644
--- a/tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c
+++ b/tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c
@@ -152,7 +152,7 @@ void guest_code(struct vmx_pages *vmx_pages)
int main(int argc, char *argv[])
{
- vm_vaddr_t vmx_pages_gva = 0;
+ gva_t vmx_pages_gva = 0;
struct kvm_regs regs1, regs2;
struct kvm_vm *vm;
diff --git a/tools/testing/selftests/kvm/x86/xapic_ipi_test.c b/tools/testing/selftests/kvm/x86/xapic_ipi_test.c
index ae4a4b6c05ca..39ce9a9369f5 100644
--- a/tools/testing/selftests/kvm/x86/xapic_ipi_test.c
+++ b/tools/testing/selftests/kvm/x86/xapic_ipi_test.c
@@ -48,20 +48,20 @@
* Incremented in the IPI handler. Provides evidence to the sender that the IPI
* arrived at the destination
*/
-static volatile uint64_t ipis_rcvd;
+static volatile u64 ipis_rcvd;
/* Data struct shared between host main thread and vCPUs */
struct test_data_page {
- uint32_t halter_apic_id;
- volatile uint64_t hlt_count;
- volatile uint64_t wake_count;
- uint64_t ipis_sent;
- uint64_t migrations_attempted;
- uint64_t migrations_completed;
- uint32_t icr;
- uint32_t icr2;
- uint32_t halter_tpr;
- uint32_t halter_ppr;
+ u32 halter_apic_id;
+ volatile u64 hlt_count;
+ volatile u64 wake_count;
+ u64 ipis_sent;
+ u64 migrations_attempted;
+ u64 migrations_completed;
+ u32 icr;
+ u32 icr2;
+ u32 halter_tpr;
+ u32 halter_ppr;
/*
* Record local version register as a cross-check that APIC access
@@ -69,19 +69,19 @@ struct test_data_page {
* arch/x86/kvm/lapic.c). If test is failing, check that values match
* to determine whether APIC access exits are working.
*/
- uint32_t halter_lvr;
+ u32 halter_lvr;
};
struct thread_params {
struct test_data_page *data;
struct kvm_vcpu *vcpu;
- uint64_t *pipis_rcvd; /* host address of ipis_rcvd global */
+ u64 *pipis_rcvd; /* host address of ipis_rcvd global */
};
void verify_apic_base_addr(void)
{
- uint64_t msr = rdmsr(MSR_IA32_APICBASE);
- uint64_t base = GET_APIC_BASE(msr);
+ u64 msr = rdmsr(MSR_IA32_APICBASE);
+ u64 base = GET_APIC_BASE(msr);
GUEST_ASSERT(base == APIC_DEFAULT_GPA);
}
@@ -125,12 +125,12 @@ static void guest_ipi_handler(struct ex_regs *regs)
static void sender_guest_code(struct test_data_page *data)
{
- uint64_t last_wake_count;
- uint64_t last_hlt_count;
- uint64_t last_ipis_rcvd_count;
- uint32_t icr_val;
- uint32_t icr2_val;
- uint64_t tsc_start;
+ u64 last_wake_count;
+ u64 last_hlt_count;
+ u64 last_ipis_rcvd_count;
+ u32 icr_val;
+ u32 icr2_val;
+ u64 tsc_start;
verify_apic_base_addr();
xapic_enable();
@@ -248,7 +248,7 @@ static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
}
void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs,
- uint64_t *pipis_rcvd)
+ u64 *pipis_rcvd)
{
long pages_not_moved;
unsigned long nodemask = 0;
@@ -259,9 +259,9 @@ void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs,
int i;
int from, to;
unsigned long bit;
- uint64_t hlt_count;
- uint64_t wake_count;
- uint64_t ipis_sent;
+ u64 hlt_count;
+ u64 wake_count;
+ u64 ipis_sent;
fprintf(stderr, "Calling migrate_pages every %d microseconds\n",
delay_usecs);
@@ -393,12 +393,12 @@ int main(int argc, char *argv[])
int run_secs = 0;
int delay_usecs = 0;
struct test_data_page *data;
- vm_vaddr_t test_data_page_vaddr;
+ gva_t test_data_page_gva;
bool migrate = false;
pthread_t threads[2];
struct thread_params params[2];
struct kvm_vm *vm;
- uint64_t *pipis_rcvd;
+ u64 *pipis_rcvd;
get_cmdline_args(argc, argv, &run_secs, &migrate, &delay_usecs);
if (run_secs <= 0)
@@ -414,16 +414,16 @@ int main(int argc, char *argv[])
params[1].vcpu = vm_vcpu_add(vm, 1, sender_guest_code);
- test_data_page_vaddr = vm_vaddr_alloc_page(vm);
- data = addr_gva2hva(vm, test_data_page_vaddr);
+ test_data_page_gva = vm_alloc_page(vm);
+ data = addr_gva2hva(vm, test_data_page_gva);
memset(data, 0, sizeof(*data));
params[0].data = data;
params[1].data = data;
- vcpu_args_set(params[0].vcpu, 1, test_data_page_vaddr);
- vcpu_args_set(params[1].vcpu, 1, test_data_page_vaddr);
+ vcpu_args_set(params[0].vcpu, 1, test_data_page_gva);
+ vcpu_args_set(params[1].vcpu, 1, test_data_page_gva);
- pipis_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ipis_rcvd);
+ pipis_rcvd = (u64 *)addr_gva2hva(vm, (u64)&ipis_rcvd);
params[0].pipis_rcvd = pipis_rcvd;
params[1].pipis_rcvd = pipis_rcvd;
diff --git a/tools/testing/selftests/kvm/x86/xapic_state_test.c b/tools/testing/selftests/kvm/x86/xapic_state_test.c
index 0c5e12f5f14e..637bb90c1d93 100644
--- a/tools/testing/selftests/kvm/x86/xapic_state_test.c
+++ b/tools/testing/selftests/kvm/x86/xapic_state_test.c
@@ -23,7 +23,7 @@ static void xapic_guest_code(void)
xapic_enable();
while (1) {
- uint64_t val = (u64)xapic_read_reg(APIC_IRR) |
+ u64 val = (u64)xapic_read_reg(APIC_IRR) |
(u64)xapic_read_reg(APIC_IRR + 0x10) << 32;
xapic_write_reg(APIC_ICR2, val >> 32);
@@ -43,7 +43,7 @@ static void x2apic_guest_code(void)
x2apic_enable();
do {
- uint64_t val = x2apic_read_reg(APIC_IRR) |
+ u64 val = x2apic_read_reg(APIC_IRR) |
x2apic_read_reg(APIC_IRR + 0x10) << 32;
if (val & X2APIC_RSVD_BITS_MASK) {
@@ -56,12 +56,12 @@ static void x2apic_guest_code(void)
} while (1);
}
-static void ____test_icr(struct xapic_vcpu *x, uint64_t val)
+static void ____test_icr(struct xapic_vcpu *x, u64 val)
{
struct kvm_vcpu *vcpu = x->vcpu;
struct kvm_lapic_state xapic;
struct ucall uc;
- uint64_t icr;
+ u64 icr;
/*
* Tell the guest what ICR value to write. Use the IRR to pass info,
@@ -93,7 +93,7 @@ static void ____test_icr(struct xapic_vcpu *x, uint64_t val)
TEST_ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
}
-static void __test_icr(struct xapic_vcpu *x, uint64_t val)
+static void __test_icr(struct xapic_vcpu *x, u64 val)
{
/*
* The BUSY bit is reserved on both AMD and Intel, but only AMD treats
@@ -109,7 +109,7 @@ static void __test_icr(struct xapic_vcpu *x, uint64_t val)
static void test_icr(struct xapic_vcpu *x)
{
struct kvm_vcpu *vcpu = x->vcpu;
- uint64_t icr, i, j;
+ u64 icr, i, j;
icr = APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_FIXED;
for (i = 0; i <= 0xff; i++)
@@ -142,9 +142,9 @@ static void test_icr(struct xapic_vcpu *x)
__test_icr(x, -1ull & ~APIC_DM_FIXED_MASK);
}
-static void __test_apic_id(struct kvm_vcpu *vcpu, uint64_t apic_base)
+static void __test_apic_id(struct kvm_vcpu *vcpu, u64 apic_base)
{
- uint32_t apic_id, expected;
+ u32 apic_id, expected;
struct kvm_lapic_state xapic;
vcpu_set_msr(vcpu, MSR_IA32_APICBASE, apic_base);
@@ -170,9 +170,9 @@ static void __test_apic_id(struct kvm_vcpu *vcpu, uint64_t apic_base)
*/
static void test_apic_id(void)
{
- const uint32_t NR_VCPUS = 3;
+ const u32 NR_VCPUS = 3;
struct kvm_vcpu *vcpus[NR_VCPUS];
- uint64_t apic_base;
+ u64 apic_base;
struct kvm_vm *vm;
int i;
diff --git a/tools/testing/selftests/kvm/x86/xapic_tpr_test.c b/tools/testing/selftests/kvm/x86/xapic_tpr_test.c
index 3862134d9d40..ab25db2235d5 100644
--- a/tools/testing/selftests/kvm/x86/xapic_tpr_test.c
+++ b/tools/testing/selftests/kvm/x86/xapic_tpr_test.c
@@ -58,7 +58,7 @@ static void tpr_guest_irq_queue(void)
if (is_x2apic) {
x2apic_write_reg(APIC_SELF_IPI, IRQ_VECTOR);
} else {
- uint32_t icr, icr2;
+ u32 icr, icr2;
icr = APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED |
IRQ_VECTOR;
@@ -69,9 +69,9 @@ static void tpr_guest_irq_queue(void)
}
}
-static uint8_t tpr_guest_tpr_get(void)
+static u8 tpr_guest_tpr_get(void)
{
- uint32_t taskpri;
+ u32 taskpri;
if (is_x2apic)
taskpri = x2apic_read_reg(APIC_TASKPRI);
@@ -81,9 +81,9 @@ static uint8_t tpr_guest_tpr_get(void)
return GET_APIC_PRI(taskpri);
}
-static uint8_t tpr_guest_ppr_get(void)
+static u8 tpr_guest_ppr_get(void)
{
- uint32_t procpri;
+ u32 procpri;
if (is_x2apic)
procpri = x2apic_read_reg(APIC_PROCPRI);
@@ -93,9 +93,9 @@ static uint8_t tpr_guest_ppr_get(void)
return GET_APIC_PRI(procpri);
}
-static uint8_t tpr_guest_cr8_get(void)
+static u8 tpr_guest_cr8_get(void)
{
- uint64_t cr8;
+ u64 cr8;
asm volatile ("mov %%cr8, %[cr8]\n\t" : [cr8] "=r"(cr8));
@@ -104,7 +104,7 @@ static uint8_t tpr_guest_cr8_get(void)
static void tpr_guest_check_tpr_ppr_cr8_equal(void)
{
- uint8_t tpr;
+ u8 tpr;
tpr = tpr_guest_tpr_get();
@@ -157,19 +157,19 @@ static void tpr_guest_code(void)
GUEST_DONE();
}
-static uint8_t lapic_tpr_get(struct kvm_lapic_state *xapic)
+static u8 lapic_tpr_get(struct kvm_lapic_state *xapic)
{
return GET_APIC_PRI(*((u32 *)&xapic->regs[APIC_TASKPRI]));
}
-static void lapic_tpr_set(struct kvm_lapic_state *xapic, uint8_t val)
+static void lapic_tpr_set(struct kvm_lapic_state *xapic, u8 val)
{
u32 *taskpri = (u32 *)&xapic->regs[APIC_TASKPRI];
*taskpri = SET_APIC_PRI(*taskpri, val);
}
-static uint8_t sregs_tpr(struct kvm_sregs *sregs)
+static u8 sregs_tpr(struct kvm_sregs *sregs)
{
return sregs->cr8 & GENMASK(3, 0);
}
@@ -197,7 +197,7 @@ static void test_tpr_check_tpr_cr8_equal(struct kvm_vcpu *vcpu)
static void test_tpr_set_tpr_for_irq(struct kvm_vcpu *vcpu, bool mask)
{
struct kvm_lapic_state xapic;
- uint8_t tpr;
+ u8 tpr;
static_assert(IRQ_VECTOR >= 16, "invalid IRQ vector number");
tpr = IRQ_VECTOR / 16;
diff --git a/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c b/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c
index d038c1571729..40dc9e6b3fad 100644
--- a/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c
@@ -21,7 +21,7 @@
*/
#define ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0, xfeatures, dependencies) \
do { \
- uint64_t __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \
+ u64 __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \
\
__GUEST_ASSERT((__supported & (xfeatures)) != (xfeatures) || \
__supported == ((xfeatures) | (dependencies)), \
@@ -39,7 +39,7 @@ do { \
*/
#define ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0, xfeatures) \
do { \
- uint64_t __supported = (supported_xcr0) & (xfeatures); \
+ u64 __supported = (supported_xcr0) & (xfeatures); \
\
__GUEST_ASSERT(!__supported || __supported == (xfeatures), \
"supported = 0x%lx, xfeatures = 0x%llx", \
@@ -48,8 +48,8 @@ do { \
static void guest_code(void)
{
- uint64_t initial_xcr0;
- uint64_t supported_xcr0;
+ u64 initial_xcr0;
+ u64 supported_xcr0;
int i, vector;
set_cr4(get_cr4() | X86_CR4_OSXSAVE);
diff --git a/tools/testing/selftests/kvm/x86/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86/xen_shinfo_test.c
index 23909b501ac2..5076f6a75455 100644
--- a/tools/testing/selftests/kvm/x86/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86/xen_shinfo_test.c
@@ -116,15 +116,15 @@ struct pvclock_wall_clock {
} __attribute__((__packed__));
struct vcpu_runstate_info {
- uint32_t state;
- uint64_t state_entry_time;
- uint64_t time[5]; /* Extra field for overrun check */
+ u32 state;
+ u64 state_entry_time;
+ u64 time[5]; /* Extra field for overrun check */
};
struct compat_vcpu_runstate_info {
- uint32_t state;
- uint64_t state_entry_time;
- uint64_t time[5];
+ u32 state;
+ u64 state_entry_time;
+ u64 time[5];
} __attribute__((__packed__));
struct arch_vcpu_info {
@@ -133,8 +133,8 @@ struct arch_vcpu_info {
};
struct vcpu_info {
- uint8_t evtchn_upcall_pending;
- uint8_t evtchn_upcall_mask;
+ u8 evtchn_upcall_pending;
+ u8 evtchn_upcall_mask;
unsigned long evtchn_pending_sel;
struct arch_vcpu_info arch;
struct pvclock_vcpu_time_info time;
@@ -145,7 +145,7 @@ struct shared_info {
unsigned long evtchn_pending[64];
unsigned long evtchn_mask[64];
struct pvclock_wall_clock wc;
- uint32_t wc_sec_hi;
+ u32 wc_sec_hi;
/* arch_shared_info here */
};
@@ -658,7 +658,7 @@ int main(int argc, char *argv[])
printf("Testing RUNSTATE_ADJUST\n");
rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST;
memset(&rst.u, 0, sizeof(rst.u));
- rst.u.runstate.state = (uint64_t)-1;
+ rst.u.runstate.state = (u64)-1;
rst.u.runstate.time_blocked =
0x5a - rs->time[RUNSTATE_blocked];
rst.u.runstate.time_offline =
@@ -1113,7 +1113,7 @@ int main(int argc, char *argv[])
/* Don't change the address, just trigger a write */
struct kvm_xen_vcpu_attr adj = {
.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST,
- .u.runstate.state = (uint64_t)-1
+ .u.runstate.state = (u64)-1
};
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &adj);
diff --git a/tools/testing/selftests/kvm/x86/xss_msr_test.c b/tools/testing/selftests/kvm/x86/xss_msr_test.c
index f331a4e9bae3..12c63df6bbce 100644
--- a/tools/testing/selftests/kvm/x86/xss_msr_test.c
+++ b/tools/testing/selftests/kvm/x86/xss_msr_test.c
@@ -17,7 +17,7 @@ int main(int argc, char *argv[])
bool xss_in_msr_list;
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
- uint64_t xss_val;
+ u64 xss_val;
int i, r;
/* Create VM */
diff --git a/tools/testing/selftests/liveupdate/liveupdate.c b/tools/testing/selftests/liveupdate/liveupdate.c
index c2878e3d5ef9..37c808fbe1e9 100644
--- a/tools/testing/selftests/liveupdate/liveupdate.c
+++ b/tools/testing/selftests/liveupdate/liveupdate.c
@@ -345,4 +345,45 @@ TEST_F(liveupdate_device, preserve_unsupported_fd)
ASSERT_EQ(close(session_fd), 0);
}
+/*
+ * Test Case: Prevent Double Preservation
+ *
+ * Verifies that a file (memfd) can only be preserved once across all active
+ * sessions. Attempting to preserve it a second time, whether in the same or
+ * a different session, should fail with EBUSY.
+ */
+TEST_F(liveupdate_device, prevent_double_preservation)
+{
+ int session_fd1, session_fd2, mem_fd;
+ int ret;
+
+ self->fd1 = open(LIVEUPDATE_DEV, O_RDWR);
+ if (self->fd1 < 0 && errno == ENOENT)
+ SKIP(return, "%s does not exist", LIVEUPDATE_DEV);
+ ASSERT_GE(self->fd1, 0);
+
+ session_fd1 = create_session(self->fd1, "double-preserve-session-1");
+ ASSERT_GE(session_fd1, 0);
+ session_fd2 = create_session(self->fd1, "double-preserve-session-2");
+ ASSERT_GE(session_fd2, 0);
+
+ mem_fd = memfd_create("test-memfd", 0);
+ ASSERT_GE(mem_fd, 0);
+
+ /* First preservation should succeed */
+ ASSERT_EQ(preserve_fd(session_fd1, mem_fd, 0x1111), 0);
+
+ /* Second preservation in a different session should fail with EBUSY */
+ ret = preserve_fd(session_fd2, mem_fd, 0x2222);
+ EXPECT_EQ(ret, -EBUSY);
+
+ /* Second preservation in the same session (different token) should fail with EBUSY */
+ ret = preserve_fd(session_fd1, mem_fd, 0x3333);
+ EXPECT_EQ(ret, -EBUSY);
+
+ ASSERT_EQ(close(mem_fd), 0);
+ ASSERT_EQ(close(session_fd1), 0);
+ ASSERT_EQ(close(session_fd2), 0);
+}
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
index 447769657634..44f4e703deb9 100755
--- a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
+++ b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
@@ -11,6 +11,11 @@ if [[ $(id -u) -ne 0 ]]; then
exit $ksft_skip
fi
+if ! command -v killall >/dev/null 2>&1; then
+ echo "killall not available. Skipping..."
+ exit $ksft_skip
+fi
+
nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
fault_limit_file=limit_in_bytes
diff --git a/tools/testing/selftests/mm/config b/tools/testing/selftests/mm/config
index 1dbe2b4558ab..06f78bd232e2 100644
--- a/tools/testing/selftests/mm/config
+++ b/tools/testing/selftests/mm/config
@@ -13,3 +13,4 @@ CONFIG_PROFILING=y
CONFIG_UPROBES=y
CONFIG_MEMORY_FAILURE=y
CONFIG_HWPOISON_INJECT=m
+CONFIG_PROC_MEM_ALWAYS_FORCE=y
diff --git a/tools/testing/selftests/mm/guard-regions.c b/tools/testing/selftests/mm/guard-regions.c
index dbd21d66d383..48e8b1539be3 100644
--- a/tools/testing/selftests/mm/guard-regions.c
+++ b/tools/testing/selftests/mm/guard-regions.c
@@ -21,6 +21,7 @@
#include <sys/uio.h>
#include <unistd.h>
#include "vm_util.h"
+#include "thp_settings.h"
#include "../pidfd/pidfd.h"
@@ -2195,6 +2196,9 @@ TEST_F(guard_regions, collapse)
char *ptr;
int i;
+ if (!thp_available())
+ SKIP(return, "Transparent Hugepages not available\n");
+
/* Need file to be correct size for tests for non-anon. */
if (variant->backing != ANON_BACKED)
ASSERT_EQ(ftruncate(self->fd, size), 0);
diff --git a/tools/testing/selftests/mm/hmm-tests.c b/tools/testing/selftests/mm/hmm-tests.c
index e8328c89d855..788689497e92 100644
--- a/tools/testing/selftests/mm/hmm-tests.c
+++ b/tools/testing/selftests/mm/hmm-tests.c
@@ -34,6 +34,7 @@
*/
#include <lib/test_hmm_uapi.h>
#include <mm/gup_test.h>
+#include <mm/vm_util.h>
struct hmm_buffer {
void *ptr;
@@ -548,7 +549,7 @@ TEST_F(hmm, anon_write_child)
for (migrate = 0; migrate < 2; ++migrate) {
for (use_thp = 0; use_thp < 2; ++use_thp) {
- npages = ALIGN(use_thp ? TWOMEG : HMM_BUFFER_SIZE,
+ npages = ALIGN(use_thp ? read_pmd_pagesize() : HMM_BUFFER_SIZE,
self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
@@ -728,7 +729,7 @@ TEST_F(hmm, anon_write_huge)
int *ptr;
int ret;
- size = 2 * TWOMEG;
+ size = 2 * read_pmd_pagesize();
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
@@ -744,7 +745,7 @@ TEST_F(hmm, anon_write_huge)
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
- size = TWOMEG;
+ size /= 2;
npages = size >> self->page_shift;
map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
ret = madvise(map, size, MADV_HUGEPAGE);
@@ -771,54 +772,6 @@ TEST_F(hmm, anon_write_huge)
}
/*
- * Read numeric data from raw and tagged kernel status files. Used to read
- * /proc and /sys data (without a tag) and from /proc/meminfo (with a tag).
- */
-static long file_read_ulong(char *file, const char *tag)
-{
- int fd;
- char buf[2048];
- int len;
- char *p, *q;
- long val;
-
- fd = open(file, O_RDONLY);
- if (fd < 0) {
- /* Error opening the file */
- return -1;
- }
-
- len = read(fd, buf, sizeof(buf));
- close(fd);
- if (len < 0) {
- /* Error in reading the file */
- return -1;
- }
- if (len == sizeof(buf)) {
- /* Error file is too large */
- return -1;
- }
- buf[len] = '\0';
-
- /* Search for a tag if provided */
- if (tag) {
- p = strstr(buf, tag);
- if (!p)
- return -1; /* looks like the line we want isn't there */
- p += strlen(tag);
- } else
- p = buf;
-
- val = strtol(p, &q, 0);
- if (*q != ' ') {
- /* Error parsing the file */
- return -1;
- }
-
- return val;
-}
-
-/*
* Write huge TLBFS page.
*/
TEST_F(hmm, anon_write_hugetlbfs)
@@ -826,15 +779,13 @@ TEST_F(hmm, anon_write_hugetlbfs)
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
- unsigned long default_hsize;
+ unsigned long default_hsize = default_huge_page_size();
unsigned long i;
int *ptr;
int ret;
- default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:");
- if (default_hsize < 0 || default_hsize*1024 < default_hsize)
+ if (!default_hsize)
SKIP(return, "Huge page size could not be determined");
- default_hsize = default_hsize*1024; /* KB to B */
size = ALIGN(TWOMEG, default_hsize);
npages = size >> self->page_shift;
@@ -1606,7 +1557,7 @@ TEST_F(hmm, compound)
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
- unsigned long default_hsize;
+ unsigned long default_hsize = default_huge_page_size();
int *ptr;
unsigned char *m;
int ret;
@@ -1614,10 +1565,8 @@ TEST_F(hmm, compound)
/* Skip test if we can't allocate a hugetlbfs page. */
- default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:");
- if (default_hsize < 0 || default_hsize*1024 < default_hsize)
+ if (!default_hsize)
SKIP(return, "Huge page size could not be determined");
- default_hsize = default_hsize*1024; /* KB to B */
size = ALIGN(TWOMEG, default_hsize);
npages = size >> self->page_shift;
@@ -2106,7 +2055,7 @@ TEST_F(hmm, migrate_anon_huge_empty)
int *ptr;
int ret;
- size = TWOMEG;
+ size = read_pmd_pagesize();
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
@@ -2158,7 +2107,7 @@ TEST_F(hmm, migrate_anon_huge_zero)
int ret;
int val;
- size = TWOMEG;
+ size = read_pmd_pagesize();
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
@@ -2221,7 +2170,7 @@ TEST_F(hmm, migrate_anon_huge_free)
int *ptr;
int ret;
- size = TWOMEG;
+ size = read_pmd_pagesize();
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
@@ -2280,7 +2229,7 @@ TEST_F(hmm, migrate_anon_huge_fault)
int *ptr;
int ret;
- size = TWOMEG;
+ size = read_pmd_pagesize();
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
@@ -2332,7 +2281,7 @@ TEST_F(hmm, migrate_partial_unmap_fault)
{
struct hmm_buffer *buffer;
unsigned long npages;
- unsigned long size = TWOMEG;
+ unsigned long size = read_pmd_pagesize();
unsigned long i;
void *old_ptr;
void *map;
@@ -2398,7 +2347,7 @@ TEST_F(hmm, migrate_remap_fault)
{
struct hmm_buffer *buffer;
unsigned long npages;
- unsigned long size = TWOMEG;
+ unsigned long size = read_pmd_pagesize();
unsigned long i;
void *old_ptr, *new_ptr = NULL;
void *map;
@@ -2498,7 +2447,7 @@ TEST_F(hmm, migrate_anon_huge_err)
int *ptr;
int ret;
- size = TWOMEG;
+ size = read_pmd_pagesize();
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
@@ -2593,7 +2542,7 @@ TEST_F(hmm, migrate_anon_huge_zero_err)
int *ptr;
int ret;
- size = TWOMEG;
+ size = read_pmd_pagesize();
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
diff --git a/tools/testing/selftests/mm/hugetlb_dio.c b/tools/testing/selftests/mm/hugetlb_dio.c
index 9ac62eb4c97d..31a054fa8134 100644
--- a/tools/testing/selftests/mm/hugetlb_dio.c
+++ b/tools/testing/selftests/mm/hugetlb_dio.c
@@ -17,12 +17,57 @@
#include <unistd.h>
#include <string.h>
#include <sys/mman.h>
+#include <sys/syscall.h>
#include "vm_util.h"
#include "kselftest.h"
-void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off)
+#ifndef STATX_DIOALIGN
+#define STATX_DIOALIGN 0x00002000U
+#endif
+
+static int get_dio_alignment(int fd)
+{
+ struct statx stx;
+ int ret;
+
+ ret = syscall(__NR_statx, fd, "", AT_EMPTY_PATH, STATX_DIOALIGN, &stx);
+ if (ret < 0)
+ return -1;
+
+ /*
+ * If STATX_DIOALIGN is unsupported, assume no alignment
+ * constraint and let the test proceed.
+ */
+ if (!(stx.stx_mask & STATX_DIOALIGN) || !stx.stx_dio_offset_align)
+ return 1;
+
+ return stx.stx_dio_offset_align;
+}
+
+static bool check_dio_alignment(unsigned int start_off,
+ unsigned int end_off, unsigned int align)
+{
+ unsigned int writesize = end_off - start_off;
+
+ /*
+	 * The kernel's DIO path requires the file offset and length to
+	 * be multiples of stx_dio_offset_align. When
+ * this test case's parameters don't satisfy that, the write
+ * would fail with -EINVAL before exercising the hugetlb unpin
+ * path, so skip.
+ */
+ if (start_off % align != 0 || writesize % align != 0) {
+ ksft_test_result_skip("DIO align=%u incompatible with offset %u writesize %u\n",
+ align, start_off, writesize);
+ return false;
+ }
+
+ return true;
+}
+
+static void run_dio_using_hugetlb(int fd, unsigned int start_off,
+ unsigned int end_off, unsigned int align)
{
- int fd;
char *buffer = NULL;
char *orig_buffer = NULL;
size_t h_pagesize = 0;
@@ -32,6 +77,9 @@ void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off)
const int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB;
const int mmap_prot = PROT_READ | PROT_WRITE;
+ if (!check_dio_alignment(start_off, end_off, align))
+ return;
+
writesize = end_off - start_off;
/* Get the default huge page size */
@@ -39,10 +87,9 @@ void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off)
if (!h_pagesize)
ksft_exit_fail_msg("Unable to determine huge page size\n");
- /* Open the file to DIO */
- fd = open("/tmp", O_TMPFILE | O_RDWR | O_DIRECT, 0664);
- if (fd < 0)
- ksft_exit_fail_perror("Error opening file\n");
+ /* Reset file position since fd is shared across tests */
+ if (lseek(fd, 0, SEEK_SET) < 0)
+ ksft_exit_fail_perror("lseek failed\n");
/* Get the free huge pages before allocation */
free_hpage_b = get_free_hugepages();
@@ -71,7 +118,6 @@ void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off)
/* unmap the huge page */
munmap(orig_buffer, h_pagesize);
- close(fd);
/* Get the free huge pages after unmap*/
free_hpage_a = get_free_hugepages();
@@ -89,37 +135,38 @@ void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off)
int main(void)
{
- size_t pagesize = 0;
- int fd;
+ int fd, align;
+ const size_t pagesize = psize();
ksft_print_header();
- /* Open the file to DIO */
- fd = open("/tmp", O_TMPFILE | O_RDWR | O_DIRECT, 0664);
- if (fd < 0)
- ksft_exit_skip("Unable to allocate file: %s\n", strerror(errno));
- close(fd);
-
/* Check if huge pages are free */
if (!get_free_hugepages())
ksft_exit_skip("No free hugepage, exiting\n");
- ksft_set_plan(4);
+ fd = open("/tmp", O_TMPFILE | O_RDWR | O_DIRECT, 0664);
+ if (fd < 0)
+ ksft_exit_skip("Unable to allocate file: %s\n", strerror(errno));
- /* Get base page size */
- pagesize = psize();
+ align = get_dio_alignment(fd);
+ if (align < 0)
+ ksft_exit_skip("Unable to obtain DIO alignment: %s\n",
+ strerror(errno));
+ ksft_set_plan(4);
/* start and end is aligned to pagesize */
- run_dio_using_hugetlb(0, (pagesize * 3));
+ run_dio_using_hugetlb(fd, 0, (pagesize * 3), align);
/* start is aligned but end is not aligned */
- run_dio_using_hugetlb(0, (pagesize * 3) - (pagesize / 2));
+ run_dio_using_hugetlb(fd, 0, (pagesize * 3) - (pagesize / 2), align);
/* start is unaligned and end is aligned */
- run_dio_using_hugetlb(pagesize / 2, (pagesize * 3));
+ run_dio_using_hugetlb(fd, pagesize / 2, (pagesize * 3), align);
/* both start and end are unaligned */
- run_dio_using_hugetlb(pagesize / 2, (pagesize * 3) + (pagesize / 2));
+ run_dio_using_hugetlb(fd, pagesize / 2, (pagesize * 3) + (pagesize / 2), align);
+
+ close(fd);
ksft_finished();
}
diff --git a/tools/testing/selftests/mm/merge.c b/tools/testing/selftests/mm/merge.c
index 10b686102b79..519e5ac02db7 100644
--- a/tools/testing/selftests/mm/merge.c
+++ b/tools/testing/selftests/mm/merge.c
@@ -48,6 +48,19 @@ static pid_t do_fork(struct procmap_fd *procmap)
return 0;
}
+#ifdef __NR_mseal
+static int sys_mseal(void *ptr, size_t len, unsigned long flags)
+{
+ return syscall(__NR_mseal, (unsigned long)ptr, len, flags);
+}
+#else
+static int sys_mseal(void *ptr, size_t len, unsigned long flags)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
FIXTURE_SETUP(merge)
{
self->page_size = psize();
@@ -1217,6 +1230,81 @@ TEST_F(merge, mremap_correct_placed_faulted)
ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
}
+TEST_F(merge, merge_vmas_with_mseal)
+{
+ unsigned int page_size = self->page_size;
+ struct procmap_fd *procmap = &self->procmap;
+ char *ptr, *ptr2, *ptr3;
+ /* We need our own as cannot munmap() once sealed. */
+ char *carveout;
+
+ /* Invalid mseal() call to see if implemented. */
+ ASSERT_EQ(sys_mseal(NULL, 0, ~0UL), -1);
+ if (errno == ENOSYS)
+ SKIP(return, "mseal not supported, skipping.");
+
+ /* Map carveout. */
+ carveout = mmap(NULL, 5 * page_size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ ASSERT_NE(carveout, MAP_FAILED);
+
+ /*
+ * Map 3 separate VMAs:
+ *
+ * |-----------|-----------|-----------|
+ * | RW | RWE | RO |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ */
+ ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ ptr2 = mmap(&carveout[2 * page_size], page_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr2, MAP_FAILED);
+ ptr3 = mmap(&carveout[3 * page_size], page_size, PROT_READ,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr3, MAP_FAILED);
+
+ /*
+ * mseal the second VMA:
+ *
+ * |-----------|-----------|-----------|
+ * | RW | RWES | RO |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ */
+ ASSERT_EQ(sys_mseal(ptr2, page_size, 0), 0);
+
+ /* Make first VMA mergeable upon mseal. */
+ ASSERT_EQ(mprotect(ptr, page_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC), 0);
+ /*
+ * At this point we have:
+ *
+ * |-----------|-----------|-----------|
+ * | RWE | RWES | RO |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ *
+ * Now mseal all of the VMAs.
+ */
+ ASSERT_EQ(sys_mseal(ptr, 3 * page_size, 0), 0);
+
+ /*
+ * We should end up with:
+ *
+ * |-----------------------|-----------|
+ * | RWES | ROS |
+ * |-----------------------|-----------|
+ * ptr ptr3
+ */
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
+}
+
TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev)
{
struct procmap_fd *procmap = &self->procmap;
diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c
index 59c0dbe99a9b..bcfcac99b436 100644
--- a/tools/testing/selftests/mm/soft-dirty.c
+++ b/tools/testing/selftests/mm/soft-dirty.c
@@ -82,7 +82,9 @@ static void test_hugepage(int pagemap_fd, int pagesize)
int i, ret;
if (!thp_is_enabled()) {
- ksft_test_result_skip("Transparent Hugepages not available\n");
+ ksft_print_msg("Transparent Hugepages not available\n");
+ ksft_test_result_skip("Test %s huge page allocation\n", __func__);
+ ksft_test_result_skip("Test %s huge page dirty bit\n", __func__);
return;
}
diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
index e0167111bdd1..500d07c4938b 100644
--- a/tools/testing/selftests/mm/split_huge_page_test.c
+++ b/tools/testing/selftests/mm/split_huge_page_test.c
@@ -21,6 +21,7 @@
#include <time.h>
#include "vm_util.h"
#include "kselftest.h"
+#include "thp_settings.h"
uint64_t pagesize;
unsigned int pageshift;
@@ -255,21 +256,6 @@ static int check_after_split_folio_orders(char *vaddr_start, size_t len,
return status;
}
-static void write_file(const char *path, const char *buf, size_t buflen)
-{
- int fd;
- ssize_t numwritten;
-
- fd = open(path, O_WRONLY);
- if (fd == -1)
- ksft_exit_fail_msg("%s open failed: %s\n", path, strerror(errno));
-
- numwritten = write(fd, buf, buflen - 1);
- close(fd);
- if (numwritten < 1)
- ksft_exit_fail_msg("Write failed\n");
-}
-
static void write_debugfs(const char *fmt, ...)
{
char input[INPUT_MAX];
@@ -772,6 +758,9 @@ int main(int argc, char **argv)
ksft_finished();
}
+ if (!thp_is_enabled())
+ ksft_exit_skip("Transparent Hugepages not available\n");
+
if (argc > 1)
optional_xfs_path = argv[1];
diff --git a/tools/testing/selftests/mm/thp_settings.c b/tools/testing/selftests/mm/thp_settings.c
index 574bd0f8ae48..e748ebfb3d4e 100644
--- a/tools/testing/selftests/mm/thp_settings.c
+++ b/tools/testing/selftests/mm/thp_settings.c
@@ -6,6 +6,7 @@
#include <string.h>
#include <unistd.h>
+#include "vm_util.h"
#include "thp_settings.h"
#define THP_SYSFS "/sys/kernel/mm/transparent_hugepage/"
@@ -64,29 +65,6 @@ int read_file(const char *path, char *buf, size_t buflen)
return (unsigned int) numread;
}
-int write_file(const char *path, const char *buf, size_t buflen)
-{
- int fd;
- ssize_t numwritten;
-
- fd = open(path, O_WRONLY);
- if (fd == -1) {
- printf("open(%s)\n", path);
- exit(EXIT_FAILURE);
- return 0;
- }
-
- numwritten = write(fd, buf, buflen - 1);
- close(fd);
- if (numwritten < 1) {
- printf("write(%s)\n", buf);
- exit(EXIT_FAILURE);
- return 0;
- }
-
- return (unsigned int) numwritten;
-}
-
unsigned long read_num(const char *path)
{
char buf[21];
@@ -104,10 +82,7 @@ void write_num(const char *path, unsigned long num)
char buf[21];
sprintf(buf, "%ld", num);
- if (!write_file(path, buf, strlen(buf) + 1)) {
- perror(path);
- exit(EXIT_FAILURE);
- }
+ write_file(path, buf, strlen(buf) + 1);
}
int thp_read_string(const char *name, const char * const strings[])
@@ -165,11 +140,7 @@ void thp_write_string(const char *name, const char *val)
printf("%s: Pathname is too long\n", __func__);
exit(EXIT_FAILURE);
}
-
- if (!write_file(path, val, strlen(val) + 1)) {
- perror(path);
- exit(EXIT_FAILURE);
- }
+ write_file(path, val, strlen(val) + 1);
}
unsigned long thp_read_num(const char *name)
diff --git a/tools/testing/selftests/mm/thp_settings.h b/tools/testing/selftests/mm/thp_settings.h
index 76eeb712e5f1..7748a9009191 100644
--- a/tools/testing/selftests/mm/thp_settings.h
+++ b/tools/testing/selftests/mm/thp_settings.h
@@ -63,7 +63,6 @@ struct thp_settings {
};
int read_file(const char *path, char *buf, size_t buflen);
-int write_file(const char *path, const char *buf, size_t buflen);
unsigned long read_num(const char *path);
void write_num(const char *path, unsigned long num);
diff --git a/tools/testing/selftests/mm/transhuge-stress.c b/tools/testing/selftests/mm/transhuge-stress.c
index bcad47c09518..7a9f1035099b 100644
--- a/tools/testing/selftests/mm/transhuge-stress.c
+++ b/tools/testing/selftests/mm/transhuge-stress.c
@@ -17,6 +17,7 @@
#include <sys/mman.h>
#include "vm_util.h"
#include "kselftest.h"
+#include "thp_settings.h"
int backing_fd = -1;
int mmap_flags = MAP_ANONYMOUS | MAP_NORESERVE | MAP_PRIVATE;
@@ -37,6 +38,9 @@ int main(int argc, char **argv)
ksft_print_header();
+ if (!thp_is_enabled())
+ ksft_exit_skip("Transparent Hugepages not available\n");
+
ram = sysconf(_SC_PHYS_PAGES);
if (ram > SIZE_MAX / psize() / 4)
ram = SIZE_MAX / 4;
diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c
index a6d4ff7dfdc0..db94564f4431 100644
--- a/tools/testing/selftests/mm/vm_util.c
+++ b/tools/testing/selftests/mm/vm_util.c
@@ -764,3 +764,27 @@ int unpoison_memory(unsigned long pfn)
return ret > 0 ? 0 : -errno;
}
+
+void write_file(const char *path, const char *buf, size_t buflen)
+{
+ int fd, saved_errno;
+ ssize_t numwritten;
+
+ if (buflen < 2)
+ ksft_exit_fail_msg("Incorrect buffer len: %zu\n", buflen);
+
+ fd = open(path, O_WRONLY);
+ if (fd == -1)
+ ksft_exit_fail_msg("%s open failed: %s\n", path, strerror(errno));
+
+ numwritten = write(fd, buf, buflen - 1);
+ saved_errno = errno;
+ close(fd);
+ errno = saved_errno;
+ if (numwritten < 0)
+ ksft_exit_fail_msg("%s write(%.*s) failed: %s\n", path, (int)(buflen - 1),
+ buf, strerror(errno));
+ if (numwritten != buflen - 1)
+ ksft_exit_fail_msg("%s write(%.*s) is truncated, expected %zu bytes, got %zd bytes\n",
+ path, (int)(buflen - 1), buf, buflen - 1, numwritten);
+}
diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h
index e9c4e24769c1..1a07305ceff4 100644
--- a/tools/testing/selftests/mm/vm_util.h
+++ b/tools/testing/selftests/mm/vm_util.h
@@ -166,3 +166,5 @@ int unpoison_memory(unsigned long pfn);
#define PAGEMAP_PRESENT(ent) (((ent) & (1ull << 63)) != 0)
#define PAGEMAP_PFN(ent) ((ent) & ((1ull << 55) - 1))
+
+void write_file(const char *path, const char *buf, size_t buflen);
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index a275ed584026..f3da38c54d27 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -96,6 +96,7 @@ TEST_PROGS := \
srv6_hl2encap_red_l2vpn_test.sh \
srv6_iptunnel_cache.sh \
stress_reuseport_listen.sh \
+ tcp_ecmp_failover.sh \
tcp_fastopen_backup_key.sh \
test_bpf.sh \
test_bridge_backup_port.sh \
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 2a390cae41bf..94d722770420 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -101,6 +101,7 @@ CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_SCH_NETEM=y
CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_TEAM=y
CONFIG_NET_VRF=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_OVS=y
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index 6eb7f95e70e1..ac868a731694 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -1209,6 +1209,28 @@ ipv6_fcnal_runtime()
run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 124"
log_test $? 0 "IPv6 route using a group after replacing v4 gateways"
+ # Replacing an IPv6 nexthop with an IPv4 nexthop should update has_v4
+ # for all groups using it, preventing IPv6 routes from referencing the
+ # group after the replace.
+ run_cmd "$IP nexthop add id 89 via 2001:db8:91::2 dev veth1"
+ run_cmd "$IP nexthop add id 125 group 89"
+ run_cmd "$IP nexthop replace id 89 via 172.16.1.1 dev veth1"
+ run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 125"
+ log_test $? 2 "IPv6 route can not use group after v6 nexthop replaced by v4"
+
+ # Same scenario but with a blackhole nexthop: the group has no IPv6
+ # routes yet when the replace happens, so fib6_check_nh_list returns
+ # early without checking. has_v4 must still be updated to block
+ # subsequent IPv6 route additions.
+ run_cmd "$IP nexthop flush >/dev/null 2>&1"
+ run_cmd "$IP -6 nexthop add id 90 blackhole"
+ run_cmd "$IP nexthop add id 125 group 90"
+ run_cmd "$IP nexthop replace id 90 blackhole"
+ run_cmd "$IP -6 ro add 2001:db8:101::1/128 nhid 125"
+ log_test $? 2 "IPv6 route reject v6 blackhole replaced by v4 blackhole"
+ run_cmd "ip netns exec $me ping -6 2001:db8:101::1 -c1 -w$PING_TIMEOUT"
+ log_test $? 2 "Ping unreachable after rejected route"
+
$IP nexthop flush >/dev/null 2>&1
#
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index d847ff1737c3..27cbda68144e 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -322,6 +322,33 @@ wait_connected()
done
}
+chk_sndbuf()
+{
+ local server_sndbuf client_sndbuf msg
+ local port=${1}
+
+ msg="....chk sndbuf server/client"
+ server_sndbuf=$(ss -N "${ns}" -inmHM "sport" "${port}" | \
+ sed -n 's/.*tb\([0-9]\+\).*/\1/p')
+ client_sndbuf=$(ss -N "${ns}" -inmHM "dport" "${port}" | \
+ sed -n 's/.*tb\([0-9]\+\).*/\1/p')
+
+ mptcp_lib_print_title "${msg}"
+ if [ -z "${server_sndbuf}" ] || [ -z "${client_sndbuf}" ]; then
+ mptcp_lib_pr_fail "sndbuf S=${server_sndbuf} C=${client_sndbuf}"
+ mptcp_lib_result_fail "${msg}"
+ ret=${KSFT_FAIL}
+ elif [ "${server_sndbuf}" != "${client_sndbuf}" ]; then
+ mptcp_lib_pr_fail "sndbuf S=${server_sndbuf} != C=${client_sndbuf}"
+ mptcp_lib_result_fail "${msg}"
+ ret=${KSFT_FAIL}
+ else
+ mptcp_lib_pr_ok
+ mptcp_lib_result_pass "${msg}"
+ fi
+}
+
+
trap cleanup EXIT
mptcp_lib_ns_init ns
@@ -341,6 +368,7 @@ echo "b" | \
127.0.0.1 >/dev/null &
wait_connected $ns 10000
chk_msk_nr 2 "after MPC handshake"
+chk_sndbuf 10000
chk_last_time_info 10000
chk_msk_remote_key_nr 2 "....chk remote_key"
chk_msk_fallback_nr 0 "....chk no fallback"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
index 5fea7e7df628..989a5975dcea 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
@@ -474,20 +474,24 @@ mptcp_lib_wait_local_port_listen() {
wait_local_port_listen "${@}" "tcp"
}
+# $1: error file, $2: cmd, $3: expected msg, [$4: expected error]
mptcp_lib_check_output() {
local err="${1}"
local cmd="${2}"
local expected="${3}"
+ local exp_error="${4:-0}"
local cmd_ret=0
local out
- if ! out=$(${cmd} 2>"${err}"); then
- cmd_ret=${?}
- fi
+ out=$(${cmd} 2>"${err}") || cmd_ret=1
- if [ ${cmd_ret} -ne 0 ]; then
- mptcp_lib_pr_fail "command execution '${cmd}' stderr"
- cat "${err}"
+ if [ "${cmd_ret}" != "${exp_error}" ]; then
+ mptcp_lib_pr_fail "unexpected returned code for '${cmd}', info:"
+ if [ "${exp_error}" = 0 ]; then
+ cat "${err}"
+ else
+ echo "${out}"
+ fi
return 2
elif [ "${out}" = "${expected}" ]; then
return 0
diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh
index 123d9d7a0278..04594dfc22b1 100755
--- a/tools/testing/selftests/net/mptcp/pm_netlink.sh
+++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh
@@ -122,10 +122,12 @@ check()
local cmd="$1"
local expected="$2"
local msg="$3"
+ local exp_error="$4"
local rc=0
mptcp_lib_print_title "$msg"
- mptcp_lib_check_output "${err}" "${cmd}" "${expected}" || rc=${?}
+ mptcp_lib_check_output "${err}" "${cmd}" "${expected}" "${exp_error}" ||
+ rc=${?}
if [ ${rc} -eq 2 ]; then
mptcp_lib_result_fail "${msg} # error ${rc}"
ret=${KSFT_FAIL}
@@ -158,13 +160,13 @@ check "show_endpoints" \
"3,10.0.1.3,signal backup")" "dump addrs"
del_endpoint 2
-check "get_endpoint 2" "" "simple del addr"
+check "get_endpoint 2" "" "simple del addr" 1
check "show_endpoints" \
"$(format_endpoints "1,10.0.1.1" \
"3,10.0.1.3,signal backup")" "dump addrs after del"
add_endpoint 10.0.1.3 2>/dev/null
-check "get_endpoint 4" "" "duplicate addr"
+check "get_endpoint 4" "" "duplicate addr" 1
add_endpoint 10.0.1.4 flags signal
check "get_endpoint 4" "$(format_endpoints "4,10.0.1.4,signal")" "id addr increment"
@@ -173,7 +175,7 @@ for i in $(seq 5 9); do
add_endpoint "10.0.1.${i}" flags signal >/dev/null 2>&1
done
check "get_endpoint 9" "$(format_endpoints "9,10.0.1.9,signal")" "hard addr limit"
-check "get_endpoint 10" "" "above hard addr limit"
+check "get_endpoint 10" "" "above hard addr limit" 1
del_endpoint 9
for i in $(seq 10 255); do
@@ -192,9 +194,13 @@ check "show_endpoints" \
flush_endpoint
check "show_endpoints" "" "flush addrs"
-add_endpoint 10.0.1.1 flags unknown
-check "show_endpoints" "$(format_endpoints "1,10.0.1.1")" "ignore unknown flags"
-flush_endpoint
+# "unknown" flag is only supported by pm_nl_ctl
+if ! mptcp_lib_is_ip_mptcp; then
+ add_endpoint 10.0.1.1 flags unknown
+ check "show_endpoints" "$(format_endpoints "1,10.0.1.1")" \
+ "ignore unknown flags"
+ flush_endpoint
+fi
set_limits 9 1 2>/dev/null
check "get_limits" "${default_limits}" "rcv addrs above hard limit"
diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
index b327d3061ed5..3cdd953f6813 100755
--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
+++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
@@ -26,6 +26,7 @@ tests="
netlink_checks ovsnl: validate netlink attrs and settings
upcall_interfaces ovs: test the upcall interfaces
tunnel_metadata ovs: test extraction of tunnel metadata
+ tunnel_refcount ovs: test tunnel vport reference cleanup
drop_reason drop: test drop reasons are emitted
psample psample: Sampling packets with psample"
@@ -830,6 +831,42 @@ test_tunnel_metadata() {
return 0
}
+test_tunnel_refcount() {
+ sbxname="test_tunnel_refcount"
+ sbx_add "${sbxname}" || return 1
+
+ ovs_sbx "${sbxname}" ip netns add trefns || return 1
+ on_exit "ovs_sbx ${sbxname} ip netns del trefns"
+
+ for tun_type in gre vxlan geneve; do
+ info "testing ${tun_type} tunnel vport refcount"
+
+ ovs_sbx "${sbxname}" ip netns exec trefns \
+ python3 $ovs_base/ovs-dpctl.py \
+ add-dp dp-${tun_type} || return 1
+
+ ovs_sbx "${sbxname}" ip netns exec trefns \
+ python3 $ovs_base/ovs-dpctl.py \
+ add-if --no-lwt -t ${tun_type} \
+ dp-${tun_type} ovs-${tun_type}0 || return 1
+
+ ovs_wait ip -netns trefns link show \
+ ovs-${tun_type}0 >/dev/null 2>&1 || return 1
+
+ info "deleting dp - may hang if reference counting is broken"
+ ovs_sbx "${sbxname}" ip netns exec trefns \
+ python3 $ovs_base/ovs-dpctl.py \
+ del-dp dp-${tun_type} &
+
+ dev_removed() {
+ ! ip -netns trefns link show "$1" >/dev/null 2>&1
+ }
+ ovs_wait dev_removed dp-${tun_type} || return 1
+ ovs_wait dev_removed ovs-${tun_type}0 || return 1
+ done
+ return 0
+}
+
run_test() {
(
tname="$1"
diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
index 848f61fdcee0..bbe35e2718d2 100644
--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
@@ -11,7 +11,6 @@ import logging
import math
import multiprocessing
import re
-import socket
import struct
import sys
import time
@@ -2069,7 +2068,7 @@ class OvsVport(GenericNetlinkSocket):
elif vport_type == "internal":
return OvsVport.OVS_VPORT_TYPE_INTERNAL
elif vport_type == "gre":
- return OvsVport.OVS_VPORT_TYPE_INTERNAL
+ return OvsVport.OVS_VPORT_TYPE_GRE
elif vport_type == "vxlan":
return OvsVport.OVS_VPORT_TYPE_VXLAN
elif vport_type == "geneve":
@@ -2121,6 +2120,7 @@ class OvsVport(GenericNetlinkSocket):
)
TUNNEL_DEFAULTS = [("geneve", 6081),
+ ("gre", 0),
("vxlan", 4789)]
for tnl in TUNNEL_DEFAULTS:
@@ -2129,9 +2129,13 @@ class OvsVport(GenericNetlinkSocket):
dport = tnl[1]
if not lwt:
+ if tnl[0] == "gre":
+ # GRE tunnels have no options.
+ break
+
vportopt = OvsVport.ovs_vport_msg.vportopts()
vportopt["attrs"].append(
- ["OVS_TUNNEL_ATTR_DST_PORT", socket.htons(dport)]
+ ["OVS_TUNNEL_ATTR_DST_PORT", dport]
)
msg["attrs"].append(
["OVS_VPORT_ATTR_OPTIONS", vportopt]
@@ -2145,6 +2149,9 @@ class OvsVport(GenericNetlinkSocket):
geneve_port=dport,
geneve_collect_metadata=True,
geneve_udp_zero_csum6_rx=1)
+ elif tnl[0] == "gre":
+ ipr.link("add", ifname=vport_ifname, kind="gretap",
+ gre_collect_metadata=True)
elif tnl[0] == "vxlan":
ipr.link("add", ifname=vport_ifname, kind=tnl[0],
vxlan_learning=0, vxlan_collect_metadata=1,
@@ -2563,7 +2570,7 @@ def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB(), vpl=OvsVport()):
if vpo:
dpo = vpo.get_attr("OVS_TUNNEL_ATTR_DST_PORT")
if dpo:
- opts += " tnl-dport:%s" % socket.ntohs(dpo)
+ opts += " tnl-dport:%s" % dpo
print(
" port %d: %s (%s%s)"
% (
@@ -2632,7 +2639,7 @@ def main(argv):
"--ptype",
type=str,
default="netdev",
- choices=["netdev", "internal", "geneve", "vxlan"],
+ choices=["netdev", "internal", "gre", "geneve", "vxlan"],
help="Interface type (default netdev)",
)
addifcmd.add_argument(
@@ -2645,7 +2652,7 @@ def main(argv):
addifcmd.add_argument(
"-l",
"--lwt",
- type=bool,
+ action=argparse.BooleanOptionalAction,
default=True,
help="Use LWT infrastructure instead of vport (default true)."
)
diff --git a/tools/testing/selftests/net/ovpn/common.sh b/tools/testing/selftests/net/ovpn/common.sh
index 4c08f756e63a..2d844eb3aa6e 100644
--- a/tools/testing/selftests/net/ovpn/common.sh
+++ b/tools/testing/selftests/net/ovpn/common.sh
@@ -4,62 +4,181 @@
#
# Author: Antonio Quartulli <antonio@openvpn.net>
-UDP_PEERS_FILE=${UDP_PEERS_FILE:-udp_peers.txt}
-TCP_PEERS_FILE=${TCP_PEERS_FILE:-tcp_peers.txt}
-OVPN_CLI=${OVPN_CLI:-./ovpn-cli}
-YNL_CLI=${YNL_CLI:-../../../../net/ynl/pyynl/cli.py}
-ALG=${ALG:-aes}
-PROTO=${PROTO:-UDP}
-FLOAT=${FLOAT:-0}
-SYMMETRIC_ID=${SYMMETRIC_ID:-0}
-
-export ID_OFFSET=$(( 9 * (SYMMETRIC_ID == 0) ))
-
-JQ_FILTER='map(select(.msg.peer | has("remote-ipv6") | not)) |
+OVPN_COMMON_DIR=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
+source "$OVPN_COMMON_DIR/../../kselftest/ktap_helpers.sh"
+
+OVPN_UDP_PEERS_FILE=${OVPN_UDP_PEERS_FILE:-udp_peers.txt}
+OVPN_TCP_PEERS_FILE=${OVPN_TCP_PEERS_FILE:-tcp_peers.txt}
+OVPN_CLI=${OVPN_CLI:-${OVPN_COMMON_DIR}/ovpn-cli}
+OVPN_YNL=${OVPN_YNL:-${OVPN_COMMON_DIR}/../../../../net/ynl/pyynl/cli.py}
+OVPN_ALG=${OVPN_ALG:-aes}
+OVPN_PROTO=${OVPN_PROTO:-UDP}
+OVPN_FLOAT=${OVPN_FLOAT:-0}
+OVPN_SYMMETRIC_ID=${OVPN_SYMMETRIC_ID:-0}
+OVPN_VERBOSE=${OVPN_VERBOSE:-0}
+
+export OVPN_ID_OFFSET=$(( 9 * (OVPN_SYMMETRIC_ID == 0) ))
+
+OVPN_JQ_FILTER='map(if type == "array" then .[] else . end) |
+ map(select(.msg.peer | has("remote-ipv6") | not)) |
map(del(.msg.ifindex)) | sort_by(.msg.peer.id)[]'
-LAN_IP="11.11.11.11"
+OVPN_LAN_IP="11.11.11.11"
+
+declare -A OVPN_TMP_JSONS=()
+declare -A OVPN_LISTENER_PIDS=()
+OVPN_CURRENT_STAGE=""
+
+ovpn_is_verbose() {
+ [[ "${OVPN_VERBOSE}" == "1" ]]
+}
+
+ovpn_log() {
+ ovpn_is_verbose || return 0
+ printf '%s\n' "$*"
+}
+
+ovpn_print_cmd_output() {
+ local output_file="$1"
+ local line
+
+ [[ -s "${output_file}" ]] || return 0
+
+ while IFS= read -r line; do
+ ovpn_log "${line}"
+ done < "${output_file}"
+}
+
+ovpn_cmd_run() {
+ local mode="$1"
+ local label="$2"
+ local output_file
+ local rc
+ local ret=0
+
+ shift 2
+
+ output_file=$(mktemp)
+ if "$@" >"${output_file}" 2>&1; then
+ rc=0
+ else
+ rc=$?
+ fi
+
+ case "${mode}" in
+ ok)
+ if [[ "${rc}" -ne 0 ]]; then
+ cat "${output_file}"
+ printf '%s\n' \
+ "${label}: command failed with rc=${rc}: $*"
+ ret="${rc}"
+ fi
+ ;;
+ mayfail)
+ ;;
+ fail)
+ [[ "${rc}" -eq 0 ]] && ret=1
+ ;;
+ esac
+
+ if ovpn_is_verbose && [[ "${rc}" -eq 0 || "${mode}" != "ok" ]]; then
+ ovpn_print_cmd_output "${output_file}"
+ fi
+
+ rm -f "${output_file}"
+ return "${ret}"
+}
+
+ovpn_cmd_ok() {
+ ovpn_cmd_run ok "$@"
+}
+
+ovpn_cmd_mayfail() {
+ ovpn_cmd_run mayfail "$@"
+}
+
+ovpn_cmd_fail() {
+ ovpn_cmd_run fail "$@"
+}
-declare -A tmp_jsons=()
-declare -A listener_pids=()
+ovpn_run_bg() {
+ local pid_var="$1"
-create_ns() {
- ip netns add peer${1}
+ shift
+ if ovpn_is_verbose; then
+ "$@" &
+ else
+ "$@" >/dev/null 2>&1 &
+ fi
+
+ printf -v "${pid_var}" '%s' "$!"
+}
+
+ovpn_run_stage() {
+ local label="$1"
+
+ shift
+ OVPN_CURRENT_STAGE="${label}"
+ "$@"
+ OVPN_CURRENT_STAGE=""
+ ktap_test_pass "${label}"
}
-setup_ns() {
+ovpn_stage_err() {
+ # ERR trap is global under set -eE: only report failures that happen
+ # while ovpn_run_stage() is actively executing a stage body.
+ if [[ -n "${OVPN_CURRENT_STAGE}" ]]; then
+ ktap_test_fail "${OVPN_CURRENT_STAGE}"
+ OVPN_CURRENT_STAGE=""
+ fi
+}
+
+ovpn_create_ns() {
+ ip netns add "ovpn_peer${1}"
+}
+
+ovpn_setup_ns() {
+ local peer="ovpn_peer${1}"
+ local server_ns="ovpn_peer0"
+ local peer_ns
MODE="P2P"
if [ ${1} -eq 0 ]; then
MODE="MP"
- for p in $(seq 1 ${NUM_PEERS}); do
- ip link add veth${p} netns peer0 type veth peer name veth${p} netns peer${p}
+ for p in $(seq 1 ${OVPN_NUM_PEERS}); do
+ peer_ns="ovpn_peer${p}"
+ ip link add veth${p} netns "${server_ns}" type veth \
+ peer name veth${p} netns "${peer_ns}"
- ip -n peer0 addr add 10.10.${p}.1/24 dev veth${p}
- ip -n peer0 addr add fd00:0:0:${p}::1/64 dev veth${p}
- ip -n peer0 link set veth${p} up
+ ip -n "${server_ns}" addr add 10.10.${p}.1/24 dev \
+ veth${p}
+ ip -n "${server_ns}" addr add fd00:0:0:${p}::1/64 dev \
+ veth${p}
+ ip -n "${server_ns}" link set veth${p} up
- ip -n peer${p} addr add 10.10.${p}.2/24 dev veth${p}
- ip -n peer${p} addr add fd00:0:0:${p}::2/64 dev veth${p}
- ip -n peer${p} link set veth${p} up
+ ip -n "${peer_ns}" addr add 10.10.${p}.2/24 dev veth${p}
+ ip -n "${peer_ns}" addr add fd00:0:0:${p}::2/64 dev \
+ veth${p}
+ ip -n "${peer_ns}" link set veth${p} up
done
fi
- ip netns exec peer${1} ${OVPN_CLI} new_iface tun${1} $MODE
- ip -n peer${1} addr add ${2} dev tun${1}
+ ip netns exec "${peer}" ${OVPN_CLI} new_iface tun${1} $MODE
+ ip -n "${peer}" addr add ${2} dev tun${1}
# add a secondary IP to peer 1, to test a LAN behind a client
- if [ ${1} -eq 1 -a -n "${LAN_IP}" ]; then
- ip -n peer${1} addr add ${LAN_IP} dev tun${1}
- ip -n peer0 route add ${LAN_IP} via $(echo ${2} |sed -e s'!/.*!!') dev tun0
+ if [ ${1} -eq 1 -a -n "${OVPN_LAN_IP}" ]; then
+ ip -n "${peer}" addr add ${OVPN_LAN_IP} dev tun${1}
+ ip -n "${server_ns}" route add ${OVPN_LAN_IP} via \
+ $(echo ${2} |sed -e s'!/.*!!') dev tun0
fi
if [ -n "${3}" ]; then
- ip -n peer${1} link set mtu ${3} dev tun${1}
+ ip -n "${peer}" link set mtu ${3} dev tun${1}
fi
- ip -n peer${1} link set tun${1} up
+ ip -n "${peer}" link set tun${1} up
}
-build_capture_filter() {
+ovpn_build_capture_filter() {
# match the first four bytes of the openvpn data payload
- if [ "${PROTO}" == "UDP" ]; then
+ if [ "${OVPN_PROTO}" == "UDP" ]; then
# For UDP, libpcap transport indexing only works for IPv4, so
# use an explicit IPv4 or IPv6 expression based on the peer
# address. The IPv6 branch assumes there are no extension
@@ -76,108 +195,170 @@ build_capture_filter() {
fi
}
-setup_listener() {
+ovpn_setup_listener() {
+ local peer="$1"
+ local file
+ local peer_ns="ovpn_peer${peer}"
+
file=$(mktemp)
- PYTHONUNBUFFERED=1 ip netns exec peer${p} ${YNL_CLI} --family ovpn \
- --subscribe peers --output-json --duration 40 > ${file} &
- listener_pids[$1]=$!
- tmp_jsons[$1]="${file}"
+ PYTHONUNBUFFERED=1 ip netns exec "${peer_ns}" "${OVPN_YNL}" --family \
+ ovpn --subscribe peers --output-json > "${file}" \
+ 2>/dev/null &
+ OVPN_LISTENER_PIDS["${peer}"]=$!
+ OVPN_TMP_JSONS["${peer}"]="${file}"
}
-add_peer() {
+ovpn_add_peer() {
labels=("ASYMM" "SYMM")
- M_ID=${labels[SYMMETRIC_ID]}
+ local peer_ns
+ local server_ns="ovpn_peer0"
+ M_ID=${labels[OVPN_SYMMETRIC_ID]}
- if [ "${PROTO}" == "UDP" ]; then
+ if [ "${OVPN_PROTO}" == "UDP" ]; then
if [ ${1} -eq 0 ]; then
- ip netns exec peer0 ${OVPN_CLI} new_multi_peer tun0 1 \
- ${M_ID} ${UDP_PEERS_FILE}
+ ip netns exec "${server_ns}" ${OVPN_CLI} \
+ new_multi_peer tun0 1 ${M_ID} \
+ ${OVPN_UDP_PEERS_FILE}
- for p in $(seq 1 ${NUM_PEERS}); do
- ip netns exec peer0 ${OVPN_CLI} new_key tun0 ${p} 1 0 ${ALG} 0 \
+ for p in $(seq 1 ${OVPN_NUM_PEERS}); do
+ ip netns exec "${server_ns}" ${OVPN_CLI} \
+ new_key tun0 ${p} 1 0 ${OVPN_ALG} 0 \
data64.key
done
else
- if [ "${SYMMETRIC_ID}" -eq 1 ]; then
+ peer_ns="ovpn_peer${1}"
+ if [ "${OVPN_SYMMETRIC_ID}" -eq 1 ]; then
PEER_ID=${1}
TX_ID="none"
else
PEER_ID=$(awk "NR == ${1} {print \$2}" \
- ${UDP_PEERS_FILE})
+ ${OVPN_UDP_PEERS_FILE})
TX_ID=${1}
fi
- RADDR=$(awk "NR == ${1} {print \$3}" ${UDP_PEERS_FILE})
- RPORT=$(awk "NR == ${1} {print \$4}" ${UDP_PEERS_FILE})
- LPORT=$(awk "NR == ${1} {print \$6}" ${UDP_PEERS_FILE})
- ip netns exec peer${1} ${OVPN_CLI} new_peer tun${1} \
- ${PEER_ID} ${TX_ID} ${LPORT} ${RADDR} ${RPORT}
- ip netns exec peer${1} ${OVPN_CLI} new_key tun${1} \
- ${PEER_ID} 1 0 ${ALG} 1 data64.key
+ RADDR=$(awk "NR == ${1} {print \$3}" \
+ ${OVPN_UDP_PEERS_FILE})
+ RPORT=$(awk "NR == ${1} {print \$4}" \
+ ${OVPN_UDP_PEERS_FILE})
+ LPORT=$(awk "NR == ${1} {print \$6}" \
+ ${OVPN_UDP_PEERS_FILE})
+ ip netns exec "${peer_ns}" ${OVPN_CLI} new_peer \
+ tun${1} ${PEER_ID} ${TX_ID} ${LPORT} ${RADDR} \
+ ${RPORT}
+ ip netns exec "${peer_ns}" ${OVPN_CLI} new_key tun${1} \
+ ${PEER_ID} 1 0 ${OVPN_ALG} 1 data64.key
fi
else
if [ ${1} -eq 0 ]; then
- (ip netns exec peer0 ${OVPN_CLI} listen tun0 1 ${M_ID} \
- ${TCP_PEERS_FILE} && {
- for p in $(seq 1 ${NUM_PEERS}); do
- ip netns exec peer0 ${OVPN_CLI} new_key tun0 ${p} 1 0 \
- ${ALG} 0 data64.key
+ (ip netns exec "${server_ns}" ${OVPN_CLI} listen tun0 \
+ 1 ${M_ID} ${OVPN_TCP_PEERS_FILE} && {
+ for p in $(seq 1 ${OVPN_NUM_PEERS}); do
+ ip netns exec "${server_ns}" \
+ ${OVPN_CLI} new_key tun0 ${p} \
+ 1 0 ${OVPN_ALG} 0 data64.key
done
}) &
sleep 5
else
- if [ "${SYMMETRIC_ID}" -eq 1 ]; then
+ peer_ns="ovpn_peer${1}"
+ if [ "${OVPN_SYMMETRIC_ID}" -eq 1 ]; then
PEER_ID=${1}
TX_ID="none"
else
PEER_ID=$(awk "NR == ${1} {print \$2}" \
- ${TCP_PEERS_FILE})
+ ${OVPN_TCP_PEERS_FILE})
TX_ID=${1}
fi
- ip netns exec peer${1} ${OVPN_CLI} connect tun${1} \
+ ip netns exec "${peer_ns}" ${OVPN_CLI} connect tun${1} \
${PEER_ID} ${TX_ID} 10.10.${1}.1 1 data64.key
fi
fi
}
-compare_ntfs() {
- if [ ${#tmp_jsons[@]} -gt 0 ]; then
+ovpn_compare_ntfs() {
+ local diff_rc=0
+ local diff_file
+
+ if [ ${#OVPN_TMP_JSONS[@]} -gt 0 ]; then
suffix=""
- [ "${SYMMETRIC_ID}" -eq 1 ] && suffix="${suffix}-symm"
- [ "$FLOAT" == 1 ] && suffix="${suffix}-float"
+ [ "${OVPN_SYMMETRIC_ID}" -eq 1 ] && suffix="${suffix}-symm"
+ [ "$OVPN_FLOAT" == 1 ] && suffix="${suffix}-float"
expected="json/peer${1}${suffix}.json"
- received="${tmp_jsons[$1]}"
+ received="${OVPN_TMP_JSONS[$1]}"
+ diff_file=$(mktemp)
- kill -TERM ${listener_pids[$1]} || true
- wait ${listener_pids[$1]} || true
+ ovpn_stop_listener "${1}" 1
printf "Checking notifications for peer ${1}... "
- if diff <(jq -s "${JQ_FILTER}" ${expected}) \
- <(jq -s "${JQ_FILTER}" ${received}); then
+ if diff <(jq -s "${OVPN_JQ_FILTER}" ${expected}) \
+ <(jq -s "${OVPN_JQ_FILTER}" ${received}) \
+ >"${diff_file}" 2>&1; then
echo "OK"
+ else
+ diff_rc=$?
+ echo "failed"
+ cat "${diff_file}"
fi
- rm -f ${received} || true
+ rm -f "${diff_file}" || true
+ rm -f "${received}" || true
+ unset "OVPN_TMP_JSONS[$1]"
fi
+
+ return "${diff_rc}"
}
-cleanup() {
+ovpn_stop_listener() {
+ local peer="$1"
+ local keep_json="${2:-0}"
+ local pid="${OVPN_LISTENER_PIDS[$peer]:-}"
+ local json="${OVPN_TMP_JSONS[$peer]:-}"
+
+ if [[ -n "${pid}" ]]; then
+ kill -TERM "${pid}" 2>/dev/null || true
+ wait "${pid}" 2>/dev/null || true
+ unset "OVPN_LISTENER_PIDS[$peer]"
+ fi
+
+ if [[ -n "${json}" && "${keep_json}" -eq 0 ]]; then
+ rm -f "${json}" || true
+ unset "OVPN_TMP_JSONS[$peer]"
+ fi
+}
+
+ovpn_cleanup_peer_ns() {
+ local peer="$1"
+ local peer_id="${peer#ovpn_peer}"
+
+ ip -n "${peer}" link set tun${peer_id} down 2>/dev/null || true
+ ip netns exec "${peer}" ${OVPN_CLI} del_iface tun${peer_id} \
+ 1>/dev/null 2>&1 || true
+ ip netns del "${peer}" 2>/dev/null || true
+}
+
+ovpn_cleanup() {
+ local peer
+
# some ovpn-cli processes sleep in background so they need manual poking
- killall $(basename ${OVPN_CLI}) 2>/dev/null || true
+ killall "$(basename "${OVPN_CLI}")" 2>/dev/null || true
- # netns peer0 is deleted without erasing ifaces first
- for p in $(seq 1 10); do
- ip -n peer${p} link set tun${p} down 2>/dev/null || true
- ip netns exec peer${p} ${OVPN_CLI} del_iface tun${p} 2>/dev/null || true
+ for peer in "${!OVPN_LISTENER_PIDS[@]}"; do
+ ovpn_stop_listener "${peer}" 2>/dev/null
done
+
for p in $(seq 1 10); do
- ip -n peer0 link del veth${p} 2>/dev/null || true
- done
- for p in $(seq 0 10); do
- ip netns del peer${p} 2>/dev/null || true
+ ip -n ovpn_peer0 link del veth${p} 2>/dev/null || true
done
+
+ # remove from ovpn's netns pool
+ while IFS= read -r peer; do
+ [[ -n "${peer}" ]] || continue
+ ovpn_cleanup_peer_ns "${peer}" 2>/dev/null
+ done < <(ip netns list 2>/dev/null | awk '/^ovpn_/ {print $1}')
}
-if [ "${PROTO}" == "UDP" ]; then
- NUM_PEERS=${NUM_PEERS:-$(wc -l ${UDP_PEERS_FILE} | awk '{print $1}')}
+if [ "${OVPN_PROTO}" == "UDP" ]; then
+ OVPN_NUM_PEERS=${OVPN_NUM_PEERS:-$(wc -l ${OVPN_UDP_PEERS_FILE} | \
+ awk '{print $1}')}
else
- NUM_PEERS=${NUM_PEERS:-$(wc -l ${TCP_PEERS_FILE} | awk '{print $1}')}
+ OVPN_NUM_PEERS=${OVPN_NUM_PEERS:-$(wc -l ${OVPN_TCP_PEERS_FILE} | \
+ awk '{print $1}')}
fi
diff --git a/tools/testing/selftests/net/ovpn/config b/tools/testing/selftests/net/ovpn/config
index 42699740936d..d6cf033d555e 100644
--- a/tools/testing/selftests/net/ovpn/config
+++ b/tools/testing/selftests/net/ovpn/config
@@ -5,6 +5,9 @@ CONFIG_CRYPTO_GCM=y
CONFIG_DST_CACHE=y
CONFIG_INET=y
CONFIG_NET=y
+CONFIG_NETFILTER=y
CONFIG_NET_UDP_TUNNEL=y
+CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=y
CONFIG_OVPN=m
CONFIG_STREAM_PARSER=y
diff --git a/tools/testing/selftests/net/ovpn/test-chachapoly.sh b/tools/testing/selftests/net/ovpn/test-chachapoly.sh
index 32504079a2b8..cd3d94355d58 100755
--- a/tools/testing/selftests/net/ovpn/test-chachapoly.sh
+++ b/tools/testing/selftests/net/ovpn/test-chachapoly.sh
@@ -4,6 +4,6 @@
#
# Author: Antonio Quartulli <antonio@openvpn.net>
-ALG="chachapoly"
+OVPN_ALG="chachapoly"
source test.sh
diff --git a/tools/testing/selftests/net/ovpn/test-close-socket-tcp.sh b/tools/testing/selftests/net/ovpn/test-close-socket-tcp.sh
index 093d44772ffd..392d269bada5 100755
--- a/tools/testing/selftests/net/ovpn/test-close-socket-tcp.sh
+++ b/tools/testing/selftests/net/ovpn/test-close-socket-tcp.sh
@@ -4,6 +4,6 @@
#
# Author: Antonio Quartulli <antonio@openvpn.net>
-PROTO="TCP"
+OVPN_PROTO="TCP"
source test-close-socket.sh
diff --git a/tools/testing/selftests/net/ovpn/test-close-socket.sh b/tools/testing/selftests/net/ovpn/test-close-socket.sh
index 0d09df14fe8e..af1532b4d2da 100755
--- a/tools/testing/selftests/net/ovpn/test-close-socket.sh
+++ b/tools/testing/selftests/net/ovpn/test-close-socket.sh
@@ -5,41 +5,81 @@
# Author: Antonio Quartulli <antonio@openvpn.net>
#set -x
-set -e
+set -eE
source ./common.sh
-cleanup
+ovpn_test_finished=0
-modprobe -q ovpn || true
+ovpn_test_exit() {
+ ovpn_cleanup
+ modprobe -r ovpn || true
+
+ if [ "${ovpn_test_finished}" -eq 0 ]; then
+ ktap_print_totals
+ fi
+}
+
+ovpn_prepare_network() {
+ local p
+ local peer_ns
+
+ for p in $(seq 0 ${OVPN_NUM_PEERS}); do
+ ovpn_cmd_ok "create namespace peer${p}" ovpn_create_ns "${p}"
+ done
-for p in $(seq 0 ${NUM_PEERS}); do
- create_ns ${p}
-done
+ for p in $(seq 0 ${OVPN_NUM_PEERS}); do
+ ovpn_cmd_ok "configure peer${p} namespace" ovpn_setup_ns \
+ "${p}" 5.5.5.$((p + 1))/24
+ done
-for p in $(seq 0 ${NUM_PEERS}); do
- setup_ns ${p} 5.5.5.$((${p} + 1))/24
-done
+ for p in $(seq 0 ${OVPN_NUM_PEERS}); do
+ ovpn_cmd_ok "register peer${p} in overlay" ovpn_add_peer "${p}"
+ done
-for p in $(seq 0 ${NUM_PEERS}); do
- add_peer ${p}
-done
+ for p in $(seq 1 ${OVPN_NUM_PEERS}); do
+ peer_ns="ovpn_peer${p}"
+ ovpn_cmd_ok "set peer0 timeout for peer ${p}" \
+ ip netns exec ovpn_peer0 ${OVPN_CLI} set_peer tun0 \
+ ${p} 60 120
+ ovpn_cmd_ok "set peer${p} timeout for peer ${p}" \
+ ip netns exec "${peer_ns}" ${OVPN_CLI} set_peer \
+ tun${p} $((p + OVPN_ID_OFFSET)) 60 120
+ done
+}
-for p in $(seq 1 ${NUM_PEERS}); do
- ip netns exec peer0 ${OVPN_CLI} set_peer tun0 ${p} 60 120
- ip netns exec peer${p} ${OVPN_CLI} set_peer tun${p} $((${p}+9)) 60 120
-done
+ovpn_run_ping_traffic() {
+ local p
-sleep 1
+ for p in $(seq 1 ${OVPN_NUM_PEERS}); do
+ ovpn_cmd_ok "send ping traffic to peer ${p}" \
+ ip netns exec ovpn_peer0 ping -qfc 500 -w 3 \
+ 5.5.5.$((p + 1))
+ done
+}
-for p in $(seq 1 ${NUM_PEERS}); do
- ip netns exec peer0 ping -qfc 500 -w 3 5.5.5.$((${p} + 1))
-done
+ovpn_run_iperf() {
+ local iperf_pid
-ip netns exec peer0 iperf3 -1 -s &
-sleep 1
-ip netns exec peer1 iperf3 -Z -t 3 -c 5.5.5.1
+ ovpn_run_bg iperf_pid ip netns exec ovpn_peer0 iperf3 -1 -s
+ sleep 1
+ ovpn_cmd_ok "run iperf throughput flow" \
+ ip netns exec ovpn_peer1 iperf3 -Z -t 3 -c 5.5.5.1
+ wait "${iperf_pid}" || return 1
+}
+
+trap ovpn_test_exit EXIT
+trap ovpn_stage_err ERR
+
+ktap_print_header
+ktap_set_plan 3
+
+ovpn_cleanup
+modprobe -q ovpn || true
-cleanup
+ovpn_run_stage "setup network topology" ovpn_prepare_network
+ovpn_run_stage "run ping traffic" ovpn_run_ping_traffic
+ovpn_run_stage "run iperf throughput" ovpn_run_iperf
-modprobe -r ovpn || true
+ovpn_test_finished=1
+ktap_finished
diff --git a/tools/testing/selftests/net/ovpn/test-float.sh b/tools/testing/selftests/net/ovpn/test-float.sh
index ba5d725e18b0..91f8e113718e 100755
--- a/tools/testing/selftests/net/ovpn/test-float.sh
+++ b/tools/testing/selftests/net/ovpn/test-float.sh
@@ -4,6 +4,6 @@
#
# Author: Antonio Quartulli <antonio@openvpn.net>
-FLOAT="1"
+OVPN_FLOAT="1"
source test.sh
diff --git a/tools/testing/selftests/net/ovpn/test-mark.sh b/tools/testing/selftests/net/ovpn/test-mark.sh
index 8534428ed3eb..5a8f47554286 100755
--- a/tools/testing/selftests/net/ovpn/test-mark.sh
+++ b/tools/testing/selftests/net/ovpn/test-mark.sh
@@ -6,91 +6,166 @@
# Antonio Quartulli <antonio@openvpn.net>
#set -x
-set -e
+set -eE
MARK=1056
+MARK_DROP_COUNTER=0
source ./common.sh
-cleanup
-
+ovpn_test_finished=0
+
+ovpn_test_exit() {
+ ovpn_cleanup
+ modprobe -r ovpn || true
+
+ if [ "${ovpn_test_finished}" -eq 0 ]; then
+ ktap_print_totals
+ fi
+}
+
+ovpn_mark_prepare_network() {
+ local p
+ local peer_ns
+
+ for p in $(seq 0 "${OVPN_NUM_PEERS}"); do
+ ovpn_cmd_ok "create namespace peer${p}" ovpn_create_ns "${p}"
+ done
+
+ for p in $(seq 0 3); do
+ ovpn_cmd_ok "configure peer${p} namespace" ovpn_setup_ns \
+ "${p}" 5.5.5.$((p + 1))/24
+ done
+
+ ovpn_cmd_ok "create server-side multi-peer with fwmark" \
+ ip netns exec ovpn_peer0 "${OVPN_CLI}" new_multi_peer tun0 1 \
+ ASYMM "${OVPN_UDP_PEERS_FILE}" "${MARK}"
+ for p in $(seq 1 3); do
+ ovpn_cmd_ok "install server key for peer ${p}" \
+ ip netns exec ovpn_peer0 "${OVPN_CLI}" new_key tun0 \
+ "${p}" 1 0 "${OVPN_ALG}" 0 data64.key
+ done
+
+ for p in $(seq 1 3); do
+ ovpn_cmd_ok "register peer${p} in overlay" ovpn_add_peer "${p}"
+ done
+
+ for p in $(seq 1 3); do
+ peer_ns="ovpn_peer${p}"
+ ovpn_cmd_ok "set peer0 timeout for peer ${p}" \
+ ip netns exec ovpn_peer0 "${OVPN_CLI}" set_peer tun0 \
+ "${p}" 60 120
+ ovpn_cmd_ok "set peer${p} timeout for peer ${p}" \
+ ip netns exec "${peer_ns}" "${OVPN_CLI}" set_peer \
+ tun"${p}" $((p + OVPN_ID_OFFSET)) 60 120
+ done
+}
+
+ovpn_mark_run_baseline_traffic() {
+ local p
+
+ for p in $(seq 1 3); do
+ ovpn_cmd_ok "send baseline traffic to peer ${p}" \
+ ip netns exec ovpn_peer0 ping -qfc 500 -w 3 \
+ 5.5.5.$((p + 1))
+ done
+}
+
+ovpn_mark_add_drop_rule() {
+ ovpn_log "Adding an nftables drop rule based on mark value ${MARK}"
+
+ ovpn_cmd_ok "flush nft ruleset" ip netns exec ovpn_peer0 nft flush \
+ ruleset
+ ovpn_cmd_ok "create nft filter table" ip netns exec ovpn_peer0 nft \
+ "add table inet filter"
+ ovpn_cmd_ok "create nft filter output chain" \
+ ip netns exec ovpn_peer0 nft "add chain inet filter output { \
+ type filter hook output priority 0; policy accept; }"
+ ovpn_cmd_ok "add nft drop rule for mark ${MARK}" \
+ ip netns exec ovpn_peer0 nft add rule inet filter output \
+ meta mark == "${MARK}" \
+ counter drop
+
+ MARK_DROP_COUNTER=$(ip netns exec ovpn_peer0 nft list chain inet \
+ filter output | sed -n 's/.*packets \([0-9]*\).*/\1/p')
+ if [ -z "${MARK_DROP_COUNTER}" ]; then
+ printf '%s\n' "unable to read nft drop counter"
+ return 1
+ fi
+}
+
+ovpn_mark_verify_drop_traffic() {
+ local p
+ local ping_output
+ local lost_packets
+ local total_count
+
+ for p in $(seq 1 3); do
+ if ping_output=$(ip netns exec ovpn_peer0 ping -qfc 500 -w 1 \
+ 5.5.5.$((p + 1)) 2>&1); then
+			printf '%s %s\n' "expected ping to peer ${p}" \
+				"to fail after nft drop rule"
+ return 1
+ fi
+ ovpn_log "${ping_output}"
+ lost_packets=$(echo "${ping_output}" | \
+ awk '/packets transmitted/ { print $1 }')
+ if [ -z "${lost_packets}" ]; then
+			printf '%s\n' \
+				"unable to parse lost packets for peer ${p}"
+ return 1
+ fi
+ MARK_DROP_COUNTER=$((MARK_DROP_COUNTER + lost_packets))
+ done
+
+ total_count=$(ip netns exec ovpn_peer0 nft list chain inet filter \
+ output | sed -n 's/.*packets \([0-9]*\).*/\1/p')
+ if [ -z "${total_count}" ]; then
+ printf '%s\n' "unable to read final nft drop counter"
+ return 1
+ fi
+ if [ "${MARK_DROP_COUNTER}" -ne "${total_count}" ]; then
+		printf 'expected %s drops, got %s\n' \
+			"${MARK_DROP_COUNTER}" "${total_count}"
+ return 1
+ fi
+}
+
+ovpn_mark_remove_drop_rule() {
+ ovpn_log "Removing the drop rule"
+
+ ovpn_cmd_ok "flush nft ruleset" ip netns exec ovpn_peer0 nft flush \
+ ruleset
+}
+
+ovpn_mark_verify_traffic_recovery() {
+ local p
+
+ sleep 1
+ for p in $(seq 1 3); do
+ ovpn_cmd_ok "send recovery traffic to peer ${p}" \
+ ip netns exec ovpn_peer0 ping -qfc 500 -w 3 \
+ 5.5.5.$((p + 1))
+ done
+}
+
+trap ovpn_test_exit EXIT
+trap ovpn_stage_err ERR
+
+ktap_print_header
+ktap_set_plan 6
+
+ovpn_cleanup
modprobe -q ovpn || true
-for p in $(seq 0 "${NUM_PEERS}"); do
- create_ns "${p}"
-done
-
-for p in $(seq 0 3); do
- setup_ns "${p}" 5.5.5.$((p + 1))/24
-done
-
-# add peer0 with mark
-ip netns exec peer0 "${OVPN_CLI}" new_multi_peer tun0 1 ASYMM \
- "${UDP_PEERS_FILE}" \
- ${MARK}
-for p in $(seq 1 3); do
- ip netns exec peer0 "${OVPN_CLI}" new_key tun0 "${p}" 1 0 "${ALG}" 0 \
- data64.key
-done
-
-for p in $(seq 1 3); do
- add_peer "${p}"
-done
-
-for p in $(seq 1 3); do
- ip netns exec peer0 "${OVPN_CLI}" set_peer tun0 "${p}" 60 120
- ip netns exec peer"${p}" "${OVPN_CLI}" set_peer tun"${p}" \
- $((p + 9)) 60 120
-done
-
-sleep 1
-
-for p in $(seq 1 3); do
- ip netns exec peer0 ping -qfc 500 -w 3 5.5.5.$((p + 1))
-done
-
-echo "Adding an nftables drop rule based on mark value ${MARK}"
-ip netns exec peer0 nft flush ruleset
-ip netns exec peer0 nft 'add table inet filter'
-ip netns exec peer0 nft 'add chain inet filter output {
- type filter hook output priority 0;
- policy accept;
-}'
-ip netns exec peer0 nft add rule inet filter output \
- meta mark == ${MARK} \
- counter drop
-
-DROP_COUNTER=$(ip netns exec peer0 nft list chain inet filter output \
- | sed -n 's/.*packets \([0-9]*\).*/\1/p')
-sleep 1
-
-# ping should fail
-for p in $(seq 1 3); do
- PING_OUTPUT=$(ip netns exec peer0 ping \
- -qfc 500 -w 1 5.5.5.$((p + 1)) 2>&1) && exit 1
- echo "${PING_OUTPUT}"
- LOST_PACKETS=$(echo "$PING_OUTPUT" \
- | awk '/packets transmitted/ { print $1 }')
- # increment the drop counter by the amount of lost packets
- DROP_COUNTER=$((DROP_COUNTER + LOST_PACKETS))
-done
-
-# check if the final nft counter matches our counter
-TOTAL_COUNT=$(ip netns exec peer0 nft list chain inet filter output \
- | sed -n 's/.*packets \([0-9]*\).*/\1/p')
-if [ "${DROP_COUNTER}" -ne "${TOTAL_COUNT}" ]; then
- echo "Expected ${TOTAL_COUNT} drops, got ${DROP_COUNTER}"
- exit 1
-fi
-
-echo "Removing the drop rule"
-ip netns exec peer0 nft flush ruleset
-sleep 1
-
-for p in $(seq 1 3); do
- ip netns exec peer0 ping -qfc 500 -w 3 5.5.5.$((p + 1))
-done
-
-cleanup
-
-modprobe -r ovpn || true
+ovpn_run_stage "setup marked network topology" ovpn_mark_prepare_network
+ovpn_run_stage "run baseline traffic" ovpn_mark_run_baseline_traffic
+ovpn_run_stage "install nft mark drop rule" ovpn_mark_add_drop_rule
+ovpn_run_stage "drop marked traffic and count packets" \
+ ovpn_mark_verify_drop_traffic
+ovpn_run_stage "remove nft drop rule" ovpn_mark_remove_drop_rule
+ovpn_run_stage "traffic recovers after drop removal" \
+ ovpn_mark_verify_traffic_recovery
+
+ovpn_test_finished=1
+ktap_finished
diff --git a/tools/testing/selftests/net/ovpn/test-symmetric-id-float.sh b/tools/testing/selftests/net/ovpn/test-symmetric-id-float.sh
index b3711a81b463..75296fe72c39 100755
--- a/tools/testing/selftests/net/ovpn/test-symmetric-id-float.sh
+++ b/tools/testing/selftests/net/ovpn/test-symmetric-id-float.sh
@@ -5,7 +5,7 @@
# Author: Ralf Lici <ralf@mandelbit.com>
# Antonio Quartulli <antonio@openvpn.net>
-SYMMETRIC_ID="1"
-FLOAT="1"
+OVPN_SYMMETRIC_ID="1"
+OVPN_FLOAT="1"
source test.sh
diff --git a/tools/testing/selftests/net/ovpn/test-symmetric-id-tcp.sh b/tools/testing/selftests/net/ovpn/test-symmetric-id-tcp.sh
index 188cafb67b2f..680a465c49d2 100755
--- a/tools/testing/selftests/net/ovpn/test-symmetric-id-tcp.sh
+++ b/tools/testing/selftests/net/ovpn/test-symmetric-id-tcp.sh
@@ -5,7 +5,7 @@
# Author: Ralf Lici <ralf@mandelbit.com>
# Antonio Quartulli <antonio@openvpn.net>
-PROTO="TCP"
-SYMMETRIC_ID=1
+OVPN_PROTO="TCP"
+OVPN_SYMMETRIC_ID=1
source test.sh
diff --git a/tools/testing/selftests/net/ovpn/test-symmetric-id.sh b/tools/testing/selftests/net/ovpn/test-symmetric-id.sh
index 35b119c72e4f..a2e2808959d9 100755
--- a/tools/testing/selftests/net/ovpn/test-symmetric-id.sh
+++ b/tools/testing/selftests/net/ovpn/test-symmetric-id.sh
@@ -5,6 +5,6 @@
# Author: Ralf Lici <ralf@mandelbit.com>
# Antonio Quartulli <antonio@openvpn.net>
-SYMMETRIC_ID="1"
+OVPN_SYMMETRIC_ID="1"
source test.sh
diff --git a/tools/testing/selftests/net/ovpn/test-tcp.sh b/tools/testing/selftests/net/ovpn/test-tcp.sh
index ba3f1f315a34..27cc6e7b98bc 100755
--- a/tools/testing/selftests/net/ovpn/test-tcp.sh
+++ b/tools/testing/selftests/net/ovpn/test-tcp.sh
@@ -4,6 +4,6 @@
#
# Author: Antonio Quartulli <antonio@openvpn.net>
-PROTO="TCP"
+OVPN_PROTO="TCP"
source test.sh
diff --git a/tools/testing/selftests/net/ovpn/test.sh b/tools/testing/selftests/net/ovpn/test.sh
index b60e94a4094e..c06e3135fbef 100755
--- a/tools/testing/selftests/net/ovpn/test.sh
+++ b/tools/testing/selftests/net/ovpn/test.sh
@@ -5,161 +5,316 @@
# Author: Antonio Quartulli <antonio@openvpn.net>
#set -x
-set -e
+set -eE
source ./common.sh
-cleanup
+ovpn_test_finished=0
-modprobe -q ovpn || true
+ovpn_test_exit() {
+ ovpn_cleanup
+ modprobe -r ovpn || true
+
+ if [ "${ovpn_test_finished}" -eq 0 ]; then
+ ktap_print_totals
+ fi
+}
+
+ovpn_prepare_network() {
+ local p
+ local peer_ns
+
+ for p in $(seq 0 ${OVPN_NUM_PEERS}); do
+ ovpn_cmd_ok "create namespace peer${p}" ovpn_create_ns "${p}"
+ done
+
+ for p in $(seq 0 ${OVPN_NUM_PEERS}); do
+ ovpn_cmd_ok "start notification listener peer${p}" \
+ ovpn_setup_listener "${p}"
+		# Starting all YNL listeners back-to-back can intermittently
+		# stall their startup, so serialize the launches a bit.
+ sleep 0.5
+ done
+
+ for p in $(seq 0 ${OVPN_NUM_PEERS}); do
+ ovpn_cmd_ok "configure peer${p} namespace" ovpn_setup_ns \
+ "${p}" 5.5.5.$((p + 1))/24 "${MTU}"
+ done
+
+ for p in $(seq 0 ${OVPN_NUM_PEERS}); do
+ ovpn_cmd_ok "register peer${p} in overlay" ovpn_add_peer "${p}"
+ done
+
+ for p in $(seq 1 ${OVPN_NUM_PEERS}); do
+ peer_ns="ovpn_peer${p}"
+ ovpn_cmd_ok "set peer0 timeout for peer ${p}" \
+ ip netns exec ovpn_peer0 ${OVPN_CLI} set_peer tun0 \
+ ${p} 60 120
+ ovpn_cmd_ok "set peer${p} timeout for peer ${p}" \
+ ip netns exec "${peer_ns}" ${OVPN_CLI} set_peer \
+ tun${p} $((p + OVPN_ID_OFFSET)) 60 120
+ done
+}
+
+ovpn_run_basic_traffic() {
+ local p
+ local header1
+ local header2
+ local peer_ns
+ local raddr
+ local tcpdump_pid1
+ local tcpdump_pid2
+ local tcpdump_timeout="1.5s"
+
+ for p in $(seq 1 ${OVPN_NUM_PEERS}); do
+ # The first part of the data packet header consists of:
+ # - TCP only: 2 bytes for the packet length
+ # - 5 bits for opcode ("9" for DATA_V2)
+ # - 3 bits for key-id ("0" at this point)
+ # - 12 bytes for peer-id:
+		#   - with asymmetric ID: "${p}" one way and
+		#     "${p} + OVPN_ID_OFFSET" the other way
+ # - with symmetric ID: "${p}" both ways
+ header1=$(printf "0x4800000%x" ${p})
+ header2=$(printf "0x4800000%x" $((p + OVPN_ID_OFFSET)))
+ raddr=""
+ if [ "${OVPN_PROTO}" == "UDP" ]; then
+ raddr=$(awk "NR == ${p} {print \$3}" \
+ "${OVPN_UDP_PEERS_FILE}")
+ fi
+ peer_ns="ovpn_peer${p}"
+
+ timeout ${tcpdump_timeout} ip netns exec "${peer_ns}" \
+ tcpdump --immediate-mode -p -ni veth${p} -c 1 \
+ "$(ovpn_build_capture_filter "${header1}" "${raddr}")" \
+ >/dev/null 2>&1 &
+ tcpdump_pid1=$!
+ timeout ${tcpdump_timeout} ip netns exec "${peer_ns}" \
+ tcpdump --immediate-mode -p -ni veth${p} -c 1 \
+ "$(ovpn_build_capture_filter "${header2}" "${raddr}")" \
+ >/dev/null 2>&1 &
+ tcpdump_pid2=$!
+
+ sleep 0.3
+ ovpn_cmd_ok "send baseline traffic to peer ${p}" \
+ ip netns exec ovpn_peer0 \
+ ping -qfc 100 -w 3 5.5.5.$((p + 1))
+ ovpn_cmd_ok "send large-payload traffic to peer ${p}" \
+ ip netns exec ovpn_peer0 \
+ ping -qfc 100 -s 3000 -w 3 5.5.5.$((p + 1))
+
+ wait "${tcpdump_pid1}" || return 1
+ wait "${tcpdump_pid2}" || return 1
+ done
+}
+
+ovpn_run_lan_traffic() {
+ ovpn_cmd_ok "ping LAN behind peer1" \
+ ip netns exec ovpn_peer0 ping -qfc 500 -w 3 "${OVPN_LAN_IP}"
+}
+
+ovpn_run_float_mode() {
+ local p
+ local peer_ns
+
+ for p in $(seq 1 ${OVPN_NUM_PEERS}); do
+ peer_ns="ovpn_peer${p}"
+ ovpn_cmd_ok "float: remove old transport address on peer${p}" \
+ ip -n "${peer_ns}" addr del 10.10.${p}.2/24 dev veth${p}
+ ovpn_cmd_ok "float: add new transport address on peer${p}" \
+ ip -n "${peer_ns}" addr add 10.10.${p}.3/24 dev veth${p}
+ done
+ for p in $(seq 1 ${OVPN_NUM_PEERS}); do
+ peer_ns="ovpn_peer${p}"
+ ovpn_cmd_ok "ping tunnel after float peer ${p}" \
+ ip netns exec "${peer_ns}" ping -qfc 500 -w 3 5.5.5.1
+ done
+}
+
+ovpn_run_iperf() {
+ local iperf_pid
+
+ ovpn_run_bg iperf_pid ip netns exec ovpn_peer0 iperf3 -1 -s
+ sleep 1
+
+ ovpn_cmd_ok "run iperf throughput flow" \
+ ip netns exec ovpn_peer1 iperf3 -Z -t 3 -c 5.5.5.1
+ wait "${iperf_pid}" || return 1
+}
+
+ovpn_run_key_rollover() {
+ local p
+ local peer_ns
+
+ ovpn_log "Adding secondary key and then swap:"
+
+ for p in $(seq 1 ${OVPN_NUM_PEERS}); do
+ peer_ns="ovpn_peer${p}"
+ ovpn_cmd_ok "add secondary key on peer0 for peer ${p}" \
+ ip netns exec ovpn_peer0 ${OVPN_CLI} new_key tun0 \
+ ${p} 2 1 ${OVPN_ALG} 0 data64.key
+ ovpn_cmd_ok "add secondary key on peer${p} for peer ${p}" \
+ ip netns exec "${peer_ns}" ${OVPN_CLI} new_key tun${p} \
+ $((p + OVPN_ID_OFFSET)) 2 1 ${OVPN_ALG} 1 \
+ data64.key
+ ovpn_cmd_ok "swap keys on peer${p}" \
+ ip netns exec "${peer_ns}" ${OVPN_CLI} swap_keys \
+ tun${p} $((p + OVPN_ID_OFFSET))
+ done
+}
-for p in $(seq 0 ${NUM_PEERS}); do
- create_ns ${p}
-done
-
-for p in $(seq 0 ${NUM_PEERS}); do
- setup_listener ${p}
-done
-
-for p in $(seq 0 ${NUM_PEERS}); do
- setup_ns ${p} 5.5.5.$((${p} + 1))/24 ${MTU}
-done
-
-for p in $(seq 0 ${NUM_PEERS}); do
- add_peer ${p}
-done
-
-for p in $(seq 1 ${NUM_PEERS}); do
- ip netns exec peer0 ${OVPN_CLI} set_peer tun0 ${p} 60 120
- ip netns exec peer${p} ${OVPN_CLI} set_peer tun${p} \
- $((${p}+ID_OFFSET)) 60 120
-done
-
-sleep 1
-
-TCPDUMP_TIMEOUT="1.5s"
-for p in $(seq 1 ${NUM_PEERS}); do
- # The first part of the data packet header consists of:
- # - TCP only: 2 bytes for the packet length
- # - 5 bits for opcode ("9" for DATA_V2)
- # - 3 bits for key-id ("0" at this point)
- # - 12 bytes for peer-id:
- # - with asymmetric ID: "${p}" one way and "${p} + 9" the other way
- # - with symmetric ID: "${p}" both ways
- HEADER1=$(printf "0x4800000%x" ${p})
- HEADER2=$(printf "0x4800000%x" $((${p} + ID_OFFSET)))
- RADDR=""
- if [ "${PROTO}" == "UDP" ]; then
- RADDR=$(awk "NR == ${p} {print \$3}" ${UDP_PEERS_FILE})
+ovpn_run_queries() {
+ ovpn_log "Querying all peers:"
+
+ ovpn_cmd_ok "query all peers from peer0" \
+ ip netns exec ovpn_peer0 ${OVPN_CLI} get_peer tun0
+ ovpn_cmd_ok "query all peers from peer1" \
+ ip netns exec ovpn_peer1 ${OVPN_CLI} get_peer tun1
+
+ ovpn_log "Querying peer 1:"
+
+ ovpn_cmd_ok "query peer 1 from peer0" \
+ ip netns exec ovpn_peer0 ${OVPN_CLI} get_peer tun0 1
+}
+
+ovpn_query_peer_missing() {
+ ovpn_log "Querying non-existent peer 20:"
+
+ ovpn_cmd_fail "query missing peer 20 on peer0" \
+ ip netns exec ovpn_peer0 ${OVPN_CLI} get_peer tun0 20
+}
+
+ovpn_run_peer_cleanup() {
+ local p
+ local peer_ns
+
+ ovpn_log "Deleting peer 1:"
+
+ ovpn_cmd_ok "delete peer1 on peer0" \
+ ip netns exec ovpn_peer0 ${OVPN_CLI} del_peer tun0 1
+ ovpn_cmd_ok "delete peer1 on peer1" \
+ ip netns exec ovpn_peer1 ${OVPN_CLI} del_peer tun1 \
+ $((1 + OVPN_ID_OFFSET))
+
+ ovpn_log "Querying keys:"
+
+ for p in $(seq 2 ${OVPN_NUM_PEERS}); do
+ peer_ns="ovpn_peer${p}"
+ ovpn_cmd_ok "query peer${p} key 1" \
+ ip netns exec "${peer_ns}" ${OVPN_CLI} get_key tun${p} \
+ $((p + OVPN_ID_OFFSET)) 1
+ ovpn_cmd_ok "query peer${p} key 2" \
+ ip netns exec "${peer_ns}" ${OVPN_CLI} get_key tun${p} \
+ $((p + OVPN_ID_OFFSET)) 2
+ done
+}
+
+ovpn_run_traffic_delete_peer() {
+ local ping_pid
+
+ ovpn_log "Deleting peer while sending traffic:"
+
+ ovpn_run_bg ping_pid ip netns exec ovpn_peer2 ping -qf -w 4 5.5.5.1
+ sleep 2
+ ovpn_cmd_ok "delete peer0 peer 2" \
+ ip netns exec ovpn_peer0 ${OVPN_CLI} del_peer tun0 2
+
+ if [ "${OVPN_PROTO}" == "TCP" ]; then
+ # In TCP mode this command is expected to fail for both peers.
+ ovpn_cmd_mayfail "delete peer2 peer 2 (TCP non-fatal)" \
+ ip netns exec ovpn_peer2 ${OVPN_CLI} del_peer tun2 \
+ $((2 + OVPN_ID_OFFSET))
+ else
+ ovpn_cmd_ok "delete peer2 peer 2" ip netns exec ovpn_peer2 \
+ ${OVPN_CLI} del_peer tun2 $((2 + OVPN_ID_OFFSET))
fi
- timeout ${TCPDUMP_TIMEOUT} ip netns exec peer${p} \
- tcpdump --immediate-mode -p -ni veth${p} -c 1 \
- "$(build_capture_filter "${HEADER1}" "${RADDR}")" \
- >/dev/null 2>&1 &
- TCPDUMP_PID1=$!
- timeout ${TCPDUMP_TIMEOUT} ip netns exec peer${p} \
- tcpdump --immediate-mode -p -ni veth${p} -c 1 \
- "$(build_capture_filter "${HEADER2}" "${RADDR}")" \
- >/dev/null 2>&1 &
- TCPDUMP_PID2=$!
-
- sleep 0.3
- ip netns exec peer0 ping -qfc 500 -w 3 5.5.5.$((${p} + 1))
- ip netns exec peer0 ping -qfc 500 -s 3000 -w 3 5.5.5.$((${p} + 1))
-
- wait ${TCPDUMP_PID1}
- wait ${TCPDUMP_PID2}
-done
-
-# ping LAN behind client 1
-ip netns exec peer0 ping -qfc 500 -w 3 ${LAN_IP}
-
-if [ "$FLOAT" == "1" ]; then
- # make clients float..
- for p in $(seq 1 ${NUM_PEERS}); do
- ip -n peer${p} addr del 10.10.${p}.2/24 dev veth${p}
- ip -n peer${p} addr add 10.10.${p}.3/24 dev veth${p}
+ wait "${ping_pid}" || true
+}
+
+ovpn_run_key_cleanup() {
+ local p
+ local peer_ns
+
+ ovpn_log "Deleting keys:"
+
+ for p in $(seq 3 ${OVPN_NUM_PEERS}); do
+ peer_ns="ovpn_peer${p}"
+ ovpn_cmd_ok "delete key 1 for peer${p}" \
+ ip netns exec "${peer_ns}" ${OVPN_CLI} del_key tun${p} \
+ $((p + OVPN_ID_OFFSET)) 1
+ ovpn_cmd_ok "delete key 2 for peer${p}" \
+ ip netns exec "${peer_ns}" ${OVPN_CLI} del_key tun${p} \
+ $((p + OVPN_ID_OFFSET)) 2
+ done
+}
+
+ovpn_run_timeouts() {
+ local p
+ local peer_ns
+
+ ovpn_log "Setting timeout to 3s MP:"
+
+ for p in $(seq 3 ${OVPN_NUM_PEERS}); do
+ # Non-fatal: this may fail in some protocol modes.
+ ovpn_cmd_mayfail "set peer0 timeout for peer ${p} (non-fatal)" \
+ ip netns exec ovpn_peer0 ${OVPN_CLI} set_peer tun0 \
+ ${p} 3 3
+ peer_ns="ovpn_peer${p}"
+ ovpn_cmd_ok "disable timeout on peer${p} while peer0 adjusts \
+ state" ip netns exec "${peer_ns}" ${OVPN_CLI} set_peer \
+ tun${p} $((p + OVPN_ID_OFFSET)) 0 0
+ done
+ # wait for peers to timeout
+ sleep 5
+
+ ovpn_log "Setting timeout to 3s P2P:"
+
+ for p in $(seq 3 ${OVPN_NUM_PEERS}); do
+ peer_ns="ovpn_peer${p}"
+ ovpn_cmd_ok "set peer${p} P2P timeout" \
+ ip netns exec "${peer_ns}" ${OVPN_CLI} set_peer \
+ tun${p} $((p + OVPN_ID_OFFSET)) 3 3
done
- for p in $(seq 1 ${NUM_PEERS}); do
- ip netns exec peer${p} ping -qfc 500 -w 3 5.5.5.1
+ sleep 5
+}
+
+ovpn_run_notifications() {
+ local p
+
+ for p in $(seq 0 ${OVPN_NUM_PEERS}); do
+ ovpn_cmd_ok "validate listener output for peer ${p}" \
+ ovpn_compare_ntfs "${p}"
done
+}
+
+trap ovpn_test_exit EXIT
+trap ovpn_stage_err ERR
+
+ktap_print_header
+if [ "${OVPN_FLOAT}" == "1" ]; then
+ ktap_set_plan 13
+else
+ ktap_set_plan 12
fi
-ip netns exec peer0 iperf3 -1 -s &
-sleep 1
-ip netns exec peer1 iperf3 -Z -t 3 -c 5.5.5.1
-
-echo "Adding secondary key and then swap:"
-for p in $(seq 1 ${NUM_PEERS}); do
- ip netns exec peer0 ${OVPN_CLI} new_key tun0 ${p} 2 1 ${ALG} 0 \
- data64.key
- ip netns exec peer${p} ${OVPN_CLI} new_key tun${p} \
- $((${p} + ID_OFFSET)) 2 1 ${ALG} 1 data64.key
- ip netns exec peer${p} ${OVPN_CLI} swap_keys tun${p} \
- $((${p} + ID_OFFSET))
-done
-
-sleep 1
-
-echo "Querying all peers:"
-ip netns exec peer0 ${OVPN_CLI} get_peer tun0
-ip netns exec peer1 ${OVPN_CLI} get_peer tun1
-
-echo "Querying peer 1:"
-ip netns exec peer0 ${OVPN_CLI} get_peer tun0 1
-
-echo "Querying non-existent peer 20:"
-ip netns exec peer0 ${OVPN_CLI} get_peer tun0 20 || true
-
-echo "Deleting peer 1:"
-ip netns exec peer0 ${OVPN_CLI} del_peer tun0 1
-ip netns exec peer1 ${OVPN_CLI} del_peer tun1 $((1 + ID_OFFSET))
-
-echo "Querying keys:"
-for p in $(seq 2 ${NUM_PEERS}); do
- ip netns exec peer${p} ${OVPN_CLI} get_key tun${p} \
- $((${p} + ID_OFFSET)) 1
- ip netns exec peer${p} ${OVPN_CLI} get_key tun${p} \
- $((${p} + ID_OFFSET)) 2
-done
-
-echo "Deleting peer while sending traffic:"
-(ip netns exec peer2 ping -qf -w 4 5.5.5.1)&
-sleep 2
-ip netns exec peer0 ${OVPN_CLI} del_peer tun0 2
-# following command fails in TCP mode
-# (both ends get conn reset when one peer disconnects)
-ip netns exec peer2 ${OVPN_CLI} del_peer tun2 $((2 + ID_OFFSET)) || true
-
-echo "Deleting keys:"
-for p in $(seq 3 ${NUM_PEERS}); do
- ip netns exec peer${p} ${OVPN_CLI} del_key tun${p} \
- $((${p} + ID_OFFSET)) 1
- ip netns exec peer${p} ${OVPN_CLI} del_key tun${p} \
- $((${p} + ID_OFFSET)) 2
-done
-
-echo "Setting timeout to 3s MP:"
-for p in $(seq 3 ${NUM_PEERS}); do
- ip netns exec peer0 ${OVPN_CLI} set_peer tun0 ${p} 3 3 || true
- ip netns exec peer${p} ${OVPN_CLI} set_peer tun${p} \
- $((${p} + ID_OFFSET)) 0 0
-done
-# wait for peers to timeout
-sleep 5
-
-echo "Setting timeout to 3s P2P:"
-for p in $(seq 3 ${NUM_PEERS}); do
- ip netns exec peer${p} ${OVPN_CLI} set_peer tun${p} \
- $((${p} + ID_OFFSET)) 3 3
-done
-sleep 5
-
-for p in $(seq 0 ${NUM_PEERS}); do
- compare_ntfs ${p}
-done
-
-cleanup
-
-modprobe -r ovpn || true
+ovpn_cleanup
+modprobe -q ovpn || true
+
+ovpn_run_stage "setup network topology" ovpn_prepare_network
+ovpn_run_stage "run baseline data traffic" ovpn_run_basic_traffic
+ovpn_run_stage "run LAN traffic behind peer1" ovpn_run_lan_traffic
+[ "${OVPN_FLOAT}" == "1" ] && ovpn_run_stage "run floating peer checks" \
+ ovpn_run_float_mode
+ovpn_run_stage "run iperf throughput" ovpn_run_iperf
+ovpn_run_stage "run key rollout" ovpn_run_key_rollover
+ovpn_run_stage "query peers" ovpn_run_queries
+ovpn_run_stage "query missing peer fails" ovpn_query_peer_missing
+ovpn_run_stage "peer lifecycle and key queries" ovpn_run_peer_cleanup
+ovpn_run_stage "delete peer while traffic" ovpn_run_traffic_delete_peer
+ovpn_run_stage "delete stale keys" ovpn_run_key_cleanup
+ovpn_run_stage "check timeout behavior" ovpn_run_timeouts
+ovpn_run_stage "validate notification output" ovpn_run_notifications
+
+ovpn_test_finished=1
+ktap_finished
diff --git a/tools/testing/selftests/net/packetdrill/tcp_rfc5961_ack-out-of-window.pkt b/tools/testing/selftests/net/packetdrill/tcp_rfc5961_ack-out-of-window.pkt
new file mode 100644
index 000000000000..2776b8728085
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_rfc5961_ack-out-of-window.pkt
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// RFC 5961 Section 5.2 / RFC 793 Section 3.9: an incoming segment's
+// ACK value must lie in [SND.UNA - MAX.SND.WND, SND.NXT]; otherwise
+// the receiver MUST discard the segment and send a challenge ACK
+// back. Exercise both edges of that window in a single connection.
+
+`./defaults.sh
+sysctl -q net.ipv4.tcp_invalid_ratelimit=0
+`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+// Three-way handshake. Peer advertises rwnd = 1000 with wscale shift 0
+// (i.e. no scaling), so MAX.SND.WND is tracked as 1000.
+ +0 < S 0:0(0) win 1000 <mss 1000,sackOK,nop,nop,nop,wscale 0>
+ +0 > S. 0:0(0) ack 1 <...>
++.1 < . 1:1(0) ack 1 win 1000
+ +0 accept(3, ..., ...) = 4
+
+// ---- Upper edge: SEG.ACK > SND.NXT --------------------------------
+// Server has sent nothing yet, so SND.UNA = SND.NXT = 1.
+// Peer sends a pure ACK with SEG.ACK = 2, beyond SND.NXT.
+ +0 < . 1:1(0) ack 2 win 1000
+// Expect a challenge ACK: <SEQ = SND.NXT = 1, ACK = RCV.NXT = 1>.
+ +0 > . 1:1(0) ack 1
+
+// Advance SND.UNA past MAX.SND.WND so that the lower edge becomes
+// reachable. Issue two 1-MSS writes so each skb is exactly one MSS
+// and PSH is set by tcp_push() at the end of each sendmsg, keeping
+// the setup independent of the TSO / tcp_fragment split path.
+ +0 write(4, ..., 1000) = 1000
+ +0 > P. 1:1001(1000) ack 1
++.01 < . 1:1(0) ack 1001 win 1000
+ +0 write(4, ..., 1000) = 1000
+ +0 > P. 1001:2001(1000) ack 1
++.01 < . 1:1(0) ack 2001 win 1000
+// Now SND.UNA = SND.NXT = 2001, MAX.SND.WND = 1000, bytes_acked = 2000.
+
+// ---- Lower edge: SEG.ACK < SND.UNA - MAX.SND.WND ------------------
+// SND.UNA - MAX.SND.WND = 2001 - 1000 = 1001, so SEG.ACK = 1000 falls
+// below the acceptable range.
+ +0 < . 1:1(0) ack 1000 win 1000
+// Expect a challenge ACK: <SEQ = SND.NXT = 2001, ACK = RCV.NXT = 1>.
+ +0 > . 2001:2001(0) ack 1
diff --git a/tools/testing/selftests/net/packetdrill/tcp_ts_recent_invalid_ack.pkt b/tools/testing/selftests/net/packetdrill/tcp_ts_recent_invalid_ack.pkt
index 174ce9a1bfc0..ee6baf7c36cf 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_ts_recent_invalid_ack.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_ts_recent_invalid_ack.pkt
@@ -19,7 +19,9 @@
// bad packet with high tsval (its ACK sequence is above our sndnxt)
+0 < F. 1:1(0) ack 9999 win 20000 <nop,nop,TS val 200000 ecr 100>
-
+// Challenge ACK for SEG.ACK > SND.NXT (RFC 5961 5.2 / RFC 793 3.9).
+// ecr=200 (not 200000) proves ts_recent was not updated from the bad packet.
+ +0 > . 1:1(0) ack 1 <nop,nop,TS val 200 ecr 200>
+0 < . 1:1001(1000) ack 1 win 20000 <nop,nop,TS val 201 ecr 100>
+0 > . 1:1(0) ack 1001 <nop,nop,TS val 200 ecr 201>
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 5a5ff88321d5..c499953d4885 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -23,6 +23,7 @@ ALL_TESTS="
kci_test_encap
kci_test_macsec
kci_test_macsec_vlan
+ kci_test_team_bridge_macvlan
kci_test_ipsec
kci_test_ipsec_offload
kci_test_fdb_get
@@ -636,6 +637,49 @@ kci_test_macsec_vlan()
end_test "PASS: macsec_vlan"
}
+# Test ndo_change_rx_flags call from dev_uc_add under addr_list_lock spinlock.
+# When promiscuous mode is flipped, make sure it runs on the work queue.
+#
+# https://lore.kernel.org/netdev/20260214033859.43857-1-jiayuan.chen@linux.dev/
+# With (more conventional) macvlan instead of macsec.
+# macvlan -> bridge -> team -> dummy
+kci_test_team_bridge_macvlan()
+{
+ local vlan="test_macv1"
+ local bridge="test_br1"
+ local team="test_team1"
+ local dummy="test_dummy1"
+ local ret=0
+
+ run_cmd ip link add $team type team
+ if [ $ret -ne 0 ]; then
+ end_test "SKIP: team_bridge_macvlan: can't add team interface"
+ return $ksft_skip
+ fi
+
+ run_cmd ip link add $dummy type dummy
+ run_cmd ip link set $dummy master $team
+ run_cmd ip link set $team up
+ run_cmd ip link add $bridge type bridge vlan_filtering 1
+ run_cmd ip link set $bridge up
+ run_cmd ip link set $team master $bridge
+ run_cmd ip link add link $bridge name $vlan \
+ address 00:aa:bb:cc:dd:ee type macvlan mode bridge
+ run_cmd ip link set $vlan up
+
+ run_cmd ip link del $vlan
+ run_cmd ip link del $bridge
+ run_cmd ip link del $team
+ run_cmd ip link del $dummy
+
+ if [ $ret -ne 0 ]; then
+ end_test "FAIL: team_bridge_macvlan"
+ return 1
+ fi
+
+ end_test "PASS: team_bridge_macvlan"
+}
+
#-------------------------------------------------------------------
# Example commands
# ip x s add proto esp src 14.0.0.52 dst 14.0.0.70 \
diff --git a/tools/testing/selftests/net/tcp_ao/config b/tools/testing/selftests/net/tcp_ao/config
index 971cb6fa2d63..f22148512365 100644
--- a/tools/testing/selftests/net/tcp_ao/config
+++ b/tools/testing/selftests/net/tcp_ao/config
@@ -1,3 +1,4 @@
+CONFIG_CRYPTO_CMAC=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_RMD160=y
CONFIG_CRYPTO_SHA1=y
diff --git a/tools/testing/selftests/net/tcp_ecmp_failover.sh b/tools/testing/selftests/net/tcp_ecmp_failover.sh
new file mode 100755
index 000000000000..5768aa8bff6a
--- /dev/null
+++ b/tools/testing/selftests/net/tcp_ecmp_failover.sh
@@ -0,0 +1,216 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2026 Google LLC.
+#
+# This test verifies TCP flow failover between ECMP routes
+# upon carrier loss on the active device.
+#
+# socat -----------------------------> socat
+# |
+# .-- veth-c1 -|- veth-s1 --.
+# dummy0 -| | |-- dummy0
+# '-- veth-c2 -|- veth-s2 --'
+# |
+#
+
+REQUIRE_JQ=no
+REQUIRE_MZ=no
+NUM_NETIFS=0
+
+source forwarding/lib.sh
+
+CLIENT_IP="10.0.59.1"
+SERVER_IP="10.0.92.1"
+CLIENT_IP6="2001:db8:5a9a::1"
+SERVER_IP6="2001:db8:9292::1"
+
+setup_server()
+{
+ IP="ip -n $server"
+ NS_EXEC="ip netns exec $server"
+
+ $IP link add dummy0 type dummy
+ $IP link set dummy0 up
+
+ $IP -4 addr add $SERVER_IP/32 dev dummy0
+ $IP -6 addr add $SERVER_IP6/128 dev dummy0 nodad
+
+ $IP link set veth-s1 up
+ $IP link set veth-s2 up
+
+ $IP -4 addr add 192.168.1.2/24 dev veth-s1
+ $IP -4 addr add 192.168.2.2/24 dev veth-s2
+
+ $IP -4 route add $CLIENT_IP/32 \
+ nexthop via 192.168.1.1 dev veth-s1 weight 1 \
+ nexthop via 192.168.2.1 dev veth-s2 weight 1
+
+ $IP -6 addr add 2001:db8:1::2/64 dev veth-s1 nodad
+ $IP -6 addr add 2001:db8:2::2/64 dev veth-s2 nodad
+
+ $IP -6 route add $CLIENT_IP6/128 \
+ nexthop via 2001:db8:1::1 dev veth-s1 weight 1 \
+ nexthop via 2001:db8:2::1 dev veth-s2 weight 1
+}
+
+setup_client()
+{
+ IP="ip -n $client"
+ NS_EXEC="ip netns exec $client"
+
+ $IP link add dummy0 type dummy
+ $IP link set dummy0 up
+
+ $IP -4 addr add $CLIENT_IP/32 dev dummy0
+ $IP -6 addr add $CLIENT_IP6/128 dev dummy0 nodad
+
+ $IP link set veth-c1 up
+ $IP link set veth-c2 up
+
+ $IP -4 addr add 192.168.1.1/24 dev veth-c1
+ $IP -4 addr add 192.168.2.1/24 dev veth-c2
+
+ $IP -4 route add $SERVER_IP/32 \
+ nexthop via 192.168.1.2 dev veth-c1 weight 1 \
+ nexthop via 192.168.2.2 dev veth-c2 weight 1
+
+ $IP -6 addr add 2001:db8:1::1/64 dev veth-c1 nodad
+ $IP -6 addr add 2001:db8:2::1/64 dev veth-c2 nodad
+
+ $IP -6 route add $SERVER_IP6/128 \
+ nexthop via 2001:db8:1::2 dev veth-c1 weight 1 \
+ nexthop via 2001:db8:2::2 dev veth-c2 weight 1
+
+ # By default, tcp_retries1=3 triggers a route refresh
+ # after 3 retransmits (~5s). Ensure this never occurs
+ # for test stability.
+ $NS_EXEC sysctl -qw net.ipv4.tcp_retries1=100
+
+ # When NETDEV_CHANGE is issued for a dev tied to an ECMP
+ # route, RTNH_F_LINKDOWN is flagged and the sernum is
+ # bumped to invalidate the route via sk_dst_check().
+ #
+ # Without ignore_routes_with_linkdown=1, subsequent
+ # lookups may still select the same RTNH_F_LINKDOWN route.
+ $NS_EXEC sysctl -qw net.ipv4.conf.veth-c1.ignore_routes_with_linkdown=1
+ $NS_EXEC sysctl -qw net.ipv4.conf.veth-c2.ignore_routes_with_linkdown=1
+
+ $NS_EXEC sysctl -qw net.ipv6.conf.veth-c1.ignore_routes_with_linkdown=1
+ $NS_EXEC sysctl -qw net.ipv6.conf.veth-c2.ignore_routes_with_linkdown=1
+}
+
+setup()
+{
+ setup_ns client server
+
+ ip -n "$client" link add veth-c1 type veth peer veth-s1 netns "$server"
+ ip -n "$client" link add veth-c2 type veth peer veth-s2 netns "$server"
+
+ setup_server
+ setup_client
+}
+
+cleanup()
+{
+ cleanup_all_ns > /dev/null 2>&1
+}
+
+tcp_ecmp_failover()
+{
+ local pf=$1; shift
+ local server_ip=$1; shift
+ local client_ip=$1; shift
+
+ RET=0
+
+ tcpdump_start veth-s1 "$server"
+ tcpdump_start veth-s2 "$server"
+
+ ip netns exec "$server" \
+ socat -u TCP-LISTEN:8080,pf="$pf",bind="$server_ip",reuseaddr /dev/null &
+ server_pid=$!
+
+ # Wait for server to start listening.
+ # Sometimes client fails without this sleep.
+ sleep 1
+
+ ip netns exec "$client" \
+ socat -u /dev/zero TCP:"$server_ip":8080,pf="$pf",bind="$client_ip" &
+ client_pid=$!
+
+ # To capture enough packets.
+ sleep 3
+
+ tcpdump_stop veth-s1
+ tcpdump_stop veth-s2
+
+ pkts_s1=$(tcpdump_show veth-s1 | wc -l)
+ pkts_s2=$(tcpdump_show veth-s2 | wc -l)
+
+ tcpdump_cleanup veth-s1
+ tcpdump_cleanup veth-s2
+
+ # Detect the device chosen by the client
+ if [ "$pkts_s1" -gt "$pkts_s2" ]; then
+ veth_down=veth-s1
+ veth_up=veth-s2
+ else
+ veth_down=veth-s2
+ veth_up=veth-s1
+ fi
+
+ # Taking down $veth_down causes its peer to lose carrier,
+ # triggering NETDEV_CHANGE. This flags RTNH_F_LINKDOWN
+ # and bumps the sernum for the route associated with that
+ # peer, invalidating the cached dst in the TCP socket.
+ #
+ # Consequently, sk_dst_check() fails, forcing the subsequent
+ # lookup to select the remaining healthy route via $veth_up.
+ ip -n "$server" link set "$veth_down" down
+
+ tcpdump_start "$veth_up" "$server"
+
+ # To capture enough packets.
+ sleep 3
+
+ tcpdump_stop "$veth_up"
+
+ kill -9 "$client_pid" > /dev/null 2>&1
+ kill -9 "$server_pid" > /dev/null 2>&1
+ wait 2> /dev/null
+
+ pkts=$(tcpdump_show $veth_up | wc -l)
+
+ tcpdump_cleanup "$veth_up"
+
+ if [ "$pkts" -lt 1000 ]; then
+ RET=$ksft_fail
+ fi
+}
+
+test_ipv4()
+{
+ setup
+ tcp_ecmp_failover IPv4 $SERVER_IP $CLIENT_IP
+ log_test "TCP IPv4 failover"
+ cleanup
+}
+
+test_ipv6()
+{
+ setup
+ tcp_ecmp_failover IPv6 "[$SERVER_IP6]" "[$CLIENT_IP6]"
+ log_test "TCP IPv6 failover"
+ cleanup
+}
+
+require_command socat
+require_command tcpdump
+
+trap cleanup EXIT
+
+test_ipv4
+test_ipv6
+
+exit "$EXIT_STATUS"
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 9e2ccea13d70..30a236b8e9f7 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -946,6 +946,49 @@ TEST_F(tls, peek_and_splice)
EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
+TEST_F(tls, splice_to_pipe_small)
+{
+ int send_len = TLS_PAYLOAD_MAX_LEN;
+ char mem_send[TLS_PAYLOAD_MAX_LEN];
+ char mem_recv[TLS_PAYLOAD_MAX_LEN];
+ size_t total = 0;
+ int p[2];
+
+ memrnd(mem_send, sizeof(mem_send));
+
+ ASSERT_GE(pipe(p), 0);
+
+ /* Shrink pipe to 1 page (typically 4096 bytes) to force multiple
+ * splice iterations for a 16384-byte TLS record.
+ */
+ EXPECT_GE(fcntl(p[1], F_SETPIPE_SZ, 4096), 4096);
+
+ EXPECT_EQ(send(self->fd, mem_send, send_len, 0), send_len);
+
+ while (total < (size_t)send_len) {
+ ssize_t spliced, drained;
+
+ spliced = splice(self->cfd, NULL, p[1], NULL,
+ send_len - total, 0);
+ EXPECT_GT(spliced, 0);
+ if (spliced <= 0)
+ break;
+
+ drained = read(p[0], mem_recv + total, spliced);
+ EXPECT_EQ(drained, spliced);
+ if (drained <= 0)
+ break;
+
+ total += drained;
+ }
+
+ EXPECT_EQ(total, (size_t)send_len);
+ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+
+ close(p[0]);
+ close(p[1]);
+}
+
#define MAX_FRAGS 48
TEST_F(tls, splice_short)
{
diff --git a/tools/testing/selftests/rdma/Makefile b/tools/testing/selftests/rdma/Makefile
new file mode 100644
index 000000000000..7dd7cba7a73c
--- /dev/null
+++ b/tools/testing/selftests/rdma/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+TEST_PROGS := rxe_rping_between_netns.sh \
+ rxe_ipv6.sh \
+ rxe_socket_with_netns.sh \
+ rxe_test_NETDEV_UNREGISTER.sh
+
+include ../lib.mk
diff --git a/tools/testing/selftests/rdma/config b/tools/testing/selftests/rdma/config
new file mode 100644
index 000000000000..4ffb814e253b
--- /dev/null
+++ b/tools/testing/selftests/rdma/config
@@ -0,0 +1,3 @@
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_RDMA_RXE=y
diff --git a/tools/testing/selftests/rdma/rxe_ipv6.sh b/tools/testing/selftests/rdma/rxe_ipv6.sh
new file mode 100755
index 000000000000..b7059bfd6d7c
--- /dev/null
+++ b/tools/testing/selftests/rdma/rxe_ipv6.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+# Configuration
+NS_NAME="net6"
+VETH_HOST="veth0"
+VETH_NS="veth1"
+RXE_NAME="rxe6"
+PORT=4791
+IP6_ADDR="2001:db8::1/64"
+
+exec > /dev/null
+
+# Cleanup function to run on exit (even on failure)
+cleanup() {
+ ip netns del "$NS_NAME" 2>/dev/null
+ modprobe -r rdma_rxe 2>/dev/null
+ echo "Done."
+}
+trap cleanup EXIT
+
+# 1. Prerequisites check
+for mod in tun veth rdma_rxe; do
+ if ! modinfo "$mod" >/dev/null 2>&1; then
+ echo "Error: Kernel module '$mod' not found."
+ exit 1
+ fi
+done
+
+modprobe rdma_rxe
+
+# 2. Setup Namespace and Networking
+echo "Setting up IPv6 network namespace..."
+ip netns add "$NS_NAME"
+ip link add "$VETH_HOST" type veth peer name "$VETH_NS"
+ip link set "$VETH_NS" netns "$NS_NAME"
+ip netns exec "$NS_NAME" ip addr add "$IP6_ADDR" dev "$VETH_NS"
+ip netns exec "$NS_NAME" ip link set "$VETH_NS" up
+ip link set "$VETH_HOST" up
+
+# 3. Add RDMA Link
+echo "Adding RDMA RXE link..."
+if ! ip netns exec "$NS_NAME" rdma link add "$RXE_NAME" type rxe netdev "$VETH_NS"; then
+ echo "Error: Failed to create RXE link."
+ exit 1
+fi
+
+# 4. Verification: Port should be listening
+# Using -H to skip headers and -q for quiet exit codes
+if ! ip netns exec "$NS_NAME" ss -Hul6n sport = :$PORT | grep -q ":$PORT"; then
+ echo "Error: UDP port $PORT is NOT listening after link creation."
+ exit 1
+fi
+echo "Verified: Port $PORT is active."
+
+# 5. Removal and Verification
+echo "Deleting RDMA link..."
+ip netns exec "$NS_NAME" rdma link del "$RXE_NAME"
+
+if ip netns exec "$NS_NAME" ss -Hul6n sport = :$PORT | grep -q ":$PORT"; then
+ echo "Error: UDP port $PORT still active after link deletion."
+ exit 1
+fi
+echo "Verified: Port $PORT closed successfully."
diff --git a/tools/testing/selftests/rdma/rxe_rping_between_netns.sh b/tools/testing/selftests/rdma/rxe_rping_between_netns.sh
new file mode 100755
index 000000000000..e5b876f58c6e
--- /dev/null
+++ b/tools/testing/selftests/rdma/rxe_rping_between_netns.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+# Configuration
+NS="test1"
+VETH_A="veth-a"
+VETH_B="veth-b"
+IP_A="1.1.1.1"
+IP_B="1.1.1.2"
+PORT=4791
+
+exec > /dev/null
+
+# --- Cleanup Routine ---
+cleanup() {
+ echo "Cleaning up resources..."
+ rdma link del rxe1 2>/dev/null
+ ip netns exec "$NS" rdma link del rxe0 2>/dev/null
+ ip link delete "$VETH_B" 2>/dev/null
+ ip netns del "$NS" 2>/dev/null
+ modprobe -r rdma_rxe 2>/dev/null
+}
+trap cleanup EXIT
+
+# --- Prerequisite Checks ---
+if [[ $EUID -ne 0 ]]; then
+ echo "This script must be run as root"
+ exit 1
+fi
+
+modprobe rdma_rxe || { echo "Failed to load rdma_rxe"; exit 1; }
+
+# --- Setup Network Topology ---
+echo "Setting up network namespace and veth pair..."
+ip netns add "$NS"
+ip link add "$VETH_A" type veth peer name "$VETH_B"
+ip link set "$VETH_A" netns "$NS"
+
+# Configure Namespace side (test1)
+ip netns exec "$NS" ip addr add "$IP_A/24" dev "$VETH_A"
+ip netns exec "$NS" ip link set "$VETH_A" up
+ip netns exec "$NS" ip link set lo up
+
+# Configure Host side
+ip addr add "$IP_B/24" dev "$VETH_B"
+ip link set "$VETH_B" up
+
+# --- RXE Link Creation ---
+echo "Creating RDMA links..."
+ip netns exec "$NS" rdma link add rxe0 type rxe netdev "$VETH_A"
+rdma link add rxe1 type rxe netdev "$VETH_B"
+
+# Verify UDP 4791 is listening
+check_port() {
+ local target=$1 # "host" or "ns"
+ if [ "$target" == "ns" ]; then
+ ip netns exec "$NS" ss -Huln sport == :$PORT | grep -q ":$PORT"
+ else
+ ss -Huln sport == :$PORT | grep -q ":$PORT"
+ fi
+}
+
+check_port "ns" || { echo "Error: RXE port not listening in namespace"; exit 1; }
+check_port "host" || { echo "Error: RXE port not listening on host"; exit 1; }
+
+# --- Connectivity Test ---
+echo "Testing connectivity with rping..."
+ping -c 2 -W 1 "$IP_A" > /dev/null || { echo "Ping failed"; exit 1; }
+
+# Start rping server in background
+ip netns exec "$NS" rping -s -a "$IP_A" -v > /dev/null 2>&1 &
+RPING_PID=$!
+sleep 1 # Allow server to bind
+
+# Run rping client
+rping -c -a "$IP_A" -d -v -C 3
+RESULT=$?
+
+kill $RPING_PID 2>/dev/null
+
+if [ $RESULT -eq 0 ]; then
+ echo "SUCCESS: RDMA traffic verified."
+else
+ echo "FAILURE: rping failed."
+ exit 1
+fi
diff --git a/tools/testing/selftests/rdma/rxe_socket_with_netns.sh b/tools/testing/selftests/rdma/rxe_socket_with_netns.sh
new file mode 100755
index 000000000000..002e5098f751
--- /dev/null
+++ b/tools/testing/selftests/rdma/rxe_socket_with_netns.sh
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+# Configuration
+PORT=4791
+MODS=("tun" "rdma_rxe")
+
+exec > /dev/null
+
+# --- Helper: Cleanup Routine ---
+cleanup() {
+ echo "Cleaning up resources..."
+ rdma link del rxe1 2>/dev/null
+ rdma link del rxe0 2>/dev/null
+ ip link del tun0 2>/dev/null
+ ip link del tun1 2>/dev/null
+ for m in "${MODS[@]}"; do modprobe -r "$m" 2>/dev/null; done
+}
+
+# Ensure cleanup runs on script exit or interrupt
+trap cleanup EXIT
+
+# --- Phase 1: Environment Check ---
+if [[ $EUID -ne 0 ]]; then
+ echo "Error: This script must be run as root."
+ exit 1
+fi
+
+for m in "${MODS[@]}"; do
+ modprobe "$m" || { echo "Error: Failed to load $m"; exit 1; }
+done
+
+# --- Phase 2: Create Interfaces & RXE Links ---
+echo "Creating tun0 (1.1.1.1) and rxe0..."
+ip tuntap add mode tun tun0
+ip addr add 1.1.1.1/24 dev tun0
+ip link set tun0 up
+rdma link add rxe0 type rxe netdev tun0
+
+# Verify port 4791 is listening
+if ! ss -Huln sport = :$PORT | grep -q ":$PORT"; then
+ echo "Error: UDP port $PORT not found after rxe0 creation"
+ exit 1
+fi
+
+echo "Creating tun1 (2.2.2.2) and rxe1..."
+ip tuntap add mode tun tun1
+ip addr add 2.2.2.2/24 dev tun1
+ip link set tun1 up
+rdma link add rxe1 type rxe netdev tun1
+
+# Verify port 4791 is still listening
+if ! ss -Huln sport = :$PORT | grep -q ":$PORT"; then
+ echo "Error: UDP port $PORT missing after rxe1 creation"
+ exit 1
+fi
+
+# --- Phase 3: Targeted Deletion ---
+echo "Deleting rxe1..."
+rdma link del rxe1
+
+# Port should still be active because rxe0 is still alive
+if ! ss -Huln sport = :$PORT | grep -q ":$PORT"; then
+ echo "Error: UDP port $PORT closed prematurely"
+ exit 1
+fi
+
+echo "Deleting rxe0..."
+rdma link del rxe0
+
+# Port should now be gone
+if ss -Huln sport = :$PORT | grep -q ":$PORT"; then
+ echo "Error: UDP port $PORT still exists after all links deleted"
+ exit 1
+fi
+
+echo "Test passed successfully."
diff --git a/tools/testing/selftests/rdma/rxe_test_NETDEV_UNREGISTER.sh b/tools/testing/selftests/rdma/rxe_test_NETDEV_UNREGISTER.sh
new file mode 100755
index 000000000000..021ca451499d
--- /dev/null
+++ b/tools/testing/selftests/rdma/rxe_test_NETDEV_UNREGISTER.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+# Configuration
+DEV_NAME="tun0"
+RXE_NAME="rxe0"
+RDMA_PORT=4791
+
+exec > /dev/null
+
+# --- Cleanup Routine ---
+# Ensures environment is clean even if the script hits an error
+cleanup() {
+ echo "Performing cleanup..."
+ rdma link del $RXE_NAME 2>/dev/null
+ ip link del $DEV_NAME 2>/dev/null
+ modprobe -r rdma_rxe 2>/dev/null
+}
+trap cleanup EXIT
+
+# 1. Dependency Check
+if ! modinfo rdma_rxe >/dev/null 2>&1; then
+ echo "Error: rdma_rxe module not found."
+ exit 1
+fi
+
+modprobe rdma_rxe
+
+# 2. Setup TUN Device
+echo "Creating $DEV_NAME..."
+ip tuntap add mode tun "$DEV_NAME"
+ip addr add 1.1.1.1/24 dev "$DEV_NAME"
+ip link set "$DEV_NAME" up
+
+# 3. Attach RXE Link
+echo "Attaching RXE link $RXE_NAME to $DEV_NAME..."
+rdma link add "$RXE_NAME" type rxe netdev "$DEV_NAME"
+
+# 4. Verification: Port Check
+# Use -H (no header) and -q (quiet) for cleaner scripting logic
+if ! ss -Huln sport == :$RDMA_PORT | grep -q ":$RDMA_PORT"; then
+ echo "Error: UDP port $RDMA_PORT is not listening."
+ exit 1
+fi
+echo "Verified: RXE is listening on UDP $RDMA_PORT."
+
+# 5. Trigger NETDEV_UNREGISTER
+# We delete the underlying device without deleting the RDMA link first.
+echo "Triggering NETDEV_UNREGISTER by deleting $DEV_NAME..."
+ip link del "$DEV_NAME"
+
+# 6. Final Verification
+# The RXE link and the UDP port should be automatically cleaned up by the kernel.
+if rdma link show "$RXE_NAME" 2>/dev/null; then
+ echo "Error: $RXE_NAME still exists after netdev removal."
+ exit 1
+fi
+
+if ss -Huln sport == :$RDMA_PORT | grep -q ":$RDMA_PORT"; then
+ echo "Error: UDP port $RDMA_PORT still listening after netdev removal."
+ exit 1
+fi
+
+echo "Success: NETDEV_UNREGISTER handled correctly."
diff --git a/tools/testing/selftests/riscv/cfi/Makefile b/tools/testing/selftests/riscv/cfi/Makefile
index 96a4dc4b69c3..93b4738c0e2e 100644
--- a/tools/testing/selftests/riscv/cfi/Makefile
+++ b/tools/testing/selftests/riscv/cfi/Makefile
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
CFLAGS += $(KHDR_INCLUDES)
CFLAGS += -I$(top_srcdir)/tools/include
diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
index 4ef90823b652..50d69e22ee7a 100644
--- a/tools/testing/selftests/rseq/Makefile
+++ b/tools/testing/selftests/rseq/Makefile
@@ -14,14 +14,20 @@ LDLIBS += -lpthread -ldl
# still track changes to header files and depend on shared object.
OVERRIDE_TARGETS = 1
-TEST_GEN_PROGS = basic_test basic_percpu_ops_test basic_percpu_ops_mm_cid_test param_test \
- param_test_benchmark param_test_compare_twice param_test_mm_cid \
- param_test_mm_cid_benchmark param_test_mm_cid_compare_twice \
- syscall_errors_test slice_test
+TEST_GEN_PROGS = basic_test basic_percpu_ops_test basic_percpu_ops_mm_cid_test \
+ param_test_benchmark param_test_mm_cid_benchmark
-TEST_GEN_PROGS_EXTENDED = librseq.so
+TEST_GEN_PROGS_EXTENDED = librseq.so \
+ param_test \
+ param_test_compare_twice \
+ param_test_mm_cid \
+ param_test_mm_cid_compare_twice \
+ syscall_errors_test \
+ legacy_check \
+ slice_test \
+ check_optimized
-TEST_PROGS = run_param_test.sh run_syscall_errors_test.sh
+TEST_PROGS = run_param_test.sh run_syscall_errors_test.sh run_legacy_check.sh run_timeslice_test.sh
TEST_FILES := settings
@@ -62,3 +68,6 @@ $(OUTPUT)/syscall_errors_test: syscall_errors_test.c $(TEST_GEN_PROGS_EXTENDED)
$(OUTPUT)/slice_test: slice_test.c $(TEST_GEN_PROGS_EXTENDED) rseq.h rseq-*.h
$(CC) $(CFLAGS) $< $(LDLIBS) -lrseq -o $@
+
+$(OUTPUT)/check_optimized: check_optimized.c $(TEST_GEN_PROGS_EXTENDED) rseq.h rseq-*.h
+ $(CC) $(CFLAGS) $< $(LDLIBS) -lrseq -o $@
diff --git a/tools/testing/selftests/rseq/check_optimized.c b/tools/testing/selftests/rseq/check_optimized.c
new file mode 100644
index 000000000000..a13e3f2c8fc6
--- /dev/null
+++ b/tools/testing/selftests/rseq/check_optimized.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: LGPL-2.1
+#define _GNU_SOURCE
+#include <assert.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/time.h>
+
+#include "rseq.h"
+
+int main(int argc, char **argv)
+{
+ if (__rseq_register_current_thread(true, false))
+ return -1;
+ return 0;
+}
diff --git a/tools/testing/selftests/rseq/legacy_check.c b/tools/testing/selftests/rseq/legacy_check.c
new file mode 100644
index 000000000000..3f7de4e28303
--- /dev/null
+++ b/tools/testing/selftests/rseq/legacy_check.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <errno.h>
+#include <signal.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#include "rseq.h"
+
+#include "../kselftest_harness.h"
+
+FIXTURE(legacy)
+{
+};
+
+static int cpu_id_in_sigfn = -1;
+
+static void sigfn(int sig)
+{
+ struct rseq_abi *rs = rseq_get_abi();
+
+ cpu_id_in_sigfn = rs->cpu_id_start;
+}
+
+FIXTURE_SETUP(legacy)
+{
+ int res = __rseq_register_current_thread(true, true);
+
+ switch (res) {
+ case -ENOSYS:
+ SKIP(return, "RSEQ not enabled\n");
+ case -EBUSY:
+ SKIP(return, "GLIBC owns RSEQ. Disable GLIBC RSEQ registration\n");
+ default:
+ ASSERT_EQ(res, 0);
+ }
+
+ ASSERT_NE(signal(SIGUSR1, sigfn), SIG_ERR);
+}
+
+FIXTURE_TEARDOWN(legacy)
+{
+}
+
+TEST_F(legacy, legacy_test)
+{
+ struct rseq_abi *rs = rseq_get_abi();
+
+ ASSERT_NE(rs, NULL);
+
+ /* Overwrite rs::cpu_id_start */
+ rs->cpu_id_start = -1;
+ sleep(1);
+ ASSERT_NE(rs->cpu_id_start, -1);
+
+ rs->cpu_id_start = -1;
+ ASSERT_EQ(raise(SIGUSR1), 0);
+ ASSERT_NE(rs->cpu_id_start, -1);
+ ASSERT_NE(cpu_id_in_sigfn, -1);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
index 05d03e679e06..e1e98dbabe4b 100644
--- a/tools/testing/selftests/rseq/param_test.c
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -38,7 +38,7 @@ static int opt_modulo, verbose;
static int opt_yield, opt_signal, opt_sleep,
opt_disable_rseq, opt_threads = 200,
opt_disable_mod = 0, opt_test = 's';
-
+static bool opt_rseq_legacy;
static long long opt_reps = 5000;
static __thread __attribute__((tls_model("initial-exec")))
@@ -281,9 +281,12 @@ unsigned int yield_mod_cnt, nr_abort;
} \
}
+#define rseq_no_glibc true
+
#else
#define printf_verbose(fmt, ...)
+#define rseq_no_glibc false
#endif /* BENCHMARK */
@@ -481,7 +484,7 @@ void *test_percpu_spinlock_thread(void *arg)
long long i, reps;
if (!opt_disable_rseq && thread_data->reg &&
- rseq_register_current_thread())
+ __rseq_register_current_thread(rseq_no_glibc, opt_rseq_legacy))
abort();
reps = thread_data->reps;
for (i = 0; i < reps; i++) {
@@ -558,7 +561,7 @@ void *test_percpu_inc_thread(void *arg)
long long i, reps;
if (!opt_disable_rseq && thread_data->reg &&
- rseq_register_current_thread())
+ __rseq_register_current_thread(rseq_no_glibc, opt_rseq_legacy))
abort();
reps = thread_data->reps;
for (i = 0; i < reps; i++) {
@@ -712,7 +715,7 @@ void *test_percpu_list_thread(void *arg)
long long i, reps;
struct percpu_list *list = (struct percpu_list *)arg;
- if (!opt_disable_rseq && rseq_register_current_thread())
+ if (!opt_disable_rseq && __rseq_register_current_thread(rseq_no_glibc, opt_rseq_legacy))
abort();
reps = opt_reps;
@@ -895,7 +898,7 @@ void *test_percpu_buffer_thread(void *arg)
long long i, reps;
struct percpu_buffer *buffer = (struct percpu_buffer *)arg;
- if (!opt_disable_rseq && rseq_register_current_thread())
+ if (!opt_disable_rseq && __rseq_register_current_thread(rseq_no_glibc, opt_rseq_legacy))
abort();
reps = opt_reps;
@@ -1105,7 +1108,7 @@ void *test_percpu_memcpy_buffer_thread(void *arg)
long long i, reps;
struct percpu_memcpy_buffer *buffer = (struct percpu_memcpy_buffer *)arg;
- if (!opt_disable_rseq && rseq_register_current_thread())
+ if (!opt_disable_rseq && __rseq_register_current_thread(rseq_no_glibc, opt_rseq_legacy))
abort();
reps = opt_reps;
@@ -1258,7 +1261,7 @@ void *test_membarrier_worker_thread(void *arg)
const int iters = opt_reps;
int i;
- if (rseq_register_current_thread()) {
+ if (__rseq_register_current_thread(rseq_no_glibc, opt_rseq_legacy)) {
fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
errno, strerror(errno));
abort();
@@ -1323,7 +1326,7 @@ void *test_membarrier_manager_thread(void *arg)
intptr_t expect_a = 0, expect_b = 0;
int cpu_a = 0, cpu_b = 0;
- if (rseq_register_current_thread()) {
+ if (__rseq_register_current_thread(rseq_no_glibc, opt_rseq_legacy)) {
fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
errno, strerror(errno));
abort();
@@ -1475,6 +1478,7 @@ static void show_usage(int argc, char **argv)
printf(" [-D M] Disable rseq for each M threads\n");
printf(" [-T test] Choose test: (s)pinlock, (l)ist, (b)uffer, (m)emcpy, (i)ncrement, membarrie(r)\n");
printf(" [-M] Push into buffer and memcpy buffer with memory barriers.\n");
+ printf(" [-O] Test with optimized RSEQ\n");
printf(" [-v] Verbose output.\n");
printf(" [-h] Show this help.\n");
printf("\n");
@@ -1602,6 +1606,9 @@ int main(int argc, char **argv)
case 'M':
opt_mo = RSEQ_MO_RELEASE;
break;
+ case 'L':
+ opt_rseq_legacy = true;
+ break;
default:
show_usage(argc, argv);
goto error;
@@ -1618,7 +1625,7 @@ int main(int argc, char **argv)
if (set_signal_handler())
goto error;
- if (!opt_disable_rseq && rseq_register_current_thread())
+ if (!opt_disable_rseq && __rseq_register_current_thread(rseq_no_glibc, opt_rseq_legacy))
goto error;
if (!opt_disable_rseq && !rseq_validate_cpu_id()) {
fprintf(stderr, "Error: cpu id getter unavailable\n");
diff --git a/tools/testing/selftests/rseq/rseq-abi.h b/tools/testing/selftests/rseq/rseq-abi.h
index ecef315204b2..5f4ea2152c2f 100644
--- a/tools/testing/selftests/rseq/rseq-abi.h
+++ b/tools/testing/selftests/rseq/rseq-abi.h
@@ -192,9 +192,14 @@ struct rseq_abi {
struct rseq_abi_slice_ctrl slice_ctrl;
/*
+ * Place holder to push the size above 32 bytes.
+ */
+ __u8 __reserved;
+
+ /*
* Flexible array member at end of structure, after last feature field.
*/
char end[];
-} __attribute__((aligned(4 * sizeof(__u64))));
+} __attribute__((aligned(256)));
#endif /* _RSEQ_ABI_H */
diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
index a736727b83c1..be0d0a97031e 100644
--- a/tools/testing/selftests/rseq/rseq.c
+++ b/tools/testing/selftests/rseq/rseq.c
@@ -56,6 +56,7 @@ ptrdiff_t rseq_offset;
* unsuccessful.
*/
unsigned int rseq_size = -1U;
+static unsigned int rseq_alloc_size;
/* Flags used during rseq registration. */
unsigned int rseq_flags;
@@ -115,29 +116,17 @@ bool rseq_available(void)
}
}
-/* The rseq areas need to be at least 32 bytes. */
-static
-unsigned int get_rseq_min_alloc_size(void)
-{
- unsigned int alloc_size = rseq_size;
-
- if (alloc_size < ORIG_RSEQ_ALLOC_SIZE)
- alloc_size = ORIG_RSEQ_ALLOC_SIZE;
- return alloc_size;
-}
-
/*
* Return the feature size supported by the kernel.
*
* Depending on the value returned by getauxval(AT_RSEQ_FEATURE_SIZE):
*
- * 0: Return ORIG_RSEQ_FEATURE_SIZE (20)
+ * 0: Return ORIG_RSEQ_FEATURE_SIZE (20)
* > 0: Return the value from getauxval(AT_RSEQ_FEATURE_SIZE).
*
* It should never return a value below ORIG_RSEQ_FEATURE_SIZE.
*/
-static
-unsigned int get_rseq_kernel_feature_size(void)
+static unsigned int get_rseq_kernel_feature_size(void)
{
unsigned long auxv_rseq_feature_size, auxv_rseq_align;
@@ -152,15 +141,24 @@ unsigned int get_rseq_kernel_feature_size(void)
return ORIG_RSEQ_FEATURE_SIZE;
}
-int rseq_register_current_thread(void)
+int __rseq_register_current_thread(bool nolibc, bool legacy)
{
+ unsigned int size;
int rc;
if (!rseq_ownership) {
/* Treat libc's ownership as a successful registration. */
- return 0;
+ return nolibc ? -EBUSY : 0;
}
- rc = sys_rseq(&__rseq.abi, get_rseq_min_alloc_size(), 0, RSEQ_SIG);
+
+ /* The minimal allocation size is 32, which is the legacy allocation size */
+ size = get_rseq_kernel_feature_size();
+ if (legacy || size < ORIG_RSEQ_ALLOC_SIZE)
+ rseq_alloc_size = ORIG_RSEQ_ALLOC_SIZE;
+ else
+ rseq_alloc_size = size;
+
+ rc = sys_rseq(&__rseq.abi, rseq_alloc_size, 0, RSEQ_SIG);
if (rc) {
/*
* After at least one thread has registered successfully
@@ -179,9 +177,8 @@ int rseq_register_current_thread(void)
* The first thread to register sets the rseq_size to mimic the libc
* behavior.
*/
- if (RSEQ_READ_ONCE(rseq_size) == 0) {
- RSEQ_WRITE_ONCE(rseq_size, get_rseq_kernel_feature_size());
- }
+ if (RSEQ_READ_ONCE(rseq_size) == 0)
+ RSEQ_WRITE_ONCE(rseq_size, size);
return 0;
}
@@ -194,7 +191,7 @@ int rseq_unregister_current_thread(void)
/* Treat libc's ownership as a successful unregistration. */
return 0;
}
- rc = sys_rseq(&__rseq.abi, get_rseq_min_alloc_size(), RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG);
+ rc = sys_rseq(&__rseq.abi, rseq_alloc_size, RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG);
if (rc)
return -1;
return 0;
diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
index f51a5fdb0444..c62ebb9290c0 100644
--- a/tools/testing/selftests/rseq/rseq.h
+++ b/tools/testing/selftests/rseq/rseq.h
@@ -8,6 +8,7 @@
#ifndef RSEQ_H
#define RSEQ_H
+#include <assert.h>
#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
@@ -142,7 +143,12 @@ static inline struct rseq_abi *rseq_get_abi(void)
* succeed. A restartable sequence executed from a non-registered
* thread will always fail.
*/
-int rseq_register_current_thread(void);
+int __rseq_register_current_thread(bool nolibc, bool legacy);
+
+static inline int rseq_register_current_thread(void)
+{
+ return __rseq_register_current_thread(false, false);
+}
/*
* Unregister rseq for current thread.
diff --git a/tools/testing/selftests/rseq/run_legacy_check.sh b/tools/testing/selftests/rseq/run_legacy_check.sh
new file mode 100755
index 000000000000..5577b46ea092
--- /dev/null
+++ b/tools/testing/selftests/rseq/run_legacy_check.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+GLIBC_TUNABLES="${GLIBC_TUNABLES:-}:glibc.pthread.rseq=0" ./legacy_check
diff --git a/tools/testing/selftests/rseq/run_param_test.sh b/tools/testing/selftests/rseq/run_param_test.sh
index 8d31426ab41f..69a3fa049929 100755
--- a/tools/testing/selftests/rseq/run_param_test.sh
+++ b/tools/testing/selftests/rseq/run_param_test.sh
@@ -34,6 +34,11 @@ REPS=1000
SLOW_REPS=100
NR_THREADS=$((6*${NR_CPUS}))
+# Prevent GLIBC from registering RSEQ so the selftest can run in legacy and
+# performance optimized mode.
+GLIBC_TUNABLES="${GLIBC_TUNABLES:-}:glibc.pthread.rseq=0"
+export GLIBC_TUNABLES
+
function do_tests()
{
local i=0
@@ -103,6 +108,40 @@ function inject_blocking()
NR_LOOPS=
}
+echo "Testing in legacy RSEQ mode"
+echo "Yield injection (25%)"
+inject_blocking -m 4 -y -L
+
+echo "Yield injection (50%)"
+inject_blocking -m 2 -y -L
+
+echo "Yield injection (100%)"
+inject_blocking -m 1 -y -L
+
+echo "Kill injection (25%)"
+inject_blocking -m 4 -k -L
+
+echo "Kill injection (50%)"
+inject_blocking -m 2 -k -L
+
+echo "Kill injection (100%)"
+inject_blocking -m 1 -k -L
+
+echo "Sleep injection (1ms, 25%)"
+inject_blocking -m 4 -s 1 -L
+
+echo "Sleep injection (1ms, 50%)"
+inject_blocking -m 2 -s 1 -L
+
+echo "Sleep injection (1ms, 100%)"
+inject_blocking -m 1 -s 1 -L
+
+./check_optimized || {
+ echo "Skipping optimized RSEQ mode test. Not supported";
+ exit 0
+}
+
+echo "Testing in optimized RSEQ mode"
echo "Yield injection (25%)"
inject_blocking -m 4 -y
diff --git a/tools/testing/selftests/rseq/run_timeslice_test.sh b/tools/testing/selftests/rseq/run_timeslice_test.sh
new file mode 100755
index 000000000000..551ebed71ec6
--- /dev/null
+++ b/tools/testing/selftests/rseq/run_timeslice_test.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+
+# Prevent GLIBC from registering RSEQ so the selftest can run in legacy
+# and performance optimized mode.
+GLIBC_TUNABLES="${GLIBC_TUNABLES:-}:glibc.pthread.rseq=0"
+export GLIBC_TUNABLES
+
+./check_optimized || {
+ echo "Skipping optimized RSEQ mode test. Not supported";
+ exit 0
+}
+
+./slice_test
diff --git a/tools/testing/selftests/rseq/slice_test.c b/tools/testing/selftests/rseq/slice_test.c
index 357122dcb487..e402d4440bc2 100644
--- a/tools/testing/selftests/rseq/slice_test.c
+++ b/tools/testing/selftests/rseq/slice_test.c
@@ -124,6 +124,13 @@ FIXTURE_SETUP(slice_ext)
{
cpu_set_t affinity;
+ if (__rseq_register_current_thread(true, false))
+ SKIP(return, "RSEQ not supported\n");
+
+ if (prctl(PR_RSEQ_SLICE_EXTENSION, PR_RSEQ_SLICE_EXTENSION_SET,
+ PR_RSEQ_SLICE_EXT_ENABLE, 0, 0))
+ SKIP(return, "Time slice extension not supported\n");
+
ASSERT_EQ(sched_getaffinity(0, sizeof(affinity), &affinity), 0);
/* Pin it on a single CPU. Avoid CPU 0 */
@@ -137,11 +144,6 @@ FIXTURE_SETUP(slice_ext)
break;
}
- ASSERT_EQ(rseq_register_current_thread(), 0);
-
- ASSERT_EQ(prctl(PR_RSEQ_SLICE_EXTENSION, PR_RSEQ_SLICE_EXTENSION_SET,
- PR_RSEQ_SLICE_EXT_ENABLE, 0, 0), 0);
-
self->noise_params.noise_nsecs = variant->noise_nsecs;
self->noise_params.sleep_nsecs = variant->sleep_nsecs;
self->noise_params.run = 1;
diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile
index 789037be44c7..5d2dffca0e91 100644
--- a/tools/testing/selftests/sched_ext/Makefile
+++ b/tools/testing/selftests/sched_ext/Makefile
@@ -175,6 +175,7 @@ auto-test-targets := \
maximal \
maybe_null \
minimal \
+ non_scx_kfunc_deny \
numa \
allowed_cpus \
peek_dsq \
diff --git a/tools/testing/selftests/sched_ext/dequeue.c b/tools/testing/selftests/sched_ext/dequeue.c
index 4e93262703ca..383d06e972a4 100644
--- a/tools/testing/selftests/sched_ext/dequeue.c
+++ b/tools/testing/selftests/sched_ext/dequeue.c
@@ -33,6 +33,7 @@ static void worker_fn(int id)
/* Do some work to trigger scheduling events */
for (j = 0; j < 10000; j++)
sum += j;
+ asm volatile("" : : "r"(sum));
/* Sleep to trigger dequeue */
usleep(1000 + (id * 100));
diff --git a/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.bpf.c b/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.bpf.c
new file mode 100644
index 000000000000..9f16d39255e7
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.bpf.c
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Verify that context-sensitive SCX kfuncs (even "unlocked" ones) are
+ * restricted to only SCX struct_ops programs. Non-SCX struct_ops programs,
+ * such as TCP congestion control programs, should be rejected by the BPF
+ * verifier when attempting to call these kfuncs.
+ *
+ * Copyright (C) 2026 Ching-Chun (Jim) Huang <jserv@ccns.ncku.edu.tw>
+ * Copyright (C) 2026 Cheng-Yang Chou <yphbchou0911@gmail.com>
+ */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* SCX kfunc from scx_kfunc_ids_any set */
+void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
+
+SEC("struct_ops/ssthresh")
+__u32 BPF_PROG(tcp_ca_ssthresh, struct sock *sk)
+{
+ /*
+ * This call should be rejected by the verifier because this is a
+ * TCP congestion control program (non-SCX struct_ops).
+ */
+ scx_bpf_kick_cpu(0, 0);
+ return 2;
+}
+
+SEC("struct_ops/cong_avoid")
+void BPF_PROG(tcp_ca_cong_avoid, struct sock *sk, __u32 ack, __u32 acked) {}
+
+SEC("struct_ops/undo_cwnd")
+__u32 BPF_PROG(tcp_ca_undo_cwnd, struct sock *sk) { return 2; }
+
+SEC(".struct_ops")
+struct tcp_congestion_ops tcp_non_scx_ca = {
+ .ssthresh = (void *)tcp_ca_ssthresh,
+ .cong_avoid = (void *)tcp_ca_cong_avoid,
+ .undo_cwnd = (void *)tcp_ca_undo_cwnd,
+ .name = "tcp_kfunc_deny",
+};
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.c b/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.c
new file mode 100644
index 000000000000..1c031575fb87
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.c
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Verify that context-sensitive SCX kfuncs (even "unlocked" ones) are
+ * restricted to only SCX struct_ops programs. Non-SCX struct_ops programs,
+ * such as TCP congestion control programs, should be rejected by the BPF
+ * verifier when attempting to call these kfuncs.
+ *
+ * Copyright (C) 2026 Ching-Chun (Jim) Huang <jserv@ccns.ncku.edu.tw>
+ * Copyright (C) 2026 Cheng-Yang Chou <yphbchou0911@gmail.com>
+ */
+
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+#include "non_scx_kfunc_deny.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status run(void *ctx)
+{
+ struct non_scx_kfunc_deny *skel;
+ int err;
+
+ skel = non_scx_kfunc_deny__open();
+ if (!skel) {
+ SCX_ERR("Failed to open skel");
+ return SCX_TEST_FAIL;
+ }
+
+ err = non_scx_kfunc_deny__load(skel);
+ non_scx_kfunc_deny__destroy(skel);
+
+ if (err == 0) {
+ SCX_ERR("non-SCX BPF program loaded when it should have been rejected");
+ return SCX_TEST_FAIL;
+ }
+
+ return SCX_TEST_PASS;
+}
+
+struct scx_test non_scx_kfunc_deny = {
+ .name = "non_scx_kfunc_deny",
+ .description = "Verify that non-SCX struct_ops programs cannot call SCX kfuncs",
+ .run = run,
+};
+REGISTER_SCX_TEST(&non_scx_kfunc_deny)
diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
index eefadd0546d3..848696c373fc 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
@@ -1136,5 +1136,194 @@
"teardown": [
"$TC qdisc del dev $DUMMY handle 1: root"
]
+ },
+ {
+ "id": "7a5f",
+ "name": "Force red to dequeue from its child's gso_skb with qfq leaf",
+ "category": [
+ "qdisc",
+ "tbf",
+ "red",
+ "qfq"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.11.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY root handle 1: tbf rate 88bit burst 1661b peakrate 2257333 minburst 1024 limit 7b",
+ "$TC qdisc add dev $DUMMY parent 1: handle 2: red limit 757 min 16 max 24 avpkt 16",
+ "$TC qdisc add dev $DUMMY parent 2: handle 3: qfq",
+ "$TC class add dev $DUMMY classid 3:1 parent 3: qfq maxpkt 512 weight 1",
+ "$TC filter add dev $DUMMY parent 3: protocol ip prio 1 matchall classid 3:1 action ok"
+ ],
+ "cmdUnderTest": "ping -c 1 10.10.10.1 -W0.01 -I$DUMMY || true",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s -j qdisc ls dev $DUMMY parent 1:",
+ "matchJSON": [
+ {
+ "kind": "red",
+ "handle": "2:",
+ "bytes": 98,
+ "packets": 1,
+ "backlog": 0,
+ "qlen": 0
+ }
+ ],
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "cdae",
+ "name": "Force sfb to dequeue from its child's gso_skb with qfq leaf",
+ "category": [
+ "qdisc",
+ "tbf",
+ "sfb",
+ "qfq"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.11.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY root handle 1: tbf rate 88bit burst 1661b peakrate 2257333 minburst 1024 limit 7b",
+ "$TC qdisc add dev $DUMMY parent 1: handle 2: sfb",
+ "$TC qdisc add dev $DUMMY parent 2: handle 3: qfq",
+ "$TC class add dev $DUMMY classid 3:1 parent 3: qfq maxpkt 512 weight 1",
+ "$TC filter add dev $DUMMY parent 3: protocol ip prio 1 matchall classid 3:1 action ok"
+ ],
+ "cmdUnderTest": "ping -c 1 10.10.10.1 -W0.01 -I$DUMMY || true",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s -j qdisc ls dev $DUMMY parent 1:",
+ "matchJSON": [
+ {
+ "kind": "sfb",
+ "handle": "2:",
+ "bytes": 98,
+ "packets": 1,
+ "backlog": 0,
+ "qlen": 0
+ }
+ ],
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "291d",
+ "name": "Force red to dequeue from its child's gso_skb with dualpi2 leaf",
+ "category": [
+ "qdisc",
+ "tbf",
+ "red",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.11.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY root handle 1: tbf rate 88bit burst 1661b peakrate 2257333 minburst 1024 limit 7b",
+ "$TC qdisc add dev $DUMMY parent 1: handle 2: red limit 757 min 16 max 24 avpkt 16",
+ "$TC qdisc add dev $DUMMY parent 2: handle 3: dualpi2"
+ ],
+ "cmdUnderTest": "ping -c 1 10.10.10.1 -W0.01 -I$DUMMY || true",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s -j qdisc ls dev $DUMMY parent 1:",
+ "matchJSON": [
+ {
+ "kind": "red",
+ "handle": "2:",
+ "bytes": 98,
+ "packets": 1,
+ "backlog": 0,
+ "qlen": 0
+ }
+ ],
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "9c6d",
+ "name": "Force sfb to dequeue from its child's gso_skb with dualpi2 leaf",
+ "category": [
+ "qdisc",
+ "tbf",
+ "sfb",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.11.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY root handle 1: tbf rate 88bit burst 1661b peakrate 2257333 minburst 1024 limit 7b",
+ "$TC qdisc add dev $DUMMY parent 1: handle 2: sfb",
+ "$TC qdisc add dev $DUMMY parent 2: handle 3: dualpi2"
+ ],
+ "cmdUnderTest": "ping -c 1 10.10.10.1 -W0.01 -I$DUMMY || true",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s -j qdisc ls dev $DUMMY parent 1:",
+ "matchJSON": [
+ {
+ "kind": "sfb",
+ "handle": "2:",
+ "bytes": 98,
+ "packets": 1,
+ "backlog": 0,
+ "qlen": 0
+ }
+ ],
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "3a62",
+ "name": "Try to create a qlen underflow with QFQ/CBS",
+ "category": [
+ "qdisc",
+ "qfq",
+ "cbs"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.10.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY root handle 1: qfq",
+ "$TC class add dev $DUMMY classid 1:1 parent 1: qfq",
+ "$TC class add dev $DUMMY classid 1:2 parent 1: qfq",
+ "$TC qdisc add dev $DUMMY handle 2: parent 1:1 cbs",
+ "$TC qdisc add dev $DUMMY handle 3: parent 2: netem delay 5000000000",
+ "$TC filter add dev $DUMMY parent 1: prio 1 u32 match ip dst 10.10.10.1 classid 1:1 action ok",
+ "$TC filter add dev $DUMMY parent 1: prio 2 u32 match ip dst 10.10.10.2 classid 1:2 action ok",
+ "ping -c 1 10.10.10.1 -W0.01 -I$DUMMY || true",
+ "$IP l set $DUMMY down",
+ "$IP l set $DUMMY up",
+ "$TC qdisc replace dev $DUMMY handle 4: parent 2: pfifo"
+ ],
+ "cmdUnderTest": "ping -c 1 10.10.10.2 -W0.01 -I$DUMMY",
+ "expExitCode": "1",
+ "verifyCmd": "$TC -s -j qdisc ls dev $DUMMY parent 1:1",
+ "matchJSON": [
+ {
+ "kind": "cbs",
+ "handle": "2:",
+ "bytes": 0,
+ "packets": 0
+ }
+ ],
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
index 557fb074acf0..cd19d05925e4 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
@@ -302,5 +302,31 @@
"$TC qdisc del dev $ETH root",
"echo \"1\" > /sys/bus/netdevsim/del_device"
]
+ },
+ {
+ "id": "c7e1",
+ "name": "Class dump after graft and delete of explicit child qdisc",
+ "category": [
+ "qdisc",
+ "taprio"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "echo \"1 1 8\" > /sys/bus/netdevsim/new_device",
+ "$TC qdisc replace dev $ETH handle 8001: parent root taprio num_tc 8 map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 base-time 0 sched-entry S ff 20000000 clockid CLOCK_TAI",
+ "$TC qdisc add dev $ETH parent 8001:1 handle 8002: pfifo",
+ "$TC qdisc del dev $ETH parent 8001:1 handle 8002:"
+ ],
+ "cmdUnderTest": "$TC class show dev $ETH",
+ "expExitCode": "0",
+ "verifyCmd": "$TC class show dev $ETH",
+ "matchPattern": "class taprio 8001:[0-9]+ root",
+ "matchCount": "8",
+ "teardown": [
+ "$TC qdisc del dev $ETH root",
+ "echo \"1\" > /sys/bus/netdevsim/del_device"
+ ]
}
]
diff --git a/tools/testing/selftests/ublk/Makefile b/tools/testing/selftests/ublk/Makefile
index ec6a8ce83d38..6e4fe8d1fed1 100644
--- a/tools/testing/selftests/ublk/Makefile
+++ b/tools/testing/selftests/ublk/Makefile
@@ -37,6 +37,7 @@ TEST_PROGS += test_loop_07.sh
TEST_PROGS += test_integrity_01.sh
TEST_PROGS += test_integrity_02.sh
+TEST_PROGS += test_integrity_03.sh
TEST_PROGS += test_recover_01.sh
TEST_PROGS += test_recover_02.sh
diff --git a/tools/testing/selftests/ublk/test_batch_01.sh b/tools/testing/selftests/ublk/test_batch_01.sh
index a18fb39af8be..6e19303706a9 100755
--- a/tools/testing/selftests/ublk/test_batch_01.sh
+++ b/tools/testing/selftests/ublk/test_batch_01.sh
@@ -18,7 +18,7 @@ dev_id=$(_add_ublk_dev -t loop -q 2 -b "${UBLK_BACKFILES[0]}")
_check_add_dev $TID $?
if ! _mkfs_mount_test /dev/ublkb"${dev_id}"; then
- _cleanup_test "generic"
+ _cleanup_test
_show_result $TID 255
fi
@@ -27,5 +27,5 @@ _check_add_dev $TID $?
_mkfs_mount_test /dev/ublkb"${dev_id}"
ERR_CODE=$?
-_cleanup_test "generic"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_batch_02.sh b/tools/testing/selftests/ublk/test_batch_02.sh
index 7ca384d11987..7c683f755379 100755
--- a/tools/testing/selftests/ublk/test_batch_02.sh
+++ b/tools/testing/selftests/ublk/test_batch_02.sh
@@ -25,5 +25,5 @@ fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio --rw=readwrit
--iodepth=32 --size=100M --numjobs=4 > /dev/null 2>&1
ERR_CODE=$?
-_cleanup_test "generic"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_batch_03.sh b/tools/testing/selftests/ublk/test_batch_03.sh
index aca9cf144b55..914ccd6a335d 100755
--- a/tools/testing/selftests/ublk/test_batch_03.sh
+++ b/tools/testing/selftests/ublk/test_batch_03.sh
@@ -25,5 +25,5 @@ fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio --rw=readwrit
--iodepth=32 --size=100M --numjobs=4 > /dev/null 2>&1
ERR_CODE=$?
-_cleanup_test "generic"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_generic_02.sh b/tools/testing/selftests/ublk/test_generic_02.sh
index 46b657143fd6..2afc8cdbed8f 100755
--- a/tools/testing/selftests/ublk/test_generic_02.sh
+++ b/tools/testing/selftests/ublk/test_generic_02.sh
@@ -29,7 +29,7 @@ for _ in $(seq 100); do
done
if ! kill -0 "$btrace_pid" 2>/dev/null; then
- _cleanup_test "null"
+ _cleanup_test
exit "$UBLK_SKIP_CODE"
fi
@@ -51,5 +51,5 @@ if grep -q "^out_of_order:" "$UBLK_TMP"; then
grep "^out_of_order:" "$UBLK_TMP"
ERR_CODE=255
fi
-_cleanup_test "null"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_generic_03.sh b/tools/testing/selftests/ublk/test_generic_03.sh
index 8934ea926762..8e78be860d34 100755
--- a/tools/testing/selftests/ublk/test_generic_03.sh
+++ b/tools/testing/selftests/ublk/test_generic_03.sh
@@ -23,5 +23,5 @@ fi
if [ "$max_segment_size" != "32768" ]; then
ERR_CODE=255
fi
-_cleanup_test "null"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_generic_06.sh b/tools/testing/selftests/ublk/test_generic_06.sh
index 14a05054fcd8..a8b3634b6b4b 100755
--- a/tools/testing/selftests/ublk/test_generic_06.sh
+++ b/tools/testing/selftests/ublk/test_generic_06.sh
@@ -36,5 +36,5 @@ if [ $ELAPSED -ge 5 ]; then
ERR_CODE=255
fi
-_cleanup_test "fault_inject"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_generic_07.sh b/tools/testing/selftests/ublk/test_generic_07.sh
index 8dcfd8978f50..d2c5e65bd124 100755
--- a/tools/testing/selftests/ublk/test_generic_07.sh
+++ b/tools/testing/selftests/ublk/test_generic_07.sh
@@ -23,5 +23,5 @@ if [ "$ERR_CODE" -eq 0 ]; then
ERR_CODE=$?
fi
-_cleanup_test "generic"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_generic_08.sh b/tools/testing/selftests/ublk/test_generic_08.sh
index ce88c31d6b9c..77a18b313f3d 100755
--- a/tools/testing/selftests/ublk/test_generic_08.sh
+++ b/tools/testing/selftests/ublk/test_generic_08.sh
@@ -18,7 +18,7 @@ dev_id=$(_add_ublk_dev -t loop -q 2 --auto_zc "${UBLK_BACKFILES[0]}")
_check_add_dev $TID $?
if ! _mkfs_mount_test /dev/ublkb"${dev_id}"; then
- _cleanup_test "generic"
+ _cleanup_test
_show_result $TID 255
fi
@@ -27,5 +27,5 @@ _check_add_dev $TID $?
_mkfs_mount_test /dev/ublkb"${dev_id}"
ERR_CODE=$?
-_cleanup_test "generic"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_generic_09.sh b/tools/testing/selftests/ublk/test_generic_09.sh
index 744d0cdaa242..6c25242f245f 100755
--- a/tools/testing/selftests/ublk/test_generic_09.sh
+++ b/tools/testing/selftests/ublk/test_generic_09.sh
@@ -22,6 +22,6 @@ _check_add_dev $TID $?
fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio --rw=readwrite --iodepth=32 --size=256M > /dev/null 2>&1
ERR_CODE=$?
-_cleanup_test "null"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_generic_10.sh b/tools/testing/selftests/ublk/test_generic_10.sh
index 4b4293b9081f..fdabc9d9075e 100755
--- a/tools/testing/selftests/ublk/test_generic_10.sh
+++ b/tools/testing/selftests/ublk/test_generic_10.sh
@@ -25,5 +25,5 @@ if [ "$new_size" != "$size" ]; then
ERR_CODE=255
fi
-_cleanup_test "null"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_generic_12.sh b/tools/testing/selftests/ublk/test_generic_12.sh
index 54b81ddfe9f9..435497f8da8d 100755
--- a/tools/testing/selftests/ublk/test_generic_12.sh
+++ b/tools/testing/selftests/ublk/test_generic_12.sh
@@ -25,7 +25,7 @@ btrace_pid=$!
sleep 2
if ! kill -0 "$btrace_pid" > /dev/null 2>&1; then
- _cleanup_test "null"
+ _cleanup_test
exit "$UBLK_SKIP_CODE"
fi
@@ -54,5 +54,5 @@ if [[ $NR_THREADS_THAT_HANDLED_IO -ne $NTHREADS ]]; then
ERR_CODE=255
fi
-_cleanup_test "null"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_generic_13.sh b/tools/testing/selftests/ublk/test_generic_13.sh
index 922115aa14f4..2c1be6286db8 100755
--- a/tools/testing/selftests/ublk/test_generic_13.sh
+++ b/tools/testing/selftests/ublk/test_generic_13.sh
@@ -15,5 +15,5 @@ if ${UBLK_PROG} features | grep -q unknown; then
ERR_CODE=255
fi
-_cleanup_test "null"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_generic_16.sh b/tools/testing/selftests/ublk/test_generic_16.sh
index 3ef367836ac5..6a4952146ea1 100755
--- a/tools/testing/selftests/ublk/test_generic_16.sh
+++ b/tools/testing/selftests/ublk/test_generic_16.sh
@@ -9,7 +9,7 @@ _prep_test "null" "stop --safe command"
# Check if SAFE_STOP_DEV feature is supported
if ! _have_feature "SAFE_STOP_DEV"; then
- _cleanup_test "null"
+ _cleanup_test
exit "$UBLK_SKIP_CODE"
fi
@@ -52,5 +52,5 @@ wait $dd_pid 2>/dev/null
_ublk_del_dev "${dev_id}"
udevadm settle
-_cleanup_test "null"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_generic_17.sh b/tools/testing/selftests/ublk/test_generic_17.sh
index 2278b5fc9dba..b483d53a897a 100755
--- a/tools/testing/selftests/ublk/test_generic_17.sh
+++ b/tools/testing/selftests/ublk/test_generic_17.sh
@@ -31,5 +31,5 @@ fi
# time out here
_ublk_del_dev "${dev_id}"
-_cleanup_test "fault_inject"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_integrity_02.sh b/tools/testing/selftests/ublk/test_integrity_02.sh
index aaf1f52da559..2c35fbc8a7cc 100755
--- a/tools/testing/selftests/ublk/test_integrity_02.sh
+++ b/tools/testing/selftests/ublk/test_integrity_02.sh
@@ -7,9 +7,10 @@ if ! _have_program fio; then
exit $UBLK_SKIP_CODE
fi
+min_fio_version=fio-3.42
fio_version=$(fio --version)
-if [[ "$fio_version" =~ fio-[0-9]+\.[0-9]+$ ]]; then
- echo "Requires development fio version with https://github.com/axboe/fio/pull/1992"
+if ! sort --version-sort --check=quiet <(printf "%s\n%s\n" "$min_fio_version" "$fio_version"); then
+ echo "Requires fio version with https://github.com/axboe/fio/pull/1992"
exit $UBLK_SKIP_CODE
fi
diff --git a/tools/testing/selftests/ublk/test_integrity_03.sh b/tools/testing/selftests/ublk/test_integrity_03.sh
new file mode 100755
index 000000000000..10f02339ea2d
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_integrity_03.sh
@@ -0,0 +1,103 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+if ! _have_program fio; then
+ exit $UBLK_SKIP_CODE
+fi
+
+_test_fill_and_verify() {
+ fio --name fill --rw randwrite $fio_args > /dev/null
+ if [ $? != 0 ]; then
+ echo "fio fill failed"
+ ERR_CODE=255
+ return 1
+ fi
+
+ fio --name verify --rw randread $fio_args > /dev/null
+ if [ $? != 0 ]; then
+ echo "fio verify failed"
+ ERR_CODE=255
+ return 1
+ fi
+}
+
+_test_corrupted_reftag() {
+ local dd_reftag_args="bs=1 seek=58 count=6 oflag=dsync conv=notrunc status=none"
+
+ # Overwrite 6-byte reftag at offset 48 + 10 = 58
+ dd if=/dev/urandom "of=${UBLK_BACKFILES[1]}" $dd_reftag_args
+ if [ $? != 0 ]; then
+ echo "dd corrupted_reftag failed"
+ ERR_CODE=255
+ return 1
+ fi
+
+ if fio --name corrupted_reftag --rw randread $fio_args > /dev/null 2> "$fio_err"; then
+ echo "fio corrupted_reftag unexpectedly succeeded"
+ ERR_CODE=255
+ return 1
+ fi
+
+ if ! grep -q "$expected_err" "$fio_err"; then
+ echo "fio corrupted_reftag message not found: $expected_err"
+ ERR_CODE=255
+ return 1
+ fi
+
+ # Reset to 0
+ dd if=/dev/zero "of=${UBLK_BACKFILES[1]}" $dd_reftag_args
+ if [ $? != 0 ]; then
+ echo "dd restore corrupted_reftag failed"
+ ERR_CODE=255
+ return 1
+ fi
+}
+
+_test_corrupted_data() {
+ local dd_data_args="bs=512 count=1 oflag=direct,dsync conv=notrunc status=none"
+
+ dd if=/dev/zero "of=${UBLK_BACKFILES[0]}" $dd_data_args
+ if [ $? != 0 ]; then
+ echo "dd corrupted_data failed"
+ ERR_CODE=255
+ return 1
+ fi
+
+ if fio --name corrupted_data --rw randread $fio_args > /dev/null 2> "$fio_err"; then
+ echo "fio corrupted_data unexpectedly succeeded"
+ ERR_CODE=255
+ return 1
+ fi
+
+ if ! grep -q "$expected_err" "$fio_err"; then
+ echo "fio corrupted_data message not found: $expected_err"
+ ERR_CODE=255
+ return 1
+ fi
+}
+
+_prep_test "loop" "end-to-end auto integrity"
+
+_create_backfile 0 256M
+_create_backfile 1 32M # 256M * (64 integrity bytes / 512 data bytes)
+integrity_params="--integrity_capable --integrity_reftag
+ --metadata_size 64 --pi_offset 48 --csum_type nvme"
+dev_id=$(_add_ublk_dev -t loop -u $integrity_params "${UBLK_BACKFILES[@]}")
+_check_add_dev "$TID" $?
+
+fio_args="--ioengine libaio --direct 1 --bsrange 512-1M --iodepth 32
+ --filename /dev/ublkb$dev_id"
+fio_err=$(mktemp "${UBLK_TEST_DIR}"/fio_err_XXXXX)
+ERR_CODE=0
+
+expected_err="Invalid or incomplete multibyte or wide character: read offset=0"
+_test_fill_and_verify && \
+_test_corrupted_reftag && \
+_test_corrupted_data
+
+rm -f "$fio_err"
+
+_cleanup_test
+_show_result "$TID" $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_loop_01.sh b/tools/testing/selftests/ublk/test_loop_01.sh
index 338a235fd82a..c0f5b619ad6e 100755
--- a/tools/testing/selftests/ublk/test_loop_01.sh
+++ b/tools/testing/selftests/ublk/test_loop_01.sh
@@ -20,6 +20,6 @@ _check_add_dev $TID $?
_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M
ERR_CODE=$?
-_cleanup_test "loop"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_loop_02.sh b/tools/testing/selftests/ublk/test_loop_02.sh
index 04c52454e2ec..f4191ea71f50 100755
--- a/tools/testing/selftests/ublk/test_loop_02.sh
+++ b/tools/testing/selftests/ublk/test_loop_02.sh
@@ -14,6 +14,6 @@ _check_add_dev $TID $?
_mkfs_mount_test /dev/ublkb"${dev_id}"
ERR_CODE=$?
-_cleanup_test "loop"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_loop_03.sh b/tools/testing/selftests/ublk/test_loop_03.sh
index 6e8f649fe93d..aaac0c59a5ad 100755
--- a/tools/testing/selftests/ublk/test_loop_03.sh
+++ b/tools/testing/selftests/ublk/test_loop_03.sh
@@ -19,6 +19,6 @@ _check_add_dev $TID $?
_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M
ERR_CODE=$?
-_cleanup_test "loop"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_loop_04.sh b/tools/testing/selftests/ublk/test_loop_04.sh
index 9f6774ec0de6..f584c119f1d2 100755
--- a/tools/testing/selftests/ublk/test_loop_04.sh
+++ b/tools/testing/selftests/ublk/test_loop_04.sh
@@ -15,6 +15,6 @@ _check_add_dev $TID $?
_mkfs_mount_test /dev/ublkb"${dev_id}"
ERR_CODE=$?
-_cleanup_test "loop"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_loop_05.sh b/tools/testing/selftests/ublk/test_loop_05.sh
index 2b8d99e007be..ca1a5df5f9de 100755
--- a/tools/testing/selftests/ublk/test_loop_05.sh
+++ b/tools/testing/selftests/ublk/test_loop_05.sh
@@ -20,6 +20,6 @@ _check_add_dev $TID $?
_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M
ERR_CODE=$?
-_cleanup_test "loop"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_loop_06.sh b/tools/testing/selftests/ublk/test_loop_06.sh
index e73f6f4844db..26f710ba9db7 100755
--- a/tools/testing/selftests/ublk/test_loop_06.sh
+++ b/tools/testing/selftests/ublk/test_loop_06.sh
@@ -19,6 +19,6 @@ _check_add_dev $TID $?
_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M
ERR_CODE=$?
-_cleanup_test "loop"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_loop_07.sh b/tools/testing/selftests/ublk/test_loop_07.sh
index 264d20e7c530..a9ab0b671cb2 100755
--- a/tools/testing/selftests/ublk/test_loop_07.sh
+++ b/tools/testing/selftests/ublk/test_loop_07.sh
@@ -15,6 +15,6 @@ _check_add_dev $TID $?
_mkfs_mount_test /dev/ublkb"${dev_id}"
ERR_CODE=$?
-_cleanup_test "loop"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_null_01.sh b/tools/testing/selftests/ublk/test_null_01.sh
index eebce8076530..d2c38cbb2dd5 100755
--- a/tools/testing/selftests/ublk/test_null_01.sh
+++ b/tools/testing/selftests/ublk/test_null_01.sh
@@ -18,6 +18,6 @@ _check_add_dev $TID $?
fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio --rw=readwrite --iodepth=32 --size=256M > /dev/null 2>&1
ERR_CODE=$?
-_cleanup_test "null"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_null_02.sh b/tools/testing/selftests/ublk/test_null_02.sh
index 654bdff39664..7b205ca56367 100755
--- a/tools/testing/selftests/ublk/test_null_02.sh
+++ b/tools/testing/selftests/ublk/test_null_02.sh
@@ -18,6 +18,6 @@ _check_add_dev $TID $?
fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio --rw=readwrite --iodepth=32 --size=256M > /dev/null 2>&1
ERR_CODE=$?
-_cleanup_test "null"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_null_03.sh b/tools/testing/selftests/ublk/test_null_03.sh
index 29cd09f06672..eee7a87a60da 100755
--- a/tools/testing/selftests/ublk/test_null_03.sh
+++ b/tools/testing/selftests/ublk/test_null_03.sh
@@ -18,6 +18,6 @@ _check_add_dev $TID $?
fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio --rw=readwrite --iodepth=32 --size=256M > /dev/null 2>&1
ERR_CODE=$?
-_cleanup_test "null"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_part_01.sh b/tools/testing/selftests/ublk/test_part_01.sh
index 8028f6e4b3a5..fa3b1a9af894 100755
--- a/tools/testing/selftests/ublk/test_part_01.sh
+++ b/tools/testing/selftests/ublk/test_part_01.sh
@@ -82,7 +82,7 @@ fi
_prep_test "generic" "test UBLK_F_NO_AUTO_PART_SCAN"
if ! _have_feature "UBLK_F_NO_AUTO_PART_SCAN"; then
- _cleanup_test "generic"
+ _cleanup_test
exit "$UBLK_SKIP_CODE"
fi
@@ -100,5 +100,5 @@ format_backing_file "${UBLK_BACKFILES[0]}"
[ "$ERR_CODE" -eq 0 ] && test_no_auto_part_scan "${UBLK_BACKFILES[0]}"
[ $? -ne 0 ] && ERR_CODE=255
-_cleanup_test "generic"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_part_02.sh b/tools/testing/selftests/ublk/test_part_02.sh
index 7d42ab4d6e83..d9ec06f36aee 100755
--- a/tools/testing/selftests/ublk/test_part_02.sh
+++ b/tools/testing/selftests/ublk/test_part_02.sh
@@ -63,5 +63,5 @@ _test_partition_scan_no_hang "no" "DEAD"
# Test 2: With recovery support - should transition to QUIESCED
_test_partition_scan_no_hang "yes" "QUIESCED"
-_cleanup_test "partition_scan"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_recover_01.sh b/tools/testing/selftests/ublk/test_recover_01.sh
index 2672f9c40fa8..1cddc2345dab 100755
--- a/tools/testing/selftests/ublk/test_recover_01.sh
+++ b/tools/testing/selftests/ublk/test_recover_01.sh
@@ -40,5 +40,5 @@ ublk_run_recover_test -t loop -q 2 -r 1 -i 1 "${UBLK_BACKFILES[0]}" &
ublk_run_recover_test -t stripe -q 2 -r 1 -i 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
wait
-_cleanup_test "recover"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_recover_02.sh b/tools/testing/selftests/ublk/test_recover_02.sh
index bda5064bc31f..9c3f481880d3 100755
--- a/tools/testing/selftests/ublk/test_recover_02.sh
+++ b/tools/testing/selftests/ublk/test_recover_02.sh
@@ -44,5 +44,5 @@ ublk_run_recover_test -t loop -q 2 -r 1 -z -i 1 "${UBLK_BACKFILES[0]}" &
ublk_run_recover_test -t stripe -q 2 -r 1 -z -i 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
wait
-_cleanup_test "recover"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_recover_03.sh b/tools/testing/selftests/ublk/test_recover_03.sh
index e0dc0b8fe5d6..2554805e5b02 100755
--- a/tools/testing/selftests/ublk/test_recover_03.sh
+++ b/tools/testing/selftests/ublk/test_recover_03.sh
@@ -39,5 +39,5 @@ ublk_run_quiesce_recover -t loop -q 2 -r 1 -i 1 "${UBLK_BACKFILES[0]}" &
ublk_run_quiesce_recover -t stripe -q 2 -r 1 -i 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
wait
-_cleanup_test "quiesce"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_recover_04.sh b/tools/testing/selftests/ublk/test_recover_04.sh
index 178443394ca5..4c83c1840c68 100755
--- a/tools/testing/selftests/ublk/test_recover_04.sh
+++ b/tools/testing/selftests/ublk/test_recover_04.sh
@@ -35,5 +35,5 @@ ublk_run_recover_test -t loop -q 2 -r 1 -u -i 1 "${UBLK_BACKFILES[0]}" &
ublk_run_recover_test -t stripe -q 2 -r 1 -u -i 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
wait
-_cleanup_test "recover"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_shmemzc_01.sh b/tools/testing/selftests/ublk/test_shmemzc_01.sh
index 47210af2aa20..b244ab3479a2 100755
--- a/tools/testing/selftests/ublk/test_shmemzc_01.sh
+++ b/tools/testing/selftests/ublk/test_shmemzc_01.sh
@@ -67,6 +67,6 @@ umount "$HTLB_MNT"
rmdir "$HTLB_MNT"
echo "$OLD_NR_HP" > /proc/sys/vm/nr_hugepages
-_cleanup_test "shmem_zc"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_shmemzc_02.sh b/tools/testing/selftests/ublk/test_shmemzc_02.sh
index aed9262494e9..810dccba6d84 100755
--- a/tools/testing/selftests/ublk/test_shmemzc_02.sh
+++ b/tools/testing/selftests/ublk/test_shmemzc_02.sh
@@ -63,6 +63,6 @@ umount "$HTLB_MNT"
rmdir "$HTLB_MNT"
echo "$OLD_NR_HP" > /proc/sys/vm/nr_hugepages
-_cleanup_test "shmem_zc"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_shmemzc_03.sh b/tools/testing/selftests/ublk/test_shmemzc_03.sh
index db967a9ffe81..606362491a32 100755
--- a/tools/testing/selftests/ublk/test_shmemzc_03.sh
+++ b/tools/testing/selftests/ublk/test_shmemzc_03.sh
@@ -64,6 +64,6 @@ umount "$HTLB_MNT"
rmdir "$HTLB_MNT"
echo "$OLD_NR_HP" > /proc/sys/vm/nr_hugepages
-_cleanup_test "shmem_zc"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_shmemzc_04.sh b/tools/testing/selftests/ublk/test_shmemzc_04.sh
index 899de088ece4..9a2a6c2e8abe 100755
--- a/tools/testing/selftests/ublk/test_shmemzc_04.sh
+++ b/tools/testing/selftests/ublk/test_shmemzc_04.sh
@@ -67,6 +67,6 @@ umount "$HTLB_MNT"
rmdir "$HTLB_MNT"
echo "$OLD_NR_HP" > /proc/sys/vm/nr_hugepages
-_cleanup_test "shmem_zc"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_01.sh b/tools/testing/selftests/ublk/test_stress_01.sh
index a9322ce496e9..f91783f27649 100755
--- a/tools/testing/selftests/ublk/test_stress_01.sh
+++ b/tools/testing/selftests/ublk/test_stress_01.sh
@@ -29,5 +29,5 @@ ublk_io_and_remove 256M -t loop -q 4 "${UBLK_BACKFILES[0]}" &
ublk_io_and_remove 256M -t stripe -q 4 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
wait
-_cleanup_test "stress"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_02.sh b/tools/testing/selftests/ublk/test_stress_02.sh
index 6c114194f9c9..b128d11658a8 100755
--- a/tools/testing/selftests/ublk/test_stress_02.sh
+++ b/tools/testing/selftests/ublk/test_stress_02.sh
@@ -31,5 +31,5 @@ for nr_queue in 1 4; do
wait
done
-_cleanup_test "stress"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_03.sh b/tools/testing/selftests/ublk/test_stress_03.sh
index 4e81ca0db758..a0f0aba8eebc 100755
--- a/tools/testing/selftests/ublk/test_stress_03.sh
+++ b/tools/testing/selftests/ublk/test_stress_03.sh
@@ -49,5 +49,5 @@ if _have_feature "PER_IO_DAEMON"; then
wait
fi
-_cleanup_test "stress"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_04.sh b/tools/testing/selftests/ublk/test_stress_04.sh
index 6c6f44b172bc..896eae68d444 100755
--- a/tools/testing/selftests/ublk/test_stress_04.sh
+++ b/tools/testing/selftests/ublk/test_stress_04.sh
@@ -48,5 +48,5 @@ if _have_feature "PER_IO_DAEMON"; then
wait
fi
-_cleanup_test "stress"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_05.sh b/tools/testing/selftests/ublk/test_stress_05.sh
index 7e9324de2030..d6c00c72080d 100755
--- a/tools/testing/selftests/ublk/test_stress_05.sh
+++ b/tools/testing/selftests/ublk/test_stress_05.sh
@@ -79,5 +79,5 @@ if _have_feature "PER_IO_DAEMON"; then
fi
wait
-_cleanup_test "stress"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_06.sh b/tools/testing/selftests/ublk/test_stress_06.sh
index c72e5d0b14be..9481a273a4b4 100755
--- a/tools/testing/selftests/ublk/test_stress_06.sh
+++ b/tools/testing/selftests/ublk/test_stress_06.sh
@@ -34,5 +34,5 @@ ublk_io_and_remove 256M -t loop -q 4 -u --nthreads 8 --per_io_tasks "${UBLK_BACK
ublk_io_and_remove 256M -t stripe -q 4 -u --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
wait
-_cleanup_test "stress"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_07.sh b/tools/testing/selftests/ublk/test_stress_07.sh
index 04c2764d5238..3e01c037cffb 100755
--- a/tools/testing/selftests/ublk/test_stress_07.sh
+++ b/tools/testing/selftests/ublk/test_stress_07.sh
@@ -34,5 +34,5 @@ ublk_io_and_kill_daemon 256M -t loop -q 4 -u --nthreads 8 --per_io_tasks "${UBLK
ublk_io_and_kill_daemon 256M -t stripe -q 4 -u --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
wait
-_cleanup_test "stress"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_08.sh b/tools/testing/selftests/ublk/test_stress_08.sh
index 37f7d204879a..5f32424d2892 100755
--- a/tools/testing/selftests/ublk/test_stress_08.sh
+++ b/tools/testing/selftests/ublk/test_stress_08.sh
@@ -40,5 +40,5 @@ ublk_io_and_remove 256M -t stripe -q 4 --auto_zc -b "${UBLK_BACKFILES[1]}" "${UB
ublk_io_and_remove 8G -t null -q 4 -z --auto_zc --auto_zc_fallback -b &
wait
-_cleanup_test "stress"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_09.sh b/tools/testing/selftests/ublk/test_stress_09.sh
index 53c1e3b2ab30..64cb8d9b0438 100755
--- a/tools/testing/selftests/ublk/test_stress_09.sh
+++ b/tools/testing/selftests/ublk/test_stress_09.sh
@@ -39,5 +39,5 @@ ublk_io_and_kill_daemon 256M -t stripe -q 4 -b "${UBLK_BACKFILES[1]}" "${UBLK_BA
ublk_io_and_kill_daemon 8G -t null -q 4 -z --auto_zc --auto_zc_fallback -b &
wait
-_cleanup_test "stress"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stripe_01.sh b/tools/testing/selftests/ublk/test_stripe_01.sh
index 3bc821aadad8..9ffce477b461 100755
--- a/tools/testing/selftests/ublk/test_stripe_01.sh
+++ b/tools/testing/selftests/ublk/test_stripe_01.sh
@@ -21,5 +21,5 @@ _check_add_dev $TID $?
_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=512M
ERR_CODE=$?
-_cleanup_test "stripe"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stripe_02.sh b/tools/testing/selftests/ublk/test_stripe_02.sh
index 4a7d2b21a6bf..4c172950a247 100755
--- a/tools/testing/selftests/ublk/test_stripe_02.sh
+++ b/tools/testing/selftests/ublk/test_stripe_02.sh
@@ -16,5 +16,5 @@ _check_add_dev $TID $?
_mkfs_mount_test /dev/ublkb"${dev_id}"
ERR_CODE=$?
-_cleanup_test "stripe"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stripe_03.sh b/tools/testing/selftests/ublk/test_stripe_03.sh
index a1c159d54e53..2cdf9f958988 100755
--- a/tools/testing/selftests/ublk/test_stripe_03.sh
+++ b/tools/testing/selftests/ublk/test_stripe_03.sh
@@ -21,5 +21,5 @@ _check_add_dev $TID $?
_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=512M
ERR_CODE=$?
-_cleanup_test "stripe"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stripe_04.sh b/tools/testing/selftests/ublk/test_stripe_04.sh
index 0c30bd6c2b3b..e24120eaca0e 100755
--- a/tools/testing/selftests/ublk/test_stripe_04.sh
+++ b/tools/testing/selftests/ublk/test_stripe_04.sh
@@ -16,5 +16,5 @@ _check_add_dev $TID $?
_mkfs_mount_test /dev/ublkb"${dev_id}"
ERR_CODE=$?
-_cleanup_test "stripe"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stripe_05.sh b/tools/testing/selftests/ublk/test_stripe_05.sh
index 6ddfa88ad226..f3de2d5cdfe4 100755
--- a/tools/testing/selftests/ublk/test_stripe_05.sh
+++ b/tools/testing/selftests/ublk/test_stripe_05.sh
@@ -21,5 +21,5 @@ _check_add_dev $TID $?
_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=512M
ERR_CODE=$?
-_cleanup_test "stripe"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stripe_06.sh b/tools/testing/selftests/ublk/test_stripe_06.sh
index a2c7bf4cc613..3fd5cd902956 100755
--- a/tools/testing/selftests/ublk/test_stripe_06.sh
+++ b/tools/testing/selftests/ublk/test_stripe_06.sh
@@ -16,5 +16,5 @@ _check_add_dev $TID $?
_mkfs_mount_test /dev/ublkb"${dev_id}"
ERR_CODE=$?
-_cleanup_test "stripe"
+_cleanup_test
_show_result $TID $ERR_CODE
diff --git a/tools/testing/vma/include/dup.h b/tools/testing/vma/include/dup.h
index b4864aad2db0..9e0dfd3a85b0 100644
--- a/tools/testing/vma/include/dup.h
+++ b/tools/testing/vma/include/dup.h
@@ -1330,7 +1330,7 @@ static inline int __compat_vma_mmap(struct vm_area_desc *desc,
/* Update the VMA from the descriptor. */
compat_set_vma_from_desc(vma, desc);
/* Complete any specified mmap actions. */
- return mmap_action_complete(vma, &desc->action);
+ return mmap_action_complete(vma, &desc->action, /*is_compat=*/true);
}
static inline int compat_vma_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/tools/testing/vma/include/stubs.h b/tools/testing/vma/include/stubs.h
index a30b8bc84955..64164e25658f 100644
--- a/tools/testing/vma/include/stubs.h
+++ b/tools/testing/vma/include/stubs.h
@@ -87,7 +87,8 @@ static inline int mmap_action_prepare(struct vm_area_desc *desc)
}
static inline int mmap_action_complete(struct vm_area_struct *vma,
- struct mmap_action *action)
+ struct mmap_action *action,
+ bool is_compat)
{
return 0;
}
diff --git a/tools/testing/vsock/util.c b/tools/testing/vsock/util.c
index 1fe1338c79cd..fe316b02a590 100644
--- a/tools/testing/vsock/util.c
+++ b/tools/testing/vsock/util.c
@@ -381,8 +381,14 @@ void send_buf(int fd, const void *buf, size_t len, int flags,
}
}
+#define RECV_PEEK_RETRY_USEC (10 * 1000)
+
/* Receive bytes in a buffer and check the return value.
*
+ * When MSG_PEEK is set, recv() is retried until it returns at least
+ * expected_ret bytes. The function returns on error, EOF, or timeout
+ * as usual.
+ *
* expected_ret:
* <0 Negative errno (for testing errors)
* 0 End-of-file
@@ -403,6 +409,15 @@ void recv_buf(int fd, void *buf, size_t len, int flags, ssize_t expected_ret)
if (ret <= 0)
break;
+ if (flags & MSG_PEEK) {
+ if (ret >= expected_ret) {
+ nread = ret;
+ break;
+ }
+ timeout_usleep(RECV_PEEK_RETRY_USEC);
+ continue;
+ }
+
nread += ret;
} while (nread < len);
timeout_end();
diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
index 5bd20ccd9335..76be0e4a7f0e 100644
--- a/tools/testing/vsock/vsock_test.c
+++ b/tools/testing/vsock/vsock_test.c
@@ -346,6 +346,38 @@ static void test_stream_msg_peek_server(const struct test_opts *opts)
return test_msg_peek_server(opts, false);
}
+static void test_stream_peek_after_recv_server(const struct test_opts *opts)
+{
+ unsigned char buf_normal[MSG_PEEK_BUF_LEN];
+ unsigned char buf_peek[MSG_PEEK_BUF_LEN];
+ int fd;
+
+ fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
+ if (fd < 0) {
+ perror("accept");
+ exit(EXIT_FAILURE);
+ }
+
+ control_writeln("SRVREADY");
+
+ /* Partial recv to advance offset within the skb */
+ recv_buf(fd, buf_normal, 1, 0, 1);
+
+ /* Peek with a buffer larger than the remaining data */
+ recv_buf(fd, buf_peek, sizeof(buf_peek), MSG_PEEK, sizeof(buf_peek) - 1);
+
+ /* Consume the remaining data */
+ recv_buf(fd, buf_normal, sizeof(buf_normal) - 1, 0, sizeof(buf_normal) - 1);
+
+ /* Compare full peek and normal read. */
+ if (memcmp(buf_peek, buf_normal, sizeof(buf_peek) - 1)) {
+ fprintf(stderr, "Full peek data mismatch\n");
+ exit(EXIT_FAILURE);
+ }
+
+ close(fd);
+}
+
#define SOCK_BUF_SIZE (2 * 1024 * 1024)
#define SOCK_BUF_SIZE_SMALL (64 * 1024)
#define MAX_MSG_PAGES 4
@@ -1500,18 +1532,7 @@ static void test_stream_credit_update_test(const struct test_opts *opts,
}
/* Wait until there will be 128KB of data in rx queue. */
- while (1) {
- ssize_t res;
-
- res = recv(fd, buf, buf_size, MSG_PEEK);
- if (res == buf_size)
- break;
-
- if (res <= 0) {
- fprintf(stderr, "unexpected 'recv()' return: %zi\n", res);
- exit(EXIT_FAILURE);
- }
- }
+ recv_buf(fd, buf, buf_size, MSG_PEEK, buf_size);
/* There is 128KB of data in the socket's rx queue, dequeue first
* 64KB, credit update is sent if 'low_rx_bytes_test' == true.
@@ -2520,6 +2541,11 @@ static struct test_case test_cases[] = {
.run_client = test_stream_tx_credit_bounds_client,
.run_server = test_stream_tx_credit_bounds_server,
},
+ {
+ .name = "SOCK_STREAM MSG_PEEK after partial recv",
+ .run_client = test_stream_msg_peek_client,
+ .run_server = test_stream_peek_after_recv_server,
+ },
{},
};
diff --git a/tools/usb/usbip/libsrc/usbip_device_driver.c b/tools/usb/usbip/libsrc/usbip_device_driver.c
index 927a151fa9aa..1dfbb76ab26c 100644
--- a/tools/usb/usbip/libsrc/usbip_device_driver.c
+++ b/tools/usb/usbip/libsrc/usbip_device_driver.c
@@ -137,9 +137,9 @@ static int usbip_device_driver_open(struct usbip_host_driver *hdriver)
INIT_LIST_HEAD(&hdriver->edev_list);
ret = usbip_generic_driver_open(hdriver);
- if (ret)
- err("please load " USBIP_CORE_MOD_NAME ".ko and "
- USBIP_DEVICE_DRV_NAME ".ko!");
+ if (ret || hdriver->ndevs == 0)
+ info("please load " USBIP_CORE_MOD_NAME ".ko and "
+ USBIP_DEVICE_DRV_NAME ".ko");
return ret;
}
diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
index ca78aa368476..01599cb2fa7b 100644
--- a/tools/usb/usbip/libsrc/usbip_host_common.c
+++ b/tools/usb/usbip/libsrc/usbip_host_common.c
@@ -149,6 +149,9 @@ static int refresh_exported_devices(struct usbip_host_driver *hdriver)
}
}
+ if (hdriver->ndevs == 0)
+ info("Please load appropriate modules or export devices.");
+
return 0;
}
diff --git a/tools/usb/usbip/libsrc/usbip_host_driver.c b/tools/usb/usbip/libsrc/usbip_host_driver.c
index 573e73ec36bd..bd8a6b84de0e 100644
--- a/tools/usb/usbip/libsrc/usbip_host_driver.c
+++ b/tools/usb/usbip/libsrc/usbip_host_driver.c
@@ -32,9 +32,10 @@ static int usbip_host_driver_open(struct usbip_host_driver *hdriver)
INIT_LIST_HEAD(&hdriver->edev_list);
ret = usbip_generic_driver_open(hdriver);
- if (ret)
- err("please load " USBIP_CORE_MOD_NAME ".ko and "
- USBIP_HOST_DRV_NAME ".ko!");
+ if (ret || hdriver->ndevs == 0)
+ info("please load " USBIP_CORE_MOD_NAME ".ko and "
+ USBIP_HOST_DRV_NAME ".ko");
+
return ret;
}