Diffstat (limited to 'tools/perf/tests/shell')
-rwxr-xr-x  tools/perf/tests/shell/amd-ibs-swfilt.sh | 67
-rwxr-xr-x  tools/perf/tests/shell/annotate.sh | 57
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh | 4
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_adding_kernel.sh | 8
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_basic.sh | 4
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_invalid_options.sh | 9
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_line_semantics.sh | 9
-rwxr-xr-x  tools/perf/tests/shell/base_report/setup.sh | 20
-rwxr-xr-x  tools/perf/tests/shell/base_report/test_basic.sh | 54
-rwxr-xr-x  tools/perf/tests/shell/buildid.sh | 2
-rw-r--r--  tools/perf/tests/shell/common/init.sh | 7
-rw-r--r--  tools/perf/tests/shell/coresight/Makefile | 2
-rwxr-xr-x  tools/perf/tests/shell/coresight/asm_pure_loop.sh | 2
-rw-r--r--  tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S | 2
-rwxr-xr-x  tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/diff.sh | 14
-rwxr-xr-x  tools/perf/tests/shell/drm_pmu.sh | 78
-rwxr-xr-x  tools/perf/tests/shell/ftrace.sh | 7
-rwxr-xr-x  tools/perf/tests/shell/header.sh | 74
-rw-r--r--  tools/perf/tests/shell/lib/attr.py | 8
-rw-r--r--  tools/perf/tests/shell/lib/perf_has_symbol.sh | 2
-rw-r--r--  tools/perf/tests/shell/lib/perf_json_output_lint.py | 25
-rw-r--r--  tools/perf/tests/shell/lib/perf_metric_validation.py | 12
-rw-r--r--  tools/perf/tests/shell/lib/probe_vfs_getname.sh | 22
-rw-r--r--  tools/perf/tests/shell/lib/setup_python.sh | 2
-rw-r--r--  tools/perf/tests/shell/lib/stat_output.sh | 13
-rw-r--r--  tools/perf/tests/shell/lib/waiting.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/list.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/lock_contention.sh | 28
-rwxr-xr-x  tools/perf/tests/shell/perf-report-hierarchy.sh | 43
-rwxr-xr-x  tools/perf/tests/shell/perftool-testsuite_probe.sh | 3
-rwxr-xr-x  tools/perf/tests/shell/probe_vfs_getname.sh | 11
-rwxr-xr-x  tools/perf/tests/shell/record+probe_libc_inet_pton.sh | 44
-rwxr-xr-x  tools/perf/tests/shell/record+script_probe_vfs_getname.sh | 11
-rwxr-xr-x  tools/perf/tests/shell/record+zstd_comp_decomp.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/record.sh | 138
-rwxr-xr-x  tools/perf/tests/shell/record_bpf_filter.sh | 6
-rwxr-xr-x  tools/perf/tests/shell/record_lbr.sh | 5
-rwxr-xr-x  tools/perf/tests/shell/record_offcpu.sh | 73
-rwxr-xr-x  tools/perf/tests/shell/record_sideband.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/sched.sh | 116
-rwxr-xr-x  tools/perf/tests/shell/script.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/stat+csv_output.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/stat+csv_summary.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/stat+event_uniquifying.sh | 77
-rwxr-xr-x  tools/perf/tests/shell/stat+json_output.sh | 14
-rwxr-xr-x  tools/perf/tests/shell/stat+shadow_stat.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/stat+std_output.sh | 10
-rwxr-xr-x  tools/perf/tests/shell/stat.sh | 89
-rwxr-xr-x  tools/perf/tests/shell/stat_all_metrics.sh | 112
-rwxr-xr-x  tools/perf/tests/shell/stat_all_pfm.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/stat_all_pmu.sh | 48
-rwxr-xr-x  tools/perf/tests/shell/stat_bpf_counters.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/stat_bpf_counters_cgrp.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/stat_metrics_values.sh | 17
-rwxr-xr-x  tools/perf/tests/shell/test_arm_callgraph_fp.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/test_arm_coresight.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/test_arm_coresight_disasm.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/test_arm_spe.sh | 32
-rwxr-xr-x  tools/perf/tests/shell/test_arm_spe_fork.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/test_bpf_metadata.sh | 76
-rwxr-xr-x  tools/perf/tests/shell/test_brstack.sh | 72
-rwxr-xr-x  tools/perf/tests/shell/test_data_symbol.sh | 61
-rwxr-xr-x  tools/perf/tests/shell/test_intel_pt.sh | 35
-rwxr-xr-x  tools/perf/tests/shell/test_stat_intel_tpebs.sh | 89
-rwxr-xr-x  tools/perf/tests/shell/test_task_analyzer.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/test_uprobe_from_different_cu.sh | 11
-rwxr-xr-x  tools/perf/tests/shell/trace+probe_vfs_getname.sh | 12
-rwxr-xr-x  tools/perf/tests/shell/trace_btf_enum.sh | 28
-rwxr-xr-x  tools/perf/tests/shell/trace_btf_general.sh | 94
-rwxr-xr-x  tools/perf/tests/shell/trace_exit_race.sh | 3
-rwxr-xr-x  tools/perf/tests/shell/trace_record_replay.sh | 21
-rwxr-xr-x  tools/perf/tests/shell/trace_summary.sh | 77
76 files changed, 1653 insertions(+), 347 deletions(-)
diff --git a/tools/perf/tests/shell/amd-ibs-swfilt.sh b/tools/perf/tests/shell/amd-ibs-swfilt.sh
new file mode 100755
index 000000000000..7045ec72ba4c
--- /dev/null
+++ b/tools/perf/tests/shell/amd-ibs-swfilt.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+# AMD IBS software filtering
+
+echo "check availability of IBS swfilt"
+
+# check if IBS PMU is available
+if [ ! -d /sys/bus/event_source/devices/ibs_op ]; then
+ echo "[SKIP] IBS PMU does not exist"
+ exit 2
+fi
+
+# check if IBS PMU has swfilt format
+if [ ! -f /sys/bus/event_source/devices/ibs_op/format/swfilt ]; then
+ echo "[SKIP] IBS PMU does not have swfilt"
+ exit 2
+fi
+
+echo "run perf record with modifier and swfilt"
+
+# setting any modifiers should fail
+perf record -B -e ibs_op//u -o /dev/null true 2> /dev/null
+if [ $? -eq 0 ]; then
+ echo "[FAIL] IBS PMU should not accept exclude_kernel"
+ exit 1
+fi
+
+# setting it with swfilt should be fine
+perf record -B -e ibs_op/swfilt/u -o /dev/null true
+if [ $? -ne 0 ]; then
+ echo "[FAIL] IBS op PMU cannot handle swfilt for exclude_kernel"
+ exit 1
+fi
+
+# setting it with swfilt=1 should be fine
+perf record -B -e ibs_op/swfilt=1/k -o /dev/null true
+if [ $? -ne 0 ]; then
+ echo "[FAIL] IBS op PMU cannot handle swfilt for exclude_user"
+ exit 1
+fi
+
+# check ibs_fetch PMU as well
+perf record -B -e ibs_fetch/swfilt/u -o /dev/null true
+if [ $? -ne 0 ]; then
+ echo "[FAIL] IBS fetch PMU cannot handle swfilt for exclude_kernel"
+ exit 1
+fi
+
+# check system wide recording
+perf record -aB --synth=no -e ibs_op/swfilt/k -o /dev/null true
+if [ $? -ne 0 ]; then
+ echo "[FAIL] IBS op PMU cannot handle swfilt in system-wide mode"
+ exit 1
+fi
+
+echo "check number of samples with swfilt"
+
+kernel_sample=$(perf record -e ibs_op/swfilt/u -o- true | perf script -i- -F misc | grep -c ^K)
+if [ ${kernel_sample} -ne 0 ]; then
+ echo "[FAIL] unexpected kernel samples: " ${kernel_sample}
+ exit 1
+fi
+
+user_sample=$(perf record -e ibs_fetch/swfilt/k -o- true | perf script -i- -F misc | grep -c ^U)
+if [ ${user_sample} -ne 0 ]; then
+ echo "[FAIL] unexpected user samples: " ${user_sample}
+ exit 1
+fi
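
The sample-count checks above hinge on "perf script -F misc": the misc field prefixes each sample with its origin (K for kernel, U for user), so counting those prefixes shows whether swfilt really dropped the excluded side. A minimal standalone sketch of the same check, assuming an ibs_op PMU that advertises the swfilt format bit:

    # Count kernel-tagged samples from a user-only IBS session; expect 0.
    perf record -e ibs_op/swfilt/u -o- true | perf script -i- -F misc | grep -c '^K'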
diff --git a/tools/perf/tests/shell/annotate.sh b/tools/perf/tests/shell/annotate.sh
index 1590a37363de..689de58e9238 100755
--- a/tools/perf/tests/shell/annotate.sh
+++ b/tools/perf/tests/shell/annotate.sh
@@ -35,54 +35,79 @@ trap_cleanup() {
trap trap_cleanup EXIT TERM INT
test_basic() {
- echo "Basic perf annotate test"
- if ! perf record -o "${perfdata}" ${testprog} 2> /dev/null
+ mode=$1
+ echo "${mode} perf annotate test"
+ if [ "x${mode}" == "xBasic" ]
then
- echo "Basic annotate [Failed: perf record]"
+ perf record -o "${perfdata}" ${testprog} 2> /dev/null
+ else
+ perf record -o - ${testprog} 2> /dev/null > "${perfdata}"
+ fi
+ if [ "x$?" != "x0" ]
+ then
+ echo "${mode} annotate [Failed: perf record]"
err=1
return
fi
# Generate the annotated output file
- perf annotate --no-demangle -i "${perfdata}" --stdio 2> /dev/null | head -250 > "${perfout}"
+ if [ "x${mode}" == "xBasic" ]
+ then
+ perf annotate --no-demangle -i "${perfdata}" --stdio --percent-limit 10 2> /dev/null > "${perfout}"
+ else
+ perf annotate --no-demangle -i - --stdio 2> /dev/null --percent-limit 10 < "${perfdata}" > "${perfout}"
+ fi
# check if it has the target symbol
- if ! grep "${testsym}" "${perfout}"
+ if ! grep -q "${testsym}" "${perfout}"
then
- echo "Basic annotate [Failed: missing target symbol]"
+ echo "${mode} annotate [Failed: missing target symbol]"
+ cat "${perfout}"
err=1
return
fi
# check if it has the disassembly lines
- if ! grep "${disasm_regex}" "${perfout}"
+ if ! grep -q "${disasm_regex}" "${perfout}"
then
- echo "Basic annotate [Failed: missing disasm output from default disassembler]"
+ echo "${mode} annotate [Failed: missing disasm output from default disassembler]"
err=1
return
fi
# check again with a target symbol name
- if ! perf annotate --no-demangle -i "${perfdata}" "${testsym}" 2> /dev/null | \
- head -250 | grep -m 3 "${disasm_regex}"
+ if [ "x${mode}" == "xBasic" ]
then
- echo "Basic annotate [Failed: missing disasm output when specifying the target symbol]"
+ perf annotate --no-demangle -i "${perfdata}" "${testsym}" 2> /dev/null > "${perfout}"
+ else
+ perf annotate --no-demangle -i - "${testsym}" 2> /dev/null < "${perfdata}" > "${perfout}"
+ fi
+
+ if ! head -250 "${perfout}"| grep -q -m 3 "${disasm_regex}"
+ then
+ echo "${mode} annotate [Failed: missing disasm output when specifying the target symbol]"
err=1
return
fi
# check one more with external objdump tool (forced by --objdump option)
- if ! perf annotate --no-demangle -i "${perfdata}" --objdump=objdump 2> /dev/null | \
- head -250 | grep -m 3 "${disasm_regex}"
+ if [ "x${mode}" == "xBasic" ]
+ then
+ perf annotate --no-demangle -i "${perfdata}" --percent-limit 10 --objdump=objdump 2> /dev/null > "${perfout}"
+ else
+ perf annotate --no-demangle -i - "${testsym}" --percent-limit 10 --objdump=objdump 2> /dev/null < "${perfdata}" > "${perfout}"
+ fi
+ if ! grep -q -m 3 "${disasm_regex}" "${perfout}"
then
- echo "Basic annotate [Failed: missing disasm output from non default disassembler (using --objdump)]"
+ echo "${mode} annotate [Failed: missing disasm output from non default disassembler (using --objdump)]"
err=1
return
fi
- echo "Basic annotate test [Success]"
+ echo "${mode} annotate test [Success]"
}
-test_basic
+test_basic Basic
+test_basic Pipe
cleanup
exit $err
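
Besides the on-disk perf.data path, the test now also exercises pipe mode, where perf record streams to stdout and perf annotate reads from stdin. A sketch of that path outside the harness, using the noploop test workload as a stand-in for ${testprog}:

    # Pipe-mode annotate: no intermediate perf.data file on disk.
    perf record -o - -- perf test -w noploop 2> /dev/null | perf annotate --no-demangle --stdio -i -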
diff --git a/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
index bead723e34af..8226449ac5c3 100755
--- a/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
+++ b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perf_probe :: Reject blacklisted probes (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -22,7 +22,7 @@ TEST_RESULT=0
BLACKFUNC_LIST=`head -n 5 /sys/kernel/debug/kprobes/blacklist 2> /dev/null | cut -f2`
if [ -z "$BLACKFUNC_LIST" ]; then
print_overall_skipped
- exit 0
+ exit 2
fi
# try to find vmlinux with DWARF debug info
diff --git a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
index d541ffd44a93..df288cf90cd6 100755
--- a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
+++ b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Add 'perf probe's, list and remove them
+# perf_probe :: Add probes, list and remove them (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -33,7 +33,7 @@ fi
check_kprobes_available
if [ $? -ne 0 ]; then
print_overall_skipped
- exit 0
+ exit 2
fi
@@ -169,7 +169,7 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: second pr
(( TEST_RESULT += $? ))
# adding existing probe with '--force' should pass
-NO_OF_PROBES=`$CMD_PERF probe -l | wc -l`
+NO_OF_PROBES=`$CMD_PERF probe -l $TEST_PROBE| wc -l`
$CMD_PERF probe --force --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_03.err
PERF_EXIT_CODE=$?
@@ -205,7 +205,7 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using doubled probe"
$CMD_PERF probe --del \* 2> $LOGS_DIR/adding_kernel_removing_wildcard.err
PERF_EXIT_CODE=$?
-../common/check_all_lines_matched.pl "Removed event: probe:$TEST_PROBE" "Removed event: probe:${TEST_PROBE}_1" < $LOGS_DIR/adding_kernel_removing_wildcard.err
+../common/check_all_patterns_found.pl "Removed event: probe:$TEST_PROBE" "Removed event: probe:${TEST_PROBE}_1" < $LOGS_DIR/adding_kernel_removing_wildcard.err
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "removing multiple probes"
diff --git a/tools/perf/tests/shell/base_probe/test_basic.sh b/tools/perf/tests/shell/base_probe/test_basic.sh
index 09669ec479f2..9d8b5afbeddd 100755
--- a/tools/perf/tests/shell/base_probe/test_basic.sh
+++ b/tools/perf/tests/shell/base_probe/test_basic.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perf_probe :: Basic perf probe functionality (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -19,7 +19,7 @@ TEST_RESULT=0
if ! check_kprobes_available; then
print_overall_skipped
- exit 0
+ exit 2
fi
diff --git a/tools/perf/tests/shell/base_probe/test_invalid_options.sh b/tools/perf/tests/shell/base_probe/test_invalid_options.sh
index 1fedfd8b0d0d..92f7254eb32a 100755
--- a/tools/perf/tests/shell/base_probe/test_invalid_options.sh
+++ b/tools/perf/tests/shell/base_probe/test_invalid_options.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perf_probe :: Reject invalid options (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -19,9 +19,12 @@ TEST_RESULT=0
if ! check_kprobes_available; then
print_overall_skipped
- exit 0
+ exit 2
fi
+# Check for presence of DWARF
+$CMD_PERF check feature -q dwarf
+[ $? -ne 0 ] && HINT_FAIL="Some of the tests need DWARF to run"
### missing argument
@@ -75,5 +78,5 @@ done
# print overall results
-print_overall_results "$TEST_RESULT"
+print_overall_results "$TEST_RESULT" $HINT_FAIL
exit $?
diff --git a/tools/perf/tests/shell/base_probe/test_line_semantics.sh b/tools/perf/tests/shell/base_probe/test_line_semantics.sh
index d8f4bde0f585..20435b6bf6bc 100755
--- a/tools/perf/tests/shell/base_probe/test_line_semantics.sh
+++ b/tools/perf/tests/shell/base_probe/test_line_semantics.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perf_probe :: Check patterns for line semantics (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -20,9 +20,12 @@ TEST_RESULT=0
if ! check_kprobes_available; then
print_overall_skipped
- exit 0
+ exit 2
fi
+# Check for presence of DWARF
+$CMD_PERF check feature -q dwarf
+[ $? -ne 0 ] && HINT_FAIL="Some of the tests need DWARF to run"
### acceptable --line descriptions
@@ -51,5 +54,5 @@ done
# print overall results
-print_overall_results "$TEST_RESULT"
+print_overall_results "$TEST_RESULT" $HINT_FAIL
exit $?
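
Both probe tests now use the perf check subcommand to decide whether to hint that failures may come from a kernel built without DWARF debug info; the command exits non-zero when the queried feature is absent, e.g.:

    # Quiet query; the exit code is the answer.
    perf check feature -q dwarf && echo "DWARF available" || echo "no DWARF"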
diff --git a/tools/perf/tests/shell/base_report/setup.sh b/tools/perf/tests/shell/base_report/setup.sh
index 4caa496660c6..8634e7e0dda6 100755
--- a/tools/perf/tests/shell/base_report/setup.sh
+++ b/tools/perf/tests/shell/base_report/setup.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perftool-testsuite :: perf_report
# SPDX-License-Identifier: GPL-2.0
#
@@ -15,6 +15,8 @@
# include working environment
. ../common/init.sh
+TEST_RESULT=0
+
test -d "$HEADER_TAR_DIR" || mkdir -p "$HEADER_TAR_DIR"
SW_EVENT="cpu-clock"
@@ -26,7 +28,21 @@ PERF_EXIT_CODE=$?
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "prepare the perf.data file"
-TEST_RESULT=$?
+(( TEST_RESULT += $? ))
+
+# Some minimal parallel workload.
+$CMD_PERF record --latency -o $CURRENT_TEST_DIR/perf.data.1 bash -c "for i in {1..100} ; do cat /proc/cpuinfo 1> /dev/null & done; sleep 1" 2> $LOGS_DIR/setup-latency.log
+PERF_EXIT_CODE=$?
+
+echo ==================
+cat $LOGS_DIR/setup-latency.log
+echo ==================
+
+../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/setup-latency.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "prepare the perf.data.1 file"
+(( TEST_RESULT += $? ))
print_overall_results $TEST_RESULT
exit $?
diff --git a/tools/perf/tests/shell/base_report/test_basic.sh b/tools/perf/tests/shell/base_report/test_basic.sh
index 47677cbd4df3..adfd8713b8f8 100755
--- a/tools/perf/tests/shell/base_report/test_basic.sh
+++ b/tools/perf/tests/shell/base_report/test_basic.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perf_report :: Basic perf report options (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -183,6 +183,58 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "symbol filter"
(( TEST_RESULT += $? ))
+### latency and parallelism
+
+# Record with --latency should record with context switches.
+$CMD_PERF report -i $CURRENT_TEST_DIR/perf.data.1 --stdio --header-only > $LOGS_DIR/latency_header.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl ", context_switch = 1, " < $LOGS_DIR/latency_header.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "latency header"
+(( TEST_RESULT += $? ))
+
+
+# The default report for latency profile should show Overhead and Latency fields (in that order).
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data.1 > $LOGS_DIR/latency_default.log 2> $LOGS_DIR/latency_default.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "# Overhead Latency Command" < $LOGS_DIR/latency_default.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/latency_default.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "default report for latency profile"
+(( TEST_RESULT += $? ))
+
+
+# The latency report for latency profile should show Latency and Overhead fields (in that order).
+$CMD_PERF report --latency --stdio -i $CURRENT_TEST_DIR/perf.data.1 > $LOGS_DIR/latency_latency.log 2> $LOGS_DIR/latency_latency.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "# Latency Overhead Command" < $LOGS_DIR/latency_latency.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/latency_latency.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "latency report for latency profile"
+(( TEST_RESULT += $? ))
+
+
+# Ensure parallelism histogram with parallelism filter does not fail/crash.
+$CMD_PERF report --hierarchy --sort latency,parallelism,comm,symbol --parallelism=1,2 --stdio -i $CURRENT_TEST_DIR/perf.data.1 > $LOGS_DIR/parallelism_hierarchy.log 2> $LOGS_DIR/parallelism_hierarchy.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "# Latency Parallelism / Command / Symbol" < $LOGS_DIR/parallelism_hierarchy.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/parallelism_hierarchy.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "parallelism histogram"
+(( TEST_RESULT += $? ))
+
+
# TODO: $CMD_PERF report -n --showcpuutilization -TUxDg 2> 01.log
# print overall results
diff --git a/tools/perf/tests/shell/buildid.sh b/tools/perf/tests/shell/buildid.sh
index 3383ca3399d4..d2eb213da01d 100755
--- a/tools/perf/tests/shell/buildid.sh
+++ b/tools/perf/tests/shell/buildid.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# build id cache operations
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/common/init.sh b/tools/perf/tests/shell/common/init.sh
index 075f17623c8e..26c7525651e0 100644
--- a/tools/perf/tests/shell/common/init.sh
+++ b/tools/perf/tests/shell/common/init.sh
@@ -46,10 +46,13 @@ print_results()
print_overall_results()
{
RETVAL="$1"; shift
+ TASK_COMMENT="$*"
+ test -n "$TASK_COMMENT" && TASK_COMMENT=":: $TASK_COMMENT"
+
if [ $RETVAL -eq 0 ]; then
_echo "$MALLPASS## [ PASS ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY"
else
- _echo "$MALLFAIL## [ FAIL ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY :: $RETVAL failures found"
+ _echo "$MALLFAIL## [ FAIL ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY :: $RETVAL failures found $TASK_COMMENT"
fi
return $RETVAL
}
@@ -85,7 +88,7 @@ consider_skipping()
# the runmode of a testcase needs to be at least the current suite's runmode
if [ $PERFTOOL_TESTSUITE_RUNMODE -lt $TESTCASE_RUNMODE ]; then
print_overall_skipped
- exit 0
+ exit 2
fi
}
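
The recurring "exit 0" to "exit 2" change across this series follows the perf shell-test convention: 0 is pass, 1 is fail, and 2 marks a skip, so the runner reports unmet prerequisites instead of silently passing. A typical guard then reads (illustrative sketch):

    # Skip, rather than pass, when the kernel lacks the needed facility.
    [ -d /sys/kernel/debug/kprobes ] || { echo "[SKIP] no kprobes"; exit 2; }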
diff --git a/tools/perf/tests/shell/coresight/Makefile b/tools/perf/tests/shell/coresight/Makefile
index b070e779703e..fa08fd9a5991 100644
--- a/tools/perf/tests/shell/coresight/Makefile
+++ b/tools/perf/tests/shell/coresight/Makefile
@@ -24,6 +24,6 @@ CLEANDIRS = $(SUBDIRS:%=clean-%)
clean: $(CLEANDIRS)
$(CLEANDIRS):
- $(call QUIET_CLEAN, test-$(@:clean-%=%)) $(Q)$(MAKE) -C $(@:clean-%=%) clean >/dev/null
+ $(call QUIET_CLEAN, test-$(@:clean-%=%)) $(MAKE) -C $(@:clean-%=%) clean >/dev/null
.PHONY: all clean $(SUBDIRS) $(CLEANDIRS) $(INSTALLDIRS)
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop.sh b/tools/perf/tests/shell/coresight/asm_pure_loop.sh
index c63bc8c73e26..0301904b9637 100755
--- a/tools/perf/tests/shell/coresight/asm_pure_loop.sh
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop.sh
@@ -1,4 +1,4 @@
-#!/bin/sh -e
+#!/bin/bash -e
# CoreSight / ASM Pure Loop (exclusive)
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
index 75cf084a927d..577760046772 100644
--- a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
@@ -26,3 +26,5 @@ skip:
mov x0, #0
mov x8, #93 // __NR_exit syscall
svc #0
+
+.section .note.GNU-stack, "", @progbits
diff --git a/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh b/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
index 8e29630957c8..1f765d69acc3 100755
--- a/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
+++ b/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
@@ -1,4 +1,4 @@
-#!/bin/sh -e
+#!/bin/bash -e
# CoreSight / Memcpy 16k 10 Threads (exclusive)
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh b/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
index 0c4c82a1c8e1..7f43a93a2ac2 100755
--- a/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
+++ b/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
@@ -1,4 +1,4 @@
-#!/bin/sh -e
+#!/bin/bash -e
# CoreSight / Thread Loop 10 Threads - Check TID (exclusive)
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh b/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
index d3aea9fc6ced..a94d2079ed06 100755
--- a/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
+++ b/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
@@ -1,4 +1,4 @@
-#!/bin/sh -e
+#!/bin/bash -e
# CoreSight / Thread Loop 2 Threads - Check TID (exclusive)
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh b/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
index 7429d3a2ae43..cb3e97a0a89f 100755
--- a/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
+++ b/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
@@ -1,4 +1,4 @@
-#!/bin/sh -e
+#!/bin/bash -e
# CoreSight / Unroll Loop Thread 10 (exclusive)
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/diff.sh b/tools/perf/tests/shell/diff.sh
index 14b87af88703..fe05fdebcab5 100755
--- a/tools/perf/tests/shell/diff.sh
+++ b/tools/perf/tests/shell/diff.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf diff tests
# SPDX-License-Identifier: GPL-2.0
@@ -39,13 +39,13 @@ make_data() {
file="$1"
if ! perf record -o "${file}" ${testprog} 2> /dev/null
then
- echo "Workload record [Failed record]"
+ echo "Workload record [Failed record]" >&2
echo 1
return
fi
if ! perf report -i "${file}" -q | grep -q "${testsym}"
then
- echo "Workload record [Failed missing output]"
+ echo "Workload record [Failed missing output]" >&2
echo 1
return
fi
@@ -55,12 +55,12 @@ make_data() {
test_two_files() {
echo "Basic two file diff test"
err=$(make_data "${perfdata1}")
- if [ $err != 0 ]
+ if [ "$err" != 0 ]
then
return
fi
err=$(make_data "${perfdata2}")
- if [ $err != 0 ]
+ if [ "$err" != 0 ]
then
return
fi
@@ -77,12 +77,12 @@ test_two_files() {
test_three_files() {
echo "Basic three file diff test"
err=$(make_data "${perfdata1}")
- if [ $err != 0 ]
+ if [ "$err" != 0 ]
then
return
fi
err=$(make_data "${perfdata2}")
- if [ $err != 0 ]
+ if [ "$err" != 0 ]
then
return
fi
diff --git a/tools/perf/tests/shell/drm_pmu.sh b/tools/perf/tests/shell/drm_pmu.sh
new file mode 100755
index 000000000000..e629fe0e8463
--- /dev/null
+++ b/tools/perf/tests/shell/drm_pmu.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+# DRM PMU
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+output=$(mktemp /tmp/perf.drm_pmu.XXXXXX.txt)
+
+cleanup() {
+ rm -f "${output}"
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+# Array to store file descriptors and device names
+declare -A device_fds
+
+# Open all devices and store file descriptors. Opening the device will create a
+# /proc/$$/fdinfo file containing the DRM statistics.
+fd_count=3 # Start with file descriptor 3
+for device in /dev/dri/*
+do
+ if [[ ! -c "$device" ]]
+ then
+ continue
+ fi
+ major=$(stat -c "%Hr" "$device")
+ if [[ "$major" != 226 ]]
+ then
+ continue
+ fi
+ echo "Opening $device"
+ eval "exec $fd_count<\"$device\""
+ echo "fdinfo for: $device (FD: $fd_count)"
+ cat "/proc/$$/fdinfo/$fd_count"
+ echo
+ device_fds["$device"]="$fd_count"
+ fd_count=$((fd_count + 1))
+done
+
+if [[ ${#device_fds[@]} -eq 0 ]]
+then
+ echo "No DRM devices found [Skip]"
+ cleanup
+ exit 2
+fi
+
+# For each DRM event
+err=0
+for p in $(perf list --raw-dump drm-)
+do
+ echo -n "Testing perf stat of $p. "
+ perf stat -e "$p" --pid=$$ true > "$output" 2>&1
+ if ! grep -q "$p" "$output"
+ then
+ echo "Missing DRM event in: [Failed]"
+ cat "$output"
+ err=1
+ else
+ echo "[OK]"
+ fi
+done
+
+# Close all file descriptors
+for fd in "${device_fds[@]}"; do
+ eval "exec $fd<&-"
+done
+
+# Finished
+cleanup
+exit $err
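
The test works because merely holding an open file descriptor on a DRM character device (major 226) makes the kernel publish per-client statistics in /proc/<pid>/fdinfo/<fd>, which the DRM PMU events then read. Roughly, outside the test (the card0 node is an assumption; it may be a renderD* node on other systems):

    exec 3< /dev/dri/card0           # keep an fd open on a DRM device
    grep '^drm-' /proc/$$/fdinfo/3   # per-client stats appear as drm-* keys
    exec 3<&-                        # close it again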
diff --git a/tools/perf/tests/shell/ftrace.sh b/tools/perf/tests/shell/ftrace.sh
index 2df05052c324..7f8aafcbb761 100755
--- a/tools/perf/tests/shell/ftrace.sh
+++ b/tools/perf/tests/shell/ftrace.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf ftrace tests
# SPDX-License-Identifier: GPL-2.0
@@ -67,11 +67,8 @@ test_ftrace_latency() {
test_ftrace_profile() {
echo "perf ftrace profile test"
- perf ftrace profile -m 16M sleep 0.1 > "${output}"
+ perf ftrace profile --graph-opts depth=5 sleep 0.1 > "${output}"
grep ^# "${output}"
- grep sleep "${output}"
- grep schedule "${output}"
- grep execve "${output}"
time_re="[[:space:]]+1[[:digit:]]{5}\.[[:digit:]]{3}"
# 100283.000 100283.000 100283.000 1 __x64_sys_clock_nanosleep
# Check for one *clock_nanosleep line with a Count of just 1 that takes a bit more than 0.1 seconds
diff --git a/tools/perf/tests/shell/header.sh b/tools/perf/tests/shell/header.sh
new file mode 100755
index 000000000000..e1628ac0a614
--- /dev/null
+++ b/tools/perf/tests/shell/header.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+# perf header tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test_header.perf.data.XXXXX)
+script_output=$(mktemp /tmp/__perf_test_header.perf.data.XXXXX.script)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
+ rm -f "${script_output}"
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+check_header_output() {
+ declare -a fields=(
+ "captured"
+ "hostname"
+ "os release"
+ "arch"
+ "cpuid"
+ "nrcpus"
+ "event"
+ "cmdline"
+ "perf version"
+ "sibling (cores|dies|threads)"
+ "sibling threads"
+ "total memory"
+ )
+ for i in "${fields[@]}"
+ do
+ if ! grep -q -E "$i" "${script_output}"
+ then
+ echo "Failed to find expected $i in output"
+ err=1
+ fi
+ done
+}
+
+test_file() {
+ echo "Test perf header file"
+
+ perf record -o "${perfdata}" -- perf test -w noploop
+ perf report --header-only -I -i "${perfdata}" > "${script_output}"
+ check_header_output
+
+ echo "Test perf header file [Done]"
+}
+
+test_pipe() {
+ echo "Test perf header pipe"
+
+ perf record -o - -- perf test -w noploop | perf report --header-only -I -i - > "${script_output}"
+ check_header_output
+
+ echo "Test perf header pipe [Done]"
+}
+
+test_file
+test_pipe
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/lib/attr.py b/tools/perf/tests/shell/lib/attr.py
index 3db9a7d78715..bfccc727d9b2 100644
--- a/tools/perf/tests/shell/lib/attr.py
+++ b/tools/perf/tests/shell/lib/attr.py
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-from __future__ import print_function
-
+import configparser
import os
import sys
import glob
@@ -13,11 +12,6 @@ import re
import shutil
import subprocess
-try:
- import configparser
-except ImportError:
- import ConfigParser as configparser
-
def data_equal(a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
diff --git a/tools/perf/tests/shell/lib/perf_has_symbol.sh b/tools/perf/tests/shell/lib/perf_has_symbol.sh
index 561c93b75d77..0b35cce0b13d 100644
--- a/tools/perf/tests/shell/lib/perf_has_symbol.sh
+++ b/tools/perf/tests/shell/lib/perf_has_symbol.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
perf_has_symbol()
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
index 8ddb85586131..c6750ef06c0f 100644
--- a/tools/perf/tests/shell/lib/perf_json_output_lint.py
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -19,6 +19,7 @@ ap.add_argument('--per-cluster', action='store_true')
ap.add_argument('--per-die', action='store_true')
ap.add_argument('--per-node', action='store_true')
ap.add_argument('--per-socket', action='store_true')
+ap.add_argument('--metric-only', action='store_true')
ap.add_argument('--file', type=argparse.FileType('r'), default=sys.stdin)
args = ap.parse_args()
@@ -44,7 +45,7 @@ def is_counter_value(num):
def check_json_output(expected_items):
checks = {
- 'aggregate-number': lambda x: isfloat(x),
+ 'counters': lambda x: isfloat(x),
'core': lambda x: True,
'counter-value': lambda x: is_counter_value(x),
'cgroup': lambda x: True,
@@ -64,21 +65,25 @@ def check_json_output(expected_items):
'socket': lambda x: True,
'thread': lambda x: True,
'unit': lambda x: True,
+ 'insn per cycle': lambda x: isfloat(x),
+ 'GHz': lambda x: True, # FIXME: it seems unintended for --metric-only
}
input = '[\n' + ','.join(Lines) + '\n]'
for item in json.loads(input):
if expected_items != -1:
count = len(item)
- if count != expected_items and count >= 1 and count <= 7 and 'metric-value' in item:
+ if count not in expected_items and count >= 1 and count <= 7 and 'metric-value' in item:
# Events that generate >1 metric may have isolated metric
# values and possibly other prefixes like interval, core,
- # aggregate-number, or event-runtime/pcnt-running from multiplexing.
+ # counters, or event-runtime/pcnt-running from multiplexing.
pass
- elif count != expected_items and count >= 1 and count <= 5 and 'metricgroup' in item:
+ elif count not in expected_items and count >= 1 and count <= 5 and 'metricgroup' in item:
pass
- elif count == expected_items + 1 and 'metric-threshold' in item:
+ elif count - 1 in expected_items and 'metric-threshold' in item:
pass
- elif count != expected_items:
+ elif count in expected_items and 'insn per cycle' in item:
+ pass
+ elif count not in expected_items:
raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
f' in \'{item}\'')
for key, value in item.items():
@@ -90,11 +95,13 @@ def check_json_output(expected_items):
try:
if args.no_args or args.system_wide or args.event:
- expected_items = 7
+ expected_items = [5, 7]
elif args.interval or args.per_thread or args.system_wide_no_aggr:
- expected_items = 8
+ expected_items = [6, 8]
elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cluster or args.per_cache:
- expected_items = 9
+ expected_items = [7, 9]
+ elif args.metric_only:
+ expected_items = [1, 2]
else:
# If no option is specified, don't check the number of items.
expected_items = -1
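
With --metric-only, perf stat emits rows carrying only metric fields, so the linter now accepts a set of valid field counts per row instead of a single integer. The stat tests drive it roughly like this (path and wrapper details vary per test):

    # perf stat prints to stderr; feed the JSON rows to the linter on stdin.
    perf stat -j --metric-only -e instructions,cycles true 2>&1 | \
        python3 lib/perf_json_output_lint.py --metric-only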
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index 0b94216c9c46..dea8ef1977bf 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -35,7 +35,8 @@ class TestError:
class Validator:
- def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='', workload='true', metrics=''):
+ def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='',
+ workload='true', metrics='', cputype='cpu'):
self.rulefname = rulefname
self.reportfname = reportfname
self.rules = None
@@ -43,6 +44,7 @@ class Validator:
self.metrics = self.__set_metrics(metrics)
self.skiplist = set()
self.tolerance = t
+ self.cputype = cputype
self.workloads = [x for x in workload.split(",") if x]
self.wlidx = 0 # idx of current workloads
@@ -377,7 +379,7 @@ class Validator:
def _run_perf(self, metric, workload: str):
tool = 'perf'
- command = [tool, 'stat', '-j', '-M', f"{metric}", "-a"]
+ command = [tool, 'stat', '--cputype', self.cputype, '-j', '-M', f"{metric}", "-a"]
wl = workload.split()
command.extend(wl)
print(" ".join(command))
@@ -443,6 +445,8 @@ class Validator:
if 'MetricName' not in m:
print("Warning: no metric name")
continue
+ if 'Unit' in m and m['Unit'] != self.cputype:
+ continue
name = m['MetricName'].lower()
self.metrics.add(name)
if 'ScaleUnit' in m and (m['ScaleUnit'] == '1%' or m['ScaleUnit'] == '100%'):
@@ -578,6 +582,8 @@ def main() -> None:
parser.add_argument(
"-wl", help="Workload to run while data collection", default="true")
parser.add_argument("-m", help="Metric list to validate", default="")
+ parser.add_argument("-cputype", help="Only test metrics for the given CPU/PMU type",
+ default="cpu")
args = parser.parse_args()
outpath = Path(args.output_dir)
reportf = Path.joinpath(outpath, 'perf_report.json')
@@ -586,7 +592,7 @@ def main() -> None:
validator = Validator(args.rule, reportf, debug=args.debug,
datafname=datafile, fullrulefname=fullrule, workload=args.wl,
- metrics=args.m)
+ metrics=args.m, cputype=args.cputype)
ret = validator.test()
return ret
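
On hybrid systems each PMU type carries its own metric set, so the validator now skips metrics whose Unit does not match the requested type and forwards --cputype to perf stat. The command it builds is equivalent to the following (PMU and metric names are illustrative):

    # Validate one metric on one PMU type, system-wide, JSON output.
    perf stat --cputype cpu_core -j -M tma_frontend_bound -a true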
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index 5c33ec7a5a63..88cd0e26d5f6 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
perf probe -l 2>&1 | grep -q probe:vfs_getname
@@ -13,14 +13,28 @@ cleanup_probe_vfs_getname() {
add_probe_vfs_getname() {
add_probe_verbose=$1
if [ $had_vfs_getname -eq 1 ] ; then
- result_filename_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*"
- line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_filename_re" | sed -r "s/$result_filename_re/\1/")
+ result_initname_re="[[:space:]]+([[:digit:]]+)[[:space:]]+initname.*"
+ line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_initname_re" | sed -r "s/$result_initname_re/\1/")
+
+ # Search the old regular expressions so that this will
+ # pass on older kernels as well.
+ if [ -z "$line" ] ; then
+ result_filename_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*"
+ line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_filename_re" | sed -r "s/$result_filename_re/\1/")
+ fi
+
if [ -z "$line" ] ; then
result_aname_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->aname = NULL;"
line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_aname_re" | sed -r "s/$result_aname_re/\1/")
fi
+
+ if [ -z "$line" ] ; then
+ echo "Could not find probeable line"
+ return 2
+ fi
+
perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
- perf probe $add_probe_verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
+ perf probe $add_probe_verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring" || return 1
fi
}
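
The helper finds a probeable source line inside getname_flags by scanning "perf probe -L" output, now preferring the newer initname call and falling back to the older patterns on old kernels. Condensed, with the fallbacks and error handling omitted, the discovery step amounts to:

    # Line number of the initname call inside getname_flags.
    line=$(perf probe -L getname_flags 2>&1 | awk '/initname/ {print $1; exit}')
    perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string"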
diff --git a/tools/perf/tests/shell/lib/setup_python.sh b/tools/perf/tests/shell/lib/setup_python.sh
index c2fce1793538..a58e5536f2ed 100644
--- a/tools/perf/tests/shell/lib/setup_python.sh
+++ b/tools/perf/tests/shell/lib/setup_python.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
if [ "x$PYTHON" = "x" ]
diff --git a/tools/perf/tests/shell/lib/stat_output.sh b/tools/perf/tests/shell/lib/stat_output.sh
index 9a176ceae4a3..c2ec7881ec1d 100644
--- a/tools/perf/tests/shell/lib/stat_output.sh
+++ b/tools/perf/tests/shell/lib/stat_output.sh
@@ -148,6 +148,19 @@ check_per_socket()
echo "[Success]"
}
+check_metric_only()
+{
+ echo -n "Checking $1 output: metric only "
+ if [ "$(uname -m)" = "s390x" ] && ! grep '^facilities' /proc/cpuinfo | grep -qw 67
+ then
+ echo "[Skip] CPU-measurement counter facility not installed"
+ return
+ fi
+ perf stat --metric-only $2 -e instructions,cycles true
+ commachecker --metric-only
+ echo "[Success]"
+}
+
# The perf stat options for per-socket, per-core, per-die
# and -A ( no_aggr mode ) uses the info fetched from this
# directory: "/sys/devices/system/cpu/cpu*/topology". For
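
The new check_metric_only helper mirrors the other check_* functions in this library: callers pass a human-readable label and the output-format flag, and commachecker validates the row layout. Hypothetical callers, following the spelling used by the existing CSV/JSON stat tests:

    check_metric_only "CSV" "-x,"
    check_metric_only "JSON" "-j"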
diff --git a/tools/perf/tests/shell/lib/waiting.sh b/tools/perf/tests/shell/lib/waiting.sh
index bdd5a7c71591..3a152892e077 100644
--- a/tools/perf/tests/shell/lib/waiting.sh
+++ b/tools/perf/tests/shell/lib/waiting.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
tenths=date\ +%s%1N
diff --git a/tools/perf/tests/shell/list.sh b/tools/perf/tests/shell/list.sh
index 76a9846cff22..0c04b3159cef 100755
--- a/tools/perf/tests/shell/list.sh
+++ b/tools/perf/tests/shell/list.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf list tests
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/lock_contention.sh b/tools/perf/tests/shell/lock_contention.sh
index 30d195d4c62f..d33d9e4392b0 100755
--- a/tools/perf/tests/shell/lock_contention.sh
+++ b/tools/perf/tests/shell/lock_contention.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# kernel lock contention analysis test
# SPDX-License-Identifier: GPL-2.0
@@ -44,7 +44,7 @@ check() {
test_record()
{
echo "Testing perf lock record and perf lock contention"
- perf lock record -o ${perfdata} -- perf bench sched messaging > /dev/null 2>&1
+ perf lock record -o ${perfdata} -- perf bench sched messaging -p > /dev/null 2>&1
# the output goes to the stderr and we expect only 1 output (-E 1)
perf lock contention -i ${perfdata} -E 1 -q 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
@@ -64,7 +64,7 @@ test_bpf()
fi
# the perf lock contention output goes to the stderr
- perf lock con -a -b -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
@@ -75,7 +75,7 @@ test_bpf()
test_record_concurrent()
{
echo "Testing perf lock record and perf lock contention at the same time"
- perf lock record -o- -- perf bench sched messaging 2> /dev/null | \
+ perf lock record -o- -- perf bench sched messaging -p 2> /dev/null | \
perf lock contention -i- -E 1 -q 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] Recorded result count is not 1:" "$(cat "${result}" | wc -l)"
@@ -99,7 +99,7 @@ test_aggr_task()
fi
# the perf lock contention output goes to the stderr
- perf lock con -a -b -t -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -t -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
@@ -122,7 +122,7 @@ test_aggr_addr()
fi
# the perf lock contention output goes to the stderr
- perf lock con -a -b -l -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -l -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
@@ -140,7 +140,7 @@ test_aggr_cgroup()
fi
# the perf lock contention output goes to the stderr
- perf lock con -a -b -g -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -g -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
@@ -162,7 +162,7 @@ test_type_filter()
return
fi
- perf lock con -a -b -Y spinlock -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -Y spinlock -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(grep -c -v spinlock "${result}")" != "0" ]; then
echo "[Fail] BPF result should not have non-spinlocks:" "$(cat "${result}")"
err=1
@@ -194,7 +194,7 @@ test_lock_filter()
return
fi
- perf lock con -a -b -L tasklist_lock -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -L tasklist_lock -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(grep -c -v "${test_lock_filter_type}" "${result}")" != "0" ]; then
echo "[Fail] BPF result should not have non-${test_lock_filter_type} locks:" "$(cat "${result}")"
err=1
@@ -222,7 +222,7 @@ test_stack_filter()
return
fi
- perf lock con -a -b -S unix_stream -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -S unix_stream -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result should have a lock from unix_stream:" "$(cat "${result}")"
err=1
@@ -250,7 +250,7 @@ test_aggr_task_stack_filter()
return
fi
- perf lock con -a -b -t -S unix_stream -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -t -S unix_stream -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result should have a task from unix_stream:" "$(cat "${result}")"
err=1
@@ -266,7 +266,7 @@ test_cgroup_filter()
return
fi
- perf lock con -a -b -g -E 1 -F wait_total -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -g -E 1 -F wait_total -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result should have a cgroup result:" "$(cat "${result}")"
err=1
@@ -274,7 +274,7 @@ test_cgroup_filter()
fi
cgroup=$(cat "${result}" | awk '{ print $3 }')
- perf lock con -a -b -g -E 1 -G "${cgroup}" -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -g -E 1 -G "${cgroup}" -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result should have a result with cgroup filter:" "$(cat "${cgroup}")"
err=1
@@ -309,7 +309,7 @@ test_csv_output()
fi
# the perf lock contention output goes to the stderr
- perf lock con -a -b -E 1 -x , --output ${result} -- perf bench sched messaging > /dev/null 2>&1
+ perf lock con -a -b -E 1 -x , --output ${result} -- perf bench sched messaging -p > /dev/null 2>&1
output=$(grep -v "^#" ${result} | tr -d -c , | wc -c)
if [ "${header}" != "${output}" ]; then
echo "[Fail] BPF result does not match the number of commas: ${header} != ${output}"
diff --git a/tools/perf/tests/shell/perf-report-hierarchy.sh b/tools/perf/tests/shell/perf-report-hierarchy.sh
new file mode 100755
index 000000000000..e3c6f9a24f33
--- /dev/null
+++ b/tools/perf/tests/shell/perf-report-hierarchy.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# perf report --hierarchy
+# SPDX-License-Identifier: GPL-2.0
+# Arnaldo Carvalho de Melo <acme@redhat.com>
+
+set -e
+
+temp_dir=$(mktemp -d /tmp/perf-test-report.XXXXXXXXXX)
+
+cleanup()
+{
+ trap - EXIT TERM INT
+ sane=$(echo "${temp_dir}" | cut -b 1-21)
+ if [ "${sane}" = "/tmp/perf-test-report" ] ; then
+ echo "--- Cleaning up ---"
+ rm -rf "${temp_dir:?}/"*
+ rmdir "${temp_dir}"
+ fi
+}
+
+trap_cleanup()
+{
+ cleanup
+ exit 1
+}
+
+trap trap_cleanup EXIT TERM INT
+
+test_report_hierarchy()
+{
+ echo "perf report --hierarchy"
+
+ perf_data="${temp_dir}/perf-report-hierarchy-perf.data"
+ perf record -o "${perf_data}" uname
+ perf report --hierarchy -i "${perf_data}" > /dev/null
+ echo "perf report --hierarchy test [Success]"
+}
+
+test_report_hierarchy
+
+cleanup
+
+exit 0
diff --git a/tools/perf/tests/shell/perftool-testsuite_probe.sh b/tools/perf/tests/shell/perftool-testsuite_probe.sh
index a0fec33a0358..3863df16c19b 100755
--- a/tools/perf/tests/shell/perftool-testsuite_probe.sh
+++ b/tools/perf/tests/shell/perftool-testsuite_probe.sh
@@ -1,7 +1,8 @@
#!/bin/bash
-# perftool-testsuite_probe
+# perftool-testsuite_probe (exclusive)
# SPDX-License-Identifier: GPL-2.0
+[ "$(id -u)" = 0 ] || exit 2
test -d "$(dirname "$0")/base_probe" || exit 2
cd "$(dirname "$0")/base_probe" || exit 2
status=0
diff --git a/tools/perf/tests/shell/probe_vfs_getname.sh b/tools/perf/tests/shell/probe_vfs_getname.sh
index 0c5aacc446b3..5fe5682c28ce 100755
--- a/tools/perf/tests/shell/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/probe_vfs_getname.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Add vfs_getname probe to get syscall args filenames (exclusive)
# SPDX-License-Identifier: GPL-2.0
@@ -8,11 +8,18 @@
. "$(dirname $0)"/lib/probe.sh
skip_if_no_perf_probe || exit 2
+[ "$(id -u)" = 0 ] || exit 2
# shellcheck source=lib/probe_vfs_getname.sh
. "$(dirname $0)"/lib/probe_vfs_getname.sh
-add_probe_vfs_getname || skip_if_no_debuginfo
+add_probe_vfs_getname
err=$?
+
+if [ $err -eq 1 ] ; then
+ skip_if_no_debuginfo
+ err=$?
+fi
+
cleanup_probe_vfs_getname
exit $err
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index 47a26f25db9f..ab99bef556bf 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -1,5 +1,5 @@
-#!/bin/sh
-# probe libc's inet_pton & backtrace it with ping
+#!/bin/bash
+# probe libc's inet_pton & backtrace it with ping (exclusive)
# Installs a probe on libc's inet_pton function, that will use uprobes,
# then use 'perf trace' on a ping to localhost asking for just one packet
@@ -18,12 +18,13 @@
libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
nm -Dg $libc 2>/dev/null | grep -F -q inet_pton || exit 254
-event_pattern='probe_libc:inet_pton(\_[[:digit:]]+)?'
+event_pattern='probe_libc:inet_pton(_[[:digit:]]+)?'
add_libc_inet_pton_event() {
event_name=$(perf probe -f -x $libc -a inet_pton 2>&1 | tail -n +2 | head -n -5 | \
- grep -P -o "$event_pattern(?=[[:space:]]\(on inet_pton in $libc\))")
+ awk -v ep="$event_pattern" -v l="$libc" '$0 ~ ep && $0 ~ \
+ ("\\(on inet_pton in " l "\\)") {print $1}')
if [ $? -ne 0 ] || [ -z "$event_name" ] ; then
printf "FAIL: could not add event\n"
@@ -43,17 +44,8 @@ trace_libc_inet_pton_backtrace() {
echo "((__GI_)?getaddrinfo|text_to_binary_address)\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
echo "(gaih_inet|main)\+0x[[:xdigit:]]+[[:space:]]\(inlined|.*/bin/ping.*\)$" >> $expected
;;
- ppc64|ppc64le)
- eventattr='max-stack=4'
- # Add gaih_inet to expected backtrace only if it is part of libc.
- if nm $libc | grep -F -q gaih_inet.; then
- echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
- fi
- echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
- echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
- ;;
*)
- eventattr='max-stack=3'
+ eventattr='max-stack=4'
echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
;;
esac
@@ -76,14 +68,25 @@ trace_libc_inet_pton_backtrace() {
fi
perf script -i $perf_data | tac | grep -m1 ^ping -B9 | tac > $perf_script
- exec 3<$perf_script
exec 4<$expected
- while read line <&3 && read -r pattern <&4; do
+ while read -r pattern <&4; do
+ echo "Pattern: $pattern"
[ -z "$pattern" ] && break
- echo $line
- echo "$line" | grep -E -q "$pattern"
- if [ $? -ne 0 ] ; then
- printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line"
+
+ found=0
+
+ # Search lines in the perf script result
+ exec 3<$perf_script
+ while read line <&3; do
+ [ -z "$line" ] && break
+ echo " Matching: $line"
+ ! echo "$line" | grep -E -q "$pattern"
+ found=$?
+ [ $found -eq 1 ] && break
+ done
+
+ if [ $found -ne 1 ] ; then
+ printf "FAIL: Didn't find the expected backtrace entry \"%s\"\n" "$pattern"
return 1
fi
done
@@ -103,6 +106,7 @@ delete_libc_inet_pton_event() {
# Check for IPv6 interface existence
ip a sh lo | grep -F -q inet6 || exit 2
+[ "$(id -u)" = 0 ] || exit 2
skip_if_no_perf_probe && \
add_libc_inet_pton_event && \
diff --git a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
index 5940fdc1df37..002f7037f182 100755
--- a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Use vfs_getname probe to get syscall args filenames (exclusive)
# Uses the 'perf test shell' library to add probe:vfs_getname to the system
@@ -13,6 +13,7 @@
. "$(dirname "$0")/lib/probe.sh"
skip_if_no_perf_probe || exit 2
+[ "$(id -u)" = 0 ] || exit 2
# shellcheck source=lib/probe_vfs_getname.sh
. "$(dirname "$0")/lib/probe_vfs_getname.sh"
@@ -34,8 +35,14 @@ perf_script_filenames() {
grep -E " +touch +[0-9]+ +\[[0-9]+\] +[0-9]+\.[0-9]+: +probe:vfs_getname[_0-9]*: +\([[:xdigit:]]+\) +pathname=\"${file}\""
}
-add_probe_vfs_getname || skip_if_no_debuginfo
+add_probe_vfs_getname
err=$?
+
+if [ $err -eq 1 ] ; then
+ skip_if_no_debuginfo
+ err=$?
+fi
+
if [ $err -ne 0 ] ; then
exit $err
fi
diff --git a/tools/perf/tests/shell/record+zstd_comp_decomp.sh b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
index 8929046e9057..f6b82223834e 100755
--- a/tools/perf/tests/shell/record+zstd_comp_decomp.sh
+++ b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Zstd perf.data compression/decompression
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh
index 0fc7a909ae9b..b1ad24fb3b33 100755
--- a/tools/perf/tests/shell/record.sh
+++ b/tools/perf/tests/shell/record.sh
@@ -12,8 +12,10 @@ shelldir=$(dirname "$0")
. "${shelldir}"/lib/perf_has_symbol.sh
testsym="test_loop"
+testsym2="brstack"
skip_test_missing_symbol ${testsym}
+skip_test_missing_symbol ${testsym2}
err=0
perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
@@ -34,13 +36,15 @@ default_fd_limit=$(ulimit -Sn)
min_fd_limit=$(($(getconf _NPROCESSORS_ONLN) * 16))
cleanup() {
- rm -rf "${perfdata}"
- rm -rf "${perfdata}".old
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
+ rm -f "${script_output}"
trap - EXIT TERM INT
}
trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
cleanup
exit 1
}
@@ -229,31 +233,77 @@ test_cgroup() {
echo "Cgroup sampling test [Success]"
}
+test_uid() {
+ echo "Uid sampling test"
+ if ! perf record -aB --synth=no --uid "$(id -u)" -o "${perfdata}" ${testprog} \
+ > "${script_output}" 2>&1
+ then
+ if grep -q "libbpf.*EPERM" "${script_output}"
+ then
+ echo "Uid sampling [Skipped permissions]"
+ return
+ else
+ echo "Uid sampling [Failed to record]"
+ err=1
+ # cat "${script_output}"
+ return
+ fi
+ fi
+ if ! perf report -i "${perfdata}" -q | grep -q "${testsym}"
+ then
+ echo "Uid sampling [Failed missing output]"
+ err=1
+ return
+ fi
+ echo "Uid sampling test [Success]"
+}
+
test_leader_sampling() {
echo "Basic leader sampling test"
- if ! perf record -o "${perfdata}" -e "{instructions,instructions}:Su" -- \
+ if ! perf record -o "${perfdata}" -e "{cycles,cycles}:Su" -- \
perf test -w brstack 2> /dev/null
then
echo "Leader sampling [Failed record]"
err=1
return
fi
+ perf script -i "${perfdata}" | grep brstack > $script_output
+ # Check if the two cycle counts are equal in each record.
+ # However, the throttling code doesn't consider event grouping. During throttling, only the
+ # leader is stopped, causing the slave's counts to be significantly higher. As a temporary
+ # workaround, set the tolerance rate to 80%.
+ # TODO: Revert the code for tolerance once the throttling mechanism is fixed.
index=0
- perf script -i "${perfdata}" > $script_output
+ valid_counts=0
+ invalid_counts=0
+ tolerance_rate=0.8
while IFS= read -r line
do
- # Check if the two instruction counts are equal in each record
- instructions=$(echo $line | awk '{for(i=1;i<=NF;i++) if($i=="instructions:") print $(i-1)}')
- if [ $(($index%2)) -ne 0 ] && [ ${instructions}x != ${prev_instructions}x ]
+ cycles=$(echo $line | awk '{for(i=1;i<=NF;i++) if($i=="cycles:") print $(i-1)}')
+ if [ $(($index%2)) -ne 0 ] && [ ${cycles}x != ${prev_cycles}x ]
then
- echo "Leader sampling [Failed inconsistent instructions count]"
- err=1
- return
+ invalid_counts=$(($invalid_counts+1))
+ else
+ valid_counts=$(($valid_counts+1))
fi
index=$(($index+1))
- prev_instructions=$instructions
- done < $script_output
- echo "Basic leader sampling test [Success]"
+ prev_cycles=$cycles
+ done < "${script_output}"
+ total_counts=$(bc <<< "$invalid_counts+$valid_counts")
+ if (( $(bc <<< "$total_counts <= 0") ))
+ then
+ echo "Leader sampling [No sample generated]"
+ err=1
+ return
+ fi
+ isok=$(bc <<< "scale=2; if (($invalid_counts/$total_counts) < (1-$tolerance_rate)) { 0 } else { 1 };")
+ if [ $isok -eq 1 ]
+ then
+ echo "Leader sampling [Failed inconsistent cycles count]"
+ err=1
+ else
+ echo "Basic leader sampling test [Success]"
+ fi
}
test_topdown_leader_sampling() {
@@ -273,27 +323,69 @@ test_topdown_leader_sampling() {
}
test_precise_max() {
+ local -i skipped=0
+
echo "precise_max attribute test"
- if ! perf stat -e "cycles,instructions" true 2> /dev/null
+ # Just to make sure event cycles is supported for sampling
+ if perf record -o "${perfdata}" -e "cycles" true 2> /dev/null
+ then
+ if ! perf record -o "${perfdata}" -e "cycles:P" true 2> /dev/null
+ then
+ echo "precise_max attribute [Failed cycles:P event]"
+ err=1
+ return
+ fi
+ else
+ echo "precise_max attribute [Skipped no cycles:P event]"
+ ((skipped+=1))
+ fi
+ # On s390 event instructions is not supported for perf record
+ if perf record -o "${perfdata}" -e "instructions" true 2> /dev/null
+ then
+ # On AMD, cycles and instructions events are treated differently
+ if ! perf record -o "${perfdata}" -e "instructions:P" true 2> /dev/null
+ then
+ echo "precise_max attribute [Failed instructions:P event]"
+ err=1
+ return
+ fi
+ else
+ echo "precise_max attribute [Skipped no instructions:P event]"
+ ((skipped+=1))
+ fi
+ if [ $skipped -eq 2 ]
then
echo "precise_max attribute [Skipped no hardware events]"
- return
+ else
+ echo "precise_max attribute test [Success]"
fi
- # Just to make sure it doesn't fail
- if ! perf record -o "${perfdata}" -e "cycles:P" true 2> /dev/null
+}
+
+test_callgraph() {
+ echo "Callgraph test"
+
+ case $(uname -m)
+ in s390x)
+ cmd_flags="--call-graph dwarf -e cpu-clock";;
+ *)
+ cmd_flags="-g";;
+ esac
+
+ if ! perf record -o "${perfdata}" $cmd_flags perf test -w brstack
then
- echo "precise_max attribute [Failed cycles:P event]"
+ echo "Callgraph test [Failed missing output]"
err=1
return
fi
- # On AMD, cycles and instructions events are treated differently
- if ! perf record -o "${perfdata}" -e "instructions:P" true 2> /dev/null
+
+ if ! perf report -i "${perfdata}" 2>&1 | grep "${testsym2}"
then
- echo "precise_max attribute [Failed instructions:P event]"
+ echo "Callgraph test [Failed missing symbol]"
err=1
return
fi
- echo "precise_max attribute test [Success]"
+
+ echo "Callgraph test [Success]"
}
# raise the limit of file descriptors to minimum
@@ -307,9 +399,11 @@ test_system_wide
test_workload
test_branch_counter
test_cgroup
+test_uid
test_leader_sampling
test_topdown_leader_sampling
test_precise_max
+test_callgraph
# restore the default value
ulimit -Sn $default_fd_limit
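
The leader-sampling tolerance check is plain bc arithmetic: with a tolerance rate of 0.8 the test fails only when mismatched leader/slave pairs exceed 20% of all pairs. With illustrative counts:

    invalid_counts=12; valid_counts=88; tolerance_rate=0.8
    total_counts=$(bc <<< "$invalid_counts+$valid_counts")
    # Prints 0 (pass) here, since 12/100 = 0.12 < 1 - 0.8.
    bc <<< "scale=2; if (($invalid_counts/$total_counts) < (1-$tolerance_rate)) { 0 } else { 1 };"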
diff --git a/tools/perf/tests/shell/record_bpf_filter.sh b/tools/perf/tests/shell/record_bpf_filter.sh
index 1b58ccc1fd88..383574cb3bd3 100755
--- a/tools/perf/tests/shell/record_bpf_filter.sh
+++ b/tools/perf/tests/shell/record_bpf_filter.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf record sample filtering (by BPF) tests
# SPDX-License-Identifier: GPL-2.0
@@ -89,7 +89,7 @@ test_bpf_filter_fail() {
test_bpf_filter_group() {
echo "Group bpf-filter test"
- if ! perf record -e task-clock --filter 'period > 1000 || ip > 0' \
+ if ! perf record -e task-clock --filter 'period > 1000, ip > 0' \
-o /dev/null true 2>/dev/null
then
echo "Group bpf-filter test [Failed should succeed]"
@@ -97,7 +97,7 @@ test_bpf_filter_group() {
return
fi
- if ! perf record -e task-clock --filter 'cpu > 0 || ip > 0' \
+ if ! perf record -e task-clock --filter 'period > 1000 , cpu > 0 || ip > 0' \
-o /dev/null true 2>&1 | grep -q PERF_SAMPLE_CPU
then
echo "Group bpf-filter test [Failed forbidden CPU]"
diff --git a/tools/perf/tests/shell/record_lbr.sh b/tools/perf/tests/shell/record_lbr.sh
index 8d750ee631f8..6fcb5e52b9b4 100755
--- a/tools/perf/tests/shell/record_lbr.sh
+++ b/tools/perf/tests/shell/record_lbr.sh
@@ -4,7 +4,8 @@
set -e
-if [ ! -f /sys/devices/cpu/caps/branches ] && [ ! -f /sys/devices/cpu_core/caps/branches ]
+if [ ! -f /sys/bus/event_source/devices/cpu/caps/branches ] &&
+ [ ! -f /sys/bus/event_source/devices/cpu_core/caps/branches ]
then
echo "Skip: only x86 CPUs support LBR"
exit 2
@@ -93,7 +94,7 @@ lbr_test() {
return
fi
- zero_nr=$(echo "$out" | grep -c 'branch stack: nr:0' || true)
+ zero_nr=$(echo "$out" | grep -A3 'branch stack: nr:0' | grep thread | grep -cv swapper || true)
r=$(($zero_nr * 100 / $bs_nr))
if [ $r -gt $threshold ]; then
echo "$test [Failed empty br stack ratio exceed $threshold%: $r%]"
diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh
index 678947fe69ee..860a2d6f4b75 100755
--- a/tools/perf/tests/shell/record_offcpu.sh
+++ b/tools/perf/tests/shell/record_offcpu.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf record offcpu profiling tests (exclusive)
# SPDX-License-Identifier: GPL-2.0
@@ -7,6 +7,9 @@ set -e
err=0
perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+ts=$(printf "%u" $((~0 << 32))) # OFF_CPU_TIMESTAMP
+dummy_timestamp=${ts%???} # remove the last 3 digits to match perf script
+
cleanup() {
rm -f ${perfdata}
rm -f ${perfdata}.old
@@ -19,6 +22,9 @@ trap_cleanup() {
}
trap trap_cleanup EXIT TERM INT
+test_above_thresh="Threshold test (above threshold)"
+test_below_thresh="Threshold test (below threshold)"
+
test_offcpu_priv() {
echo "Checking off-cpu privilege"
@@ -88,6 +94,63 @@ test_offcpu_child() {
echo "Child task off-cpu test [Success]"
}
+# If a task blocks for longer than --off-cpu-thresh, perf should collect a direct sample
+test_offcpu_above_thresh() {
+ echo "${test_above_thresh}"
+
+ # collect direct off-cpu samples for tasks blocked for more than 999ms
+ if ! perf record -e dummy --off-cpu --off-cpu-thresh 999 -o ${perfdata} -- sleep 1 2> /dev/null
+ then
+ echo "${test_above_thresh} [Failed record]"
+ err=1
+ return
+ fi
+ # direct sample's timestamp should be lower than the dummy_timestamp of the at-the-end sample
+ # check if a direct sample exists
+ if ! perf script --time "0, ${dummy_timestamp}" -i ${perfdata} -F event | grep -q "offcpu-time"
+ then
+ echo "${test_above_thresh} [Failed missing direct samples]"
+ err=1
+ return
+ fi
+ # there should only be one direct sample, and its period should be higher than off-cpu-thresh
+ if ! perf script --time "0, ${dummy_timestamp}" -i ${perfdata} -F period | \
+ awk '{ if (int($1) > 999000000) exit 0; else exit 1; }'
+ then
+ echo "${test_above_thresh} [Failed off-cpu time too short]"
+ err=1
+ return
+ fi
+ echo "${test_above_thresh} [Success]"
+}
+
+# If a task blocks for less than --off-cpu-thresh, perf should collect an at-the-end sample
+test_offcpu_below_thresh() {
+ echo "${test_below_thresh}"
+
+ # set the threshold to 1.2s; the 1s sleep stays below it, so only an at-the-end sample is expected
+ if ! perf record -e dummy --off-cpu --off-cpu-thresh 1200 -o ${perfdata} -- sleep 1 2> /dev/null
+ then
+ echo "${test_below_thresh} [Failed record]"
+ err=1
+ return
+ fi
+ # see if there's an at-the-end sample
+ if ! perf script --time "${dummy_timestamp}," -i ${perfdata} -F event | grep -q 'offcpu-time'
+ then
+ echo "${test_below_thresh} [Failed at-the-end samples cannot be found]"
+ err=1
+ return
+ fi
+ # plus there shouldn't be any direct samples
+ if perf script --time "0, ${dummy_timestamp}" -i ${perfdata} -F event | grep -q 'offcpu-time'
+ then
+ echo "${test_below_thresh} [Failed direct samples are found when they shouldn't be]"
+ err=1
+ return
+ fi
+ echo "${test_below_thresh} [Success]"
+}
test_offcpu_priv
@@ -99,5 +162,13 @@ if [ $err = 0 ]; then
test_offcpu_child
fi
+if [ $err = 0 ]; then
+ test_offcpu_above_thresh
+fi
+
+if [ $err = 0 ]; then
+ test_offcpu_below_thresh
+fi
+
cleanup
exit $err
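
The OFF_CPU_TIMESTAMP arithmetic above is compact enough to deserve a worked example: bash arithmetic is 64-bit, so ~0 << 32 sets exactly the upper 32 bits, and ${ts%???} drops the last three digits to match the coarser timestamps perf script prints:

    printf '%u\n' $((~0 << 32))   # 18446744069414584320 == 0xFFFFFFFF00000000
    ts=18446744069414584320
    echo "${ts%???}"              # 18446744069414584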
diff --git a/tools/perf/tests/shell/record_sideband.sh b/tools/perf/tests/shell/record_sideband.sh
index ac70ac27d590..2182551873be 100755
--- a/tools/perf/tests/shell/record_sideband.sh
+++ b/tools/perf/tests/shell/record_sideband.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf record sideband tests
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/sched.sh b/tools/perf/tests/shell/sched.sh
new file mode 100755
index 000000000000..b9b81eaf856e
--- /dev/null
+++ b/tools/perf/tests/shell/sched.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
+# perf sched tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+if [ "$(id -u)" != 0 ]; then
+ echo "[Skip] No root permission"
+ exit 2
+fi
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test_sched.perf.data.XXXXX)
+PID1=0
+PID2=0
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+start_noploops() {
+ # Start two noploop workloads on CPU0 to trigger scheduling.
+ perf test -w noploop 10 &
+ PID1=$!
+ taskset -pc 0 $PID1
+ perf test -w noploop 10 &
+ PID2=$!
+ taskset -pc 0 $PID2
+
+ if ! grep -q 'Cpus_allowed_list:\s*0$' "/proc/$PID1/status"
+ then
+ echo "Sched [Error taskset did not work for the 1st noploop ($PID1)]"
+ grep Cpus_allowed /proc/$PID1/status
+ err=1
+ fi
+
+ if ! grep -q 'Cpus_allowed_list:\s*0$' "/proc/$PID2/status"
+ then
+ echo "Sched [Error taskset did not work for the 2nd noploop ($PID2)]"
+ grep Cpus_allowed /proc/$PID2/status
+ err=1
+ fi
+}
+
+cleanup_noploops() {
+ kill "$PID1" "$PID2"
+}
+
+test_sched_record() {
+ echo "Sched record"
+
+ start_noploops
+
+ perf sched record --no-inherit -o "${perfdata}" sleep 1
+
+ cleanup_noploops
+}
+
+test_sched_latency() {
+ echo "Sched latency"
+
+ if ! perf sched latency -i "${perfdata}" | grep -q perf-noploop
+ then
+ echo "Sched latency [Failed missing output]"
+ err=1
+ fi
+}
+
+test_sched_script() {
+ echo "Sched script"
+
+ if ! perf sched script -i "${perfdata}" | grep -q perf-noploop
+ then
+ echo "Sched script [Failed missing output]"
+ err=1
+ fi
+}
+
+test_sched_map() {
+ echo "Sched map"
+
+ if ! perf sched map -i "${perfdata}" | grep -q perf-noploop
+ then
+ echo "Sched map [Failed missing output]"
+ err=1
+ fi
+}
+
+test_sched_timehist() {
+ echo "Sched timehist"
+
+ if ! perf sched timehist -i "${perfdata}" | grep -q perf-noploop
+ then
+ echo "Sched timehist [Failed missing output]"
+ err=1
+ fi
+}
+
+test_sched_record
+test_sched_latency
+test_sched_script
+test_sched_map
+test_sched_timehist
+
+cleanup
+exit $err
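
The start_noploops pinning pattern in isolation; note that $! still names the background job after taskset returns, so the status check targets the right process:

    perf test -w noploop 10 &
    pid=$!
    taskset -pc 0 "$pid"                          # pin the job to CPU0
    grep Cpus_allowed_list "/proc/$pid/status"    # expect: Cpus_allowed_list: 0
    kill "$pid"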
diff --git a/tools/perf/tests/shell/script.sh b/tools/perf/tests/shell/script.sh
index d3e2958d2242..7007f1cdf761 100755
--- a/tools/perf/tests/shell/script.sh
+++ b/tools/perf/tests/shell/script.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf script tests
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
index fc2d8cc6e5e0..7a6f6e177402 100755
--- a/tools/perf/tests/shell/stat+csv_output.sh
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -44,6 +44,7 @@ function commachecker()
;; "--per-die") exp=8
;; "--per-cluster") exp=8
;; "--per-cache") exp=8
+ ;; "--metric-only") exp=2
esac
while read line
@@ -75,6 +76,7 @@ check_interval "CSV" "$perf_cmd"
check_event "CSV" "$perf_cmd"
check_per_thread "CSV" "$perf_cmd"
check_per_node "CSV" "$perf_cmd"
+check_metric_only "CSV" "$perf_cmd"
if [ $skip_test -ne 1 ]
then
check_system_wide_no_aggr "CSV" "$perf_cmd"
diff --git a/tools/perf/tests/shell/stat+csv_summary.sh b/tools/perf/tests/shell/stat+csv_summary.sh
index 323123ff4d19..9a4353db3825 100755
--- a/tools/perf/tests/shell/stat+csv_summary.sh
+++ b/tools/perf/tests/shell/stat+csv_summary.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf stat csv summary test
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/stat+event_uniquifying.sh b/tools/perf/tests/shell/stat+event_uniquifying.sh
new file mode 100755
index 000000000000..bf54bd6c3e2e
--- /dev/null
+++ b/tools/perf/tests/shell/stat+event_uniquifying.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+# perf stat events uniquifying
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+stat_output=$(mktemp /tmp/__perf_test.stat_output.XXXXX)
+perf_tool=perf
+err=0
+
+test_event_uniquifying() {
+ # We use `clockticks` in `uncore_imc` to verify the uniquify behavior.
+ pmu="uncore_imc"
+ event="clockticks"
+
+ # If the `-A` option is added, the event should be uniquified.
+ #
+ # $perf list -v clockticks
+ #
+ # List of pre-defined events (to be used in -e or -M):
+ #
+ # uncore_imc_0/clockticks/ [Kernel PMU event]
+ # uncore_imc_1/clockticks/ [Kernel PMU event]
+ # uncore_imc_2/clockticks/ [Kernel PMU event]
+ # uncore_imc_3/clockticks/ [Kernel PMU event]
+ # uncore_imc_4/clockticks/ [Kernel PMU event]
+ # uncore_imc_5/clockticks/ [Kernel PMU event]
+ #
+ # ...
+ #
+ # $perf stat -e clockticks -A -- true
+ #
+ # Performance counter stats for 'system wide':
+ #
+ # CPU0 3,773,018 uncore_imc_0/clockticks/
+ # CPU0 3,609,025 uncore_imc_1/clockticks/
+ # CPU0 0 uncore_imc_2/clockticks/
+ # CPU0 3,230,009 uncore_imc_3/clockticks/
+ # CPU0 3,049,897 uncore_imc_4/clockticks/
+ # CPU0 0 uncore_imc_5/clockticks/
+ #
+ # 0.002029828 seconds time elapsed
+
+ echo "stat event uniquifying test"
+ uniquified_event_array=()
+
+ # Skip if the machine does not have `uncore_imc` device.
+ if ! ${perf_tool} list pmu | grep -q ${pmu}; then
+ echo "Target does not support PMU ${pmu} [Skipped]"
+ err=2
+ return
+ fi
+
+ # Collect the list of uniquified events.
+ while IFS= read -r line; do
+ uniquified_event=$(echo "$line" | awk '{print $1}')
+ uniquified_event_array+=("${uniquified_event}")
+ done < <(${perf_tool} list -v ${event} | grep ${pmu})
+
+ perf_command="${perf_tool} stat -e $event -A -o ${stat_output} -- true"
+ $perf_command
+
+ # Check the output contains all uniquified events.
+ for uniquified_event in "${uniquified_event_array[@]}"; do
+ if ! grep -q "${uniquified_event}" "${stat_output}"; then
+ echo "Event is not uniquified [Failed]"
+ echo "${perf_command}"
+ cat "${stat_output}"
+ err=1
+ break
+ fi
+ done
+}
+
+test_event_uniquifying
+rm -f "${stat_output}"
+exit $err
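
The `done < <(...)` construction above is the standard bash idiom for filling an array from command output; a plain pipe would run the loop in a subshell and lose the appends. A self-contained illustration with synthetic perf list lines:

    names=()
    while IFS= read -r line; do
        names+=("${line%% *}")    # first field, like awk '{print $1}'
    done < <(printf '%s\n' 'uncore_imc_0/clockticks/ [Kernel PMU event]' \
                           'uncore_imc_1/clockticks/ [Kernel PMU event]')
    echo "${names[@]}"            # uncore_imc_0/clockticks/ uncore_imc_1/clockticks/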
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
index 6b630d33c328..98fb65274ac4 100755
--- a/tools/perf/tests/shell/stat+json_output.sh
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -173,6 +173,19 @@ check_per_socket()
echo "[Success]"
}
+check_metric_only()
+{
+ echo -n "Checking json output: metric only "
+ if [ "$(uname -m)" = "s390x" ] && ! grep '^facilities' /proc/cpuinfo | grep -qw 67
+ then
+ echo "[Skip] CPU-measurement counter facility not installed"
+ return
+ fi
+ perf stat -j --metric-only -e instructions,cycles -o "${stat_output}" true
+ $PYTHON $pythonchecker --metric-only --file "${stat_output}"
+ echo "[Success]"
+}
+
# The perf stat options for per-socket, per-core, per-die
# and -A ( no_aggr mode ) uses the info fetched from this
# directory: "/sys/devices/system/cpu/cpu*/topology". For
@@ -207,6 +220,7 @@ check_interval
check_event
check_per_thread
check_per_node
+check_metric_only
if [ $skip_test -ne 1 ]
then
check_system_wide_no_aggr
diff --git a/tools/perf/tests/shell/stat+shadow_stat.sh b/tools/perf/tests/shell/stat+shadow_stat.sh
index 0c7d79a230ea..8824f445d343 100755
--- a/tools/perf/tests/shell/stat+shadow_stat.sh
+++ b/tools/perf/tests/shell/stat+shadow_stat.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf stat metrics (shadow stat) test
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh
index cbf2894b2c84..6fee67693ba7 100755
--- a/tools/perf/tests/shell/stat+std_output.sh
+++ b/tools/perf/tests/shell/stat+std_output.sh
@@ -13,7 +13,7 @@ stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX)
event_name=(cpu-clock task-clock context-switches cpu-migrations page-faults stalled-cycles-frontend stalled-cycles-backend cycles instructions branches branch-misses)
event_metric=("CPUs utilized" "CPUs utilized" "/sec" "/sec" "/sec" "frontend cycles idle" "backend cycles idle" "GHz" "insn per cycle" "/sec" "of all branches")
-skip_metric=("stalled cycles per insn" "tma_" "retiring" "frontend_bound" "bad_speculation" "backend_bound")
+skip_metric=("stalled cycles per insn" "tma_" "retiring" "frontend_bound" "bad_speculation" "backend_bound" "TopdownL1" "percent of slots")
cleanup() {
rm -f "${stat_output}"
@@ -30,6 +30,7 @@ trap trap_cleanup EXIT TERM INT
function commachecker()
{
local prefix=1
+ local -i metric_only=0
case "$1"
in "--interval") prefix=2
@@ -41,6 +42,7 @@ function commachecker()
;; "--per-die") prefix=3
;; "--per-cache") prefix=3
;; "--per-cluster") prefix=3
+ ;; "--metric-only") metric_only=1
esac
while read line
@@ -60,6 +62,9 @@ function commachecker()
x=${main_body%#*}
[ "$x" = "" ] && continue
+ # For --metric-only mode, any non-empty result counts as a pass
+ [ $metric_only -eq 1 ] && return 0
+
# Skip metrics without event name
y=${main_body#*#}
for i in "${!skip_metric[@]}"; do
@@ -84,6 +89,8 @@ function commachecker()
exit 1;
}
done < "${stat_output}"
+
+ [ $metric_only -eq 1 ] && exit 1
return 0
}
@@ -95,6 +102,7 @@ check_system_wide "STD" "$perf_cmd"
check_interval "STD" "$perf_cmd"
check_per_thread "STD" "$perf_cmd"
check_per_node "STD" "$perf_cmd"
+check_metric_only "STD" "$perf_cmd"
if [ $skip_test -ne 1 ]
then
check_system_wide_no_aggr "STD" "$perf_cmd"
diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
index 7a8adf81e4b3..8a100a7f2dc1 100755
--- a/tools/perf/tests/shell/stat.sh
+++ b/tools/perf/tests/shell/stat.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf stat tests
# SPDX-License-Identifier: GPL-2.0
@@ -67,43 +67,54 @@ test_topdown_groups() {
echo "Topdown event group test [Skipped event parsing failed]"
return
fi
- if perf stat -e '{slots,topdown-retiring}' true 2>&1 | grep -E -q "<not supported>"
- then
- echo "Topdown event group test [Failed events not supported]"
- err=1
- return
- fi
- if perf stat -e 'instructions,topdown-retiring,slots' true 2>&1 | grep -E -q "<not supported>"
- then
- echo "Topdown event group test [Failed slots not reordered first in no-group case]"
- err=1
- return
- fi
- if perf stat -e '{instructions,topdown-retiring,slots}' true 2>&1 | grep -E -q "<not supported>"
- then
- echo "Topdown event group test [Failed slots not reordered first in single group case]"
- err=1
- return
- fi
- if perf stat -e '{instructions,slots},topdown-retiring' true 2>&1 | grep -E -q "<not supported>"
- then
- echo "Topdown event group test [Failed topdown metrics event not move into slots group]"
- err=1
- return
- fi
- if perf stat -e '{instructions,slots},{topdown-retiring}' true 2>&1 | grep -E -q "<not supported>"
- then
- echo "Topdown event group test [Failed topdown metrics group not merge into slots group]"
- err=1
- return
- fi
- if perf stat -e '{instructions,r400,r8000}' true 2>&1 | grep -E -q "<not supported>"
+ td_err=0
+ do_topdown_group_test() {
+ events=$1
+ failure=$2
+ if perf stat -e "$events" true 2>&1 | grep -E -q "<not supported>"
+ then
+ echo "Topdown event group test [Failed $failure for '$events']"
+ td_err=1
+ return
+ fi
+ }
+ do_topdown_group_test "{slots,topdown-retiring}" "events not supported"
+ do_topdown_group_test "{instructions,r400,r8000}" "raw format slots not reordered first"
+ filler_events=("instructions" "cycles"
+ "context-switches" "faults")
+ for ((i = 0; i < ${#filler_events[@]}; i+=2))
+ do
+ filler1=${filler_events[i]}
+ filler2=${filler_events[i+1]}
+ do_topdown_group_test "$filler1,topdown-retiring,slots" \
+ "slots not reordered first in no-group case"
+ do_topdown_group_test "slots,$filler1,topdown-retiring" \
+ "topdown metrics event not reordered in no-group case"
+ do_topdown_group_test "{$filler1,topdown-retiring,slots}" \
+ "slots not reordered first in single group case"
+ do_topdown_group_test "{$filler1,slots},topdown-retiring" \
+ "topdown metrics event not move into slots group"
+ do_topdown_group_test "topdown-retiring,{$filler1,slots}" \
+ "topdown metrics event not move into slots group last"
+ do_topdown_group_test "{$filler1,slots},{topdown-retiring}" \
+ "topdown metrics group not merge into slots group"
+ do_topdown_group_test "{topdown-retiring},{$filler1,slots}" \
+ "topdown metrics group not merge into slots group last"
+ do_topdown_group_test "{$filler1,slots},$filler2,topdown-retiring" \
+ "non-adjacent topdown metrics group not move into slots group"
+ do_topdown_group_test "$filler2,topdown-retiring,{$filler1,slots}" \
+ "non-adjacent topdown metrics group not move into slots group last"
+ do_topdown_group_test "{$filler1,slots},{$filler2,topdown-retiring}" \
+ "metrics group not merge into slots group"
+ do_topdown_group_test "{$filler1,topdown-retiring},{$filler2,slots}" \
+ "metrics group not merge into slots group last"
+ done
+ if test "$td_err" -eq 0
then
- echo "Topdown event group test [Failed raw format slots not reordered first]"
- err=1
- return
+ echo "Topdown event group test [Success]"
+ else
+ err="$td_err"
fi
- echo "Topdown event group test [Success]"
}
test_topdown_weak_groups() {
@@ -187,7 +198,11 @@ test_hybrid() {
# Run default Perf stat
cycles_events=$(perf stat -- true 2>&1 | grep -E "/cycles/[uH]*| cycles[:uH]* " -c)
- if [ "$pmus" -ne "$cycles_events" ]
+ # The expectation is that the default output will have a cycles event on
+ # each hybrid PMU. In situations with no cycles PMU events, such as
+ # virtualized environments, perf can fall back to task-clock, so the
+ # final count may be 0. Fail if neither condition holds.
+ if [ "$pmus" -ne "$cycles_events" ] && [ "0" -ne "$cycles_events" ]
then
echo "hybrid test [Found $pmus PMUs but $cycles_events cycles events. Failed]"
err=1
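
The refactored topdown test keeps the original intent: topdown metric events must land in a group led by slots, and perf is expected to reorder events or merge groups to make that happen. Two representative invocations, which only parse on topdown-capable Intel CPUs:

    # perf should quietly reorder slots ahead of the metric event
    perf stat -e 'instructions,topdown-retiring,slots' true
    # perf should merge the metric-only group into the slots group
    perf stat -e '{instructions,slots},{topdown-retiring}' true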
diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
index 73e9347e88a9..6fa585a1e34c 100755
--- a/tools/perf/tests/shell/stat_all_metrics.sh
+++ b/tools/perf/tests/shell/stat_all_metrics.sh
@@ -7,80 +7,96 @@ ParanoidAndNotRoot()
[ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
}
+test_prog="sleep 0.01"
system_wide_flag="-a"
if ParanoidAndNotRoot 0
then
system_wide_flag=""
+ test_prog="perf test -w noploop"
fi
err=0
for m in $(perf list --raw-dump metrics); do
echo "Testing $m"
- result=$(perf stat -M "$m" $system_wide_flag -- sleep 0.01 2>&1)
+ result=$(perf stat -M "$m" $system_wide_flag -- $test_prog 2>&1)
result_err=$?
- if [[ $result_err -gt 0 ]]
+ if [[ $result_err -eq 0 && "$result" =~ ${m:0:50} ]]
then
- if [[ "$result" =~ \
- "Access to performance monitoring and observability operations is limited" ]]
+ # No error result and metric shown.
+ continue
+ fi
+ if [[ "$result" =~ "Cannot resolve IDs for" ]]
+ then
+ echo "Metric contains missing events"
+ echo $result
+ err=1 # Fail
+ continue
+ elif [[ "$result" =~ \
+ "Access to performance monitoring and observability operations is limited" ]]
+ then
+ echo "Permission failure"
+ echo $result
+ if [[ $err -eq 0 ]]
then
- echo "Permission failure"
- echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
- continue
- elif [[ "$result" =~ "in per-thread mode, enable system wide" ]]
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "in per-thread mode, enable system wide" ]]
+ then
+ echo "Permissions - need system wide mode"
+ echo $result
+ if [[ $err -eq 0 ]]
then
- echo "Permissions - need system wide mode"
- echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
- continue
- elif [[ "$result" =~ "<not supported>" ]]
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "<not supported>" ]]
+ then
+ echo "Not supported events"
+ echo $result
+ if [[ $err -eq 0 ]]
then
- echo "Not supported events"
- echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
- continue
- elif [[ "$result" =~ "FP_ARITH" || "$result" =~ "AMX" ]]
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "<not counted>" ]]
+ then
+ echo "Not counted events"
+ echo $result
+ if [[ $err -eq 0 ]]
then
- echo "FP issues"
- echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
- continue
- elif [[ "$result" =~ "PMM" ]]
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "FP_ARITH" || "$result" =~ "AMX" ]]
+ then
+ echo "FP issues"
+ echo $result
+ if [[ $err -eq 0 ]]
then
- echo "Optane memory issues"
- echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
- continue
+ err=2 # Skip
fi
- fi
-
- if [[ "$result" =~ ${m:0:50} ]]
+ continue
+ elif [[ "$result" =~ "PMM" ]]
then
+ echo "Optane memory issues"
+ echo $result
+ if [[ $err -eq 0 ]]
+ then
+ err=2 # Skip
+ fi
continue
fi
# Failed, possibly the workload was too small so retry with something longer.
result=$(perf stat -M "$m" $system_wide_flag -- perf bench internals synthesize 2>&1)
- if [[ "$result" =~ ${m:0:50} ]]
+ result_err=$?
+ if [[ $result_err -eq 0 && "$result" =~ ${m:0:50} ]]
then
+ # No error result and metric shown.
continue
fi
- echo "Metric '$m' not printed in:"
+ echo "Metric '$m' has non-zero error '$result_err' or not printed in:"
echo "$result"
err=1
done
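
Two bash details carry the loop above: ${m:0:50} truncates long metric names before the regex match, and err uses 0/1/2 for pass/fail/skip, with a fail never downgraded to a skip. The truncation in isolation:

    m="tma_some_extremely_long_metric_name_that_keeps_going_and_going"
    echo "${m:0:50}"                        # first 50 characters only
    result="Testing output mentioning ${m}"
    [[ "$result" =~ ${m:0:50} ]] && echo "metric shown"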
diff --git a/tools/perf/tests/shell/stat_all_pfm.sh b/tools/perf/tests/shell/stat_all_pfm.sh
index 4d004f777a6e..c08c186af2c4 100755
--- a/tools/perf/tests/shell/stat_all_pfm.sh
+++ b/tools/perf/tests/shell/stat_all_pfm.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf all libpfm4 events test
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh
index 8b148b300be1..9c466c0efa85 100755
--- a/tools/perf/tests/shell/stat_all_pmu.sh
+++ b/tools/perf/tests/shell/stat_all_pmu.sh
@@ -2,7 +2,6 @@
# perf all PMU test (exclusive)
# SPDX-License-Identifier: GPL-2.0
-set -e
err=0
result=""
@@ -16,34 +15,55 @@ trap trap_cleanup EXIT TERM INT
# Test all PMU events; however exclude parameterized ones (name contains '?')
for p in $(perf list --raw-dump pmu | sed 's/[[:graph:]]\+?[[:graph:]]\+[[:space:]]//g')
do
- echo "Testing $p"
- result=$(perf stat -e "$p" true 2>&1)
- if echo "$result" | grep -q "$p"
+ echo -n "Testing $p -- "
+ output=$(perf stat -e "$p" true 2>&1)
+ stat_result=$?
+ if echo "$output" | grep -q "$p"
then
# Event seen in output.
- continue
- fi
- if echo "$result" | grep -q "<not supported>"
- then
- # Event not supported, so ignore.
- continue
+ if [ $stat_result -eq 0 ] && ! echo "$output" | grep -q "<not supported>"
+ then
+ # Event supported.
+ echo "supported"
+ continue
+ elif echo "$output" | grep -q "<not supported>"
+ then
+ # Event not supported, so ignore.
+ echo "not supported"
+ continue
+ elif echo "$output" | grep -q "No permission to enable"
+ then
+ # No permissions, so ignore.
+ echo "no permission to enable"
+ continue
+ elif echo "$output" | grep -q "Bad event name"
+ then
+ # Non-existent event.
+ echo "Error: Bad event name"
+ echo "$output"
+ err=1
+ continue
+ fi
fi
- if echo "$result" | grep -q "Access to performance monitoring and observability operations is limited."
+
+ if echo "$output" | grep -q "Access to performance monitoring and observability operations is limited."
then
# Access is limited, so ignore.
+ echo "access limited"
continue
fi
# We failed to see the event and it is supported. Possibly the workload was
# too small so retry with something longer.
- result=$(perf stat -e "$p" perf bench internals synthesize 2>&1)
- if echo "$result" | grep -q "$p"
+ output=$(perf stat -e "$p" perf bench internals synthesize 2>&1)
+ if echo "$output" | grep -q "$p"
then
# Event seen in output.
+ echo "supported"
continue
fi
echo "Error: event '$p' not printed in:"
- echo "$result"
+ echo "$output"
err=1
done
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index 95d2ad5d17c6..f43e28a136d3 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf stat --bpf-counters test (exclusive)
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
index 2ec69060c42f..ff2e06c408bc 100755
--- a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf stat --bpf-counters --for-each-cgroup test
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/stat_metrics_values.sh b/tools/perf/tests/shell/stat_metrics_values.sh
index 279f19c5919a..30566f0b5427 100755
--- a/tools/perf/tests/shell/stat_metrics_values.sh
+++ b/tools/perf/tests/shell/stat_metrics_values.sh
@@ -16,11 +16,16 @@ workload="perf bench futex hash -r 2 -s"
# Add -debug, save data file and full rule file
echo "Launch python validation script $pythonvalidator"
echo "Output will be stored in: $tmpdir"
-$PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${workload}"
-ret=$?
-rm -rf $tmpdir
-if [ $ret -ne 0 ]; then
- echo "Metric validation return with erros. Please check metrics reported with errors."
-fi
+for cputype in /sys/bus/event_source/devices/cpu_*; do
+ cputype=$(basename "$cputype")
+ echo "Testing metrics for: $cputype"
+ $PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${workload}" \
+ -cputype "${cputype}"
+ ret=$?
+ rm -rf $tmpdir
+ if [ $ret -ne 0 ]; then
+ echo "Metric validation return with errors. Please check metrics reported with errors."
+ fi
+done
exit $ret
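
The new loop assumes the hybrid sysfs layout; on such a machine the glob typically expands as below (on non-hybrid systems the core PMU is plain cpu, which the cpu_* pattern does not match):

    for c in /sys/bus/event_source/devices/cpu_*; do
        basename "$c"    # cpu_atom, cpu_core on an Intel hybrid system
    done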
diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
index 9caa36130175..9172dd68a81d 100755
--- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh
+++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Check Arm64 callgraphs are complete in fp mode
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
index 573af9235b72..1c750b67d141 100755
--- a/tools/perf/tests/shell/test_arm_coresight.sh
+++ b/tools/perf/tests/shell/test_arm_coresight.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Check Arm CoreSight trace data recording and synthesized samples (exclusive)
# Uses the 'perf record' to record trace data with Arm CoreSight sinks;
diff --git a/tools/perf/tests/shell/test_arm_coresight_disasm.sh b/tools/perf/tests/shell/test_arm_coresight_disasm.sh
index be2d26303f94..0dfb4fadf531 100755
--- a/tools/perf/tests/shell/test_arm_coresight_disasm.sh
+++ b/tools/perf/tests/shell/test_arm_coresight_disasm.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Check Arm CoreSight disassembly script completes without errors (exclusive)
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/test_arm_spe.sh b/tools/perf/tests/shell/test_arm_spe.sh
index 3258368634f7..bb76ea88aa14 100755
--- a/tools/perf/tests/shell/test_arm_spe.sh
+++ b/tools/perf/tests/shell/test_arm_spe.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Check Arm SPE trace data recording and synthesized samples (exclusive)
# Uses the 'perf record' to record trace data of Arm SPE events;
@@ -107,7 +107,37 @@ arm_spe_system_wide_test() {
arm_spe_report "SPE system-wide testing" $err
}
+arm_spe_discard_test() {
+ echo "SPE discard mode"
+
+ for f in /sys/bus/event_source/devices/arm_spe_*; do
+ if [ -e "$f/format/discard" ]; then
+ cpu=$(cut -c -1 "$f/cpumask")
+ break
+ fi
+ done
+
+ if [ -z "$cpu" ]; then
+ arm_spe_report "SPE discard mode not present" 2
+ return
+ fi
+
+ # The test can use a wildcard SPE instance and perf will only open the
+ # event on instances that have that format flag. But make sure the
+ # target runs on an instance with discard mode, otherwise we're not
+ # testing anything.
+ perf record -o ${perfdata} -e arm_spe/discard/ -N -B --no-bpf-event \
+ -- taskset --cpu-list $cpu true
+
+ if perf report -i ${perfdata} --stats | grep 'AUX events\|AUXTRACE events'; then
+ arm_spe_report "SPE discard mode found unexpected data" 1
+ else
+ arm_spe_report "SPE discard mode" 0
+ fi
+}
+
arm_spe_snapshot_test
arm_spe_system_wide_test
+arm_spe_discard_test
exit $glb_err
diff --git a/tools/perf/tests/shell/test_arm_spe_fork.sh b/tools/perf/tests/shell/test_arm_spe_fork.sh
index 8efeef9fb956..5bcca51c03ac 100755
--- a/tools/perf/tests/shell/test_arm_spe_fork.sh
+++ b/tools/perf/tests/shell/test_arm_spe_fork.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Check Arm SPE doesn't hang when there are forks
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/test_bpf_metadata.sh b/tools/perf/tests/shell/test_bpf_metadata.sh
new file mode 100755
index 000000000000..69e3c2055134
--- /dev/null
+++ b/tools/perf/tests/shell/test_bpf_metadata.sh
@@ -0,0 +1,76 @@
+#!/bin/bash
+# BPF metadata collection test
+#
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_bpf_metadata() {
+ echo "Checking BPF metadata collection"
+
+ if ! perf check -q feature libbpf-strings ; then
+ echo "Basic BPF metadata test [skipping - not supported]"
+ err=0
+ return
+ fi
+
+ # This is a basic invocation of perf record
+ # that invokes the perf_sample_filter BPF program.
+ if ! perf record -e task-clock --filter 'ip > 0' \
+ -o "${perfdata}" sleep 1 2> /dev/null
+ then
+ echo "Basic BPF metadata test [Failed record]"
+ err=1
+ return
+ fi
+
+ # The BPF programs that ship with "perf" all have the following
+ # variable defined at compile time:
+ #
+ # const char bpf_metadata_perf_version[] SEC(".rodata") = <...>;
+ #
+ # This invocation looks for a PERF_RECORD_BPF_METADATA event,
+ # and checks that its content contains the string given by
+ # "perf version".
+ VERS=$(perf version | awk '{print $NF}')
+ if ! perf script --show-bpf-events -i "${perfdata}" | awk '
+ /PERF_RECORD_BPF_METADATA.*perf_sample_filter/ {
+ header = 1;
+ }
+ /^ *entry/ {
+ if (header) { header = 0; entry = 1; }
+ }
+ $0 !~ /^ *entry/ {
+ entry = 0;
+ }
+ /perf_version/ {
+ if (entry) print $NF;
+ }
+ ' | grep -E "$VERS" > /dev/null
+ then
+ echo "Basic BPF metadata test [Failed invalid output]"
+ err=1
+ return
+ fi
+ echo "Basic BPF metadata test [Success]"
+}
+
+test_bpf_metadata
+
+cleanup
+exit $err
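
The awk state machine above only trusts a perf_version line when it appears in an entry immediately following the matching PERF_RECORD_BPF_METADATA header. A synthetic run; the record layout used here is an assumption for illustration only:

    printf '%s\n' \
        'PERF_RECORD_BPF_METADATA prog: perf_sample_filter' \
        ' entry perf_version = 6.16' |
    awk '/PERF_RECORD_BPF_METADATA.*perf_sample_filter/ { header = 1 }
         /^ *entry/ { if (header) { header = 0; entry = 1 } }
         $0 !~ /^ *entry/ { entry = 0 }
         /perf_version/ { if (entry) print $NF }'    # prints: 6.16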
diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh
index 5f14d0cb013f..9138fa83bf36 100755
--- a/tools/perf/tests/shell/test_brstack.sh
+++ b/tools/perf/tests/shell/test_brstack.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Check branch stack sampling
# SPDX-License-Identifier: GPL-2.0
@@ -17,35 +17,50 @@ fi
skip_test_missing_symbol brstack_bench
+err=0
TMPDIR=$(mktemp -d /tmp/__perf_test.program.XXXXX)
TESTPROG="perf test -w brstack"
cleanup() {
rm -rf $TMPDIR
+ trap - EXIT TERM INT
}
-trap cleanup EXIT TERM INT
+trap_cleanup() {
+ set +e
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
test_user_branches() {
echo "Testing user branch stack sampling"
- perf record -o $TMPDIR/perf.data --branch-filter any,save_type,u -- ${TESTPROG} > /dev/null 2>&1
- perf script -i $TMPDIR/perf.data --fields brstacksym | xargs -n1 > $TMPDIR/perf.script
+ perf record -o "$TMPDIR/perf.data" --branch-filter any,save_type,u -- ${TESTPROG} > "$TMPDIR/record.txt" 2>&1
+ perf script -i "$TMPDIR/perf.data" --fields brstacksym > "$TMPDIR/perf.script"
# example of branch entries:
# brstack_foo+0x14/brstack_bar+0x40/P/-/-/0/CALL
- set -x
- grep -E -m1 "^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/IND_CALL/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_foo\+[^ ]*/brstack_bar\+[^ ]*/CALL/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/CALL/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_bench\+[^ ]*/brstack_bar\+[^ ]*/CALL/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_bar\+[^ ]*/brstack_foo\+[^ ]*/RET/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_foo\+[^ ]*/brstack_bench\+[^ ]*/RET/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_bench\+[^ ]*/brstack_bench\+[^ ]*/COND/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack\+[^ ]*/brstack\+[^ ]*/UNCOND/.*$" $TMPDIR/perf.script
- set +x
-
+ expected=(
+ "^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/IND_CALL/.*$"
+ "^brstack_foo\+[^ ]*/brstack_bar\+[^ ]*/CALL/.*$"
+ "^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/CALL/.*$"
+ "^brstack_bench\+[^ ]*/brstack_bar\+[^ ]*/CALL/.*$"
+ "^brstack_bar\+[^ ]*/brstack_foo\+[^ ]*/RET/.*$"
+ "^brstack_foo\+[^ ]*/brstack_bench\+[^ ]*/RET/.*$"
+ "^brstack_bench\+[^ ]*/brstack_bench\+[^ ]*/COND/.*$"
+ "^brstack\+[^ ]*/brstack\+[^ ]*/UNCOND/.*$"
+ )
+ for x in "${expected[@]}"
+ do
+ if ! tr -s ' ' '\n' < "$TMPDIR/perf.script" | grep -E -m1 -q "$x"
+ then
+ echo "Branches missing $x"
+ err=1
+ fi
+ done
# some branch types are still not being tested:
# IND COND_CALL COND_RET SYSCALL SYSRET IRQ SERROR NO_TX
}
@@ -57,14 +72,28 @@ test_filter() {
test_filter_expect=$2
echo "Testing branch stack filtering permutation ($test_filter_filter,$test_filter_expect)"
-
- perf record -o $TMPDIR/perf.data --branch-filter $test_filter_filter,save_type,u -- ${TESTPROG} > /dev/null 2>&1
- perf script -i $TMPDIR/perf.data --fields brstack | xargs -n1 > $TMPDIR/perf.script
+ perf record -o "$TMPDIR/perf.data" --branch-filter "$test_filter_filter,save_type,u" -- ${TESTPROG} > "$TMPDIR/record.txt" 2>&1
+ perf script -i "$TMPDIR/perf.data" --fields brstack > "$TMPDIR/perf.script"
# fail if we find any branch type that doesn't match any of the expected ones
# also consider UNKNOWN branch types (-)
- if grep -E -vm1 "^[^ ]*/($test_filter_expect|-|( *))/.*$" $TMPDIR/perf.script; then
- return 1
+ if [ ! -s "$TMPDIR/perf.script" ]
+ then
+ echo "Empty script output"
+ err=1
+ return
+ fi
+ # Look for lines not matching test_filter_expect, ignoring blank lines
+ # in the output
+ tr -s ' ' '\n' < "$TMPDIR/perf.script" | grep '.' | \
+ grep -E -vm1 "^[^ ]*/($test_filter_expect|-|( *))/.*$" \
+ > "$TMPDIR/perf.script-filtered" || true
+ if [ -s "$TMPDIR/perf.script-filtered" ]
+ then
+ echo "Unexpected branch filter in script output"
+ cat "$TMPDIR/perf.script"
+ err=1
+ return
fi
}
@@ -80,3 +109,6 @@ test_filter "any_ret" "RET|COND_RET|SYSRET|ERET"
test_filter "call,cond" "CALL|SYSCALL|COND"
test_filter "any_call,cond" "CALL|IND_CALL|COND_CALL|IRQ|SYSCALL|COND"
test_filter "cond,any_call,any_ret" "COND|CALL|IND_CALL|COND_CALL|SYSCALL|IRQ|RET|COND_RET|SYSRET|ERET"
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/test_data_symbol.sh b/tools/perf/tests/shell/test_data_symbol.sh
index c86da0235059..d61b5659a46d 100755
--- a/tools/perf/tests/shell/test_data_symbol.sh
+++ b/tools/perf/tests/shell/test_data_symbol.sh
@@ -5,8 +5,6 @@
# Leo Yan <leo.yan@linaro.org>, 2022
shelldir=$(dirname "$0")
-# shellcheck source=lib/waiting.sh
-. "${shelldir}"/lib/waiting.sh
# shellcheck source=lib/perf_has_symbol.sh
. "${shelldir}"/lib/perf_has_symbol.sh
@@ -18,7 +16,7 @@ skip_if_no_mem_event() {
skip_if_no_mem_event || exit 2
-skip_test_missing_symbol buf1
+skip_test_missing_symbol workload_datasym_buf1
TEST_PROGRAM="perf test -w datasym"
PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
@@ -26,18 +24,19 @@ ERR_FILE=$(mktemp /tmp/__perf_test.stderr.XXXXX)
check_result() {
# The memory report format is as below:
- # 99.92% ... [.] buf1+0x38
+ # 99.92% ... [.] workload_datasym_buf1+0x38
result=$(perf mem report -i ${PERF_DATA} -s symbol_daddr -q 2>&1 |
- awk '/buf1/ { print $4 }')
+ awk '/workload_datasym_buf1/ { print $4 }')
- # Testing is failed if has no any sample for "buf1"
+ # The test fails if there is no sample at all for "workload_datasym_buf1"
[ -z "$result" ] && return 1
while IFS= read -r line; do
- # The "data1" and "data2" fields in structure "buf1" have
- # offset "0x0" and "0x38", returns failure if detect any
- # other offset value.
- if [ "$line" != "buf1+0x0" ] && [ "$line" != "buf1+0x38" ]; then
+ # The "data1" and "data2" fields in structure
+ # "workload_datasym_buf1" have offset "0x0" and "0x38", returns
+ # failure if detect any other offset value.
+ if [ "$line" != "workload_datasym_buf1+0x0" ] && \
+ [ "$line" != "workload_datasym_buf1+0x38" ]; then
return 1
fi
done <<< "$result"
@@ -55,24 +54,38 @@ trap cleanup_files exit term int
echo "Recording workload..."
-# perf mem/c2c internally uses IBS PMU on AMD CPU which doesn't support
-# user/kernel filtering and per-process monitoring, spin program on
-# specific CPU and test in per-CPU mode.
is_amd=$(grep -E -c 'vendor_id.*AuthenticAMD' /proc/cpuinfo)
if (($is_amd >= 1)); then
- perf mem record -vvv -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM 2>"${ERR_FILE}" &
-else
- perf mem record -vvv --all-user -o ${PERF_DATA} -- $TEST_PROGRAM 2>"${ERR_FILE}" &
-fi
-
-PERFPID=$!
-
-wait_for_perf_to_start ${PERFPID} "${ERR_FILE}"
+ mem_events="$(perf mem record -v -e list 2>&1)"
+ if ! [[ "$mem_events" =~ ^mem\-ldst.*ibs_op/(.*)/.*available ]]; then
+ echo "ERROR: mem-ldst event is not matching"
+ exit 1
+ fi
+
+ # --ldlat on AMD:
+ # o Zen4 and earlier uarch does not support ldlat
+ # o Even on supported platforms, it's disabled (--ldlat=0) by default.
+ ldlat=${BASH_REMATCH[1]}
+ if [[ -n $ldlat ]]; then
+ if ! [[ "$ldlat" =~ ldlat=0 ]]; then
+ echo "ERROR: ldlat not initialized to 0?"
+ exit 1
+ fi
-sleep 1
+ mem_events="$(perf mem record -v --ldlat=150 -e list 2>&1)"
+ if ! [[ "$mem_events" =~ ^mem-ldst.*ibs_op/ldlat=150/.*available ]]; then
+ echo "ERROR: --ldlat not honored?"
+ exit 1
+ fi
+ fi
-kill $PERFPID
-wait $PERFPID
+ # perf mem/c2c internally uses IBS PMU on AMD CPU which doesn't
+ # support user/kernel filtering and per-process monitoring on older
+ # kernels, spin program on specific CPU and test in per-CPU mode.
+ perf mem record -vvv -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM 2>"${ERR_FILE}"
+else
+ perf mem record -vvv --all-user -o ${PERF_DATA} -- $TEST_PROGRAM 2>"${ERR_FILE}"
+fi
check_result
exit $?
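
The AMD branch leans on bash's =~ operator: BASH_REMATCH[1] holds whatever the first parenthesized group matched. With a synthetic line shaped like the regex in the hunk expects:

    line="mem-ldst : available on ibs_op/ldlat=0/"
    if [[ "$line" =~ ibs_op/(.*)/ ]]; then
        echo "${BASH_REMATCH[1]}"    # ldlat=0
    fi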
diff --git a/tools/perf/tests/shell/test_intel_pt.sh b/tools/perf/tests/shell/test_intel_pt.sh
index e6f0070975f6..8ee761f03c38 100755
--- a/tools/perf/tests/shell/test_intel_pt.sh
+++ b/tools/perf/tests/shell/test_intel_pt.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Miscellaneous Intel PT testing (exclusive)
# SPDX-License-Identifier: GPL-2.0
@@ -288,6 +288,11 @@ test_jitdump()
jitdump_incl_dir="${script_dir}/../../util"
jitdump_h="${jitdump_incl_dir}/jitdump.h"
+ if ! perf check feature -q libelf ; then
+ echo "SKIP: libelf is needed for jitdump"
+ return 2
+ fi
+
if [ ! -e "${jitdump_h}" ] ; then
echo "SKIP: Include file jitdump.h not found"
return 2
@@ -644,6 +649,33 @@ test_pipe()
return 0
}
+test_pause_resume()
+{
+ echo "--- Test with pause / resume ---"
+ if ! perf_record_no_decode -o "${perfdatafile}" -e intel_pt/aux-action=start-paused/u uname ; then
+ echo "SKIP: pause / resume is not supported"
+ return 2
+ fi
+ if ! perf_record_no_bpf -o "${perfdatafile}" \
+ -e intel_pt/aux-action=start-paused/u \
+ -e instructions/period=50000,aux-action=resume,name=Resume/u \
+ -e instructions/period=100000,aux-action=pause,name=Pause/u uname ; then
+ echo "perf record with pause / resume failed"
+ return 1
+ fi
+ if ! perf script -i "${perfdatafile}" --itrace=b -Fperiod,event | \
+ awk 'BEGIN {paused=1;branches=0}
+ /Resume/ {paused=0}
+ /branches/ {if (paused) exit 1;branches=1}
+ /Pause/ {paused=1}
+ END {if (!branches) exit 1}' ; then
+ echo "perf record with pause / resume failed"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
count_result()
{
if [ "$1" -eq 2 ] ; then
@@ -672,6 +704,7 @@ test_power_event || ret=$? ; count_result $ret ; ret=0
test_no_tnt || ret=$? ; count_result $ret ; ret=0
test_event_trace || ret=$? ; count_result $ret ; ret=0
test_pipe || ret=$? ; count_result $ret ; ret=0
+test_pause_resume || ret=$? ; count_result $ret ; ret=0
cleanup
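
The pause/resume test drives the aux-action event term: tracing starts paused and two periodic instructions events toggle it. Stripped of the test plumbing, the recording step looks like this (same event strings as the hunk):

    perf record -e intel_pt/aux-action=start-paused/u \
                -e instructions/period=50000,aux-action=resume,name=Resume/u \
                -e instructions/period=100000,aux-action=pause,name=Pause/u -- uname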
diff --git a/tools/perf/tests/shell/test_stat_intel_tpebs.sh b/tools/perf/tests/shell/test_stat_intel_tpebs.sh
index f95fc64bf0a7..a330ecdb7ba5 100755
--- a/tools/perf/tests/shell/test_stat_intel_tpebs.sh
+++ b/tools/perf/tests/shell/test_stat_intel_tpebs.sh
@@ -3,20 +3,83 @@
# SPDX-License-Identifier: GPL-2.0
set -e
-grep -q GenuineIntel /proc/cpuinfo || { echo Skipping non-Intel; exit 2; }
-# Use this event for testing because it should exist in all platforms
-event=cache-misses:R
+ParanoidAndNotRoot() {
+ [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
+}
-# Hybrid platforms output like "cpu_atom/cache-misses/R", rather than as above
-alt_name=/cache-misses/R
+if ! grep -q GenuineIntel /proc/cpuinfo
+then
+ echo "Skipping non-Intel"
+ exit 2
+fi
-# Without this cmd option, default value or zero is returned
-#echo "Testing without --record-tpebs"
-#result=$(perf stat -e "$event" true 2>&1)
-#[[ "$result" =~ $event || "$result" =~ $alt_name ]] || exit 1
+if ParanoidAndNotRoot 0
+then
+ echo "Skipping paranoid >0 and not root"
+ exit 2
+fi
-# In platforms that do not support TPEBS, it should execute without error.
-echo "Testing with --record-tpebs"
-result=$(perf stat -e "$event" --record-tpebs -a sleep 0.01 2>&1)
-[[ "$result" =~ "perf record" && "$result" =~ $event || "$result" =~ $alt_name ]] || exit 1
+stat_output=$(mktemp /tmp/__perf_stat_tpebs_output.XXXXX)
+
+cleanup() {
+ rm -rf "${stat_output}"
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cat "${stat_output}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+# Event to be used in tests
+event=cache-misses
+
+if ! perf record -e "${event}:p" -a -o /dev/null sleep 0.01 > "${stat_output}" 2>&1
+then
+ echo "Missing ${event} support"
+ cleanup
+ exit 2
+fi
+
+test_with_record_tpebs() {
+ echo "Testing with --record-tpebs"
+ if ! perf stat -e "${event}:R" --record-tpebs -a sleep 0.01 > "${stat_output}" 2>&1
+ then
+ echo "Testing with --record-tpebs [Failed perf stat]"
+ cat "${stat_output}"
+ exit 1
+ fi
+
+ # Expected output:
+ # $ perf stat --record-tpebs -e cache-misses:R -a sleep 0.01
+ # Events enabled
+ # [ perf record: Woken up 2 times to write data ]
+ # [ perf record: Captured and wrote 0.056 MB - ]
+ #
+ # Performance counter stats for 'system wide':
+ #
+ # 0 cache-misses:R
+ #
+ # 0.013963299 seconds time elapsed
+ if ! grep "perf record" "${stat_output}"
+ then
+ echo "Testing with --record-tpebs [Failed missing perf record]"
+ cat "${stat_output}"
+ exit 1
+ fi
+ if ! grep "${event}:R" "${stat_output}" && ! grep "/${event}/R" "${stat_output}"
+ then
+ echo "Testing with --record-tpebs [Failed missing event name]"
+ cat "${stat_output}"
+ exit 1
+ fi
+ echo "Testing with --record-tpebs [Success]"
+}
+
+test_with_record_tpebs
+cleanup
+exit 0
diff --git a/tools/perf/tests/shell/test_task_analyzer.sh b/tools/perf/tests/shell/test_task_analyzer.sh
index 7d76fc63d995..e194fcf61df3 100755
--- a/tools/perf/tests/shell/test_task_analyzer.sh
+++ b/tools/perf/tests/shell/test_task_analyzer.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# perf script task-analyzer tests
+# perf script task-analyzer tests (exclusive)
# SPDX-License-Identifier: GPL-2.0
tmpdir=$(mktemp -d /tmp/perf-script-task-analyzer-XXXXX)
diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
index 33387c329f92..7adf9755d6de 100755
--- a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
+++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
@@ -4,12 +4,11 @@
set -e
-# Skip if there's no probe command.
-if ! perf | grep probe
-then
- echo "Skip: probe command isn't present"
- exit 2
-fi
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+
+skip_if_no_perf_probe || exit 2
+[ "$(id -u)" == 0 ] || exit 2
# skip if there's no gcc
if ! [ -x "$(command -v gcc)" ]; then
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 708a13f00635..7a0b1145d0cd 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Check open filename arg using perf trace + vfs_getname (exclusive)
# Uses the 'perf test shell' library to add probe:vfs_getname to the system
@@ -15,6 +15,7 @@
skip_if_no_perf_probe || exit 2
skip_if_no_perf_trace || exit 2
+[ "$(id -u)" = 0 ] || exit 2
. "$(dirname $0)"/lib/probe_vfs_getname.sh
@@ -24,9 +25,14 @@ trace_open_vfs_getname() {
grep -E " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +\"?${file}\"?, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
}
-
-add_probe_vfs_getname || skip_if_no_debuginfo
+add_probe_vfs_getname
err=$?
+
+if [ $err -eq 1 ] ; then
+ skip_if_no_debuginfo
+ err=$?
+fi
+
if [ $err -ne 0 ] ; then
exit $err
fi
diff --git a/tools/perf/tests/shell/trace_btf_enum.sh b/tools/perf/tests/shell/trace_btf_enum.sh
index 5a3b8a5a9b5c..572001d75d78 100755
--- a/tools/perf/tests/shell/trace_btf_enum.sh
+++ b/tools/perf/tests/shell/trace_btf_enum.sh
@@ -1,22 +1,22 @@
-#!/bin/sh
+#!/bin/bash
# perf trace enum augmentation tests
# SPDX-License-Identifier: GPL-2.0
err=0
-set -e
syscall="landlock_add_rule"
-non_syscall="timer:hrtimer_init,timer:hrtimer_start"
+non_syscall="timer:hrtimer_start"
TESTPROG="perf test -w landlock"
# shellcheck source=lib/probe.sh
. "$(dirname $0)"/lib/probe.sh
skip_if_no_perf_trace || exit 2
+[ "$(id -u)" = 0 ] || exit 2
check_vmlinux() {
echo "Checking if vmlinux exists"
- if ! ls /sys/kernel/btf/vmlinux 1>/dev/null 2>&1
+ if [ ! -f /sys/kernel/btf/vmlinux ]
then
echo "trace+enum test [Skipped missing vmlinux BTF support]"
err=2
@@ -26,25 +26,31 @@ check_vmlinux() {
trace_landlock() {
echo "Tracing syscall ${syscall}"
- # test flight just to see if landlock_add_rule and libbpf are available
- $TESTPROG
+ # trial run just to see if landlock_add_rule is available
+ if ! perf trace $TESTPROG 2>&1 | grep -q landlock
+ then
+ echo "No landlock system call found, skipping to non-syscall tracing."
+ return
+ fi
- if perf trace -e $syscall $TESTPROG 2>&1 | \
- grep -q -E ".*landlock_add_rule\(ruleset_fd: 11, rule_type: (LANDLOCK_RULE_PATH_BENEATH|LANDLOCK_RULE_NET_PORT), rule_attr: 0x[a-f0-9]+, flags: 45\) = -1.*"
+ output="$(perf trace -e $syscall $TESTPROG 2>&1)"
+ if echo "$output" | grep -q -E ".*landlock_add_rule\(ruleset_fd: 11, rule_type: (LANDLOCK_RULE_PATH_BENEATH|LANDLOCK_RULE_NET_PORT), rule_attr: 0x[a-f0-9]+, flags: 45\) = -1.*"
then
err=0
else
+ printf "[syscall failure] Failed to trace syscall $syscall, output:\n$output\n"
err=1
fi
}
trace_non_syscall() {
- echo "Tracing non-syscall tracepoint ${non-syscall}"
- if perf trace -e $non_syscall --max-events=1 2>&1 | \
- grep -q -E '.*timer:hrtimer_.*\(.*mode: HRTIMER_MODE_.*\)$'
+ echo "Tracing non-syscall tracepoint ${non_syscall}"
+ output="$(perf trace -e $non_syscall --max-events=1 2>&1)"
+ if echo "$output" | grep -q -E '.*timer:hrtimer_.*\(.*mode: HRTIMER_MODE_.*\)$'
then
err=0
else
+ printf "[tracepoint failure] Failed to trace tracepoint $non_syscall, output:\n$output\n"
err=1
fi
}
diff --git a/tools/perf/tests/shell/trace_btf_general.sh b/tools/perf/tests/shell/trace_btf_general.sh
new file mode 100755
index 000000000000..ef2da806be6b
--- /dev/null
+++ b/tools/perf/tests/shell/trace_btf_general.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+# perf trace BTF general tests
+# SPDX-License-Identifier: GPL-2.0
+
+err=0
+
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+
+file1=$(mktemp /tmp/file1_XXXX)
+file2=$(echo $file1 | sed 's/file1/file2/g')
+
+buffer="buffer content"
+perf_config_tmp=$(mktemp /tmp/.perfconfig_XXXXX)
+
+trap cleanup EXIT TERM INT HUP
+
+check_vmlinux() {
+ echo "Checking if vmlinux BTF exists"
+ if [ ! -f /sys/kernel/btf/vmlinux ]
+ then
+ echo "Skipped due to missing vmlinux BTF"
+ return 2
+ fi
+ return 0
+}
+
+trace_test_string() {
+ echo "Testing perf trace's string augmentation"
+ output="$(perf trace --sort-events -e renameat* --max-events=1 -- mv ${file1} ${file2} 2>&1)"
+ if ! echo "$output" | grep -q -E "^mv/[0-9]+ renameat(2)?\(.*, \"${file1}\", .*, \"${file2}\", .*\) += +[0-9]+$"
+ then
+ printf "String augmentation test failed, output:\n$output\n"
+ err=1
+ fi
+}
+
+trace_test_buffer() {
+ echo "Testing perf trace's buffer augmentation"
+ # echo will insert a newline (\10) at the end of the buffer
+ output="$(perf trace --sort-events -e write --max-events=1 -- echo "${buffer}" 2>&1)"
+ if ! echo "$output" | grep -q -E "^echo/[0-9]+ write\([0-9]+, ${buffer}.*, [0-9]+\) += +[0-9]+$"
+ then
+ printf "Buffer augmentation test failed, output:\n$output\n"
+ err=1
+ fi
+}
+
+trace_test_struct_btf() {
+ echo "Testing perf trace's struct augmentation"
+ output="$(perf trace --sort-events -e clock_nanosleep --force-btf --max-events=1 -- sleep 1 2>&1)"
+ if ! echo "$output" | grep -q -E "^sleep/[0-9]+ clock_nanosleep\(0, 0, \{1,.*\}, 0x[0-9a-f]+\) += +[0-9]+$"
+ then
+ printf "BTF struct augmentation test failed, output:\n$output\n"
+ err=1
+ fi
+}
+
+cleanup() {
+ rm -rf ${file1} ${file2} ${perf_config_tmp}
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+
+# don't overwrite user's perf config
+trace_config() {
+ export PERF_CONFIG=${perf_config_tmp}
+ perf config trace.show_arg_names=false trace.show_duration=false \
+ trace.show_timestamp=false trace.args_alignment=0
+}
+
+skip_if_no_perf_trace || exit 2
+check_vmlinux || exit 2
+[ "$(id -u)" = 0 ] || exit 2
+
+trace_config
+
+trace_test_string
+
+if [ $err = 0 ]; then
+ trace_test_buffer
+fi
+
+if [ $err = 0 ]; then
+ trace_test_struct_btf
+fi
+
+cleanup
+
+exit $err
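
The trace_config trick generalizes: perf honors the PERF_CONFIG environment variable as the location of the user config file, so a test can rewrite settings without touching ~/.perfconfig:

    PERF_CONFIG=$(mktemp /tmp/.perfconfig_XXXXX)
    export PERF_CONFIG
    perf config trace.show_arg_names=false trace.show_duration=false
    perf config --list    # reads the throwaway file, not ~/.perfconfig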
diff --git a/tools/perf/tests/shell/trace_exit_race.sh b/tools/perf/tests/shell/trace_exit_race.sh
index fbb0adc33a88..db300cde94fb 100755
--- a/tools/perf/tests/shell/trace_exit_race.sh
+++ b/tools/perf/tests/shell/trace_exit_race.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf trace exit race
# SPDX-License-Identifier: GPL-2.0
@@ -10,6 +10,7 @@
. "$(dirname $0)"/lib/probe.sh
skip_if_no_perf_trace || exit 2
+[ "$(id -u)" = 0 ] || exit 2
if [ "$1" = "-v" ]; then
verbose="1"
diff --git a/tools/perf/tests/shell/trace_record_replay.sh b/tools/perf/tests/shell/trace_record_replay.sh
new file mode 100755
index 000000000000..88d30a03dcec
--- /dev/null
+++ b/tools/perf/tests/shell/trace_record_replay.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# perf trace record and replay
+# SPDX-License-Identifier: GPL-2.0
+
+# Check that perf trace works with record and replay
+
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+
+skip_if_no_perf_trace || exit 2
+[ "$(id -u)" = 0 ] || exit 2
+
+file=$(mktemp /tmp/temporary_file.XXXXX)
+
+perf trace record -o ${file} sleep 1 || exit 1
+if ! perf trace -i ${file} 2>&1 | grep nanosleep; then
+ echo "Failed: cannot find *nanosleep syscall"
+ exit 1
+fi
+
+rm -f ${file}
diff --git a/tools/perf/tests/shell/trace_summary.sh b/tools/perf/tests/shell/trace_summary.sh
new file mode 100755
index 000000000000..22e2651d5919
--- /dev/null
+++ b/tools/perf/tests/shell/trace_summary.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+# perf trace summary (exclusive)
+# SPDX-License-Identifier: GPL-2.0
+
+# Check that perf trace works with various summary mode
+
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+
+skip_if_no_perf_trace || exit 2
+[ "$(id -u)" = 0 ] || exit 2
+
+OUTPUT=$(mktemp /tmp/perf_trace_test.XXXXX)
+
+test_perf_trace() {
+ args=$1
+ workload="true"
+ search="^\s*(open|read|close).*[0-9]+%$"
+
+ echo "testing: perf trace ${args} -- ${workload}"
+ perf trace ${args} -- ${workload} >${OUTPUT} 2>&1
+ if [ $? -ne 0 ]; then
+ echo "Error: perf trace ${args} failed unexpectedly"
+ cat ${OUTPUT}
+ rm -f ${OUTPUT}
+ exit 1
+ fi
+
+ count=$(grep -E -c -m 3 "${search}" ${OUTPUT})
+ if [ "${count}" != "3" ]; then
+ echo "Error: cannot find enough pattern ${search} in the output"
+ cat ${OUTPUT}
+ rm -f ${OUTPUT}
+ exit 1
+ fi
+}
+
+# summary only for a process
+test_perf_trace "-s"
+
+# normal output with summary at the end
+test_perf_trace "-S"
+
+# summary only with an explicit summary mode
+test_perf_trace "-s --summary-mode=thread"
+
+# summary with normal output - total summary mode
+test_perf_trace "-S --summary-mode=total"
+
+# summary only for system wide - per-thread summary
+test_perf_trace "-as --summary-mode=thread --no-bpf-summary"
+
+# summary only for system wide - total summary mode
+test_perf_trace "-as --summary-mode=total --no-bpf-summary"
+
+if ! perf check feature -q bpf; then
+ echo "Skip --bpf-summary tests as perf built without libbpf"
+ rm -f ${OUTPUT}
+ exit 2
+fi
+
+# summary only for system wide - per-thread summary with BPF
+test_perf_trace "-as --summary-mode=thread --bpf-summary"
+
+# summary only for system wide - total summary mode with BPF
+test_perf_trace "-as --summary-mode=total --bpf-summary"
+
+# summary with normal output for system wide - total summary mode with BPF
+test_perf_trace "-aS --summary-mode=total --bpf-summary"
+
+# summary only for system wide - cgroup summary mode with BPF
+test_perf_trace "-as --summary-mode=cgroup --bpf-summary"
+
+# summary with normal output for system wide - cgroup summary mode with BPF
+test_perf_trace "-aS --summary-mode=cgroup --bpf-summary"
+
+rm -f ${OUTPUT}