Diffstat (limited to 'tools/perf/tests/shell')
-rwxr-xr-x  tools/perf/tests/shell/annotate.sh                               |  56
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh    |   4
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_adding_kernel.sh         |   8
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_basic.sh                 |   4
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_invalid_options.sh       |   9
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_line_semantics.sh        |   9
-rwxr-xr-x  tools/perf/tests/shell/base_report/setup.sh                     |  20
-rwxr-xr-x  tools/perf/tests/shell/base_report/test_basic.sh                |  54
-rw-r--r--  tools/perf/tests/shell/common/init.sh                           |   7
-rw-r--r--  tools/perf/tests/shell/coresight/Makefile                       |   2
-rw-r--r--  tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S  |   2
-rwxr-xr-x  tools/perf/tests/shell/diff.sh                                  |  12
-rwxr-xr-x  tools/perf/tests/shell/ftrace.sh                                |   5
-rw-r--r--  tools/perf/tests/shell/lib/attr.py                              |   8
-rw-r--r--  tools/perf/tests/shell/lib/perf_json_output_lint.py             |   7
-rw-r--r--  tools/perf/tests/shell/lib/stat_output.sh                       |  13
-rwxr-xr-x  tools/perf/tests/shell/perftool-testsuite_probe.sh              |   3
-rwxr-xr-x  tools/perf/tests/shell/probe_vfs_getname.sh                     |   1
-rwxr-xr-x  tools/perf/tests/shell/record+probe_libc_inet_pton.sh           |  37
-rwxr-xr-x  tools/perf/tests/shell/record+script_probe_vfs_getname.sh       |   1
-rwxr-xr-x  tools/perf/tests/shell/record.sh                                |  53
-rwxr-xr-x  tools/perf/tests/shell/record_bpf_filter.sh                     |   4
-rwxr-xr-x  tools/perf/tests/shell/stat+csv_output.sh                       |   2
-rwxr-xr-x  tools/perf/tests/shell/stat+json_output.sh                      |  14
-rwxr-xr-x  tools/perf/tests/shell/stat+std_output.sh                       |  10
-rwxr-xr-x  tools/perf/tests/shell/stat.sh                                  |  83
-rwxr-xr-x  tools/perf/tests/shell/stat_all_metrics.sh                      |   8
-rwxr-xr-x  tools/perf/tests/shell/stat_all_pmu.sh                          |  48
-rwxr-xr-x  tools/perf/tests/shell/test_arm_spe.sh                          |  30
-rwxr-xr-x  tools/perf/tests/shell/test_brstack.sh                          |   4
-rwxr-xr-x  tools/perf/tests/shell/test_data_symbol.sh                      |  32
-rwxr-xr-x  tools/perf/tests/shell/test_intel_pt.sh                         |  28
-rwxr-xr-x  tools/perf/tests/shell/test_stat_intel_tpebs.sh                 |  89
-rwxr-xr-x  tools/perf/tests/shell/test_task_analyzer.sh                    |   2
-rwxr-xr-x  tools/perf/tests/shell/test_uprobe_from_different_cu.sh         |  11
-rwxr-xr-x  tools/perf/tests/shell/trace+probe_vfs_getname.sh               |   1
-rwxr-xr-x  tools/perf/tests/shell/trace_btf_enum.sh                        |   3
-rwxr-xr-x  tools/perf/tests/shell/trace_btf_general.sh                     |  95
-rwxr-xr-x  tools/perf/tests/shell/trace_exit_race.sh                       |   1
-rwxr-xr-x  tools/perf/tests/shell/trace_record_replay.sh                   |  21
40 files changed, 613 insertions(+), 188 deletions(-)
diff --git a/tools/perf/tests/shell/annotate.sh b/tools/perf/tests/shell/annotate.sh
index 1590a37363de..16a1ccd06089 100755
--- a/tools/perf/tests/shell/annotate.sh
+++ b/tools/perf/tests/shell/annotate.sh
@@ -35,54 +35,78 @@ trap_cleanup() {
trap trap_cleanup EXIT TERM INT
test_basic() {
- echo "Basic perf annotate test"
- if ! perf record -o "${perfdata}" ${testprog} 2> /dev/null
+ mode=$1
+ echo "${mode} perf annotate test"
+ if [ "x${mode}" == "xBasic" ]
then
- echo "Basic annotate [Failed: perf record]"
+ perf record -o "${perfdata}" ${testprog} 2> /dev/null
+ else
+ perf record -o - ${testprog} 2> /dev/null > "${perfdata}"
+ fi
+ if [ "x$?" != "x0" ]
+ then
+ echo "${mode} annotate [Failed: perf record]"
err=1
return
fi
# Generate the annotated output file
- perf annotate --no-demangle -i "${perfdata}" --stdio 2> /dev/null | head -250 > "${perfout}"
+ if [ "x${mode}" == "xBasic" ]
+ then
+ perf annotate --no-demangle -i "${perfdata}" --stdio 2> /dev/null > "${perfout}"
+ else
+ perf annotate --no-demangle -i - --stdio 2> /dev/null < "${perfdata}" > "${perfout}"
+ fi
# check if it has the target symbol
- if ! grep "${testsym}" "${perfout}"
+ if ! head -250 "${perfout}" | grep -q "${testsym}"
then
- echo "Basic annotate [Failed: missing target symbol]"
+ echo "${mode} annotate [Failed: missing target symbol]"
err=1
return
fi
# check if it has the disassembly lines
- if ! grep "${disasm_regex}" "${perfout}"
+ if ! head -250 "${perfout}" | grep -q "${disasm_regex}"
then
- echo "Basic annotate [Failed: missing disasm output from default disassembler]"
+ echo "${mode} annotate [Failed: missing disasm output from default disassembler]"
err=1
return
fi
# check again with a target symbol name
- if ! perf annotate --no-demangle -i "${perfdata}" "${testsym}" 2> /dev/null | \
- head -250 | grep -m 3 "${disasm_regex}"
+ if [ "x${mode}" == "xBasic" ]
then
- echo "Basic annotate [Failed: missing disasm output when specifying the target symbol]"
+ perf annotate --no-demangle -i "${perfdata}" "${testsym}" 2> /dev/null > "${perfout}"
+ else
+ perf annotate --no-demangle -i - "${testsym}" 2> /dev/null < "${perfdata}" > "${perfout}"
+ fi
+
+ if ! head -250 "${perfout}"| grep -q -m 3 "${disasm_regex}"
+ then
+ echo "${mode} annotate [Failed: missing disasm output when specifying the target symbol]"
err=1
return
fi
# check one more with external objdump tool (forced by --objdump option)
- if ! perf annotate --no-demangle -i "${perfdata}" --objdump=objdump 2> /dev/null | \
- head -250 | grep -m 3 "${disasm_regex}"
+ if [ "x${mode}" == "xBasic" ]
+ then
+ perf annotate --no-demangle -i "${perfdata}" --objdump=objdump 2> /dev/null > "${perfout}"
+ else
+ perf annotate --no-demangle -i - "${testsym}" 2> /dev/null < "${perfdata}" > "${perfout}"
+ fi
+ if ! head -250 "${perfout}" | grep -q -m 3 "${disasm_regex}"
then
- echo "Basic annotate [Failed: missing disasm output from non default disassembler (using --objdump)]"
+ echo "${mode} annotate [Failed: missing disasm output from non default disassembler (using --objdump)]"
err=1
return
fi
- echo "Basic annotate test [Success]"
+ echo "${mode} annotate test [Success]"
}
-test_basic
+test_basic Basic
+test_basic Pipe
cleanup
exit $err
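
The new Pipe mode exercises perf's pipe-format I/O path: record writes to stdout and annotate reads from stdin, so no seekable perf.data file is involved and the streaming header/feature handling gets covered. A minimal sketch of the two invocations (workload assumed to be the test's noploop program):

    # Basic: regular on-disk perf.data
    perf record -o perf.data -- perf test -w noploop
    perf annotate --no-demangle -i perf.data --stdio

    # Pipe: pipe-format data over stdin/stdout
    perf record -o - -- perf test -w noploop > pipe.data
    perf annotate --no-demangle -i - --stdio < pipe.data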
diff --git a/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
index bead723e34af..8226449ac5c3 100755
--- a/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
+++ b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perf_probe :: Reject blacklisted probes (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -22,7 +22,7 @@ TEST_RESULT=0
BLACKFUNC_LIST=`head -n 5 /sys/kernel/debug/kprobes/blacklist 2> /dev/null | cut -f2`
if [ -z "$BLACKFUNC_LIST" ]; then
print_overall_skipped
- exit 0
+ exit 2
fi
# try to find vmlinux with DWARF debug info
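
Switching the skip path from exit 0 to exit 2 matches the exit-code convention of the perf shell-test driver, under which a skipped test was previously being counted as a pass:

    # perf test shell convention: 0 = pass, 1 = fail, 2 = skip
    print_overall_skipped
    exit 2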
diff --git a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
index d541ffd44a93..df288cf90cd6 100755
--- a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
+++ b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Add 'perf probe's, list and remove them
+# perf_probe :: Add probes, list and remove them (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -33,7 +33,7 @@ fi
check_kprobes_available
if [ $? -ne 0 ]; then
print_overall_skipped
- exit 0
+ exit 2
fi
@@ -169,7 +169,7 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: second pr
(( TEST_RESULT += $? ))
# adding existing probe with '--force' should pass
-NO_OF_PROBES=`$CMD_PERF probe -l | wc -l`
+NO_OF_PROBES=`$CMD_PERF probe -l $TEST_PROBE| wc -l`
$CMD_PERF probe --force --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_03.err
PERF_EXIT_CODE=$?
@@ -205,7 +205,7 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using doubled probe"
$CMD_PERF probe --del \* 2> $LOGS_DIR/adding_kernel_removing_wildcard.err
PERF_EXIT_CODE=$?
-../common/check_all_lines_matched.pl "Removed event: probe:$TEST_PROBE" "Removed event: probe:${TEST_PROBE}_1" < $LOGS_DIR/adding_kernel_removing_wildcard.err
+../common/check_all_patterns_found.pl "Removed event: probe:$TEST_PROBE" "Removed event: probe:${TEST_PROBE}_1" < $LOGS_DIR/adding_kernel_removing_wildcard.err
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "removing multiple probes"
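
Two fixes here: the probe count is restricted to $TEST_PROBE so unrelated pre-existing probes cannot skew the force-add comparison, and the wildcard-removal check moves from check_all_lines_matched.pl (every output line must match some pattern) to check_all_patterns_found.pl (every pattern must occur on some line), the stronger assertion for this case. A rough grep equivalent of the new check:

    grep -q "Removed event: probe:$TEST_PROBE"     "$LOGS_DIR/adding_kernel_removing_wildcard.err" &&
    grep -q "Removed event: probe:${TEST_PROBE}_1" "$LOGS_DIR/adding_kernel_removing_wildcard.err"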
diff --git a/tools/perf/tests/shell/base_probe/test_basic.sh b/tools/perf/tests/shell/base_probe/test_basic.sh
index 09669ec479f2..9d8b5afbeddd 100755
--- a/tools/perf/tests/shell/base_probe/test_basic.sh
+++ b/tools/perf/tests/shell/base_probe/test_basic.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perf_probe :: Basic perf probe functionality (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -19,7 +19,7 @@ TEST_RESULT=0
if ! check_kprobes_available; then
print_overall_skipped
- exit 0
+ exit 2
fi
diff --git a/tools/perf/tests/shell/base_probe/test_invalid_options.sh b/tools/perf/tests/shell/base_probe/test_invalid_options.sh
index 1fedfd8b0d0d..92f7254eb32a 100755
--- a/tools/perf/tests/shell/base_probe/test_invalid_options.sh
+++ b/tools/perf/tests/shell/base_probe/test_invalid_options.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perf_probe :: Reject invalid options (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -19,9 +19,12 @@ TEST_RESULT=0
if ! check_kprobes_available; then
print_overall_skipped
- exit 0
+ exit 2
fi
+# Check for presence of DWARF
+$CMD_PERF check feature -q dwarf
+[ $? -ne 0 ] && HINT_FAIL="Some of the tests need DWARF to run"
### missing argument
@@ -75,5 +78,5 @@ done
# print overall results
-print_overall_results "$TEST_RESULT"
+print_overall_results "$TEST_RESULT" $HINT_FAIL
exit $?
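
'perf check feature' reports whether the running perf binary was built with a given feature; -q suppresses output so only the exit status is used. A quick interactive sketch:

    perf check feature -q dwarf && echo "DWARF support built in"

On a non-DWARF build the probes still run, but HINT_FAIL makes a failing summary explain the likely cause rather than just printing a failure count.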
diff --git a/tools/perf/tests/shell/base_probe/test_line_semantics.sh b/tools/perf/tests/shell/base_probe/test_line_semantics.sh
index d8f4bde0f585..20435b6bf6bc 100755
--- a/tools/perf/tests/shell/base_probe/test_line_semantics.sh
+++ b/tools/perf/tests/shell/base_probe/test_line_semantics.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perf_probe :: Check patterns for line semantics (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -20,9 +20,12 @@ TEST_RESULT=0
if ! check_kprobes_available; then
print_overall_skipped
- exit 0
+ exit 2
fi
+# Check for presence of DWARF
+$CMD_PERF check feature -q dwarf
+[ $? -ne 0 ] && HINT_FAIL="Some of the tests need DWARF to run"
### acceptable --line descriptions
@@ -51,5 +54,5 @@ done
# print overall results
-print_overall_results "$TEST_RESULT"
+print_overall_results "$TEST_RESULT" $HINT_FAIL
exit $?
diff --git a/tools/perf/tests/shell/base_report/setup.sh b/tools/perf/tests/shell/base_report/setup.sh
index 4caa496660c6..8634e7e0dda6 100755
--- a/tools/perf/tests/shell/base_report/setup.sh
+++ b/tools/perf/tests/shell/base_report/setup.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perftool-testsuite :: perf_report
# SPDX-License-Identifier: GPL-2.0
#
@@ -15,6 +15,8 @@
# include working environment
. ../common/init.sh
+TEST_RESULT=0
+
test -d "$HEADER_TAR_DIR" || mkdir -p "$HEADER_TAR_DIR"
SW_EVENT="cpu-clock"
@@ -26,7 +28,21 @@ PERF_EXIT_CODE=$?
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "prepare the perf.data file"
-TEST_RESULT=$?
+(( TEST_RESULT += $? ))
+
+# Some minimal parallel workload.
+$CMD_PERF record --latency -o $CURRENT_TEST_DIR/perf.data.1 bash -c "for i in {1..100} ; do cat /proc/cpuinfo 1> /dev/null & done; sleep 1" 2> $LOGS_DIR/setup-latency.log
+PERF_EXIT_CODE=$?
+
+echo ==================
+cat $LOGS_DIR/setup-latency.log
+echo ==================
+
+../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/setup-latency.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "prepare the perf.data.1 file"
+(( TEST_RESULT += $? ))
print_overall_results $TEST_RESULT
exit $?
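
The added recording forks ~100 short-lived readers so the data file contains genuinely parallel activity, which the latency and parallelism report tests depend on; --latency makes perf record also capture context switches so the report side can attribute time by parallelism. The workload in isolation:

    for i in {1..100}; do cat /proc/cpuinfo > /dev/null & done
    sleep 1   # let the forked readers overlap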
diff --git a/tools/perf/tests/shell/base_report/test_basic.sh b/tools/perf/tests/shell/base_report/test_basic.sh
index 47677cbd4df3..adfd8713b8f8 100755
--- a/tools/perf/tests/shell/base_report/test_basic.sh
+++ b/tools/perf/tests/shell/base_report/test_basic.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perf_report :: Basic perf report options (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -183,6 +183,58 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "symbol filter"
(( TEST_RESULT += $? ))
+### latency and parallelism
+
+# Record with --latency should record with context switches.
+$CMD_PERF report -i $CURRENT_TEST_DIR/perf.data.1 --stdio --header-only > $LOGS_DIR/latency_header.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl ", context_switch = 1, " < $LOGS_DIR/latency_header.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "latency header"
+(( TEST_RESULT += $? ))
+
+
+# The default report for latency profile should show Overhead and Latency fields (in that order).
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data.1 > $LOGS_DIR/latency_default.log 2> $LOGS_DIR/latency_default.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "# Overhead Latency Command" < $LOGS_DIR/latency_default.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/latency_default.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "default report for latency profile"
+(( TEST_RESULT += $? ))
+
+
+# The latency report for latency profile should show Latency and Overhead fields (in that order).
+$CMD_PERF report --latency --stdio -i $CURRENT_TEST_DIR/perf.data.1 > $LOGS_DIR/latency_latency.log 2> $LOGS_DIR/latency_latency.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "# Latency Overhead Command" < $LOGS_DIR/latency_latency.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/latency_latency.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "latency report for latency profile"
+(( TEST_RESULT += $? ))
+
+
+# Ensure parallelism histogram with parallelism filter does not fail/crash.
+$CMD_PERF report --hierarchy --sort latency,parallelism,comm,symbol --parallelism=1,2 --stdio -i $CURRENT_TEST_DIR/perf.data.1 > $LOGS_DIR/parallelism_hierarchy.log 2> $LOGS_DIR/parallelism_hierarchy.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "# Latency Parallelism / Command / Symbol" < $LOGS_DIR/parallelism_hierarchy.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/parallelism_hierarchy.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "parallelism histogram"
+(( TEST_RESULT += $? ))
+
+
# TODO: $CMD_PERF report -n --showcpuutilization -TUxDg 2> 01.log
# print overall results
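
The column-order assertions encode the intended defaults: a profile recorded with --latency still leads with Overhead in a plain report, and only perf report --latency promotes the Latency column to the front. (Per the latency-profiling series, this column weights samples inversely by observed parallelism, approximating wall-clock impact rather than CPU time.) Quick manual check against the recorded file:

    perf report --stdio -i perf.data.1 | grep '^# Overhead'
    perf report --latency --stdio -i perf.data.1 | grep '^# Latency'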
diff --git a/tools/perf/tests/shell/common/init.sh b/tools/perf/tests/shell/common/init.sh
index 075f17623c8e..26c7525651e0 100644
--- a/tools/perf/tests/shell/common/init.sh
+++ b/tools/perf/tests/shell/common/init.sh
@@ -46,10 +46,13 @@ print_results()
print_overall_results()
{
RETVAL="$1"; shift
+ TASK_COMMENT="$*"
+ test -n "$TASK_COMMENT" && TASK_COMMENT=":: $TASK_COMMENT"
+
if [ $RETVAL -eq 0 ]; then
_echo "$MALLPASS## [ PASS ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY"
else
- _echo "$MALLFAIL## [ FAIL ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY :: $RETVAL failures found"
+ _echo "$MALLFAIL## [ FAIL ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY :: $RETVAL failures found $TASK_COMMENT"
fi
return $RETVAL
}
@@ -85,7 +88,7 @@ consider_skipping()
# the runmode of a testcase needs to be at least the current suite's runmode
if [ $PERFTOOL_TESTSUITE_RUNMODE -lt $TESTCASE_RUNMODE ]; then
print_overall_skipped
- exit 0
+ exit 2
fi
}
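
print_overall_results now accepts trailing words as a hint appended to the FAIL summary. How the probe tests use it (output illustrative):

    HINT_FAIL="Some of the tests need DWARF to run"
    print_overall_results "$TEST_RESULT" $HINT_FAIL
    # ## [ FAIL ] ## perf_probe :: test_invalid_options SUMMARY :: 2 failures found :: Some of the tests need DWARF to run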
diff --git a/tools/perf/tests/shell/coresight/Makefile b/tools/perf/tests/shell/coresight/Makefile
index b070e779703e..fa08fd9a5991 100644
--- a/tools/perf/tests/shell/coresight/Makefile
+++ b/tools/perf/tests/shell/coresight/Makefile
@@ -24,6 +24,6 @@ CLEANDIRS = $(SUBDIRS:%=clean-%)
clean: $(CLEANDIRS)
$(CLEANDIRS):
- $(call QUIET_CLEAN, test-$(@:clean-%=%)) $(Q)$(MAKE) -C $(@:clean-%=%) clean >/dev/null
+ $(call QUIET_CLEAN, test-$(@:clean-%=%)) $(MAKE) -C $(@:clean-%=%) clean >/dev/null
.PHONY: all clean $(SUBDIRS) $(CLEANDIRS) $(INSTALLDIRS)
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
index 75cf084a927d..577760046772 100644
--- a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
@@ -26,3 +26,5 @@ skip:
mov x0, #0
mov x8, #93 // __NR_exit syscall
svc #0
+
+.section .note.GNU-stack, "", @progbits
diff --git a/tools/perf/tests/shell/diff.sh b/tools/perf/tests/shell/diff.sh
index 14b87af88703..e05a5dc49479 100755
--- a/tools/perf/tests/shell/diff.sh
+++ b/tools/perf/tests/shell/diff.sh
@@ -39,13 +39,13 @@ make_data() {
file="$1"
if ! perf record -o "${file}" ${testprog} 2> /dev/null
then
- echo "Workload record [Failed record]"
+ echo "Workload record [Failed record]" >&2
echo 1
return
fi
if ! perf report -i "${file}" -q | grep -q "${testsym}"
then
- echo "Workload record [Failed missing output]"
+ echo "Workload record [Failed missing output]" >&2
echo 1
return
fi
@@ -55,12 +55,12 @@ make_data() {
test_two_files() {
echo "Basic two file diff test"
err=$(make_data "${perfdata1}")
- if [ $err != 0 ]
+ if [ "$err" != 0 ]
then
return
fi
err=$(make_data "${perfdata2}")
- if [ $err != 0 ]
+ if [ "$err" != 0 ]
then
return
fi
@@ -77,12 +77,12 @@ test_two_files() {
test_three_files() {
echo "Basic three file diff test"
err=$(make_data "${perfdata1}")
- if [ $err != 0 ]
+ if [ "$err" != 0 ]
then
return
fi
err=$(make_data "${perfdata2}")
- if [ $err != 0 ]
+ if [ "$err" != 0 ]
then
return
fi
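
make_data is called via command substitution, so its stdout is the return channel for the numeric status; the >&2 redirections keep the diagnostic messages out of $err. The quoting fix matters for the same reason: if make_data printed nothing, [ $err != 0 ] would degenerate into a malformed one-argument test, while [ "$err" != 0 ] stays well-formed. The pattern in miniature:

    err=$(make_data "${perfdata1}")   # stdout -> status, stderr -> terminal
    [ "$err" != 0 ] && return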
diff --git a/tools/perf/tests/shell/ftrace.sh b/tools/perf/tests/shell/ftrace.sh
index 2df05052c324..c243731d2fbf 100755
--- a/tools/perf/tests/shell/ftrace.sh
+++ b/tools/perf/tests/shell/ftrace.sh
@@ -67,11 +67,8 @@ test_ftrace_latency() {
test_ftrace_profile() {
echo "perf ftrace profile test"
- perf ftrace profile -m 16M sleep 0.1 > "${output}"
+ perf ftrace profile --graph-opts depth=5 sleep 0.1 > "${output}"
grep ^# "${output}"
- grep sleep "${output}"
- grep schedule "${output}"
- grep execve "${output}"
time_re="[[:space:]]+1[[:digit:]]{5}\.[[:digit:]]{3}"
# 100283.000 100283.000 100283.000 1 __x64_sys_clock_nanosleep
# Check for one *clock_nanosleep line with a Count of just 1 that takes a bit more than 0.1 seconds
diff --git a/tools/perf/tests/shell/lib/attr.py b/tools/perf/tests/shell/lib/attr.py
index 3db9a7d78715..bfccc727d9b2 100644
--- a/tools/perf/tests/shell/lib/attr.py
+++ b/tools/perf/tests/shell/lib/attr.py
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-from __future__ import print_function
-
+import configparser
import os
import sys
import glob
@@ -13,11 +12,6 @@ import re
import shutil
import subprocess
-try:
- import configparser
-except ImportError:
- import ConfigParser as configparser
-
def data_equal(a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
index b066d721f897..9e772a89ce38 100644
--- a/tools/perf/tests/shell/lib/perf_json_output_lint.py
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -19,6 +19,7 @@ ap.add_argument('--per-cluster', action='store_true')
ap.add_argument('--per-die', action='store_true')
ap.add_argument('--per-node', action='store_true')
ap.add_argument('--per-socket', action='store_true')
+ap.add_argument('--metric-only', action='store_true')
ap.add_argument('--file', type=argparse.FileType('r'), default=sys.stdin)
args = ap.parse_args()
@@ -64,6 +65,8 @@ def check_json_output(expected_items):
'socket': lambda x: True,
'thread': lambda x: True,
'unit': lambda x: True,
+ 'insn per cycle': lambda x: isfloat(x),
+ 'GHz': lambda x: True, # FIXME: it seems unintended for --metric-only
}
input = '[\n' + ','.join(Lines) + '\n]'
for item in json.loads(input):
@@ -78,6 +81,8 @@ def check_json_output(expected_items):
pass
elif count - 1 in expected_items and 'metric-threshold' in item:
pass
+ elif count in expected_items and 'insn per cycle' in item:
+ pass
elif count not in expected_items:
raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
f' in \'{item}\'')
@@ -95,6 +100,8 @@ try:
expected_items = [6, 8]
elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cluster or args.per_cache:
expected_items = [7, 9]
+ elif args.metric_only:
+ expected_items = [1, 2]
else:
# If no option is specified, don't check the number of items.
expected_items = -1
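
With the new flag the linter accepts metric-only lines carrying just one or two JSON fields (a metric name and value, possibly a unit) instead of full counter records; the 'GHz' key is whitelisted with a FIXME because its presence under --metric-only looks unintended. Invocation, as driven by stat+json_output.sh below:

    python3 perf_json_output_lint.py --metric-only --file "${stat_output}"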
diff --git a/tools/perf/tests/shell/lib/stat_output.sh b/tools/perf/tests/shell/lib/stat_output.sh
index 9a176ceae4a3..c2ec7881ec1d 100644
--- a/tools/perf/tests/shell/lib/stat_output.sh
+++ b/tools/perf/tests/shell/lib/stat_output.sh
@@ -148,6 +148,19 @@ check_per_socket()
echo "[Success]"
}
+check_metric_only()
+{
+ echo -n "Checking $1 output: metric only "
+ if [ "$(uname -m)" = "s390x" ] && ! grep '^facilities' /proc/cpuinfo | grep -qw 67
+ then
+ echo "[Skip] CPU-measurement counter facility not installed"
+ return
+ fi
+ perf stat --metric-only $2 -e instructions,cycles true
+ commachecker --metric-only
+ echo "[Success]"
+}
+
# The perf stat options for per-socket, per-core, per-die
# and -A ( no_aggr mode ) uses the info fetched from this
# directory: "/sys/devices/system/cpu/cpu*/topology". For
diff --git a/tools/perf/tests/shell/perftool-testsuite_probe.sh b/tools/perf/tests/shell/perftool-testsuite_probe.sh
index a0fec33a0358..3863df16c19b 100755
--- a/tools/perf/tests/shell/perftool-testsuite_probe.sh
+++ b/tools/perf/tests/shell/perftool-testsuite_probe.sh
@@ -1,7 +1,8 @@
#!/bin/bash
-# perftool-testsuite_probe
+# perftool-testsuite_probe (exclusive)
# SPDX-License-Identifier: GPL-2.0
+[ "$(id -u)" = 0 ] || exit 2
test -d "$(dirname "$0")/base_probe" || exit 2
cd "$(dirname "$0")/base_probe" || exit 2
status=0
diff --git a/tools/perf/tests/shell/probe_vfs_getname.sh b/tools/perf/tests/shell/probe_vfs_getname.sh
index 0c5aacc446b3..c51a32931af6 100755
--- a/tools/perf/tests/shell/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/probe_vfs_getname.sh
@@ -8,6 +8,7 @@
. "$(dirname $0)"/lib/probe.sh
skip_if_no_perf_probe || exit 2
+[ "$(id -u)" = 0 ] || exit 2
# shellcheck source=lib/probe_vfs_getname.sh
. "$(dirname $0)"/lib/probe_vfs_getname.sh
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index 47a26f25db9f..c4bab5b5cc59 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# probe libc's inet_pton & backtrace it with ping
+# probe libc's inet_pton & backtrace it with ping (exclusive)
# Installs a probe on libc's inet_pton function, that will use uprobes,
# then use 'perf trace' on a ping to localhost asking for just one packet
@@ -43,17 +43,8 @@ trace_libc_inet_pton_backtrace() {
echo "((__GI_)?getaddrinfo|text_to_binary_address)\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
echo "(gaih_inet|main)\+0x[[:xdigit:]]+[[:space:]]\(inlined|.*/bin/ping.*\)$" >> $expected
;;
- ppc64|ppc64le)
- eventattr='max-stack=4'
- # Add gaih_inet to expected backtrace only if it is part of libc.
- if nm $libc | grep -F -q gaih_inet.; then
- echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
- fi
- echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
- echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
- ;;
*)
- eventattr='max-stack=3'
+ eventattr='max-stack=4'
echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
;;
esac
@@ -76,14 +67,25 @@ trace_libc_inet_pton_backtrace() {
fi
perf script -i $perf_data | tac | grep -m1 ^ping -B9 | tac > $perf_script
- exec 3<$perf_script
exec 4<$expected
- while read line <&3 && read -r pattern <&4; do
+ while read -r pattern <&4; do
+ echo "Pattern: $pattern"
[ -z "$pattern" ] && break
- echo $line
- echo "$line" | grep -E -q "$pattern"
- if [ $? -ne 0 ] ; then
- printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line"
+
+ found=0
+
+ # Search lines in the perf script result
+ exec 3<$perf_script
+ while read line <&3; do
+ [ -z "$line" ] && break
+ echo " Matching: $line"
+ ! echo "$line" | grep -E -q "$pattern"
+ found=$?
+ [ $found -eq 1 ] && break
+ done
+
+ if [ $found -ne 1 ] ; then
+ printf "FAIL: Didn't find the expected backtrace entry \"%s\"\n" "$pattern"
return 1
fi
done
@@ -103,6 +105,7 @@ delete_libc_inet_pton_event() {
# Check for IPv6 interface existence
ip a sh lo | grep -F -q inet6 || exit 2
+[ "$(id -u)" = 0 ] || exit 2
skip_if_no_perf_probe && \
add_libc_inet_pton_event && \
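
The rewritten matcher drops the old line-by-line pairing, where expected pattern N had to match script line N exactly; each expected frame may now match any line of the captured backtrace, tolerating extra or reordered frames (which is also why the generic arch case grows to max-stack=4 and the ppc64 special case can go away). Ignoring the empty-line early exit, the inner scan reduces to:

    while read -r pattern <&4; do
        [ -z "$pattern" ] && break
        grep -E -q "$pattern" "$perf_script" || return 1
    done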
diff --git a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
index 5940fdc1df37..fd5b10d46915 100755
--- a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
@@ -13,6 +13,7 @@
. "$(dirname "$0")/lib/probe.sh"
skip_if_no_perf_probe || exit 2
+[ "$(id -u)" = 0 ] || exit 2
# shellcheck source=lib/probe_vfs_getname.sh
. "$(dirname "$0")/lib/probe_vfs_getname.sh"
diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh
index 0fc7a909ae9b..ba8d873d3ca7 100755
--- a/tools/perf/tests/shell/record.sh
+++ b/tools/perf/tests/shell/record.sh
@@ -231,7 +231,7 @@ test_cgroup() {
test_leader_sampling() {
echo "Basic leader sampling test"
- if ! perf record -o "${perfdata}" -e "{instructions,instructions}:Su" -- \
+ if ! perf record -o "${perfdata}" -e "{cycles,cycles}:Su" -- \
perf test -w brstack 2> /dev/null
then
echo "Leader sampling [Failed record]"
@@ -243,15 +243,15 @@ test_leader_sampling() {
while IFS= read -r line
do
# Check if the two instruction counts are equal in each record
- instructions=$(echo $line | awk '{for(i=1;i<=NF;i++) if($i=="instructions:") print $(i-1)}')
- if [ $(($index%2)) -ne 0 ] && [ ${instructions}x != ${prev_instructions}x ]
+ cycles=$(echo $line | awk '{for(i=1;i<=NF;i++) if($i=="cycles:") print $(i-1)}')
+ if [ $(($index%2)) -ne 0 ] && [ ${cycles}x != ${prev_cycles}x ]
then
- echo "Leader sampling [Failed inconsistent instructions count]"
+ echo "Leader sampling [Failed inconsistent cycles count]"
err=1
return
fi
index=$(($index+1))
- prev_instructions=$instructions
+ prev_cycles=$cycles
done < $script_output
echo "Basic leader sampling test [Success]"
}
@@ -273,27 +273,42 @@ test_topdown_leader_sampling() {
}
test_precise_max() {
+ local -i skipped=0
+
echo "precise_max attribute test"
- if ! perf stat -e "cycles,instructions" true 2> /dev/null
+ # Just to make sure event cycles is supported for sampling
+ if perf record -o "${perfdata}" -e "cycles" true 2> /dev/null
then
- echo "precise_max attribute [Skipped no hardware events]"
- return
+ if ! perf record -o "${perfdata}" -e "cycles:P" true 2> /dev/null
+ then
+ echo "precise_max attribute [Failed cycles:P event]"
+ err=1
+ return
+ fi
+ else
+ echo "precise_max attribute [Skipped no cycles:P event]"
+ ((skipped+=1))
fi
- # Just to make sure it doesn't fail
- if ! perf record -o "${perfdata}" -e "cycles:P" true 2> /dev/null
+ # On s390 event instructions is not supported for perf record
+ if perf record -o "${perfdata}" -e "instructions" true 2> /dev/null
then
- echo "precise_max attribute [Failed cycles:P event]"
- err=1
- return
+ # On AMD, cycles and instructions events are treated differently
+ if ! perf record -o "${perfdata}" -e "instructions:P" true 2> /dev/null
+ then
+ echo "precise_max attribute [Failed instructions:P event]"
+ err=1
+ return
+ fi
+ else
+ echo "precise_max attribute [Skipped no instructions:P event]"
+ ((skipped+=1))
fi
- # On AMD, cycles and instructions events are treated differently
- if ! perf record -o "${perfdata}" -e "instructions:P" true 2> /dev/null
+ if [ $skipped -eq 2 ]
then
- echo "precise_max attribute [Failed instructions:P event]"
- err=1
- return
+ echo "precise_max attribute [Skipped no hardware events]"
+ else
+ echo "precise_max attribute test [Success]"
fi
- echo "precise_max attribute test [Success]"
}
# raise the limit of file descriptors to minimum
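
Two independent hardenings here: leader sampling moves from a doubled instructions group to {cycles,cycles}:Su, where both group members sample together and each adjacent pair of perf script lines must report identical counts; and test_precise_max now probes the plain event first, only demanding the :P (precise_max) variant when the base event works, counting skips so a machine supporting neither reports Skipped rather than a false Success. Spot-check of the leader-sampling invariant:

    perf record -o perf.data -e '{cycles,cycles}:Su' -- perf test -w brstack
    perf script -i perf.data | grep -c 'cycles:'   # paired lines, equal counts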
diff --git a/tools/perf/tests/shell/record_bpf_filter.sh b/tools/perf/tests/shell/record_bpf_filter.sh
index 1b58ccc1fd88..4d6c3c1b7fb9 100755
--- a/tools/perf/tests/shell/record_bpf_filter.sh
+++ b/tools/perf/tests/shell/record_bpf_filter.sh
@@ -89,7 +89,7 @@ test_bpf_filter_fail() {
test_bpf_filter_group() {
echo "Group bpf-filter test"
- if ! perf record -e task-clock --filter 'period > 1000 || ip > 0' \
+ if ! perf record -e task-clock --filter 'period > 1000, ip > 0' \
-o /dev/null true 2>/dev/null
then
echo "Group bpf-filter test [Failed should succeed]"
@@ -97,7 +97,7 @@ test_bpf_filter_group() {
return
fi
- if ! perf record -e task-clock --filter 'cpu > 0 || ip > 0' \
+ if ! perf record -e task-clock --filter 'period > 1000 , cpu > 0 || ip > 0' \
-o /dev/null true 2>&1 | grep -q PERF_SAMPLE_CPU
then
echo "Group bpf-filter test [Failed forbidden CPU]"
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
index fc2d8cc6e5e0..7a6f6e177402 100755
--- a/tools/perf/tests/shell/stat+csv_output.sh
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -44,6 +44,7 @@ function commachecker()
;; "--per-die") exp=8
;; "--per-cluster") exp=8
;; "--per-cache") exp=8
+ ;; "--metric-only") exp=2
esac
while read line
@@ -75,6 +76,7 @@ check_interval "CSV" "$perf_cmd"
check_event "CSV" "$perf_cmd"
check_per_thread "CSV" "$perf_cmd"
check_per_node "CSV" "$perf_cmd"
+check_metric_only "CSV" "$perf_cmd"
if [ $skip_test -ne 1 ]
then
check_system_wide_no_aggr "CSV" "$perf_cmd"
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
index 6b630d33c328..98fb65274ac4 100755
--- a/tools/perf/tests/shell/stat+json_output.sh
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -173,6 +173,19 @@ check_per_socket()
echo "[Success]"
}
+check_metric_only()
+{
+ echo -n "Checking json output: metric only "
+ if [ "$(uname -m)" = "s390x" ] && ! grep '^facilities' /proc/cpuinfo | grep -qw 67
+ then
+ echo "[Skip] CPU-measurement counter facility not installed"
+ return
+ fi
+ perf stat -j --metric-only -e instructions,cycles -o "${stat_output}" true
+ $PYTHON $pythonchecker --metric-only --file "${stat_output}"
+ echo "[Success]"
+}
+
# The perf stat options for per-socket, per-core, per-die
# and -A ( no_aggr mode ) uses the info fetched from this
# directory: "/sys/devices/system/cpu/cpu*/topology". For
@@ -207,6 +220,7 @@ check_interval
check_event
check_per_thread
check_per_node
+check_metric_only
if [ $skip_test -ne 1 ]
then
check_system_wide_no_aggr
diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh
index cbf2894b2c84..6fee67693ba7 100755
--- a/tools/perf/tests/shell/stat+std_output.sh
+++ b/tools/perf/tests/shell/stat+std_output.sh
@@ -13,7 +13,7 @@ stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX)
event_name=(cpu-clock task-clock context-switches cpu-migrations page-faults stalled-cycles-frontend stalled-cycles-backend cycles instructions branches branch-misses)
event_metric=("CPUs utilized" "CPUs utilized" "/sec" "/sec" "/sec" "frontend cycles idle" "backend cycles idle" "GHz" "insn per cycle" "/sec" "of all branches")
-skip_metric=("stalled cycles per insn" "tma_" "retiring" "frontend_bound" "bad_speculation" "backend_bound")
+skip_metric=("stalled cycles per insn" "tma_" "retiring" "frontend_bound" "bad_speculation" "backend_bound" "TopdownL1" "percent of slots")
cleanup() {
rm -f "${stat_output}"
@@ -30,6 +30,7 @@ trap trap_cleanup EXIT TERM INT
function commachecker()
{
local prefix=1
+ local -i metric_only=0
case "$1"
in "--interval") prefix=2
@@ -41,6 +42,7 @@ function commachecker()
;; "--per-die") prefix=3
;; "--per-cache") prefix=3
;; "--per-cluster") prefix=3
+ ;; "--metric-only") metric_only=1
esac
while read line
@@ -60,6 +62,9 @@ function commachecker()
x=${main_body%#*}
[ "$x" = "" ] && continue
+ # Check metric only - if it has a non-empty result
+ [ $metric_only -eq 1 ] && return 0
+
# Skip metrics without event name
y=${main_body#*#}
for i in "${!skip_metric[@]}"; do
@@ -84,6 +89,8 @@ function commachecker()
exit 1;
}
done < "${stat_output}"
+
+ [ $metric_only -eq 1 ] && exit 1
return 0
}
@@ -95,6 +102,7 @@ check_system_wide "STD" "$perf_cmd"
check_interval "STD" "$perf_cmd"
check_per_thread "STD" "$perf_cmd"
check_per_node "STD" "$perf_cmd"
+check_metric_only "STD" "$perf_cmd"
if [ $skip_test -ne 1 ]
then
check_system_wide_no_aggr "STD" "$perf_cmd"
diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
index 68323d636fb7..8a100a7f2dc1 100755
--- a/tools/perf/tests/shell/stat.sh
+++ b/tools/perf/tests/shell/stat.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf stat tests
# SPDX-License-Identifier: GPL-2.0
@@ -67,43 +67,54 @@ test_topdown_groups() {
echo "Topdown event group test [Skipped event parsing failed]"
return
fi
- if perf stat -e '{slots,topdown-retiring}' true 2>&1 | grep -E -q "<not supported>"
- then
- echo "Topdown event group test [Failed events not supported]"
- err=1
- return
- fi
- if perf stat -e 'instructions,topdown-retiring,slots' true 2>&1 | grep -E -q "<not supported>"
- then
- echo "Topdown event group test [Failed slots not reordered first in no-group case]"
- err=1
- return
- fi
- if perf stat -e '{instructions,topdown-retiring,slots}' true 2>&1 | grep -E -q "<not supported>"
- then
- echo "Topdown event group test [Failed slots not reordered first in single group case]"
- err=1
- return
- fi
- if perf stat -e '{instructions,slots},topdown-retiring' true 2>&1 | grep -E -q "<not supported>"
- then
- echo "Topdown event group test [Failed topdown metrics event not move into slots group]"
- err=1
- return
- fi
- if perf stat -e '{instructions,slots},{topdown-retiring}' true 2>&1 | grep -E -q "<not supported>"
- then
- echo "Topdown event group test [Failed topdown metrics group not merge into slots group]"
- err=1
- return
- fi
- if perf stat -e '{instructions,r400,r8000}' true 2>&1 | grep -E -q "<not supported>"
+ td_err=0
+ do_topdown_group_test() {
+ events=$1
+ failure=$2
+ if perf stat -e "$events" true 2>&1 | grep -E -q "<not supported>"
+ then
+ echo "Topdown event group test [Failed $failure for '$events']"
+ td_err=1
+ return
+ fi
+ }
+ do_topdown_group_test "{slots,topdown-retiring}" "events not supported"
+ do_topdown_group_test "{instructions,r400,r8000}" "raw format slots not reordered first"
+ filler_events=("instructions" "cycles"
+ "context-switches" "faults")
+ for ((i = 0; i < ${#filler_events[@]}; i+=2))
+ do
+ filler1=${filler_events[i]}
+ filler2=${filler_events[i+1]}
+ do_topdown_group_test "$filler1,topdown-retiring,slots" \
+ "slots not reordered first in no-group case"
+ do_topdown_group_test "slots,$filler1,topdown-retiring" \
+ "topdown metrics event not reordered in no-group case"
+ do_topdown_group_test "{$filler1,topdown-retiring,slots}" \
+ "slots not reordered first in single group case"
+ do_topdown_group_test "{$filler1,slots},topdown-retiring" \
+ "topdown metrics event not move into slots group"
+ do_topdown_group_test "topdown-retiring,{$filler1,slots}" \
+ "topdown metrics event not move into slots group last"
+ do_topdown_group_test "{$filler1,slots},{topdown-retiring}" \
+ "topdown metrics group not merge into slots group"
+ do_topdown_group_test "{topdown-retiring},{$filler1,slots}" \
+ "topdown metrics group not merge into slots group last"
+ do_topdown_group_test "{$filler1,slots},$filler2,topdown-retiring" \
+ "non-adjacent topdown metrics group not move into slots group"
+ do_topdown_group_test "$filler2,topdown-retiring,{$filler1,slots}" \
+ "non-adjacent topdown metrics group not move into slots group last"
+ do_topdown_group_test "{$filler1,slots},{$filler2,topdown-retiring}" \
+ "metrics group not merge into slots group"
+ do_topdown_group_test "{$filler1,topdown-retiring},{$filler2,slots}" \
+ "metrics group not merge into slots group last"
+ done
+ if test "$td_err" -eq 0
then
- echo "Topdown event group test [Failed raw format slots not reordered first]"
- err=1
- return
+ echo "Topdown event group test [Success]"
+ else
+ err="$td_err"
fi
- echo "Topdown event group test [Success]"
}
test_topdown_weak_groups() {
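
The table-driven rewrite enumerates the reordering rules perf's event parser is expected to apply on topdown-capable CPUs: slots must come first within its group, and topdown-* events or groups must be pulled into the slots group even with unrelated filler events in between. Every case reduces to the same observable, so any one can be checked by hand:

    perf stat -e '{instructions,slots},topdown-retiring' true 2>&1 | grep '<not supported>' && echo BUG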
diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
index 73e9347e88a9..ee817c66da06 100755
--- a/tools/perf/tests/shell/stat_all_metrics.sh
+++ b/tools/perf/tests/shell/stat_all_metrics.sh
@@ -20,7 +20,13 @@ for m in $(perf list --raw-dump metrics); do
result_err=$?
if [[ $result_err -gt 0 ]]
then
- if [[ "$result" =~ \
+ if [[ "$result" =~ "Cannot resolve IDs for" ]]
+ then
+ echo "Metric contains missing events"
+ echo $result
+ err=1 # Fail
+ continue
+ elif [[ "$result" =~ \
"Access to performance monitoring and observability operations is limited" ]]
then
echo "Permission failure"
diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh
index 8b148b300be1..9c466c0efa85 100755
--- a/tools/perf/tests/shell/stat_all_pmu.sh
+++ b/tools/perf/tests/shell/stat_all_pmu.sh
@@ -2,7 +2,6 @@
# perf all PMU test (exclusive)
# SPDX-License-Identifier: GPL-2.0
-set -e
err=0
result=""
@@ -16,34 +15,55 @@ trap trap_cleanup EXIT TERM INT
# Test all PMU events; however exclude parameterized ones (name contains '?')
for p in $(perf list --raw-dump pmu | sed 's/[[:graph:]]\+?[[:graph:]]\+[[:space:]]//g')
do
- echo "Testing $p"
- result=$(perf stat -e "$p" true 2>&1)
- if echo "$result" | grep -q "$p"
+ echo -n "Testing $p -- "
+ output=$(perf stat -e "$p" true 2>&1)
+ stat_result=$?
+ if echo "$output" | grep -q "$p"
then
# Event seen in output.
- continue
- fi
- if echo "$result" | grep -q "<not supported>"
- then
- # Event not supported, so ignore.
- continue
+ if [ $stat_result -eq 0 ] && ! echo "$output" | grep -q "<not supported>"
+ then
+ # Event supported.
+ echo "supported"
+ continue
+ elif echo "$output" | grep -q "<not supported>"
+ then
+ # Event not supported, so ignore.
+ echo "not supported"
+ continue
+ elif echo "$output" | grep -q "No permission to enable"
+ then
+ # No permissions, so ignore.
+ echo "no permission to enable"
+ continue
+ elif echo "$output" | grep -q "Bad event name"
+ then
+ # Non-existent event.
+ echo "Error: Bad event name"
+ echo "$output"
+ err=1
+ continue
+ fi
fi
- if echo "$result" | grep -q "Access to performance monitoring and observability operations is limited."
+
+ if echo "$output" | grep -q "Access to performance monitoring and observability operations is limited."
then
# Access is limited, so ignore.
+ echo "access limited"
continue
fi
# We failed to see the event and it is supported. Possibly the workload was
# too small so retry with something longer.
- result=$(perf stat -e "$p" perf bench internals synthesize 2>&1)
- if echo "$result" | grep -q "$p"
+ output=$(perf stat -e "$p" perf bench internals synthesize 2>&1)
+ if echo "$output" | grep -q "$p"
then
# Event seen in output.
+ echo "supported"
continue
fi
echo "Error: event '$p' not printed in:"
- echo "$result"
+ echo "$output"
err=1
done
diff --git a/tools/perf/tests/shell/test_arm_spe.sh b/tools/perf/tests/shell/test_arm_spe.sh
index 3258368634f7..a69aab70dd8a 100755
--- a/tools/perf/tests/shell/test_arm_spe.sh
+++ b/tools/perf/tests/shell/test_arm_spe.sh
@@ -107,7 +107,37 @@ arm_spe_system_wide_test() {
arm_spe_report "SPE system-wide testing" $err
}
+arm_spe_discard_test() {
+ echo "SPE discard mode"
+
+ for f in /sys/bus/event_source/devices/arm_spe_*; do
+ if [ -e "$f/format/discard" ]; then
+ cpu=$(cut -c -1 "$f/cpumask")
+ break
+ fi
+ done
+
+ if [ -z $cpu ]; then
+ arm_spe_report "SPE discard mode not present" 2
+ return
+ fi
+
+ # Test can use wildcard SPE instance and Perf will only open the event
+ # on instances that have that format flag. But make sure the target
+ # runs on an instance with discard mode otherwise we're not testing
+ # anything.
+ perf record -o ${perfdata} -e arm_spe/discard/ -N -B --no-bpf-event \
+ -- taskset --cpu-list $cpu true
+
+ if perf report -i ${perfdata} --stats | grep 'AUX events\|AUXTRACE events'; then
+ arm_spe_report "SPE discard mode found unexpected data" 1
+ else
+ arm_spe_report "SPE discard mode" 0
+ fi
+}
+
arm_spe_snapshot_test
arm_spe_system_wide_test
+arm_spe_discard_test
exit $glb_err
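
Discard mode tells SPE to throw the trace data away at collection time, so success is a perf.data file with no AUX/AUXTRACE records at all; the test pins the workload with taskset to a CPU whose SPE instance advertises the format bit. Whether an instance supports it is visible in sysfs (instance name assumed):

    cat /sys/bus/event_source/devices/arm_spe_0/format/discard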
diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh
index 5f14d0cb013f..e01df7581393 100755
--- a/tools/perf/tests/shell/test_brstack.sh
+++ b/tools/perf/tests/shell/test_brstack.sh
@@ -30,7 +30,7 @@ test_user_branches() {
echo "Testing user branch stack sampling"
perf record -o $TMPDIR/perf.data --branch-filter any,save_type,u -- ${TESTPROG} > /dev/null 2>&1
- perf script -i $TMPDIR/perf.data --fields brstacksym | xargs -n1 > $TMPDIR/perf.script
+ perf script -i $TMPDIR/perf.data --fields brstacksym | tr -s ' ' '\n' > $TMPDIR/perf.script
# example of branch entries:
# brstack_foo+0x14/brstack_bar+0x40/P/-/-/0/CALL
@@ -59,7 +59,7 @@ test_filter() {
echo "Testing branch stack filtering permutation ($test_filter_filter,$test_filter_expect)"
perf record -o $TMPDIR/perf.data --branch-filter $test_filter_filter,save_type,u -- ${TESTPROG} > /dev/null 2>&1
- perf script -i $TMPDIR/perf.data --fields brstack | xargs -n1 > $TMPDIR/perf.script
+ perf script -i $TMPDIR/perf.data --fields brstack | tr -s ' ' '\n' | grep '.' > $TMPDIR/perf.script
# fail if we find any branch type that doesn't match any of the expected ones
# also consider UNKNOWN branch types (-)
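
Both splits replace xargs -n1, which re-parses its input and misbehaves on quote and backslash characters that can appear in symbol names; tr -s ' ' '\n' is a plain whitespace-to-newline split with no quoting rules, and the extra grep '.' drops any empty lines it leaves behind:

    printf 'foo+0x14/bar+0x40/P/-/-/0/CALL ' | tr -s ' ' '\n' | grep '.'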
diff --git a/tools/perf/tests/shell/test_data_symbol.sh b/tools/perf/tests/shell/test_data_symbol.sh
index c86da0235059..bbe8277496ae 100755
--- a/tools/perf/tests/shell/test_data_symbol.sh
+++ b/tools/perf/tests/shell/test_data_symbol.sh
@@ -5,8 +5,6 @@
# Leo Yan <leo.yan@linaro.org>, 2022
shelldir=$(dirname "$0")
-# shellcheck source=lib/waiting.sh
-. "${shelldir}"/lib/waiting.sh
# shellcheck source=lib/perf_has_symbol.sh
. "${shelldir}"/lib/perf_has_symbol.sh
@@ -18,7 +16,7 @@ skip_if_no_mem_event() {
skip_if_no_mem_event || exit 2
-skip_test_missing_symbol buf1
+skip_test_missing_symbol workload_datasym_buf1
TEST_PROGRAM="perf test -w datasym"
PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
@@ -26,18 +24,19 @@ ERR_FILE=$(mktemp /tmp/__perf_test.stderr.XXXXX)
check_result() {
# The memory report format is as below:
- # 99.92% ... [.] buf1+0x38
+ # 99.92% ... [.] workload_datasym_buf1+0x38
result=$(perf mem report -i ${PERF_DATA} -s symbol_daddr -q 2>&1 |
- awk '/buf1/ { print $4 }')
+ awk '/workload_datasym_buf1/ { print $4 }')
- # Testing is failed if has no any sample for "buf1"
+ # Testing is failed if has no any sample for "workload_datasym_buf1"
[ -z "$result" ] && return 1
while IFS= read -r line; do
- # The "data1" and "data2" fields in structure "buf1" have
- # offset "0x0" and "0x38", returns failure if detect any
- # other offset value.
- if [ "$line" != "buf1+0x0" ] && [ "$line" != "buf1+0x38" ]; then
+ # The "data1" and "data2" fields in structure
+ # "workload_datasym_buf1" have offset "0x0" and "0x38", returns
+ # failure if detect any other offset value.
+ if [ "$line" != "workload_datasym_buf1+0x0" ] && \
+ [ "$line" != "workload_datasym_buf1+0x38" ]; then
return 1
fi
done <<< "$result"
@@ -60,19 +59,10 @@ echo "Recording workload..."
# specific CPU and test in per-CPU mode.
is_amd=$(grep -E -c 'vendor_id.*AuthenticAMD' /proc/cpuinfo)
if (($is_amd >= 1)); then
- perf mem record -vvv -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM 2>"${ERR_FILE}" &
+ perf mem record -vvv -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM 2>"${ERR_FILE}"
else
- perf mem record -vvv --all-user -o ${PERF_DATA} -- $TEST_PROGRAM 2>"${ERR_FILE}" &
+ perf mem record -vvv --all-user -o ${PERF_DATA} -- $TEST_PROGRAM 2>"${ERR_FILE}"
fi
-PERFPID=$!
-
-wait_for_perf_to_start ${PERFPID} "${ERR_FILE}"
-
-sleep 1
-
-kill $PERFPID
-wait $PERFPID
-
check_result
exit $?
diff --git a/tools/perf/tests/shell/test_intel_pt.sh b/tools/perf/tests/shell/test_intel_pt.sh
index e6f0070975f6..f3a9a040bacc 100755
--- a/tools/perf/tests/shell/test_intel_pt.sh
+++ b/tools/perf/tests/shell/test_intel_pt.sh
@@ -644,6 +644,33 @@ test_pipe()
return 0
}
+test_pause_resume()
+{
+ echo "--- Test with pause / resume ---"
+ if ! perf_record_no_decode -o "${perfdatafile}" -e intel_pt/aux-action=start-paused/u uname ; then
+ echo "SKIP: pause / resume is not supported"
+ return 2
+ fi
+ if ! perf_record_no_bpf -o "${perfdatafile}" \
+ -e intel_pt/aux-action=start-paused/u \
+ -e instructions/period=50000,aux-action=resume,name=Resume/u \
+ -e instructions/period=100000,aux-action=pause,name=Pause/u uname ; then
+ echo "perf record with pause / resume failed"
+ return 1
+ fi
+ if ! perf script -i "${perfdatafile}" --itrace=b -Fperiod,event | \
+ awk 'BEGIN {paused=1;branches=0}
+ /Resume/ {paused=0}
+ /branches/ {if (paused) exit 1;branches=1}
+ /Pause/ {paused=1}
+ END {if (!branches) exit 1}' ; then
+ echo "perf record with pause / resume failed"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
count_result()
{
if [ "$1" -eq 2 ] ; then
@@ -672,6 +699,7 @@ test_power_event || ret=$? ; count_result $ret ; ret=0
test_no_tnt || ret=$? ; count_result $ret ; ret=0
test_event_trace || ret=$? ; count_result $ret ; ret=0
test_pipe || ret=$? ; count_result $ret ; ret=0
+test_pause_resume || ret=$? ; count_result $ret ; ret=0
cleanup
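
The awk filter in test_pause_resume is a small state machine over the decoded stream: it starts paused (matching aux-action=start-paused), flips on the Resume and Pause event names given via -e, fails if any branches line appears while paused, and also fails if no branches are seen at all. The input shape it expects, roughly:

    Resume ...
    branches: ...
    Pause ...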
diff --git a/tools/perf/tests/shell/test_stat_intel_tpebs.sh b/tools/perf/tests/shell/test_stat_intel_tpebs.sh
index f95fc64bf0a7..a330ecdb7ba5 100755
--- a/tools/perf/tests/shell/test_stat_intel_tpebs.sh
+++ b/tools/perf/tests/shell/test_stat_intel_tpebs.sh
@@ -3,20 +3,83 @@
# SPDX-License-Identifier: GPL-2.0
set -e
-grep -q GenuineIntel /proc/cpuinfo || { echo Skipping non-Intel; exit 2; }
-# Use this event for testing because it should exist in all platforms
-event=cache-misses:R
+ParanoidAndNotRoot() {
+ [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
+}
-# Hybrid platforms output like "cpu_atom/cache-misses/R", rather than as above
-alt_name=/cache-misses/R
+if ! grep -q GenuineIntel /proc/cpuinfo
+then
+ echo "Skipping non-Intel"
+ exit 2
+fi
-# Without this cmd option, default value or zero is returned
-#echo "Testing without --record-tpebs"
-#result=$(perf stat -e "$event" true 2>&1)
-#[[ "$result" =~ $event || "$result" =~ $alt_name ]] || exit 1
+if ParanoidAndNotRoot 0
+then
+ echo "Skipping paranoid >0 and not root"
+ exit 2
+fi
-# In platforms that do not support TPEBS, it should execute without error.
-echo "Testing with --record-tpebs"
-result=$(perf stat -e "$event" --record-tpebs -a sleep 0.01 2>&1)
-[[ "$result" =~ "perf record" && "$result" =~ $event || "$result" =~ $alt_name ]] || exit 1
+stat_output=$(mktemp /tmp/__perf_stat_tpebs_output.XXXXX)
+
+cleanup() {
+ rm -rf "${stat_output}"
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cat "${stat_output}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+# Event to be used in tests
+event=cache-misses
+
+if ! perf record -e "${event}:p" -a -o /dev/null sleep 0.01 > "${stat_output}" 2>&1
+then
+ echo "Missing ${event} support"
+ cleanup
+ exit 2
+fi
+
+test_with_record_tpebs() {
+ echo "Testing with --record-tpebs"
+ if ! perf stat -e "${event}:R" --record-tpebs -a sleep 0.01 > "${stat_output}" 2>&1
+ then
+ echo "Testing with --record-tpebs [Failed perf stat]"
+ cat "${stat_output}"
+ exit 1
+ fi
+
+ # Expected output:
+ # $ perf stat --record-tpebs -e cache-misses:R -a sleep 0.01
+ # Events enabled
+ # [ perf record: Woken up 2 times to write data ]
+ # [ perf record: Captured and wrote 0.056 MB - ]
+ #
+ # Performance counter stats for 'system wide':
+ #
+ # 0 cache-misses:R
+ #
+ # 0.013963299 seconds time elapsed
+ if ! grep "perf record" "${stat_output}"
+ then
+ echo "Testing with --record-tpebs [Failed missing perf record]"
+ cat "${stat_output}"
+ exit 1
+ fi
+ if ! grep "${event}:R" "${stat_output}" && ! grep "/${event}/R" "${stat_output}"
+ then
+ echo "Testing with --record-tpebs [Failed missing event name]"
+ cat "${stat_output}"
+ exit 1
+ fi
+ echo "Testing with --record-tpebs [Success]"
+}
+
+test_with_record_tpebs
+cleanup
+exit 0
diff --git a/tools/perf/tests/shell/test_task_analyzer.sh b/tools/perf/tests/shell/test_task_analyzer.sh
index 7d76fc63d995..e194fcf61df3 100755
--- a/tools/perf/tests/shell/test_task_analyzer.sh
+++ b/tools/perf/tests/shell/test_task_analyzer.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# perf script task-analyzer tests
+# perf script task-analyzer tests (exclusive)
# SPDX-License-Identifier: GPL-2.0
tmpdir=$(mktemp -d /tmp/perf-script-task-analyzer-XXXXX)
diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
index 33387c329f92..7adf9755d6de 100755
--- a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
+++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
@@ -4,12 +4,11 @@
set -e
-# Skip if there's no probe command.
-if ! perf | grep probe
-then
- echo "Skip: probe command isn't present"
- exit 2
-fi
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+
+skip_if_no_perf_probe || exit 2
+[ "$(id -u)" == 0 ] || exit 2
# skip if there's no gcc
if ! [ -x "$(command -v gcc)" ]; then
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 708a13f00635..60fccb62c540 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -15,6 +15,7 @@
skip_if_no_perf_probe || exit 2
skip_if_no_perf_trace || exit 2
+[ "$(id -u)" = 0 ] || exit 2
. "$(dirname $0)"/lib/probe_vfs_getname.sh
diff --git a/tools/perf/tests/shell/trace_btf_enum.sh b/tools/perf/tests/shell/trace_btf_enum.sh
index 8d1e6bbeac90..f0b49f7fb57d 100755
--- a/tools/perf/tests/shell/trace_btf_enum.sh
+++ b/tools/perf/tests/shell/trace_btf_enum.sh
@@ -6,13 +6,14 @@ err=0
set -e
syscall="landlock_add_rule"
-non_syscall="timer:hrtimer_init,timer:hrtimer_start"
+non_syscall="timer:hrtimer_setup,timer:hrtimer_start"
TESTPROG="perf test -w landlock"
# shellcheck source=lib/probe.sh
. "$(dirname $0)"/lib/probe.sh
skip_if_no_perf_trace || exit 2
+[ "$(id -u)" = 0 ] || exit 2
check_vmlinux() {
echo "Checking if vmlinux exists"
diff --git a/tools/perf/tests/shell/trace_btf_general.sh b/tools/perf/tests/shell/trace_btf_general.sh
new file mode 100755
index 000000000000..a25d8744695e
--- /dev/null
+++ b/tools/perf/tests/shell/trace_btf_general.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+# perf trace BTF general tests
+# SPDX-License-Identifier: GPL-2.0
+
+err=0
+set -e
+
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+
+file1=$(mktemp /tmp/file1_XXXX)
+file2=$(echo $file1 | sed 's/file1/file2/g')
+
+buffer="buffer content"
+perf_config_tmp=$(mktemp /tmp/.perfconfig_XXXXX)
+
+trap cleanup EXIT TERM INT HUP
+
+check_vmlinux() {
+ echo "Checking if vmlinux BTF exists"
+ if [ ! -f /sys/kernel/btf/vmlinux ]
+ then
+ echo "Skipped due to missing vmlinux BTF"
+ return 2
+ fi
+ return 0
+}
+
+trace_test_string() {
+ echo "Testing perf trace's string augmentation"
+ if ! perf trace -e renameat* --max-events=1 -- mv ${file1} ${file2} 2>&1 | \
+ grep -q -E "^mv/[0-9]+ renameat(2)?\(.*, \"${file1}\", .*, \"${file2}\", .*\) += +[0-9]+$"
+ then
+ echo "String augmentation test failed"
+ err=1
+ fi
+}
+
+trace_test_buffer() {
+ echo "Testing perf trace's buffer augmentation"
+ # echo will insert a newline (\10) at the end of the buffer
+ if ! perf trace -e write --max-events=1 -- echo "${buffer}" 2>&1 | \
+ grep -q -E "^echo/[0-9]+ write\([0-9]+, ${buffer}.*, [0-9]+\) += +[0-9]+$"
+ then
+ echo "Buffer augmentation test failed"
+ err=1
+ fi
+}
+
+trace_test_struct_btf() {
+ echo "Testing perf trace's struct augmentation"
+ if ! perf trace -e clock_nanosleep --force-btf --max-events=1 -- sleep 1 2>&1 | \
+ grep -q -E "^sleep/[0-9]+ clock_nanosleep\(0, 0, \{1,\}, 0x[0-9a-f]+\) += +[0-9]+$"
+ then
+ echo "BTF struct augmentation test failed"
+ err=1
+ fi
+}
+
+cleanup() {
+ rm -rf ${file1} ${file2} ${perf_config_tmp}
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+
+# don't overwrite user's perf config
+trace_config() {
+ export PERF_CONFIG=${perf_config_tmp}
+ perf config trace.show_arg_names=false trace.show_duration=false \
+ trace.show_timestamp=false trace.args_alignment=0
+}
+
+skip_if_no_perf_trace || exit 2
+check_vmlinux || exit 2
+[ "$(id -u)" = 0 ] || exit 2
+
+trace_config
+
+trace_test_string
+
+if [ $err = 0 ]; then
+ trace_test_buffer
+fi
+
+if [ $err = 0 ]; then
+ trace_test_struct_btf
+fi
+
+cleanup
+
+exit $err
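
The three checks in this new test exercise BTF-based argument augmentation one type at a time: string pathnames via renameat*, a raw user buffer via write, and a struct (the timespec of clock_nanosleep, forced through BTF with --force-btf). With the trace_config defaults applied, a passing string-test line looks roughly like (PID and temp names illustrative):

    mv/1234 renameat2(AT_FDCWD, "/tmp/file1_ab12", AT_FDCWD, "/tmp/file2_ab12", RENAME_NOREPLACE) = 0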
diff --git a/tools/perf/tests/shell/trace_exit_race.sh b/tools/perf/tests/shell/trace_exit_race.sh
index fbb0adc33a88..1e247693e756 100755
--- a/tools/perf/tests/shell/trace_exit_race.sh
+++ b/tools/perf/tests/shell/trace_exit_race.sh
@@ -10,6 +10,7 @@
. "$(dirname $0)"/lib/probe.sh
skip_if_no_perf_trace || exit 2
+[ "$(id -u)" = 0 ] || exit 2
if [ "$1" = "-v" ]; then
verbose="1"
diff --git a/tools/perf/tests/shell/trace_record_replay.sh b/tools/perf/tests/shell/trace_record_replay.sh
new file mode 100755
index 000000000000..6b4ed863c1ef
--- /dev/null
+++ b/tools/perf/tests/shell/trace_record_replay.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+# perf trace record and replay
+# SPDX-License-Identifier: GPL-2.0
+
+# Check that perf trace works with record and replay
+
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+
+skip_if_no_perf_trace || exit 2
+[ "$(id -u)" = 0 ] || exit 2
+
+file=$(mktemp /tmp/temporary_file.XXXXX)
+
+perf trace record -o ${file} sleep 1 || exit 1
+if ! perf trace -i ${file} 2>&1 | grep nanosleep; then
+ echo "Failed: cannot find *nanosleep syscall"
+ exit 1
+fi
+
+rm -f ${file}