Diffstat (limited to 'tools/perf/tests/shell')
-rw-r--r--  tools/perf/tests/shell/lib/perf_json_output_lint.py |  96
-rwxr-xr-x  tools/perf/tests/shell/record_offcpu.sh             |  57
-rwxr-xr-x  tools/perf/tests/shell/stat+csv_output.sh           |   7
-rwxr-xr-x  tools/perf/tests/shell/stat+json_output.sh          | 147
-rwxr-xr-x  tools/perf/tests/shell/stat.sh                      |  19
-rwxr-xr-x  tools/perf/tests/shell/stat_all_metrics.sh          |  47
-rwxr-xr-x  tools/perf/tests/shell/test_arm_spe.sh              |  30
-rwxr-xr-x  tools/perf/tests/shell/test_brstack.sh              | 114
8 files changed, 488 insertions, 29 deletions
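
For context: `perf stat -j` emits one JSON object per line, and the linter added below counts the commas on each line and type-checks every key against its checks table. An illustrative session (values and the exact key set vary by machine and aggregation mode; the field names here are taken from the checks table in the script):

    $ perf stat -j true 2>&1
    {"counter-value" : "0.730000", "unit" : "msec", "event" : "task-clock:u", "event-runtime" : 730000, "pcnt-running" : 100.00, "metric-value" : 0.520000, "metric-unit" : "CPUs utilized"}
    ...
    $ perf stat -j true 2>&1 | python3 lib/perf_json_output_lint.py --no-args   # as stat+json_output.sh does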
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
new file mode 100644
index 000000000000..d90f8d102eb9
--- /dev/null
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Basic sanity check of perf JSON output as specified in the man page.
+
+import argparse
+import sys
+import json
+
+ap = argparse.ArgumentParser()
+ap.add_argument('--no-args', action='store_true')
+ap.add_argument('--interval', action='store_true')
+ap.add_argument('--system-wide-no-aggr', action='store_true')
+ap.add_argument('--system-wide', action='store_true')
+ap.add_argument('--event', action='store_true')
+ap.add_argument('--per-core', action='store_true')
+ap.add_argument('--per-thread', action='store_true')
+ap.add_argument('--per-die', action='store_true')
+ap.add_argument('--per-node', action='store_true')
+ap.add_argument('--per-socket', action='store_true')
+args = ap.parse_args()
+
+Lines = sys.stdin.readlines()
+
+def isfloat(num):
+  try:
+    float(num)
+    return True
+  except ValueError:
+    return False
+
+
+def isint(num):
+  try:
+    int(num)
+    return True
+  except ValueError:
+    return False
+
+def is_counter_value(num):
+  return isfloat(num) or num == '<not counted>' or num == '<not supported>'
+
+def check_json_output(expected_items):
+  if expected_items != -1:
+    for line in Lines:
+      if 'failed' not in line:
+        count = 0
+        count = line.count(',')
+        if count != expected_items and count >= 1 and count <= 3 and 'metric-value' in line:
+          # Events that generate >1 metric may have isolated metric
+          # values and possibly other prefixes like interval, core and
+          # aggregate-number.
+          continue
+        if count != expected_items:
+          raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
+                             f' in \'{line}\'')
+  checks = {
+    'aggregate-number': lambda x: isfloat(x),
+    'core': lambda x: True,
+    'counter-value': lambda x: is_counter_value(x),
+    'cgroup': lambda x: True,
+    'cpu': lambda x: isint(x),
+    'die': lambda x: True,
+    'event': lambda x: True,
+    'event-runtime': lambda x: isfloat(x),
+    'interval': lambda x: isfloat(x),
+    'metric-unit': lambda x: True,
+    'metric-value': lambda x: isfloat(x),
+    'node': lambda x: True,
+    'pcnt-running': lambda x: isfloat(x),
+    'socket': lambda x: True,
+    'thread': lambda x: True,
+    'unit': lambda x: True,
+  }
+  input = '[\n' + ','.join(Lines) + '\n]'
+  for item in json.loads(input):
+    for key, value in item.items():
+      if key not in checks:
+        raise RuntimeError(f'Unexpected key: key={key} value={value}')
+      if not checks[key](value):
+        raise RuntimeError(f'Check failed for: key={key} value={value}')
+
+
+try:
+  if args.no_args or args.system_wide or args.event:
+    expected_items = 6
+  elif args.interval or args.per_thread or args.system_wide_no_aggr:
+    expected_items = 7
+  elif args.per_core or args.per_socket or args.per_node or args.per_die:
+    expected_items = 8
+  else:
+    # If no option is specified, don't check the number of items.
+    expected_items = -1
+  check_json_output(expected_items)
+except:
+  print('Test failed for input:\n' + '\n'.join(Lines))
+  raise
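
A hand-run sketch of what the restructured off-cpu test below automates (assumes root and a perf built with BPF skeleton support; the offcpu-time event name is the one the test itself greps for):

    # basic record: the BPF side injects an "offcpu-time" event weighted by off-cpu time
    perf record --off-cpu -e dummy -o perf.data -- sleep 1
    perf evlist -i perf.data | grep offcpu-time
    # the child-task test applies the same idea to the ~400 processes that
    # "perf bench sched messaging -g 10" spawns, then thresholds sample counts
    perf report -i perf.data -s comm -q -n -t ';'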
diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh
index 96e0739f7478..d2eba583a2ac 100755
--- a/tools/perf/tests/shell/record_offcpu.sh
+++ b/tools/perf/tests/shell/record_offcpu.sh
@@ -19,20 +19,26 @@ trap_cleanup() {
 }
 trap trap_cleanup exit term int
 
-test_offcpu() {
-  echo "Basic off-cpu test"
+test_offcpu_priv() {
+  echo "Checking off-cpu privilege"
+
+  if [ `id -u` != 0 ]
   then
-    echo "Basic off-cpu test [Skipped permission]"
+    echo "off-cpu test [Skipped permission]"
     err=2
     return
   fi
-  if perf record --off-cpu -o ${perfdata} --quiet true 2>&1 | grep BUILD_BPF_SKEL
+  if perf record --off-cpu -o /dev/null --quiet true 2>&1 | grep BUILD_BPF_SKEL
   then
-    echo "Basic off-cpu test [Skipped missing BPF support]"
+    echo "off-cpu test [Skipped missing BPF support]"
     err=2
     return
   fi
+}
+
+test_offcpu_basic() {
+  echo "Basic off-cpu test"
+
   if ! perf record --off-cpu -e dummy -o ${perfdata} sleep 1 2> /dev/null
   then
     echo "Basic off-cpu test [Failed record]"
@@ -41,7 +47,7 @@ test_offcpu() {
   fi
   if ! perf evlist -i ${perfdata} | grep -q "offcpu-time"
   then
-    echo "Basic off-cpu test [Failed record]"
+    echo "Basic off-cpu test [Failed no event]"
     err=1
     return
   fi
@@ -54,7 +60,44 @@ test_offcpu() {
   echo "Basic off-cpu test [Success]"
 }
 
-test_offcpu
+test_offcpu_child() {
+  echo "Child task off-cpu test"
+
+  # perf bench sched messaging creates 400 processes
+  if ! perf record --off-cpu -e dummy -o ${perfdata} -- \
+    perf bench sched messaging -g 10 > /dev/null 2>&1
+  then
+    echo "Child task off-cpu test [Failed record]"
+    err=1
+    return
+  fi
+  if ! perf evlist -i ${perfdata} | grep -q "offcpu-time"
+  then
+    echo "Child task off-cpu test [Failed no event]"
+    err=1
+    return
+  fi
+  # each process waits for read and write, so it should be more than 800 events
+  if ! perf report -i ${perfdata} -s comm -q -n -t ';' --percent-limit=90 | \
+    awk -F ";" '{ if (NF > 3 && int($3) < 800) exit 1; }'
+  then
+    echo "Child task off-cpu test [Failed invalid output]"
+    err=1
+    return
+  fi
+  echo "Child task off-cpu test [Success]"
+}
+
+
+test_offcpu_priv
+
+if [ $err = 0 ]; then
+  test_offcpu_basic
+fi
+
+if [ $err = 0 ]; then
+  test_offcpu_child
+fi
 
 cleanup
 exit $err
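
For orientation before the CSV checker changes below: a no-args `perf stat -x,` line carries six commas (seven columns). An illustrative line (column values are machine-dependent, and this exact layout is an assumption mirroring the JSON field order above):

    $ perf stat -x, true 2>&1
    0.52,msec,task-clock:u,520000,100.00,0.551,CPUs utilized

The s390x case accepts either 6 or 7 fields, which is why exp turns into the regex '^[6-7]$' and the plain -ne comparison becomes a =~ match.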
expected $exp in $line" 1>&2 exit 1; } diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh new file mode 100755 index 000000000000..ea8714a36051 --- /dev/null +++ b/tools/perf/tests/shell/stat+json_output.sh @@ -0,0 +1,147 @@ +#!/bin/bash +# perf stat JSON output linter +# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +# Checks various perf stat JSON output commands for the +# correct number of fields. + +set -e + +pythonchecker=$(dirname $0)/lib/perf_json_output_lint.py +if [ "x$PYTHON" == "x" ] +then + if which python3 > /dev/null + then + PYTHON=python3 + elif which python > /dev/null + then + PYTHON=python + else + echo Skipping test, python not detected please set environment variable PYTHON. + exit 2 + fi +fi + +# Return true if perf_event_paranoid is > $1 and not running as root. +function ParanoidAndNotRoot() +{ + [ $(id -u) != 0 ] && [ $(cat /proc/sys/kernel/perf_event_paranoid) -gt $1 ] +} + +check_no_args() +{ + echo -n "Checking json output: no args " + perf stat -j true 2>&1 | $PYTHON $pythonchecker --no-args + echo "[Success]" +} + +check_system_wide() +{ + echo -n "Checking json output: system wide " + if ParanoidAndNotRoot 0 + then + echo "[Skip] paranoia and not root" + return + fi + perf stat -j -a true 2>&1 | $PYTHON $pythonchecker --system-wide + echo "[Success]" +} + +check_system_wide_no_aggr() +{ + echo -n "Checking json output: system wide " + if ParanoidAndNotRoot 0 + then + echo "[Skip] paranoia and not root" + return + fi + echo -n "Checking json output: system wide no aggregation " + perf stat -j -A -a --no-merge true 2>&1 | $PYTHON $pythonchecker --system-wide-no-aggr + echo "[Success]" +} + +check_interval() +{ + echo -n "Checking json output: interval " + perf stat -j -I 1000 true 2>&1 | $PYTHON $pythonchecker --interval + echo "[Success]" +} + + +check_event() +{ + echo -n "Checking json output: event " + perf stat -j -e cpu-clock true 2>&1 | $PYTHON $pythonchecker --event + echo "[Success]" +} + +check_per_core() +{ + echo -n "Checking json output: per core " + if ParanoidAndNotRoot 0 + then + echo "[Skip] paranoia and not root" + return + fi + perf stat -j --per-core -a true 2>&1 | $PYTHON $pythonchecker --per-core + echo "[Success]" +} + +check_per_thread() +{ + echo -n "Checking json output: per thread " + if ParanoidAndNotRoot 0 + then + echo "[Skip] paranoia and not root" + return + fi + perf stat -j --per-thread -a true 2>&1 | $PYTHON $pythonchecker --per-thread + echo "[Success]" +} + +check_per_die() +{ + echo -n "Checking json output: per die " + if ParanoidAndNotRoot 0 + then + echo "[Skip] paranoia and not root" + return + fi + perf stat -j --per-die -a true 2>&1 | $PYTHON $pythonchecker --per-die + echo "[Success]" +} + +check_per_node() +{ + echo -n "Checking json output: per node " + if ParanoidAndNotRoot 0 + then + echo "[Skip] paranoia and not root" + return + fi + perf stat -j --per-node -a true 2>&1 | $PYTHON $pythonchecker --per-node + echo "[Success]" +} + +check_per_socket() +{ + echo -n "Checking json output: per socket " + if ParanoidAndNotRoot 0 + then + echo "[Skip] paranoia and not root" + return + fi + perf stat -j --per-socket -a true 2>&1 | $PYTHON $pythonchecker --per-socket + echo "[Success]" +} + +check_no_args +check_system_wide +check_system_wide_no_aggr +check_interval +check_event +check_per_core +check_per_thread +check_per_die +check_per_node +check_per_socket +exit 0 diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh index 
diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
index 9313ef2739e0..26a51b48aee4 100755
--- a/tools/perf/tests/shell/stat.sh
+++ b/tools/perf/tests/shell/stat.sh
@@ -28,6 +28,24 @@ test_stat_record_report() {
 	echo "stat record and report test [Success]"
 }
 
+test_stat_repeat_weak_groups() {
+	echo "stat repeat weak groups test"
+	if ! perf stat -e '{cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles}' \
+	     true 2>&1 | grep -q 'seconds time elapsed'
+	then
+		echo "stat repeat weak groups test [Skipped event parsing failed]"
+		return
+	fi
+	if ! perf stat -r2 -e '{cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles}:W' \
+	     true > /dev/null 2>&1
+	then
+		echo "stat repeat weak groups test [Failed]"
+		err=1
+		return
+	fi
+	echo "stat repeat weak groups test [Success]"
+}
+
 test_topdown_groups() {
 	# Topdown events must be grouped with the slots event first. Test that
 	# parse-events reorders this.
@@ -75,6 +93,7 @@ test_topdown_weak_groups() {
 
 test_default_stat
 test_stat_record_report
+test_stat_repeat_weak_groups
 test_topdown_groups
 test_topdown_weak_groups
 exit $err
diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
index e7c59e5a7a98..6e79349e42be 100755
--- a/tools/perf/tests/shell/stat_all_metrics.sh
+++ b/tools/perf/tests/shell/stat_all_metrics.sh
@@ -1,26 +1,41 @@
-#!/bin/sh
+#!/bin/bash
 # perf all metrics test
 # SPDX-License-Identifier: GPL-2.0
 
-set -e
-
 err=0
 for m in $(perf list --raw-dump metrics); do
   echo "Testing $m"
   result=$(perf stat -M "$m" true 2>&1)
-  if [[ ! "$result" =~ "$m" ]] && [[ ! "$result" =~ "<not supported>" ]]; then
-    # We failed to see the metric and the events are support. Possibly the
-    # workload was too small so retry with something longer.
-    result=$(perf stat -M "$m" perf bench internals synthesize 2>&1)
-    if [[ ! "$result" =~ "$m" ]]; then
-      echo "Metric '$m' not printed in:"
-      echo "$result"
-      if [[ "$result" =~ "FP_ARITH" && "$err" != "1" ]]; then
-        echo "Skip, not fail, for FP issues"
-        err=2
-      else
-        err=1
-      fi
+  if [[ "$result" =~ "${m:0:50}" ]] || [[ "$result" =~ "<not supported>" ]]
+  then
+    continue
+  fi
+  # Failed so try system wide.
+  result=$(perf stat -M "$m" -a true 2>&1)
+  if [[ "$result" =~ "${m:0:50}" ]]
+  then
+    continue
+  fi
+  # Failed again, possibly the workload was too small so retry with something
+  # longer.
+  result=$(perf stat -M "$m" perf bench internals synthesize 2>&1)
+  if [[ "$result" =~ "${m:0:50}" ]]
+  then
+    continue
+  fi
+  echo "Metric '$m' not printed in:"
+  echo "$result"
+  if [[ "$err" != "1" ]]
+  then
+    err=2
+    if [[ "$result" =~ "FP_ARITH" || "$result" =~ "AMX" ]]
+    then
+      echo "Skip, not fail, for FP issues"
+    elif [[ "$result" =~ "PMM" ]]
+    then
+      echo "Skip, not fail, for Optane memory issues"
+    else
+      err=1
     fi
   fi
 done
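
To reproduce a single failure from the metrics loop above, the same escalation can be applied by hand to one metric (IPC is only an example name; which metrics exist depends on the PMU):

    perf stat -M IPC true                              # small workload
    perf stat -M IPC -a true                           # retry system wide
    perf stat -M IPC perf bench internals synthesize   # retry with a longer workload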
diff --git a/tools/perf/tests/shell/test_arm_spe.sh b/tools/perf/tests/shell/test_arm_spe.sh
index e59044edc406..0d47479adba8 100755
--- a/tools/perf/tests/shell/test_arm_spe.sh
+++ b/tools/perf/tests/shell/test_arm_spe.sh
@@ -23,17 +23,20 @@ glb_err=0
 cleanup_files()
 {
 	rm -f ${perfdata}
+	rm -f ${perfdata}.old
 	exit $glb_err
 }
 
 trap cleanup_files exit term int
 
 arm_spe_report() {
-	if [ $2 != 0 ]; then
+	if [ $2 = 0 ]; then
+		echo "$1: PASS"
+	elif [ $2 = 2 ]; then
+		echo "$1: SKIPPED"
+	else
 		echo "$1: FAIL"
 		glb_err=$2
-	else
-		echo "$1: PASS"
 	fi
 }
 
@@ -85,5 +88,26 @@ arm_spe_snapshot_test() {
 	arm_spe_report "SPE snapshot testing" $err
 }
 
+arm_spe_system_wide_test() {
+	echo "Recording trace with system-wide mode $perfdata"
+
+	perf record -o - -e dummy -a -B true > /dev/null 2>&1
+	if [ $? != 0 ]; then
+		arm_spe_report "SPE system-wide testing" 2
+		return
+	fi
+
+	perf record -o ${perfdata} -e arm_spe// -a --no-bpf-event \
+		-- dd if=/dev/zero of=/dev/null count=100000 > /dev/null 2>&1
+
+	perf_script_samples dd &&
+	perf_report_samples dd
+
+	err=$?
+	arm_spe_report "SPE system-wide testing" $err
+}
+
 arm_spe_snapshot_test
+arm_spe_system_wide_test
+
 exit $glb_err
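
The new branch stack test below parses `perf script --fields brstacksym` output, in which each entry is printed as from_symbol+offset/to_symbol+offset followed by flag fields and, because of the save_type filter argument, a trailing branch type. The sample entry quoted in the script itself:

    foo+0x14/bar+0x40/P/-/-/0/CALL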
[ -x "$(command -v cc)" ]; then + echo "failed: no compiler, install gcc" + exit 2 +fi + +# skip the test if the hardware doesn't support branch stack sampling +perf record -b -o- -B true > /dev/null 2>&1 || exit 2 + +TMPDIR=$(mktemp -d /tmp/__perf_test.program.XXXXX) + +cleanup() { + rm -rf $TMPDIR +} + +trap cleanup exit term int + +gen_test_program() { + # generate test program + cat << EOF > $1 +#define BENCH_RUNS 999999 +int cnt; +void bar(void) { +} /* return */ +void foo(void) { + bar(); /* call */ +} /* return */ +void bench(void) { + void (*foo_ind)(void) = foo; + if ((cnt++) % 3) /* branch (cond) */ + foo(); /* call */ + bar(); /* call */ + foo_ind(); /* call (ind) */ +} +int main(void) +{ + int cnt = 0; + while (1) { + if ((cnt++) > BENCH_RUNS) + break; + bench(); /* call */ + } /* branch (uncond) */ + return 0; +} +EOF +} + +test_user_branches() { + echo "Testing user branch stack sampling" + + gen_test_program "$TEMPDIR/program.c" + cc -fno-inline -g "$TEMPDIR/program.c" -o $TMPDIR/a.out + + perf record -o $TMPDIR/perf.data --branch-filter any,save_type,u -- $TMPDIR/a.out > /dev/null 2>&1 + perf script -i $TMPDIR/perf.data --fields brstacksym | xargs -n1 > $TMPDIR/perf.script + + # example of branch entries: + # foo+0x14/bar+0x40/P/-/-/0/CALL + + set -x + egrep -m1 "^bench\+[^ ]*/foo\+[^ ]*/IND_CALL$" $TMPDIR/perf.script + egrep -m1 "^foo\+[^ ]*/bar\+[^ ]*/CALL$" $TMPDIR/perf.script + egrep -m1 "^bench\+[^ ]*/foo\+[^ ]*/CALL$" $TMPDIR/perf.script + egrep -m1 "^bench\+[^ ]*/bar\+[^ ]*/CALL$" $TMPDIR/perf.script + egrep -m1 "^bar\+[^ ]*/foo\+[^ ]*/RET$" $TMPDIR/perf.script + egrep -m1 "^foo\+[^ ]*/bench\+[^ ]*/RET$" $TMPDIR/perf.script + egrep -m1 "^bench\+[^ ]*/bench\+[^ ]*/COND$" $TMPDIR/perf.script + egrep -m1 "^main\+[^ ]*/main\+[^ ]*/UNCOND$" $TMPDIR/perf.script + set +x + + # some branch types are still not being tested: + # IND COND_CALL COND_RET SYSCALL SYSRET IRQ SERROR NO_TX +} + +# first argument <arg0> is the argument passed to "--branch-stack <arg0>,save_type,u" +# second argument are the expected branch types for the given filter +test_filter() { + local filter=$1 + local expect=$2 + + echo "Testing branch stack filtering permutation ($filter,$expect)" + + gen_test_program "$TEMPDIR/program.c" + cc -fno-inline -g "$TEMPDIR/program.c" -o $TMPDIR/a.out + + perf record -o $TMPDIR/perf.data --branch-filter $filter,save_type,u -- $TMPDIR/a.out > /dev/null 2>&1 + perf script -i $TMPDIR/perf.data --fields brstack | xargs -n1 > $TMPDIR/perf.script + + # fail if we find any branch type that doesn't match any of the expected ones + # also consider UNKNOWN branch types (-) + if egrep -vm1 "^[^ ]*/($expect|-|( *))$" $TMPDIR/perf.script; then + return 1 + fi +} + +set -e + +test_user_branches + +test_filter "any_call" "CALL|IND_CALL|COND_CALL|SYSCALL|IRQ" +test_filter "call" "CALL|SYSCALL" +test_filter "cond" "COND" +test_filter "any_ret" "RET|COND_RET|SYSRET|ERET" + +test_filter "call,cond" "CALL|SYSCALL|COND" +test_filter "any_call,cond" "CALL|IND_CALL|COND_CALL|IRQ|SYSCALL|COND" +test_filter "cond,any_call,any_ret" "COND|CALL|IND_CALL|COND_CALL|SYSCALL|IRQ|RET|COND_RET|SYSRET|ERET" |