Diffstat (limited to 'tools/perf/tests/shell/lib')
 tools/perf/tests/shell/lib/attr.py                   |  8
 tools/perf/tests/shell/lib/perf_has_symbol.sh        |  2
 tools/perf/tests/shell/lib/perf_json_output_lint.py  | 25
 tools/perf/tests/shell/lib/perf_metric_validation.py | 12
 tools/perf/tests/shell/lib/probe_vfs_getname.sh      | 22
 tools/perf/tests/shell/lib/setup_python.sh           |  2
 tools/perf/tests/shell/lib/stat_output.sh            | 13
 tools/perf/tests/shell/lib/waiting.sh                |  2
 8 files changed, 60 insertions(+), 26 deletions(-)
diff --git a/tools/perf/tests/shell/lib/attr.py b/tools/perf/tests/shell/lib/attr.py
index 3db9a7d78715..bfccc727d9b2 100644
--- a/tools/perf/tests/shell/lib/attr.py
+++ b/tools/perf/tests/shell/lib/attr.py
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
-from __future__ import print_function
-
+import configparser
 import os
 import sys
 import glob
@@ -13,11 +12,6 @@ import re
 import shutil
 import subprocess
 
-try:
-    import configparser
-except ImportError:
-    import ConfigParser as configparser
-
 def data_equal(a, b):
     # Allow multiple values in assignment separated by '|'
     a_list = a.split('|')
diff --git a/tools/perf/tests/shell/lib/perf_has_symbol.sh b/tools/perf/tests/shell/lib/perf_has_symbol.sh
index 561c93b75d77..0b35cce0b13d 100644
--- a/tools/perf/tests/shell/lib/perf_has_symbol.sh
+++ b/tools/perf/tests/shell/lib/perf_has_symbol.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
 perf_has_symbol()
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
index 8ddb85586131..c6750ef06c0f 100644
--- a/tools/perf/tests/shell/lib/perf_json_output_lint.py
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -19,6 +19,7 @@ ap.add_argument('--per-cluster', action='store_true')
 ap.add_argument('--per-die', action='store_true')
 ap.add_argument('--per-node', action='store_true')
 ap.add_argument('--per-socket', action='store_true')
+ap.add_argument('--metric-only', action='store_true')
 ap.add_argument('--file', type=argparse.FileType('r'), default=sys.stdin)
 args = ap.parse_args()
 
@@ -44,7 +45,7 @@ def is_counter_value(num):
 
 def check_json_output(expected_items):
   checks = {
-      'aggregate-number': lambda x: isfloat(x),
+      'counters': lambda x: isfloat(x),
       'core': lambda x: True,
      'counter-value': lambda x: is_counter_value(x),
       'cgroup': lambda x: True,
@@ -64,21 +65,25 @@ def check_json_output(expected_items):
       'socket': lambda x: True,
       'thread': lambda x: True,
       'unit': lambda x: True,
+      'insn per cycle': lambda x: isfloat(x),
+      'GHz': lambda x: True,  # FIXME: it seems unintended for --metric-only
   }
   input = '[\n' + ','.join(Lines) + '\n]'
   for item in json.loads(input):
     if expected_items != -1:
       count = len(item)
-      if count != expected_items and count >= 1 and count <= 7 and 'metric-value' in item:
+      if count not in expected_items and count >= 1 and count <= 7 and 'metric-value' in item:
         # Events that generate >1 metric may have isolated metric
         # values and possibly other prefixes like interval, core,
-        # aggregate-number, or event-runtime/pcnt-running from multiplexing.
+        # counters, or event-runtime/pcnt-running from multiplexing.
         pass
-      elif count != expected_items and count >= 1 and count <= 5 and 'metricgroup' in item:
+      elif count not in expected_items and count >= 1 and count <= 5 and 'metricgroup' in item:
         pass
-      elif count == expected_items + 1 and 'metric-threshold' in item:
+      elif count - 1 in expected_items and 'metric-threshold' in item:
         pass
-      elif count != expected_items:
+      elif count in expected_items and 'insn per cycle' in item:
+        pass
+      elif count not in expected_items:
         raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
                            f' in \'{item}\'')
     for key, value in item.items():
@@ -90,11 +95,13 @@ def check_json_output(expected_items):
 
 try:
   if args.no_args or args.system_wide or args.event:
-    expected_items = 7
+    expected_items = [5, 7]
   elif args.interval or args.per_thread or args.system_wide_no_aggr:
-    expected_items = 8
+    expected_items = [6, 8]
   elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cluster or args.per_cache:
-    expected_items = 9
+    expected_items = [7, 9]
+  elif args.metric_only:
+    expected_items = [1, 2]
   else:
     # If no option is specified, don't check the number of items.
     expected_items = -1
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index 0b94216c9c46..dea8ef1977bf 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -35,7 +35,8 @@ class TestError:
 
 
 class Validator:
-    def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='', workload='true', metrics=''):
+    def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='',
+                 workload='true', metrics='', cputype='cpu'):
         self.rulefname = rulefname
         self.reportfname = reportfname
         self.rules = None
@@ -43,6 +44,7 @@ class Validator:
         self.metrics = self.__set_metrics(metrics)
         self.skiplist = set()
         self.tolerance = t
+        self.cputype = cputype
 
         self.workloads = [x for x in workload.split(",") if x]
         self.wlidx = 0  # idx of current workloads
@@ -377,7 +379,7 @@ class Validator:
 
     def _run_perf(self, metric, workload: str):
         tool = 'perf'
-        command = [tool, 'stat', '-j', '-M', f"{metric}", "-a"]
+        command = [tool, 'stat', '--cputype', self.cputype, '-j', '-M', f"{metric}", "-a"]
         wl = workload.split()
         command.extend(wl)
         print(" ".join(command))
@@ -443,6 +445,8 @@ class Validator:
             if 'MetricName' not in m:
                 print("Warning: no metric name")
                 continue
+            if 'Unit' in m and m['Unit'] != self.cputype:
+                continue
             name = m['MetricName'].lower()
             self.metrics.add(name)
             if 'ScaleUnit' in m and (m['ScaleUnit'] == '1%' or m['ScaleUnit'] == '100%'):
@@ -578,6 +582,8 @@ def main() -> None:
     parser.add_argument(
         "-wl", help="Workload to run while data collection", default="true")
     parser.add_argument("-m", help="Metric list to validate", default="")
+    parser.add_argument("-cputype", help="Only test metrics for the given CPU/PMU type",
+                        default="cpu")
     args = parser.parse_args()
     outpath = Path(args.output_dir)
     reportf = Path.joinpath(outpath, 'perf_report.json')
@@ -586,7 +592,7 @@ def main() -> None:
     validator = Validator(args.rule, reportf, debug=args.debug,
                           datafname=datafile, fullrulefname=fullrule,
                           workload=args.wl,
-                          metrics=args.m)
+                          metrics=args.m, cputype=args.cputype)
     ret = validator.test()
     return ret
 
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index 5c33ec7a5a63..88cd0e26d5f6 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
 
 perf probe -l 2>&1 | grep -q probe:vfs_getname
@@ -13,14 +13,28 @@ cleanup_probe_vfs_getname() {
 add_probe_vfs_getname() {
 	add_probe_verbose=$1
 	if [ $had_vfs_getname -eq 1 ] ; then
-		result_filename_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*"
-		line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_filename_re" | sed -r "s/$result_filename_re/\1/")
+		result_initname_re="[[:space:]]+([[:digit:]]+)[[:space:]]+initname.*"
+		line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_initname_re" | sed -r "s/$result_initname_re/\1/")
+
+		# Search the old regular expressions so that this will
+		# pass on older kernels as well.
+		if [ -z "$line" ] ; then
+			result_filename_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*"
+			line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_filename_re" | sed -r "s/$result_filename_re/\1/")
+		fi
+
 		if [ -z "$line" ] ; then
 			result_aname_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->aname = NULL;"
 			line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_aname_re" | sed -r "s/$result_aname_re/\1/")
 		fi
+
+		if [ -z "$line" ] ; then
+			echo "Could not find probeable line"
+			return 2
+		fi
+
 		perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
-			perf probe $add_probe_verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
+			perf probe $add_probe_verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring" || return 1
 	fi
 }
 
diff --git a/tools/perf/tests/shell/lib/setup_python.sh b/tools/perf/tests/shell/lib/setup_python.sh
index c2fce1793538..a58e5536f2ed 100644
--- a/tools/perf/tests/shell/lib/setup_python.sh
+++ b/tools/perf/tests/shell/lib/setup_python.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
 if [ "x$PYTHON" = "x" ]
diff --git a/tools/perf/tests/shell/lib/stat_output.sh b/tools/perf/tests/shell/lib/stat_output.sh
index 9a176ceae4a3..c2ec7881ec1d 100644
--- a/tools/perf/tests/shell/lib/stat_output.sh
+++ b/tools/perf/tests/shell/lib/stat_output.sh
@@ -148,6 +148,19 @@ check_per_socket()
 	echo "[Success]"
 }
 
+check_metric_only()
+{
+	echo -n "Checking $1 output: metric only "
+	if [ "$(uname -m)" = "s390x" ] && ! grep '^facilities' /proc/cpuinfo | grep -qw 67
+	then
+		echo "[Skip] CPU-measurement counter facility not installed"
+		return
+	fi
+	perf stat --metric-only $2 -e instructions,cycles true
+	commachecker --metric-only
+	echo "[Success]"
+}
+
 # The perf stat options for per-socket, per-core, per-die
 # and -A ( no_aggr mode ) uses the info fetched from this
 # directory: "/sys/devices/system/cpu/cpu*/topology". For
diff --git a/tools/perf/tests/shell/lib/waiting.sh b/tools/perf/tests/shell/lib/waiting.sh
index bdd5a7c71591..3a152892e077 100644
--- a/tools/perf/tests/shell/lib/waiting.sh
+++ b/tools/perf/tests/shell/lib/waiting.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
 tenths=date\ +%s%1N