From b6d5799b0b5866dc63be7f032473dc536f865b4d Mon Sep 17 00:00:00 2001 From: David Gow Date: Wed, 26 May 2021 14:24:04 -0700 Subject: kunit: Add 'kunit_shutdown' option Add a new kernel command-line option, 'kunit_shutdown', which allows the user to specify that the kernel poweroff, halt, or reboot after completing all KUnit tests; this is very handy for running KUnit tests on UML or a VM so that the UML/VM process exits cleanly immediately after running all tests without needing a special initramfs. Signed-off-by: David Gow Signed-off-by: Brendan Higgins Reviewed-by: Stephen Boyd Tested-By: Daniel Latypov Reviewed-by: Daniel Latypov Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit_kernel.py | 2 +- tools/testing/kunit/kunit_parser.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py index 89a7d4024e87..dbbceaee1230 100644 --- a/tools/testing/kunit/kunit_kernel.py +++ b/tools/testing/kunit/kunit_kernel.py @@ -208,7 +208,7 @@ class LinuxSourceTree(object): def run_kernel(self, args=None, build_dir='', filter_glob='', timeout=None) -> Iterator[str]: if not args: args = [] - args.extend(['mem=1G', 'console=tty']) + args.extend(['mem=1G', 'console=tty', 'kunit_shutdown=halt']) if filter_glob: args.append('kunit.filter_glob='+filter_glob) self._ops.linux_bin(args, timeout, build_dir) diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py index e8bcc139702e..8d8d4d70b39d 100644 --- a/tools/testing/kunit/kunit_parser.py +++ b/tools/testing/kunit/kunit_parser.py @@ -49,7 +49,7 @@ class TestStatus(Enum): kunit_start_re = re.compile(r'TAP version [0-9]+$') kunit_end_re = re.compile('(List of all partitions:|' - 'Kernel panic - not syncing: VFS:)') + 'Kernel panic - not syncing: VFS:|reboot: System halted)') def isolate_kunit_output(kernel_output) -> Iterator[str]: started = False -- cgit v1.2.3 From 87c9c16317882dd6dbbc07e349bc3223e14f3244 Mon Sep 17 00:00:00 2001 From: Brendan Higgins Date: Wed, 26 May 2021 14:24:06 -0700 Subject: kunit: tool: add support for QEMU Add basic support to run QEMU via kunit_tool. Add support for i386, x86_64, arm, arm64, and a bunch more. 
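To give a feel for how an architecture is described to the tool, each file under qemu_configs/ (and any file passed via --qemu_config) is a small Python module exporting a single QEMU_ARCH object. The sketch below is a hypothetical out-of-tree config; its field values simply mirror the in-tree x86_64 config added later in this patch, and the plain import assumes tools/testing/kunit is on the Python path:

    # my_arch.py - hypothetical config, used as --qemu_config=path/to/my_arch.py
    from qemu_config import QemuArchParams

    QEMU_ARCH = QemuArchParams(linux_arch='x86_64',    # value handed to 'make ARCH='
                               kconfig='''
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y''',                       # merged into the generated .config
                               qemu_arch='x86_64',     # suffix of the qemu-system-* binary to run
                               kernel_path='arch/x86/boot/bzImage',  # image to boot, relative to the build dir
                               kernel_command_line='console=ttyS0',
                               extra_qemu_params=[''])

In-tree architectures are selected directly, e.g. ./tools/testing/kunit/kunit.py run --arch=x86_64, with --cross_compile= added when a cross toolchain is needed.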
Signed-off-by: Brendan Higgins Tested-by: David Gow Reviewed-by: David Gow Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit.py | 57 +++++++-- tools/testing/kunit/kunit_config.py | 7 +- tools/testing/kunit/kunit_kernel.py | 175 +++++++++++++++++++++++----- tools/testing/kunit/kunit_tool_test.py | 18 ++- tools/testing/kunit/qemu_config.py | 16 +++ tools/testing/kunit/qemu_configs/alpha.py | 10 ++ tools/testing/kunit/qemu_configs/arm.py | 13 +++ tools/testing/kunit/qemu_configs/arm64.py | 12 ++ tools/testing/kunit/qemu_configs/i386.py | 10 ++ tools/testing/kunit/qemu_configs/powerpc.py | 12 ++ tools/testing/kunit/qemu_configs/riscv.py | 31 +++++ tools/testing/kunit/qemu_configs/s390.py | 14 +++ tools/testing/kunit/qemu_configs/sparc.py | 10 ++ tools/testing/kunit/qemu_configs/x86_64.py | 10 ++ 14 files changed, 354 insertions(+), 41 deletions(-) create mode 100644 tools/testing/kunit/qemu_config.py create mode 100644 tools/testing/kunit/qemu_configs/alpha.py create mode 100644 tools/testing/kunit/qemu_configs/arm.py create mode 100644 tools/testing/kunit/qemu_configs/arm64.py create mode 100644 tools/testing/kunit/qemu_configs/i386.py create mode 100644 tools/testing/kunit/qemu_configs/powerpc.py create mode 100644 tools/testing/kunit/qemu_configs/riscv.py create mode 100644 tools/testing/kunit/qemu_configs/s390.py create mode 100644 tools/testing/kunit/qemu_configs/sparc.py create mode 100644 tools/testing/kunit/qemu_configs/x86_64.py (limited to 'tools') diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py index 5da8fb3762f9..be8d8d4a4e08 100755 --- a/tools/testing/kunit/kunit.py +++ b/tools/testing/kunit/kunit.py @@ -70,10 +70,10 @@ def build_tests(linux: kunit_kernel.LinuxSourceTree, kunit_parser.print_with_timestamp('Building KUnit Kernel ...') build_start = time.time() - success = linux.build_um_kernel(request.alltests, - request.jobs, - request.build_dir, - request.make_options) + success = linux.build_kernel(request.alltests, + request.jobs, + request.build_dir, + request.make_options) build_end = time.time() if not success: return KunitResult(KunitStatus.BUILD_FAILURE, @@ -189,6 +189,31 @@ def add_common_opts(parser) -> None: 'will get automatically appended.', metavar='kunitconfig') + parser.add_argument('--arch', + help=('Specifies the architecture to run tests under. ' + 'The architecture specified here must match the ' + 'string passed to the ARCH make param, ' + 'e.g. i386, x86_64, arm, um, etc. 
Non-UML ' + 'architectures run on QEMU.'), + type=str, default='um', metavar='arch') + + parser.add_argument('--cross_compile', + help=('Sets make\'s CROSS_COMPILE variable; it should ' + 'be set to a toolchain path prefix (the prefix ' + 'of gcc and other tools in your toolchain, for ' + 'example `sparc64-linux-gnu-` if you have the ' + 'sparc toolchain installed on your system, or ' + '`$HOME/toolchains/microblaze/gcc-9.2.0-nolibc/microblaze-linux/bin/microblaze-linux-` ' + 'if you have downloaded the microblaze toolchain ' + 'from the 0-day website to a directory in your ' + 'home directory called `toolchains`).'), + metavar='cross_compile') + + parser.add_argument('--qemu_config', + help=('Takes a path to a path to a file containing ' + 'a QemuArchParams object.'), + type=str, metavar='qemu_config') + def add_build_opts(parser) -> None: parser.add_argument('--jobs', help='As in the make command, "Specifies the number of ' @@ -270,7 +295,11 @@ def main(argv, linux=None): os.mkdir(cli_args.build_dir) if not linux: - linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, kunitconfig_path=cli_args.kunitconfig) + linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, + kunitconfig_path=cli_args.kunitconfig, + arch=cli_args.arch, + cross_compile=cli_args.cross_compile, + qemu_config_path=cli_args.qemu_config) request = KunitRequest(cli_args.raw_output, cli_args.timeout, @@ -289,7 +318,11 @@ def main(argv, linux=None): os.mkdir(cli_args.build_dir) if not linux: - linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, kunitconfig_path=cli_args.kunitconfig) + linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, + kunitconfig_path=cli_args.kunitconfig, + arch=cli_args.arch, + cross_compile=cli_args.cross_compile, + qemu_config_path=cli_args.qemu_config) request = KunitConfigRequest(cli_args.build_dir, cli_args.make_options) @@ -301,7 +334,11 @@ def main(argv, linux=None): sys.exit(1) elif cli_args.subcommand == 'build': if not linux: - linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, kunitconfig_path=cli_args.kunitconfig) + linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, + kunitconfig_path=cli_args.kunitconfig, + arch=cli_args.arch, + cross_compile=cli_args.cross_compile, + qemu_config_path=cli_args.qemu_config) request = KunitBuildRequest(cli_args.jobs, cli_args.build_dir, @@ -315,7 +352,11 @@ def main(argv, linux=None): sys.exit(1) elif cli_args.subcommand == 'exec': if not linux: - linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir) + linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, + kunitconfig_path=cli_args.kunitconfig, + arch=cli_args.arch, + cross_compile=cli_args.cross_compile, + qemu_config_path=cli_args.qemu_config) exec_request = KunitExecRequest(cli_args.timeout, cli_args.build_dir, diff --git a/tools/testing/kunit/kunit_config.py b/tools/testing/kunit/kunit_config.py index 1e2683dcc0e7..c77c7d2ef622 100644 --- a/tools/testing/kunit/kunit_config.py +++ b/tools/testing/kunit/kunit_config.py @@ -52,8 +52,13 @@ class Kconfig(object): return False return True + def merge_in_entries(self, other: 'Kconfig') -> None: + if other.is_subset_of(self): + return + self._entries = list(self.entries().union(other.entries())) + def write_to_file(self, path: str) -> None: - with open(path, 'w') as f: + with open(path, 'a+') as f: for entry in self.entries(): f.write(str(entry) + '\n') diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py index dbbceaee1230..e1951fa60027 100644 --- a/tools/testing/kunit/kunit_kernel.py +++ 
b/tools/testing/kunit/kunit_kernel.py @@ -6,23 +6,31 @@ # Author: Felix Guo # Author: Brendan Higgins +from __future__ import annotations +import importlib.util import logging import subprocess import os import shutil import signal from typing import Iterator +from typing import Optional from contextlib import ExitStack +from collections import namedtuple + import kunit_config import kunit_parser +import qemu_config KCONFIG_PATH = '.config' KUNITCONFIG_PATH = '.kunitconfig' DEFAULT_KUNITCONFIG_PATH = 'arch/um/configs/kunit_defconfig' BROKEN_ALLCONFIG_PATH = 'tools/testing/kunit/configs/broken_on_uml.config' OUTFILE_PATH = 'test.log' +ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__)) +QEMU_CONFIGS_DIR = os.path.join(ABS_TOOL_PATH, 'qemu_configs') def get_file_path(build_dir, default): if build_dir: @@ -40,6 +48,10 @@ class BuildError(Exception): class LinuxSourceTreeOperations(object): """An abstraction over command line operations performed on a source tree.""" + def __init__(self, linux_arch: str, cross_compile: Optional[str]): + self._linux_arch = linux_arch + self._cross_compile = cross_compile + def make_mrproper(self) -> None: try: subprocess.check_output(['make', 'mrproper'], stderr=subprocess.STDOUT) @@ -48,12 +60,21 @@ class LinuxSourceTreeOperations(object): except subprocess.CalledProcessError as e: raise ConfigError(e.output.decode()) + def make_arch_qemuconfig(self, kconfig: kunit_config.Kconfig) -> None: + pass + + def make_allyesconfig(self, build_dir, make_options) -> None: + raise ConfigError('Only the "um" arch is supported for alltests') + def make_olddefconfig(self, build_dir, make_options) -> None: - command = ['make', 'ARCH=um', 'olddefconfig'] + command = ['make', 'ARCH=' + self._linux_arch, 'olddefconfig'] + if self._cross_compile: + command += ['CROSS_COMPILE=' + self._cross_compile] if make_options: command.extend(make_options) if build_dir: command += ['O=' + build_dir] + print('Populating config with:\n$', ' '.join(command)) try: subprocess.check_output(command, stderr=subprocess.STDOUT) except OSError as e: @@ -61,6 +82,79 @@ class LinuxSourceTreeOperations(object): except subprocess.CalledProcessError as e: raise ConfigError(e.output.decode()) + def make(self, jobs, build_dir, make_options) -> None: + command = ['make', 'ARCH=' + self._linux_arch, '--jobs=' + str(jobs)] + if make_options: + command.extend(make_options) + if self._cross_compile: + command += ['CROSS_COMPILE=' + self._cross_compile] + if build_dir: + command += ['O=' + build_dir] + print('Building with:\n$', ' '.join(command)) + try: + proc = subprocess.Popen(command, + stderr=subprocess.PIPE, + stdout=subprocess.DEVNULL) + except OSError as e: + raise BuildError('Could not call execute make: ' + str(e)) + except subprocess.CalledProcessError as e: + raise BuildError(e.output) + _, stderr = proc.communicate() + if proc.returncode != 0: + raise BuildError(stderr.decode()) + if stderr: # likely only due to build warnings + print(stderr.decode()) + + def run(self, params, timeout, build_dir, outfile) -> None: + pass + + +class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations): + + def __init__(self, qemu_arch_params: qemu_config.QemuArchParams, cross_compile: Optional[str]): + super().__init__(linux_arch=qemu_arch_params.linux_arch, + cross_compile=cross_compile) + self._kconfig = qemu_arch_params.kconfig + self._qemu_arch = qemu_arch_params.qemu_arch + self._kernel_path = qemu_arch_params.kernel_path + self._kernel_command_line = qemu_arch_params.kernel_command_line + ' 
kunit_shutdown=reboot' + self._extra_qemu_params = qemu_arch_params.extra_qemu_params + + def make_arch_qemuconfig(self, base_kunitconfig: kunit_config.Kconfig) -> None: + kconfig = kunit_config.Kconfig() + kconfig.parse_from_string(self._kconfig) + base_kunitconfig.merge_in_entries(kconfig) + + def run(self, params, timeout, build_dir, outfile): + kernel_path = os.path.join(build_dir, self._kernel_path) + qemu_command = ['qemu-system-' + self._qemu_arch, + '-nodefaults', + '-m', '1024', + '-kernel', kernel_path, + '-append', '\'' + ' '.join(params + [self._kernel_command_line]) + '\'', + '-no-reboot', + '-nographic', + '-serial stdio'] + self._extra_qemu_params + print('Running tests with:\n$', ' '.join(qemu_command)) + with open(outfile, 'w') as output: + process = subprocess.Popen(' '.join(qemu_command), + stdin=subprocess.PIPE, + stdout=output, + stderr=subprocess.STDOUT, + text=True, shell=True) + try: + process.wait(timeout=timeout) + except Exception as e: + print(e) + process.terminate() + return process + +class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations): + """An abstraction over command line operations performed on a source tree.""" + + def __init__(self, cross_compile=None): + super().__init__(linux_arch='um', cross_compile=cross_compile) + def make_allyesconfig(self, build_dir, make_options) -> None: kunit_parser.print_with_timestamp( 'Enabling all CONFIGs for UML...') @@ -83,32 +177,16 @@ class LinuxSourceTreeOperations(object): kunit_parser.print_with_timestamp( 'Starting Kernel with all configs takes a few minutes...') - def make(self, jobs, build_dir, make_options) -> None: - command = ['make', 'ARCH=um', '--jobs=' + str(jobs)] - if make_options: - command.extend(make_options) - if build_dir: - command += ['O=' + build_dir] - try: - proc = subprocess.Popen(command, - stderr=subprocess.PIPE, - stdout=subprocess.DEVNULL) - except OSError as e: - raise BuildError('Could not call make command: ' + str(e)) - _, stderr = proc.communicate() - if proc.returncode != 0: - raise BuildError(stderr.decode()) - if stderr: # likely only due to build warnings - print(stderr.decode()) - - def linux_bin(self, params, timeout, build_dir) -> None: + def run(self, params, timeout, build_dir, outfile): """Runs the Linux UML binary. Must be named 'linux'.""" linux_bin = get_file_path(build_dir, 'linux') outfile = get_outfile_path(build_dir) with open(outfile, 'w') as output: process = subprocess.Popen([linux_bin] + params, + stdin=subprocess.PIPE, stdout=output, - stderr=subprocess.STDOUT) + stderr=subprocess.STDOUT, + text=True) process.wait(timeout) def get_kconfig_path(build_dir) -> str: @@ -120,13 +198,54 @@ def get_kunitconfig_path(build_dir) -> str: def get_outfile_path(build_dir) -> str: return get_file_path(build_dir, OUTFILE_PATH) +def get_source_tree_ops(arch: str, cross_compile: Optional[str]) -> LinuxSourceTreeOperations: + config_path = os.path.join(QEMU_CONFIGS_DIR, arch + '.py') + if arch == 'um': + return LinuxSourceTreeOperationsUml(cross_compile=cross_compile) + elif os.path.isfile(config_path): + return get_source_tree_ops_from_qemu_config(config_path, cross_compile)[1] + else: + raise ConfigError(arch + ' is not a valid arch') + +def get_source_tree_ops_from_qemu_config(config_path: str, + cross_compile: Optional[str]) -> tuple[ + str, LinuxSourceTreeOperations]: + # The module name/path has very little to do with where the actual file + # exists (I learned this through experimentation and could not find it + # anywhere in the Python documentation). 
+ # + # Bascially, we completely ignore the actual file location of the config + # we are loading and just tell Python that the module lives in the + # QEMU_CONFIGS_DIR for import purposes regardless of where it actually + # exists as a file. + module_path = '.' + os.path.join(os.path.basename(QEMU_CONFIGS_DIR), os.path.basename(config_path)) + spec = importlib.util.spec_from_file_location(module_path, config_path) + config = importlib.util.module_from_spec(spec) + # TODO(brendanhiggins@google.com): I looked this up and apparently other + # Python projects have noted that pytype complains that "No attribute + # 'exec_module' on _importlib_modulespec._Loader". Disabling for now. + spec.loader.exec_module(config) # pytype: disable=attribute-error + return config.QEMU_ARCH.linux_arch, LinuxSourceTreeOperationsQemu( + config.QEMU_ARCH, cross_compile=cross_compile) + class LinuxSourceTree(object): """Represents a Linux kernel source tree with KUnit tests.""" - def __init__(self, build_dir: str, load_config=True, kunitconfig_path='') -> None: + def __init__( + self, + build_dir: str, + load_config=True, + kunitconfig_path='', + arch=None, + cross_compile=None, + qemu_config_path=None) -> None: signal.signal(signal.SIGINT, self.signal_handler) - - self._ops = LinuxSourceTreeOperations() + if qemu_config_path: + self._arch, self._ops = get_source_tree_ops_from_qemu_config( + qemu_config_path, cross_compile) + else: + self._arch = 'um' if arch is None else arch + self._ops = get_source_tree_ops(self._arch, cross_compile) if not load_config: return @@ -170,8 +289,9 @@ class LinuxSourceTree(object): kconfig_path = get_kconfig_path(build_dir) if build_dir and not os.path.exists(build_dir): os.mkdir(build_dir) - self._kconfig.write_to_file(kconfig_path) try: + self._ops.make_arch_qemuconfig(self._kconfig) + self._kconfig.write_to_file(kconfig_path) self._ops.make_olddefconfig(build_dir, make_options) except ConfigError as e: logging.error(e) @@ -184,6 +304,7 @@ class LinuxSourceTree(object): if os.path.exists(kconfig_path): existing_kconfig = kunit_config.Kconfig() existing_kconfig.read_from_file(kconfig_path) + self._ops.make_arch_qemuconfig(self._kconfig) if not self._kconfig.is_subset_of(existing_kconfig): print('Regenerating .config ...') os.remove(kconfig_path) @@ -194,7 +315,7 @@ class LinuxSourceTree(object): print('Generating .config ...') return self.build_config(build_dir, make_options) - def build_um_kernel(self, alltests, jobs, build_dir, make_options) -> bool: + def build_kernel(self, alltests, jobs, build_dir, make_options) -> bool: try: if alltests: self._ops.make_allyesconfig(build_dir, make_options) @@ -211,8 +332,8 @@ class LinuxSourceTree(object): args.extend(['mem=1G', 'console=tty', 'kunit_shutdown=halt']) if filter_glob: args.append('kunit.filter_glob='+filter_glob) - self._ops.linux_bin(args, timeout, build_dir) outfile = get_outfile_path(build_dir) + self._ops.run(args, timeout, build_dir, outfile) subprocess.call(['stty', 'sane']) with open(outfile, 'r') as file: for line in file: diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index 2e809dd956a7..a3b7f68e1cf9 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -303,7 +303,7 @@ class KUnitMainTest(unittest.TestCase): self.linux_source_mock = mock.Mock() self.linux_source_mock.build_reconfig = mock.Mock(return_value=True) - self.linux_source_mock.build_um_kernel = mock.Mock(return_value=True) + self.linux_source_mock.build_kernel = 
mock.Mock(return_value=True) self.linux_source_mock.run_kernel = mock.Mock(return_value=all_passed_log) def test_config_passes_args_pass(self): @@ -314,7 +314,7 @@ class KUnitMainTest(unittest.TestCase): def test_build_passes_args_pass(self): kunit.main(['build'], self.linux_source_mock) self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0) - self.linux_source_mock.build_um_kernel.assert_called_once_with(False, 8, '.kunit', None) + self.linux_source_mock.build_kernel.assert_called_once_with(False, 8, '.kunit', None) self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0) def test_exec_passes_args_pass(self): @@ -396,7 +396,7 @@ class KUnitMainTest(unittest.TestCase): def test_build_builddir(self): build_dir = '.kunit' kunit.main(['build', '--build_dir', build_dir], self.linux_source_mock) - self.linux_source_mock.build_um_kernel.assert_called_once_with(False, 8, build_dir, None) + self.linux_source_mock.build_kernel.assert_called_once_with(False, 8, build_dir, None) def test_exec_builddir(self): build_dir = '.kunit' @@ -410,14 +410,22 @@ class KUnitMainTest(unittest.TestCase): mock_linux_init.return_value = self.linux_source_mock kunit.main(['run', '--kunitconfig=mykunitconfig']) # Just verify that we parsed and initialized it correctly here. - mock_linux_init.assert_called_once_with('.kunit', kunitconfig_path='mykunitconfig') + mock_linux_init.assert_called_once_with('.kunit', + kunitconfig_path='mykunitconfig', + arch='um', + cross_compile=None, + qemu_config_path=None) @mock.patch.object(kunit_kernel, 'LinuxSourceTree') def test_config_kunitconfig(self, mock_linux_init): mock_linux_init.return_value = self.linux_source_mock kunit.main(['config', '--kunitconfig=mykunitconfig']) # Just verify that we parsed and initialized it correctly here. - mock_linux_init.assert_called_once_with('.kunit', kunitconfig_path='mykunitconfig') + mock_linux_init.assert_called_once_with('.kunit', + kunitconfig_path='mykunitconfig', + arch='um', + cross_compile=None, + qemu_config_path=None) if __name__ == '__main__': unittest.main() diff --git a/tools/testing/kunit/qemu_config.py b/tools/testing/kunit/qemu_config.py new file mode 100644 index 000000000000..1672f6184e95 --- /dev/null +++ b/tools/testing/kunit/qemu_config.py @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Collection of configs for building non-UML kernels and running them on QEMU. +# +# Copyright (C) 2021, Google LLC. 
+# Author: Brendan Higgins + +from collections import namedtuple + + +QemuArchParams = namedtuple('QemuArchParams', ['linux_arch', + 'kconfig', + 'qemu_arch', + 'kernel_path', + 'kernel_command_line', + 'extra_qemu_params']) diff --git a/tools/testing/kunit/qemu_configs/alpha.py b/tools/testing/kunit/qemu_configs/alpha.py new file mode 100644 index 000000000000..5d0c0cff03bd --- /dev/null +++ b/tools/testing/kunit/qemu_configs/alpha.py @@ -0,0 +1,10 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='alpha', + kconfig=''' +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y''', + qemu_arch='alpha', + kernel_path='arch/alpha/boot/vmlinux', + kernel_command_line='console=ttyS0', + extra_qemu_params=['']) diff --git a/tools/testing/kunit/qemu_configs/arm.py b/tools/testing/kunit/qemu_configs/arm.py new file mode 100644 index 000000000000..b9c2a35e0296 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/arm.py @@ -0,0 +1,13 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='arm', + kconfig=''' +CONFIG_ARCH_VIRT=y +CONFIG_SERIAL_AMBA_PL010=y +CONFIG_SERIAL_AMBA_PL010_CONSOLE=y +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''', + qemu_arch='arm', + kernel_path='arch/arm/boot/zImage', + kernel_command_line='console=ttyAMA0', + extra_qemu_params=['-machine virt']) diff --git a/tools/testing/kunit/qemu_configs/arm64.py b/tools/testing/kunit/qemu_configs/arm64.py new file mode 100644 index 000000000000..517c04459f47 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/arm64.py @@ -0,0 +1,12 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='arm64', + kconfig=''' +CONFIG_SERIAL_AMBA_PL010=y +CONFIG_SERIAL_AMBA_PL010_CONSOLE=y +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''', + qemu_arch='aarch64', + kernel_path='arch/arm64/boot/Image.gz', + kernel_command_line='console=ttyAMA0', + extra_qemu_params=['-machine virt', '-cpu cortex-a57']) diff --git a/tools/testing/kunit/qemu_configs/i386.py b/tools/testing/kunit/qemu_configs/i386.py new file mode 100644 index 000000000000..aed3ffd3937d --- /dev/null +++ b/tools/testing/kunit/qemu_configs/i386.py @@ -0,0 +1,10 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='i386', + kconfig=''' +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y''', + qemu_arch='x86_64', + kernel_path='arch/x86/boot/bzImage', + kernel_command_line='console=ttyS0', + extra_qemu_params=['']) diff --git a/tools/testing/kunit/qemu_configs/powerpc.py b/tools/testing/kunit/qemu_configs/powerpc.py new file mode 100644 index 000000000000..35e9de24f0db --- /dev/null +++ b/tools/testing/kunit/qemu_configs/powerpc.py @@ -0,0 +1,12 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='powerpc', + kconfig=''' +CONFIG_PPC64=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_HVC_CONSOLE=y''', + qemu_arch='ppc64', + kernel_path='vmlinux', + kernel_command_line='console=ttyS0', + extra_qemu_params=['-M pseries', '-cpu power8']) diff --git a/tools/testing/kunit/qemu_configs/riscv.py b/tools/testing/kunit/qemu_configs/riscv.py new file mode 100644 index 000000000000..9e528087cd7c --- /dev/null +++ b/tools/testing/kunit/qemu_configs/riscv.py @@ -0,0 +1,31 @@ +from ..qemu_config import QemuArchParams +import os +import os.path +import sys + +GITHUB_OPENSBI_URL = 'https://github.com/qemu/qemu/raw/master/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin' +OPENSBI_FILE = 
os.path.basename(GITHUB_OPENSBI_URL) + +if not os.path.isfile(OPENSBI_FILE): + print('\n\nOpenSBI file is not in the current working directory.\n' + 'Would you like me to download it for you from:\n' + GITHUB_OPENSBI_URL + ' ?\n') + response = input('yes/[no]: ') + if response.strip() == 'yes': + os.system('wget ' + GITHUB_OPENSBI_URL) + else: + sys.exit() + +QEMU_ARCH = QemuArchParams(linux_arch='riscv', + kconfig=''' +CONFIG_SOC_VIRT=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_EARLYCON_RISCV_SBI=y''', + qemu_arch='riscv64', + kernel_path='arch/riscv/boot/Image', + kernel_command_line='console=ttyS0', + extra_qemu_params=[ + '-machine virt', + '-cpu rv64', + '-bios opensbi-riscv64-generic-fw_dynamic.bin']) diff --git a/tools/testing/kunit/qemu_configs/s390.py b/tools/testing/kunit/qemu_configs/s390.py new file mode 100644 index 000000000000..e310bd521113 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/s390.py @@ -0,0 +1,14 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='s390', + kconfig=''' +CONFIG_EXPERT=y +CONFIG_TUNE_ZEC12=y +CONFIG_NUMA=y +CONFIG_MODULES=y''', + qemu_arch='s390x', + kernel_path='arch/s390/boot/bzImage', + kernel_command_line='console=ttyS0', + extra_qemu_params=[ + '-machine s390-ccw-virtio', + '-cpu qemu',]) diff --git a/tools/testing/kunit/qemu_configs/sparc.py b/tools/testing/kunit/qemu_configs/sparc.py new file mode 100644 index 000000000000..27f474e7ad6e --- /dev/null +++ b/tools/testing/kunit/qemu_configs/sparc.py @@ -0,0 +1,10 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='sparc', + kconfig=''' +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y''', + qemu_arch='sparc', + kernel_path='arch/sparc/boot/zImage', + kernel_command_line='console=ttyS0 mem=256M', + extra_qemu_params=['-m 256']) diff --git a/tools/testing/kunit/qemu_configs/x86_64.py b/tools/testing/kunit/qemu_configs/x86_64.py new file mode 100644 index 000000000000..77ab1aeee8a3 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/x86_64.py @@ -0,0 +1,10 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='x86_64', + kconfig=''' +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y''', + qemu_arch='x86_64', + kernel_path='arch/x86/boot/bzImage', + kernel_command_line='console=ttyS0', + extra_qemu_params=['']) -- cgit v1.2.3 From d9d6b8225e7319fde69b1aa59c3bb66d501f3845 Mon Sep 17 00:00:00 2001 From: David Gow Date: Fri, 21 May 2021 21:42:40 -0700 Subject: kunit: Move default config from arch/um -> tools/testing/kunit The default .kunitconfig file is currently kept in arch/um/configs/kunit_defconfig, but -- with the impending QEMU patch -- will no-longer be exclusively used for UML-based kernels. Move it alongside the other KUnit configs in tools/testing/kunit/configs, and give it a name which matches the existing all_tests.config and broken_on_uml.config files. Also update the Getting Started documentation to point to the new file. 
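As a quick illustration of the new location in use (a sketch only; it assumes the interpreter is started from the kernel source root with tools/testing/kunit on the Python path), the moved default config can be read back with the tool's existing Kconfig helper:

    import kunit_config

    kconfig = kunit_config.Kconfig()
    kconfig.read_from_file('tools/testing/kunit/configs/default.config')
    for entry in kconfig.entries():
        # Prints the three options from the file, in no particular order:
        # CONFIG_KUNIT=y, CONFIG_KUNIT_EXAMPLE_TEST=y, CONFIG_KUNIT_ALL_TESTS=y
        print(entry)

This is the same path the tool now uses as DEFAULT_KUNITCONFIG_PATH, i.e. the default .kunitconfig when the user does not supply one.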
Signed-off-by: David Gow Reviewed-by: Brendan Higgins Signed-off-by: Shuah Khan --- Documentation/dev-tools/kunit/start.rst | 2 +- arch/um/configs/kunit_defconfig | 3 --- tools/testing/kunit/configs/default.config | 3 +++ tools/testing/kunit/kunit_kernel.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) delete mode 100644 arch/um/configs/kunit_defconfig create mode 100644 tools/testing/kunit/configs/default.config (limited to 'tools') diff --git a/Documentation/dev-tools/kunit/start.rst b/Documentation/dev-tools/kunit/start.rst index aa56d7ca6bfb..62d8462db4df 100644 --- a/Documentation/dev-tools/kunit/start.rst +++ b/Documentation/dev-tools/kunit/start.rst @@ -36,7 +36,7 @@ A good starting point for a ``.kunitconfig`` is the KUnit defconfig: .. code-block:: bash cd $PATH_TO_LINUX_REPO - cp arch/um/configs/kunit_defconfig .kunitconfig + cp tools/testing/kunit/configs/default.config .kunitconfig You can then add any other Kconfig options you wish, e.g.: diff --git a/arch/um/configs/kunit_defconfig b/arch/um/configs/kunit_defconfig deleted file mode 100644 index e67af7b9f1bb..000000000000 --- a/arch/um/configs/kunit_defconfig +++ /dev/null @@ -1,3 +0,0 @@ -CONFIG_KUNIT=y -CONFIG_KUNIT_EXAMPLE_TEST=y -CONFIG_KUNIT_ALL_TESTS=y diff --git a/tools/testing/kunit/configs/default.config b/tools/testing/kunit/configs/default.config new file mode 100644 index 000000000000..e67af7b9f1bb --- /dev/null +++ b/tools/testing/kunit/configs/default.config @@ -0,0 +1,3 @@ +CONFIG_KUNIT=y +CONFIG_KUNIT_EXAMPLE_TEST=y +CONFIG_KUNIT_ALL_TESTS=y diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py index e1951fa60027..90bc007f1f93 100644 --- a/tools/testing/kunit/kunit_kernel.py +++ b/tools/testing/kunit/kunit_kernel.py @@ -26,7 +26,7 @@ import qemu_config KCONFIG_PATH = '.config' KUNITCONFIG_PATH = '.kunitconfig' -DEFAULT_KUNITCONFIG_PATH = 'arch/um/configs/kunit_defconfig' +DEFAULT_KUNITCONFIG_PATH = 'tools/testing/kunit/configs/default.config' BROKEN_ALLCONFIG_PATH = 'tools/testing/kunit/configs/broken_on_uml.config' OUTFILE_PATH = 'test.log' ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__)) -- cgit v1.2.3 From 8a5124c0f33c65a0d94aacac8294e90a87ecf3f6 Mon Sep 17 00:00:00 2001 From: David Gow Date: Fri, 21 May 2021 21:42:41 -0700 Subject: kunit: Remove the unused all_tests.config This isn't used anywhere. While it's possible that people were manually referencing it, the new default config (in default.config in the same path) provides equivalent functionality. Signed-off-by: David Gow Reviewed-by: Brendan Higgins Signed-off-by: Shuah Khan --- tools/testing/kunit/configs/all_tests.config | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 tools/testing/kunit/configs/all_tests.config (limited to 'tools') diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config deleted file mode 100644 index 9235b7d42d38..000000000000 --- a/tools/testing/kunit/configs/all_tests.config +++ /dev/null @@ -1,3 +0,0 @@ -CONFIG_KUNIT=y -CONFIG_KUNIT_TEST=y -CONFIG_KUNIT_EXAMPLE_TEST=y -- cgit v1.2.3 From b29b14f11d8803e59645644363d7b1cb314fff3f Mon Sep 17 00:00:00 2001 From: Daniel Latypov Date: Wed, 26 May 2021 01:22:17 -0700 Subject: kunit: tool: internal refactor of parser input handling Note: this does not change the parser behavior at all (except for making one error message more useful). This is just an internal refactor. The TAP output parser currently operates over a List[str]. 
This works, but we only ever need to be able to "peek" at the current line and the ability to "pop" it off. Also, using a List means we need to wait for all the output before we can start parsing. While this is not an issue for most tests which are really lightweight, we do have some longer (~5 minutes) tests. This patch introduces an LineStream wrapper class that * Exposes a peek()/pop() interface instead of manipulating an array * this allows us to more easily add debugging code [1] * Can consume an input from a generator * we can now parse results as tests are running (the parser code currently doesn't print until the end, so no impact yet). * Tracks the current line number to print better error messages * Would allow us to add additional features more easily, e.g. storing N previous lines so we can print out invalid lines in context, etc. [1] The parsing logic is currently quite fragile. E.g. it'll often say the kernel "CRASHED" if there's something slightly wrong with the output format. When debugging a test that had some memory corruption issues, it resulted in very misleading errors from the parser. Now we could easily add this to trace all the lines consumed and why +import inspect ... def pop(self) -> str: n = self._next + print(f'popping {n[0]}: {n[1].ljust(40, " ")}| caller={inspect.stack()[1].function}') Example output: popping 77: TAP version 14 | caller=parse_tap_header popping 78: 1..1 | caller=parse_test_plan popping 79: # Subtest: kunit_executor_test | caller=parse_subtest_header popping 80: 1..2 | caller=parse_subtest_plan popping 81: ok 1 - parse_filter_test | caller=parse_ok_not_ok_test_case popping 82: ok 2 - filter_subsuite_test | caller=parse_ok_not_ok_test_case popping 83: ok 1 - kunit_executor_test | caller=parse_ok_not_ok_test_suite If we introduce an invalid line, we can see the parser go down the wrong path: popping 77: TAP version 14 | caller=parse_tap_header popping 78: 1..1 | caller=parse_test_plan popping 79: # Subtest: kunit_executor_test | caller=parse_subtest_header popping 80: 1..2 | caller=parse_subtest_plan popping 81: 1..2 # this is invalid! 
| caller=parse_ok_not_ok_test_case popping 82: ok 1 - parse_filter_test | caller=parse_ok_not_ok_test_case popping 83: ok 2 - filter_subsuite_test | caller=parse_ok_not_ok_test_case popping 84: ok 1 - kunit_executor_test | caller=parse_ok_not_ok_test_case [ERROR] ran out of lines before end token Signed-off-by: Daniel Latypov Reviewed-by: David Gow Acked-by: Brendan Higgins Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit_parser.py | 136 +++++++++++++++++++++------------ tools/testing/kunit/kunit_tool_test.py | 18 +++-- 2 files changed, 99 insertions(+), 55 deletions(-) (limited to 'tools') diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py index 8d8d4d70b39d..2b7dbba4fb7f 100644 --- a/tools/testing/kunit/kunit_parser.py +++ b/tools/testing/kunit/kunit_parser.py @@ -47,22 +47,63 @@ class TestStatus(Enum): NO_TESTS = auto() FAILURE_TO_PARSE_TESTS = auto() +class LineStream: + """Provides a peek()/pop() interface over an iterator of (line#, text).""" + _lines: Iterator[Tuple[int, str]] + _next: Tuple[int, str] + _done: bool + + def __init__(self, lines: Iterator[Tuple[int, str]]): + self._lines = lines + self._done = False + self._next = (0, '') + self._get_next() + + def _get_next(self) -> None: + try: + self._next = next(self._lines) + except StopIteration: + self._done = True + + def peek(self) -> str: + return self._next[1] + + def pop(self) -> str: + n = self._next + self._get_next() + return n[1] + + def __bool__(self) -> bool: + return not self._done + + # Only used by kunit_tool_test.py. + def __iter__(self) -> Iterator[str]: + while bool(self): + yield self.pop() + + def line_number(self) -> int: + return self._next[0] + kunit_start_re = re.compile(r'TAP version [0-9]+$') kunit_end_re = re.compile('(List of all partitions:|' 'Kernel panic - not syncing: VFS:|reboot: System halted)') -def isolate_kunit_output(kernel_output) -> Iterator[str]: - started = False - for line in kernel_output: - line = line.rstrip() # line always has a trailing \n - if kunit_start_re.search(line): - prefix_len = len(line.split('TAP version')[0]) - started = True - yield line[prefix_len:] if prefix_len > 0 else line - elif kunit_end_re.search(line): - break - elif started: - yield line[prefix_len:] if prefix_len > 0 else line +def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream: + def isolate_kunit_output(kernel_output: Iterable[str]) -> Iterator[Tuple[int, str]]: + line_num = 0 + started = False + for line in kernel_output: + line_num += 1 + line = line.rstrip() # line always has a trailing \n + if kunit_start_re.search(line): + prefix_len = len(line.split('TAP version')[0]) + started = True + yield line_num, line[prefix_len:] + elif kunit_end_re.search(line): + break + elif started: + yield line_num, line[prefix_len:] + return LineStream(lines=isolate_kunit_output(kernel_output)) def raw_output(kernel_output) -> None: for line in kernel_output: @@ -97,14 +138,14 @@ def print_log(log) -> None: TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*#).*$') -def consume_non_diagnostic(lines: List[str]) -> None: - while lines and not TAP_ENTRIES.match(lines[0]): - lines.pop(0) +def consume_non_diagnostic(lines: LineStream) -> None: + while lines and not TAP_ENTRIES.match(lines.peek()): + lines.pop() -def save_non_diagnostic(lines: List[str], test_case: TestCase) -> None: - while lines and not TAP_ENTRIES.match(lines[0]): - test_case.log.append(lines[0]) - lines.pop(0) +def save_non_diagnostic(lines: LineStream, test_case: 
TestCase) -> None: + while lines and not TAP_ENTRIES.match(lines.peek()): + test_case.log.append(lines.peek()) + lines.pop() OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text']) @@ -112,18 +153,18 @@ OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$') OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$') -def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool: +def parse_ok_not_ok_test_case(lines: LineStream, test_case: TestCase) -> bool: save_non_diagnostic(lines, test_case) if not lines: test_case.status = TestStatus.TEST_CRASHED return True - line = lines[0] + line = lines.peek() match = OK_NOT_OK_SUBTEST.match(line) while not match and lines: - line = lines.pop(0) + line = lines.pop() match = OK_NOT_OK_SUBTEST.match(line) if match: - test_case.log.append(lines.pop(0)) + test_case.log.append(lines.pop()) test_case.name = match.group(2) if test_case.status == TestStatus.TEST_CRASHED: return True @@ -138,14 +179,14 @@ def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool: SUBTEST_DIAGNOSTIC = re.compile(r'^[\s]+# (.*)$') DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^[\s]+# .*?: kunit test case crashed!$') -def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool: +def parse_diagnostic(lines: LineStream, test_case: TestCase) -> bool: save_non_diagnostic(lines, test_case) if not lines: return False - line = lines[0] + line = lines.peek() match = SUBTEST_DIAGNOSTIC.match(line) if match: - test_case.log.append(lines.pop(0)) + test_case.log.append(lines.pop()) crash_match = DIAGNOSTIC_CRASH_MESSAGE.match(line) if crash_match: test_case.status = TestStatus.TEST_CRASHED @@ -153,7 +194,7 @@ def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool: else: return False -def parse_test_case(lines: List[str]) -> Optional[TestCase]: +def parse_test_case(lines: LineStream) -> Optional[TestCase]: test_case = TestCase() save_non_diagnostic(lines, test_case) while parse_diagnostic(lines, test_case): @@ -165,24 +206,24 @@ def parse_test_case(lines: List[str]) -> Optional[TestCase]: SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$') -def parse_subtest_header(lines: List[str]) -> Optional[str]: +def parse_subtest_header(lines: LineStream) -> Optional[str]: consume_non_diagnostic(lines) if not lines: return None - match = SUBTEST_HEADER.match(lines[0]) + match = SUBTEST_HEADER.match(lines.peek()) if match: - lines.pop(0) + lines.pop() return match.group(1) else: return None SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)') -def parse_subtest_plan(lines: List[str]) -> Optional[int]: +def parse_subtest_plan(lines: LineStream) -> Optional[int]: consume_non_diagnostic(lines) - match = SUBTEST_PLAN.match(lines[0]) + match = SUBTEST_PLAN.match(lines.peek()) if match: - lines.pop(0) + lines.pop() return int(match.group(1)) else: return None @@ -199,17 +240,17 @@ def max_status(left: TestStatus, right: TestStatus) -> TestStatus: else: return TestStatus.SUCCESS -def parse_ok_not_ok_test_suite(lines: List[str], +def parse_ok_not_ok_test_suite(lines: LineStream, test_suite: TestSuite, expected_suite_index: int) -> bool: consume_non_diagnostic(lines) if not lines: test_suite.status = TestStatus.TEST_CRASHED return False - line = lines[0] + line = lines.peek() match = OK_NOT_OK_MODULE.match(line) if match: - lines.pop(0) + lines.pop() if match.group(1) == 'ok': test_suite.status = TestStatus.SUCCESS else: @@ -231,7 +272,7 @@ def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus: 
max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases) return max_status(max_test_case_status, test_suite.status) -def parse_test_suite(lines: List[str], expected_suite_index: int) -> Optional[TestSuite]: +def parse_test_suite(lines: LineStream, expected_suite_index: int) -> Optional[TestSuite]: if not lines: return None consume_non_diagnostic(lines) @@ -257,26 +298,26 @@ def parse_test_suite(lines: List[str], expected_suite_index: int) -> Optional[Te print_with_timestamp(red('[ERROR] ') + 'ran out of lines before end token') return test_suite else: - print('failed to parse end of suite' + lines[0]) + print(f'failed to parse end of suite "{name}", at line {lines.line_number()}: {lines.peek()}') return None TAP_HEADER = re.compile(r'^TAP version 14$') -def parse_tap_header(lines: List[str]) -> bool: +def parse_tap_header(lines: LineStream) -> bool: consume_non_diagnostic(lines) - if TAP_HEADER.match(lines[0]): - lines.pop(0) + if TAP_HEADER.match(lines.peek()): + lines.pop() return True else: return False TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)') -def parse_test_plan(lines: List[str]) -> Optional[int]: +def parse_test_plan(lines: LineStream) -> Optional[int]: consume_non_diagnostic(lines) - match = TEST_PLAN.match(lines[0]) + match = TEST_PLAN.match(lines.peek()) if match: - lines.pop(0) + lines.pop() return int(match.group(1)) else: return None @@ -284,7 +325,7 @@ def parse_test_plan(lines: List[str]) -> Optional[int]: def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus: return bubble_up_errors(x.status for x in test_suites) -def parse_test_result(lines: List[str]) -> TestResult: +def parse_test_result(lines: LineStream) -> TestResult: consume_non_diagnostic(lines) if not lines or not parse_tap_header(lines): return TestResult(TestStatus.NO_TESTS, [], lines) @@ -338,11 +379,12 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]: print_with_timestamp('') return total_tests, failed_tests, crashed_tests -def parse_run_tests(kernel_output) -> TestResult: +def parse_run_tests(kernel_output: Iterable[str]) -> TestResult: total_tests = 0 failed_tests = 0 crashed_tests = 0 - test_result = parse_test_result(list(isolate_kunit_output(kernel_output))) + lines = extract_tap_lines(kernel_output) + test_result = parse_test_result(lines) if test_result.status == TestStatus.NO_TESTS: print(red('[ERROR] ') + yellow('no tests run!')) elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS: diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index a3b7f68e1cf9..40ff281140d1 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -11,6 +11,7 @@ from unittest import mock import tempfile, shutil # Handling test_tmpdir +import itertools import json import signal import os @@ -92,17 +93,18 @@ class KconfigTest(unittest.TestCase): class KUnitParserTest(unittest.TestCase): - def assertContains(self, needle, haystack): - for line in haystack: + def assertContains(self, needle: str, haystack: kunit_parser.LineStream): + # Clone the iterator so we can print the contents on failure. 
+ copy, backup = itertools.tee(haystack) + for line in copy: if needle in line: return - raise AssertionError('"' + - str(needle) + '" not found in "' + str(haystack) + '"!') + raise AssertionError(f'"{needle}" not found in {list(backup)}!') def test_output_isolated_correctly(self): log_path = test_data_path('test_output_isolated_correctly.log') with open(log_path) as file: - result = kunit_parser.isolate_kunit_output(file.readlines()) + result = kunit_parser.extract_tap_lines(file.readlines()) self.assertContains('TAP version 14', result) self.assertContains(' # Subtest: example', result) self.assertContains(' 1..2', result) @@ -113,7 +115,7 @@ class KUnitParserTest(unittest.TestCase): def test_output_with_prefix_isolated_correctly(self): log_path = test_data_path('test_pound_sign.log') with open(log_path) as file: - result = kunit_parser.isolate_kunit_output(file.readlines()) + result = kunit_parser.extract_tap_lines(file.readlines()) self.assertContains('TAP version 14', result) self.assertContains(' # Subtest: kunit-resource-test', result) self.assertContains(' 1..5', result) @@ -159,7 +161,7 @@ class KUnitParserTest(unittest.TestCase): empty_log = test_data_path('test_is_test_passed-no_tests_run.log') with open(empty_log) as file: result = kunit_parser.parse_run_tests( - kunit_parser.isolate_kunit_output(file.readlines())) + kunit_parser.extract_tap_lines(file.readlines())) self.assertEqual(0, len(result.suites)) self.assertEqual( kunit_parser.TestStatus.NO_TESTS, result.status) @@ -170,7 +172,7 @@ class KUnitParserTest(unittest.TestCase): print_mock = mock.patch('builtins.print').start() with open(crash_log) as file: result = kunit_parser.parse_run_tests( - kunit_parser.isolate_kunit_output(file.readlines())) + kunit_parser.extract_tap_lines(file.readlines())) print_mock.assert_any_call(StrContains('no tests run!')) print_mock.stop() file.close() -- cgit v1.2.3 From 5acaf6031f5349244e1fcfd74eb7b6212154fab3 Mon Sep 17 00:00:00 2001 From: David Gow Date: Thu, 24 Jun 2021 23:58:13 -0700 Subject: kunit: tool: Support skipped tests in kunit_tool Add support for the SKIP directive to kunit_tool's TAP parser. Skipped tests now show up as such in the printed summary. The number of skipped tests is counted, and if all tests in a suite are skipped, the suite is also marked as skipped. Otherwise, skipped tests do not affect the suite result. Example output: [00:22:34] ======== [SKIPPED] example_skip ======== [00:22:34] [SKIPPED] example_skip_test # SKIP this test should be skipped [00:22:34] [SKIPPED] example_mark_skipped_test # SKIP this test should be skipped [00:22:34] ============================================================ [00:22:34] Testing complete. 2 tests run. 0 failed. 0 crashed. 2 skipped.
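To make the new status precedence concrete, the following snippet walks through the reworked max_status()/bubble_up_errors() behaviour from the diff below (a sketch; it assumes tools/testing/kunit is on the Python path so kunit_parser can be imported directly):

    from kunit_parser import TestStatus, bubble_up_errors

    # A mix of passing and skipped cases still leaves the suite as SUCCESS ...
    assert bubble_up_errors([TestStatus.SUCCESS, TestStatus.SKIPPED]) == TestStatus.SUCCESS
    # ... a suite in which every case was skipped is reported as SKIPPED ...
    assert bubble_up_errors([TestStatus.SKIPPED, TestStatus.SKIPPED]) == TestStatus.SKIPPED
    # ... and a crash (or failure) still dominates everything else.
    assert bubble_up_errors([TestStatus.SKIPPED, TestStatus.TEST_CRASHED]) == TestStatus.TEST_CRASHED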
Signed-off-by: David Gow Reviewed-by: Daniel Latypov Reviewed-by: Brendan Higgins Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit_parser.py | 77 +++++++++++++++------- tools/testing/kunit/kunit_tool_test.py | 22 +++++++ .../kunit/test_data/test_skip_all_tests.log | 15 +++++ tools/testing/kunit/test_data/test_skip_tests.log | 15 +++++ 4 files changed, 105 insertions(+), 24 deletions(-) create mode 100644 tools/testing/kunit/test_data/test_skip_all_tests.log create mode 100644 tools/testing/kunit/test_data/test_skip_tests.log (limited to 'tools') diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py index 2b7dbba4fb7f..c3c524b79db8 100644 --- a/tools/testing/kunit/kunit_parser.py +++ b/tools/testing/kunit/kunit_parser.py @@ -43,6 +43,7 @@ class TestCase(object): class TestStatus(Enum): SUCCESS = auto() FAILURE = auto() + SKIPPED = auto() TEST_CRASHED = auto() NO_TESTS = auto() FAILURE_TO_PARSE_TESTS = auto() @@ -149,6 +150,8 @@ def save_non_diagnostic(lines: LineStream, test_case: TestCase) -> None: OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text']) +OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$') + OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$') OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$') @@ -166,6 +169,10 @@ def parse_ok_not_ok_test_case(lines: LineStream, test_case: TestCase) -> bool: if match: test_case.log.append(lines.pop()) test_case.name = match.group(2) + skip_match = OK_NOT_OK_SKIP.match(line) + if skip_match: + test_case.status = TestStatus.SKIPPED + return True if test_case.status == TestStatus.TEST_CRASHED: return True if match.group(1) == 'ok': @@ -229,16 +236,16 @@ def parse_subtest_plan(lines: LineStream) -> Optional[int]: return None def max_status(left: TestStatus, right: TestStatus) -> TestStatus: - if left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED: + if left == right: + return left + elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED: return TestStatus.TEST_CRASHED elif left == TestStatus.FAILURE or right == TestStatus.FAILURE: return TestStatus.FAILURE - elif left != TestStatus.SUCCESS: - return left - elif right != TestStatus.SUCCESS: + elif left == TestStatus.SKIPPED: return right else: - return TestStatus.SUCCESS + return left def parse_ok_not_ok_test_suite(lines: LineStream, test_suite: TestSuite, @@ -255,6 +262,9 @@ def parse_ok_not_ok_test_suite(lines: LineStream, test_suite.status = TestStatus.SUCCESS else: test_suite.status = TestStatus.FAILURE + skip_match = OK_NOT_OK_SKIP.match(line) + if skip_match: + test_suite.status = TestStatus.SKIPPED suite_index = int(match.group(2)) if suite_index != expected_suite_index: print_with_timestamp( @@ -265,8 +275,8 @@ def parse_ok_not_ok_test_suite(lines: LineStream, else: return False -def bubble_up_errors(statuses: Iterable[TestStatus]) -> TestStatus: - return reduce(max_status, statuses, TestStatus.SUCCESS) +def bubble_up_errors(status_list: Iterable[TestStatus]) -> TestStatus: + return reduce(max_status, status_list, TestStatus.SKIPPED) def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus: max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases) @@ -352,37 +362,53 @@ def parse_test_result(lines: LineStream) -> TestResult: else: return TestResult(TestStatus.NO_TESTS, [], lines) -def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]: - total_tests = 0 - failed_tests = 0 - crashed_tests = 0 
+class TestCounts: + passed: int + failed: int + crashed: int + skipped: int + + def __init__(self): + self.passed = 0 + self.failed = 0 + self.crashed = 0 + self.skipped = 0 + + def total(self) -> int: + return self.passed + self.failed + self.crashed + self.skipped + +def print_and_count_results(test_result: TestResult) -> TestCounts: + counts = TestCounts() for test_suite in test_result.suites: if test_suite.status == TestStatus.SUCCESS: print_suite_divider(green('[PASSED] ') + test_suite.name) + elif test_suite.status == TestStatus.SKIPPED: + print_suite_divider(yellow('[SKIPPED] ') + test_suite.name) elif test_suite.status == TestStatus.TEST_CRASHED: print_suite_divider(red('[CRASHED] ' + test_suite.name)) else: print_suite_divider(red('[FAILED] ') + test_suite.name) for test_case in test_suite.cases: - total_tests += 1 if test_case.status == TestStatus.SUCCESS: + counts.passed += 1 print_with_timestamp(green('[PASSED] ') + test_case.name) + elif test_case.status == TestStatus.SKIPPED: + counts.skipped += 1 + print_with_timestamp(yellow('[SKIPPED] ') + test_case.name) elif test_case.status == TestStatus.TEST_CRASHED: - crashed_tests += 1 + counts.crashed += 1 print_with_timestamp(red('[CRASHED] ' + test_case.name)) print_log(map(yellow, test_case.log)) print_with_timestamp('') else: - failed_tests += 1 + counts.failed += 1 print_with_timestamp(red('[FAILED] ') + test_case.name) print_log(map(yellow, test_case.log)) print_with_timestamp('') - return total_tests, failed_tests, crashed_tests + return counts def parse_run_tests(kernel_output: Iterable[str]) -> TestResult: - total_tests = 0 - failed_tests = 0 - crashed_tests = 0 + counts = TestCounts() lines = extract_tap_lines(kernel_output) test_result = parse_test_result(lines) if test_result.status == TestStatus.NO_TESTS: @@ -390,12 +416,15 @@ def parse_run_tests(kernel_output: Iterable[str]) -> TestResult: elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS: print(red('[ERROR] ') + yellow('could not parse test results!')) else: - (total_tests, - failed_tests, - crashed_tests) = print_and_count_results(test_result) + counts = print_and_count_results(test_result) print_with_timestamp(DIVIDER) - fmt = green if test_result.status == TestStatus.SUCCESS else red + if test_result.status == TestStatus.SUCCESS: + fmt = green + elif test_result.status == TestStatus.SKIPPED: + fmt = yellow + else: + fmt =red print_with_timestamp( - fmt('Testing complete. %d tests run. %d failed. %d crashed.' % - (total_tests, failed_tests, crashed_tests))) + fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' % + (counts.total(), counts.failed, counts.crashed, counts.skipped))) return test_result diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index 40ff281140d1..bdae0e5f6197 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -185,6 +185,28 @@ class KUnitParserTest(unittest.TestCase): kunit_parser.TestStatus.TEST_CRASHED, result.status) + def test_skipped_test(self): + skipped_log = test_data_path('test_skip_tests.log') + file = open(skipped_log) + result = kunit_parser.parse_run_tests(file.readlines()) + + # A skipped test does not fail the whole suite. 
+ self.assertEqual( + kunit_parser.TestStatus.SUCCESS, + result.status) + file.close() + + def test_skipped_all_tests(self): + skipped_log = test_data_path('test_skip_all_tests.log') + file = open(skipped_log) + result = kunit_parser.parse_run_tests(file.readlines()) + + self.assertEqual( + kunit_parser.TestStatus.SKIPPED, + result.status) + file.close() + + def test_ignores_prefix_printk_time(self): prefix_log = test_data_path('test_config_printk_time.log') with open(prefix_log) as file: diff --git a/tools/testing/kunit/test_data/test_skip_all_tests.log b/tools/testing/kunit/test_data/test_skip_all_tests.log new file mode 100644 index 000000000000..2ea6e6d14fff --- /dev/null +++ b/tools/testing/kunit/test_data/test_skip_all_tests.log @@ -0,0 +1,15 @@ +TAP version 14 +1..2 + # Subtest: string-stream-test + 1..3 + ok 1 - string_stream_test_empty_on_creation # SKIP all tests skipped + ok 2 - string_stream_test_not_empty_after_add # SKIP all tests skipped + ok 3 - string_stream_test_get_string # SKIP all tests skipped +ok 1 - string-stream-test # SKIP + # Subtest: example + 1..2 + # example_simple_test: initializing + ok 1 - example_simple_test # SKIP all tests skipped + # example_skip_test: initializing + ok 2 - example_skip_test # SKIP this test should be skipped +ok 2 - example # SKIP diff --git a/tools/testing/kunit/test_data/test_skip_tests.log b/tools/testing/kunit/test_data/test_skip_tests.log new file mode 100644 index 000000000000..79b326e31274 --- /dev/null +++ b/tools/testing/kunit/test_data/test_skip_tests.log @@ -0,0 +1,15 @@ +TAP version 14 +1..2 + # Subtest: string-stream-test + 1..3 + ok 1 - string_stream_test_empty_on_creation + ok 2 - string_stream_test_not_empty_after_add + ok 3 - string_stream_test_get_string +ok 1 - string-stream-test + # Subtest: example + 1..2 + # example_simple_test: initializing + ok 1 - example_simple_test + # example_skip_test: initializing + ok 2 - example_skip_test # SKIP this test should be skipped +ok 2 - example -- cgit v1.2.3
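These two sample logs double as convenient fixtures for poking at the parser by hand. A rough sketch of doing so (assuming it is run from tools/testing/kunit, where kunit_parser.py and test_data/ live):

    import kunit_parser

    with open('test_data/test_skip_tests.log') as f:
        result = kunit_parser.parse_run_tests(f.readlines())
    print(result.status)   # TestStatus.SUCCESS: skipped cases do not fail the suite

    with open('test_data/test_skip_all_tests.log') as f:
        result = kunit_parser.parse_run_tests(f.readlines())
    print(result.status)   # TestStatus.SKIPPED: every case in every suite was skipped

These are exactly the expectations encoded in test_skipped_test() and test_skipped_all_tests() above.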