author     Linus Torvalds <torvalds@linux-foundation.org>  2022-12-13 03:42:57 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-12-13 03:42:57 +0300
commit     e2ed78d5d9ca07a2b9d158ebac366170a2d3083d (patch)
tree       1c5e722706ca862849c149c39431f63c1a0f6cf5 /tools
parent     23a68d14def8247735b66a14b370b42d8fcedb47 (diff)
parent     054be257f28ca8eeb8e3620766501b81ceb4b293 (diff)
download   linux-e2ed78d5d9ca07a2b9d158ebac366170a2d3083d.tar.xz
Merge tag 'linux-kselftest-kunit-next-6.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest
Pull KUnit updates from Shuah Khan:
"Several enhancements, fixes, clean-ups, documentation updates,
improvements to logging and KTAP compliance of KUnit test output:
- log numbers in decimal and hex
- parse KTAP compliant test output
- allow conditionally exposing static symbols to tests when KUNIT is
enabled
- make static symbols visible during kunit testing
- clean-ups to remove unused structure definition"
* tag 'linux-kselftest-kunit-next-6.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest: (29 commits)
Documentation: dev-tools: Clarify requirements for result description
apparmor: test: make static symbols visible during kunit testing
kunit: add macro to allow conditionally exposing static symbols to tests
kunit: tool: make parser preserve whitespace when printing test log
Documentation: kunit: Fix "How Do I Use This" / "Next Steps" sections
kunit: tool: don't include KTAP headers and the like in the test log
kunit: improve KTAP compliance of KUnit test output
kunit: tool: parse KTAP compliant test output
mm: slub: test: Use the kunit_get_current_test() function
kunit: Use the static key when retrieving the current test
kunit: Provide a static key to check if KUnit is actively running tests
kunit: tool: make --json do nothing if --raw_ouput is set
kunit: tool: tweak error message when no KTAP found
kunit: remove KUNIT_INIT_MEM_ASSERTION macro
Documentation: kunit: Remove redundant 'tips.rst' page
Documentation: KUnit: reword description of assertions
Documentation: KUnit: make usage.rst a superset of tips.rst, remove duplication
kunit: eliminate KUNIT_INIT_*_ASSERT_STRUCT macros
kunit: tool: remove redundant file.close() call in unit test
kunit: tool: unit tests all check parser errors, standardize formatting a bit
...
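Several of the commits above rework kunit.py's parser to accept nested, KTAP-v1-style output. As a rough illustration only (not part of this pull request), the sketch below feeds the same nested shape used by the new test_parse_ktap_output.log test datum into the parser; it assumes kunit_parser is importable, i.e. that it is run from tools/testing/kunit:

import kunit_parser

# Nested KTAP: a top-level document whose single result line wraps an
# indented sub-document with its own version line and test plan.
ktap = """KTAP version 1
1..1
  KTAP version 1
  1..3
  ok 1 case_1
  ok 2 case_2
  ok 3 case_3
ok 1 suite
"""

# parse_run_tests() prints its usual report as a side effect and returns
# the top-level Test object.
result = kunit_parser.parse_run_tests(ktap.splitlines())
assert result.counts == kunit_parser.TestCounts(passed=3)
assert result.subtests[0].name == 'suite'

This mirrors the test_ktap_format unit test added in the diff below.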
Diffstat (limited to 'tools')
-rwxr-xr-x  tools/testing/kunit/kunit.py                                  |  36
-rw-r--r--  tools/testing/kunit/kunit_parser.py                           | 188
-rwxr-xr-x  tools/testing/kunit/kunit_tool_test.py                        | 164
-rw-r--r--  tools/testing/kunit/test_data/test_parse_ktap_output.log     |   8
-rw-r--r--  tools/testing/kunit/test_data/test_parse_subtest_header.log  |   7
5 files changed, 260 insertions(+), 143 deletions(-)
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 4d4663fb578b..43fbe96318fe 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -192,28 +192,30 @@ def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus:
 def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input_data: Iterable[str]) -> Tuple[KunitResult, kunit_parser.Test]:
     parse_start = time.time()

-    test_result = kunit_parser.Test()
-
     if request.raw_output:
         # Treat unparsed results as one passing test.
-        test_result.status = kunit_parser.TestStatus.SUCCESS
-        test_result.counts.passed = 1
+        fake_test = kunit_parser.Test()
+        fake_test.status = kunit_parser.TestStatus.SUCCESS
+        fake_test.counts.passed = 1

         output: Iterable[str] = input_data
         if request.raw_output == 'all':
             pass
         elif request.raw_output == 'kunit':
-            output = kunit_parser.extract_tap_lines(output, lstrip=False)
+            output = kunit_parser.extract_tap_lines(output)
         for line in output:
             print(line.rstrip())
+        parse_time = time.time() - parse_start
+        return KunitResult(KunitStatus.SUCCESS, parse_time), fake_test

-    else:
-        test_result = kunit_parser.parse_run_tests(input_data)
-    parse_end = time.time()
+
+    # Actually parse the test results.
+    test = kunit_parser.parse_run_tests(input_data)
+    parse_time = time.time() - parse_start

     if request.json:
         json_str = kunit_json.get_json_result(
-                    test=test_result,
+                    test=test,
                     metadata=metadata)
         if request.json == 'stdout':
             print(json_str)
@@ -223,10 +225,10 @@ def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input
             stdout.print_with_timestamp("Test results stored in %s" %
                 os.path.abspath(request.json))

-    if test_result.status != kunit_parser.TestStatus.SUCCESS:
-        return KunitResult(KunitStatus.TEST_FAILURE, parse_end - parse_start), test_result
+    if test.status != kunit_parser.TestStatus.SUCCESS:
+        return KunitResult(KunitStatus.TEST_FAILURE, parse_time), test

-    return KunitResult(KunitStatus.SUCCESS, parse_end - parse_start), test_result
+    return KunitResult(KunitStatus.SUCCESS, parse_time), test

 def run_tests(linux: kunit_kernel.LinuxSourceTree,
           request: KunitRequest) -> KunitResult:
@@ -359,14 +361,14 @@ def add_exec_opts(parser) -> None:
                 choices=['suite', 'test'])

 def add_parse_opts(parser) -> None:
-    parser.add_argument('--raw_output', help='If set don\'t format output from kernel. '
-                'If set to --raw_output=kunit, filters to just KUnit output.',
+    parser.add_argument('--raw_output', help='If set don\'t parse output from kernel. '
+                'By default, filters to just KUnit output. Use '
+                '--raw_output=all to show everything',
                 type=str, nargs='?', const='all', default=None, choices=['all', 'kunit'])
     parser.add_argument('--json',
                 nargs='?',
-                help='Stores test results in a JSON, and either '
-                'prints to stdout or saves to file if a '
-                'filename is specified',
+                help='Prints parsed test results as JSON to stdout or a file if '
+                'a filename is specified. Does nothing if --raw_output is set.',
                 type=str, const='stdout', default=None, metavar='FILE')
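An aside on the extract_tap_lines() change above: the lstrip parameter is gone because the parser now preserves leading whitespace and instead tolerates it in its regexes (see the \s* prefixes added in kunit_parser.py below). A minimal illustrative sketch of the raw-output consumption pattern, assuming kunit_parser is importable from tools/testing/kunit; the sample console lines are made up:

import kunit_parser

# Hypothetical kernel console lines; the printk timestamps are the kind of
# per-line prefix that extract_tap_lines() detects and strips.
kernel_output = [
    '[    0.060000] TAP version 14',
    '[    0.060000] 1..1',
    '[    0.060000]   ok 1 example',
]

# Same pattern as the --raw_output=kunit branch of parse_tests(): the
# returned LineStream is iterated directly, and indentation after the
# stripped prefix is now preserved rather than lstripped away.
for line in kunit_parser.extract_tap_lines(kernel_output):
    print(line.rstrip())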
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 1ae873e3e341..a225799f6b1b 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -10,8 +10,10 @@
 # Author: Rae Moar <rmoar@google.com>

 from __future__ import annotations
+from dataclasses import dataclass
 import re
 import sys
+import textwrap

 from enum import Enum, auto
 from typing import Iterable, Iterator, List, Optional, Tuple
@@ -58,6 +60,10 @@ class Test:
         self.counts.errors += 1
         stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}')

+    def ok_status(self) -> bool:
+        """Returns true if the status was ok, i.e. passed or skipped."""
+        return self.status in (TestStatus.SUCCESS, TestStatus.SKIPPED)
+
 class TestStatus(Enum):
     """An enumeration class to represent the status of a test."""
     SUCCESS = auto()
@@ -67,27 +73,17 @@ class TestStatus(Enum):
     NO_TESTS = auto()
     FAILURE_TO_PARSE_TESTS = auto()

+@dataclass
 class TestCounts:
     """
     Tracks the counts of statuses of all test cases and any errors within
     a Test.
-
-    Attributes:
-    passed : int - the number of tests that have passed
-    failed : int - the number of tests that have failed
-    crashed : int - the number of tests that have crashed
-    skipped : int - the number of tests that have skipped
-    errors : int - the number of errors in the test and subtests
-    """
-    def __init__(self):
-        """Creates TestCounts object with counts of all test
-        statuses and test errors set to 0.
-        """
-        self.passed = 0
-        self.failed = 0
-        self.crashed = 0
-        self.skipped = 0
-        self.errors = 0
+    """
+    passed: int = 0
+    failed: int = 0
+    crashed: int = 0
+    skipped: int = 0
+    errors: int = 0

     def __str__(self) -> str:
         """Returns the string representation of a TestCounts object."""
@@ -213,12 +209,12 @@ class LineStream:

 # Parsing helper methods:

-KTAP_START = re.compile(r'KTAP version ([0-9]+)$')
-TAP_START = re.compile(r'TAP version ([0-9]+)$')
-KTAP_END = re.compile('(List of all partitions:|'
+KTAP_START = re.compile(r'\s*KTAP version ([0-9]+)$')
+TAP_START = re.compile(r'\s*TAP version ([0-9]+)$')
+KTAP_END = re.compile(r'\s*(List of all partitions:|'
     'Kernel panic - not syncing: VFS:|reboot: System halted)')

-def extract_tap_lines(kernel_output: Iterable[str], lstrip=True) -> LineStream:
+def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
     """Extracts KTAP lines from the kernel output."""
     def isolate_ktap_output(kernel_output: Iterable[str]) \
             -> Iterator[Tuple[int, str]]:
@@ -244,11 +240,8 @@ def extract_tap_lines(kernel_output: Iterable[str], lstrip=True) -> LineStream:
                 # stop extracting KTAP lines
                 break
             elif started:
-                # remove the prefix and optionally any leading
-                # whitespace. Our parsing logic relies on this.
+                # remove the prefix, if any.
                 line = line[prefix_len:]
-                if lstrip:
-                    line = line.lstrip()
                 yield line_num, line
     return LineStream(lines=isolate_ktap_output(kernel_output))

@@ -300,10 +293,10 @@ def parse_ktap_header(lines: LineStream, test: Test) -> bool:
         check_version(version_num, TAP_VERSIONS, 'TAP', test)
     else:
         return False
-    test.log.append(lines.pop())
+    lines.pop()
     return True

-TEST_HEADER = re.compile(r'^# Subtest: (.*)$')
+TEST_HEADER = re.compile(r'^\s*# Subtest: (.*)$')

 def parse_test_header(lines: LineStream, test: Test) -> bool:
     """
@@ -323,11 +316,11 @@ def parse_test_header(lines: LineStream, test: Test) -> bool:
     match = TEST_HEADER.match(lines.peek())
     if not match:
         return False
-    test.log.append(lines.pop())
     test.name = match.group(1)
+    lines.pop()
     return True

-TEST_PLAN = re.compile(r'1\.\.([0-9]+)')
+TEST_PLAN = re.compile(r'^\s*1\.\.([0-9]+)')

 def parse_test_plan(lines: LineStream, test: Test) -> bool:
     """
@@ -350,14 +343,14 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool:
     if not match:
         test.expected_count = None
         return False
-    test.log.append(lines.pop())
     expected_count = int(match.group(1))
     test.expected_count = expected_count
+    lines.pop()
     return True

-TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')
+TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')

-TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')
+TEST_RESULT_SKIP = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')

 def peek_test_name_match(lines: LineStream, test: Test) -> bool:
     """
@@ -414,7 +407,7 @@ def parse_test_result(lines: LineStream, test: Test,
     # Check if line matches test result line format
     if not match:
         return False
-    test.log.append(lines.pop())
+    lines.pop()

     # Set name of test object
     if skip_match:
@@ -446,6 +439,7 @@ def parse_diagnostic(lines: LineStream) -> List[str]:
     - '# Subtest: [test name]'
     - '[ok|not ok] [test number] [-] [test name] [optional skip
         directive]'
+    - 'KTAP version [version number]'

     Parameters:
     lines - LineStream of KTAP output to parse
@@ -454,8 +448,9 @@ def parse_diagnostic(lines: LineStream) -> List[str]:
     Log of diagnostic lines
     """
     log = []  # type: List[str]
-    while lines and not TEST_RESULT.match(lines.peek()) and not \
-            TEST_HEADER.match(lines.peek()):
+    non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START]
+    while lines and not any(re.match(lines.peek())
+            for re in non_diagnostic_lines):
         log.append(lines.pop())
     return log

@@ -501,17 +496,22 @@ def print_test_header(test: Test) -> None:
     test - Test object representing current test being printed
     """
     message = test.name
+    if message != "":
+        # Add a leading space before the subtest counts only if a test name
+        # is provided using a "# Subtest" header line.
+        message += " "
+
     if test.expected_count:
         if test.expected_count == 1:
-            message += ' (1 subtest)'
+            message += '(1 subtest)'
         else:
-            message += f' ({test.expected_count} subtests)'
+            message += f'({test.expected_count} subtests)'
     stdout.print_with_timestamp(format_test_divider(message, len(message)))

 def print_log(log: Iterable[str]) -> None:
     """Prints all strings in saved log for test in yellow."""
-    for m in log:
-        stdout.print_with_timestamp(stdout.yellow(m))
+    formatted = textwrap.dedent('\n'.join(log))
+    for line in formatted.splitlines():
+        stdout.print_with_timestamp(stdout.yellow(line))

 def format_test_result(test: Test) -> str:
     """
@@ -565,6 +565,40 @@ def print_test_footer(test: Test) -> None:
     stdout.print_with_timestamp(format_test_divider(message,
         len(message) - stdout.color_len()))

+
+def _summarize_failed_tests(test: Test) -> str:
+    """Tries to summarize all the failing subtests in `test`."""
+
+    def failed_names(test: Test, parent_name: str) -> List[str]:
+        # Note: we use 'main' internally for the top-level test.
+        if not parent_name or parent_name == 'main':
+            full_name = test.name
+        else:
+            full_name = parent_name + '.' + test.name
+
+        if not test.subtests:  # this is a leaf node
+            return [full_name]
+
+        # If all the children failed, just say this subtest failed.
+        # Don't summarize it down "the top-level test failed", though.
+        failed_subtests = [sub for sub in test.subtests if not sub.ok_status()]
+        if parent_name and len(failed_subtests) == len(test.subtests):
+            return [full_name]
+
+        all_failures = []  # type: List[str]
+        for t in failed_subtests:
+            all_failures.extend(failed_names(t, full_name))
+        return all_failures
+
+    failures = failed_names(test, '')
+    # If there are too many failures, printing them out will just be noisy.
+    if len(failures) > 10:  # this is an arbitrary limit
+        return ''
+
+    return 'Failures: ' + ', '.join(failures)
+
+
 def print_summary_line(test: Test) -> None:
     """
     Prints summary line of test object. Color of line is dependent on
@@ -587,6 +621,15 @@ def print_summary_line(test: Test) -> None:
         color = stdout.red
     stdout.print_with_timestamp(color(f'Testing complete. {test.counts}'))

+    # Summarize failures that might have gone off-screen since we had a lot
+    # of tests (arbitrarily defined as >=100 for now).
+    if test.ok_status() or test.counts.total() < 100:
+        return
+
+    summarized = _summarize_failed_tests(test)
+    if not summarized:
+        return
+    stdout.print_with_timestamp(color(summarized))
+
 # Other methods:

 def bubble_up_test_results(test: Test) -> None:
@@ -609,7 +652,7 @@ def bubble_up_test_results(test: Test) -> None:
     elif test.counts.get_status() == TestStatus.TEST_CRASHED:
         test.status = TestStatus.TEST_CRASHED

-def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
+def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest: bool) -> Test:
     """
     Finds next test to parse in LineStream, creates new Test object,
     parses any subtests of the test, populates Test object with all
@@ -627,15 +670,32 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
     1..4
     [subtests]

-    - Subtest header line
+    - Subtest header (must include either the KTAP version line or
+      "# Subtest" header line)

-    Example:
+    Example (preferred format with both KTAP version line and
+    "# Subtest" line):

+    KTAP version 1
     # Subtest: name
     1..3
     [subtests]
     ok 1 name

+    Example (only "# Subtest" line):
+
+    # Subtest: name
+    1..3
+    [subtests]
+    ok 1 name
+
+    Example (only KTAP version line, compliant with KTAP v1 spec):
+
+    KTAP version 1
+    1..3
+    [subtests]
+    ok 1 name
+
     - Test result line

     Example:
@@ -647,28 +707,29 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
     expected_num - expected test number for test to be parsed
     log - list of strings containing any preceding diagnostic lines
         corresponding to the current test
+    is_subtest - boolean indicating whether test is a subtest

     Return:
     Test object populated with characteristics and any subtests
     """
     test = Test()
     test.log.extend(log)
-    parent_test = False
-    main = parse_ktap_header(lines, test)
-    if main:
-        # If KTAP/TAP header is found, attempt to parse
+
+    if not is_subtest:
+        # If parsing the main/top-level test, parse KTAP version line and
         # test plan
         test.name = "main"
+
+        ktap_line = parse_ktap_header(lines, test)
         parse_test_plan(lines, test)
         parent_test = True
     else:
-        # If KTAP/TAP header is not found, test must be subtest
-        # header or test result line so parse attempt to parser
-        # subtest header
-        parent_test = parse_test_header(lines, test)
+        # If not the main test, attempt to parse a test header containing
+        # the KTAP version line and/or subtest header line
+        ktap_line = parse_ktap_header(lines, test)
+        subtest_line = parse_test_header(lines, test)
+        parent_test = (ktap_line or subtest_line)
+
     if parent_test:
-        # If subtest header is found, attempt to parse
-        # test plan and print header
+        # If KTAP version line and/or subtest header is found, attempt
+        # to parse test plan and print test header
         parse_test_plan(lines, test)
         print_test_header(test)
     expected_count = test.expected_count
@@ -683,7 +744,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
         sub_log = parse_diagnostic(lines)
         sub_test = Test()
         if not lines or (peek_test_name_match(lines, test) and
-                not main):
+                is_subtest):
             if expected_count and test_num <= expected_count:
                 # If parser reaches end of test before
                 # parsing expected number of subtests, print
@@ -697,20 +758,19 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
                 test.log.extend(sub_log)
                 break
         else:
-            sub_test = parse_test(lines, test_num, sub_log)
+            sub_test = parse_test(lines, test_num, sub_log, True)
         subtests.append(sub_test)
         test_num += 1
     test.subtests = subtests
-    if not main:
+    if is_subtest:
         # If not main test, look for test result line
         test.log.extend(parse_diagnostic(lines))
-        if (parent_test and peek_test_name_match(lines, test)) or \
-                not parent_test:
-            parse_test_result(lines, test, expected_num)
-        else:
+        if test.name != "" and not peek_test_name_match(lines, test):
             test.add_error('missing subtest result line!')
+        else:
+            parse_test_result(lines, test, expected_num)

-    # Check for there being no tests
+    # Check for there being no subtests within parent test
     if parent_test and len(subtests) == 0:
         # Don't override a bad status if this test had one reported.
         # Assumption: no subtests means CRASHED is from Test.__init__()
@@ -720,11 +780,11 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
     # Add statuses to TestCounts attribute in Test object
     bubble_up_test_results(test)
-    if parent_test and not main:
+    if parent_test and is_subtest:
         # If test has subtests and is not the main test object, print
         # footer.
         print_test_footer(test)
-    elif not main:
+    elif is_subtest:
         print_test_result(test)
     return test

@@ -744,10 +804,10 @@ def parse_run_tests(kernel_output: Iterable[str]) -> Test:
     test = Test()
     if not lines:
         test.name = '<missing>'
-        test.add_error('could not find any KTAP output!')
+        test.add_error('Could not find any KTAP output. Did any KUnit tests run?')
         test.status = TestStatus.FAILURE_TO_PARSE_TESTS
     else:
-        test = parse_test(lines, 0, [])
+        test = parse_test(lines, 0, [], False)
         if test.status != TestStatus.NO_TESTS:
             test.status = test.counts.get_status()
     stdout.print_with_timestamp(DIVIDER)
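To make the new failure summary concrete, here is an illustrative sketch mirroring the test_summarize_failures case added in the test diff below. It assumes kunit_parser is importable from tools/testing/kunit; note that _summarize_failed_tests() is internal (leading underscore) and that print_summary_line() only emits this summary for runs with at least 100 test cases:

import kunit_parser

output = """
KTAP version 1
1..2
  # Subtest: all_failed_suite
  1..2
  not ok 1 - test1
  not ok 2 - test2
not ok 1 - all_failed_suite
  # Subtest: some_failed_suite
  1..2
  ok 1 - test1
  not ok 2 - test2
not ok 1 - some_failed_suite
"""

# parse_run_tests() prints its usual report as a side effect.
result = kunit_parser.parse_run_tests(output.splitlines())

# A fully-failed suite collapses to a single name, while a partially-failed
# suite lists its failing cases individually.
print(kunit_parser._summarize_failed_tests(result))
# -> Failures: all_failed_suite, some_failed_suite.test2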
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index e2cd2cc2e98f..0c2190514103 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -80,6 +80,13 @@ class KconfigTest(unittest.TestCase):
         self.assertEqual(actual_kconfig, expected_kconfig)

 class KUnitParserTest(unittest.TestCase):
+    def setUp(self):
+        self.print_mock = mock.patch('kunit_printer.Printer.print').start()
+        self.addCleanup(mock.patch.stopall)
+
+    def noPrintCallContains(self, substr: str):
+        for call in self.print_mock.mock_calls:
+            self.assertNotIn(substr, call.args[0])

     def assertContains(self, needle: str, haystack: kunit_parser.LineStream):
         # Clone the iterator so we can print the contents on failure.
@@ -133,33 +140,29 @@ class KUnitParserTest(unittest.TestCase):
         all_passed_log = test_data_path('test_is_test_passed-all_passed.log')
         with open(all_passed_log) as file:
             result = kunit_parser.parse_run_tests(file.readlines())
-        self.assertEqual(
-            kunit_parser.TestStatus.SUCCESS,
-            result.status)
+        self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+        self.assertEqual(result.counts.errors, 0)

     def test_parse_successful_nested_tests_log(self):
         all_passed_log = test_data_path('test_is_test_passed-all_passed_nested.log')
         with open(all_passed_log) as file:
             result = kunit_parser.parse_run_tests(file.readlines())
-        self.assertEqual(
-            kunit_parser.TestStatus.SUCCESS,
-            result.status)
+        self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+        self.assertEqual(result.counts.errors, 0)

     def test_kselftest_nested(self):
         kselftest_log = test_data_path('test_is_test_passed-kselftest.log')
         with open(kselftest_log) as file:
             result = kunit_parser.parse_run_tests(file.readlines())
-        self.assertEqual(
-            kunit_parser.TestStatus.SUCCESS,
-            result.status)
+        self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+        self.assertEqual(result.counts.errors, 0)

     def test_parse_failed_test_log(self):
         failed_log = test_data_path('test_is_test_passed-failure.log')
         with open(failed_log) as file:
             result = kunit_parser.parse_run_tests(file.readlines())
-        self.assertEqual(
-            kunit_parser.TestStatus.FAILURE,
-            result.status)
+        self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status)
+        self.assertEqual(result.counts.errors, 0)

     def test_no_header(self):
         empty_log = test_data_path('test_is_test_passed-no_tests_run_no_header.log')
@@ -167,9 +170,8 @@ class KUnitParserTest(unittest.TestCase):
             result = kunit_parser.parse_run_tests(
                 kunit_parser.extract_tap_lines(file.readlines()))
         self.assertEqual(0, len(result.subtests))
-        self.assertEqual(
-            kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS,
-            result.status)
+        self.assertEqual(kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS, result.status)
+        self.assertEqual(result.counts.errors, 1)

     def test_missing_test_plan(self):
         missing_plan_log = test_data_path('test_is_test_passed-'
@@ -179,12 +181,8 @@ class KUnitParserTest(unittest.TestCase):
                 kunit_parser.extract_tap_lines(
                 file.readlines()))
         # A missing test plan is not an error.
-        self.assertEqual(0, result.counts.errors)
-        # All tests should be accounted for.
-        self.assertEqual(10, result.counts.total())
-        self.assertEqual(
-            kunit_parser.TestStatus.SUCCESS,
-            result.status)
+        self.assertEqual(result.counts, kunit_parser.TestCounts(passed=10, errors=0))
+        self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)

     def test_no_tests(self):
         header_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log')
@@ -192,9 +190,8 @@ class KUnitParserTest(unittest.TestCase):
             result = kunit_parser.parse_run_tests(
                 kunit_parser.extract_tap_lines(file.readlines()))
         self.assertEqual(0, len(result.subtests))
-        self.assertEqual(
-            kunit_parser.TestStatus.NO_TESTS,
-            result.status)
+        self.assertEqual(kunit_parser.TestStatus.NO_TESTS, result.status)
+        self.assertEqual(result.counts.errors, 1)

     def test_no_tests_no_plan(self):
         no_plan_log = test_data_path('test_is_test_passed-no_tests_no_plan.log')
@@ -205,7 +202,7 @@ class KUnitParserTest(unittest.TestCase):
         self.assertEqual(
             kunit_parser.TestStatus.NO_TESTS,
             result.subtests[0].subtests[0].status)
-        self.assertEqual(1, result.counts.errors)
+        self.assertEqual(result.counts, kunit_parser.TestCounts(passed=1, errors=1))

     def test_no_kunit_output(self):
@@ -214,9 +211,10 @@ class KUnitParserTest(unittest.TestCase):
         with open(crash_log) as file:
             result = kunit_parser.parse_run_tests(
                 kunit_parser.extract_tap_lines(file.readlines()))
-        print_mock.assert_any_call(StrContains('could not find any KTAP output!'))
+        print_mock.assert_any_call(StrContains('Could not find any KTAP output.'))
         print_mock.stop()
         self.assertEqual(0, len(result.subtests))
+        self.assertEqual(result.counts.errors, 1)

     def test_skipped_test(self):
         skipped_log = test_data_path('test_skip_tests.log')
@@ -224,18 +222,16 @@ class KUnitParserTest(unittest.TestCase):
             result = kunit_parser.parse_run_tests(file.readlines())

         # A skipped test does not fail the whole suite.
-        self.assertEqual(
-            kunit_parser.TestStatus.SUCCESS,
-            result.status)
+        self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+        self.assertEqual(result.counts, kunit_parser.TestCounts(passed=4, skipped=1))

     def test_skipped_all_tests(self):
         skipped_log = test_data_path('test_skip_all_tests.log')
         with open(skipped_log) as file:
             result = kunit_parser.parse_run_tests(file.readlines())

-        self.assertEqual(
-            kunit_parser.TestStatus.SKIPPED,
-            result.status)
+        self.assertEqual(kunit_parser.TestStatus.SKIPPED, result.status)
+        self.assertEqual(result.counts, kunit_parser.TestCounts(skipped=5))

     def test_ignores_hyphen(self):
         hyphen_log = test_data_path('test_strip_hyphen.log')
@@ -243,71 +239,112 @@ class KUnitParserTest(unittest.TestCase):
             result = kunit_parser.parse_run_tests(file.readlines())

         # A skipped test does not fail the whole suite.
-        self.assertEqual(
-            kunit_parser.TestStatus.SUCCESS,
-            result.status)
+        self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
         self.assertEqual(
             "sysctl_test",
             result.subtests[0].name)
         self.assertEqual(
             "example",
             result.subtests[1].name)
-        file.close()
-

     def test_ignores_prefix_printk_time(self):
         prefix_log = test_data_path('test_config_printk_time.log')
         with open(prefix_log) as file:
             result = kunit_parser.parse_run_tests(file.readlines())
-            self.assertEqual(
-                kunit_parser.TestStatus.SUCCESS,
-                result.status)
-            self.assertEqual('kunit-resource-test', result.subtests[0].name)
+        self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+        self.assertEqual('kunit-resource-test', result.subtests[0].name)
+        self.assertEqual(result.counts.errors, 0)

     def test_ignores_multiple_prefixes(self):
         prefix_log = test_data_path('test_multiple_prefixes.log')
         with open(prefix_log) as file:
             result = kunit_parser.parse_run_tests(file.readlines())
-            self.assertEqual(
-                kunit_parser.TestStatus.SUCCESS,
-                result.status)
-            self.assertEqual('kunit-resource-test', result.subtests[0].name)
+        self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+        self.assertEqual('kunit-resource-test', result.subtests[0].name)
+        self.assertEqual(result.counts.errors, 0)

     def test_prefix_mixed_kernel_output(self):
         mixed_prefix_log = test_data_path('test_interrupted_tap_output.log')
         with open(mixed_prefix_log) as file:
             result = kunit_parser.parse_run_tests(file.readlines())
-            self.assertEqual(
-                kunit_parser.TestStatus.SUCCESS,
-                result.status)
-            self.assertEqual('kunit-resource-test', result.subtests[0].name)
+        self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+        self.assertEqual('kunit-resource-test', result.subtests[0].name)
+        self.assertEqual(result.counts.errors, 0)

     def test_prefix_poundsign(self):
         pound_log = test_data_path('test_pound_sign.log')
         with open(pound_log) as file:
             result = kunit_parser.parse_run_tests(file.readlines())
-            self.assertEqual(
-                kunit_parser.TestStatus.SUCCESS,
-                result.status)
-            self.assertEqual('kunit-resource-test', result.subtests[0].name)
+        self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+        self.assertEqual('kunit-resource-test', result.subtests[0].name)
+        self.assertEqual(result.counts.errors, 0)

     def test_kernel_panic_end(self):
         panic_log = test_data_path('test_kernel_panic_interrupt.log')
         with open(panic_log) as file:
             result = kunit_parser.parse_run_tests(file.readlines())
-            self.assertEqual(
-                kunit_parser.TestStatus.TEST_CRASHED,
-                result.status)
-            self.assertEqual('kunit-resource-test', result.subtests[0].name)
+        self.assertEqual(kunit_parser.TestStatus.TEST_CRASHED, result.status)
+        self.assertEqual('kunit-resource-test', result.subtests[0].name)
+        self.assertGreaterEqual(result.counts.errors, 1)

     def test_pound_no_prefix(self):
         pound_log = test_data_path('test_pound_no_prefix.log')
         with open(pound_log) as file:
             result = kunit_parser.parse_run_tests(file.readlines())
-            self.assertEqual(
-                kunit_parser.TestStatus.SUCCESS,
-                result.status)
-            self.assertEqual('kunit-resource-test', result.subtests[0].name)
+        self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+        self.assertEqual('kunit-resource-test', result.subtests[0].name)
+        self.assertEqual(result.counts.errors, 0)
+
+    def test_summarize_failures(self):
+        output = """
+        KTAP version 1
+        1..2
+          # Subtest: all_failed_suite
+          1..2
+          not ok 1 - test1
+          not ok 2 - test2
+        not ok 1 - all_failed_suite
+          # Subtest: some_failed_suite
+          1..2
+          ok 1 - test1
+          not ok 2 - test2
+        not ok 1 - some_failed_suite
+        """
+        result = kunit_parser.parse_run_tests(output.splitlines())
+        self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status)
+
+        self.assertEqual(kunit_parser._summarize_failed_tests(result),
+            'Failures: all_failed_suite, some_failed_suite.test2')
+
+    def test_ktap_format(self):
+        ktap_log = test_data_path('test_parse_ktap_output.log')
+        with open(ktap_log) as file:
+            result = kunit_parser.parse_run_tests(file.readlines())
+        self.assertEqual(result.counts, kunit_parser.TestCounts(passed=3))
+        self.assertEqual('suite', result.subtests[0].name)
+        self.assertEqual('case_1', result.subtests[0].subtests[0].name)
+        self.assertEqual('case_2', result.subtests[0].subtests[1].name)
+
+    def test_parse_subtest_header(self):
+        ktap_log = test_data_path('test_parse_subtest_header.log')
+        with open(ktap_log) as file:
+            result = kunit_parser.parse_run_tests(file.readlines())
+        self.print_mock.assert_any_call(StrContains('suite (1 subtest)'))
+
+    def test_show_test_output_on_failure(self):
+        output = """
+        KTAP version 1
+        1..1
+          Test output.
+            Indented more.
+        not ok 1 test1
+        """
+        result = kunit_parser.parse_run_tests(output.splitlines())
+        self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status)
+
+        self.print_mock.assert_any_call(StrContains('Test output.'))
+        self.print_mock.assert_any_call(StrContains('  Indented more.'))
+        self.noPrintCallContains('not ok 1 test1')

 def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream:
     return kunit_parser.LineStream(enumerate(strs, start=1))
@@ -485,6 +522,9 @@ class LinuxSourceTreeTest(unittest.TestCase):

 class KUnitJsonTest(unittest.TestCase):
+    def setUp(self):
+        self.print_mock = mock.patch('kunit_printer.Printer.print').start()
+        self.addCleanup(mock.patch.stopall)

     def _json_for(self, log_file):
         with open(test_data_path(log_file)) as file:
@@ -581,7 +621,7 @@ class KUnitMainTest(unittest.TestCase):
         self.assertEqual(e.exception.code, 1)
         self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
         self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
-        self.print_mock.assert_any_call(StrContains('could not find any KTAP output!'))
+        self.print_mock.assert_any_call(StrContains('Could not find any KTAP output.'))

     def test_exec_no_tests(self):
         self.linux_source_mock.run_kernel = mock.Mock(return_value=['TAP version 14', '1..0'])
diff --git a/tools/testing/kunit/test_data/test_parse_ktap_output.log b/tools/testing/kunit/test_data/test_parse_ktap_output.log
new file mode 100644
index 000000000000..ccdf244e5303
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_parse_ktap_output.log
@@ -0,0 +1,8 @@
+KTAP version 1
+1..1
+  KTAP version 1
+  1..3
+  ok 1 case_1
+  ok 2 case_2
+  ok 3 case_3
+ok 1 suite
diff --git a/tools/testing/kunit/test_data/test_parse_subtest_header.log b/tools/testing/kunit/test_data/test_parse_subtest_header.log
new file mode 100644
index 000000000000..216631092e7b
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_parse_subtest_header.log
@@ -0,0 +1,7 @@
+KTAP version 1
+1..1
+  KTAP version 1
+  # Subtest: suite
+  1..1
+  ok 1 test
+ok 1 suite
\ No newline at end of file
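One last illustrative note on the TestCounts @dataclass conversion in kunit_parser.py above: it is what gives the updated unit tests keyword construction and value equality on whole count objects, instead of comparing fields one by one. A small sketch, under the same assumption that kunit_parser is importable from tools/testing/kunit:

from kunit_parser import TestCounts

# Fields default to 0, matching the old hand-written __init__; equality is
# generated field-by-field by @dataclass.
counts = TestCounts(passed=10)
assert counts == TestCounts(passed=10)
counts.errors += 1
assert counts == TestCounts(passed=10, errors=1)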