From a6cf5d67c516f1cb73815ec54db2606a27db4d80 Mon Sep 17 00:00:00 2001
From: Pengyu Lv
Date: Wed, 22 Nov 2023 11:35:21 +0800
Subject: [PATCH 01/13] Share parsed outcomes among tasks when analyzing

This greatly improves performance.

Signed-off-by: Pengyu Lv
---
 tests/scripts/analyze_outcomes.py | 28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)

diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index a070b01639..ddacf2e06e 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -179,23 +179,26 @@ by a semicolon.
             outcomes[key].failures.append(setup)
     return outcomes
 
-def do_analyze_coverage(results: Results, outcome_file, args):
+def do_analyze_coverage(results: Results, outcomes_or_file, args):
     """Perform coverage analysis."""
     results.new_section("Analyze coverage")
-    outcomes = read_outcome_file(outcome_file)
+    outcomes = read_outcome_file(outcomes_or_file) \
+               if isinstance(outcomes_or_file, str) else outcomes_or_file
     analyze_outcomes(results, outcomes, args)
 
-def do_analyze_driver_vs_reference(results: Results, outcome_file, args):
+def do_analyze_driver_vs_reference(results: Results, outcomes_or_file, args):
     """Perform driver vs reference analyze."""
     results.new_section("Analyze driver {} vs reference {}",
                         args['component_driver'], args['component_ref'])
 
-    execute_reference_driver_tests(results, args['component_ref'], \
-                                   args['component_driver'], outcome_file)
-
     ignored_suites = ['test_suite_' + x for x in args['ignored_suites']]
 
-    outcomes = read_outcome_file(outcome_file)
+    if isinstance(outcomes_or_file, str):
+        execute_reference_driver_tests(results, args['component_ref'], \
+                                       args['component_driver'], outcomes_or_file)
+        outcomes = read_outcome_file(outcomes_or_file)
+    else:
+        outcomes = outcomes_or_file
 
     analyze_driver_vs_reference(results, outcomes,
                                 args['component_ref'], args['component_driver'],
@@ -493,10 +496,19 @@ def main():
 
     KNOWN_TASKS['analyze_coverage']['args']['full_coverage'] = options.full_coverage
 
+    # If the outcome file already exists, we assume that the user wants to
+    # perform the comparison.
+    # Share the contents among tasks to improve performance.
+    if os.path.exists(options.outcomes):
+        main_results.info("Read outcome file from {}.", options.outcomes)
+        outcomes_or_file = read_outcome_file(options.outcomes)
+    else:
+        outcomes_or_file = options.outcomes
+
     for task in tasks_list:
         test_function = KNOWN_TASKS[task]['test_function']
         test_args = KNOWN_TASKS[task]['args']
-        test_function(main_results, options.outcomes, test_args)
+        test_function(main_results, outcomes_or_file, test_args)
 
     main_results.info("Overall results: {} warnings and {} errors",
                       main_results.warning_count, main_results.error_count)

From a4428588782c60947b11a2ee703e4eceda7ac8b4 Mon Sep 17 00:00:00 2001
From: Pengyu Lv
Date: Wed, 22 Nov 2023 19:02:15 +0800
Subject: [PATCH 02/13] Restructure the outcome file representation

Signed-off-by: Pengyu Lv
---
 tests/scripts/analyze_outcomes.py | 68 ++++++++++++------------------
 1 file changed, 26 insertions(+), 42 deletions(-)

diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index ddacf2e06e..2cd6257d35 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -40,25 +40,6 @@ class Results:
     def _print_line(fmt, *args, **kwargs):
         sys.stderr.write((fmt + '\n').format(*args, **kwargs))
 
-class TestCaseOutcomes:
-    """The outcomes of one test case across many configurations."""
-    # pylint: disable=too-few-public-methods
-
-    def __init__(self):
-        # Collect a list of witnesses of the test case succeeding or failing.
-        # Currently we don't do anything with witnesses except count them.
-        # The format of a witness is determined by the read_outcome_file
-        # function; it's the platform and configuration joined by ';'.
-        self.successes = []
-        self.failures = []
-
-    def hits(self):
-        """Return the number of times a test case has been run.
-
-        This includes passes and failures, but not skips.
-        """
-        return len(self.successes) + len(self.failures)
-
 def execute_reference_driver_tests(results: Results, ref_component, driver_component, \
                                    outcome_file):
     """Run the tests specified in ref_component and driver_component.
     Results are stored in the output_file and they will be used for
@@ -82,7 +63,12 @@ def analyze_coverage(results, outcomes, allow_list, full_coverage):
     """Check that all available test cases are executed at least once."""
     available = check_test_cases.collect_available_test_cases()
     for key in available:
-        hits = outcomes[key].hits() if key in outcomes else 0
+        hits = 0
+        for _comp, comp_outcomes in outcomes.items():
+            if key in comp_outcomes["successes"] or \
+               key in comp_outcomes["failures"]:
+                hits += 1
+
         if hits == 0 and key not in allow_list:
             if full_coverage:
                 results.error('Test case not executed: {}', key)
@@ -117,8 +103,14 @@ def analyze_driver_vs_reference(results: Results, outcomes,
     - only some specific test inside a test suite, for which the corresponding
       output string is provided
     """
-    seen_reference_passing = False
-    for key in outcomes:
+    ref_outcomes = outcomes.get("component_" + component_ref)
+    driver_outcomes = outcomes.get("component_" + component_driver)
+
+    if ref_outcomes is None or not ref_outcomes['successes']:
+        results.error("no passing test in reference component: bad outcome file?")
+        return
+
+    for key in ref_outcomes["successes"]:
         # key is like "test_suite_foo.bar;Description of test case"
         (full_test_suite, test_string) = key.split(';')
         test_suite = full_test_suite.split('.')[0] # retrieve main part of test suite name
@@ -136,23 +128,11 @@ def analyze_driver_vs_reference(results: Results, outcomes,
                 if name_matches_pattern(test_string, str_or_re):
                     ignored = True
 
-        # Search for tests that run in reference component and not in driver component
-        driver_test_passed = False
-        reference_test_passed = False
-        for entry in outcomes[key].successes:
-            if component_driver in entry:
-                driver_test_passed = True
-            if component_ref in entry:
-                reference_test_passed = True
-                seen_reference_passing = True
-        if reference_test_passed and not driver_test_passed and not ignored:
+        if not ignored and not key in driver_outcomes['successes']:
             results.error("PASS -> SKIP/FAIL: {}", key)
-        if ignored and driver_test_passed:
+        if ignored and key in driver_outcomes['successes']:
             results.error("uselessly ignored: {}", key)
 
-    if not seen_reference_passing:
-        results.error("no passing test in reference component: bad outcome file?")
-
 def analyze_outcomes(results: Results, outcomes, args):
     """Run all analyses on the given outcome collection."""
     analyze_coverage(results, outcomes, args['allow_list'],
@@ -168,15 +148,19 @@ by a semicolon.
     outcomes = {}
     with open(outcome_file, 'r', encoding='utf-8') as input_file:
         for line in input_file:
-            (platform, config, suite, case, result, _cause) = line.split(';')
+            (_platform, config, suite, case, result, _cause) = line.split(';')
             key = ';'.join([suite, case])
-            setup = ';'.join([platform, config])
-            if key not in outcomes:
-                outcomes[key] = TestCaseOutcomes()
+            if config not in outcomes:
+                outcomes[config] = {"successes":[], "failures":[]}
             if result == 'PASS':
-                outcomes[key].successes.append(setup)
+                outcomes[config]['successes'].append(key)
             elif result == 'FAIL':
-                outcomes[key].failures.append(setup)
+                outcomes[config]['failures'].append(key)
+
+    for config in outcomes:
+        outcomes[config]['successes'] = frozenset(outcomes[config]['successes'])
+        outcomes[config]['failures'] = frozenset(outcomes[config]['failures'])
+
     return outcomes

From 31a9b7891adf28a7437b177cb547d2ffb58a8983 Mon Sep 17 00:00:00 2001
From: Pengyu Lv
Date: Thu, 23 Nov 2023 14:15:37 +0800
Subject: [PATCH 03/13] Improve comments and variable naming

Signed-off-by: Pengyu Lv
---
 tests/scripts/analyze_outcomes.py | 57 ++++++++++++++++++-------------
 1 file changed, 33 insertions(+), 24 deletions(-)

diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index 2cd6257d35..0baba1b7e9 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -62,24 +62,24 @@ def execute_reference_driver_tests(results: Results, ref_component, driver_compo
 def analyze_coverage(results, outcomes, allow_list, full_coverage):
     """Check that all available test cases are executed at least once."""
     available = check_test_cases.collect_available_test_cases()
-    for key in available:
+    for suite_case in available:
         hits = 0
         for _comp, comp_outcomes in outcomes.items():
-            if key in comp_outcomes["successes"] or \
-               key in comp_outcomes["failures"]:
+            if suite_case in comp_outcomes["successes"] or \
+               suite_case in comp_outcomes["failures"]:
                 hits += 1
 
-        if hits == 0 and key not in allow_list:
+        if hits == 0 and suite_case not in allow_list:
             if full_coverage:
-                results.error('Test case not executed: {}', key)
+                results.error('Test case not executed: {}', suite_case)
             else:
-                results.warning('Test case not executed: {}', key)
-        elif hits != 0 and key in allow_list:
+                results.warning('Test case not executed: {}', suite_case)
+        elif hits != 0 and suite_case in allow_list:
             # Test Case should be removed from the allow list.
             if full_coverage:
-                results.error('Allow listed test case was executed: {}', key)
+                results.error('Allow listed test case was executed: {}', suite_case)
             else:
-                results.warning('Allow listed test case was executed: {}', key)
+                results.warning('Allow listed test case was executed: {}', suite_case)
 
 def name_matches_pattern(name, str_or_re):
     """Check if name matches a pattern, that may be a string or regex.
@@ -96,8 +96,8 @@ def analyze_driver_vs_reference(results: Results, outcomes,
                                 component_ref, component_driver,
                                 ignored_suites, ignored_tests=None):
-    """Check that all tests executed in the reference component are also
-    executed in the corresponding driver component.
+    """Check that all tests passed in the reference component are also
+    passed in the corresponding driver component.
     Skip:
     - full test suites provided in ignored_suites list
     - only some specific test inside a test suite, for which the corresponding
@@ -110,9 +110,9 @@ def analyze_driver_vs_reference(results: Results, outcomes,
         results.error("no passing test in reference component: bad outcome file?")
         return
 
-    for key in ref_outcomes["successes"]:
-        # key is like "test_suite_foo.bar;Description of test case"
-        (full_test_suite, test_string) = key.split(';')
+    for suite_case in ref_outcomes["successes"]:
+        # suite_case is like "test_suite_foo.bar;Description of test case"
+        (full_test_suite, test_string) = suite_case.split(';')
         test_suite = full_test_suite.split('.')[0] # retrieve main part of test suite name
 
         # Immediately skip fully-ignored test suites
@@ -128,10 +128,10 @@ def analyze_driver_vs_reference(results: Results, outcomes,
                 if name_matches_pattern(test_string, str_or_re):
                     ignored = True
 
-        if not ignored and not key in driver_outcomes['successes']:
-            results.error("PASS -> SKIP/FAIL: {}", key)
-        if ignored and key in driver_outcomes['successes']:
-            results.error("uselessly ignored: {}", key)
+        if not ignored and not suite_case in driver_outcomes['successes']:
+            results.error("PASS -> SKIP/FAIL: {}", suite_case)
+        if ignored and suite_case in driver_outcomes['successes']:
+            results.error("uselessly ignored: {}", suite_case)
 
 def analyze_outcomes(results: Results, outcomes, args):
     """Run all analyses on the given outcome collection."""
@@ -141,22 +141,31 @@ def read_outcome_file(outcome_file):
     """Parse an outcome file and return an outcome collection.
 
-An outcome collection is a dictionary mapping keys to TestCaseOutcomes objects.
-The keys are the test suite name and the test case description, separated
-by a semicolon.
+An outcome collection is a dictionary presentation of the outcome file:
+```
+outcomes = {
+    "<config>": {
+        "successes": frozenset(["<suite_case>", ...]),
+        "failures": frozenset(["<suite_case>", ...])
+    }
+    ...
+}
+suite_case = "<suite>;<case>"
+```
 """
     outcomes = {}
     with open(outcome_file, 'r', encoding='utf-8') as input_file:
         for line in input_file:
             (_platform, config, suite, case, result, _cause) = line.split(';')
-            key = ';'.join([suite, case])
+            suite_case = ';'.join([suite, case])
             if config not in outcomes:
                 outcomes[config] = {"successes":[], "failures":[]}
             if result == 'PASS':
-                outcomes[config]['successes'].append(key)
+                outcomes[config]['successes'].append(suite_case)
             elif result == 'FAIL':
-                outcomes[config]['failures'].append(key)
+                outcomes[config]['failures'].append(suite_case)
 
+    # Convert `list` to `frozenset` to improve search performance
     for config in outcomes:
         outcomes[config]['successes'] = frozenset(outcomes[config]['successes'])
         outcomes[config]['failures'] = frozenset(outcomes[config]['failures'])

From dd1d6a7cca72bd65ac54dba85b1e7bf9a2f4cef3 Mon Sep 17 00:00:00 2001
From: Pengyu Lv
Date: Mon, 27 Nov 2023 17:57:31 +0800
Subject: [PATCH 04/13] Improve readability of the script

Signed-off-by: Pengyu Lv
---
 tests/scripts/analyze_outcomes.py | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index 0baba1b7e9..4d13676089 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -64,7 +64,7 @@ def analyze_coverage(results, outcomes, allow_list, full_coverage):
     available = check_test_cases.collect_available_test_cases()
     for suite_case in available:
         hits = 0
-        for _comp, comp_outcomes in outcomes.items():
+        for comp_outcomes in outcomes.values():
             if suite_case in comp_outcomes["successes"] or \
                suite_case in comp_outcomes["failures"]:
                 hits += 1
@@ -96,8 +96,8 @@ def analyze_driver_vs_reference(results: Results, outcomes,
                                 component_ref, component_driver,
                                 ignored_suites, ignored_tests=None):
-    """Check that all tests passed in the reference component are also
-    passed in the corresponding driver component.
+    """Check that all tests passing in the reference component are also
+    passing in the corresponding driver component.
     Skip:
     - full test suites provided in ignored_suites list
     - only some specific test inside a test suite, for which the corresponding
@@ -144,7 +144,7 @@ def read_outcome_file(outcome_file):
 An outcome collection is a dictionary presentation of the outcome file:
 ```
 outcomes = {
-    "<config>": {
+    "<component>": {
         "successes": frozenset(["<suite_case>", ...]),
         "failures": frozenset(["<suite_case>", ...])
     }
     ...
@@ -156,19 +156,19 @@ suite_case = "<suite>;<case>"
     outcomes = {}
     with open(outcome_file, 'r', encoding='utf-8') as input_file:
         for line in input_file:
-            (_platform, config, suite, case, result, _cause) = line.split(';')
+            (_platform, component, suite, case, result, _cause) = line.split(';')
             suite_case = ';'.join([suite, case])
-            if config not in outcomes:
-                outcomes[config] = {"successes":[], "failures":[]}
+            if component not in outcomes:
+                outcomes[component] = {"successes":[], "failures":[]}
             if result == 'PASS':
-                outcomes[config]['successes'].append(suite_case)
+                outcomes[component]['successes'].append(suite_case)
             elif result == 'FAIL':
-                outcomes[config]['failures'].append(suite_case)
+                outcomes[component]['failures'].append(suite_case)
 
     # Convert `list` to `frozenset` to improve search performance
-    for config in outcomes:
-        outcomes[config]['successes'] = frozenset(outcomes[config]['successes'])
-        outcomes[config]['failures'] = frozenset(outcomes[config]['failures'])
+    for component in outcomes:
+        outcomes[component]['successes'] = frozenset(outcomes[component]['successes'])
+        outcomes[component]['failures'] = frozenset(outcomes[component]['failures'])
 
     return outcomes
@@ -489,9 +489,9 @@ def main():
 
     KNOWN_TASKS['analyze_coverage']['args']['full_coverage'] = options.full_coverage
 
-    # If the outcome file already exists, we assume that the user wants to
-    # perform the comparison.
-    # Share the contents among tasks to improve performance.
+    # If the outcome file exists, parse it once and share the result
+    # among tasks to improve performance.
+    # Otherwise, it will be generated by do_analyze_driver_vs_reference.
     if os.path.exists(options.outcomes):
         main_results.info("Read outcome file from {}.", options.outcomes)
         outcomes_or_file = read_outcome_file(options.outcomes)

From f28cf594b1f297e2d6354c3de6f85d0ea2a32dca Mon Sep 17 00:00:00 2001
From: Pengyu Lv
Date: Tue, 28 Nov 2023 10:56:29 +0800
Subject: [PATCH 05/13] Break the loop when case hits

We don't care how many times a test case is hit, only whether it is
hit at all, so break out of the iteration on the first hit.
Signed-off-by: Pengyu Lv
---
 tests/scripts/analyze_outcomes.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index 4d13676089..488c96bbad 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -68,6 +68,7 @@ def analyze_coverage(results, outcomes, allow_list, full_coverage):
             if suite_case in comp_outcomes["successes"] or \
                suite_case in comp_outcomes["failures"]:
                 hits += 1
+                break
 
         if hits == 0 and suite_case not in allow_list:

From 59b9efc6dd89e761ac961798123363774c8e074d Mon Sep 17 00:00:00 2001
From: Pengyu Lv
Date: Tue, 28 Nov 2023 11:15:00 +0800
Subject: [PATCH 06/13] Check if driver_component is missing

Signed-off-by: Pengyu Lv
---
 tests/scripts/analyze_outcomes.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index 488c96bbad..2515b309e9 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -107,7 +107,11 @@ def analyze_driver_vs_reference(results: Results, outcomes,
     ref_outcomes = outcomes.get("component_" + component_ref)
     driver_outcomes = outcomes.get("component_" + component_driver)
 
-    if ref_outcomes is None or not ref_outcomes['successes']:
+    if ref_outcomes is None or driver_outcomes is None:
+        results.error("required components are missing: bad outcome file?")
+        return
+
+    if not ref_outcomes['successes']:
         results.error("no passing test in reference component: bad outcome file?")
         return

From 28ae4648a61504acdfde9758e81368f8a7ec54bd Mon Sep 17 00:00:00 2001
From: Pengyu Lv
Date: Tue, 28 Nov 2023 11:35:19 +0800
Subject: [PATCH 07/13] Use mutable set all the time

Signed-off-by: Pengyu Lv
---
 tests/scripts/analyze_outcomes.py | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index 2515b309e9..890c70dd64 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -164,16 +164,11 @@ suite_case = "<suite>;<case>"
             (_platform, component, suite, case, result, _cause) = line.split(';')
             suite_case = ';'.join([suite, case])
             if component not in outcomes:
-                outcomes[component] = {"successes":[], "failures":[]}
+                outcomes[component] = {"successes":set(), "failures":set()}
             if result == 'PASS':
-                outcomes[component]['successes'].append(suite_case)
+                outcomes[component]['successes'].add(suite_case)
             elif result == 'FAIL':
-                outcomes[component]['failures'].append(suite_case)
+                outcomes[component]['failures'].add(suite_case)
-
-    # Convert `list` to `frozenset` to improve search performance
-    for component in outcomes:
-        outcomes[component]['successes'] = frozenset(outcomes[component]['successes'])
-        outcomes[component]['failures'] = frozenset(outcomes[component]['failures'])
 
     return outcomes

From 18908ec2767a1557b908137ad37ebecc52eca932 Mon Sep 17 00:00:00 2001
From: Pengyu Lv
Date: Tue, 28 Nov 2023 12:11:52 +0800
Subject: [PATCH 08/13] Define named tuple for component outcomes

Signed-off-by: Pengyu Lv
---
 tests/scripts/analyze_outcomes.py | 42 +++++++++++++++++++++----------
 1 file changed, 29 insertions(+), 13 deletions(-)

diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index 890c70dd64..b52952458b 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -12,9 +12,14 @@
 import traceback
 import re
 import subprocess
 import os
+import typing
 
 import check_test_cases
 
+ComponentOutcomes = typing.NamedTuple('ComponentOutcomes',
+                                      [('successes', typing.Set[str]),
+                                       ('failures', typing.Set[str])])
+
 class Results:
     """Process analysis results."""
@@ -65,8 +70,8 @@ def analyze_coverage(results, outcomes, allow_list, full_coverage):
     for suite_case in available:
         hits = 0
         for comp_outcomes in outcomes.values():
-            if suite_case in comp_outcomes["successes"] or \
-               suite_case in comp_outcomes["failures"]:
+            if suite_case in comp_outcomes.successes or \
+               suite_case in comp_outcomes.failures:
                 hits += 1
                 break
@@ -111,11 +116,11 @@ def analyze_driver_vs_reference(results: Results, outcomes,
     ref_outcomes = outcomes.get("component_" + component_ref)
     driver_outcomes = outcomes.get("component_" + component_driver)
 
-    if not ref_outcomes['successes']:
+    if not ref_outcomes.successes:
         results.error("no passing test in reference component: bad outcome file?")
         return
 
-    for suite_case in ref_outcomes["successes"]:
+    for suite_case in ref_outcomes.successes:
         # suite_case is like "test_suite_foo.bar;Description of test case"
         (full_test_suite, test_string) = suite_case.split(';')
         test_suite = full_test_suite.split('.')[0] # retrieve main part of test suite name
@@ -133,9 +138,9 @@ def analyze_driver_vs_reference(results: Results, outcomes,
                 if name_matches_pattern(test_string, str_or_re):
                     ignored = True
 
-        if not ignored and not suite_case in driver_outcomes['successes']:
+        if not ignored and not suite_case in driver_outcomes.successes:
             results.error("PASS -> SKIP/FAIL: {}", suite_case)
-        if ignored and suite_case in driver_outcomes['successes']:
+        if ignored and suite_case in driver_outcomes.successes:
             results.error("uselessly ignored: {}", suite_case)
@@ -149,12 +154,23 @@ def read_outcome_file(outcome_file):
 An outcome collection is a dictionary presentation of the outcome file:
 ```
 outcomes = {
-    "<component>": {
-        "successes": frozenset(["<suite_case>", ...]),
-        "failures": frozenset(["<suite_case>", ...])
-    }
+    "<component>": ComponentOutcomes,
     ...
 }
+
+CompoentOutcomes is a named tuple which is defined as:
+
+ComponentOutcomes(
+    successes = {
+        <suite_case>,
+        ...
+    },
+    failures = {
+        <suite_case>,
+        ...
+    }
+)
+
 suite_case = "<suite>;<case>"
 ```
 """
@@ -164,11 +180,11 @@ suite_case = "<suite>;<case>"
             (_platform, component, suite, case, result, _cause) = line.split(';')
             suite_case = ';'.join([suite, case])
             if component not in outcomes:
-                outcomes[component] = {"successes":set(), "failures":set()}
+                outcomes[component] = ComponentOutcomes(set(), set())
             if result == 'PASS':
-                outcomes[component]['successes'].add(suite_case)
+                outcomes[component].successes.add(suite_case)
             elif result == 'FAIL':
-                outcomes[component]['failures'].add(suite_case)
+                outcomes[component].failures.add(suite_case)
 
     return outcomes

From 20e3ca391ed30347ed611e9bfe83600f3455ed4d Mon Sep 17 00:00:00 2001
From: Pengyu Lv
Date: Tue, 28 Nov 2023 15:30:03 +0800
Subject: [PATCH 09/13] Run tests for ref_vs_driver outside task function

Signed-off-by: Pengyu Lv
---
 tests/scripts/analyze_outcomes.py | 45 +++++++++++++++----------------
 1 file changed, 22 insertions(+), 23 deletions(-)

diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index b52952458b..4e925a18e4 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -50,11 +50,7 @@ def execute_reference_driver_tests(results: Results, ref_component, driver_compo
     """Run the tests specified in ref_component and driver_component.
     Results are stored in the output_file and they will be used for
     the following coverage analysis"""
-    # If the outcome file already exists, we assume that the user wants to
-    # perform the comparison analysis again without repeating the tests.
-    if os.path.exists(outcome_file):
-        results.info("Outcome file ({}) already exists. Tests will be skipped.", outcome_file)
-        return
+
     results.new_section("Test {} and {}", ref_component, driver_component)
     shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \
                     " " + ref_component + " " + driver_component
@@ -188,27 +184,18 @@ suite_case = "<suite>;<case>"
 
     return outcomes
 
-def do_analyze_coverage(results: Results, outcomes_or_file, args):
+def do_analyze_coverage(results: Results, outcomes, args):
     """Perform coverage analysis."""
     results.new_section("Analyze coverage")
-    outcomes = read_outcome_file(outcomes_or_file) \
-               if isinstance(outcomes_or_file, str) else outcomes_or_file
     analyze_outcomes(results, outcomes, args)
 
-def do_analyze_driver_vs_reference(results: Results, outcomes_or_file, args):
+def do_analyze_driver_vs_reference(results: Results, outcomes, args):
     """Perform driver vs reference analyze."""
     results.new_section("Analyze driver {} vs reference {}",
                         args['component_driver'], args['component_ref'])
 
     ignored_suites = ['test_suite_' + x for x in args['ignored_suites']]
 
-    if isinstance(outcomes_or_file, str):
-        execute_reference_driver_tests(results, args['component_ref'], \
-                                       args['component_driver'], outcomes_or_file)
-        outcomes = read_outcome_file(outcomes_or_file)
-    else:
-        outcomes = outcomes_or_file
-
     analyze_driver_vs_reference(results, outcomes, args['component_ref'],
                                 args['component_driver'],
                                 ignored_suites, args['ignored_tests'])
@@ -507,17 +494,29 @@ def main():
 
     # If the outcome file exists, parse it once and share the result
     # among tasks to improve performance.
-    # Otherwise, it will be generated by do_analyze_driver_vs_reference.
-    if os.path.exists(options.outcomes):
-        main_results.info("Read outcome file from {}.", options.outcomes)
-        outcomes_or_file = read_outcome_file(options.outcomes)
-    else:
-        outcomes_or_file = options.outcomes
+    # Otherwise, it will be generated by execute_reference_driver_tests.
+    if not os.path.exists(options.outcomes):
+        if len(tasks_list) > 1:
+            sys.stderr.write("multiple tasks found, please provide a valid outcomes file.\n")
+            sys.exit(2)
+
+        task_name = tasks_list[0]
+        task = KNOWN_TASKS[task_name]
+        if task['test_function'] != do_analyze_driver_vs_reference:
+            sys.stderr.write("please provide a valid outcomes file for {}.\n".format(task_name))
+            sys.exit(2)
+
+        execute_reference_driver_tests(main_results,
+                                       task['args']['component_ref'],
+                                       task['args']['component_driver'],
+                                       options.outcomes)
+
+    outcomes = read_outcome_file(options.outcomes)
 
     for task in tasks_list:
         test_function = KNOWN_TASKS[task]['test_function']
         test_args = KNOWN_TASKS[task]['args']
-        test_function(main_results, outcomes_or_file, test_args)
+        test_function(main_results, outcomes, test_args)
 
     main_results.info("Overall results: {} warnings and {} errors",
                       main_results.warning_count, main_results.error_count)

From c2e8f3a0800d720ea92a93e5c1911e9691694a3b Mon Sep 17 00:00:00 2001
From: Pengyu Lv
Date: Tue, 28 Nov 2023 17:22:04 +0800
Subject: [PATCH 10/13] Add type annotations to analyze_outcomes.py

Signed-off-by: Pengyu Lv
---
 tests/scripts/analyze_outcomes.py | 74 +++++++++++++++----------------
 1 file changed, 37 insertions(+), 37 deletions(-)

diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index 4e925a18e4..018d941113 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -16,10 +16,32 @@ import typing
 
 import check_test_cases
 
+
+# `CompoentOutcomes` is a named tuple which is defined as:
+# ComponentOutcomes(
+#     successes = {
+#         "<suite_case>",
+#         ...
+#     },
+#     failures = {
+#         "<suite_case>",
+#         ...
+#     }
+# )
+# suite_case = "<suite>;<case>"
 ComponentOutcomes = typing.NamedTuple('ComponentOutcomes',
                                       [('successes', typing.Set[str]),
                                        ('failures', typing.Set[str])])
 
+# `Outcomes` is a representation of the outcomes file,
+# which defined as:
+# Outcomes = {
+#     "<component>": ComponentOutcomes,
+#     ...
+# }
+Outcomes = typing.Dict[str, ComponentOutcomes]
+
+
 class Results:
     """Process analysis results."""
@@ -45,8 +67,8 @@ class Results:
     def _print_line(fmt, *args, **kwargs):
         sys.stderr.write((fmt + '\n').format(*args, **kwargs))
 
-def execute_reference_driver_tests(results: Results, ref_component, driver_component, \
-                                   outcome_file):
+def execute_reference_driver_tests(results: Results, ref_component: str, driver_component: str, \
+                                   outcome_file: str) -> None:
     """Run the tests specified in ref_component and driver_component.
 
     Results are stored in the output_file and they will be used for
     the following coverage analysis"""
@@ -60,7 +82,8 @@ def execute_reference_driver_tests(results: Results, ref_component, driver_compo
     if ret_val != 0:
         results.error("failed to run reference/driver components")
 
-def analyze_coverage(results, outcomes, allow_list, full_coverage):
+def analyze_coverage(results: Results, outcomes: Outcomes,
+                     allow_list: typing.List[str], full_coverage: bool) -> None:
     """Check that all available test cases are executed at least once."""
     available = check_test_cases.collect_available_test_cases()
     for suite_case in available:
@@ -83,7 +106,7 @@ def analyze_coverage(results, outcomes, allow_list, full_coverage):
             else:
                 results.warning('Allow listed test case was executed: {}', suite_case)
 
-def name_matches_pattern(name, str_or_re):
+def name_matches_pattern(name: str, str_or_re) -> bool:
     """Check if name matches a pattern, that may be a string or regex.
     - If the pattern is a string, name must be equal to match.
     - If the pattern is a regex, name must fully match.
@@ -91,13 +114,13 @@ def name_matches_pattern(name, str_or_re):
     # The CI's python is too old for re.Pattern
     #if isinstance(str_or_re, re.Pattern):
     if not isinstance(str_or_re, str):
-        return str_or_re.fullmatch(name)
+        return str_or_re.fullmatch(name) is not None
     else:
         return str_or_re == name
 
-def analyze_driver_vs_reference(results: Results, outcomes,
-                                component_ref, component_driver,
-                                ignored_suites, ignored_tests=None):
+def analyze_driver_vs_reference(results: Results, outcomes: Outcomes,
+                                component_ref: str, component_driver: str,
+                                ignored_suites: typing.List[str], ignored_tests=None) -> None:
     """Check that all tests passing in the reference component are also
     passing in the corresponding driver component.
@@ -139,37 +162,14 @@ def analyze_driver_vs_reference(results: Results, outcomes,
         if ignored and suite_case in driver_outcomes.successes:
             results.error("uselessly ignored: {}", suite_case)
 
-def analyze_outcomes(results: Results, outcomes, args):
+def analyze_outcomes(results: Results, outcomes: Outcomes, args) -> None:
     """Run all analyses on the given outcome collection."""
     analyze_coverage(results, outcomes, args['allow_list'],
                      args['full_coverage'])
 
-def read_outcome_file(outcome_file):
+def read_outcome_file(outcome_file: str) -> Outcomes:
     """Parse an outcome file and return an outcome collection.
-
-An outcome collection is a dictionary presentation of the outcome file:
-```
-outcomes = {
-    "<component>": ComponentOutcomes,
-    ...
-}
-
-CompoentOutcomes is a named tuple which is defined as:
-
-ComponentOutcomes(
-    successes = {
-        <suite_case>,
-        ...
-    },
-    failures = {
-        <suite_case>,
-        ...
-    }
-)
-
-suite_case = "<suite>;<case>"
-```
-"""
+    """
     outcomes = {}
     with open(outcome_file, 'r', encoding='utf-8') as input_file:
         for line in input_file:
@@ -184,12 +184,12 @@ suite_case = "<suite>;<case>"
 
     return outcomes
 
-def do_analyze_coverage(results: Results, outcomes, args):
+def do_analyze_coverage(results: Results, outcomes: Outcomes, args) -> None:
     """Perform coverage analysis."""
     results.new_section("Analyze coverage")
     analyze_outcomes(results, outcomes, args)
 
-def do_analyze_driver_vs_reference(results: Results, outcomes, args):
+def do_analyze_driver_vs_reference(results: Results, outcomes: Outcomes, args) -> None:
     """Perform driver vs reference analyze."""
     results.new_section("Analyze driver {} vs reference {}",
                         args['component_driver'], args['component_ref'])
@@ -502,7 +502,7 @@ def main():
 
         task_name = tasks_list[0]
         task = KNOWN_TASKS[task_name]
-        if task['test_function'] != do_analyze_driver_vs_reference:
+        if task['test_function'] != do_analyze_driver_vs_reference: # pylint: disable=comparison-with-callable
             sys.stderr.write("please provide a valid outcomes file for {}.\n".format(task_name))
             sys.exit(2)

From 451ec8a4bca2eea57304013ade53b403d59a3b5a Mon Sep 17 00:00:00 2001
From: Pengyu Lv
Date: Tue, 28 Nov 2023 17:59:05 +0800
Subject: [PATCH 11/13] Add comment to read_outcome_file in analyze_outcomes.py

Signed-off-by: Pengyu Lv
---
 tests/scripts/analyze_outcomes.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index 018d941113..02aac225fc 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -174,6 +174,9 @@ def read_outcome_file(outcome_file: str) -> Outcomes:
     with open(outcome_file, 'r', encoding='utf-8') as input_file:
         for line in input_file:
             (_platform, component, suite, case, result, _cause) = line.split(';')
+            # Note that `component` is not unique. If a test case passes on Linux
+            # and fails on FreeBSD, it'll end up in both the successes set and
+            # the failures set.
             suite_case = ';'.join([suite, case])
             if component not in outcomes:
                 outcomes[component] = ComponentOutcomes(set(), set())

From 550cd6f9b2a5773060ec87926dcdcdd26148d1a3 Mon Sep 17 00:00:00 2001
From: Pengyu Lv
Date: Wed, 29 Nov 2023 09:17:59 +0800
Subject: [PATCH 12/13] Use boolean `hit` instead of int `hits`

Also fix a typo in the comments.

Signed-off-by: Pengyu Lv
---
 tests/scripts/analyze_outcomes.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index 02aac225fc..52059bda09 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -17,7 +17,7 @@ import typing
 
 import check_test_cases
 
-# `CompoentOutcomes` is a named tuple which is defined as:
+# `ComponentOutcomes` is a named tuple which is defined as:
 # ComponentOutcomes(
 #     successes = {
 #         "<suite_case>",
@@ -87,19 +87,19 @@ def analyze_coverage(results: Results, outcomes: Outcomes,
     """Check that all available test cases are executed at least once."""
     available = check_test_cases.collect_available_test_cases()
     for suite_case in available:
-        hits = 0
+        hit = False
         for comp_outcomes in outcomes.values():
             if suite_case in comp_outcomes.successes or \
               suite_case in comp_outcomes.failures:
-                hits += 1
+                hit = True
                 break
 
-        if hits == 0 and suite_case not in allow_list:
+        if hit == 0 and suite_case not in allow_list:
             if full_coverage:
                 results.error('Test case not executed: {}', suite_case)
             else:
                 results.warning('Test case not executed: {}', suite_case)
-        elif hits != 0 and suite_case in allow_list:
+        elif hit != 0 and suite_case in allow_list:
             # Test Case should be removed from the allow list.
             if full_coverage:
                 results.error('Allow listed test case was executed: {}', suite_case)

From 5dcfd0c613eb31a20e02d901fdb26d72262b9835 Mon Sep 17 00:00:00 2001
From: Pengyu Lv
Date: Wed, 29 Nov 2023 18:03:28 +0800
Subject: [PATCH 13/13] Simplify hit detection in analyze_coverage

Signed-off-by: Pengyu Lv
---
 tests/scripts/analyze_outcomes.py | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index 52059bda09..0d8289bdaa 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -87,19 +87,16 @@ def analyze_coverage(results: Results, outcomes: Outcomes,
     """Check that all available test cases are executed at least once."""
     available = check_test_cases.collect_available_test_cases()
     for suite_case in available:
-        hit = False
-        for comp_outcomes in outcomes.values():
-            if suite_case in comp_outcomes.successes or \
-               suite_case in comp_outcomes.failures:
-                hit = True
-                break
+        hit = any(suite_case in comp_outcomes.successes or
+                  suite_case in comp_outcomes.failures
+                  for comp_outcomes in outcomes.values())
 
-        if hit == 0 and suite_case not in allow_list:
+        if not hit and suite_case not in allow_list:
            if full_coverage:
                 results.error('Test case not executed: {}', suite_case)
             else:
                 results.warning('Test case not executed: {}', suite_case)
-        elif hit != 0 and suite_case in allow_list:
+        elif hit and suite_case in allow_list:
             # Test Case should be removed from the allow list.
             if full_coverage:
                 results.error('Allow listed test case was executed: {}', suite_case)
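
The net effect of the series: read_outcome_file returns a Dict[str, ComponentOutcomes]
keyed by component name, parsed once and shared across all tasks, and both analyses
reduce to set-membership checks on that collection. The stand-alone sketch below
illustrates the final data model; the read_outcome_lines helper and the sample rows
are illustrative assumptions, not code from the patches.

    # Sketch of the outcome-collection model the series converges on.
    # The helper name and the sample rows are hypothetical.
    import typing

    ComponentOutcomes = typing.NamedTuple('ComponentOutcomes',
                                          [('successes', typing.Set[str]),
                                           ('failures', typing.Set[str])])
    Outcomes = typing.Dict[str, ComponentOutcomes]

    def read_outcome_lines(lines: typing.Iterable[str]) -> Outcomes:
        """Group suite_case keys by component, split into PASS/FAIL sets."""
        outcomes = {}  # type: Outcomes
        for line in lines:
            (_platform, component, suite, case, result, _cause) = line.strip().split(';')
            suite_case = ';'.join([suite, case])
            if component not in outcomes:
                outcomes[component] = ComponentOutcomes(set(), set())
            if result == 'PASS':
                outcomes[component].successes.add(suite_case)
            elif result == 'FAIL':
                outcomes[component].failures.add(suite_case)
        return outcomes

    sample = [
        'Linux;component_ref;test_suite_foo;case A;PASS;',
        'Linux;component_driver;test_suite_foo;case A;FAIL;',
        'Linux;component_ref;test_suite_foo;case B;PASS;',
    ]
    outcomes = read_outcome_lines(sample)

    # Coverage check as in patch 13: a test case counts as hit if any
    # component ran it at all, whether it passed or failed.
    hit = any('test_suite_foo;case B' in co.successes or
              'test_suite_foo;case B' in co.failures
              for co in outcomes.values())
    assert hit

    # Driver vs reference: regressions are reference passes that are missing
    # from the driver's successes (ignore lists omitted for brevity).
    regressions = (outcomes['component_ref'].successes -
                   outcomes['component_driver'].successes)
    print(sorted(regressions))
    # -> ['test_suite_foo;case A', 'test_suite_foo;case B']

Because ComponentOutcomes is a named tuple holding mutable sets, the parser can
fill it in place, while readers get attribute access (co.successes) instead of
dictionary-key lookups, which is what patches 07 and 08 trade off against the
earlier frozenset conversion pass.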