Merge pull request #8335 from valeriosetti/issue8301

Fix error reporting in driver testing parity
commit fb72969396
Manuel Pégourié-Gonnard, 2023-10-19 07:31:48 +00:00 (committed by GitHub)


@@ -22,17 +22,23 @@ class Results:
         self.error_count = 0
         self.warning_count = 0
 
-    @staticmethod
-    def log(fmt, *args, **kwargs):
-        sys.stderr.write((fmt + '\n').format(*args, **kwargs))
+    def new_section(self, fmt, *args, **kwargs):
+        self._print_line('\n*** ' + fmt + ' ***\n', *args, **kwargs)
+
+    def info(self, fmt, *args, **kwargs):
+        self._print_line('Info: ' + fmt, *args, **kwargs)
 
     def error(self, fmt, *args, **kwargs):
-        self.log('Error: ' + fmt, *args, **kwargs)
         self.error_count += 1
+        self._print_line('Error: ' + fmt, *args, **kwargs)
 
     def warning(self, fmt, *args, **kwargs):
-        self.log('Warning: ' + fmt, *args, **kwargs)
         self.warning_count += 1
+        self._print_line('Warning: ' + fmt, *args, **kwargs)
+
+    @staticmethod
+    def _print_line(fmt, *args, **kwargs):
+        sys.stderr.write((fmt + '\n').format(*args, **kwargs))
 
 class TestCaseOutcomes:
     """The outcomes of one test case across many configurations."""
@@ -53,25 +59,24 @@ class TestCaseOutcomes:
         """
         return len(self.successes) + len(self.failures)
 
-def execute_reference_driver_tests(ref_component, driver_component, outcome_file):
+def execute_reference_driver_tests(results: Results, ref_component, driver_component, \
+                                   outcome_file):
     """Run the tests specified in ref_component and driver_component. Results
     are stored in the output_file and they will be used for the following
     coverage analysis"""
     # If the outcome file already exists, we assume that the user wants to
     # perform the comparison analysis again without repeating the tests.
     if os.path.exists(outcome_file):
-        Results.log("Outcome file (" + outcome_file + ") already exists. " + \
-                    "Tests will be skipped.")
+        results.info("Outcome file ({}) already exists. Tests will be skipped.", outcome_file)
         return
 
     shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \
                     " " + ref_component + " " + driver_component
-    Results.log("Running: " + shell_command)
+    results.info("Running: {}", shell_command)
     ret_val = subprocess.run(shell_command.split(), check=False).returncode
 
     if ret_val != 0:
-        Results.log("Error: failed to run reference/driver components")
-        sys.exit(ret_val)
+        results.error("failed to run reference/driver components")
 
 def analyze_coverage(results, outcomes, allow_list, full_coverage):
     """Check that all available test cases are executed at least once."""
@@ -90,7 +95,8 @@ def analyze_coverage(results, outcomes, allow_list, full_coverage):
         else:
             results.warning('Allow listed test case was executed: {}', key)
 
-def analyze_driver_vs_reference(outcomes, component_ref, component_driver,
+def analyze_driver_vs_reference(results: Results, outcomes,
+                                component_ref, component_driver,
                                 ignored_suites, ignored_test=None):
     """Check that all tests executed in the reference component are also
     executed in the corresponding driver component.
@@ -100,7 +106,6 @@ def analyze_driver_vs_reference(outcomes, component_ref, component_driver,
     output string is provided
     """
     available = check_test_cases.collect_available_test_cases()
-    result = True
 
     for key in available:
         # Continue if test was not executed by any component
@@ -125,16 +130,12 @@ def analyze_driver_vs_reference(outcomes, component_ref, component_driver,
                 if component_ref in entry:
                     reference_test_passed = True
         if(reference_test_passed and not driver_test_passed):
-            Results.log(key)
-            result = False
-    return result
+            results.error("Did not pass with driver: {}", key)
 
-def analyze_outcomes(outcomes, args):
+def analyze_outcomes(results: Results, outcomes, args):
     """Run all analyses on the given outcome collection."""
-    results = Results()
     analyze_coverage(results, outcomes, args['allow_list'],
                      args['full_coverage'])
-    return results
 
 def read_outcome_file(outcome_file):
     """Parse an outcome file and return an outcome collection.
@@ -157,29 +158,30 @@ by a semicolon.
             outcomes[key].failures.append(setup)
     return outcomes
 
-def do_analyze_coverage(outcome_file, args):
+def do_analyze_coverage(results: Results, outcome_file, args):
     """Perform coverage analysis."""
+    results.new_section("Analyze coverage")
     outcomes = read_outcome_file(outcome_file)
-    Results.log("\n*** Analyze coverage ***\n")
-    results = analyze_outcomes(outcomes, args)
-    return results.error_count == 0
+    analyze_outcomes(results, outcomes, args)
 
-def do_analyze_driver_vs_reference(outcome_file, args):
+def do_analyze_driver_vs_reference(results: Results, outcome_file, args):
     """Perform driver vs reference analyze."""
-    execute_reference_driver_tests(args['component_ref'], \
-                                   args['component_driver'], outcome_file)
+    results.new_section("Analyze driver {} vs reference {}",
+                        args['component_driver'], args['component_ref'])
+
+    execute_reference_driver_tests(results, args['component_ref'], \
+                                   args['component_driver'], outcome_file)
 
     ignored_suites = ['test_suite_' + x for x in args['ignored_suites']]
 
     outcomes = read_outcome_file(outcome_file)
-    Results.log("\n*** Analyze driver {} vs reference {} ***\n".format(
-        args['component_driver'], args['component_ref']))
-    return analyze_driver_vs_reference(outcomes, args['component_ref'],
-                                       args['component_driver'], ignored_suites,
-                                       args['ignored_tests'])
+
+    analyze_driver_vs_reference(results, outcomes,
+                                args['component_ref'], args['component_driver'],
+                                ignored_suites, args['ignored_tests'])
 
 # List of tasks with a function that can handle this task and additional arguments if required
-TASKS = {
+KNOWN_TASKS = {
     'analyze_coverage': {
         'test_function': do_analyze_coverage,
         'args': {
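The rename from TASKS to KNOWN_TASKS comes with a uniform handler signature: every test_function now takes (results, outcome_file, args) and reports through results rather than returning a pass/fail boolean. A reduced sketch of that dispatch convention; the stub class, handler body, and filename are illustrative, and only the 'analyze_coverage' entry shown in the diff is included:

    class ResultsStub:
        """Minimal stand-in for the Results class from the first hunk."""
        error_count = 0
        def new_section(self, fmt, *args, **kwargs):
            print('*** ' + fmt.format(*args, **kwargs) + ' ***')

    def do_analyze_coverage(results, outcome_file, args):
        results.new_section("Analyze coverage")
        # real handler: outcomes = read_outcome_file(outcome_file)
        #               analyze_outcomes(results, outcomes, args)

    KNOWN_TASKS = {
        'analyze_coverage': {
            'test_function': do_analyze_coverage,
            'args': {'allow_list': [], 'full_coverage': False},
        },
    }

    results = ResultsStub()
    for task, entry in KNOWN_TASKS.items():
        entry['test_function'](results, 'outcomes.csv', entry['args'])  # filename illustrative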
@@ -641,11 +643,13 @@ TASKS = {
 }
 
 def main():
+    main_results = Results()
+
     try:
         parser = argparse.ArgumentParser(description=__doc__)
         parser.add_argument('outcomes', metavar='OUTCOMES.CSV',
                             help='Outcome file to analyze')
-        parser.add_argument('task', default='all', nargs='?',
+        parser.add_argument('specified_tasks', default='all', nargs='?',
                             help='Analysis to be done. By default, run all tasks. '
                                  'With one or more TASK, run only those. '
                                  'TASK can be the name of a single task or '
@@ -660,33 +664,31 @@ def main():
         options = parser.parse_args()
 
         if options.list:
-            for task in TASKS:
-                Results.log(task)
+            for task in KNOWN_TASKS:
+                print(task)
             sys.exit(0)
 
-        result = True
-
-        if options.task == 'all':
-            tasks = TASKS.keys()
+        if options.specified_tasks == 'all':
+            tasks_list = KNOWN_TASKS.keys()
         else:
-            tasks = re.split(r'[, ]+', options.task)
+            tasks_list = re.split(r'[, ]+', options.specified_tasks)
 
-        for task in tasks:
-            if task not in TASKS:
-                Results.log('Error: invalid task: {}'.format(task))
-                sys.exit(1)
+        for task in tasks_list:
+            if task not in KNOWN_TASKS:
+                sys.stderr.write('invalid task: {}'.format(task))
+                sys.exit(2)
 
-        TASKS['analyze_coverage']['args']['full_coverage'] = \
-            options.full_coverage
+        KNOWN_TASKS['analyze_coverage']['args']['full_coverage'] = options.full_coverage
 
-        for task in TASKS:
-            if task in tasks:
-                if not TASKS[task]['test_function'](options.outcomes, TASKS[task]['args']):
-                    result = False
+        for task in tasks_list:
+            test_function = KNOWN_TASKS[task]['test_function']
+            test_args = KNOWN_TASKS[task]['args']
+            test_function(main_results, options.outcomes, test_args)
 
-        if result is False:
-            sys.exit(1)
-        Results.log("SUCCESS :-)")
+        main_results.info("Overall results: {} warnings and {} errors",
+                          main_results.warning_count, main_results.error_count)
+
+        sys.exit(0 if (main_results.error_count == 0) else 1)
     except Exception: # pylint: disable=broad-except
         # Print the backtrace and exit explicitly with our chosen status.
         traceback.print_exc()
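With every handler writing into the one main_results object, the exit status is now computed in a single place: 0 when no errors were recorded, 1 when any analysis error occurred (warnings alone do not fail the run), and 2 for an invalid task name on the command line. A condensed sketch of that tail-end logic, assuming a Results-like object as in the first hunk:

    import sys

    def finish(results):
        # Mirrors the new end of main(): summarize, then derive the exit
        # status from the aggregated counters instead of per-task booleans.
        results.info("Overall results: {} warnings and {} errors",
                     results.warning_count, results.error_count)
        sys.exit(0 if results.error_count == 0 else 1)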