Mirror of https://github.com/Mbed-TLS/mbedtls.git (synced 2025-04-18 05:42:35 +00:00)
analyze_outcomes: improve logging system
- The script now only terminates in case of hard faults.
- Each task is assigned a log.
- This log tracks messages, warnings and errors.
- When a task completes, its errors and warnings are listed and its messages are appended to the main log.
- On exit, the main log is printed and the proper return value is returned.

Signed-off-by: Valerio Setti <valerio.setti@nordicsemi.no>
This commit is contained in:
parent dfd7ca6344
commit aaef0bc172
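The workflow described in the commit message can be pictured with a minimal, self-contained sketch. It is illustrative only: the class and method names (TestLog, info, error, print_output) follow the diff below, but the task list, the task body and the failure condition are invented placeholders, and the real script is considerably more involved.

import sys

class TestLog:
    """Collect messages, warnings and errors for one task (simplified)."""
    def __init__(self):
        self.error_count = 0
        self.warning_count = 0
        self.output = ""

    def info(self, fmt, *args, **kwargs):
        # Buffer the message instead of writing it to stderr immediately.
        self.output += (fmt + '\n').format(*args, **kwargs)

    def error(self, fmt, *args, **kwargs):
        self.info('Error: ' + fmt, *args, **kwargs)
        self.error_count += 1

    def print_output(self):
        sys.stderr.write(self.output)

def run_task(name):
    """Placeholder task: it reports problems in its own TestLog instead of exiting."""
    log = TestLog()
    log.info("*** {} ***", name)
    if name == "broken_task":              # made-up failure condition
        log.error("something went wrong in {}", name)
    return log

def main():
    main_log = TestLog()
    all_succeeded = True
    for task in ("analyze_coverage", "broken_task"):   # hypothetical task list
        task_log = run_task(task)
        main_log.output += task_log.output              # append task messages to the main log
        main_log.info("Task {} completed with {} errors", task, task_log.error_count)
        if task_log.error_count != 0:
            all_succeeded = False
    main_log.print_output()                             # main log printed on exit
    sys.exit(0 if all_succeeded else 1)                 # proper return value

if __name__ == "__main__":
    main()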
@@ -15,25 +15,31 @@ import os
 
 import check_test_cases
 
-class Results:
+class TestLog:
     """Process analysis results."""
     def __init__(self):
         self.error_count = 0
         self.warning_count = 0
+        self.output = ""
 
-    @staticmethod
-    def log(fmt, *args, **kwargs):
-        sys.stderr.write((fmt + '\n').format(*args, **kwargs))
+    def add_line(self, fmt, *args, **kwargs):
+        self.output = self.output + (fmt + '\n').format(*args, **kwargs)
+
+    def info(self, fmt, *args, **kwargs):
+        self.add_line(fmt, *args, **kwargs)
 
     def error(self, fmt, *args, **kwargs):
-        self.log('Error: ' + fmt, *args, **kwargs)
+        self.info('Error: ' + fmt, *args, **kwargs)
         self.error_count += 1
 
     def warning(self, fmt, *args, **kwargs):
-        self.log('Warning: ' + fmt, *args, **kwargs)
+        self.info('Warning: ' + fmt, *args, **kwargs)
         self.warning_count += 1
 
+    def print_output(self):
+        sys.stderr.write(self.output)
+
 class TestCaseOutcomes:
     """The outcomes of one test case across many configurations."""
     # pylint: disable=too-few-public-methods
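Assuming the TestLog class from the hunk above is in scope (for example pasted into a Python session), a short usage sketch follows; the message texts and the test-case name are made up for illustration.

log = TestLog()
log.info("Running: {}", "tests/scripts/all.sh")                     # buffered, not printed yet
log.warning("allow-listed test case was executed: {}", "x509_parse_example")
log.error("failed to run reference/driver components")

# Nothing has been written to stderr so far; the caller decides when to flush
# the buffered output and how to interpret the counters.
log.print_output()
print("warnings:", log.warning_count, "errors:", log.error_count)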
@@ -53,25 +59,27 @@ class TestCaseOutcomes:
         """
         return len(self.successes) + len(self.failures)
 
-def execute_reference_driver_tests(ref_component, driver_component, outcome_file):
+def execute_reference_driver_tests(log: TestLog, ref_component, driver_component, \
+                                   outcome_file) -> TestLog:
     """Run the tests specified in ref_component and driver_component. Results
     are stored in the output_file and they will be used for the following
     coverage analysis"""
     # If the outcome file already exists, we assume that the user wants to
     # perform the comparison analysis again without repeating the tests.
     if os.path.exists(outcome_file):
-        Results.log("Outcome file (" + outcome_file + ") already exists. " + \
+        log.info("Outcome file (" + outcome_file + ") already exists. " + \
                     "Tests will be skipped.")
-        return
+        return log
 
     shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \
                     " " + ref_component + " " + driver_component
-    Results.log("Running: " + shell_command)
+    log.info("Running: " + shell_command)
     ret_val = subprocess.run(shell_command.split(), check=False).returncode
 
     if ret_val != 0:
-        Results.log("Error: failed to run reference/driver components")
-        sys.exit(ret_val)
+        log.error("failed to run reference/driver components")
+
+    return log
 
 def analyze_coverage(results, outcomes, allow_list, full_coverage):
     """Check that all available test cases are executed at least once."""
@@ -90,7 +98,8 @@ def analyze_coverage(results, outcomes, allow_list, full_coverage):
         else:
             results.warning('Allow listed test case was executed: {}', key)
 
-def analyze_driver_vs_reference(outcomes, component_ref, component_driver,
+def analyze_driver_vs_reference(log: TestLog, outcomes,
+                                component_ref, component_driver,
                                 ignored_suites, ignored_test=None):
     """Check that all tests executed in the reference component are also
     executed in the corresponding driver component.
@@ -100,7 +109,6 @@ def analyze_driver_vs_reference(outcomes, component_ref, component_driver,
     output string is provided
     """
     available = check_test_cases.collect_available_test_cases()
-    result = True
 
     for key in available:
         # Continue if test was not executed by any component
@@ -125,16 +133,15 @@ def analyze_driver_vs_reference(outcomes, component_ref, component_driver,
             if component_ref in entry:
                 reference_test_passed = True
         if(reference_test_passed and not driver_test_passed):
-            Results.log(key)
-            result = False
-    return result
+            log.error(key)
+
+    return log
 
 
-def analyze_outcomes(outcomes, args):
+def analyze_outcomes(log: TestLog, outcomes, args) -> TestLog:
     """Run all analyses on the given outcome collection."""
-    results = Results()
-    analyze_coverage(results, outcomes, args['allow_list'],
+    analyze_coverage(log, outcomes, args['allow_list'],
                      args['full_coverage'])
-    return results
+    return log
 
 def read_outcome_file(outcome_file):
     """Parse an outcome file and return an outcome collection.
@@ -159,24 +166,32 @@ by a semicolon.
 
 def do_analyze_coverage(outcome_file, args):
     """Perform coverage analysis."""
+    log = TestLog()
+    log.info("\n*** Analyze coverage ***\n")
     outcomes = read_outcome_file(outcome_file)
-    Results.log("\n*** Analyze coverage ***\n")
-    results = analyze_outcomes(outcomes, args)
-    return results.error_count == 0
+    log = analyze_outcomes(log, outcomes, args)
+    return log
 
 def do_analyze_driver_vs_reference(outcome_file, args):
     """Perform driver vs reference analyze."""
-    execute_reference_driver_tests(args['component_ref'], \
-                                   args['component_driver'], outcome_file)
+    log = TestLog()
+
+    log = execute_reference_driver_tests(log, args['component_ref'], \
+                                         args['component_driver'], outcome_file)
+    if log.error_count != 0:
+        return log
 
     ignored_suites = ['test_suite_' + x for x in args['ignored_suites']]
 
     outcomes = read_outcome_file(outcome_file)
-    Results.log("\n*** Analyze driver {} vs reference {} ***\n".format(
+
+    log.info("\n*** Analyze driver {} vs reference {} ***\n".format(
         args['component_driver'], args['component_ref']))
-    return analyze_driver_vs_reference(outcomes, args['component_ref'],
-                                       args['component_driver'], ignored_suites,
-                                       args['ignored_tests'])
+    log = analyze_driver_vs_reference(log, outcomes,
+                                      args['component_ref'], args['component_driver'],
+                                      ignored_suites, args['ignored_tests'])
+
+    return log
 
 # List of tasks with a function that can handle this task and additional arguments if required
 KNOWN_TASKS = {
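For context, the hunks above and below reference KNOWN_TASKS[task]['test_function'], KNOWN_TASKS[task]['args'] and KNOWN_TASKS['analyze_coverage']['args']['full_coverage'], but the table itself (several hundred lines into the script) is not part of this diff. A hypothetical entry, shaped only from the keys that appear in the diff and assuming the two task functions above are in scope, might look like this; the task name and all concrete values are placeholders.

# Assumed shape of KNOWN_TASKS entries; the real table in the script is much larger.
KNOWN_TASKS = {
    'analyze_coverage': {
        'test_function': do_analyze_coverage,        # presumably the function defined above
        'args': {
            'allow_list': [],                        # test cases allowed to never run
            'full_coverage': False,                  # overwritten from the command-line option in main()
        },
    },
    'analyze_driver_vs_reference_example': {         # hypothetical task name
        'test_function': do_analyze_driver_vs_reference,
        'args': {
            'component_ref': 'reference_component_example',    # placeholder component names
            'component_driver': 'driver_component_example',
            'ignored_suites': [],
            'ignored_tests': [],
        },
    },
}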
@@ -641,6 +656,8 @@ KNOWN_TASKS = {
 }
 
 def main():
+    main_log = TestLog()
+
     try:
         parser = argparse.ArgumentParser(description=__doc__)
         parser.add_argument('outcomes', metavar='OUTCOMES.CSV',
@@ -661,16 +678,17 @@ def main():
 
         if options.list:
             for task in KNOWN_TASKS:
-                Results.log(task)
+                main_log.info(task)
+            main_log.print_output()
             sys.exit(0)
 
         if options.specified_tasks == 'all':
             tasks_list = KNOWN_TASKS.keys()
         else:
             tasks_list = re.split(r'[, ]+', options.specified_tasks)
 
         for task in tasks_list:
             if task not in KNOWN_TASKS:
+                main_log.error('invalid task: {}'.format(task))
 
         KNOWN_TASKS['analyze_coverage']['args']['full_coverage'] = options.full_coverage
@@ -678,12 +696,20 @@ def main():
 
         for task in KNOWN_TASKS:
             if task in tasks_list:
-                if not KNOWN_TASKS[task]['test_function'](options.outcomes, KNOWN_TASKS[task]['args']):
+                test_function = KNOWN_TASKS[task]['test_function']
+                test_args = KNOWN_TASKS[task]['args']
+                test_log = test_function(options.outcomes, test_args)
+                # Merge the output of this task with the main one
+                main_log.output = main_log.output + test_log.output
+                main_log.info("Task {} completed with:\n".format(task) + \
+                              "{} warnings\n".format(test_log.warning_count) + \
+                              "{} errors\n".format(test_log.error_count))
+                if test_log.error_count != 0:
                     all_succeeded = False
 
-        if all_succeeded is False:
-            sys.exit(1)
-        Results.log("SUCCESS :-)")
+        main_log.print_output()
+        sys.exit(0 if all_succeeded else 1)
 
     except Exception: # pylint: disable=broad-except
         # Print the backtrace and exit explicitly with our chosen status.
         traceback.print_exc()