Merge pull request #9208 from davidhorstmann-arm/move-test-generation-files-3.6
[Backport 3.6] Move test generation files to framework
Commit 2cfa6c9b45
@@ -323,11 +323,11 @@ if(ENABLE_TESTING OR ENABLE_PROGRAMS)
 ${CMAKE_CURRENT_SOURCE_DIR}/tests
 COMMAND
 "${MBEDTLS_PYTHON_EXECUTABLE}"
-"${CMAKE_CURRENT_SOURCE_DIR}/tests/scripts/generate_test_keys.py"
+"${CMAKE_CURRENT_SOURCE_DIR}/framework/scripts/generate_test_keys.py"
 "--output"
 "${CMAKE_CURRENT_SOURCE_DIR}/tests/src/test_keys.h"
 DEPENDS
-${CMAKE_CURRENT_SOURCE_DIR}/tests/scripts/generate_test_keys.py
+${CMAKE_CURRENT_SOURCE_DIR}/framework/scripts/generate_test_keys.py
 )
 add_custom_target(test_keys_header DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/tests/src/test_keys.h)
 add_custom_command(
@@ -337,11 +337,11 @@ if(ENABLE_TESTING OR ENABLE_PROGRAMS)
 ${CMAKE_CURRENT_SOURCE_DIR}/tests
 COMMAND
 "${MBEDTLS_PYTHON_EXECUTABLE}"
-"${CMAKE_CURRENT_SOURCE_DIR}/tests/scripts/generate_test_cert_macros.py"
+"${CMAKE_CURRENT_SOURCE_DIR}/framework/scripts/generate_test_cert_macros.py"
 "--output"
 "${CMAKE_CURRENT_SOURCE_DIR}/tests/src/test_certs.h"
 DEPENDS
-${CMAKE_CURRENT_SOURCE_DIR}/tests/scripts/generate_test_cert_macros.py
+${CMAKE_CURRENT_SOURCE_DIR}/framework/scripts/generate_test_cert_macros.py
 )
 add_custom_target(test_certs_header DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/tests/src/test_certs.h)
 add_dependencies(mbedtls_test test_keys_header test_certs_header)

@@ -153,7 +153,7 @@ The size of operation structures needs to be known at compile time, since caller

 ### Unit tests

-A number of unit tests are automatically generated by `tests/scripts/generate_psa_tests.py` based on the algorithms and key types declared in `include/psa/crypto_values.h` and `include/psa/crypto_extra.h`:
+A number of unit tests are automatically generated by `framework/scripts/generate_psa_tests.py` based on the algorithms and key types declared in `include/psa/crypto_values.h` and `include/psa/crypto_extra.h`:

 * Attempt to create a key with a key type that is not supported.
 * Attempt to perform an operation with a combination of key type and algorithm that is not valid or not supported.
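Editorial illustration (not part of the patch): a build or CI script can ask the generator which .data files it owns through its --list option, mirroring what scripts/abi_check.py does elsewhere in this commit; the helper name below is hypothetical.

import subprocess

def list_generated_psa_test_data(repo_root='.'):
    # Run generate_psa_tests.py in --list mode and return the file names it reports.
    output = subprocess.check_output(
        ['framework/scripts/generate_psa_tests.py', '--list'],
        cwd=repo_root,
    ).decode('ascii')
    return [line for line in output.split('\n') if line]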
@@ -663,7 +663,7 @@ psa_status_t mem_poison_psa_aead_update(psa_aead_operation_t *operation,

 There now exists a more generic mechanism for making exactly this kind of transformation - the PSA test wrappers, which exist in the files `tests/include/test/psa_test_wrappers.h` and `tests/src/psa_test_wrappers.c`. These are wrappers around all PSA functions that allow testing code to be inserted at the start and end of a PSA function call.

-The test wrappers are generated by a script, although they are not automatically generated as part of the build process. Instead, they are checked into source control and must be manually updated when functions change by running `tests/scripts/generate_psa_wrappers.py`.
+The test wrappers are generated by a script, although they are not automatically generated as part of the build process. Instead, they are checked into source control and must be manually updated when functions change by running `framework/scripts/generate_psa_wrappers.py`.

 Poisoning code is added to these test wrappers where relevant in order to pre-poison and post-unpoison the parameters to the functions.

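As a rough analogy only (the real wrappers are generated C code, and this Python sketch is not part of the document): the wrapper pattern described above brackets each PSA call with poisoning hooks.

from functools import wraps

def with_buffer_poisoning(poison, unpoison):
    """Wrap a callable so its buffer arguments are poisoned for the duration of
    the call and unpoisoned afterwards (analogy of the generated test wrappers)."""
    def decorator(psa_function):
        @wraps(psa_function)
        def wrapper(*buffers):
            for buf in buffers:
                poison(buf)          # pre-poison the parameters
            try:
                return psa_function(*buffers)
            finally:
                for buf in buffers:
                    unpoison(buf)    # post-unpoison the parameters
        return wrapper
    return decorator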
@@ -1 +1 @@
-Subproject commit e156a8eb8e6db88cdf0a3041fc7f645131eab16d
+Subproject commit 623c1b4532e8de64a5d82ea84a7496e64c370d15

@@ -326,8 +326,14 @@ class AbiChecker:
     @staticmethod
     def _list_generated_test_data_files(git_worktree_path):
         """List the generated test data files."""
+        generate_psa_tests = 'framework/scripts/generate_psa_tests.py'
+        if not os.path.isfile(git_worktree_path + '/' + generate_psa_tests):
+            # The checked-out revision is from before generate_psa_tests.py
+            # was moved to the framework submodule. Use the old location.
+            generate_psa_tests = 'tests/scripts/generate_psa_tests.py'
+
         output = subprocess.check_output(
-            ['tests/scripts/generate_psa_tests.py', '--list'],
+            [generate_psa_tests, '--list'],
             cwd=git_worktree_path,
         ).decode('ascii')
         return [line for line in output.split('\n') if line]
@@ -353,8 +359,14 @@ class AbiChecker:
             if 'storage_format' in filename:
                 storage_data_files.add(filename)
                 to_be_generated.add(filename)
+
+        generate_psa_tests = 'framework/scripts/generate_psa_tests.py'
+        if not os.path.isfile(git_worktree_path + '/' + generate_psa_tests):
+            # The checked-out revision is from before generate_psa_tests.py
+            # was moved to the framework submodule. Use the old location.
+            generate_psa_tests = 'tests/scripts/generate_psa_tests.py'
         subprocess.check_call(
-            ['tests/scripts/generate_psa_tests.py'] + sorted(to_be_generated),
+            [generate_psa_tests] + sorted(to_be_generated),
            cwd=git_worktree_path,
         )
         for test_file in sorted(storage_data_files):
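Editorial note: the two hunks above repeat the same fallback logic. A possible consolidation, shown only as an illustrative sketch (the helper name is hypothetical, not part of the patch):

import os

def locate_generate_psa_tests(git_worktree_path):
    """Return the generator path relative to the worktree, preferring the
    framework submodule and falling back to the pre-move location in
    tests/scripts for older revisions."""
    new_path = 'framework/scripts/generate_psa_tests.py'
    if os.path.isfile(os.path.join(git_worktree_path, new_path)):
        return new_path
    return 'tests/scripts/generate_psa_tests.py'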
@@ -10,8 +10,8 @@ perl scripts\generate_features.pl || exit /b 1
 python scripts\generate_ssl_debug_helpers.py || exit /b 1
 perl scripts\generate_visualc_files.pl || exit /b 1
 python scripts\generate_psa_constants.py || exit /b 1
-python tests\scripts\generate_bignum_tests.py || exit /b 1
-python tests\scripts\generate_ecp_tests.py || exit /b 1
-python tests\scripts\generate_psa_tests.py || exit /b 1
-python tests\scripts\generate_test_keys.py --output tests\src\test_keys.h || exit /b 1
-python tests\scripts\generate_test_cert_macros.py --output tests\src\test_certs.h || exit /b 1
+python framework\scripts\generate_bignum_tests.py || exit /b 1
+python framework\scripts\generate_ecp_tests.py || exit /b 1
+python framework\scripts\generate_psa_tests.py || exit /b 1
+python framework\scripts\generate_test_keys.py --output tests\src\test_keys.h || exit /b 1
+python framework\scripts\generate_test_cert_macros.py --output tests\src\test_certs.h || exit /b 1

@@ -21,7 +21,7 @@ file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/suites)
 execute_process(
 COMMAND
 ${MBEDTLS_PYTHON_EXECUTABLE}
-${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_bignum_tests.py
+${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_bignum_tests.py
 --list-for-cmake
 WORKING_DIRECTORY
 ${CMAKE_CURRENT_SOURCE_DIR}/..
@@ -33,7 +33,7 @@ string(REGEX REPLACE "[^;]*/" ""
 execute_process(
 COMMAND
 ${MBEDTLS_PYTHON_EXECUTABLE}
-${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_ecp_tests.py
+${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_ecp_tests.py
 --list-for-cmake
 WORKING_DIRECTORY
 ${CMAKE_CURRENT_SOURCE_DIR}/..
@@ -45,7 +45,7 @@ string(REGEX REPLACE "[^;]*/" ""
 execute_process(
 COMMAND
 ${MBEDTLS_PYTHON_EXECUTABLE}
-${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_psa_tests.py
+${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_psa_tests.py
 --list-for-cmake
 WORKING_DIRECTORY
 ${CMAKE_CURRENT_SOURCE_DIR}/..
@@ -81,10 +81,10 @@ if(GEN_FILES)
 ${CMAKE_CURRENT_SOURCE_DIR}/..
 COMMAND
 ${MBEDTLS_PYTHON_EXECUTABLE}
-${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_bignum_tests.py
+${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_bignum_tests.py
 --directory ${CMAKE_CURRENT_BINARY_DIR}/suites
 DEPENDS
-${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_bignum_tests.py
+${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_bignum_tests.py
 ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/bignum_common.py
 ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/bignum_core.py
 ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/bignum_mod_raw.py
@@ -99,10 +99,10 @@ if(GEN_FILES)
 ${CMAKE_CURRENT_SOURCE_DIR}/..
 COMMAND
 ${MBEDTLS_PYTHON_EXECUTABLE}
-${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_ecp_tests.py
+${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_ecp_tests.py
 --directory ${CMAKE_CURRENT_BINARY_DIR}/suites
 DEPENDS
-${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_ecp_tests.py
+${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_ecp_tests.py
 ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/bignum_common.py
 ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/ecp.py
 ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/test_case.py
@@ -115,10 +115,10 @@ if(GEN_FILES)
 ${CMAKE_CURRENT_SOURCE_DIR}/..
 COMMAND
 ${MBEDTLS_PYTHON_EXECUTABLE}
-${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_psa_tests.py
+${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_psa_tests.py
 --directory ${CMAKE_CURRENT_BINARY_DIR}/suites
 DEPENDS
-${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_psa_tests.py
+${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_psa_tests.py
 ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/crypto_data_tests.py
 ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/crypto_knowledge.py
 ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/macro_collector.py
@@ -220,7 +220,7 @@ function(add_test_suite suite_name)
 test_suite_${data_name}.c
 COMMAND
 ${MBEDTLS_PYTHON_EXECUTABLE}
-${CMAKE_CURRENT_SOURCE_DIR}/scripts/generate_test_code.py
+${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_test_code.py
 -f ${CMAKE_CURRENT_SOURCE_DIR}/suites/test_suite_${suite_name}.function
 -d ${data_file}
 -t ${CMAKE_CURRENT_SOURCE_DIR}/suites/main_test.function
@@ -229,7 +229,7 @@ function(add_test_suite suite_name)
 --helpers-file ${CMAKE_CURRENT_SOURCE_DIR}/suites/helpers.function
 -o .
 DEPENDS
-${CMAKE_CURRENT_SOURCE_DIR}/scripts/generate_test_code.py
+${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_test_code.py
 ${CMAKE_CURRENT_SOURCE_DIR}/suites/test_suite_${suite_name}.function
 ${data_file}
 ${CMAKE_CURRENT_SOURCE_DIR}/suites/main_test.function

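For orientation, an editorial sketch (not part of the patch) of what the add_test_suite() rule above runs; only the generate_test_code.py options visible in these hunks are reproduced, and the real rule passes more (for example the host test template shown later in the Makefile).

import subprocess

def generate_suite_source(suite_name, data_file, tests_dir='tests', out_dir='.'):
    # Invoke the framework's test code generator the way the CMake rule above does.
    subprocess.check_call([
        'python3', '../framework/scripts/generate_test_code.py',
        '-f', 'suites/test_suite_{}.function'.format(suite_name),
        '-d', data_file,
        '-t', 'suites/main_test.function',
        '--helpers-file', 'suites/helpers.function',
        '-o', out_dir,
    ], cwd=tests_dir)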
@@ -18,25 +18,25 @@ endif

 .PHONY: generated_files
 GENERATED_BIGNUM_DATA_FILES := $(patsubst tests/%,%,$(shell \
-$(PYTHON) scripts/generate_bignum_tests.py --list || \
+$(PYTHON) ../framework/scripts/generate_bignum_tests.py --list || \
 echo FAILED \
 ))
 ifeq ($(GENERATED_BIGNUM_DATA_FILES),FAILED)
-$(error "$(PYTHON) scripts/generate_bignum_tests.py --list" failed)
+$(error "$(PYTHON) ../framework/scripts/generate_bignum_tests.py --list" failed)
 endif
 GENERATED_ECP_DATA_FILES := $(patsubst tests/%,%,$(shell \
-$(PYTHON) scripts/generate_ecp_tests.py --list || \
+$(PYTHON) ../framework/scripts/generate_ecp_tests.py --list || \
 echo FAILED \
 ))
 ifeq ($(GENERATED_ECP_DATA_FILES),FAILED)
-$(error "$(PYTHON) scripts/generate_ecp_tests.py --list" failed)
+$(error "$(PYTHON) ../framework/scripts/generate_ecp_tests.py --list" failed)
 endif
 GENERATED_PSA_DATA_FILES := $(patsubst tests/%,%,$(shell \
-$(PYTHON) scripts/generate_psa_tests.py --list || \
+$(PYTHON) ../framework/scripts/generate_psa_tests.py --list || \
 echo FAILED \
 ))
 ifeq ($(GENERATED_PSA_DATA_FILES),FAILED)
-$(error "$(PYTHON) scripts/generate_psa_tests.py --list" failed)
+$(error "$(PYTHON) ../framework/scripts/generate_psa_tests.py --list" failed)
 endif
 GENERATED_FILES := $(GENERATED_PSA_DATA_FILES) $(GENERATED_ECP_DATA_FILES) $(GENERATED_BIGNUM_DATA_FILES)
 generated_files: $(GENERATED_FILES) src/test_keys.h src/test_certs.h
@@ -49,7 +49,7 @@ generated_files: $(GENERATED_FILES) src/test_keys.h src/test_certs.h
 # a separate instance of the recipe for each output file.
 .SECONDARY: generated_bignum_test_data generated_ecp_test_data generated_psa_test_data
 $(GENERATED_BIGNUM_DATA_FILES): $(gen_file_dep) generated_bignum_test_data
-generated_bignum_test_data: scripts/generate_bignum_tests.py
+generated_bignum_test_data: ../framework/scripts/generate_bignum_tests.py
 generated_bignum_test_data: ../framework/scripts/mbedtls_framework/bignum_common.py
 generated_bignum_test_data: ../framework/scripts/mbedtls_framework/bignum_core.py
 generated_bignum_test_data: ../framework/scripts/mbedtls_framework/bignum_mod_raw.py
@@ -58,20 +58,20 @@ generated_bignum_test_data: ../framework/scripts/mbedtls_framework/test_case.py
 generated_bignum_test_data: ../framework/scripts/mbedtls_framework/test_data_generation.py
 generated_bignum_test_data:
 echo " Gen $(GENERATED_BIGNUM_DATA_FILES)"
-$(PYTHON) scripts/generate_bignum_tests.py
+$(PYTHON) ../framework/scripts/generate_bignum_tests.py

 $(GENERATED_ECP_DATA_FILES): $(gen_file_dep) generated_ecp_test_data
-generated_ecp_test_data: scripts/generate_ecp_tests.py
+generated_ecp_test_data: ../framework/scripts/generate_ecp_tests.py
 generated_ecp_test_data: ../framework/scripts/mbedtls_framework/bignum_common.py
 generated_ecp_test_data: ../framework/scripts/mbedtls_framework/ecp.py
 generated_ecp_test_data: ../framework/scripts/mbedtls_framework/test_case.py
 generated_ecp_test_data: ../framework/scripts/mbedtls_framework/test_data_generation.py
 generated_ecp_test_data:
 echo " Gen $(GENERATED_ECP_DATA_FILES)"
-$(PYTHON) scripts/generate_ecp_tests.py
+$(PYTHON) ../framework/scripts/generate_ecp_tests.py

 $(GENERATED_PSA_DATA_FILES): $(gen_file_dep) generated_psa_test_data
-generated_psa_test_data: scripts/generate_psa_tests.py
+generated_psa_test_data: ../framework/scripts/generate_psa_tests.py
 generated_psa_test_data: ../framework/scripts/mbedtls_framework/crypto_data_tests.py
 generated_psa_test_data: ../framework/scripts/mbedtls_framework/crypto_knowledge.py
 generated_psa_test_data: ../framework/scripts/mbedtls_framework/macro_collector.py
@@ -90,7 +90,7 @@ generated_psa_test_data: ../include/psa/crypto_extra.h
 generated_psa_test_data: suites/test_suite_psa_crypto_metadata.data
 generated_psa_test_data:
 echo " Gen $(GENERATED_PSA_DATA_FILES) ..."
-$(PYTHON) scripts/generate_psa_tests.py
+$(PYTHON) ../framework/scripts/generate_psa_tests.py

 # A test application is built for each suites/test_suite_*.data file.
 # Application name is same as .data file's base name and can be
@@ -112,12 +112,12 @@ all: $(BINARIES)

 mbedtls_test: $(MBEDTLS_TEST_OBJS)

-src/test_certs.h: scripts/generate_test_cert_macros.py \
-$($(PYTHON) scripts/generate_test_cert_macros.py --list-dependencies)
-$(PYTHON) scripts/generate_test_cert_macros.py --output $@
+src/test_certs.h: ../framework/scripts/generate_test_cert_macros.py \
+$($(PYTHON) ../framework/scripts/generate_test_cert_macros.py --list-dependencies)
+$(PYTHON) ../framework/scripts/generate_test_cert_macros.py --output $@

-src/test_keys.h: scripts/generate_test_keys.py
-$(PYTHON) scripts/generate_test_keys.py --output $@
+src/test_keys.h: ../framework/scripts/generate_test_keys.py
+$(PYTHON) ../framework/scripts/generate_test_keys.py --output $@

 TEST_OBJS_DEPS = $(wildcard include/test/*.h include/test/*/*.h)
 ifdef RECORD_PSA_STATUS_COVERAGE_LOG
@@ -159,9 +159,9 @@ c: $(C_FILES)
 # dot in .c file's base name.
 #
 .SECONDEXPANSION:
-%.c: suites/$$(firstword $$(subst ., ,$$*)).function suites/%.data scripts/generate_test_code.py suites/helpers.function suites/main_test.function suites/host_test.function
+%.c: suites/$$(firstword $$(subst ., ,$$*)).function suites/%.data ../framework/scripts/generate_test_code.py suites/helpers.function suites/main_test.function suites/host_test.function
 echo " Gen $@"
-$(PYTHON) scripts/generate_test_code.py -f suites/$(firstword $(subst ., ,$*)).function \
+$(PYTHON) ../framework/scripts/generate_test_code.py -f suites/$(firstword $(subst ., ,$*)).function \
 -d suites/$*.data \
 -t suites/main_test.function \
 -p suites/host_test.function \

@@ -5,7 +5,7 @@
 * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 */

-/* THIS FILE is generated by `tests/scripts/generate_test_cert_macros.py` */
+/* THIS FILE is generated by `framework/scripts/generate_test_cert_macros.py` */
 /* *INDENT-OFF* */

 {% for mode, name, value in macros %}

@@ -6434,7 +6434,7 @@ component_check_test_helpers () {
 # unittest writes out mundane stuff like the number of tests run on stderr.
 # Our convention is to reserve stderr for actual errors, and write
 # harmless info on stdout so it can be suppressed with --quiet.
-./tests/scripts/test_generate_test_code.py 2>&1
+./framework/scripts/test_generate_test_code.py 2>&1

 msg "unit test: translate_ciphers.py"
 python3 -m unittest tests/scripts/translate_ciphers.py 2>&1

@@ -128,10 +128,10 @@ check()

 # These checks are common to Mbed TLS and TF-PSA-Crypto
 check scripts/generate_psa_constants.py programs/psa/psa_constant_names_generated.c
-check tests/scripts/generate_bignum_tests.py $(tests/scripts/generate_bignum_tests.py --list)
-check tests/scripts/generate_ecp_tests.py $(tests/scripts/generate_ecp_tests.py --list)
-check tests/scripts/generate_psa_tests.py $(tests/scripts/generate_psa_tests.py --list)
-check tests/scripts/generate_test_keys.py tests/src/test_keys.h
+check framework/scripts/generate_bignum_tests.py $(framework/scripts/generate_bignum_tests.py --list)
+check framework/scripts/generate_ecp_tests.py $(framework/scripts/generate_ecp_tests.py --list)
+check framework/scripts/generate_psa_tests.py $(framework/scripts/generate_psa_tests.py --list)
+check framework/scripts/generate_test_keys.py tests/src/test_keys.h
 check scripts/generate_driver_wrappers.py $library_dir/psa_crypto_driver_wrappers.h $library_dir/psa_crypto_driver_wrappers_no_static.c

 # Additional checks for Mbed TLS only
@@ -140,7 +140,7 @@ if in_mbedtls_repo; then
 check scripts/generate_query_config.pl programs/test/query_config.c
 check scripts/generate_features.pl library/version_features.c
 check scripts/generate_ssl_debug_helpers.py library/ssl_debug_helpers_generated.c
-check tests/scripts/generate_test_cert_macros.py tests/src/test_certs.h
+check framework/scripts/generate_test_cert_macros.py tests/src/test_certs.h
 # generate_visualc_files enumerates source files (library/*.c). It doesn't
 # care about their content, but the files must exist. So it must run after
 # the step that creates or updates these files.
@@ -150,4 +150,4 @@ fi
 # Generated files that are present in the repository even in the development
 # branch. (This is intended to be temporary, until the generator scripts are
 # fully reviewed and the build scripts support a generated header file.)
-check tests/scripts/generate_psa_wrappers.py tests/include/test/psa_test_wrappers.h tests/src/psa_test_wrappers.c
+check framework/scripts/generate_psa_wrappers.py tests/include/test/psa_test_wrappers.h tests/src/psa_test_wrappers.c

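Editorial sketch only (the actual check is the shell check() function referenced above; the Python names here are hypothetical): the essence of the script is to re-run each generator and fail if any checked-in output file changes.

import filecmp
import os
import shutil
import subprocess
import tempfile

def check_generated(generator_cmd, output_files):
    """Regenerate files and report the ones that no longer match the checked-in copies."""
    backup_dir = tempfile.mkdtemp()
    try:
        for path in output_files:
            shutil.copy2(path, os.path.join(backup_dir, os.path.basename(path)))
        subprocess.check_call(generator_cmd)   # regenerate in place
        stale = [path for path in output_files
                 if not filecmp.cmp(path, os.path.join(backup_dir, os.path.basename(path)),
                                    shallow=False)]
        if stale:
            raise SystemExit('outdated generated files: ' + ' '.join(stale))
    finally:
        shutil.rmtree(backup_dir)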
@@ -1,187 +0,0 @@ (file deleted: generate_bignum_tests.py, moved from tests/scripts to the framework submodule)
|
||||
#!/usr/bin/env python3
|
||||
"""Generate test data for bignum functions.
|
||||
|
||||
With no arguments, generate all test data. With non-option arguments,
|
||||
generate only the specified files.
|
||||
|
||||
Class structure:
|
||||
|
||||
Child classes of test_data_generation.BaseTarget (file targets) represent an output
|
||||
file. These indicate where test cases will be written to, for all subclasses of
|
||||
this target. Multiple file targets should not reuse a `target_basename`.
|
||||
|
||||
Each subclass derived from a file target can either be:
|
||||
- A concrete class, representing a test function, which generates test cases.
|
||||
- An abstract class containing shared methods and attributes, not associated
|
||||
with a test function. An example is BignumOperation, which provides
|
||||
common features used for bignum binary operations.
|
||||
|
||||
Both concrete and abstract subclasses can be derived from, to implement
|
||||
additional test cases (see BignumCmp and BignumCmpAbs for examples of deriving
|
||||
from abstract and concrete classes).
|
||||
|
||||
|
||||
Adding test case generation for a function:
|
||||
|
||||
A subclass representing the test function should be added, deriving from a
|
||||
file target such as BignumTarget. This test class must set/implement the
|
||||
following:
|
||||
- test_function: the function name from the associated .function file.
|
||||
- test_name: a descriptive name or brief summary to refer to the test
|
||||
function.
|
||||
- arguments(): a method to generate the list of arguments required for the
|
||||
test_function.
|
||||
- generate_function_tests(): a method to generate TestCases for the function.
|
||||
This should create instances of the class with required input data, and
|
||||
call `.create_test_case()` to yield the TestCase.
|
||||
|
||||
Additional details and other attributes/methods are given in the documentation
|
||||
of BaseTarget in test_data_generation.py.
|
||||
"""
|
||||
|
||||
# Copyright The Mbed TLS Contributors
|
||||
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
|
||||
|
||||
import sys
|
||||
|
||||
from abc import ABCMeta
|
||||
from typing import List
|
||||
|
||||
import scripts_path # pylint: disable=unused-import
|
||||
from mbedtls_framework import test_data_generation
|
||||
from mbedtls_framework import bignum_common
|
||||
# Import modules containing additional test classes
|
||||
# Test function classes in these modules will be registered by
|
||||
# the framework
|
||||
from mbedtls_framework import bignum_core, bignum_mod_raw, bignum_mod # pylint: disable=unused-import
|
||||
|
||||
class BignumTarget(test_data_generation.BaseTarget):
|
||||
#pylint: disable=too-few-public-methods
|
||||
"""Target for bignum (legacy) test case generation."""
|
||||
target_basename = 'test_suite_bignum.generated'
|
||||
|
||||
|
||||
class BignumOperation(bignum_common.OperationCommon, BignumTarget,
|
||||
metaclass=ABCMeta):
|
||||
#pylint: disable=abstract-method
|
||||
"""Common features for bignum operations in legacy tests."""
|
||||
unique_combinations_only = True
|
||||
input_values = [
|
||||
"", "0", "-", "-0",
|
||||
"7b", "-7b",
|
||||
"0000000000000000123", "-0000000000000000123",
|
||||
"1230000000000000000", "-1230000000000000000"
|
||||
]
|
||||
|
||||
def description_suffix(self) -> str:
|
||||
#pylint: disable=no-self-use # derived classes need self
|
||||
"""Text to add at the end of the test case description."""
|
||||
return ""
|
||||
|
||||
def description(self) -> str:
|
||||
"""Generate a description for the test case.
|
||||
|
||||
If not set, case_description uses the form A `symbol` B, where symbol
|
||||
is used to represent the operation. Descriptions of each value are
|
||||
generated to provide some context to the test case.
|
||||
"""
|
||||
if not self.case_description:
|
||||
self.case_description = "{} {} {}".format(
|
||||
self.value_description(self.arg_a),
|
||||
self.symbol,
|
||||
self.value_description(self.arg_b)
|
||||
)
|
||||
description_suffix = self.description_suffix()
|
||||
if description_suffix:
|
||||
self.case_description += " " + description_suffix
|
||||
return super().description()
|
||||
|
||||
@staticmethod
|
||||
def value_description(val) -> str:
|
||||
"""Generate a description of the argument val.
|
||||
|
||||
This produces a simple description of the value, which is used in test
|
||||
case naming to add context.
|
||||
"""
|
||||
if val == "":
|
||||
return "0 (null)"
|
||||
if val == "-":
|
||||
return "negative 0 (null)"
|
||||
if val == "0":
|
||||
return "0 (1 limb)"
|
||||
|
||||
if val[0] == "-":
|
||||
tmp = "negative"
|
||||
val = val[1:]
|
||||
else:
|
||||
tmp = "positive"
|
||||
if val[0] == "0":
|
||||
tmp += " with leading zero limb"
|
||||
elif len(val) > 10:
|
||||
tmp = "large " + tmp
|
||||
return tmp
|
||||
|
||||
|
||||
class BignumCmp(BignumOperation):
|
||||
"""Test cases for bignum value comparison."""
|
||||
count = 0
|
||||
test_function = "mpi_cmp_mpi"
|
||||
test_name = "MPI compare"
|
||||
input_cases = [
|
||||
("-2", "-3"),
|
||||
("-2", "-2"),
|
||||
("2b4", "2b5"),
|
||||
("2b5", "2b6")
|
||||
]
|
||||
|
||||
def __init__(self, val_a, val_b) -> None:
|
||||
super().__init__(val_a, val_b)
|
||||
self._result = int(self.int_a > self.int_b) - int(self.int_a < self.int_b)
|
||||
self.symbol = ["<", "==", ">"][self._result + 1]
|
||||
|
||||
def result(self) -> List[str]:
|
||||
return [str(self._result)]
|
||||
|
||||
|
||||
class BignumCmpAbs(BignumCmp):
|
||||
"""Test cases for absolute bignum value comparison."""
|
||||
count = 0
|
||||
test_function = "mpi_cmp_abs"
|
||||
test_name = "MPI compare (abs)"
|
||||
|
||||
def __init__(self, val_a, val_b) -> None:
|
||||
super().__init__(val_a.strip("-"), val_b.strip("-"))
|
||||
|
||||
|
||||
class BignumAdd(BignumOperation):
|
||||
"""Test cases for bignum value addition."""
|
||||
count = 0
|
||||
symbol = "+"
|
||||
test_function = "mpi_add_mpi"
|
||||
test_name = "MPI add"
|
||||
input_cases = bignum_common.combination_pairs(
|
||||
[
|
||||
"1c67967269c6", "9cde3",
|
||||
"-1c67967269c6", "-9cde3",
|
||||
]
|
||||
)
|
||||
|
||||
def __init__(self, val_a: str, val_b: str) -> None:
|
||||
super().__init__(val_a, val_b)
|
||||
self._result = self.int_a + self.int_b
|
||||
|
||||
def description_suffix(self) -> str:
|
||||
if (self.int_a >= 0 and self.int_b >= 0):
|
||||
return "" # obviously positive result or 0
|
||||
if (self.int_a <= 0 and self.int_b <= 0):
|
||||
return "" # obviously negative result or 0
|
||||
# The sign of the result is not obvious, so indicate it
|
||||
return ", result{}0".format('>' if self._result > 0 else
|
||||
'<' if self._result < 0 else '=')
|
||||
|
||||
def result(self) -> List[str]:
|
||||
return [bignum_common.quote_str("{:x}".format(self._result))]
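# --- Editorial illustration, not part of the original script ----------------
# The module docstring above describes how to add test generation for a new
# function. A minimal sketch of such an addition follows; the class and the
# test function name "mpi_sub_mpi" are hypothetical here and assume a matching
# entry in the legacy bignum .function file.
class BignumSub(BignumOperation):
    """Illustrative sketch: test cases for bignum value subtraction."""
    count = 0
    symbol = "-"
    test_function = "mpi_sub_mpi"
    test_name = "MPI sub"

    def __init__(self, val_a: str, val_b: str) -> None:
        super().__init__(val_a, val_b)
        self._result = self.int_a - self.int_b

    def result(self) -> List[str]:
        # Emit the expected value as a quoted hex string, as BignumAdd does.
        return [bignum_common.quote_str("{:x}".format(self._result))]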
|
||||
|
||||
if __name__ == '__main__':
|
||||
# Use the section of the docstring relevant to the CLI as description
|
||||
test_data_generation.main(sys.argv[1:], "\n".join(__doc__.splitlines()[:4]))
|
@@ -1,22 +0,0 @@ (file deleted: generate_ecp_tests.py, moved from tests/scripts to the framework submodule)
|
||||
#!/usr/bin/env python3
|
||||
"""Generate test data for ecp functions.
|
||||
|
||||
The command line usage, class structure and available methods are the same
|
||||
as in generate_bignum_tests.py.
|
||||
"""
|
||||
|
||||
# Copyright The Mbed TLS Contributors
|
||||
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
|
||||
|
||||
import sys
|
||||
|
||||
import scripts_path # pylint: disable=unused-import
|
||||
from mbedtls_framework import test_data_generation
|
||||
# Import modules containing additional test classes
|
||||
# Test function classes in these modules will be registered by
|
||||
# the framework
|
||||
from mbedtls_framework import ecp # pylint: disable=unused-import
|
||||
|
||||
if __name__ == '__main__':
|
||||
# Use the section of the docstring relevant to the CLI as description
|
||||
test_data_generation.main(sys.argv[1:], "\n".join(__doc__.splitlines()[:4]))
|
@@ -1,183 +0,0 @@ (file deleted: the PKCS7 test generation helper, moved from tests/scripts to the framework submodule)
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright The Mbed TLS Contributors
|
||||
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
|
||||
#
|
||||
|
||||
"""
|
||||
Fuzz-like test generation for the pkcs7 test suite.
|
||||
Given a valid DER pkcs7 file add tests to the test_suite_pkcs7.data file
|
||||
- It is expected that the pkcs7_asn1_fail( data_t *pkcs7_buf )
|
||||
function is defined in test_suite_pkcs7.function
|
||||
- This is not meant to be portable code; if anything it is meant to serve as
|
||||
documentation for showing how those ugly tests in test_suite_pkcs7.data were created
|
||||
"""
|
||||
|
||||
|
||||
import sys
|
||||
from os.path import exists
|
||||
|
||||
PKCS7_TEST_FILE = "../suites/test_suite_pkcs7.data"
|
||||
|
||||
class Test: # pylint: disable=too-few-public-methods
|
||||
"""
|
||||
An instance of a test in test_suite_pkcs7.data
|
||||
"""
|
||||
def __init__(self, name, depends, func_call):
|
||||
self.name = name
|
||||
self.depends = depends
|
||||
self.func_call = func_call
|
||||
|
||||
# pylint: disable=no-self-use
|
||||
def to_string(self):
|
||||
return "\n" + self.name + "\n" + self.depends + "\n" + self.func_call + "\n"
|
||||
|
||||
class TestData:
|
||||
"""
|
||||
Take in test_suite_pkcs7.data file.
|
||||
Allow for new tests to be added.
|
||||
"""
|
||||
mandatory_dep = "MBEDTLS_MD_CAN_SHA256"
|
||||
test_name = "PKCS7 Parse Failure Invalid ASN1"
|
||||
test_function = "pkcs7_asn1_fail:"
|
||||
def __init__(self, file_name):
|
||||
self.file_name = file_name
|
||||
self.last_test_num, self.old_tests = self.read_test_file(file_name)
|
||||
self.new_tests = []
|
||||
|
||||
# pylint: disable=no-self-use
|
||||
def read_test_file(self, file):
|
||||
"""
|
||||
Parse the test_suite_pkcs7.data file.
|
||||
"""
|
||||
tests = []
|
||||
if not exists(file):
|
||||
print(file + " Does not exist")
|
||||
sys.exit()
|
||||
with open(file, "r", encoding='UTF-8') as fp:
|
||||
data = fp.read()
|
||||
lines = [line.strip() for line in data.split('\n') if len(line.strip()) > 1]
|
||||
i = 0
|
||||
while i < len(lines):
|
||||
if "depends" in lines[i+1]:
|
||||
tests.append(Test(lines[i], lines[i+1], lines[i+2]))
|
||||
i += 3
|
||||
else:
|
||||
tests.append(Test(lines[i], None, lines[i+1]))
|
||||
i += 2
|
||||
latest_test_num = float(tests[-1].name.split('#')[1])
|
||||
return latest_test_num, tests
|
||||
|
||||
def add(self, name, func_call):
|
||||
self.last_test_num += 1
|
||||
self.new_tests.append(Test(self.test_name + ": " + name + " #" + \
|
||||
str(self.last_test_num), "depends_on:" + self.mandatory_dep, \
|
||||
self.test_function + '"' + func_call + '"'))
|
||||
|
||||
def write_changes(self):
|
||||
with open(self.file_name, 'a', encoding='UTF-8') as fw:
|
||||
fw.write("\n")
|
||||
for t in self.new_tests:
|
||||
fw.write(t.to_string())
|
||||
|
||||
|
||||
def asn1_mutate(data):
|
||||
"""
|
||||
We have been given an asn1 structure representing a pkcs7.
|
||||
We want to return an array of slightly modified versions of this data
|
||||
they should be modified in a way which makes the structure invalid
|
||||
|
||||
We know that asn1 structures are:
|
||||
|---1 byte showing data type---|----byte(s) for length of data---|---data content--|
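For example, the DER bytes 30 03 02 01 05 are a SEQUENCE (tag 30) of length 3 whose
content is a single INTEGER (tag 02, length 01) with value 05.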
|
||||
We know that some data types can contain other data types.
|
||||
Return a dictionary of reasons and mutated data types.
|
||||
"""
|
||||
|
||||
# off the bat just add bytes to start and end of the buffer
|
||||
mutations = []
|
||||
reasons = []
|
||||
mutations.append(["00"] + data)
|
||||
reasons.append("Add null byte to start")
|
||||
mutations.append(data + ["00"])
|
||||
reasons.append("Add null byte to end")
|
||||
# for every asn1 entry we should attempt to:
|
||||
# - change the data type tag
|
||||
# - make the length longer than actual
|
||||
# - make the length shorter than actual
|
||||
i = 0
|
||||
while i < len(data):
|
||||
tag_i = i
|
||||
leng_i = tag_i + 1
|
||||
data_i = leng_i + 1 + (int(data[leng_i][1], 16) if data[leng_i][0] == '8' else 0)
|
||||
if data[leng_i][0] == '8':
|
||||
length = int(''.join(data[leng_i + 1: data_i]), 16)
|
||||
else:
|
||||
length = int(data[leng_i], 16)
|
||||
|
||||
tag = data[tag_i]
|
||||
print("Looking at ans1: offset " + str(i) + " tag = " + tag + \
|
||||
", length = " + str(length)+ ":")
|
||||
print(''.join(data[data_i:data_i+length]))
|
||||
# change tag to something else
|
||||
if tag == "02":
|
||||
# turn integers into octet strings
|
||||
new_tag = "04"
|
||||
else:
|
||||
# turn everything else into an integer
|
||||
new_tag = "02"
|
||||
mutations.append(data[:tag_i] + [new_tag] + data[leng_i:])
|
||||
reasons.append("Change tag " + tag + " to " + new_tag)
|
||||
|
||||
# change lengths to too big
|
||||
# skip any edge cases which would cause carry over
|
||||
if int(data[data_i - 1], 16) < 255:
|
||||
new_length = str(hex(int(data[data_i - 1], 16) + 1))[2:]
|
||||
if len(new_length) == 1:
|
||||
new_length = "0"+new_length
|
||||
mutations.append(data[:data_i -1] + [new_length] + data[data_i:])
|
||||
reasons.append("Change length from " + str(length) + " to " \
|
||||
+ str(length + 1))
|
||||
# we can add another test here for tags that contain other tags \
|
||||
# where they have more data than their containing tags account for
|
||||
if tag in ["30", "a0", "31"]:
|
||||
mutations.append(data[:data_i -1] + [new_length] + \
|
||||
data[data_i:data_i + length] + ["00"] + \
|
||||
data[data_i + length:])
|
||||
reasons.append("Change contents of tag " + tag + " to contain \
|
||||
one unaccounted extra byte")
|
||||
# change lengths to too small
|
||||
if int(data[data_i - 1], 16) > 0:
|
||||
new_length = str(hex(int(data[data_i - 1], 16) - 1))[2:]
|
||||
if len(new_length) == 1:
|
||||
new_length = "0"+new_length
|
||||
mutations.append(data[:data_i -1] + [new_length] + data[data_i:])
|
||||
reasons.append("Change length from " + str(length) + " to " + str(length - 1))
|
||||
|
||||
# some tag types contain other tag types so we should iterate into the data
|
||||
if tag in ["30", "a0", "31"]:
|
||||
i = data_i
|
||||
else:
|
||||
i = data_i + length
|
||||
|
||||
return list(zip(reasons, mutations))
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) < 2:
|
||||
print("USAGE: " + sys.argv[0] + " <pkcs7_der_file>")
|
||||
sys.exit()
|
||||
|
||||
DATA_FILE = sys.argv[1]
|
||||
TEST_DATA = TestData(PKCS7_TEST_FILE)
|
||||
with open(DATA_FILE, 'rb') as f:
|
||||
DATA_STR = f.read().hex()
|
||||
# make data an array of byte strings eg ['de','ad','be','ef']
|
||||
HEX_DATA = list(map(''.join, [[DATA_STR[i], DATA_STR[i+1]] for i in range(0, len(DATA_STR), \
|
||||
2)]))
|
||||
# returns tuples of test_names and modified data buffers
|
||||
MUT_ARR = asn1_mutate(HEX_DATA)
|
||||
|
||||
print("made " + str(len(MUT_ARR)) + " new tests")
|
||||
for new_test in MUT_ARR:
|
||||
TEST_DATA.add(new_test[0], ''.join(new_test[1]))
|
||||
|
||||
TEST_DATA.write_changes()
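# Editorial usage note, not in the original script: judging by the relative
# PKCS7_TEST_FILE path, the script is meant to be run from tests/scripts with a
# valid DER-encoded PKCS7 file as argument, e.g.
#     python3 <this_script>.py my_pkcs7_blob.der
# (the script and input file names are placeholders). New test cases are then
# appended to ../suites/test_suite_pkcs7.data.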
|
@@ -1,850 +0,0 @@ (file deleted: generate_psa_tests.py, moved from tests/scripts to the framework submodule)
|
||||
#!/usr/bin/env python3
|
||||
"""Generate test data for PSA cryptographic mechanisms.
|
||||
|
||||
With no arguments, generate all test data. With non-option arguments,
|
||||
generate only the specified files.
|
||||
"""
|
||||
|
||||
# Copyright The Mbed TLS Contributors
|
||||
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
|
||||
|
||||
import enum
|
||||
import re
|
||||
import sys
|
||||
from typing import Callable, Dict, FrozenSet, Iterable, Iterator, List, Optional
|
||||
|
||||
import scripts_path # pylint: disable=unused-import
|
||||
from mbedtls_framework import crypto_data_tests
|
||||
from mbedtls_framework import crypto_knowledge
|
||||
from mbedtls_framework import macro_collector #pylint: disable=unused-import
|
||||
from mbedtls_framework import psa_information
|
||||
from mbedtls_framework import psa_storage
|
||||
from mbedtls_framework import test_case
|
||||
from mbedtls_framework import test_data_generation
|
||||
|
||||
|
||||
|
||||
def test_case_for_key_type_not_supported(
|
||||
verb: str, key_type: str, bits: int,
|
||||
dependencies: List[str],
|
||||
*args: str,
|
||||
param_descr: str = ''
|
||||
) -> test_case.TestCase:
|
||||
"""Return one test case exercising a key creation method
|
||||
for an unsupported key type or size.
|
||||
"""
|
||||
psa_information.hack_dependencies_not_implemented(dependencies)
|
||||
tc = test_case.TestCase()
|
||||
short_key_type = crypto_knowledge.short_expression(key_type)
|
||||
adverb = 'not' if dependencies else 'never'
|
||||
if param_descr:
|
||||
adverb = param_descr + ' ' + adverb
|
||||
tc.set_description('PSA {} {} {}-bit {} supported'
|
||||
.format(verb, short_key_type, bits, adverb))
|
||||
tc.set_dependencies(dependencies)
|
||||
tc.set_function(verb + '_not_supported')
|
||||
tc.set_arguments([key_type] + list(args))
|
||||
return tc
|
||||
|
||||
class KeyTypeNotSupported:
|
||||
"""Generate test cases for when a key type is not supported."""
|
||||
|
||||
def __init__(self, info: psa_information.Information) -> None:
|
||||
self.constructors = info.constructors
|
||||
|
||||
ALWAYS_SUPPORTED = frozenset([
|
||||
'PSA_KEY_TYPE_DERIVE',
|
||||
'PSA_KEY_TYPE_PASSWORD',
|
||||
'PSA_KEY_TYPE_PASSWORD_HASH',
|
||||
'PSA_KEY_TYPE_RAW_DATA',
|
||||
'PSA_KEY_TYPE_HMAC'
|
||||
])
|
||||
def test_cases_for_key_type_not_supported(
|
||||
self,
|
||||
kt: crypto_knowledge.KeyType,
|
||||
param: Optional[int] = None,
|
||||
param_descr: str = '',
|
||||
) -> Iterator[test_case.TestCase]:
|
||||
"""Return test cases exercising key creation when the given type is unsupported.
|
||||
|
||||
If param is present and not None, emit test cases conditioned on this
|
||||
parameter not being supported. If it is absent or None, emit test cases
|
||||
conditioned on the base type not being supported.
|
||||
"""
|
||||
if kt.name in self.ALWAYS_SUPPORTED:
|
||||
# Don't generate test cases for key types that are always supported.
|
||||
# They would be skipped in all configurations, which is noise.
|
||||
return
|
||||
import_dependencies = [('!' if param is None else '') +
|
||||
psa_information.psa_want_symbol(kt.name)]
|
||||
if kt.params is not None:
|
||||
import_dependencies += [('!' if param == i else '') +
|
||||
psa_information.psa_want_symbol(sym)
|
||||
for i, sym in enumerate(kt.params)]
|
||||
if kt.name.endswith('_PUBLIC_KEY'):
|
||||
generate_dependencies = []
|
||||
else:
|
||||
generate_dependencies = \
|
||||
psa_information.fix_key_pair_dependencies(import_dependencies, 'GENERATE')
|
||||
import_dependencies = \
|
||||
psa_information.fix_key_pair_dependencies(import_dependencies, 'BASIC')
|
||||
for bits in kt.sizes_to_test():
|
||||
yield test_case_for_key_type_not_supported(
|
||||
'import', kt.expression, bits,
|
||||
psa_information.finish_family_dependencies(import_dependencies, bits),
|
||||
test_case.hex_string(kt.key_material(bits)),
|
||||
param_descr=param_descr,
|
||||
)
|
||||
if not generate_dependencies and param is not None:
|
||||
# If generation is impossible for this key type, rather than
|
||||
# supported or not depending on implementation capabilities,
|
||||
# only generate the test case once.
|
||||
continue
|
||||
# For public key we expect that key generation fails with
|
||||
# INVALID_ARGUMENT. It is handled by KeyGenerate class.
|
||||
if not kt.is_public():
|
||||
yield test_case_for_key_type_not_supported(
|
||||
'generate', kt.expression, bits,
|
||||
psa_information.finish_family_dependencies(generate_dependencies, bits),
|
||||
str(bits),
|
||||
param_descr=param_descr,
|
||||
)
|
||||
# To be added: derive
|
||||
|
||||
ECC_KEY_TYPES = ('PSA_KEY_TYPE_ECC_KEY_PAIR',
|
||||
'PSA_KEY_TYPE_ECC_PUBLIC_KEY')
|
||||
DH_KEY_TYPES = ('PSA_KEY_TYPE_DH_KEY_PAIR',
|
||||
'PSA_KEY_TYPE_DH_PUBLIC_KEY')
|
||||
|
||||
def test_cases_for_not_supported(self) -> Iterator[test_case.TestCase]:
|
||||
"""Generate test cases that exercise the creation of keys of unsupported types."""
|
||||
for key_type in sorted(self.constructors.key_types):
|
||||
if key_type in self.ECC_KEY_TYPES:
|
||||
continue
|
||||
if key_type in self.DH_KEY_TYPES:
|
||||
continue
|
||||
kt = crypto_knowledge.KeyType(key_type)
|
||||
yield from self.test_cases_for_key_type_not_supported(kt)
|
||||
for curve_family in sorted(self.constructors.ecc_curves):
|
||||
for constr in self.ECC_KEY_TYPES:
|
||||
kt = crypto_knowledge.KeyType(constr, [curve_family])
|
||||
yield from self.test_cases_for_key_type_not_supported(
|
||||
kt, param_descr='type')
|
||||
yield from self.test_cases_for_key_type_not_supported(
|
||||
kt, 0, param_descr='curve')
|
||||
for dh_family in sorted(self.constructors.dh_groups):
|
||||
for constr in self.DH_KEY_TYPES:
|
||||
kt = crypto_knowledge.KeyType(constr, [dh_family])
|
||||
yield from self.test_cases_for_key_type_not_supported(
|
||||
kt, param_descr='type')
|
||||
yield from self.test_cases_for_key_type_not_supported(
|
||||
kt, 0, param_descr='group')
|
||||
|
||||
def test_case_for_key_generation(
|
||||
key_type: str, bits: int,
|
||||
dependencies: List[str],
|
||||
*args: str,
|
||||
result: str = ''
|
||||
) -> test_case.TestCase:
|
||||
"""Return one test case exercising a key generation.
|
||||
"""
|
||||
psa_information.hack_dependencies_not_implemented(dependencies)
|
||||
tc = test_case.TestCase()
|
||||
short_key_type = crypto_knowledge.short_expression(key_type)
|
||||
tc.set_description('PSA {} {}-bit'
|
||||
.format(short_key_type, bits))
|
||||
tc.set_dependencies(dependencies)
|
||||
tc.set_function('generate_key')
|
||||
tc.set_arguments([key_type] + list(args) + [result])
|
||||
|
||||
return tc
|
||||
|
||||
class KeyGenerate:
|
||||
"""Generate positive and negative (invalid argument) test cases for key generation."""
|
||||
|
||||
def __init__(self, info: psa_information.Information) -> None:
|
||||
self.constructors = info.constructors
|
||||
|
||||
ECC_KEY_TYPES = ('PSA_KEY_TYPE_ECC_KEY_PAIR',
|
||||
'PSA_KEY_TYPE_ECC_PUBLIC_KEY')
|
||||
DH_KEY_TYPES = ('PSA_KEY_TYPE_DH_KEY_PAIR',
|
||||
'PSA_KEY_TYPE_DH_PUBLIC_KEY')
|
||||
|
||||
@staticmethod
|
||||
def test_cases_for_key_type_key_generation(
|
||||
kt: crypto_knowledge.KeyType
|
||||
) -> Iterator[test_case.TestCase]:
|
||||
"""Return test cases exercising key generation.
|
||||
|
||||
All key types can be generated except for public keys. For public key
|
||||
PSA_ERROR_INVALID_ARGUMENT status is expected.
|
||||
"""
|
||||
result = 'PSA_SUCCESS'
|
||||
|
||||
import_dependencies = [psa_information.psa_want_symbol(kt.name)]
|
||||
if kt.params is not None:
|
||||
import_dependencies += [psa_information.psa_want_symbol(sym)
|
||||
for i, sym in enumerate(kt.params)]
|
||||
if kt.name.endswith('_PUBLIC_KEY'):
|
||||
# The library checks whether the key type is a public key generically,
|
||||
# before it reaches a point where it needs support for the specific key
|
||||
# type, so it returns INVALID_ARGUMENT for unsupported public key types.
|
||||
generate_dependencies = []
|
||||
result = 'PSA_ERROR_INVALID_ARGUMENT'
|
||||
else:
|
||||
generate_dependencies = \
|
||||
psa_information.fix_key_pair_dependencies(import_dependencies, 'GENERATE')
|
||||
for bits in kt.sizes_to_test():
|
||||
if kt.name == 'PSA_KEY_TYPE_RSA_KEY_PAIR':
|
||||
size_dependency = "PSA_VENDOR_RSA_GENERATE_MIN_KEY_BITS <= " + str(bits)
|
||||
test_dependencies = generate_dependencies + [size_dependency]
|
||||
else:
|
||||
test_dependencies = generate_dependencies
|
||||
yield test_case_for_key_generation(
|
||||
kt.expression, bits,
|
||||
psa_information.finish_family_dependencies(test_dependencies, bits),
|
||||
str(bits),
|
||||
result
|
||||
)
|
||||
|
||||
def test_cases_for_key_generation(self) -> Iterator[test_case.TestCase]:
|
||||
"""Generate test cases that exercise the generation of keys."""
|
||||
for key_type in sorted(self.constructors.key_types):
|
||||
if key_type in self.ECC_KEY_TYPES:
|
||||
continue
|
||||
if key_type in self.DH_KEY_TYPES:
|
||||
continue
|
||||
kt = crypto_knowledge.KeyType(key_type)
|
||||
yield from self.test_cases_for_key_type_key_generation(kt)
|
||||
for curve_family in sorted(self.constructors.ecc_curves):
|
||||
for constr in self.ECC_KEY_TYPES:
|
||||
kt = crypto_knowledge.KeyType(constr, [curve_family])
|
||||
yield from self.test_cases_for_key_type_key_generation(kt)
|
||||
for dh_family in sorted(self.constructors.dh_groups):
|
||||
for constr in self.DH_KEY_TYPES:
|
||||
kt = crypto_knowledge.KeyType(constr, [dh_family])
|
||||
yield from self.test_cases_for_key_type_key_generation(kt)
|
||||
|
||||
class OpFail:
|
||||
"""Generate test cases for operations that must fail."""
|
||||
#pylint: disable=too-few-public-methods
|
||||
|
||||
class Reason(enum.Enum):
|
||||
NOT_SUPPORTED = 0
|
||||
INVALID = 1
|
||||
INCOMPATIBLE = 2
|
||||
PUBLIC = 3
|
||||
|
||||
def __init__(self, info: psa_information.Information) -> None:
|
||||
self.constructors = info.constructors
|
||||
key_type_expressions = self.constructors.generate_expressions(
|
||||
sorted(self.constructors.key_types)
|
||||
)
|
||||
self.key_types = [crypto_knowledge.KeyType(kt_expr)
|
||||
for kt_expr in key_type_expressions]
|
||||
|
||||
def make_test_case(
|
||||
self,
|
||||
alg: crypto_knowledge.Algorithm,
|
||||
category: crypto_knowledge.AlgorithmCategory,
|
||||
reason: 'Reason',
|
||||
kt: Optional[crypto_knowledge.KeyType] = None,
|
||||
not_deps: FrozenSet[str] = frozenset(),
|
||||
) -> test_case.TestCase:
|
||||
"""Construct a failure test case for a one-key or keyless operation."""
|
||||
#pylint: disable=too-many-arguments,too-many-locals
|
||||
tc = test_case.TestCase()
|
||||
pretty_alg = alg.short_expression()
|
||||
if reason == self.Reason.NOT_SUPPORTED:
|
||||
short_deps = [re.sub(r'PSA_WANT_ALG_', r'', dep)
|
||||
for dep in not_deps]
|
||||
pretty_reason = '!' + '&'.join(sorted(short_deps))
|
||||
else:
|
||||
pretty_reason = reason.name.lower()
|
||||
if kt:
|
||||
key_type = kt.expression
|
||||
pretty_type = kt.short_expression()
|
||||
else:
|
||||
key_type = ''
|
||||
pretty_type = ''
|
||||
tc.set_description('PSA {} {}: {}{}'
|
||||
.format(category.name.lower(),
|
||||
pretty_alg,
|
||||
pretty_reason,
|
||||
' with ' + pretty_type if pretty_type else ''))
|
||||
dependencies = psa_information.automatic_dependencies(alg.base_expression, key_type)
|
||||
dependencies = psa_information.fix_key_pair_dependencies(dependencies, 'BASIC')
|
||||
for i, dep in enumerate(dependencies):
|
||||
if dep in not_deps:
|
||||
dependencies[i] = '!' + dep
|
||||
tc.set_dependencies(dependencies)
|
||||
tc.set_function(category.name.lower() + '_fail')
|
||||
arguments = [] # type: List[str]
|
||||
if kt:
|
||||
key_material = kt.key_material(kt.sizes_to_test()[0])
|
||||
arguments += [key_type, test_case.hex_string(key_material)]
|
||||
arguments.append(alg.expression)
|
||||
if category.is_asymmetric():
|
||||
arguments.append('1' if reason == self.Reason.PUBLIC else '0')
|
||||
error = ('NOT_SUPPORTED' if reason == self.Reason.NOT_SUPPORTED else
|
||||
'INVALID_ARGUMENT')
|
||||
arguments.append('PSA_ERROR_' + error)
|
||||
tc.set_arguments(arguments)
|
||||
return tc
|
||||
|
||||
def no_key_test_cases(
|
||||
self,
|
||||
alg: crypto_knowledge.Algorithm,
|
||||
category: crypto_knowledge.AlgorithmCategory,
|
||||
) -> Iterator[test_case.TestCase]:
|
||||
"""Generate failure test cases for keyless operations with the specified algorithm."""
|
||||
if alg.can_do(category):
|
||||
# Compatible operation, unsupported algorithm
|
||||
for dep in psa_information.automatic_dependencies(alg.base_expression):
|
||||
yield self.make_test_case(alg, category,
|
||||
self.Reason.NOT_SUPPORTED,
|
||||
not_deps=frozenset([dep]))
|
||||
else:
|
||||
# Incompatible operation, supported algorithm
|
||||
yield self.make_test_case(alg, category, self.Reason.INVALID)
|
||||
|
||||
def one_key_test_cases(
|
||||
self,
|
||||
alg: crypto_knowledge.Algorithm,
|
||||
category: crypto_knowledge.AlgorithmCategory,
|
||||
) -> Iterator[test_case.TestCase]:
|
||||
"""Generate failure test cases for one-key operations with the specified algorithm."""
|
||||
for kt in self.key_types:
|
||||
key_is_compatible = kt.can_do(alg)
|
||||
if key_is_compatible and alg.can_do(category):
|
||||
# Compatible key and operation, unsupported algorithm
|
||||
for dep in psa_information.automatic_dependencies(alg.base_expression):
|
||||
yield self.make_test_case(alg, category,
|
||||
self.Reason.NOT_SUPPORTED,
|
||||
kt=kt, not_deps=frozenset([dep]))
|
||||
# Public key for a private-key operation
|
||||
if category.is_asymmetric() and kt.is_public():
|
||||
yield self.make_test_case(alg, category,
|
||||
self.Reason.PUBLIC,
|
||||
kt=kt)
|
||||
elif key_is_compatible:
|
||||
# Compatible key, incompatible operation, supported algorithm
|
||||
yield self.make_test_case(alg, category,
|
||||
self.Reason.INVALID,
|
||||
kt=kt)
|
||||
elif alg.can_do(category):
|
||||
# Incompatible key, compatible operation, supported algorithm
|
||||
yield self.make_test_case(alg, category,
|
||||
self.Reason.INCOMPATIBLE,
|
||||
kt=kt)
|
||||
else:
|
||||
# Incompatible key and operation. Don't test cases where
|
||||
# multiple things are wrong, to keep the number of test
|
||||
# cases reasonable.
|
||||
pass
|
||||
|
||||
def test_cases_for_algorithm(
|
||||
self,
|
||||
alg: crypto_knowledge.Algorithm,
|
||||
) -> Iterator[test_case.TestCase]:
|
||||
"""Generate operation failure test cases for the specified algorithm."""
|
||||
for category in crypto_knowledge.AlgorithmCategory:
|
||||
if category == crypto_knowledge.AlgorithmCategory.PAKE:
|
||||
# PAKE operations are not implemented yet
|
||||
pass
|
||||
elif category.requires_key():
|
||||
yield from self.one_key_test_cases(alg, category)
|
||||
else:
|
||||
yield from self.no_key_test_cases(alg, category)
|
||||
|
||||
def all_test_cases(self) -> Iterator[test_case.TestCase]:
|
||||
"""Generate all test cases for operations that must fail."""
|
||||
algorithms = sorted(self.constructors.algorithms)
|
||||
for expr in self.constructors.generate_expressions(algorithms):
|
||||
alg = crypto_knowledge.Algorithm(expr)
|
||||
yield from self.test_cases_for_algorithm(alg)
|
||||
|
||||
|
||||
class StorageKey(psa_storage.Key):
|
||||
"""Representation of a key for storage format testing."""
|
||||
|
||||
IMPLICIT_USAGE_FLAGS = {
|
||||
'PSA_KEY_USAGE_SIGN_HASH': 'PSA_KEY_USAGE_SIGN_MESSAGE',
|
||||
'PSA_KEY_USAGE_VERIFY_HASH': 'PSA_KEY_USAGE_VERIFY_MESSAGE'
|
||||
} #type: Dict[str, str]
|
||||
"""Mapping of usage flags to the flags that they imply."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
usage: Iterable[str],
|
||||
without_implicit_usage: Optional[bool] = False,
|
||||
**kwargs
|
||||
) -> None:
|
||||
"""Prepare to generate a key.
|
||||
|
||||
* `usage` : The usage flags used for the key.
|
||||
* `without_implicit_usage`: Flag to skip applying the implicit usage extension
|
||||
"""
|
||||
usage_flags = set(usage)
|
||||
if not without_implicit_usage:
|
||||
for flag in sorted(usage_flags):
|
||||
if flag in self.IMPLICIT_USAGE_FLAGS:
|
||||
usage_flags.add(self.IMPLICIT_USAGE_FLAGS[flag])
|
||||
if usage_flags:
|
||||
usage_expression = ' | '.join(sorted(usage_flags))
|
||||
else:
|
||||
usage_expression = '0'
|
||||
super().__init__(usage=usage_expression, **kwargs)
|
||||
|
||||
class StorageTestData(StorageKey):
|
||||
"""Representation of test case data for storage format testing."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
description: str,
|
||||
expected_usage: Optional[List[str]] = None,
|
||||
**kwargs
|
||||
) -> None:
|
||||
"""Prepare to generate test data
|
||||
|
||||
* `description` : used for the test case names
|
||||
* `expected_usage`: the usage flags generated as the expected usage flags
|
||||
in the test cases. Can differ from the usage flags
|
||||
stored in the keys because of the usage flags extension.
|
||||
"""
|
||||
super().__init__(**kwargs)
|
||||
self.description = description #type: str
|
||||
if expected_usage is None:
|
||||
self.expected_usage = self.usage #type: psa_storage.Expr
|
||||
elif expected_usage:
|
||||
self.expected_usage = psa_storage.Expr(' | '.join(expected_usage))
|
||||
else:
|
||||
self.expected_usage = psa_storage.Expr(0)
|
||||
|
||||
class StorageFormat:
|
||||
"""Storage format stability test cases."""
|
||||
|
||||
def __init__(self, info: psa_information.Information, version: int, forward: bool) -> None:
|
||||
"""Prepare to generate test cases for storage format stability.
|
||||
|
||||
* `info`: information about the API. See the `Information` class.
|
||||
* `version`: the storage format version to generate test cases for.
|
||||
* `forward`: if true, generate forward compatibility test cases which
|
||||
save a key and check that its representation is as intended. Otherwise
|
||||
generate backward compatibility test cases which inject a key
|
||||
representation and check that it can be read and used.
|
||||
"""
|
||||
self.constructors = info.constructors #type: macro_collector.PSAMacroEnumerator
|
||||
self.version = version #type: int
|
||||
self.forward = forward #type: bool
|
||||
|
||||
RSA_OAEP_RE = re.compile(r'PSA_ALG_RSA_OAEP\((.*)\)\Z')
|
||||
BRAINPOOL_RE = re.compile(r'PSA_KEY_TYPE_\w+\(PSA_ECC_FAMILY_BRAINPOOL_\w+\)\Z')
|
||||
@classmethod
|
||||
def exercise_key_with_algorithm(
|
||||
cls,
|
||||
key_type: psa_storage.Expr, bits: int,
|
||||
alg: psa_storage.Expr
|
||||
) -> bool:
|
||||
"""Whether to exercise the given key with the given algorithm.
|
||||
|
||||
Normally only the type and algorithm matter for compatibility, and
|
||||
this is handled in crypto_knowledge.KeyType.can_do(). This function
|
||||
exists to detect exceptional cases. Exceptional cases detected here
|
||||
are not tested in OpFail and should therefore have manually written
|
||||
test cases.
|
||||
"""
|
||||
# Some test keys have the RAW_DATA type and attributes that don't
|
||||
# necessarily make sense. We do this to validate numerical
|
||||
# encodings of the attributes.
|
||||
# Raw data keys have no useful exercise anyway so there is no
|
||||
# loss of test coverage.
|
||||
if key_type.string == 'PSA_KEY_TYPE_RAW_DATA':
|
||||
return False
|
||||
# OAEP requires room for two hashes plus wrapping
|
||||
m = cls.RSA_OAEP_RE.match(alg.string)
|
||||
if m:
|
||||
hash_alg = m.group(1)
|
||||
hash_length = crypto_knowledge.Algorithm.hash_length(hash_alg)
|
||||
key_length = (bits + 7) // 8
|
||||
# Leave enough room for at least one byte of plaintext
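# (Editorial example: with SHA-256, hash_length is 32 bytes, so the key must be
# longer than 2*32 + 2 = 66 bytes, i.e. at least 536 bits, to leave that room.)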
|
||||
return key_length > 2 * hash_length + 2
|
||||
# There's nothing wrong with ECC keys on Brainpool curves,
|
||||
# but operations with them are very slow. So we only exercise them
|
||||
# with a single algorithm, not with all possible hashes. We do
|
||||
# exercise other curves with all algorithms so test coverage is
|
||||
# perfectly adequate like this.
|
||||
m = cls.BRAINPOOL_RE.match(key_type.string)
|
||||
if m and alg.string != 'PSA_ALG_ECDSA_ANY':
|
||||
return False
|
||||
return True
|
||||

    def make_test_case(self, key: StorageTestData) -> test_case.TestCase:
        """Construct a storage format test case for the given key.

        If ``forward`` is true, generate a forward compatibility test case:
        create a key and validate that it has the expected representation.
        Otherwise generate a backward compatibility test case: inject the
        key representation into storage and validate that it can be read
        correctly.
        """
        verb = 'save' if self.forward else 'read'
        tc = test_case.TestCase()
        tc.set_description(verb + ' ' + key.description)
        dependencies = psa_information.automatic_dependencies(
            key.lifetime.string, key.type.string,
            key.alg.string, key.alg2.string,
        )
        dependencies = psa_information.finish_family_dependencies(dependencies, key.bits)
        dependencies += psa_information.generate_deps_from_description(key.description)
        dependencies = psa_information.fix_key_pair_dependencies(dependencies, 'BASIC')
        tc.set_dependencies(dependencies)
        tc.set_function('key_storage_' + verb)
        if self.forward:
            extra_arguments = []
        else:
            flags = []
            if self.exercise_key_with_algorithm(key.type, key.bits, key.alg):
                flags.append('TEST_FLAG_EXERCISE')
            if 'READ_ONLY' in key.lifetime.string:
                flags.append('TEST_FLAG_READ_ONLY')
            extra_arguments = [' | '.join(flags) if flags else '0']
        tc.set_arguments([key.lifetime.string,
                          key.type.string, str(key.bits),
                          key.expected_usage.string,
                          key.alg.string, key.alg2.string,
                          '"' + key.material.hex() + '"',
                          '"' + key.hex() + '"',
                          *extra_arguments])
        return tc

    def key_for_lifetime(
            self,
            lifetime: str,
    ) -> StorageTestData:
        """Construct a test key for the given lifetime."""
        short = lifetime
        short = re.sub(r'PSA_KEY_LIFETIME_FROM_PERSISTENCE_AND_LOCATION',
                       r'', short)
        short = crypto_knowledge.short_expression(short)
        description = 'lifetime: ' + short
        key = StorageTestData(version=self.version,
                              id=1, lifetime=lifetime,
                              type='PSA_KEY_TYPE_RAW_DATA', bits=8,
                              usage=['PSA_KEY_USAGE_EXPORT'], alg=0, alg2=0,
                              material=b'L',
                              description=description)
        return key

    def all_keys_for_lifetimes(self) -> Iterator[StorageTestData]:
        """Generate test keys covering lifetimes."""
        lifetimes = sorted(self.constructors.lifetimes)
        expressions = self.constructors.generate_expressions(lifetimes)
        for lifetime in expressions:
            # Don't attempt to create or load a volatile key in storage
            if 'VOLATILE' in lifetime:
                continue
            # Don't attempt to create a read-only key in storage,
            # but do attempt to load one.
            if 'READ_ONLY' in lifetime and self.forward:
                continue
            yield self.key_for_lifetime(lifetime)

    def key_for_usage_flags(
            self,
            usage_flags: List[str],
            short: Optional[str] = None,
            test_implicit_usage: Optional[bool] = True
    ) -> StorageTestData:
        """Construct a test key for the given key usage."""
        extra_desc = ' without implication' if test_implicit_usage else ''
        description = 'usage' + extra_desc + ': '
        key1 = StorageTestData(version=self.version,
                               id=1, lifetime=0x00000001,
                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
                               expected_usage=usage_flags,
                               without_implicit_usage=not test_implicit_usage,
                               usage=usage_flags, alg=0, alg2=0,
                               material=b'K',
                               description=description)
        if short is None:
            usage_expr = key1.expected_usage.string
            key1.description += crypto_knowledge.short_expression(usage_expr)
        else:
            key1.description += short
        return key1

    def generate_keys_for_usage_flags(self, **kwargs) -> Iterator[StorageTestData]:
        """Generate test keys covering usage flags."""
        known_flags = sorted(self.constructors.key_usage_flags)
        yield self.key_for_usage_flags(['0'], **kwargs)
        for usage_flag in known_flags:
            yield self.key_for_usage_flags([usage_flag], **kwargs)
        for flag1, flag2 in zip(known_flags,
                                known_flags[1:] + [known_flags[0]]):
            yield self.key_for_usage_flags([flag1, flag2], **kwargs)

    def generate_key_for_all_usage_flags(self) -> Iterator[StorageTestData]:
        known_flags = sorted(self.constructors.key_usage_flags)
        yield self.key_for_usage_flags(known_flags, short='all known')

    def all_keys_for_usage_flags(self) -> Iterator[StorageTestData]:
        yield from self.generate_keys_for_usage_flags()
        yield from self.generate_key_for_all_usage_flags()

    def key_for_type_and_alg(
            self,
            kt: crypto_knowledge.KeyType,
            bits: int,
            alg: Optional[crypto_knowledge.Algorithm] = None,
    ) -> StorageTestData:
        """Construct a test key of the given type.

        If alg is not None, this key allows it.
        """
        usage_flags = ['PSA_KEY_USAGE_EXPORT']
        alg1 = 0 #type: psa_storage.Exprable
        alg2 = 0
        if alg is not None:
            alg1 = alg.expression
            usage_flags += alg.usage_flags(public=kt.is_public())
        key_material = kt.key_material(bits)
        description = 'type: {} {}-bit'.format(kt.short_expression(1), bits)
        if alg is not None:
            description += ', ' + alg.short_expression(1)
        key = StorageTestData(version=self.version,
                              id=1, lifetime=0x00000001,
                              type=kt.expression, bits=bits,
                              usage=usage_flags, alg=alg1, alg2=alg2,
                              material=key_material,
                              description=description)
        return key

    def keys_for_type(
            self,
            key_type: str,
            all_algorithms: List[crypto_knowledge.Algorithm],
    ) -> Iterator[StorageTestData]:
        """Generate test keys for the given key type."""
        kt = crypto_knowledge.KeyType(key_type)
        for bits in kt.sizes_to_test():
            # Test a non-exercisable key, as well as exercisable keys for
            # each compatible algorithm.
            # To do: test reading a key from storage with an incompatible
            # or unsupported algorithm.
            yield self.key_for_type_and_alg(kt, bits)
            compatible_algorithms = [alg for alg in all_algorithms
                                     if kt.can_do(alg)]
            for alg in compatible_algorithms:
                yield self.key_for_type_and_alg(kt, bits, alg)

    def all_keys_for_types(self) -> Iterator[StorageTestData]:
        """Generate test keys covering key types and their representations."""
        key_types = sorted(self.constructors.key_types)
        all_algorithms = [crypto_knowledge.Algorithm(alg)
                          for alg in self.constructors.generate_expressions(
                              sorted(self.constructors.algorithms)
                          )]
        for key_type in self.constructors.generate_expressions(key_types):
            yield from self.keys_for_type(key_type, all_algorithms)

    def keys_for_algorithm(self, alg: str) -> Iterator[StorageTestData]:
        """Generate test keys for the encoding of the specified algorithm."""
        # These test cases only validate the encoding of algorithms, not
        # whether the key read from storage is suitable for an operation.
        # `keys_for_types` generates read tests with an algorithm and a
        # compatible key.
        descr = crypto_knowledge.short_expression(alg, 1)
        usage = ['PSA_KEY_USAGE_EXPORT']
        key1 = StorageTestData(version=self.version,
                               id=1, lifetime=0x00000001,
                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
                               usage=usage, alg=alg, alg2=0,
                               material=b'K',
                               description='alg: ' + descr)
        yield key1
        key2 = StorageTestData(version=self.version,
                               id=1, lifetime=0x00000001,
                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
                               usage=usage, alg=0, alg2=alg,
                               material=b'L',
                               description='alg2: ' + descr)
        yield key2

    def all_keys_for_algorithms(self) -> Iterator[StorageTestData]:
        """Generate test keys covering algorithm encodings."""
        algorithms = sorted(self.constructors.algorithms)
        for alg in self.constructors.generate_expressions(algorithms):
            yield from self.keys_for_algorithm(alg)

    def generate_all_keys(self) -> Iterator[StorageTestData]:
        """Generate all keys for the test cases."""
        yield from self.all_keys_for_lifetimes()
        yield from self.all_keys_for_usage_flags()
        yield from self.all_keys_for_types()
        yield from self.all_keys_for_algorithms()

    def all_test_cases(self) -> Iterator[test_case.TestCase]:
        """Generate all storage format test cases."""
        # First build a list of all keys, then construct all the corresponding
        # test cases. This allows all required information to be obtained in
        # one go, which is a significant performance gain as the information
        # includes numerical values obtained by compiling a C program.
        all_keys = list(self.generate_all_keys())
        for key in all_keys:
            if key.location_value() != 0:
                # Skip keys with a non-default location, because they
                # require a driver and we currently have no mechanism to
                # determine whether a driver is available.
                continue
            yield self.make_test_case(key)


class StorageFormatForward(StorageFormat):
    """Storage format stability test cases for forward compatibility."""

    def __init__(self, info: psa_information.Information, version: int) -> None:
        super().__init__(info, version, True)


class StorageFormatV0(StorageFormat):
    """Storage format stability test cases for version 0 compatibility."""

    def __init__(self, info: psa_information.Information) -> None:
        super().__init__(info, 0, False)

    def all_keys_for_usage_flags(self) -> Iterator[StorageTestData]:
        """Generate test keys covering usage flags."""
        yield from super().all_keys_for_usage_flags()
        yield from self.generate_keys_for_usage_flags(test_implicit_usage=False)

    def keys_for_implicit_usage(
            self,
            implyer_usage: str,
            alg: str,
            key_type: crypto_knowledge.KeyType
    ) -> StorageTestData:
        # pylint: disable=too-many-locals
        """Generate a test key for the specified implicit usage flag,
        algorithm and key type combination.
        """
        bits = key_type.sizes_to_test()[0]
        implicit_usage = StorageKey.IMPLICIT_USAGE_FLAGS[implyer_usage]
        usage_flags = ['PSA_KEY_USAGE_EXPORT']
        material_usage_flags = usage_flags + [implyer_usage]
        expected_usage_flags = material_usage_flags + [implicit_usage]
        alg2 = 0
        key_material = key_type.key_material(bits)
        usage_expression = crypto_knowledge.short_expression(implyer_usage, 1)
        alg_expression = crypto_knowledge.short_expression(alg, 1)
        key_type_expression = key_type.short_expression(1)
        description = 'implied by {}: {} {} {}-bit'.format(
            usage_expression, alg_expression, key_type_expression, bits)
        key = StorageTestData(version=self.version,
                              id=1, lifetime=0x00000001,
                              type=key_type.expression, bits=bits,
                              usage=material_usage_flags,
                              expected_usage=expected_usage_flags,
                              without_implicit_usage=True,
                              alg=alg, alg2=alg2,
                              material=key_material,
                              description=description)
        return key

    def gather_key_types_for_sign_alg(self) -> Dict[str, List[str]]:
        # pylint: disable=too-many-locals
        """Match possible key types for sign algorithms."""
        # To create a valid combination both the algorithms and key types
        # must be filtered. Pair them with keywords created from their names.
        incompatible_alg_keyword = frozenset(['RAW', 'ANY', 'PURE'])
        incompatible_key_type_keywords = frozenset(['MONTGOMERY'])
        keyword_translation = {
            'ECDSA': 'ECC',
            'ED[0-9]*.*' : 'EDWARDS'
        }
        exclusive_keywords = {
            'EDWARDS': 'ECC'
        }
        key_types = set(self.constructors.generate_expressions(self.constructors.key_types))
        algorithms = set(self.constructors.generate_expressions(self.constructors.sign_algorithms))
        alg_with_keys = {} #type: Dict[str, List[str]]
        translation_table = str.maketrans('(', '_', ')')
        for alg in algorithms:
            # Generate keywords from the name of the algorithm
            alg_keywords = set(alg.partition('(')[0].split(sep='_')[2:])
            # Translate keywords for better matching with the key types
            for keyword in alg_keywords.copy():
                for pattern, replace in keyword_translation.items():
                    if re.match(pattern, keyword):
                        alg_keywords.remove(keyword)
                        alg_keywords.add(replace)
            # Filter out incompatible algorithms
            if not alg_keywords.isdisjoint(incompatible_alg_keyword):
                continue

            for key_type in key_types:
                # Generate keywords from the name of the key type
                key_type_keywords = set(key_type.translate(translation_table).split(sep='_')[3:])

                # Remove ambiguous keywords
                for keyword1, keyword2 in exclusive_keywords.items():
                    if keyword1 in key_type_keywords:
                        key_type_keywords.remove(keyword2)

                if key_type_keywords.isdisjoint(incompatible_key_type_keywords) and\
                   not key_type_keywords.isdisjoint(alg_keywords):
                    if alg in alg_with_keys:
                        alg_with_keys[alg].append(key_type)
                    else:
                        alg_with_keys[alg] = [key_type]
        return alg_with_keys

    def all_keys_for_implicit_usage(self) -> Iterator[StorageTestData]:
        """Generate test keys for usage flag extensions."""
        # Generate a key type and algorithm pair for each extendable usage
        # flag to generate a valid key for exercising. The key is generated
        # without usage extension to check the extension compatibility.
        alg_with_keys = self.gather_key_types_for_sign_alg()

        for usage in sorted(StorageKey.IMPLICIT_USAGE_FLAGS, key=str):
            for alg in sorted(alg_with_keys):
                for key_type in sorted(alg_with_keys[alg]):
                    # The key types must be filtered to fit the specific usage flag.
                    kt = crypto_knowledge.KeyType(key_type)
                    if kt.is_public() and '_SIGN_' in usage:
                        # Can't sign with a public key
                        continue
                    yield self.keys_for_implicit_usage(usage, alg, kt)

    def generate_all_keys(self) -> Iterator[StorageTestData]:
        yield from super().generate_all_keys()
        yield from self.all_keys_for_implicit_usage()


class PSATestGenerator(test_data_generation.TestGenerator):
    """Test generator subclass including PSA targets and info."""
    # Note that targets whose names contain 'test_format' have their content
    # validated by `abi_check.py`.
    targets = {
        'test_suite_psa_crypto_generate_key.generated':
            lambda info: KeyGenerate(info).test_cases_for_key_generation(),
        'test_suite_psa_crypto_not_supported.generated':
            lambda info: KeyTypeNotSupported(info).test_cases_for_not_supported(),
        'test_suite_psa_crypto_low_hash.generated':
            lambda info: crypto_data_tests.HashPSALowLevel(info).all_test_cases(),
        'test_suite_psa_crypto_op_fail.generated':
            lambda info: OpFail(info).all_test_cases(),
        'test_suite_psa_crypto_storage_format.current':
            lambda info: StorageFormatForward(info, 0).all_test_cases(),
        'test_suite_psa_crypto_storage_format.v0':
            lambda info: StorageFormatV0(info).all_test_cases(),
    } #type: Dict[str, Callable[[psa_information.Information], Iterable[test_case.TestCase]]]

    def __init__(self, options):
        super().__init__(options)
        self.info = psa_information.Information()

    def generate_target(self, name: str, *target_args) -> None:
        super().generate_target(name, self.info)


if __name__ == '__main__':
    test_data_generation.main(sys.argv[1:], __doc__, PSATestGenerator)
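# Editorial note (not part of the original file): each entry in
# PSATestGenerator.targets maps a generated data file name to a callable that
# takes a psa_information.Information object and returns an iterable of
# test_case.TestCase. A hedged sketch of how a hypothetical new suite could be
# registered (the suite name and Example class below are placeholders):
#
#     'test_suite_psa_crypto_example.generated':
#         lambda info: Example(info).all_test_cases(),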
@ -1,257 +0,0 @@
|
||||
#!/usr/bin/env python3
"""Generate wrapper functions for PSA function calls.
"""

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

### WARNING: the code in this file has not been extensively reviewed yet.
### We do not think it is harmful, but it may be below our normal standards
### for robustness and maintainability.

import argparse
import itertools
import os
from typing import Iterator, List, Optional, Tuple

import scripts_path #pylint: disable=unused-import
from mbedtls_framework import build_tree
from mbedtls_framework import c_parsing_helper
from mbedtls_framework import c_wrapper_generator
from mbedtls_framework import typing_util


class BufferParameter:
    """Description of an input or output buffer parameter sequence to a PSA function."""
    #pylint: disable=too-few-public-methods

    def __init__(self, i: int, is_output: bool,
                 buffer_name: str, size_name: str) -> None:
        """Initialize the parameter information.

        i is the index of the function argument that is the pointer to the buffer.
        The size is argument i+1. For a variable-size output, the actual length
        goes in argument i+2.

        buffer_name and size_name are the names of arguments i and i+1.
        This class does not yet help with the output length.
        """
        self.index = i
        self.buffer_name = buffer_name
        self.size_name = size_name
        self.is_output = is_output


class PSAWrapperGenerator(c_wrapper_generator.Base):
    """Generate a C source file containing wrapper functions for PSA Crypto API calls."""

    _CPP_GUARDS = ('defined(MBEDTLS_PSA_CRYPTO_C) && ' +
                   'defined(MBEDTLS_TEST_HOOKS) && \\\n ' +
                   '!defined(RECORD_PSA_STATUS_COVERAGE_LOG)')
    _WRAPPER_NAME_PREFIX = 'mbedtls_test_wrap_'
    _WRAPPER_NAME_SUFFIX = ''

    def gather_data(self) -> None:
        root_dir = build_tree.guess_mbedtls_root()
        for header_name in ['crypto.h', 'crypto_extra.h']:
            header_path = os.path.join(root_dir, 'include', 'psa', header_name)
            c_parsing_helper.read_function_declarations(self.functions, header_path)

    _SKIP_FUNCTIONS = frozenset([
        'mbedtls_psa_external_get_random', # not a library function
        'psa_get_key_domain_parameters', # client-side function
        'psa_get_key_slot_number', # client-side function
        'psa_key_derivation_verify_bytes', # not implemented yet
        'psa_key_derivation_verify_key', # not implemented yet
        'psa_set_key_domain_parameters', # client-side function
    ])

    def _skip_function(self, function: c_wrapper_generator.FunctionInfo) -> bool:
        if function.return_type != 'psa_status_t':
            return True
        if function.name in self._SKIP_FUNCTIONS:
            return True
        return False

    # PAKE stuff: not implemented yet
    _PAKE_STUFF = frozenset([
        'psa_crypto_driver_pake_inputs_t *',
        'psa_pake_cipher_suite_t *',
    ])

    def _return_variable_name(self,
                              function: c_wrapper_generator.FunctionInfo) -> str:
        """The name of the variable that will contain the return value."""
        if function.return_type == 'psa_status_t':
            return 'status'
        return super()._return_variable_name(function)

    _FUNCTION_GUARDS = c_wrapper_generator.Base._FUNCTION_GUARDS.copy() \
        #pylint: disable=protected-access
    _FUNCTION_GUARDS.update({
        'mbedtls_psa_register_se_key': 'defined(MBEDTLS_PSA_CRYPTO_SE_C)',
        'mbedtls_psa_inject_entropy': 'defined(MBEDTLS_PSA_INJECT_ENTROPY)',
        'mbedtls_psa_external_get_random': 'defined(MBEDTLS_PSA_CRYPTO_EXTERNAL_RNG)',
        'mbedtls_psa_platform_get_builtin_key': 'defined(MBEDTLS_PSA_CRYPTO_BUILTIN_KEYS)',
    })

    @staticmethod
    def _detect_buffer_parameters(arguments: List[c_parsing_helper.ArgumentInfo],
                                  argument_names: List[str]) -> Iterator[BufferParameter]:
        """Detect function arguments that are buffers (pointer, size [,length])."""
        types = ['' if arg.suffix else arg.type for arg in arguments]
        # pairs = list of (type_of_arg_N, type_of_arg_N+1)
        # where each type_of_arg_X is the empty string if the type is an array
        # or there is no argument X.
        pairs = enumerate(itertools.zip_longest(types, types[1:], fillvalue=''))
        for i, t01 in pairs:
            if (t01[0] == 'const uint8_t *' or t01[0] == 'uint8_t *') and \
               t01[1] == 'size_t':
                yield BufferParameter(i, not t01[0].startswith('const '),
                                      argument_names[i], argument_names[i+1])

    @staticmethod
    def _write_poison_buffer_parameter(out: typing_util.Writable,
                                       param: BufferParameter,
                                       poison: bool) -> None:
        """Write poisoning or unpoisoning code for a buffer parameter.

        Write poisoning code if poison is true, unpoisoning code otherwise.
        """
        out.write(' MBEDTLS_TEST_MEMORY_{}({}, {});\n'.format(
            'POISON' if poison else 'UNPOISON',
            param.buffer_name, param.size_name
        ))

    def _write_poison_buffer_parameters(self, out: typing_util.Writable,
                                        buffer_parameters: List[BufferParameter],
                                        poison: bool) -> None:
        """Write poisoning or unpoisoning code for the buffer parameters.

        Write poisoning code if poison is true, unpoisoning code otherwise.
        """
        if not buffer_parameters:
            return
        out.write('#if !defined(MBEDTLS_PSA_ASSUME_EXCLUSIVE_BUFFERS)\n')
        for param in buffer_parameters:
            self._write_poison_buffer_parameter(out, param, poison)
        out.write('#endif /* !defined(MBEDTLS_PSA_ASSUME_EXCLUSIVE_BUFFERS) */\n')

    @staticmethod
    def _parameter_should_be_copied(function_name: str,
                                    _buffer_name: Optional[str]) -> bool:
        """Whether the specified buffer argument to a PSA function should be copied.
        """
        # False-positives that do not need buffer copying
        if function_name in ('mbedtls_psa_inject_entropy',
                             'psa_crypto_driver_pake_get_password',
                             'psa_crypto_driver_pake_get_user',
                             'psa_crypto_driver_pake_get_peer'):
            return False

        return True

    def _write_function_call(self, out: typing_util.Writable,
                             function: c_wrapper_generator.FunctionInfo,
                             argument_names: List[str]) -> None:
        buffer_parameters = list(
            param
            for param in self._detect_buffer_parameters(function.arguments,
                                                        argument_names)
            if self._parameter_should_be_copied(function.name,
                                                function.arguments[param.index].name))
        self._write_poison_buffer_parameters(out, buffer_parameters, True)
        super()._write_function_call(out, function, argument_names)
        self._write_poison_buffer_parameters(out, buffer_parameters, False)

    def _write_prologue(self, out: typing_util.Writable, header: bool) -> None:
        super()._write_prologue(out, header)
        out.write("""
#if {}

#include <psa/crypto.h>

#include <test/memory.h>
#include <test/psa_crypto_helpers.h>
#include <test/psa_test_wrappers.h>
"""
                  .format(self._CPP_GUARDS))

    def _write_epilogue(self, out: typing_util.Writable, header: bool) -> None:
        out.write("""
#endif /* {} */
"""
                  .format(self._CPP_GUARDS))
        super()._write_epilogue(out, header)


class PSALoggingWrapperGenerator(PSAWrapperGenerator, c_wrapper_generator.Logging):
    """Generate a C source file containing wrapper functions that log PSA Crypto API calls."""

    def __init__(self, stream: str) -> None:
        super().__init__()
        self.set_stream(stream)

    _PRINTF_TYPE_CAST = c_wrapper_generator.Logging._PRINTF_TYPE_CAST.copy()
    _PRINTF_TYPE_CAST.update({
        'mbedtls_svc_key_id_t': 'unsigned',
        'psa_algorithm_t': 'unsigned',
        'psa_drv_slot_number_t': 'unsigned long long',
        'psa_key_derivation_step_t': 'int',
        'psa_key_id_t': 'unsigned',
        'psa_key_slot_number_t': 'unsigned long long',
        'psa_key_lifetime_t': 'unsigned',
        'psa_key_type_t': 'unsigned',
        'psa_key_usage_flags_t': 'unsigned',
        'psa_pake_role_t': 'int',
        'psa_pake_step_t': 'int',
        'psa_status_t': 'int',
    })

    def _printf_parameters(self, typ: str, var: str) -> Tuple[str, List[str]]:
        if typ.startswith('const '):
            typ = typ[6:]
        if typ == 'uint8_t *':
            # Skip buffers
            return '', []
        if typ.endswith('operation_t *'):
            return '', []
        if typ in self._PAKE_STUFF:
            return '', []
        if typ == 'psa_key_attributes_t *':
            return (var + '={id=%u, lifetime=0x%08x, type=0x%08x, bits=%u, alg=%08x, usage=%08x}',
                    ['(unsigned) psa_get_key_{}({})'.format(field, var)
                     for field in ['id', 'lifetime', 'type', 'bits', 'algorithm', 'usage_flags']])
        return super()._printf_parameters(typ, var)


DEFAULT_C_OUTPUT_FILE_NAME = 'tests/src/psa_test_wrappers.c'
DEFAULT_H_OUTPUT_FILE_NAME = 'tests/include/test/psa_test_wrappers.h'


def main() -> None:
    parser = argparse.ArgumentParser(description=globals()['__doc__'])
    parser.add_argument('--log',
                        help='Stream to log to (default: no logging code)')
    parser.add_argument('--output-c',
                        metavar='FILENAME',
                        default=DEFAULT_C_OUTPUT_FILE_NAME,
                        help=('Output .c file path (default: {}; skip .c output if empty)'
                              .format(DEFAULT_C_OUTPUT_FILE_NAME)))
    parser.add_argument('--output-h',
                        metavar='FILENAME',
                        default=DEFAULT_H_OUTPUT_FILE_NAME,
                        help=('Output .h file path (default: {}; skip .h output if empty)'
                              .format(DEFAULT_H_OUTPUT_FILE_NAME)))
    options = parser.parse_args()
    if options.log:
        generator = PSALoggingWrapperGenerator(options.log) #type: PSAWrapperGenerator
    else:
        generator = PSAWrapperGenerator()
    generator.gather_data()
    if options.output_h:
        generator.write_h_file(options.output_h)
    if options.output_c:
        generator.write_c_file(options.output_c)


if __name__ == '__main__':
    main()
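# Editorial note (standalone sketch, not part of the original script): the
# pairing rule implemented by PSAWrapperGenerator._detect_buffer_parameters()
# above treats a (const) uint8_t * argument followed by a size_t argument as
# one buffer. The _Arg stand-in below is hypothetical; the real code uses
# c_parsing_helper.ArgumentInfo.
import itertools as _itertools
from typing import NamedTuple as _NamedTuple

class _Arg(_NamedTuple):
    type: str
    name: str
    suffix: str = ''

def _buffer_pairs(args):
    types = ['' if a.suffix else a.type for a in args]
    for i, t01 in enumerate(_itertools.zip_longest(types, types[1:], fillvalue='')):
        if t01[0] in ('const uint8_t *', 'uint8_t *') and t01[1] == 'size_t':
            # (buffer name, size name, is_output)
            yield args[i].name, args[i + 1].name, not t01[0].startswith('const ')

# psa_hash_update(operation, input, input_length) has one input buffer:
assert list(_buffer_pairs([_Arg('psa_hash_operation_t *', 'operation'),
                           _Arg('const uint8_t *', 'input'),
                           _Arg('size_t', 'input_length')])) \
    == [('input', 'input_length', False)]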
@ -1,108 +0,0 @@
|
||||
#!/usr/bin/env python3

"""
Generate `tests/src/test_certs.h` which includes certificates/keys/certificate list for testing.
"""

#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later


import os
import sys
import argparse
import jinja2
import scripts_path # pylint: disable=unused-import
from mbedtls_framework.build_tree import guess_project_root

TEST_DIR = os.path.join(guess_project_root(), 'tests')
DATA_FILES_PATH = os.path.join(TEST_DIR, 'data_files')

INPUT_ARGS = [
    ("string", "TEST_CA_CRT_EC_PEM", DATA_FILES_PATH + "/test-ca2.crt"),
    ("binary", "TEST_CA_CRT_EC_DER", DATA_FILES_PATH + "/test-ca2.crt.der"),
    ("string", "TEST_CA_KEY_EC_PEM", DATA_FILES_PATH + "/test-ca2.key.enc"),
    ("password", "TEST_CA_PWD_EC_PEM", "PolarSSLTest"),
    ("binary", "TEST_CA_KEY_EC_DER", DATA_FILES_PATH + "/test-ca2.key.der"),
    ("string", "TEST_CA_CRT_RSA_SHA256_PEM", DATA_FILES_PATH + "/test-ca-sha256.crt"),
    ("binary", "TEST_CA_CRT_RSA_SHA256_DER", DATA_FILES_PATH + "/test-ca-sha256.crt.der"),
    ("string", "TEST_CA_CRT_RSA_SHA1_PEM", DATA_FILES_PATH + "/test-ca-sha1.crt"),
    ("binary", "TEST_CA_CRT_RSA_SHA1_DER", DATA_FILES_PATH + "/test-ca-sha1.crt.der"),
    ("string", "TEST_CA_KEY_RSA_PEM", DATA_FILES_PATH + "/test-ca.key"),
    ("password", "TEST_CA_PWD_RSA_PEM", "PolarSSLTest"),
    ("binary", "TEST_CA_KEY_RSA_DER", DATA_FILES_PATH + "/test-ca.key.der"),
    ("string", "TEST_SRV_CRT_EC_PEM", DATA_FILES_PATH + "/server5.crt"),
    ("binary", "TEST_SRV_CRT_EC_DER", DATA_FILES_PATH + "/server5.crt.der"),
    ("string", "TEST_SRV_KEY_EC_PEM", DATA_FILES_PATH + "/server5.key"),
    ("binary", "TEST_SRV_KEY_EC_DER", DATA_FILES_PATH + "/server5.key.der"),
    ("string", "TEST_SRV_CRT_RSA_SHA256_PEM", DATA_FILES_PATH + "/server2-sha256.crt"),
    ("binary", "TEST_SRV_CRT_RSA_SHA256_DER", DATA_FILES_PATH + "/server2-sha256.crt.der"),
    ("string", "TEST_SRV_CRT_RSA_SHA1_PEM", DATA_FILES_PATH + "/server2.crt"),
    ("binary", "TEST_SRV_CRT_RSA_SHA1_DER", DATA_FILES_PATH + "/server2.crt.der"),
    ("string", "TEST_SRV_KEY_RSA_PEM", DATA_FILES_PATH + "/server2.key"),
    ("binary", "TEST_SRV_KEY_RSA_DER", DATA_FILES_PATH + "/server2.key.der"),
    ("string", "TEST_CLI_CRT_EC_PEM", DATA_FILES_PATH + "/cli2.crt"),
    ("binary", "TEST_CLI_CRT_EC_DER", DATA_FILES_PATH + "/cli2.crt.der"),
    ("string", "TEST_CLI_KEY_EC_PEM", DATA_FILES_PATH + "/cli2.key"),
    ("binary", "TEST_CLI_KEY_EC_DER", DATA_FILES_PATH + "/cli2.key.der"),
    ("string", "TEST_CLI_CRT_RSA_PEM", DATA_FILES_PATH + "/cli-rsa-sha256.crt"),
    ("binary", "TEST_CLI_CRT_RSA_DER", DATA_FILES_PATH + "/cli-rsa-sha256.crt.der"),
    ("string", "TEST_CLI_KEY_RSA_PEM", DATA_FILES_PATH + "/cli-rsa.key"),
    ("binary", "TEST_CLI_KEY_RSA_DER", DATA_FILES_PATH + "/cli-rsa.key.der"),
]

def main():
    parser = argparse.ArgumentParser()
    default_output_path = os.path.join(TEST_DIR, 'src', 'test_certs.h')
    parser.add_argument('--output', type=str, default=default_output_path)
    parser.add_argument('--list-dependencies', action='store_true')
    args = parser.parse_args()

    if args.list_dependencies:
        files_list = [arg[2] for arg in INPUT_ARGS]
        print(" ".join(files_list))
        return

    generate(INPUT_ARGS, output=args.output)

#pylint: disable=dangerous-default-value, unused-argument
def generate(values=[], output=None):
    """Generate C header file.
    """
    template_loader = jinja2.FileSystemLoader(DATA_FILES_PATH)
    template_env = jinja2.Environment(
        loader=template_loader, lstrip_blocks=True, trim_blocks=True,
        keep_trailing_newline=True)

    def read_as_c_array(filename):
        with open(filename, 'rb') as f:
            data = f.read(12)
            while data:
                yield ', '.join(['{:#04x}'.format(b) for b in data])
                data = f.read(12)

    def read_lines(filename):
        with open(filename) as f:
            try:
                for line in f:
                    yield line.strip()
            except:
                print(filename)
                raise

    def put_to_column(value, position=0):
        return ' '*position + value

    template_env.filters['read_as_c_array'] = read_as_c_array
    template_env.filters['read_lines'] = read_lines
    template_env.filters['put_to_column'] = put_to_column

    template = template_env.get_template('test_certs.h.jinja2')

    with open(output, 'w') as f:
        f.write(template.render(macros=values))


if __name__ == '__main__':
    sys.exit(main())
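# Editorial note (standalone sketch, not part of the original script): the
# read_as_c_array filter above emits file contents 12 bytes at a time as
# comma-separated hex, ready to drop into a C array initializer. The same
# formatting, without the file I/O:
def _hex_chunks(data, width=12):
    for i in range(0, len(data), width):
        yield ', '.join('{:#04x}'.format(b) for b in data[i:i + width])

assert list(_hex_chunks(bytes(range(3)))) == ['0x00, 0x01, 0x02']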
File diff suppressed because it is too large
@ -1,185 +0,0 @@
|
||||
#!/usr/bin/env python3

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

"""Module generating EC and RSA keys to be used in test_suite_pk instead of
generating the required key at run time. This helps speed up testing."""

from typing import Iterator, List, Tuple
import re
import argparse
import scripts_path # pylint: disable=unused-import
from mbedtls_framework.asymmetric_key_data import ASYMMETRIC_KEY_DATA
from mbedtls_framework.build_tree import guess_project_root

BYTES_PER_LINE = 16

def c_byte_array_literal_content(array_name: str, key_data: bytes) -> Iterator[str]:
    yield 'const unsigned char '
    yield array_name
    yield '[] = {'
    for index in range(0, len(key_data), BYTES_PER_LINE):
        yield '\n '
        for b in key_data[index:index + BYTES_PER_LINE]:
            yield ' {:#04x},'.format(b)
    yield '\n};'

def convert_der_to_c(array_name: str, key_data: bytes) -> str:
    return ''.join(c_byte_array_literal_content(array_name, key_data))

def get_key_type(key: str) -> str:
    if re.match('PSA_KEY_TYPE_RSA_.*', key):
        return "rsa"
    elif re.match('PSA_KEY_TYPE_ECC_.*', key):
        return "ec"
    else:
        print("Unhandled key type {}".format(key))
        return "unknown"

def get_ec_key_family(key: str) -> str:
    match = re.search(r'.*\((.*)\)', key)
    if match is None:
        raise Exception("Unable to get EC family from {}".format(key))
    return match.group(1)

# Legacy EC group IDs do not support all the key types that PSA does, so the
# following dictionaries are used for:
# - getting prefix/suffix for legacy curve names
# - understanding if the curve is supported in legacy symbols (MBEDTLS_ECP_DP_...)
EC_NAME_CONVERSION = {
    'PSA_ECC_FAMILY_SECP_K1': {
        192: ('secp', 'k1'),
        224: ('secp', 'k1'),
        256: ('secp', 'k1')
    },
    'PSA_ECC_FAMILY_SECP_R1': {
        192: ('secp', 'r1'),
        224: ('secp', 'r1'),
        256: ('secp', 'r1'),
        384: ('secp', 'r1'),
        521: ('secp', 'r1')
    },
    'PSA_ECC_FAMILY_BRAINPOOL_P_R1': {
        256: ('bp', 'r1'),
        384: ('bp', 'r1'),
        512: ('bp', 'r1')
    },
    'PSA_ECC_FAMILY_MONTGOMERY': {
        255: ('curve', '19'),
        448: ('curve', '')
    }
}

def get_ec_curve_name(priv_key: str, bits: int) -> str:
    ec_family = get_ec_key_family(priv_key)
    try:
        prefix = EC_NAME_CONVERSION[ec_family][bits][0]
        suffix = EC_NAME_CONVERSION[ec_family][bits][1]
    except KeyError:
        return ""
    return prefix + str(bits) + suffix
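
# Editorial note: worked examples of the mapping implemented by
# get_ec_curve_name() above. The results follow directly from
# EC_NAME_CONVERSION (prefix + bits + suffix, or "" when the family/bits
# pair is absent and the caller must skip the curve):
#     get_ec_curve_name('PSA_KEY_TYPE_ECC_KEY_PAIR(PSA_ECC_FAMILY_SECP_R1)', 256)
#         -> 'secp256r1'
#     get_ec_curve_name('PSA_KEY_TYPE_ECC_KEY_PAIR(PSA_ECC_FAMILY_MONTGOMERY)', 255)
#         -> 'curve25519'
#     get_ec_curve_name('PSA_KEY_TYPE_ECC_KEY_PAIR(PSA_ECC_FAMILY_MONTGOMERY)', 448)
#         -> 'curve448'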

def get_look_up_table_entry(key_type: str, group_id_or_keybits: str,
                            priv_array_name: str, pub_array_name: str) -> Iterator[str]:
    if key_type == "ec":
        yield " {{ {}, 0,\n".format(group_id_or_keybits)
    else:
        yield " {{ 0, {},\n".format(group_id_or_keybits)
    yield " {0}, sizeof({0}),\n".format(priv_array_name)
    yield " {0}, sizeof({0}) }},".format(pub_array_name)


def write_output_file(output_file_name: str, arrays: str, look_up_table: str):
    with open(output_file_name, 'wt') as output:
        output.write("""\
/*********************************************************************************
 * This file was automatically generated from tests/scripts/generate_test_keys.py.
 * Please do not edit it manually.
 *********************************************************************************/
""")
        output.write(arrays)
        output.write("""
struct predefined_key_element {{
    int group_id; // EC group ID; 0 for RSA keys
    int keybits; // bits size of RSA key; 0 for EC keys
    const unsigned char *priv_key;
    size_t priv_key_len;
    const unsigned char *pub_key;
    size_t pub_key_len;
}};

struct predefined_key_element predefined_keys[] = {{
{}
}};

/* End of generated file */
""".format(look_up_table))

def collect_keys() -> Tuple[str, str]:
    """
    This function reads key data from ASYMMETRIC_KEY_DATA and, only for the
    keys supported in legacy ECP/RSA modules, it returns 2 strings:
    - the 1st contains C arrays declaration of these keys and
    - the 2nd contains the final look-up table for all these arrays.
    """
    arrays = []
    look_up_table = []

    # Get a list of private keys only in order to get a single item for every
    # (key type, key bits) pair. We know that ASYMMETRIC_KEY_DATA
    # contains also the public counterpart.
    priv_keys = [key for key in ASYMMETRIC_KEY_DATA if '_KEY_PAIR' in key]
    priv_keys = sorted(priv_keys)

    for priv_key in priv_keys:
        key_type = get_key_type(priv_key)
        # Ignore keys which are not EC or RSA
        if key_type == "unknown":
            continue

        pub_key = re.sub('_KEY_PAIR', '_PUBLIC_KEY', priv_key)

        for bits in ASYMMETRIC_KEY_DATA[priv_key]:
            if key_type == "ec":
                curve = get_ec_curve_name(priv_key, bits)
                # Ignore EC curves unsupported in legacy symbols
                if curve == "":
                    continue
            # Create output array name
            if key_type == "rsa":
                array_name_base = "_".join(["test", key_type, str(bits)])
            else:
                array_name_base = "_".join(["test", key_type, curve])
            array_name_priv = array_name_base + "_priv"
            array_name_pub = array_name_base + "_pub"
            # Convert bytearray to C array
            c_array_priv = convert_der_to_c(array_name_priv, ASYMMETRIC_KEY_DATA[priv_key][bits])
            c_array_pub = convert_der_to_c(array_name_pub, ASYMMETRIC_KEY_DATA[pub_key][bits])
            # Write the C array to the output file
            arrays.append(''.join(["\n", c_array_priv, "\n", c_array_pub, "\n"]))
            # Update the lookup table
            if key_type == "ec":
                group_id_or_keybits = "MBEDTLS_ECP_DP_" + curve.upper()
            else:
                group_id_or_keybits = str(bits)
            look_up_table.append(''.join(get_look_up_table_entry(key_type, group_id_or_keybits,
                                                                 array_name_priv, array_name_pub)))

    return ''.join(arrays), '\n'.join(look_up_table)

def main() -> None:
    default_output_path = guess_project_root() + "/tests/src/test_keys.h"

    argparser = argparse.ArgumentParser()
    argparser.add_argument("--output", help="Output file", default=default_output_path)
    args = argparser.parse_args()

    output_file = args.output

    arrays, look_up_table = collect_keys()

    write_output_file(output_file, arrays, look_up_table)

if __name__ == '__main__':
    main()
File diff suppressed because it is too large
@ -243,7 +243,7 @@ psa_status_t pk_psa_import_key(const unsigned char *key_data, size_t key_len,
|
||||
/** Setup the provided PK context.
 *
 * Predefined keys used for the setup are taken from "test/src/test_keys.h"
 * which is automatically generated using "tests/scripts/generate_test_keys.py".
 * which is automatically generated using "framework/scripts/generate_test_keys.py".
 *
 * \param pk   The PK object to fill. It must have been initialized
 *             (mbedtls_pk_init()), but not setup (mbedtls_pk_setup()).