From b9cd19bc8c6196511eb1d10094c62e58e5da6eed Mon Sep 17 00:00:00 2001
From: Dave Rodgman <dave.rodgman@arm.com>
Date: Fri, 30 Dec 2022 21:32:03 +0000
Subject: [PATCH] Prevent perf regressions in mbedtls_xor

Signed-off-by: Dave Rodgman <dave.rodgman@arm.com>
---
 library/alignment.h     | 13 +++++++++++++
 library/common.h        |  6 ++++--
 library/constant_time.c |  9 ---------
 3 files changed, 17 insertions(+), 11 deletions(-)

diff --git a/library/alignment.h b/library/alignment.h
index bfc965eae1..aa4c430b96 100644
--- a/library/alignment.h
+++ b/library/alignment.h
@@ -29,6 +29,19 @@
 
 #include "mbedtls/build_info.h"
 
+/*
+ * Define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS for architectures where unaligned memory
+ * accesses are known to be safe and efficient.
+ */
+#if defined(__ARM_FEATURE_UNALIGNED) \
+    || defined(__i386__) || defined(__amd64__) || defined(__x86_64__)
+/*
+ * __ARM_FEATURE_UNALIGNED is defined where appropriate by armcc, gcc 7, clang 9
+ * (and later versions); all x86 platforms should have efficient unaligned access.
+ */
+#define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS
+#endif
+
 /**
  * Read the unsigned 16 bits integer from the given address, which need not
  * be aligned.
diff --git a/library/common.h b/library/common.h
index fd3ddba48c..ae6625b9e5 100644
--- a/library/common.h
+++ b/library/common.h
@@ -122,11 +122,13 @@ static inline const unsigned char *mbedtls_buffer_offset_const(
  */
 inline void mbedtls_xor(unsigned char *r, const unsigned char *a, const unsigned char *b, size_t n)
 {
-    size_t i;
-    for (i = 0; (i + 4) <= n; i += 4) {
+    size_t i = 0;
+#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
+    for (; (i + 4) <= n; i += 4) {
         uint32_t x = mbedtls_get_unaligned_uint32(a + i) ^ mbedtls_get_unaligned_uint32(b + i);
         mbedtls_put_unaligned_uint32(r + i, x);
     }
+#endif
     for (; i < n; i++) {
         r[i] = a[i] ^ b[i];
     }
diff --git a/library/constant_time.c b/library/constant_time.c
index 309e11cd2b..89778d53ca 100644
--- a/library/constant_time.c
+++ b/library/constant_time.c
@@ -47,15 +47,6 @@
 
 #include <string.h>
 
-/*
- * Define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS for architectures where unaligned memory
- * accesses are known to be safe and efficient.
- */
-#if defined(__ARM_FEATURE_UNALIGNED)
-/* __ARM_FEATURE_UNALIGNED is defined by armcc, gcc 7, clang 9 and later versions */
-#define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS
-#endif
-
 /*
  * Define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS where assembly is present to
  * perform fast unaligned access to volatile data.
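
Reviewer note, not part of the patch: the standalone sketch below shows the
pattern the patch gates behind MBEDTLS_EFFICIENT_UNALIGNED_ACCESS, namely
XOR-ing four bytes per loop iteration where unaligned access is cheap, with a
byte-wise loop handling the tail (and handling everything on platforms where
the macro is not defined). The demo_* names, the DEMO_EFFICIENT_UNALIGNED_ACCESS
macro, and the memcpy-based helpers are stand-ins invented for illustration;
the real helpers are mbedtls_get_unaligned_uint32 / mbedtls_put_unaligned_uint32
in alignment.h, whose exact implementation is not shown in this diff.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for mbedtls_get_unaligned_uint32: memcpy is the portable way to
 * express an unaligned load; compilers typically lower it to a single load
 * on targets where that is safe. */
static inline uint32_t demo_get_unaligned_uint32(const void *p)
{
    uint32_t v;
    memcpy(&v, p, sizeof(v));
    return v;
}

/* Stand-in for mbedtls_put_unaligned_uint32 (portable unaligned store). */
static inline void demo_put_unaligned_uint32(void *p, uint32_t v)
{
    memcpy(p, &v, sizeof(v));
}

/* Same shape as the patched mbedtls_xor: a word-wise fast path compiled in
 * only when unaligned access is known to be efficient, then a byte loop for
 * whatever remains. */
static void demo_xor(unsigned char *r, const unsigned char *a,
                     const unsigned char *b, size_t n)
{
    size_t i = 0;
#if defined(DEMO_EFFICIENT_UNALIGNED_ACCESS) /* analogue of the mbed TLS macro */
    for (; (i + 4) <= n; i += 4) {
        uint32_t x = demo_get_unaligned_uint32(a + i) ^
                     demo_get_unaligned_uint32(b + i);
        demo_put_unaligned_uint32(r + i, x);
    }
#endif
    for (; i < n; i++) {
        r[i] = a[i] ^ b[i];
    }
}

int main(void)
{
    unsigned char a[7] = { 1, 2, 3, 4, 5, 6, 7 };
    unsigned char b[7] = { 7, 6, 5, 4, 3, 2, 1 };
    unsigned char r[7];

    demo_xor(r, a, b, sizeof(r));  /* 7 bytes: one word iteration + 3-byte tail */
    for (size_t i = 0; i < sizeof(r); i++) {
        printf("%02x ", r[i]);     /* expect: 06 04 06 00 06 04 06 */
    }
    printf("\n");
    return 0;
}

Because the byte loop is unconditional, correctness never depends on the
macro; defining it only unlocks the faster path, which is the property the
patch restores for mbedtls_xor after the macro moved out of constant_time.c.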