From 4b849c18a54c6940e44ee918b18aa1ff8c237ba3 Mon Sep 17 00:00:00 2001
From: Shengqi Chen
Date: Wed, 6 Dec 2023 04:01:09 +0800
Subject: [PATCH] module/icp/asm-arm/sha2: enable non-SIMD asm kernels on armv5/6

My merged pull request #15557 fixes compilation of the sha2 kernels on
armv5/6. However, the compiler guards only allow sha256/512_armv7_impl
to be used when __ARM_ARCH > 6. This patch enables these ASM kernels on
all ARM architectures. Some compiler guards are adjusted accordingly to
avoid unnecessary compilation of SIMD (e.g., neon, armv8ce) kernels on
older architectures.

Reviewed-by: Brian Behlendorf
Signed-off-by: Shengqi Chen
Closes #15623
---
 module/icp/algs/sha2/sha256_impl.c     | 22 +++++++++++++---------
 module/icp/algs/sha2/sha512_impl.c     | 19 ++++++++-----------
 module/icp/asm-arm/sha2/sha256-armv7.S |  8 +++-----
 module/icp/asm-arm/sha2/sha512-armv7.S |  4 +++-
 4 files changed, 27 insertions(+), 26 deletions(-)

diff --git a/module/icp/algs/sha2/sha256_impl.c b/module/icp/algs/sha2/sha256_impl.c
index 1d758c306f8..95cae07f291 100644
--- a/module/icp/algs/sha2/sha256_impl.c
+++ b/module/icp/algs/sha2/sha256_impl.c
@@ -118,7 +118,15 @@ const sha256_ops_t sha256_shani_impl = {
 };
 #endif
 
-#elif defined(__aarch64__) || (defined(__arm__) && __ARM_ARCH > 6)
+#elif defined(__aarch64__) || defined(__arm__)
+extern void zfs_sha256_block_armv7(uint32_t s[8], const void *, size_t);
+const sha256_ops_t sha256_armv7_impl = {
+	.is_supported = sha2_is_supported,
+	.transform = zfs_sha256_block_armv7,
+	.name = "armv7"
+};
+
+#if __ARM_ARCH > 6
 static boolean_t sha256_have_neon(void)
 {
 	return (kfpu_allowed() && zfs_neon_available());
@@ -129,13 +137,6 @@ static boolean_t sha256_have_armv8ce(void)
 	return (kfpu_allowed() && zfs_sha256_available());
 }
 
-extern void zfs_sha256_block_armv7(uint32_t s[8], const void *, size_t);
-const sha256_ops_t sha256_armv7_impl = {
-	.is_supported = sha2_is_supported,
-	.transform = zfs_sha256_block_armv7,
-	.name = "armv7"
-};
-
 TF(zfs_sha256_block_neon, tf_sha256_neon);
 const sha256_ops_t sha256_neon_impl = {
 	.is_supported = sha256_have_neon,
@@ -149,6 +150,7 @@ const sha256_ops_t sha256_armv8_impl = {
 	.transform = tf_sha256_armv8ce,
 	.name = "armv8-ce"
 };
+#endif
 
 #elif defined(__PPC64__)
 static boolean_t sha256_have_isa207(void)
@@ -192,11 +194,13 @@ static const sha256_ops_t *const sha256_impls[] = {
 #if defined(__x86_64) && defined(HAVE_SSE4_1)
 	&sha256_shani_impl,
 #endif
-#if defined(__aarch64__) || (defined(__arm__) && __ARM_ARCH > 6)
+#if defined(__aarch64__) || defined(__arm__)
 	&sha256_armv7_impl,
+#if __ARM_ARCH > 6
 	&sha256_neon_impl,
 	&sha256_armv8_impl,
 #endif
+#endif
 #if defined(__PPC64__)
 	&sha256_ppc_impl,
 	&sha256_power8_impl,
diff --git a/module/icp/algs/sha2/sha512_impl.c b/module/icp/algs/sha2/sha512_impl.c
index 6d8af921466..05ab220a1a8 100644
--- a/module/icp/algs/sha2/sha512_impl.c
+++ b/module/icp/algs/sha2/sha512_impl.c
@@ -88,7 +88,7 @@ const sha512_ops_t sha512_avx2_impl = {
 };
 #endif
 
-#elif defined(__aarch64__)
+#elif defined(__aarch64__) || defined(__arm__)
 extern void zfs_sha512_block_armv7(uint64_t s[8], const void *, size_t);
 const sha512_ops_t sha512_armv7_impl = {
 	.is_supported = sha2_is_supported,
@@ -96,6 +96,7 @@ const sha512_ops_t sha512_armv7_impl = {
 	.name = "armv7"
 };
 
+#if defined(__aarch64__)
 static boolean_t sha512_have_armv8ce(void)
 {
 	return (kfpu_allowed() && zfs_sha512_available());
@@ -107,15 +108,9 @@ const sha512_ops_t sha512_armv8_impl = {
 	.transform = tf_sha512_armv8ce,
 	.name = "armv8-ce"
 };
+#endif
 
-#elif defined(__arm__) && __ARM_ARCH > 6
-extern void zfs_sha512_block_armv7(uint64_t s[8], const void *, size_t);
-const sha512_ops_t sha512_armv7_impl = {
-	.is_supported = sha2_is_supported,
-	.transform = zfs_sha512_block_armv7,
-	.name = "armv7"
-};
-
+#if defined(__arm__) && __ARM_ARCH > 6
 static boolean_t sha512_have_neon(void)
 {
 	return (kfpu_allowed() && zfs_neon_available());
@@ -127,6 +122,7 @@ const sha512_ops_t sha512_neon_impl = {
 	.transform = tf_sha512_neon,
 	.name = "neon"
 };
+#endif
 
 #elif defined(__PPC64__)
 TF(zfs_sha512_ppc, tf_sha512_ppc);
@@ -164,14 +160,15 @@ static const sha512_ops_t *const sha512_impls[] = {
 #if defined(__x86_64) && defined(HAVE_AVX2)
 	&sha512_avx2_impl,
 #endif
-#if defined(__aarch64__)
+#if defined(__aarch64__) || defined(__arm__)
 	&sha512_armv7_impl,
+#if defined(__aarch64__)
 	&sha512_armv8_impl,
 #endif
 #if defined(__arm__) && __ARM_ARCH > 6
-	&sha512_armv7_impl,
 	&sha512_neon_impl,
 #endif
+#endif
 #if defined(__PPC64__)
 	&sha512_ppc_impl,
 	&sha512_power8_impl,
diff --git a/module/icp/asm-arm/sha2/sha256-armv7.S b/module/icp/asm-arm/sha2/sha256-armv7.S
index 190dbabc5ec..3ae66626df3 100644
--- a/module/icp/asm-arm/sha2/sha256-armv7.S
+++ b/module/icp/asm-arm/sha2/sha256-armv7.S
@@ -1837,6 +1837,7 @@ zfs_sha256_block_armv7:
 #endif
 .size	zfs_sha256_block_armv7,.-zfs_sha256_block_armv7
 
+#if __ARM_ARCH__ >= 7
 .arch	armv7-a
 .fpu	neon
 
@@ -1849,11 +1850,7 @@ zfs_sha256_block_neon:
 	stmdb	sp!,{r4-r12,lr}
 
 	sub	r11,sp,#16*4+16
-#if __ARM_ARCH__ >=7
 	adr	r14,K256
-#else
-	ldr	r14,=K256
-#endif
 	bic	r11,r11,#15		@ align for 128-bit stores
 	mov	r12,sp
 	mov	sp,r11			@ alloca
@@ -2773,4 +2770,5 @@ zfs_sha256_block_armv8:
 	bx	lr				@ bx lr
 .size	zfs_sha256_block_armv8,.-zfs_sha256_block_armv8
 
-#endif
+#endif // #if __ARM_ARCH__ >= 7
+#endif // #if defined(__arm__)
diff --git a/module/icp/asm-arm/sha2/sha512-armv7.S b/module/icp/asm-arm/sha2/sha512-armv7.S
index 499cb6df956..66d7dd3cf0f 100644
--- a/module/icp/asm-arm/sha2/sha512-armv7.S
+++ b/module/icp/asm-arm/sha2/sha512-armv7.S
@@ -493,6 +493,7 @@ zfs_sha512_block_armv7:
 #endif
 .size	zfs_sha512_block_armv7,.-zfs_sha512_block_armv7
 
+#if __ARM_ARCH__ >= 7
 .arch	armv7-a
 .fpu	neon
 
@@ -1822,4 +1823,5 @@ zfs_sha512_block_neon:
 	VFP_ABI_POP
 	bx	lr					@ .word	0xe12fff1e
 .size	zfs_sha512_block_neon,.-zfs_sha512_block_neon
-#endif
+#endif // #if __ARM_ARCH__ >= 7
+#endif // #if defined(__arm__)
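
A note on the resulting layout (not part of the patch): the guard
rearrangement is easier to see outside of diff context. The C sketch
below mirrors the preprocessor nesting that sha256_impl.c ends up with
after this change: the generic armv7 assembly kernel is compiled on
every ARM target, while the NEON and ARMv8-CE kernels remain behind
__ARM_ARCH > 6. The file name, messages, and build command are
illustrative assumptions; only the macros and the guard nesting are
taken from the diff above.

/* guard_sketch.c - hypothetical illustration, not shipped with OpenZFS */
#include <stdio.h>

int
main(void)
{
#if defined(__aarch64__) || defined(__arm__)
	/* Generic ARM asm kernel: now built for every ARM architecture. */
	puts("sha256: armv7 kernel compiled in");
#if __ARM_ARCH > 6
	/* SIMD kernels stay limited to ARMv7 and newer (and aarch64). */
	puts("sha256: neon and armv8-ce kernels compiled in");
#else
	puts("sha256: SIMD kernels skipped (__ARM_ARCH <= 6)");
#endif
#else
	puts("not an ARM target; other kernel families apply");
#endif
	return (0);
}

Compiled natively or with a cross toolchain (for example,
arm-linux-gnueabi-gcc -march=armv5te guard_sketch.c), the program
prints which kernel families the adjusted guards admit for that target.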