From dce883beb46900b8b30e810096833f16b3ea2551 Mon Sep 17 00:00:00 2001
From: Andy Polyakov
Date: Fri, 28 Jul 2023 16:50:00 +0200
Subject: [PATCH] Execute build/refresh.sh.

---
 build/coff/ct_inverse_mod_256-armv8.S    | 4 ++--
 build/coff/ct_inverse_mod_384-armv8.S    | 4 ++--
 build/coff/div3w-armv8.S                 | 2 +-
 build/coff/sha256-armv8.S                | 2 +-
 build/elf/ct_inverse_mod_256-armv8.S     | 4 ++--
 build/elf/ct_inverse_mod_384-armv8.S     | 4 ++--
 build/elf/div3w-armv8.S                  | 2 +-
 build/elf/sha256-armv8.S                 | 2 +-
 build/mach-o/ct_inverse_mod_256-armv8.S  | 4 ++--
 build/mach-o/ct_inverse_mod_384-armv8.S  | 4 ++--
 build/mach-o/div3w-armv8.S               | 2 +-
 build/mach-o/sha256-armv8.S              | 2 +-
 build/win64/ct_inverse_mod_256-armv8.asm | 4 ++--
 build/win64/ct_inverse_mod_384-armv8.asm | 4 ++--
 build/win64/div3w-armv8.asm              | 2 +-
 build/win64/sha256-armv8.asm             | 2 +-
 16 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/build/coff/ct_inverse_mod_256-armv8.S b/build/coff/ct_inverse_mod_256-armv8.S
index 17c3d252..b46cbe1d 100644
--- a/build/coff/ct_inverse_mod_256-armv8.S
+++ b/build/coff/ct_inverse_mod_256-armv8.S
@@ -62,14 +62,14 @@ ct_inverse_mod_256:
 madd x4, x16, x8, xzr // |u|*|f0|
 madd x4, x17, x9, x4 // |v|*|g0|
 str x4, [x0,#8*4]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*5]
 stp x5, x5, [x0,#8*7]
 
 madd x4, x12, x8, xzr // |u|*|f1|
 madd x4, x13, x9, x4 // |v|*|g1|
 str x4, [x0,#8*9]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*10]
 stp x5, x5, [x0,#8*12]
 eor x1, x1, #256 // flip-flop src |a|b|u|v|
diff --git a/build/coff/ct_inverse_mod_384-armv8.S b/build/coff/ct_inverse_mod_384-armv8.S
index 65193f1e..7ce7a8a1 100644
--- a/build/coff/ct_inverse_mod_384-armv8.S
+++ b/build/coff/ct_inverse_mod_384-armv8.S
@@ -73,7 +73,7 @@ ct_inverse_mod_383:
 adds x3, x3, x5
 adc x4, x4, x6
 stp x3, x4, [x0,#8*6]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*8]
 stp x5, x5, [x0,#8*10]
 
@@ -84,7 +84,7 @@ ct_inverse_mod_383:
 adds x3, x3, x5
 adc x4, x4, x6
 stp x3, x4, [x0,#8*12]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*14]
 stp x5, x5, [x0,#8*16]
 eor x1, x1, #256 // flip-flop src |a|b|u|v|
diff --git a/build/coff/div3w-armv8.S b/build/coff/div3w-armv8.S
index c17b9e38..2e5d7045 100644
--- a/build/coff/div3w-armv8.S
+++ b/build/coff/div3w-armv8.S
@@ -27,7 +27,7 @@ div_3_limbs:
 asr x3,x0,#63 // top bit -> mask
 add x0,x0,x0 // Q <<= 1
 subs x6,x4,x1 // R - D
-add x0,x0,#1 // Q + specilative bit
+add x0,x0,#1 // Q + speculative bit
 sbcs x7,x5,x2
 sbc x0,x0,xzr // subtract speculative bit
 
diff --git a/build/coff/sha256-armv8.S b/build/coff/sha256-armv8.S
index 49309edb..a4cd8090 100644
--- a/build/coff/sha256-armv8.S
+++ b/build/coff/sha256-armv8.S
@@ -10,7 +10,7 @@
 //
 // sha256_block procedure for ARMv8.
 //
-// This module is stripped of scalar code paths, with raionale that all
+// This module is stripped of scalar code paths, with rationale that all
 // known processors are NEON-capable.
 //
 // See original module at CRYPTOGAMS for further details.
diff --git a/build/elf/ct_inverse_mod_256-armv8.S b/build/elf/ct_inverse_mod_256-armv8.S
index 347eb315..4a54d5f6 100644
--- a/build/elf/ct_inverse_mod_256-armv8.S
+++ b/build/elf/ct_inverse_mod_256-armv8.S
@@ -60,14 +60,14 @@ ct_inverse_mod_256:
 madd x4, x16, x8, xzr // |u|*|f0|
 madd x4, x17, x9, x4 // |v|*|g0|
 str x4, [x0,#8*4]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*5]
 stp x5, x5, [x0,#8*7]
 
 madd x4, x12, x8, xzr // |u|*|f1|
 madd x4, x13, x9, x4 // |v|*|g1|
 str x4, [x0,#8*9]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*10]
 stp x5, x5, [x0,#8*12]
 eor x1, x1, #256 // flip-flop src |a|b|u|v|
diff --git a/build/elf/ct_inverse_mod_384-armv8.S b/build/elf/ct_inverse_mod_384-armv8.S
index d7eca170..1702e3f2 100644
--- a/build/elf/ct_inverse_mod_384-armv8.S
+++ b/build/elf/ct_inverse_mod_384-armv8.S
@@ -71,7 +71,7 @@ ct_inverse_mod_383:
 adds x3, x3, x5
 adc x4, x4, x6
 stp x3, x4, [x0,#8*6]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*8]
 stp x5, x5, [x0,#8*10]
 
@@ -82,7 +82,7 @@ ct_inverse_mod_383:
 adds x3, x3, x5
 adc x4, x4, x6
 stp x3, x4, [x0,#8*12]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*14]
 stp x5, x5, [x0,#8*16]
 eor x1, x1, #256 // flip-flop src |a|b|u|v|
diff --git a/build/elf/div3w-armv8.S b/build/elf/div3w-armv8.S
index a2b1d676..37621bee 100644
--- a/build/elf/div3w-armv8.S
+++ b/build/elf/div3w-armv8.S
@@ -25,7 +25,7 @@ div_3_limbs:
 asr x3,x0,#63 // top bit -> mask
 add x0,x0,x0 // Q <<= 1
 subs x6,x4,x1 // R - D
-add x0,x0,#1 // Q + specilative bit
+add x0,x0,#1 // Q + speculative bit
 sbcs x7,x5,x2
 sbc x0,x0,xzr // subtract speculative bit
 
diff --git a/build/elf/sha256-armv8.S b/build/elf/sha256-armv8.S
index 6c000391..45c1162c 100644
--- a/build/elf/sha256-armv8.S
+++ b/build/elf/sha256-armv8.S
@@ -10,7 +10,7 @@
 //
 // sha256_block procedure for ARMv8.
 //
-// This module is stripped of scalar code paths, with raionale that all
+// This module is stripped of scalar code paths, with rationale that all
 // known processors are NEON-capable.
 //
 // See original module at CRYPTOGAMS for further details.
diff --git a/build/mach-o/ct_inverse_mod_256-armv8.S b/build/mach-o/ct_inverse_mod_256-armv8.S
index f3a2c3b5..9723d7ea 100644
--- a/build/mach-o/ct_inverse_mod_256-armv8.S
+++ b/build/mach-o/ct_inverse_mod_256-armv8.S
@@ -60,14 +60,14 @@ _ct_inverse_mod_256:
 madd x4, x16, x8, xzr // |u|*|f0|
 madd x4, x17, x9, x4 // |v|*|g0|
 str x4, [x0,#8*4]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*5]
 stp x5, x5, [x0,#8*7]
 
 madd x4, x12, x8, xzr // |u|*|f1|
 madd x4, x13, x9, x4 // |v|*|g1|
 str x4, [x0,#8*9]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*10]
 stp x5, x5, [x0,#8*12]
 eor x1, x1, #256 // flip-flop src |a|b|u|v|
diff --git a/build/mach-o/ct_inverse_mod_384-armv8.S b/build/mach-o/ct_inverse_mod_384-armv8.S
index c7d9ba84..c8a58cda 100644
--- a/build/mach-o/ct_inverse_mod_384-armv8.S
+++ b/build/mach-o/ct_inverse_mod_384-armv8.S
@@ -71,7 +71,7 @@ _ct_inverse_mod_383:
 adds x3, x3, x5
 adc x4, x4, x6
 stp x3, x4, [x0,#8*6]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*8]
 stp x5, x5, [x0,#8*10]
 
@@ -82,7 +82,7 @@ _ct_inverse_mod_383:
 adds x3, x3, x5
 adc x4, x4, x6
 stp x3, x4, [x0,#8*12]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*14]
 stp x5, x5, [x0,#8*16]
 eor x1, x1, #256 // flip-flop src |a|b|u|v|
diff --git a/build/mach-o/div3w-armv8.S b/build/mach-o/div3w-armv8.S
index 5a5eb3a0..4b130080 100644
--- a/build/mach-o/div3w-armv8.S
+++ b/build/mach-o/div3w-armv8.S
@@ -25,7 +25,7 @@ Loop:
 asr x3,x0,#63 // top bit -> mask
 add x0,x0,x0 // Q <<= 1
 subs x6,x4,x1 // R - D
-add x0,x0,#1 // Q + specilative bit
+add x0,x0,#1 // Q + speculative bit
 sbcs x7,x5,x2
 sbc x0,x0,xzr // subtract speculative bit
 
diff --git a/build/mach-o/sha256-armv8.S b/build/mach-o/sha256-armv8.S
index 854e90d8..3f3c1266 100644
--- a/build/mach-o/sha256-armv8.S
+++ b/build/mach-o/sha256-armv8.S
@@ -10,7 +10,7 @@
 //
 // sha256_block procedure for ARMv8.
 //
-// This module is stripped of scalar code paths, with raionale that all
+// This module is stripped of scalar code paths, with rationale that all
 // known processors are NEON-capable.
 //
 // See original module at CRYPTOGAMS for further details.
diff --git a/build/win64/ct_inverse_mod_256-armv8.asm b/build/win64/ct_inverse_mod_256-armv8.asm
index f3c2f0d0..6e476749 100644
--- a/build/win64/ct_inverse_mod_256-armv8.asm
+++ b/build/win64/ct_inverse_mod_256-armv8.asm
@@ -60,14 +60,14 @@
 madd x4, x16, x8, xzr // |u|*|f0|
 madd x4, x17, x9, x4 // |v|*|g0|
 str x4, [x0,#8*4]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*5]
 stp x5, x5, [x0,#8*7]
 
 madd x4, x12, x8, xzr // |u|*|f1|
 madd x4, x13, x9, x4 // |v|*|g1|
 str x4, [x0,#8*9]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*10]
 stp x5, x5, [x0,#8*12]
 eor x1, x1, #256 // flip-flop src |a|b|u|v|
diff --git a/build/win64/ct_inverse_mod_384-armv8.asm b/build/win64/ct_inverse_mod_384-armv8.asm
index 4ab12e05..d2fbc192 100644
--- a/build/win64/ct_inverse_mod_384-armv8.asm
+++ b/build/win64/ct_inverse_mod_384-armv8.asm
@@ -71,7 +71,7 @@
 adds x3, x3, x5
 adc x4, x4, x6
 stp x3, x4, [x0,#8*6]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*8]
 stp x5, x5, [x0,#8*10]
 
@@ -82,7 +82,7 @@
 adds x3, x3, x5
 adc x4, x4, x6
 stp x3, x4, [x0,#8*12]
-asr x5, x4, #63 // sign extenstion
+asr x5, x4, #63 // sign extension
 stp x5, x5, [x0,#8*14]
 stp x5, x5, [x0,#8*16]
 eor x1, x1, #256 // flip-flop src |a|b|u|v|
diff --git a/build/win64/div3w-armv8.asm b/build/win64/div3w-armv8.asm
index 7114ccf0..aec90679 100644
--- a/build/win64/div3w-armv8.asm
+++ b/build/win64/div3w-armv8.asm
@@ -25,7 +25,7 @@
 asr x3,x0,#63 // top bit -> mask
 add x0,x0,x0 // Q <<= 1
 subs x6,x4,x1 // R - D
-add x0,x0,#1 // Q + specilative bit
+add x0,x0,#1 // Q + speculative bit
 sbcs x7,x5,x2
 sbc x0,x0,xzr // subtract speculative bit
 
diff --git a/build/win64/sha256-armv8.asm b/build/win64/sha256-armv8.asm
index 3f907ca2..31e74219 100644
--- a/build/win64/sha256-armv8.asm
+++ b/build/win64/sha256-armv8.asm
@@ -10,7 +10,7 @@
 //
 // sha256_block procedure for ARMv8.
 //
-// This module is stripped of scalar code paths, with raionale that all
+// This module is stripped of scalar code paths, with rationale that all
 // known processors are NEON-capable.
 //
 // See original module at CRYPTOGAMS for further details.
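
For reference, the "sign extension" comments corrected throughout this patch annotate the "asr x5, x4, #63" instructions in the ct_inverse_mod_256/383 files: an arithmetic right shift by 63 replicates the sign bit of a 64-bit word into every bit position, yielding 0 for non-negative values and all-ones (-1) for negative ones, which the surrounding code then stores as the upper sign limbs. A minimal C sketch of the idiom (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Equivalent of "asr x5, x4, #63": broadcast the sign bit.
 * Note: in C, right-shifting a negative signed value is
 * implementation-defined; mainstream compilers perform an
 * arithmetic shift, matching the asr instruction. */
static int64_t sign_word(int64_t x)
{
    return x >> 63; /* 0 if x >= 0, -1 (all ones) if x < 0 */
}

int main(void)
{
    printf("%lld\n", (long long)sign_word(42));  /* prints 0  */
    printf("%lld\n", (long long)sign_word(-42)); /* prints -1 */
    return 0;
}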
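
Likewise, the "speculative bit" wording fixed in the div3w files describes one round of bit-at-a-time (restoring) division in div_3_limbs: the quotient is shifted left, its low bit is set optimistically, the trial subtraction R - D is performed, and the resulting borrow cancels the speculative bit when R < D (the assembly does this branchlessly via the carry flag and a sign mask). A single-limb C sketch; div_step is a hypothetical helper for illustration, whereas the real routine operates on a three-limb dividend and a two-limb divisor:

#include <stdint.h>

/* One round of restoring binary division, mirroring the commented
 * sequence: Q <<= 1; add the speculative bit; compute R - D; and
 * subtract the speculative bit back if the subtraction borrows. */
static uint64_t div_step(uint64_t *r, uint64_t d, uint64_t q)
{
    q = (q << 1) + 1;   /* Q <<= 1, plus the speculative bit */
    if (*r >= d)
        *r -= d;        /* no borrow: the speculative bit stands */
    else
        q -= 1;         /* borrow: subtract the speculative bit */
    return q;
}

Iterating such rounds, one per quotient bit against a suitably shifted divisor, yields the quotient and remainder.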