From 440150006064295038d4d7fcb7fe2796bf1672df Mon Sep 17 00:00:00 2001
From: Andrew Poelstra
Date: Wed, 13 May 2015 17:31:47 -0500
Subject: [PATCH] Add constant-time multiply `secp256k1_ecmult_const` for ECDH

Designed with clear separation of the wNAF conversion, precomputation
and exponentiation (since we will probably want to expose at least the
precomputation separately in the API for users who reuse points a lot).

Future work:
- actually separate precomp in the API
- do multiexp rather than single exponentiation
---
 Makefile.am             |   2 +
 src/ecmult_const.h      |  15 ++++
 src/ecmult_const_impl.h | 139 ++++++++++++++++++++++++++++++++
 src/group.h             |   4 +
 src/group_impl.h        |   5 ++
 src/scalar.h            |  11 +++
 src/scalar_4x64_impl.h  |  28 +++++++
 src/scalar_8x32_impl.h  |  41 ++++++++++
 src/scalar_impl.h       |   5 ++
 src/secp256k1.c         |   1 +
 src/tests.c             | 174 +++++++++++++++++++++++++++++++++++++++-
 11 files changed, 424 insertions(+), 1 deletion(-)
 create mode 100644 src/ecmult_const.h
 create mode 100644 src/ecmult_const_impl.h

diff --git a/Makefile.am b/Makefile.am
index 2609c7feed..8c8bd77367 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -19,6 +19,8 @@ noinst_HEADERS += src/eckey.h
 noinst_HEADERS += src/eckey_impl.h
 noinst_HEADERS += src/ecmult.h
 noinst_HEADERS += src/ecmult_impl.h
+noinst_HEADERS += src/ecmult_const.h
+noinst_HEADERS += src/ecmult_const_impl.h
 noinst_HEADERS += src/ecmult_gen.h
 noinst_HEADERS += src/ecmult_gen_impl.h
 noinst_HEADERS += src/num.h
diff --git a/src/ecmult_const.h b/src/ecmult_const.h
new file mode 100644
index 0000000000..e8ff1feb8c
--- /dev/null
+++ b/src/ecmult_const.h
@@ -0,0 +1,15 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra                                 *
+ * Distributed under the MIT software license, see the accompanying   *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_ECMULT_CONST_
+#define _SECP256K1_ECMULT_CONST_
+
+#include "scalar.h"
+#include "group.h"
+
+static void secp256k1_ecmult_const(secp256k1_gej_t *r, const secp256k1_ge_t *a, const secp256k1_scalar_t *q);
+
+#endif
diff --git a/src/ecmult_const_impl.h b/src/ecmult_const_impl.h
new file mode 100644
index 0000000000..956d5e4c0f
--- /dev/null
+++ b/src/ecmult_const_impl.h
@@ -0,0 +1,139 @@
+/**********************************************************************
+ * Copyright (c) 2015 Pieter Wuille, Andrew Poelstra                  *
+ * Distributed under the MIT software license, see the accompanying   *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_ECMULT_CONST_IMPL_
+#define _SECP256K1_ECMULT_CONST_IMPL_
+
+#include "scalar.h"
+#include "group.h"
+#include "ecmult_const.h"
+#include "ecmult_impl.h"
+
+#define WNAF_BITS 256
+#define WNAF_SIZE(w) ((WNAF_BITS + (w) - 1) / (w))
+
+/* This is like `ECMULT_TABLE_GET_GE` but is constant time */
+#define ECMULT_CONST_TABLE_GET_GE(r,pre,n,w) do { \
+    int m; \
+    int abs_n = (n) * (((n) > 0) * 2 - 1); \
+    secp256k1_fe_t neg_y; \
+    VERIFY_CHECK(((n) & 1) == 1); \
+    VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
+    VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \
+    for (m = 1; m < (1 << ((w) - 1)); m += 2) { \
+        /* This loop is used to avoid secret data in array indices. See
+         * the comment in ecmult_gen_impl.h for rationale. */ \
+        secp256k1_fe_cmov(&(r)->x, &(pre)[(m - 1) / 2].x, m == abs_n); \
+        secp256k1_fe_cmov(&(r)->y, &(pre)[(m - 1) / 2].y, m == abs_n); \
+    } \
+    (r)->infinity = 0; \
+    secp256k1_fe_normalize_weak(&(r)->x); \
+    secp256k1_fe_normalize_weak(&(r)->y); \
+    secp256k1_fe_negate(&neg_y, &(r)->y, 1); \
+    secp256k1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \
+} while(0)
+
+
+/** Convert a number to WNAF notation. The number becomes represented by sum(2^{w*i} * wnaf[i], i=0..WNAF_SIZE(w))
+ *  with the following guarantees:
+ *  - each wnaf[i] is an odd integer between -(1 << w) and (1 << w)
+ *  - each wnaf[i] is nonzero
+ *  - the number of words set is always WNAF_SIZE(w) + 1
+ *
+ *  Adapted from `The Width-w NAF Method Provides Small Memory and Fast Elliptic Scalar
+ *  Multiplications Secure against Side Channel Attacks`, Okeya and Takagi. M. Joye (Ed.)
+ *  CT-RSA 2003, LNCS 2612, pp. 328-343, 2003. Springer-Verlag Berlin Heidelberg 2003
+ *
+ *  Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on page 335
+ */
+static void secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar_t *a, int w) {
+    secp256k1_scalar_t s = *a;
+    /* Negate to force oddness */
+    int is_even = secp256k1_scalar_is_even(&s);
+    int global_sign = secp256k1_scalar_cond_negate(&s, is_even);
+
+    int word = 0;
+    /* 1 2 3 */
+    int u_last = secp256k1_scalar_shr_int(&s, w);
+    int u;
+    /* 4 */
+    while (word * w < WNAF_BITS) {
+        int sign;
+        int even;
+
+        /* 4.1 4.4 */
+        u = secp256k1_scalar_shr_int(&s, w);
+        /* 4.2 */
+        even = ((u & 1) == 0);
+        sign = 2 * (u_last > 0) - 1;
+        u += sign * even;
+        u_last -= sign * even * (1 << w);
+
+        /* 4.3, adapted for global sign change */
+        wnaf[word++] = u_last * global_sign;
+
+        u_last = u;
+    }
+    wnaf[word] = u * global_sign;
+
+    VERIFY_CHECK(secp256k1_scalar_is_zero(&s));
+    VERIFY_CHECK(word == WNAF_SIZE(w));
+}
+
+
+static void secp256k1_ecmult_const(secp256k1_gej_t *r, const secp256k1_ge_t *a, const secp256k1_scalar_t *scalar) {
+    secp256k1_ge_t pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
+    secp256k1_ge_t tmpa;
+    secp256k1_fe_t Z;
+
+    int wnaf[1 + WNAF_SIZE(WINDOW_A - 1)];
+
+    int i;
+    int is_zero = secp256k1_scalar_is_zero(scalar);
+    secp256k1_scalar_t sc = *scalar;
+    /* the wNAF ladder cannot handle zero, so bump this to one; we will
+     * correct the result after the fact */
+    sc.d[0] += is_zero;
+
+    /* build wnaf representation for q. */
+    secp256k1_wnaf_const(wnaf, &sc, WINDOW_A - 1);
+
+    /* Calculate odd multiples of a.
+     * All multiples are brought to the same Z 'denominator', which is stored
+     * in Z. Due to secp256k1's isomorphism we can do all operations pretending
+     * that the Z coordinate was 1, use affine addition formulae, and correct
+     * the Z coordinate of the result once at the end.
+     */
+    secp256k1_gej_set_ge(r, a);
+    secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
+
+    /* first loop iteration (separated out so we can directly set r, rather
+     * than having it start at infinity, get doubled several times, then have
+     * its new value added to it) */
+    i = wnaf[WNAF_SIZE(WINDOW_A - 1)];
+    VERIFY_CHECK(i != 0);
+    ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
+    secp256k1_gej_set_ge(r, &tmpa);
+    /* remaining loop iterations */
+    for (i = WNAF_SIZE(WINDOW_A - 1) - 1; i >= 0; i--) {
+        int n;
+        int j;
+        for (j = 0; j < WINDOW_A - 1; ++j) {
+            secp256k1_gej_double_nonzero(r, r, NULL);
+        }
+        n = wnaf[i];
+        VERIFY_CHECK(n != 0);
+        ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
+        secp256k1_gej_add_ge(r, r, &tmpa);
+    }
+
+    secp256k1_fe_mul(&r->z, &r->z, &Z);
+
+    /* correct for zero */
+    r->infinity |= is_zero;
+}
+
+#endif
diff --git a/src/group.h b/src/group.h
index 9203bdf7eb..48398918e2 100644
--- a/src/group.h
+++ b/src/group.h
@@ -94,6 +94,10 @@ static void secp256k1_gej_neg(secp256k1_gej_t *r, const secp256k1_gej_t *a);
 /** Check whether a group element is the point at infinity. */
 static int secp256k1_gej_is_infinity(const secp256k1_gej_t *a);
 
+/** Set r equal to the double of a. If rzr is not-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0).
+ *  a may not be zero. Constant time. */
+static void secp256k1_gej_double_nonzero(secp256k1_gej_t *r, const secp256k1_gej_t *a, secp256k1_fe_t *rzr);
+
 /** Set r equal to the double of a. If rzr is not-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0). */
 static void secp256k1_gej_double_var(secp256k1_gej_t *r, const secp256k1_gej_t *a, secp256k1_fe_t *rzr);
 
diff --git a/src/group_impl.h b/src/group_impl.h
index 008bbac315..165eba2a52 100644
--- a/src/group_impl.h
+++ b/src/group_impl.h
@@ -301,6 +301,11 @@ static void secp256k1_gej_double_var(secp256k1_gej_t *r, const secp256k1_gej_t *
     secp256k1_fe_add(&r->y, &t2);          /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */
 }
 
+static SECP256K1_INLINE void secp256k1_gej_double_nonzero(secp256k1_gej_t *r, const secp256k1_gej_t *a, secp256k1_fe_t *rzr) {
+    VERIFY_CHECK(!secp256k1_gej_is_infinity(a));
+    secp256k1_gej_double_var(r, a, rzr);
+}
+
 static void secp256k1_gej_add_var(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_gej_t *b, secp256k1_fe_t *rzr) {
     /* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */
     secp256k1_fe_t z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
diff --git a/src/scalar.h b/src/scalar.h
index f5d09f8d47..f33e72a7c9 100644
--- a/src/scalar.h
+++ b/src/scalar.h
@@ -48,6 +48,10 @@ static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit);
 /** Multiply two scalars (modulo the group order). */
 static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b);
 
+/** Shift a scalar right by some amount strictly between 0 and 16, returning
+ *  the low bits that were shifted off. */
+static int secp256k1_scalar_shr_int(secp256k1_scalar_t *r, int n);
+
 /** Compute the square of a scalar (modulo the group order). */
 static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a);
 
@@ -66,9 +70,16 @@ static int secp256k1_scalar_is_zero(const secp256k1_scalar_t *a);
 /** Check whether a scalar equals one. */
 static int secp256k1_scalar_is_one(const secp256k1_scalar_t *a);
 
+/** Check whether a scalar, considered as a nonnegative integer, is even.
*/ +static int secp256k1_scalar_is_even(const secp256k1_scalar_t *a); + /** Check whether a scalar is higher than the group order divided by 2. */ static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a); +/** Conditionally negate a number, in constant time. + * Returns -1 if the number was negated, 1 otherwise */ +static int secp256k1_scalar_cond_negate(secp256k1_scalar_t *a, int flag); + #ifndef USE_NUM_NONE /** Convert a scalar to a number. */ static void secp256k1_scalar_get_num(secp256k1_num_t *r, const secp256k1_scalar_t *a); diff --git a/src/scalar_4x64_impl.h b/src/scalar_4x64_impl.h index ff365292f8..147229dab9 100644 --- a/src/scalar_4x64_impl.h +++ b/src/scalar_4x64_impl.h @@ -164,6 +164,22 @@ static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a) { return yes; } +static int secp256k1_scalar_cond_negate(secp256k1_scalar_t *r, int flag) { + /* If we are flag = 0, mask = 00...00 and this is a no-op; + * if we are flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */ + uint64_t mask = !flag - 1; + uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1; + uint128_t t = (uint128_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask); + r->d[0] = t & nonzero; t >>= 64; + t += (uint128_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask); + r->d[1] = t & nonzero; t >>= 64; + t += (uint128_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask); + r->d[2] = t & nonzero; t >>= 64; + t += (uint128_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask); + r->d[3] = t & nonzero; + return 2 * (mask == 0) - 1; +} + /* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */ /** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ @@ -877,6 +893,18 @@ static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t secp256k1_scalar_reduce_512(r, l); } +static int secp256k1_scalar_shr_int(secp256k1_scalar_t *r, int n) { + int ret; + VERIFY_CHECK(n > 0); + VERIFY_CHECK(n < 16); + ret = r->d[0] & ((1 << n) - 1); + r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n)); + r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n)); + r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n)); + r->d[3] = (r->d[3] >> n); + return ret; +} + static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { uint64_t l[8]; secp256k1_scalar_sqr_512(l, a); diff --git a/src/scalar_8x32_impl.h b/src/scalar_8x32_impl.h index 22b31d4112..0ad2423db0 100644 --- a/src/scalar_8x32_impl.h +++ b/src/scalar_8x32_impl.h @@ -234,6 +234,31 @@ static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a) { return yes; } +static int secp256k1_scalar_cond_negate(secp256k1_scalar_t *r, int flag) { + /* If we are flag = 0, mask = 00...00 and this is a no-op; + * if we are flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */ + uint32_t mask = !flag - 1; + uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(r) == 0); + uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask); + r->d[0] = t & nonzero; t >>= 32; + t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask); + r->d[1] = t & nonzero; t >>= 32; + t += (uint64_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask); + r->d[2] = t & nonzero; t >>= 32; + t += (uint64_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask); + r->d[3] = t & nonzero; t >>= 32; + t += (uint64_t)(r->d[4] ^ mask) + (SECP256K1_N_4 & mask); + r->d[4] = t & nonzero; t >>= 32; + t += (uint64_t)(r->d[5] ^ mask) + (SECP256K1_N_5 & mask); + r->d[5] = t & nonzero; t >>= 32; + t += (uint64_t)(r->d[6] ^ mask) + (SECP256K1_N_6 & 
mask); + r->d[6] = t & nonzero; t >>= 32; + t += (uint64_t)(r->d[7] ^ mask) + (SECP256K1_N_7 & mask); + r->d[7] = t & nonzero; + return 2 * (mask == 0) - 1; +} + + /* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */ /** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ @@ -624,6 +649,22 @@ static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t secp256k1_scalar_reduce_512(r, l); } +static int secp256k1_scalar_shr_int(secp256k1_scalar_t *r, int n) { + int ret; + VERIFY_CHECK(n > 0); + VERIFY_CHECK(n < 16); + ret = r->d[0] & ((1 << n) - 1); + r->d[0] = (r->d[0] >> n) + (r->d[1] << (32 - n)); + r->d[1] = (r->d[1] >> n) + (r->d[2] << (32 - n)); + r->d[2] = (r->d[2] >> n) + (r->d[3] << (32 - n)); + r->d[3] = (r->d[3] >> n) + (r->d[4] << (32 - n)); + r->d[4] = (r->d[4] >> n) + (r->d[5] << (32 - n)); + r->d[5] = (r->d[5] >> n) + (r->d[6] << (32 - n)); + r->d[6] = (r->d[6] >> n) + (r->d[7] << (32 - n)); + r->d[7] = (r->d[7] >> n); + return ret; +} + static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { uint32_t l[16]; secp256k1_scalar_sqr_512(l, a); diff --git a/src/scalar_impl.h b/src/scalar_impl.h index abd777bd2c..d5426a8c37 100644 --- a/src/scalar_impl.h +++ b/src/scalar_impl.h @@ -234,6 +234,11 @@ static void secp256k1_scalar_inverse(secp256k1_scalar_t *r, const secp256k1_scal secp256k1_scalar_mul(r, t, &x6); /* 111111 */ } +SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar_t *a) { + /* d[0] is present and is the lowest word for all representations */ + return !(a->d[0] & 1); +} + static void secp256k1_scalar_inverse_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *x) { #if defined(USE_SCALAR_INV_BUILTIN) secp256k1_scalar_inverse(r, x); diff --git a/src/secp256k1.c b/src/secp256k1.c index 8f5b652019..6904108e2a 100644 --- a/src/secp256k1.c +++ b/src/secp256k1.c @@ -14,6 +14,7 @@ #include "scalar_impl.h" #include "group_impl.h" #include "ecmult_impl.h" +#include "ecmult_const_impl.h" #include "ecmult_gen_impl.h" #include "ecdsa_impl.h" #include "eckey_impl.h" diff --git a/src/tests.c b/src/tests.c index b4ca476c08..7dc9805cb2 100644 --- a/src/tests.c +++ b/src/tests.c @@ -496,6 +496,20 @@ void scalar_test(void) { secp256k1_scalar_get_num(&rnum2, &r); CHECK(secp256k1_num_eq(&rnum, &rnum2)); } + + { + /* test secp256k1_scalar_shr_int */ + secp256k1_scalar_t r; + int i; + int low; + random_scalar_order_test(&r); + for (i = 0; i < 100; ++i) { + int shift = 1 + (secp256k1_rand32() % 15); + int expected = r.d[0] % (1 << shift); + low = secp256k1_scalar_shr_int(&r, shift); + CHECK(expected == low); + } + } #endif { @@ -1377,6 +1391,109 @@ void run_point_times_order(void) { CHECK(secp256k1_fe_equal_var(&x, &xr)); } +void ecmult_const_random_mult(void) { + /* random starting point A (on the curve) */ + secp256k1_ge_t a = SECP256K1_GE_CONST( + 0x6d986544, 0x57ff52b8, 0xcf1b8126, 0x5b802a5b, + 0xa97f9263, 0xb1e88044, 0x93351325, 0x91bc450a, + 0x535c59f7, 0x325e5d2b, 0xc391fbe8, 0x3c12787c, + 0x337e4a98, 0xe82a9011, 0x0123ba37, 0xdd769c7d + ); + /* random initial factor xn */ + secp256k1_scalar_t xn = SECP256K1_SCALAR_CONST( + 0x649d4f77, 0xc4242df7, 0x7f2079c9, 0x14530327, + 0xa31b876a, 0xd2d8ce2a, 0x2236d5c6, 0xd7b2029b + ); + /* expected xn * A (from sage) */ + secp256k1_ge_t expected_b = SECP256K1_GE_CONST( + 0x23773684, 0x4d209dc7, 0x098a786f, 0x20d06fcd, + 0x070a38bf, 0xc11ac651, 0x03004319, 0x1e2a8786, + 0xed8c3b8e, 0xc06dd57b, 0xd06ea66e, 0x45492b0f, + 0xb84e4e1b, 
0xfb77e21f, 0x96baae2a, 0x63dec956 + ); + secp256k1_gej_t b; + secp256k1_ecmult_const(&b, &a, &xn); + + CHECK(secp256k1_ge_is_valid_var(&a)); + ge_equals_gej(&expected_b, &b); +} + +void ecmult_const_commutativity(void) { + secp256k1_scalar_t a; + secp256k1_scalar_t b; + secp256k1_gej_t res1; + secp256k1_gej_t res2; + secp256k1_ge_t mid1; + secp256k1_ge_t mid2; + random_scalar_order_test(&a); + random_scalar_order_test(&b); + + secp256k1_ecmult_const(&res1, &secp256k1_ge_const_g, &a); + secp256k1_ecmult_const(&res2, &secp256k1_ge_const_g, &b); + secp256k1_ge_set_gej(&mid1, &res1); + secp256k1_ge_set_gej(&mid2, &res2); + secp256k1_ecmult_const(&res1, &mid1, &b); + secp256k1_ecmult_const(&res2, &mid2, &a); + secp256k1_ge_set_gej(&mid1, &res1); + secp256k1_ge_set_gej(&mid2, &res2); + ge_equals_ge(&mid1, &mid2); +} + +void ecmult_const_mult_zero_one(void) { + secp256k1_scalar_t zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + secp256k1_scalar_t one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); + secp256k1_scalar_t negone; + secp256k1_gej_t res1; + secp256k1_ge_t res2; + secp256k1_ge_t point; + secp256k1_scalar_negate(&negone, &one); + + random_group_element_test(&point); + secp256k1_ecmult_const(&res1, &point, &zero); + secp256k1_ge_set_gej(&res2, &res1); + CHECK(secp256k1_ge_is_infinity(&res2)); + secp256k1_ecmult_const(&res1, &point, &one); + secp256k1_ge_set_gej(&res2, &res1); + ge_equals_ge(&res2, &point); + secp256k1_ecmult_const(&res1, &point, &negone); + secp256k1_gej_neg(&res1, &res1); + secp256k1_ge_set_gej(&res2, &res1); + ge_equals_ge(&res2, &point); +} + +void ecmult_const_chain_multiply(void) { + /* Check known result (randomly generated test problem from sage) */ + const secp256k1_scalar_t scalar = SECP256K1_SCALAR_CONST( + 0x4968d524, 0x2abf9b7a, 0x466abbcf, 0x34b11b6d, + 0xcd83d307, 0x827bed62, 0x05fad0ce, 0x18fae63b + ); + const secp256k1_gej_t expected_point = SECP256K1_GEJ_CONST( + 0x5494c15d, 0x32099706, 0xc2395f94, 0x348745fd, + 0x757ce30e, 0x4e8c90fb, 0xa2bad184, 0xf883c69f, + 0x5d195d20, 0xe191bf7f, 0x1be3e55f, 0x56a80196, + 0x6071ad01, 0xf1462f66, 0xc997fa94, 0xdb858435 + ); + secp256k1_gej_t point; + secp256k1_ge_t res; + int i; + + secp256k1_gej_set_ge(&point, &secp256k1_ge_const_g); + for (i = 0; i < 100; ++i) { + secp256k1_ge_t tmp; + secp256k1_ge_set_gej(&tmp, &point); + secp256k1_ecmult_const(&point, &tmp, &scalar); + } + secp256k1_ge_set_gej(&res, &point); + ge_equals_gej(&res, &expected_point); +} + +void run_ecmult_const_tests(void) { + ecmult_const_mult_zero_one(); + ecmult_const_random_mult(); + ecmult_const_commutativity(); + ecmult_const_chain_multiply(); +} + void test_wnaf(const secp256k1_scalar_t *number, int w) { secp256k1_scalar_t x, two, t; int wnaf[256]; @@ -1411,12 +1528,66 @@ void test_wnaf(const secp256k1_scalar_t *number, int w) { CHECK(secp256k1_scalar_eq(&x, number)); /* check that wnaf represents number */ } +void test_constant_wnaf_negate(const secp256k1_scalar_t *number) { + secp256k1_scalar_t neg1 = *number; + secp256k1_scalar_t neg2 = *number; + int sign1 = 1; + int sign2 = 1; + + if (!secp256k1_scalar_get_bits(&neg1, 0, 1)) { + secp256k1_scalar_negate(&neg1, &neg1); + sign1 = -1; + } + sign2 = secp256k1_scalar_cond_negate(&neg2, secp256k1_scalar_is_even(&neg2)); + CHECK(sign1 == sign2); + CHECK(secp256k1_scalar_eq(&neg1, &neg2)); +} + +void test_constant_wnaf(const secp256k1_scalar_t *number, int w) { + secp256k1_scalar_t x, shift; + int wnaf[256] = {0}; + int i; + + secp256k1_scalar_set_int(&x, 0); + 
secp256k1_scalar_set_int(&shift, 1 << w); + secp256k1_wnaf_const(wnaf, number, w); + + for (i = WNAF_SIZE(w); i >= 0; --i) { + secp256k1_scalar_t t; + int v = wnaf[i]; + CHECK(v != 0); /* check nonzero */ + CHECK(v & 1); /* check parity */ + CHECK(v > -(1 << w)); /* check range above */ + CHECK(v < (1 << w)); /* check range below */ + + secp256k1_scalar_mul(&x, &x, &shift); + if (v >= 0) { + secp256k1_scalar_set_int(&t, v); + } else { + secp256k1_scalar_set_int(&t, -v); + secp256k1_scalar_negate(&t, &t); + } + secp256k1_scalar_add(&x, &x, &t); + } + CHECK(secp256k1_scalar_eq(&x, number)); +} + void run_wnaf(void) { int i; - secp256k1_scalar_t n; + secp256k1_scalar_t n = {{0}}; + + /* Sanity check: 1 and 2 are the smallest odd and even numbers and should + * have easier-to-diagnose failure modes */ + n.d[0] = 1; + test_constant_wnaf(&n, 4); + n.d[0] = 2; + test_constant_wnaf(&n, 4); + /* Random tests */ for (i = 0; i < count; i++) { random_scalar_order(&n); test_wnaf(&n, 4+(i%10)); + test_constant_wnaf_negate(&n); + test_constant_wnaf(&n, 4 + (i % 10)); } } @@ -2266,6 +2437,7 @@ int main(int argc, char **argv) { run_ecmult_chain(); run_ecmult_constants(); run_ecmult_gen_blind(); + run_ecmult_const_tests(); /* endomorphism tests */ #ifdef USE_ENDOMORPHISM
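
Note (not part of the patch): as an illustration of how a library-internal caller might use the new function for ECDH, a sketch follows. It only uses types and helpers that already exist in this codebase (secp256k1_ge_t, secp256k1_gej_t, secp256k1_scalar_t, secp256k1_ecmult_const, secp256k1_ge_set_gej, secp256k1_fe_normalize); the helper name compute_shared_point is hypothetical.

/* Illustrative sketch: compute the shared point seckey * pub in constant
 * time and return it in affine coordinates, e.g. for hashing into an ECDH
 * shared secret. */
static void compute_shared_point(secp256k1_ge_t *shared, const secp256k1_ge_t *pub, const secp256k1_scalar_t *seckey) {
    secp256k1_gej_t resj;
    /* constant-time scalar multiplication of the peer's public point */
    secp256k1_ecmult_const(&resj, pub, seckey);
    /* convert back to affine coordinates */
    secp256k1_ge_set_gej(shared, &resj);
    secp256k1_fe_normalize(&shared->x);
    secp256k1_fe_normalize(&shared->y);
}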