diff --git a/ecc/bls12-377/fr/kzg/kzg.go b/ecc/bls12-377/fr/kzg/kzg.go index 1cc6531fe..7d0289e6a 100644 --- a/ecc/bls12-377/fr/kzg/kzg.go +++ b/ecc/bls12-377/fr/kzg/kzg.go @@ -77,7 +77,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { _, _, gen1Aff, gen2Aff := bls12377.Generators() srs.G1[0] = gen1Aff srs.G2[0] = gen2Aff - srs.G2[1].ScalarMultiplication(&gen2Aff, bAlpha) + srs.G2[1].ScalarMul(&gen2Aff, bAlpha) alphas := make([]fr.Element, size-1) alphas[0] = alpha @@ -87,7 +87,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { for i := 0; i < len(alphas); i++ { alphas[i].FromMont() } - g1s := bls12377.BatchScalarMultiplicationG1(&gen1Aff, alphas) + g1s := bls12377.BatchScalarMulG1(&gen1Aff, alphas) copy(srs.G1[1:], g1s) return &srs, nil @@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) { func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error { // [f(a)]G₁ - var claimedValueG1Aff bls12377.G1Affine + var claimedValueG1Aff bls12377.G1Jac var claimedValueBigInt big.Int proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt) + claimedValueG1Aff.ScalarMulUnconverted(&srs.G1[0], &claimedValueBigInt) // [f(α) - f(a)]G₁ - var fminusfaG1Jac, tmpG1Jac bls12377.G1Jac + var fminusfaG1Jac bls12377.G1Jac fminusfaG1Jac.FromAffine(commitment) - tmpG1Jac.FromAffine(&claimedValueG1Aff) - fminusfaG1Jac.SubAssign(&tmpG1Jac) + fminusfaG1Jac.SubAssign(&claimedValueG1Aff) // [-H(α)]G₁ var negH bls12377.G1Affine @@ -190,7 +189,7 @@ func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) point.ToBigIntRegular(&pointBigInt) genG2Jac.FromAffine(&srs.G2[0]) alphaG2Jac.FromAffine(&srs.G2[1]) - alphaMinusaG2Jac.ScalarMultiplication(&genG2Jac, &pointBigInt). + alphaMinusaG2Jac.ScalarMul(&genG2Jac, &pointBigInt). Neg(&alphaMinusaG2Jac). AddAssign(&alphaG2Jac) @@ -419,7 +418,7 @@ func BatchVerifyMultiPoints(digests []Digest, proofs []OpeningProof, points []fr var foldedEvalsCommit bls12377.G1Affine var foldedEvalsBigInt big.Int foldedEvals.ToBigIntRegular(&foldedEvalsBigInt) - foldedEvalsCommit.ScalarMultiplication(&srs.G1[0], &foldedEvalsBigInt) + foldedEvalsCommit.ScalarMul(&srs.G1[0], &foldedEvalsBigInt) // compute foldedDigests = ∑ᵢλᵢ[fᵢ(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁ foldedDigests.Sub(&foldedDigests, &foldedEvalsCommit) diff --git a/ecc/bls12-377/fr/kzg/kzg_test.go b/ecc/bls12-377/fr/kzg/kzg_test.go index e09042bdf..7321ae959 100644 --- a/ecc/bls12-377/fr/kzg/kzg_test.go +++ b/ecc/bls12-377/fr/kzg/kzg_test.go @@ -130,7 +130,7 @@ func TestCommit(t *testing.T) { fx.ToBigIntRegular(&fxbi) var manualCommit bls12377.G1Affine manualCommit.Set(&testSRS.G1[0]) - manualCommit.ScalarMultiplication(&manualCommit, &fxbi) + manualCommit.ScalarMul(&manualCommit, &fxbi) // compare both results if !kzgCommit.Equal(&manualCommit) { diff --git a/ecc/bls12-377/fr/plookup/table.go b/ecc/bls12-377/fr/plookup/table.go index e5347c1e3..50b4ae920 100644 --- a/ecc/bls12-377/fr/plookup/table.go +++ b/ecc/bls12-377/fr/plookup/table.go @@ -209,9 +209,9 @@ func VerifyLookupTables(srs *kzg.SRS, proof ProofLookupTables) error { var blambda big.Int lambda.ToBigIntRegular(&blambda) for i := nbRows - 2; i >= 0; i-- { - comf.ScalarMultiplication(&comf, &blambda). + comf.ScalarMul(&comf, &blambda). Add(&comf, &proof.fs[i]) - comt.ScalarMultiplication(&comt, &blambda). + comt.ScalarMul(&comt, &blambda). 
Add(&comt, &proof.ts[i]) } diff --git a/ecc/bls12-377/g1.go b/ecc/bls12-377/g1.go index 73d42aed1..cb949f95c 100644 --- a/ecc/bls12-377/g1.go +++ b/ecc/bls12-377/g1.go @@ -50,8 +50,8 @@ func (p *G1Affine) Set(a *G1Affine) *G1Affine { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G1Affine) ScalarMul(a *G1Affine, s *big.Int) *G1Affine { var _p G1Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -59,6 +59,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { return p } +// ScalarMulUnconverted computes and returns p = a ⋅ s +// Takes an affine point and returns a Jacobian point (useful for KZG) +func (p *G1Jac) ScalarMulUnconverted(a *G1Affine, s *big.Int) *G1Jac { + p.FromAffine(a) + p.mulGLV(p, s) + return p +} + // Add adds two point in affine coordinates. // This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { @@ -323,9 +331,9 @@ func (p *G1Jac) DoubleAssign() *G1Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { +func (p *G1Jac) ScalarMul(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } @@ -374,8 +382,8 @@ func (p *G1Jac) IsInSubGroup() bool { var res G1Jac res.phi(p). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). AddAssign(p) return res.IsOnCurve() && res.Z.IsZero() @@ -506,7 +514,7 @@ func (p *G1Affine) ClearCofactor(a *G1Affine) *G1Affine { func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { // cf https://eprint.iacr.org/2019/403.pdf, 5 var res G1Jac - res.ScalarMultiplication(a, &xGen).Neg(&res).AddAssign(a) + res.ScalarMul(a, &xGen).Neg(&res).AddAssign(a) p.Set(&res) return p @@ -861,10 +869,10 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) { } -// BatchScalarMultiplicationG1 multiplies the same base by all scalars +// BatchScalarMulG1 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affine { +func BatchScalarMulG1(base *G1Affine, scalars []fr.Element) []G1Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bls12-377/g1_test.go b/ecc/bls12-377/g1_test.go index 907d1541c..8f9f9b986 100644 --- a/ecc/bls12-377/g1_test.go +++ b/ecc/bls12-377/g1_test.go @@ -110,7 +110,7 @@ func TestG1AffineIsOnCurve(t *testing.T) { var op1, op2 G1Jac op1 = fuzzG1Jac(&g1Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenFp(), @@ -353,12 +353,12 @@ func TestG1AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G1Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g1Gen, &rminusone) + op3.ScalarMul(&g1Gen, &rminusone) gneg.Neg(&g1Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g1Gen, &scalar) - op2.ScalarMultiplication(&g1Gen, &blindedScalar) + op1.ScalarMul(&g1Gen, &scalar) + op2.ScalarMul(&g1Gen, &blindedScalar) 
return op1.Equal(&op2) && g.Equal(&g1Infinity) && !op1.Equal(&g1Infinity) && gneg.Equal(&op3) @@ -422,7 +422,7 @@ func TestG1AffineCofactorCleaning(t *testing.T) { } -func TestG1AffineBatchScalarMultiplication(t *testing.T) { +func TestG1AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -438,7 +438,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BLS12-377] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BLS12-377] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -449,7 +449,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:]) + result := BatchScalarMulG1(&g1GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -508,7 +508,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:using]) + _ = BatchScalarMulG1(&g1GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bls12-377/g2.go b/ecc/bls12-377/g2.go index ac68e3c2e..5e3a309a7 100644 --- a/ecc/bls12-377/g2.go +++ b/ecc/bls12-377/g2.go @@ -55,8 +55,8 @@ func (p *G2Affine) Set(a *G2Affine) *G2Affine { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *G2Affine) ScalarMultiplication(a *G2Affine, s *big.Int) *G2Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G2Affine) ScalarMul(a *G2Affine, s *big.Int) *G2Affine { var _p G2Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -328,9 +328,9 @@ func (p *G2Jac) DoubleAssign() *G2Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { +func (p *G2Jac) ScalarMul(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } @@ -374,7 +374,7 @@ func (p *G2Jac) IsOnCurve() bool { func (p *G2Jac) IsInSubGroup() bool { var res, tmp G2Jac tmp.psi(p) - res.ScalarMultiplication(p, &xGen). + res.ScalarMul(p, &xGen). SubAssign(&tmp) return res.IsOnCurve() && res.Z.IsZero() @@ -513,8 +513,8 @@ func (p *G2Affine) ClearCofactor(a *G2Affine) *G2Affine { func (p *G2Jac) ClearCofactor(a *G2Jac) *G2Jac { // https://eprint.iacr.org/2017/419.pdf, 4.1 var xg, xxg, res, t G2Jac - xg.ScalarMultiplication(a, &xGen) - xxg.ScalarMultiplication(&xg, &xGen) + xg.ScalarMul(a, &xGen) + xxg.ScalarMul(&xg, &xGen) res.Set(&xxg). SubAssign(&xg). 
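The new G1Jac.ScalarMulUnconverted lets callers such as kzg.Verify keep the result in Jacobian coordinates instead of multiplying in affine and converting back, as the old code did. A minimal before/after sketch, assuming the bls12377 package as patched here (the point and scalar names are illustrative, not from the patch):

package main

import (
	"fmt"
	"math/big"

	bls12377 "github.com/consensys/gnark-crypto/ecc/bls12-377"
)

func main() {
	_, _, base, _ := bls12377.Generators() // affine G1 generator as the base point
	s := big.NewInt(123456789)

	// old pattern: scalar multiplication in affine, then a conversion to Jacobian
	var viaAffine bls12377.G1Affine
	viaAffine.ScalarMul(&base, s) // was ScalarMultiplication
	var oldWay bls12377.G1Jac
	oldWay.FromAffine(&viaAffine)

	// new pattern: affine base in, Jacobian result out, no round-trip
	var newWay bls12377.G1Jac
	newWay.ScalarMulUnconverted(&base, s)

	fmt.Println(oldWay.Equal(&newWay)) // true: same point
}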
@@ -868,10 +868,10 @@ func (p *g2Proj) FromAffine(Q *G2Affine) *g2Proj { return p } -// BatchScalarMultiplicationG2 multiplies the same base by all scalars +// BatchScalarMulG2 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG2(base *G2Affine, scalars []fr.Element) []G2Affine { +func BatchScalarMulG2(base *G2Affine, scalars []fr.Element) []G2Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bls12-377/g2_test.go b/ecc/bls12-377/g2_test.go index df1c0da2f..107d90aac 100644 --- a/ecc/bls12-377/g2_test.go +++ b/ecc/bls12-377/g2_test.go @@ -124,7 +124,7 @@ func TestG2AffineIsOnCurve(t *testing.T) { var op1, op2 G2Jac op1 = fuzzG2Jac(&g2Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenE2(), @@ -375,12 +375,12 @@ func TestG2AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G2Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g2Gen, &rminusone) + op3.ScalarMul(&g2Gen, &rminusone) gneg.Neg(&g2Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g2Gen, &scalar) - op2.ScalarMultiplication(&g2Gen, &blindedScalar) + op1.ScalarMul(&g2Gen, &scalar) + op2.ScalarMul(&g2Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g2Infinity) && !op1.Equal(&g2Infinity) && gneg.Equal(&op3) @@ -441,7 +441,7 @@ func TestG2AffineCofactorCleaning(t *testing.T) { } -func TestG2AffineBatchScalarMultiplication(t *testing.T) { +func TestG2AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -457,7 +457,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BLS12-377] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BLS12-377] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -468,7 +468,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:]) + result := BatchScalarMulG2(&g2GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -527,7 +527,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:using]) + _ = BatchScalarMulG2(&g2GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bls12-377/marshal_test.go b/ecc/bls12-377/marshal_test.go index aac2cf9a6..99f70adca 100644 --- a/ecc/bls12-377/marshal_test.go +++ b/ecc/bls12-377/marshal_test.go @@ -55,9 +55,9 @@ func TestEncoder(t *testing.T) { inA = rand.Uint64() inB.SetRandom() inC.SetRandom() - inD.ScalarMultiplication(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) + inD.ScalarMul(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) // inE --> infinity - inF.ScalarMultiplication(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) + inF.ScalarMul(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) inG = 
make([]G1Affine, 2) inH = make([]G2Affine, 0) inG[1] = inD @@ -263,7 +263,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -283,7 +283,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) @@ -356,7 +356,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -376,7 +376,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) diff --git a/ecc/bls12-377/multiexp_test.go b/ecc/bls12-377/multiexp_test.go index fc7e810a5..09bdea9d1 100644 --- a/ecc/bls12-377/multiexp_test.go +++ b/ecc/bls12-377/multiexp_test.go @@ -100,7 +100,7 @@ func TestMultiExpG1(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g1Gen, &finalScalar) + expected.ScalarMul(&g1Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -150,7 +150,7 @@ func TestMultiExpG1(t *testing.T) { var op1ScalarMul G1Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g1GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -249,7 +249,7 @@ func BenchmarkManyMultiExpG1Reference(b *testing.B) { func fillBenchBasesG1(samplePoints []G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() @@ -330,7 +330,7 @@ func TestMultiExpG2(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g2Gen, &finalScalar) + expected.ScalarMul(&g2Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -380,7 +380,7 @@ func TestMultiExpG2(t *testing.T) { var op1ScalarMul G2Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g2GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -479,7 +479,7 @@ func BenchmarkManyMultiExpG2Reference(b *testing.B) { func fillBenchBasesG2(samplePoints []G2Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() diff --git a/ecc/bls12-377/pairing_test.go 
b/ecc/bls12-377/pairing_test.go index c64efebad..5a6a77f2c 100644 --- a/ecc/bls12-377/pairing_test.go +++ b/ecc/bls12-377/pairing_test.go @@ -120,8 +120,8 @@ func TestPairing(t *testing.T) { b.ToBigIntRegular(&bbigint) ab.Mul(&abigint, &bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ = Pair([]G1Affine{g1GenAff}, []G2Affine{g2GenAff}) resa, _ = Pair([]G1Affine{ag1}, []G2Affine{g2GenAff}) @@ -185,8 +185,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) P0 := []G1Affine{g1GenAff} P1 := []G1Affine{ag1} @@ -228,8 +228,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) g1Inf.FromJacobian(&g1Infinity) g2Inf.FromJacobian(&g2Infinity) @@ -266,8 +266,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ := Pair([]G1Affine{ag1}, []G2Affine{bg2}) diff --git a/ecc/bls12-378/fr/kzg/kzg.go b/ecc/bls12-378/fr/kzg/kzg.go index b624ab7e4..760db7d96 100644 --- a/ecc/bls12-378/fr/kzg/kzg.go +++ b/ecc/bls12-378/fr/kzg/kzg.go @@ -77,7 +77,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { _, _, gen1Aff, gen2Aff := bls12378.Generators() srs.G1[0] = gen1Aff srs.G2[0] = gen2Aff - srs.G2[1].ScalarMultiplication(&gen2Aff, bAlpha) + srs.G2[1].ScalarMul(&gen2Aff, bAlpha) alphas := make([]fr.Element, size-1) alphas[0] = alpha @@ -87,7 +87,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { for i := 0; i < len(alphas); i++ { alphas[i].FromMont() } - g1s := bls12378.BatchScalarMultiplicationG1(&gen1Aff, alphas) + g1s := bls12378.BatchScalarMulG1(&gen1Aff, alphas) copy(srs.G1[1:], g1s) return &srs, nil @@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) { func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error { // [f(a)]G₁ - var claimedValueG1Aff bls12378.G1Affine + var claimedValueG1Aff bls12378.G1Jac var claimedValueBigInt big.Int proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt) + claimedValueG1Aff.ScalarMulUnconverted(&srs.G1[0], &claimedValueBigInt) // [f(α) - f(a)]G₁ - var fminusfaG1Jac, tmpG1Jac bls12378.G1Jac + var fminusfaG1Jac bls12378.G1Jac fminusfaG1Jac.FromAffine(commitment) - tmpG1Jac.FromAffine(&claimedValueG1Aff) - fminusfaG1Jac.SubAssign(&tmpG1Jac) + fminusfaG1Jac.SubAssign(&claimedValueG1Aff) // [-H(α)]G₁ var negH bls12378.G1Affine @@ -190,7 +189,7 @@ func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) point.ToBigIntRegular(&pointBigInt) genG2Jac.FromAffine(&srs.G2[0]) alphaG2Jac.FromAffine(&srs.G2[1]) - alphaMinusaG2Jac.ScalarMultiplication(&genG2Jac, &pointBigInt). + alphaMinusaG2Jac.ScalarMul(&genG2Jac, &pointBigInt). Neg(&alphaMinusaG2Jac). 
AddAssign(&alphaG2Jac) @@ -419,7 +418,7 @@ func BatchVerifyMultiPoints(digests []Digest, proofs []OpeningProof, points []fr var foldedEvalsCommit bls12378.G1Affine var foldedEvalsBigInt big.Int foldedEvals.ToBigIntRegular(&foldedEvalsBigInt) - foldedEvalsCommit.ScalarMultiplication(&srs.G1[0], &foldedEvalsBigInt) + foldedEvalsCommit.ScalarMul(&srs.G1[0], &foldedEvalsBigInt) // compute foldedDigests = ∑ᵢλᵢ[fᵢ(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁ foldedDigests.Sub(&foldedDigests, &foldedEvalsCommit) diff --git a/ecc/bls12-378/fr/kzg/kzg_test.go b/ecc/bls12-378/fr/kzg/kzg_test.go index 7e8fdc320..0b955694b 100644 --- a/ecc/bls12-378/fr/kzg/kzg_test.go +++ b/ecc/bls12-378/fr/kzg/kzg_test.go @@ -130,7 +130,7 @@ func TestCommit(t *testing.T) { fx.ToBigIntRegular(&fxbi) var manualCommit bls12378.G1Affine manualCommit.Set(&testSRS.G1[0]) - manualCommit.ScalarMultiplication(&manualCommit, &fxbi) + manualCommit.ScalarMul(&manualCommit, &fxbi) // compare both results if !kzgCommit.Equal(&manualCommit) { diff --git a/ecc/bls12-378/fr/plookup/table.go b/ecc/bls12-378/fr/plookup/table.go index 8c7299969..d4d755c77 100644 --- a/ecc/bls12-378/fr/plookup/table.go +++ b/ecc/bls12-378/fr/plookup/table.go @@ -209,9 +209,9 @@ func VerifyLookupTables(srs *kzg.SRS, proof ProofLookupTables) error { var blambda big.Int lambda.ToBigIntRegular(&blambda) for i := nbRows - 2; i >= 0; i-- { - comf.ScalarMultiplication(&comf, &blambda). + comf.ScalarMul(&comf, &blambda). Add(&comf, &proof.fs[i]) - comt.ScalarMultiplication(&comt, &blambda). + comt.ScalarMul(&comt, &blambda). Add(&comt, &proof.ts[i]) } diff --git a/ecc/bls12-378/g1.go b/ecc/bls12-378/g1.go index 41c829ba6..b932bba16 100644 --- a/ecc/bls12-378/g1.go +++ b/ecc/bls12-378/g1.go @@ -50,8 +50,8 @@ func (p *G1Affine) Set(a *G1Affine) *G1Affine { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G1Affine) ScalarMul(a *G1Affine, s *big.Int) *G1Affine { var _p G1Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -59,6 +59,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { return p } +// ScalarMulUnconverted computes and returns p = a ⋅ s +// Takes an affine point and returns a Jacobian point (useful for KZG) +func (p *G1Jac) ScalarMulUnconverted(a *G1Affine, s *big.Int) *G1Jac { + p.FromAffine(a) + p.mulGLV(p, s) + return p +} + // Add adds two point in affine coordinates. // This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { @@ -323,9 +331,9 @@ func (p *G1Jac) DoubleAssign() *G1Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { +func (p *G1Jac) ScalarMul(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } @@ -374,8 +382,8 @@ func (p *G1Jac) IsInSubGroup() bool { var res G1Jac res.phi(p). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). 
AddAssign(p) return res.IsOnCurve() && res.Z.IsZero() @@ -506,7 +514,7 @@ func (p *G1Affine) ClearCofactor(a *G1Affine) *G1Affine { func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { // cf https://eprint.iacr.org/2019/403.pdf, 5 var res G1Jac - res.ScalarMultiplication(a, &xGen).Neg(&res).AddAssign(a) + res.ScalarMul(a, &xGen).Neg(&res).AddAssign(a) p.Set(&res) return p @@ -861,10 +869,10 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) { } -// BatchScalarMultiplicationG1 multiplies the same base by all scalars +// BatchScalarMulG1 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affine { +func BatchScalarMulG1(base *G1Affine, scalars []fr.Element) []G1Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bls12-378/g1_test.go b/ecc/bls12-378/g1_test.go index ca3840383..811d97c9f 100644 --- a/ecc/bls12-378/g1_test.go +++ b/ecc/bls12-378/g1_test.go @@ -110,7 +110,7 @@ func TestG1AffineIsOnCurve(t *testing.T) { var op1, op2 G1Jac op1 = fuzzG1Jac(&g1Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenFp(), @@ -353,12 +353,12 @@ func TestG1AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G1Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g1Gen, &rminusone) + op3.ScalarMul(&g1Gen, &rminusone) gneg.Neg(&g1Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g1Gen, &scalar) - op2.ScalarMultiplication(&g1Gen, &blindedScalar) + op1.ScalarMul(&g1Gen, &scalar) + op2.ScalarMul(&g1Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g1Infinity) && !op1.Equal(&g1Infinity) && gneg.Equal(&op3) @@ -422,7 +422,7 @@ func TestG1AffineCofactorCleaning(t *testing.T) { } -func TestG1AffineBatchScalarMultiplication(t *testing.T) { +func TestG1AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -438,7 +438,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BLS12-378] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BLS12-378] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -449,7 +449,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:]) + result := BatchScalarMulG1(&g1GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -508,7 +508,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:using]) + _ = BatchScalarMulG1(&g1GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bls12-378/g2.go b/ecc/bls12-378/g2.go index 44789a126..a880ddb53 100644 --- a/ecc/bls12-378/g2.go +++ b/ecc/bls12-378/g2.go @@ -55,8 +55,8 @@ func (p *G2Affine) Set(a *G2Affine) *G2Affine { return p } -// 
ScalarMultiplication computes and returns p = a ⋅ s -func (p *G2Affine) ScalarMultiplication(a *G2Affine, s *big.Int) *G2Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G2Affine) ScalarMul(a *G2Affine, s *big.Int) *G2Affine { var _p G2Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -328,9 +328,9 @@ func (p *G2Jac) DoubleAssign() *G2Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { +func (p *G2Jac) ScalarMul(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } @@ -374,7 +374,7 @@ func (p *G2Jac) IsOnCurve() bool { func (p *G2Jac) IsInSubGroup() bool { var res, tmp G2Jac tmp.psi(p) - res.ScalarMultiplication(p, &xGen). + res.ScalarMul(p, &xGen). SubAssign(&tmp) return res.IsOnCurve() && res.Z.IsZero() @@ -513,8 +513,8 @@ func (p *G2Affine) ClearCofactor(a *G2Affine) *G2Affine { func (p *G2Jac) ClearCofactor(a *G2Jac) *G2Jac { // https://eprint.iacr.org/2017/419.pdf, 4.1 var xg, xxg, res, t G2Jac - xg.ScalarMultiplication(a, &xGen) - xxg.ScalarMultiplication(&xg, &xGen) + xg.ScalarMul(a, &xGen) + xxg.ScalarMul(&xg, &xGen) res.Set(&xxg). SubAssign(&xg). @@ -868,10 +868,10 @@ func (p *g2Proj) FromAffine(Q *G2Affine) *g2Proj { return p } -// BatchScalarMultiplicationG2 multiplies the same base by all scalars +// BatchScalarMulG2 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG2(base *G2Affine, scalars []fr.Element) []G2Affine { +func BatchScalarMulG2(base *G2Affine, scalars []fr.Element) []G2Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bls12-378/g2_test.go b/ecc/bls12-378/g2_test.go index 89ebff3cc..4ec9a0131 100644 --- a/ecc/bls12-378/g2_test.go +++ b/ecc/bls12-378/g2_test.go @@ -124,7 +124,7 @@ func TestG2AffineIsOnCurve(t *testing.T) { var op1, op2 G2Jac op1 = fuzzG2Jac(&g2Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenE2(), @@ -375,12 +375,12 @@ func TestG2AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G2Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g2Gen, &rminusone) + op3.ScalarMul(&g2Gen, &rminusone) gneg.Neg(&g2Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g2Gen, &scalar) - op2.ScalarMultiplication(&g2Gen, &blindedScalar) + op1.ScalarMul(&g2Gen, &scalar) + op2.ScalarMul(&g2Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g2Infinity) && !op1.Equal(&g2Infinity) && gneg.Equal(&op3) @@ -441,7 +441,7 @@ func TestG2AffineCofactorCleaning(t *testing.T) { } -func TestG2AffineBatchScalarMultiplication(t *testing.T) { +func TestG2AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -457,7 +457,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BLS12-378] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BLS12-378] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer 
ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -468,7 +468,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:]) + result := BatchScalarMulG2(&g2GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -527,7 +527,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:using]) + _ = BatchScalarMulG2(&g2GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bls12-378/marshal_test.go b/ecc/bls12-378/marshal_test.go index fa23da6fa..82e9efc3d 100644 --- a/ecc/bls12-378/marshal_test.go +++ b/ecc/bls12-378/marshal_test.go @@ -55,9 +55,9 @@ func TestEncoder(t *testing.T) { inA = rand.Uint64() inB.SetRandom() inC.SetRandom() - inD.ScalarMultiplication(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) + inD.ScalarMul(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) // inE --> infinity - inF.ScalarMultiplication(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) + inF.ScalarMul(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) inG = make([]G1Affine, 2) inH = make([]G2Affine, 0) inG[1] = inD @@ -263,7 +263,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -283,7 +283,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) @@ -356,7 +356,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -376,7 +376,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) diff --git a/ecc/bls12-378/multiexp_test.go b/ecc/bls12-378/multiexp_test.go index ff93935d2..1fef29f62 100644 --- a/ecc/bls12-378/multiexp_test.go +++ b/ecc/bls12-378/multiexp_test.go @@ -100,7 +100,7 @@ func TestMultiExpG1(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g1Gen, &finalScalar) + expected.ScalarMul(&g1Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -150,7 +150,7 @@ func TestMultiExpG1(t *testing.T) { var op1ScalarMul G1Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g1GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -249,7 +249,7 @@ func BenchmarkManyMultiExpG1Reference(b *testing.B) { func fillBenchBasesG1(samplePoints []G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - 
samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() @@ -330,7 +330,7 @@ func TestMultiExpG2(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g2Gen, &finalScalar) + expected.ScalarMul(&g2Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -380,7 +380,7 @@ func TestMultiExpG2(t *testing.T) { var op1ScalarMul G2Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g2GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -479,7 +479,7 @@ func BenchmarkManyMultiExpG2Reference(b *testing.B) { func fillBenchBasesG2(samplePoints []G2Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() diff --git a/ecc/bls12-378/pairing_test.go b/ecc/bls12-378/pairing_test.go index 790d64d88..043be8016 100644 --- a/ecc/bls12-378/pairing_test.go +++ b/ecc/bls12-378/pairing_test.go @@ -120,8 +120,8 @@ func TestPairing(t *testing.T) { b.ToBigIntRegular(&bbigint) ab.Mul(&abigint, &bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ = Pair([]G1Affine{g1GenAff}, []G2Affine{g2GenAff}) resa, _ = Pair([]G1Affine{ag1}, []G2Affine{g2GenAff}) @@ -185,8 +185,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) P0 := []G1Affine{g1GenAff} P1 := []G1Affine{ag1} @@ -228,8 +228,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) g1Inf.FromJacobian(&g1Infinity) g2Inf.FromJacobian(&g2Infinity) @@ -266,8 +266,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ := Pair([]G1Affine{ag1}, []G2Affine{bg2}) diff --git a/ecc/bls12-381/fr/kzg/kzg.go b/ecc/bls12-381/fr/kzg/kzg.go index d2dfaf6dd..8ed6a9a24 100644 --- a/ecc/bls12-381/fr/kzg/kzg.go +++ b/ecc/bls12-381/fr/kzg/kzg.go @@ -77,7 +77,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { _, _, gen1Aff, gen2Aff := bls12381.Generators() srs.G1[0] = gen1Aff srs.G2[0] = gen2Aff - srs.G2[1].ScalarMultiplication(&gen2Aff, bAlpha) + srs.G2[1].ScalarMul(&gen2Aff, bAlpha) alphas := make([]fr.Element, size-1) alphas[0] = alpha @@ -87,7 +87,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { for i := 0; i < len(alphas); i++ { alphas[i].FromMont() } - g1s := bls12381.BatchScalarMultiplicationG1(&gen1Aff, alphas) + g1s := 
bls12381.BatchScalarMulG1(&gen1Aff, alphas) copy(srs.G1[1:], g1s) return &srs, nil @@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) { func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error { // [f(a)]G₁ - var claimedValueG1Aff bls12381.G1Affine + var claimedValueG1Aff bls12381.G1Jac var claimedValueBigInt big.Int proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt) + claimedValueG1Aff.ScalarMulUnconverted(&srs.G1[0], &claimedValueBigInt) // [f(α) - f(a)]G₁ - var fminusfaG1Jac, tmpG1Jac bls12381.G1Jac + var fminusfaG1Jac bls12381.G1Jac fminusfaG1Jac.FromAffine(commitment) - tmpG1Jac.FromAffine(&claimedValueG1Aff) - fminusfaG1Jac.SubAssign(&tmpG1Jac) + fminusfaG1Jac.SubAssign(&claimedValueG1Aff) // [-H(α)]G₁ var negH bls12381.G1Affine @@ -190,7 +189,7 @@ func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) point.ToBigIntRegular(&pointBigInt) genG2Jac.FromAffine(&srs.G2[0]) alphaG2Jac.FromAffine(&srs.G2[1]) - alphaMinusaG2Jac.ScalarMultiplication(&genG2Jac, &pointBigInt). + alphaMinusaG2Jac.ScalarMul(&genG2Jac, &pointBigInt). Neg(&alphaMinusaG2Jac). AddAssign(&alphaG2Jac) @@ -419,7 +418,7 @@ func BatchVerifyMultiPoints(digests []Digest, proofs []OpeningProof, points []fr var foldedEvalsCommit bls12381.G1Affine var foldedEvalsBigInt big.Int foldedEvals.ToBigIntRegular(&foldedEvalsBigInt) - foldedEvalsCommit.ScalarMultiplication(&srs.G1[0], &foldedEvalsBigInt) + foldedEvalsCommit.ScalarMul(&srs.G1[0], &foldedEvalsBigInt) // compute foldedDigests = ∑ᵢλᵢ[fᵢ(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁ foldedDigests.Sub(&foldedDigests, &foldedEvalsCommit) diff --git a/ecc/bls12-381/fr/kzg/kzg_test.go b/ecc/bls12-381/fr/kzg/kzg_test.go index 2332edb46..6c93f32e1 100644 --- a/ecc/bls12-381/fr/kzg/kzg_test.go +++ b/ecc/bls12-381/fr/kzg/kzg_test.go @@ -130,7 +130,7 @@ func TestCommit(t *testing.T) { fx.ToBigIntRegular(&fxbi) var manualCommit bls12381.G1Affine manualCommit.Set(&testSRS.G1[0]) - manualCommit.ScalarMultiplication(&manualCommit, &fxbi) + manualCommit.ScalarMul(&manualCommit, &fxbi) // compare both results if !kzgCommit.Equal(&manualCommit) { diff --git a/ecc/bls12-381/fr/plookup/table.go b/ecc/bls12-381/fr/plookup/table.go index 8593e30c6..55119d951 100644 --- a/ecc/bls12-381/fr/plookup/table.go +++ b/ecc/bls12-381/fr/plookup/table.go @@ -209,9 +209,9 @@ func VerifyLookupTables(srs *kzg.SRS, proof ProofLookupTables) error { var blambda big.Int lambda.ToBigIntRegular(&blambda) for i := nbRows - 2; i >= 0; i-- { - comf.ScalarMultiplication(&comf, &blambda). + comf.ScalarMul(&comf, &blambda). Add(&comf, &proof.fs[i]) - comt.ScalarMultiplication(&comt, &blambda). + comt.ScalarMul(&comt, &blambda). 
Add(&comt, &proof.ts[i]) } diff --git a/ecc/bls12-381/g1.go b/ecc/bls12-381/g1.go index fc151cb34..d4e8e85ec 100644 --- a/ecc/bls12-381/g1.go +++ b/ecc/bls12-381/g1.go @@ -50,8 +50,8 @@ func (p *G1Affine) Set(a *G1Affine) *G1Affine { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G1Affine) ScalarMul(a *G1Affine, s *big.Int) *G1Affine { var _p G1Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -59,6 +59,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { return p } +// ScalarMulUnconverted computes and returns p = a ⋅ s +// Takes an affine point and returns a Jacobian point (useful for KZG) +func (p *G1Jac) ScalarMulUnconverted(a *G1Affine, s *big.Int) *G1Jac { + p.FromAffine(a) + p.mulGLV(p, s) + return p +} + // Add adds two point in affine coordinates. // This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { @@ -323,9 +331,9 @@ func (p *G1Jac) DoubleAssign() *G1Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { +func (p *G1Jac) ScalarMul(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } @@ -374,8 +382,8 @@ func (p *G1Jac) IsInSubGroup() bool { var res G1Jac res.phi(p). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). AddAssign(p) return res.IsOnCurve() && res.Z.IsZero() @@ -506,7 +514,7 @@ func (p *G1Affine) ClearCofactor(a *G1Affine) *G1Affine { func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { // cf https://eprint.iacr.org/2019/403.pdf, 5 var res G1Jac - res.ScalarMultiplication(a, &xGen).AddAssign(a) + res.ScalarMul(a, &xGen).AddAssign(a) p.Set(&res) return p @@ -861,10 +869,10 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) { } -// BatchScalarMultiplicationG1 multiplies the same base by all scalars +// BatchScalarMulG1 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affine { +func BatchScalarMulG1(base *G1Affine, scalars []fr.Element) []G1Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bls12-381/g1_test.go b/ecc/bls12-381/g1_test.go index 700a4227b..c237a7ff9 100644 --- a/ecc/bls12-381/g1_test.go +++ b/ecc/bls12-381/g1_test.go @@ -110,7 +110,7 @@ func TestG1AffineIsOnCurve(t *testing.T) { var op1, op2 G1Jac op1 = fuzzG1Jac(&g1Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenFp(), @@ -353,12 +353,12 @@ func TestG1AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G1Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g1Gen, &rminusone) + op3.ScalarMul(&g1Gen, &rminusone) gneg.Neg(&g1Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g1Gen, &scalar) - op2.ScalarMultiplication(&g1Gen, &blindedScalar) + op1.ScalarMul(&g1Gen, &scalar) + op2.ScalarMul(&g1Gen, &blindedScalar) return op1.Equal(&op2) && 
g.Equal(&g1Infinity) && !op1.Equal(&g1Infinity) && gneg.Equal(&op3) @@ -422,7 +422,7 @@ func TestG1AffineCofactorCleaning(t *testing.T) { } -func TestG1AffineBatchScalarMultiplication(t *testing.T) { +func TestG1AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -438,7 +438,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BLS12-381] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BLS12-381] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -449,7 +449,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:]) + result := BatchScalarMulG1(&g1GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -508,7 +508,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:using]) + _ = BatchScalarMulG1(&g1GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bls12-381/g2.go b/ecc/bls12-381/g2.go index d129abb7f..e3bbe51a7 100644 --- a/ecc/bls12-381/g2.go +++ b/ecc/bls12-381/g2.go @@ -55,8 +55,8 @@ func (p *G2Affine) Set(a *G2Affine) *G2Affine { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *G2Affine) ScalarMultiplication(a *G2Affine, s *big.Int) *G2Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G2Affine) ScalarMul(a *G2Affine, s *big.Int) *G2Affine { var _p G2Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -328,9 +328,9 @@ func (p *G2Jac) DoubleAssign() *G2Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { +func (p *G2Jac) ScalarMul(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } @@ -375,7 +375,7 @@ func (p *G2Jac) IsOnCurve() bool { func (p *G2Jac) IsInSubGroup() bool { var res, tmp G2Jac tmp.psi(p) - res.ScalarMultiplication(p, &xGen). + res.ScalarMul(p, &xGen). AddAssign(&tmp) return res.IsOnCurve() && res.Z.IsZero() @@ -514,8 +514,8 @@ func (p *G2Affine) ClearCofactor(a *G2Affine) *G2Affine { func (p *G2Jac) ClearCofactor(a *G2Jac) *G2Jac { // https://eprint.iacr.org/2017/419.pdf, 4.1 var xg, xxg, res, t G2Jac - xg.ScalarMultiplication(a, &xGen).Neg(&xg) - xxg.ScalarMultiplication(&xg, &xGen).Neg(&xxg) + xg.ScalarMul(a, &xGen).Neg(&xg) + xxg.ScalarMul(&xg, &xGen).Neg(&xxg) res.Set(&xxg). SubAssign(&xg). 
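As the property tests above state, the renamed batch API stays consistent with individual scalar multiplications. A minimal sketch against bls12-381 as patched here; like NewSRS and the tests, it converts the scalars out of Montgomery form with FromMont before the batch call (scalar values are illustrative):

package main

import (
	"fmt"
	"math/big"

	bls12381 "github.com/consensys/gnark-crypto/ecc/bls12-381"
	"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
)

func main() {
	_, _, g1Aff, _ := bls12381.Generators()

	// keep each scalar both as an fr.Element (regular form, for the batch call)
	// and as a big.Int (for the individual calls)
	values := []uint64{3, 17, 255, 65537}
	scalars := make([]fr.Element, len(values))
	bigs := make([]big.Int, len(values))
	for i, v := range values {
		scalars[i].SetUint64(v)
		scalars[i].FromMont() // batch API consumes scalars in regular form
		bigs[i].SetUint64(v)
	}

	batch := bls12381.BatchScalarMulG1(&g1Aff, scalars) // was BatchScalarMultiplicationG1

	var single bls12381.G1Affine
	for i := range batch {
		single.ScalarMul(&g1Aff, &bigs[i])   // was ScalarMultiplication
		fmt.Println(batch[i].Equal(&single)) // expected: true for every i
	}
}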
@@ -869,10 +869,10 @@ func (p *g2Proj) FromAffine(Q *G2Affine) *g2Proj { return p } -// BatchScalarMultiplicationG2 multiplies the same base by all scalars +// BatchScalarMulG2 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG2(base *G2Affine, scalars []fr.Element) []G2Affine { +func BatchScalarMulG2(base *G2Affine, scalars []fr.Element) []G2Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bls12-381/g2_test.go b/ecc/bls12-381/g2_test.go index e24dffa78..b07174023 100644 --- a/ecc/bls12-381/g2_test.go +++ b/ecc/bls12-381/g2_test.go @@ -124,7 +124,7 @@ func TestG2AffineIsOnCurve(t *testing.T) { var op1, op2 G2Jac op1 = fuzzG2Jac(&g2Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenE2(), @@ -375,12 +375,12 @@ func TestG2AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G2Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g2Gen, &rminusone) + op3.ScalarMul(&g2Gen, &rminusone) gneg.Neg(&g2Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g2Gen, &scalar) - op2.ScalarMultiplication(&g2Gen, &blindedScalar) + op1.ScalarMul(&g2Gen, &scalar) + op2.ScalarMul(&g2Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g2Infinity) && !op1.Equal(&g2Infinity) && gneg.Equal(&op3) @@ -441,7 +441,7 @@ func TestG2AffineCofactorCleaning(t *testing.T) { } -func TestG2AffineBatchScalarMultiplication(t *testing.T) { +func TestG2AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -457,7 +457,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BLS12-381] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BLS12-381] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -468,7 +468,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:]) + result := BatchScalarMulG2(&g2GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -527,7 +527,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:using]) + _ = BatchScalarMulG2(&g2GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bls12-381/marshal_test.go b/ecc/bls12-381/marshal_test.go index 6fb4c0f62..2bdfa0208 100644 --- a/ecc/bls12-381/marshal_test.go +++ b/ecc/bls12-381/marshal_test.go @@ -55,9 +55,9 @@ func TestEncoder(t *testing.T) { inA = rand.Uint64() inB.SetRandom() inC.SetRandom() - inD.ScalarMultiplication(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) + inD.ScalarMul(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) // inE --> infinity - inF.ScalarMultiplication(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) + inF.ScalarMul(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) inG = 
make([]G1Affine, 2) inH = make([]G2Affine, 0) inG[1] = inD @@ -263,7 +263,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -283,7 +283,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) @@ -356,7 +356,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -376,7 +376,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) diff --git a/ecc/bls12-381/multiexp_test.go b/ecc/bls12-381/multiexp_test.go index 9dd4d40bb..3f139299c 100644 --- a/ecc/bls12-381/multiexp_test.go +++ b/ecc/bls12-381/multiexp_test.go @@ -100,7 +100,7 @@ func TestMultiExpG1(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g1Gen, &finalScalar) + expected.ScalarMul(&g1Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -150,7 +150,7 @@ func TestMultiExpG1(t *testing.T) { var op1ScalarMul G1Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g1GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -249,7 +249,7 @@ func BenchmarkManyMultiExpG1Reference(b *testing.B) { func fillBenchBasesG1(samplePoints []G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() @@ -330,7 +330,7 @@ func TestMultiExpG2(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g2Gen, &finalScalar) + expected.ScalarMul(&g2Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -380,7 +380,7 @@ func TestMultiExpG2(t *testing.T) { var op1ScalarMul G2Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g2GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -479,7 +479,7 @@ func BenchmarkManyMultiExpG2Reference(b *testing.B) { func fillBenchBasesG2(samplePoints []G2Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() diff --git a/ecc/bls12-381/pairing_test.go 
b/ecc/bls12-381/pairing_test.go index 3262379ef..834169493 100644 --- a/ecc/bls12-381/pairing_test.go +++ b/ecc/bls12-381/pairing_test.go @@ -120,8 +120,8 @@ func TestPairing(t *testing.T) { b.ToBigIntRegular(&bbigint) ab.Mul(&abigint, &bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ = Pair([]G1Affine{g1GenAff}, []G2Affine{g2GenAff}) resa, _ = Pair([]G1Affine{ag1}, []G2Affine{g2GenAff}) @@ -185,8 +185,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) P0 := []G1Affine{g1GenAff} P1 := []G1Affine{ag1} @@ -228,8 +228,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) g1Inf.FromJacobian(&g1Infinity) g2Inf.FromJacobian(&g2Infinity) @@ -266,8 +266,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ := Pair([]G1Affine{ag1}, []G2Affine{bg2}) diff --git a/ecc/bls24-315/fr/kzg/kzg.go b/ecc/bls24-315/fr/kzg/kzg.go index b090802b5..ab9d0468c 100644 --- a/ecc/bls24-315/fr/kzg/kzg.go +++ b/ecc/bls24-315/fr/kzg/kzg.go @@ -77,7 +77,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { _, _, gen1Aff, gen2Aff := bls24315.Generators() srs.G1[0] = gen1Aff srs.G2[0] = gen2Aff - srs.G2[1].ScalarMultiplication(&gen2Aff, bAlpha) + srs.G2[1].ScalarMul(&gen2Aff, bAlpha) alphas := make([]fr.Element, size-1) alphas[0] = alpha @@ -87,7 +87,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { for i := 0; i < len(alphas); i++ { alphas[i].FromMont() } - g1s := bls24315.BatchScalarMultiplicationG1(&gen1Aff, alphas) + g1s := bls24315.BatchScalarMulG1(&gen1Aff, alphas) copy(srs.G1[1:], g1s) return &srs, nil @@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) { func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error { // [f(a)]G₁ - var claimedValueG1Aff bls24315.G1Affine + var claimedValueG1Aff bls24315.G1Jac var claimedValueBigInt big.Int proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt) + claimedValueG1Aff.ScalarMulUnconverted(&srs.G1[0], &claimedValueBigInt) // [f(α) - f(a)]G₁ - var fminusfaG1Jac, tmpG1Jac bls24315.G1Jac + var fminusfaG1Jac bls24315.G1Jac fminusfaG1Jac.FromAffine(commitment) - tmpG1Jac.FromAffine(&claimedValueG1Aff) - fminusfaG1Jac.SubAssign(&tmpG1Jac) + fminusfaG1Jac.SubAssign(&claimedValueG1Aff) // [-H(α)]G₁ var negH bls24315.G1Affine @@ -190,7 +189,7 @@ func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) point.ToBigIntRegular(&pointBigInt) genG2Jac.FromAffine(&srs.G2[0]) alphaG2Jac.FromAffine(&srs.G2[1]) - alphaMinusaG2Jac.ScalarMultiplication(&genG2Jac, &pointBigInt). + alphaMinusaG2Jac.ScalarMul(&genG2Jac, &pointBigInt). Neg(&alphaMinusaG2Jac). 
AddAssign(&alphaG2Jac) @@ -419,7 +418,7 @@ func BatchVerifyMultiPoints(digests []Digest, proofs []OpeningProof, points []fr var foldedEvalsCommit bls24315.G1Affine var foldedEvalsBigInt big.Int foldedEvals.ToBigIntRegular(&foldedEvalsBigInt) - foldedEvalsCommit.ScalarMultiplication(&srs.G1[0], &foldedEvalsBigInt) + foldedEvalsCommit.ScalarMul(&srs.G1[0], &foldedEvalsBigInt) // compute foldedDigests = ∑ᵢλᵢ[fᵢ(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁ foldedDigests.Sub(&foldedDigests, &foldedEvalsCommit) diff --git a/ecc/bls24-315/fr/kzg/kzg_test.go b/ecc/bls24-315/fr/kzg/kzg_test.go index 780824d3b..a06032938 100644 --- a/ecc/bls24-315/fr/kzg/kzg_test.go +++ b/ecc/bls24-315/fr/kzg/kzg_test.go @@ -130,7 +130,7 @@ func TestCommit(t *testing.T) { fx.ToBigIntRegular(&fxbi) var manualCommit bls24315.G1Affine manualCommit.Set(&testSRS.G1[0]) - manualCommit.ScalarMultiplication(&manualCommit, &fxbi) + manualCommit.ScalarMul(&manualCommit, &fxbi) // compare both results if !kzgCommit.Equal(&manualCommit) { diff --git a/ecc/bls24-315/fr/plookup/table.go b/ecc/bls24-315/fr/plookup/table.go index 1eb07b83a..02f8fb9c6 100644 --- a/ecc/bls24-315/fr/plookup/table.go +++ b/ecc/bls24-315/fr/plookup/table.go @@ -209,9 +209,9 @@ func VerifyLookupTables(srs *kzg.SRS, proof ProofLookupTables) error { var blambda big.Int lambda.ToBigIntRegular(&blambda) for i := nbRows - 2; i >= 0; i-- { - comf.ScalarMultiplication(&comf, &blambda). + comf.ScalarMul(&comf, &blambda). Add(&comf, &proof.fs[i]) - comt.ScalarMultiplication(&comt, &blambda). + comt.ScalarMul(&comt, &blambda). Add(&comt, &proof.ts[i]) } diff --git a/ecc/bls24-315/g1.go b/ecc/bls24-315/g1.go index 66bbda7b7..0c8ed499d 100644 --- a/ecc/bls24-315/g1.go +++ b/ecc/bls24-315/g1.go @@ -50,8 +50,8 @@ func (p *G1Affine) Set(a *G1Affine) *G1Affine { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G1Affine) ScalarMul(a *G1Affine, s *big.Int) *G1Affine { var _p G1Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -59,6 +59,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { return p } +// ScalarMulUnconverted computes and returns p = a ⋅ s +// Takes an affine point and returns a Jacobian point (useful for KZG) +func (p *G1Jac) ScalarMulUnconverted(a *G1Affine, s *big.Int) *G1Jac { + p.FromAffine(a) + p.mulGLV(p, s) + return p +} + // Add adds two point in affine coordinates. // This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { @@ -323,9 +331,9 @@ func (p *G1Jac) DoubleAssign() *G1Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { +func (p *G1Jac) ScalarMul(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } @@ -374,10 +382,10 @@ func (p *G1Jac) IsInSubGroup() bool { var res G1Jac res.phi(p). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). 
AddAssign(p) return res.IsOnCurve() && res.Z.IsZero() @@ -508,7 +516,7 @@ func (p *G1Affine) ClearCofactor(a *G1Affine) *G1Affine { func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { // cf https://eprint.iacr.org/2019/403.pdf, 5 var res G1Jac - res.ScalarMultiplication(a, &xGen).AddAssign(a) + res.ScalarMul(a, &xGen).AddAssign(a) p.Set(&res) return p @@ -863,10 +871,10 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) { } -// BatchScalarMultiplicationG1 multiplies the same base by all scalars +// BatchScalarMulG1 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affine { +func BatchScalarMulG1(base *G1Affine, scalars []fr.Element) []G1Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bls24-315/g1_test.go b/ecc/bls24-315/g1_test.go index 0321ddaea..405f36efd 100644 --- a/ecc/bls24-315/g1_test.go +++ b/ecc/bls24-315/g1_test.go @@ -110,7 +110,7 @@ func TestG1AffineIsOnCurve(t *testing.T) { var op1, op2 G1Jac op1 = fuzzG1Jac(&g1Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenFp(), @@ -353,12 +353,12 @@ func TestG1AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G1Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g1Gen, &rminusone) + op3.ScalarMul(&g1Gen, &rminusone) gneg.Neg(&g1Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g1Gen, &scalar) - op2.ScalarMultiplication(&g1Gen, &blindedScalar) + op1.ScalarMul(&g1Gen, &scalar) + op2.ScalarMul(&g1Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g1Infinity) && !op1.Equal(&g1Infinity) && gneg.Equal(&op3) @@ -422,7 +422,7 @@ func TestG1AffineCofactorCleaning(t *testing.T) { } -func TestG1AffineBatchScalarMultiplication(t *testing.T) { +func TestG1AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -438,7 +438,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BLS24-315] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BLS24-315] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -449,7 +449,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:]) + result := BatchScalarMulG1(&g1GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -508,7 +508,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:using]) + _ = BatchScalarMulG1(&g1GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bls24-315/g2.go b/ecc/bls24-315/g2.go index 538191e50..a756199b4 100644 --- a/ecc/bls24-315/g2.go +++ b/ecc/bls24-315/g2.go @@ -55,8 +55,8 @@ func (p *G2Affine) Set(a *G2Affine) *G2Affine { return p } -// ScalarMultiplication computes 
and returns p = a ⋅ s -func (p *G2Affine) ScalarMultiplication(a *G2Affine, s *big.Int) *G2Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G2Affine) ScalarMul(a *G2Affine, s *big.Int) *G2Affine { var _p G2Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -328,9 +328,9 @@ func (p *G2Jac) DoubleAssign() *G2Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { +func (p *G2Jac) ScalarMul(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } @@ -375,7 +375,7 @@ func (p *G2Jac) IsOnCurve() bool { func (p *G2Jac) IsInSubGroup() bool { var res, tmp G2Jac tmp.psi(p) - res.ScalarMultiplication(p, &xGen). + res.ScalarMul(p, &xGen). AddAssign(&tmp) return res.IsOnCurve() && res.Z.IsZero() @@ -517,10 +517,10 @@ func (p *G2Jac) ClearCofactor(a *G2Jac) *G2Jac { // multiply by (3x⁴-3)*cofacor var xg, xxg, xxxg, xxxxg, res, t G2Jac - xg.ScalarMultiplication(a, &xGen).Neg(&xg).SubAssign(a) - xxg.ScalarMultiplication(&xg, &xGen).Neg(&xxg) - xxxg.ScalarMultiplication(&xxg, &xGen).Neg(&xxxg) - xxxxg.ScalarMultiplication(&xxxg, &xGen).Neg(&xxxxg) + xg.ScalarMul(a, &xGen).Neg(&xg).SubAssign(a) + xxg.ScalarMul(&xg, &xGen).Neg(&xxg) + xxxg.ScalarMul(&xxg, &xGen).Neg(&xxxg) + xxxxg.ScalarMul(&xxxg, &xGen).Neg(&xxxxg) res.Set(&xxxxg). SubAssign(a) @@ -884,10 +884,10 @@ func (p *g2Proj) FromAffine(Q *G2Affine) *g2Proj { return p } -// BatchScalarMultiplicationG2 multiplies the same base by all scalars +// BatchScalarMulG2 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG2(base *G2Affine, scalars []fr.Element) []G2Affine { +func BatchScalarMulG2(base *G2Affine, scalars []fr.Element) []G2Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bls24-315/g2_test.go b/ecc/bls24-315/g2_test.go index 468b425f5..3721480b5 100644 --- a/ecc/bls24-315/g2_test.go +++ b/ecc/bls24-315/g2_test.go @@ -124,7 +124,7 @@ func TestG2AffineIsOnCurve(t *testing.T) { var op1, op2 G2Jac op1 = fuzzG2Jac(&g2Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenE4(), @@ -375,12 +375,12 @@ func TestG2AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G2Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g2Gen, &rminusone) + op3.ScalarMul(&g2Gen, &rminusone) gneg.Neg(&g2Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g2Gen, &scalar) - op2.ScalarMultiplication(&g2Gen, &blindedScalar) + op1.ScalarMul(&g2Gen, &scalar) + op2.ScalarMul(&g2Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g2Infinity) && !op1.Equal(&g2Infinity) && gneg.Equal(&op3) @@ -441,7 +441,7 @@ func TestG2AffineCofactorCleaning(t *testing.T) { } -func TestG2AffineBatchScalarMultiplication(t *testing.T) { +func TestG2AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -457,7 +457,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BLS24-315] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( 
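The property being renamed here states that the batch routine must agree with per-scalar multiplication. A minimal sketch of that check, written as if it sat in this test package and assuming the renamed API from this diff; the FromMont calls mirror how kzg.NewSRS prepares scalars before handing them to BatchScalarMulG1:

// Sketch only: BatchScalarMulG2 must agree with per-scalar ScalarMul on the
// same base. The batch routine reads regular-form limbs, hence FromMont.
func batchMatchesIndividual() bool {
	const nbSamples = 10
	var scalars [nbSamples]fr.Element
	var asInts [nbSamples]big.Int
	for i := range scalars {
		scalars[i].SetRandom()
		scalars[i].ToBigIntRegular(&asInts[i]) // canonical integer value, for the per-point check
		scalars[i].FromMont()                  // regular-form limbs for the batch routine
	}
	batch := BatchScalarMulG2(&g2GenAff, scalars[:])
	for i := range batch {
		var expected G2Affine
		expected.ScalarMul(&g2GenAff, &asInts[i])
		if !batch[i].Equal(&expected) {
			return false
		}
	}
	return true
}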
+ properties.Property("[BLS24-315] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -468,7 +468,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:]) + result := BatchScalarMulG2(&g2GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -527,7 +527,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:using]) + _ = BatchScalarMulG2(&g2GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bls24-315/marshal_test.go b/ecc/bls24-315/marshal_test.go index 4a038f6f4..f11cfb21a 100644 --- a/ecc/bls24-315/marshal_test.go +++ b/ecc/bls24-315/marshal_test.go @@ -55,9 +55,9 @@ func TestEncoder(t *testing.T) { inA = rand.Uint64() inB.SetRandom() inC.SetRandom() - inD.ScalarMultiplication(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) + inD.ScalarMul(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) // inE --> infinity - inF.ScalarMultiplication(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) + inF.ScalarMul(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) inG = make([]G1Affine, 2) inH = make([]G2Affine, 0) inG[1] = inD @@ -263,7 +263,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -283,7 +283,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) @@ -356,7 +356,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -376,7 +376,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) diff --git a/ecc/bls24-315/multiexp_test.go b/ecc/bls24-315/multiexp_test.go index 45e014ede..0f5926ad2 100644 --- a/ecc/bls24-315/multiexp_test.go +++ b/ecc/bls24-315/multiexp_test.go @@ -100,7 +100,7 @@ func TestMultiExpG1(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g1Gen, &finalScalar) + expected.ScalarMul(&g1Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -150,7 +150,7 @@ func TestMultiExpG1(t *testing.T) { var op1ScalarMul G1Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g1GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -249,7 +249,7 @@ func BenchmarkManyMultiExpG1Reference(b *testing.B) { func 
fillBenchBasesG1(samplePoints []G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() @@ -330,7 +330,7 @@ func TestMultiExpG2(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g2Gen, &finalScalar) + expected.ScalarMul(&g2Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -380,7 +380,7 @@ func TestMultiExpG2(t *testing.T) { var op1ScalarMul G2Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g2GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -479,7 +479,7 @@ func BenchmarkManyMultiExpG2Reference(b *testing.B) { func fillBenchBasesG2(samplePoints []G2Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() diff --git a/ecc/bls24-315/pairing_test.go b/ecc/bls24-315/pairing_test.go index 44a645790..cae8085a5 100644 --- a/ecc/bls24-315/pairing_test.go +++ b/ecc/bls24-315/pairing_test.go @@ -122,8 +122,8 @@ func TestPairing(t *testing.T) { b.ToBigIntRegular(&bbigint) ab.Mul(&abigint, &bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ = Pair([]G1Affine{g1GenAff}, []G2Affine{g2GenAff}) resa, _ = Pair([]G1Affine{ag1}, []G2Affine{g2GenAff}) @@ -187,8 +187,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) P0 := []G1Affine{g1GenAff} P1 := []G1Affine{ag1} @@ -230,8 +230,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) g1Inf.FromJacobian(&g1Infinity) g2Inf.FromJacobian(&g2Infinity) @@ -268,8 +268,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ := Pair([]G1Affine{ag1}, []G2Affine{bg2}) diff --git a/ecc/bls24-317/fr/kzg/kzg.go b/ecc/bls24-317/fr/kzg/kzg.go index 69c2cd411..4c8c021d5 100644 --- a/ecc/bls24-317/fr/kzg/kzg.go +++ b/ecc/bls24-317/fr/kzg/kzg.go @@ -77,7 +77,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { _, _, gen1Aff, gen2Aff := bls24317.Generators() srs.G1[0] = gen1Aff srs.G2[0] = gen2Aff - srs.G2[1].ScalarMultiplication(&gen2Aff, bAlpha) + srs.G2[1].ScalarMul(&gen2Aff, bAlpha) alphas := make([]fr.Element, size-1) alphas[0] = alpha @@ -87,7 +87,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { for i := 0; i < len(alphas); i++ { 
alphas[i].FromMont() } - g1s := bls24317.BatchScalarMultiplicationG1(&gen1Aff, alphas) + g1s := bls24317.BatchScalarMulG1(&gen1Aff, alphas) copy(srs.G1[1:], g1s) return &srs, nil @@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) { func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error { // [f(a)]G₁ - var claimedValueG1Aff bls24317.G1Affine + var claimedValueG1Aff bls24317.G1Jac var claimedValueBigInt big.Int proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt) + claimedValueG1Aff.ScalarMulUnconverted(&srs.G1[0], &claimedValueBigInt) // [f(α) - f(a)]G₁ - var fminusfaG1Jac, tmpG1Jac bls24317.G1Jac + var fminusfaG1Jac bls24317.G1Jac fminusfaG1Jac.FromAffine(commitment) - tmpG1Jac.FromAffine(&claimedValueG1Aff) - fminusfaG1Jac.SubAssign(&tmpG1Jac) + fminusfaG1Jac.SubAssign(&claimedValueG1Aff) // [-H(α)]G₁ var negH bls24317.G1Affine @@ -190,7 +189,7 @@ func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) point.ToBigIntRegular(&pointBigInt) genG2Jac.FromAffine(&srs.G2[0]) alphaG2Jac.FromAffine(&srs.G2[1]) - alphaMinusaG2Jac.ScalarMultiplication(&genG2Jac, &pointBigInt). + alphaMinusaG2Jac.ScalarMul(&genG2Jac, &pointBigInt). Neg(&alphaMinusaG2Jac). AddAssign(&alphaG2Jac) @@ -419,7 +418,7 @@ func BatchVerifyMultiPoints(digests []Digest, proofs []OpeningProof, points []fr var foldedEvalsCommit bls24317.G1Affine var foldedEvalsBigInt big.Int foldedEvals.ToBigIntRegular(&foldedEvalsBigInt) - foldedEvalsCommit.ScalarMultiplication(&srs.G1[0], &foldedEvalsBigInt) + foldedEvalsCommit.ScalarMul(&srs.G1[0], &foldedEvalsBigInt) // compute foldedDigests = ∑ᵢλᵢ[fᵢ(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁ foldedDigests.Sub(&foldedDigests, &foldedEvalsCommit) diff --git a/ecc/bls24-317/fr/kzg/kzg_test.go b/ecc/bls24-317/fr/kzg/kzg_test.go index 907c67c85..c12e95e0d 100644 --- a/ecc/bls24-317/fr/kzg/kzg_test.go +++ b/ecc/bls24-317/fr/kzg/kzg_test.go @@ -130,7 +130,7 @@ func TestCommit(t *testing.T) { fx.ToBigIntRegular(&fxbi) var manualCommit bls24317.G1Affine manualCommit.Set(&testSRS.G1[0]) - manualCommit.ScalarMultiplication(&manualCommit, &fxbi) + manualCommit.ScalarMul(&manualCommit, &fxbi) // compare both results if !kzgCommit.Equal(&manualCommit) { diff --git a/ecc/bls24-317/fr/plookup/table.go b/ecc/bls24-317/fr/plookup/table.go index bb87525fb..5d079581d 100644 --- a/ecc/bls24-317/fr/plookup/table.go +++ b/ecc/bls24-317/fr/plookup/table.go @@ -209,9 +209,9 @@ func VerifyLookupTables(srs *kzg.SRS, proof ProofLookupTables) error { var blambda big.Int lambda.ToBigIntRegular(&blambda) for i := nbRows - 2; i >= 0; i-- { - comf.ScalarMultiplication(&comf, &blambda). + comf.ScalarMul(&comf, &blambda). Add(&comf, &proof.fs[i]) - comt.ScalarMultiplication(&comt, &blambda). + comt.ScalarMul(&comt, &blambda). 
Add(&comt, &proof.ts[i]) } diff --git a/ecc/bls24-317/g1.go b/ecc/bls24-317/g1.go index 4b316955a..1ffd9e089 100644 --- a/ecc/bls24-317/g1.go +++ b/ecc/bls24-317/g1.go @@ -50,8 +50,8 @@ func (p *G1Affine) Set(a *G1Affine) *G1Affine { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G1Affine) ScalarMul(a *G1Affine, s *big.Int) *G1Affine { var _p G1Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -59,6 +59,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { return p } +// ScalarMulUnconverted computes and returns p = a ⋅ s +// Takes an affine point and returns a Jacobian point (useful for KZG) +func (p *G1Jac) ScalarMulUnconverted(a *G1Affine, s *big.Int) *G1Jac { + p.FromAffine(a) + p.mulGLV(p, s) + return p +} + // Add adds two point in affine coordinates. // This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { @@ -323,9 +331,9 @@ func (p *G1Jac) DoubleAssign() *G1Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { +func (p *G1Jac) ScalarMul(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } @@ -374,10 +382,10 @@ func (p *G1Jac) IsInSubGroup() bool { var res G1Jac res.phi(p). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). 
AddAssign(p) return res.IsOnCurve() && res.Z.IsZero() @@ -508,7 +516,7 @@ func (p *G1Affine) ClearCofactor(a *G1Affine) *G1Affine { func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { // cf https://eprint.iacr.org/2019/403.pdf, 5 var res G1Jac - res.ScalarMultiplication(a, &xGen).Neg(&res).AddAssign(a) + res.ScalarMul(a, &xGen).Neg(&res).AddAssign(a) p.Set(&res) return p @@ -863,10 +871,10 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) { } -// BatchScalarMultiplicationG1 multiplies the same base by all scalars +// BatchScalarMulG1 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affine { +func BatchScalarMulG1(base *G1Affine, scalars []fr.Element) []G1Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bls24-317/g1_test.go b/ecc/bls24-317/g1_test.go index 2f529cb41..126af8cca 100644 --- a/ecc/bls24-317/g1_test.go +++ b/ecc/bls24-317/g1_test.go @@ -110,7 +110,7 @@ func TestG1AffineIsOnCurve(t *testing.T) { var op1, op2 G1Jac op1 = fuzzG1Jac(&g1Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenFp(), @@ -353,12 +353,12 @@ func TestG1AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G1Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g1Gen, &rminusone) + op3.ScalarMul(&g1Gen, &rminusone) gneg.Neg(&g1Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g1Gen, &scalar) - op2.ScalarMultiplication(&g1Gen, &blindedScalar) + op1.ScalarMul(&g1Gen, &scalar) + op2.ScalarMul(&g1Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g1Infinity) && !op1.Equal(&g1Infinity) && gneg.Equal(&op3) @@ -422,7 +422,7 @@ func TestG1AffineCofactorCleaning(t *testing.T) { } -func TestG1AffineBatchScalarMultiplication(t *testing.T) { +func TestG1AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -438,7 +438,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BLS24-317] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BLS24-317] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -449,7 +449,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:]) + result := BatchScalarMulG1(&g1GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -508,7 +508,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:using]) + _ = BatchScalarMulG1(&g1GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bls24-317/g2.go b/ecc/bls24-317/g2.go index 683e112cb..e82dac4b3 100644 --- a/ecc/bls24-317/g2.go +++ b/ecc/bls24-317/g2.go @@ -55,8 +55,8 @@ func (p *G2Affine) Set(a *G2Affine) *G2Affine { return p } -// 
ScalarMultiplication computes and returns p = a ⋅ s -func (p *G2Affine) ScalarMultiplication(a *G2Affine, s *big.Int) *G2Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G2Affine) ScalarMul(a *G2Affine, s *big.Int) *G2Affine { var _p G2Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -328,9 +328,9 @@ func (p *G2Jac) DoubleAssign() *G2Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { +func (p *G2Jac) ScalarMul(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } @@ -375,7 +375,7 @@ func (p *G2Jac) IsOnCurve() bool { func (p *G2Jac) IsInSubGroup() bool { var res, tmp G2Jac tmp.psi(p) - res.ScalarMultiplication(p, &xGen). + res.ScalarMul(p, &xGen). SubAssign(&tmp) return res.IsOnCurve() && res.Z.IsZero() @@ -517,10 +517,10 @@ func (p *G2Jac) ClearCofactor(a *G2Jac) *G2Jac { // multiply by (3x⁴-3)*cofacor var xg, xxg, xxxg, xxxxg, res, t G2Jac - xg.ScalarMultiplication(a, &xGen).SubAssign(a) - xxg.ScalarMultiplication(&xg, &xGen) - xxxg.ScalarMultiplication(&xxg, &xGen) - xxxxg.ScalarMultiplication(&xxxg, &xGen) + xg.ScalarMul(a, &xGen).SubAssign(a) + xxg.ScalarMul(&xg, &xGen) + xxxg.ScalarMul(&xxg, &xGen) + xxxxg.ScalarMul(&xxxg, &xGen) res.Set(&xxxxg). SubAssign(a) @@ -884,10 +884,10 @@ func (p *g2Proj) FromAffine(Q *G2Affine) *g2Proj { return p } -// BatchScalarMultiplicationG2 multiplies the same base by all scalars +// BatchScalarMulG2 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG2(base *G2Affine, scalars []fr.Element) []G2Affine { +func BatchScalarMulG2(base *G2Affine, scalars []fr.Element) []G2Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bls24-317/g2_test.go b/ecc/bls24-317/g2_test.go index 0fb3b5af2..affa8fe55 100644 --- a/ecc/bls24-317/g2_test.go +++ b/ecc/bls24-317/g2_test.go @@ -124,7 +124,7 @@ func TestG2AffineIsOnCurve(t *testing.T) { var op1, op2 G2Jac op1 = fuzzG2Jac(&g2Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenE4(), @@ -375,12 +375,12 @@ func TestG2AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G2Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g2Gen, &rminusone) + op3.ScalarMul(&g2Gen, &rminusone) gneg.Neg(&g2Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g2Gen, &scalar) - op2.ScalarMultiplication(&g2Gen, &blindedScalar) + op1.ScalarMul(&g2Gen, &scalar) + op2.ScalarMul(&g2Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g2Infinity) && !op1.Equal(&g2Infinity) && gneg.Equal(&op3) @@ -441,7 +441,7 @@ func TestG2AffineCofactorCleaning(t *testing.T) { } -func TestG2AffineBatchScalarMultiplication(t *testing.T) { +func TestG2AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -457,7 +457,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BLS24-317] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BLS24-317] BatchScalarMul 
should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -468,7 +468,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:]) + result := BatchScalarMulG2(&g2GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -527,7 +527,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:using]) + _ = BatchScalarMulG2(&g2GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bls24-317/marshal_test.go b/ecc/bls24-317/marshal_test.go index f4934f6fb..a3f609e17 100644 --- a/ecc/bls24-317/marshal_test.go +++ b/ecc/bls24-317/marshal_test.go @@ -55,9 +55,9 @@ func TestEncoder(t *testing.T) { inA = rand.Uint64() inB.SetRandom() inC.SetRandom() - inD.ScalarMultiplication(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) + inD.ScalarMul(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) // inE --> infinity - inF.ScalarMultiplication(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) + inF.ScalarMul(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) inG = make([]G1Affine, 2) inH = make([]G2Affine, 0) inG[1] = inD @@ -263,7 +263,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -283,7 +283,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) @@ -356,7 +356,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -376,7 +376,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) diff --git a/ecc/bls24-317/multiexp_test.go b/ecc/bls24-317/multiexp_test.go index d7906d96d..7d880ddb7 100644 --- a/ecc/bls24-317/multiexp_test.go +++ b/ecc/bls24-317/multiexp_test.go @@ -100,7 +100,7 @@ func TestMultiExpG1(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g1Gen, &finalScalar) + expected.ScalarMul(&g1Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -150,7 +150,7 @@ func TestMultiExpG1(t *testing.T) { var op1ScalarMul G1Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g1GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -249,7 +249,7 @@ func BenchmarkManyMultiExpG1Reference(b *testing.B) { func fillBenchBasesG1(samplePoints []G1Affine) { var r 
big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() @@ -330,7 +330,7 @@ func TestMultiExpG2(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g2Gen, &finalScalar) + expected.ScalarMul(&g2Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -380,7 +380,7 @@ func TestMultiExpG2(t *testing.T) { var op1ScalarMul G2Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g2GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -479,7 +479,7 @@ func BenchmarkManyMultiExpG2Reference(b *testing.B) { func fillBenchBasesG2(samplePoints []G2Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() diff --git a/ecc/bls24-317/pairing_test.go b/ecc/bls24-317/pairing_test.go index 23e7792c8..f496bf084 100644 --- a/ecc/bls24-317/pairing_test.go +++ b/ecc/bls24-317/pairing_test.go @@ -121,8 +121,8 @@ func TestPairing(t *testing.T) { b.ToBigIntRegular(&bbigint) ab.Mul(&abigint, &bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ = Pair([]G1Affine{g1GenAff}, []G2Affine{g2GenAff}) resa, _ = Pair([]G1Affine{ag1}, []G2Affine{g2GenAff}) @@ -186,8 +186,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) P0 := []G1Affine{g1GenAff} P1 := []G1Affine{ag1} @@ -229,8 +229,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) g1Inf.FromJacobian(&g1Infinity) g2Inf.FromJacobian(&g2Infinity) @@ -267,8 +267,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ := Pair([]G1Affine{ag1}, []G2Affine{bg2}) diff --git a/ecc/bn254/fr/kzg/kzg.go b/ecc/bn254/fr/kzg/kzg.go index a0ba3aed5..d948c5c56 100644 --- a/ecc/bn254/fr/kzg/kzg.go +++ b/ecc/bn254/fr/kzg/kzg.go @@ -77,7 +77,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { _, _, gen1Aff, gen2Aff := bn254.Generators() srs.G1[0] = gen1Aff srs.G2[0] = gen2Aff - srs.G2[1].ScalarMultiplication(&gen2Aff, bAlpha) + srs.G2[1].ScalarMul(&gen2Aff, bAlpha) alphas := make([]fr.Element, size-1) alphas[0] = alpha @@ -87,7 +87,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { for i := 0; i < len(alphas); i++ { alphas[i].FromMont() } - g1s := bn254.BatchScalarMultiplicationG1(&gen1Aff, 
alphas) + g1s := bn254.BatchScalarMulG1(&gen1Aff, alphas) copy(srs.G1[1:], g1s) return &srs, nil @@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) { func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error { // [f(a)]G₁ - var claimedValueG1Aff bn254.G1Affine + var claimedValueG1Aff bn254.G1Jac var claimedValueBigInt big.Int proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt) + claimedValueG1Aff.ScalarMulUnconverted(&srs.G1[0], &claimedValueBigInt) // [f(α) - f(a)]G₁ - var fminusfaG1Jac, tmpG1Jac bn254.G1Jac + var fminusfaG1Jac bn254.G1Jac fminusfaG1Jac.FromAffine(commitment) - tmpG1Jac.FromAffine(&claimedValueG1Aff) - fminusfaG1Jac.SubAssign(&tmpG1Jac) + fminusfaG1Jac.SubAssign(&claimedValueG1Aff) // [-H(α)]G₁ var negH bn254.G1Affine @@ -190,7 +189,7 @@ func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) point.ToBigIntRegular(&pointBigInt) genG2Jac.FromAffine(&srs.G2[0]) alphaG2Jac.FromAffine(&srs.G2[1]) - alphaMinusaG2Jac.ScalarMultiplication(&genG2Jac, &pointBigInt). + alphaMinusaG2Jac.ScalarMul(&genG2Jac, &pointBigInt). Neg(&alphaMinusaG2Jac). AddAssign(&alphaG2Jac) @@ -419,7 +418,7 @@ func BatchVerifyMultiPoints(digests []Digest, proofs []OpeningProof, points []fr var foldedEvalsCommit bn254.G1Affine var foldedEvalsBigInt big.Int foldedEvals.ToBigIntRegular(&foldedEvalsBigInt) - foldedEvalsCommit.ScalarMultiplication(&srs.G1[0], &foldedEvalsBigInt) + foldedEvalsCommit.ScalarMul(&srs.G1[0], &foldedEvalsBigInt) // compute foldedDigests = ∑ᵢλᵢ[fᵢ(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁ foldedDigests.Sub(&foldedDigests, &foldedEvalsCommit) diff --git a/ecc/bn254/fr/kzg/kzg_test.go b/ecc/bn254/fr/kzg/kzg_test.go index 42ad48388..8e5e1d48a 100644 --- a/ecc/bn254/fr/kzg/kzg_test.go +++ b/ecc/bn254/fr/kzg/kzg_test.go @@ -130,7 +130,7 @@ func TestCommit(t *testing.T) { fx.ToBigIntRegular(&fxbi) var manualCommit bn254.G1Affine manualCommit.Set(&testSRS.G1[0]) - manualCommit.ScalarMultiplication(&manualCommit, &fxbi) + manualCommit.ScalarMul(&manualCommit, &fxbi) // compare both results if !kzgCommit.Equal(&manualCommit) { diff --git a/ecc/bn254/fr/plookup/table.go b/ecc/bn254/fr/plookup/table.go index bb2d7d6b0..9d7292184 100644 --- a/ecc/bn254/fr/plookup/table.go +++ b/ecc/bn254/fr/plookup/table.go @@ -209,9 +209,9 @@ func VerifyLookupTables(srs *kzg.SRS, proof ProofLookupTables) error { var blambda big.Int lambda.ToBigIntRegular(&blambda) for i := nbRows - 2; i >= 0; i-- { - comf.ScalarMultiplication(&comf, &blambda). + comf.ScalarMul(&comf, &blambda). Add(&comf, &proof.fs[i]) - comt.ScalarMultiplication(&comt, &blambda). + comt.ScalarMul(&comt, &blambda). 
Add(&comt, &proof.ts[i]) } diff --git a/ecc/bn254/g1.go b/ecc/bn254/g1.go index ee39596f1..cd8526b75 100644 --- a/ecc/bn254/g1.go +++ b/ecc/bn254/g1.go @@ -50,8 +50,8 @@ func (p *G1Affine) Set(a *G1Affine) *G1Affine { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G1Affine) ScalarMul(a *G1Affine, s *big.Int) *G1Affine { var _p G1Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -59,6 +59,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { return p } +// ScalarMulUnconverted computes and returns p = a ⋅ s +// Takes an affine point and returns a Jacobian point (useful for KZG) +func (p *G1Jac) ScalarMulUnconverted(a *G1Affine, s *big.Int) *G1Jac { + p.FromAffine(a) + p.mulGLV(p, s) + return p +} + // Add adds two point in affine coordinates. // This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { @@ -323,9 +331,9 @@ func (p *G1Jac) DoubleAssign() *G1Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { +func (p *G1Jac) ScalarMul(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } @@ -833,10 +841,10 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) { } -// BatchScalarMultiplicationG1 multiplies the same base by all scalars +// BatchScalarMulG1 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affine { +func BatchScalarMulG1(base *G1Affine, scalars []fr.Element) []G1Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bn254/g1_test.go b/ecc/bn254/g1_test.go index 4ab714837..e39be9f95 100644 --- a/ecc/bn254/g1_test.go +++ b/ecc/bn254/g1_test.go @@ -110,7 +110,7 @@ func TestG1AffineIsOnCurve(t *testing.T) { var op1, op2 G1Jac op1 = fuzzG1Jac(&g1Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenFp(), @@ -353,12 +353,12 @@ func TestG1AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G1Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g1Gen, &rminusone) + op3.ScalarMul(&g1Gen, &rminusone) gneg.Neg(&g1Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g1Gen, &scalar) - op2.ScalarMultiplication(&g1Gen, &blindedScalar) + op1.ScalarMul(&g1Gen, &scalar) + op2.ScalarMul(&g1Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g1Infinity) && !op1.Equal(&g1Infinity) && gneg.Equal(&op3) @@ -383,7 +383,7 @@ func TestG1AffineOps(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } -func TestG1AffineBatchScalarMultiplication(t *testing.T) { +func TestG1AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -399,7 +399,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BN254] BatchScalarMultiplication should be consistent with individual scalar 
multiplications", prop.ForAll( + properties.Property("[BN254] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -410,7 +410,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:]) + result := BatchScalarMulG1(&g1GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -469,7 +469,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:using]) + _ = BatchScalarMulG1(&g1GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bn254/g2.go b/ecc/bn254/g2.go index 1939b3f32..9e5c6dd67 100644 --- a/ecc/bn254/g2.go +++ b/ecc/bn254/g2.go @@ -55,8 +55,8 @@ func (p *G2Affine) Set(a *G2Affine) *G2Affine { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *G2Affine) ScalarMultiplication(a *G2Affine, s *big.Int) *G2Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G2Affine) ScalarMul(a *G2Affine, s *big.Int) *G2Affine { var _p G2Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -328,9 +328,9 @@ func (p *G2Jac) DoubleAssign() *G2Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { +func (p *G2Jac) ScalarMul(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } @@ -374,7 +374,7 @@ func (p *G2Jac) IsOnCurve() bool { func (p *G2Jac) IsInSubGroup() bool { var a, res G2Jac a.psi(p) - res.ScalarMultiplication(p, &fixedCoeff). + res.ScalarMul(p, &fixedCoeff). SubAssign(&a) return res.IsOnCurve() && res.Z.IsZero() @@ -515,7 +515,7 @@ func (p *G2Jac) ClearCofactor(a *G2Jac) *G2Jac { // cf http://cacr.uwaterloo.ca/techreports/2011/cacr2011-26.pdf, 6.1 var points [4]G2Jac - points[0].ScalarMultiplication(a, &xGen) + points[0].ScalarMul(a, &xGen) points[1].Double(&points[0]). AddAssign(&points[0]). 
@@ -867,10 +867,10 @@ func (p *g2Proj) FromAffine(Q *G2Affine) *g2Proj { return p } -// BatchScalarMultiplicationG2 multiplies the same base by all scalars +// BatchScalarMulG2 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG2(base *G2Affine, scalars []fr.Element) []G2Affine { +func BatchScalarMulG2(base *G2Affine, scalars []fr.Element) []G2Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bn254/g2_test.go b/ecc/bn254/g2_test.go index 4f0c11d14..fa17f3635 100644 --- a/ecc/bn254/g2_test.go +++ b/ecc/bn254/g2_test.go @@ -123,7 +123,7 @@ func TestG2AffineIsOnCurve(t *testing.T) { var op1, op2 G2Jac op1 = fuzzG2Jac(&g2Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenE2(), @@ -374,12 +374,12 @@ func TestG2AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G2Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g2Gen, &rminusone) + op3.ScalarMul(&g2Gen, &rminusone) gneg.Neg(&g2Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g2Gen, &scalar) - op2.ScalarMultiplication(&g2Gen, &blindedScalar) + op1.ScalarMul(&g2Gen, &scalar) + op2.ScalarMul(&g2Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g2Infinity) && !op1.Equal(&g2Infinity) && gneg.Equal(&op3) @@ -440,7 +440,7 @@ func TestG2AffineCofactorCleaning(t *testing.T) { } -func TestG2AffineBatchScalarMultiplication(t *testing.T) { +func TestG2AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -456,7 +456,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BN254] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BN254] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -467,7 +467,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:]) + result := BatchScalarMulG2(&g2GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -526,7 +526,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:using]) + _ = BatchScalarMulG2(&g2GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bn254/marshal_test.go b/ecc/bn254/marshal_test.go index 9d95d3f4a..ab1721825 100644 --- a/ecc/bn254/marshal_test.go +++ b/ecc/bn254/marshal_test.go @@ -55,9 +55,9 @@ func TestEncoder(t *testing.T) { inA = rand.Uint64() inB.SetRandom() inC.SetRandom() - inD.ScalarMultiplication(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) + inD.ScalarMul(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) // inE --> infinity - inF.ScalarMultiplication(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) + inF.ScalarMul(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) inG = make([]G1Affine, 2) inH = make([]G2Affine, 0) inG[1] = 
inD @@ -263,7 +263,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -283,7 +283,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) @@ -356,7 +356,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -376,7 +376,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) diff --git a/ecc/bn254/multiexp_test.go b/ecc/bn254/multiexp_test.go index e6d8d92f0..a0f2c6155 100644 --- a/ecc/bn254/multiexp_test.go +++ b/ecc/bn254/multiexp_test.go @@ -100,7 +100,7 @@ func TestMultiExpG1(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g1Gen, &finalScalar) + expected.ScalarMul(&g1Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -150,7 +150,7 @@ func TestMultiExpG1(t *testing.T) { var op1ScalarMul G1Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g1GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -249,7 +249,7 @@ func BenchmarkManyMultiExpG1Reference(b *testing.B) { func fillBenchBasesG1(samplePoints []G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() @@ -330,7 +330,7 @@ func TestMultiExpG2(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g2Gen, &finalScalar) + expected.ScalarMul(&g2Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -380,7 +380,7 @@ func TestMultiExpG2(t *testing.T) { var op1ScalarMul G2Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g2GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -479,7 +479,7 @@ func BenchmarkManyMultiExpG2Reference(b *testing.B) { func fillBenchBasesG2(samplePoints []G2Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() diff --git a/ecc/bn254/pairing_test.go b/ecc/bn254/pairing_test.go index da3325608..f637aae47 100644 --- 
a/ecc/bn254/pairing_test.go +++ b/ecc/bn254/pairing_test.go @@ -120,8 +120,8 @@ func TestPairing(t *testing.T) { b.ToBigIntRegular(&bbigint) ab.Mul(&abigint, &bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ = Pair([]G1Affine{g1GenAff}, []G2Affine{g2GenAff}) resa, _ = Pair([]G1Affine{ag1}, []G2Affine{g2GenAff}) @@ -185,8 +185,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) P0 := []G1Affine{g1GenAff} P1 := []G1Affine{ag1} @@ -228,8 +228,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) g1Inf.FromJacobian(&g1Infinity) g2Inf.FromJacobian(&g2Infinity) @@ -266,8 +266,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ := Pair([]G1Affine{ag1}, []G2Affine{bg2}) diff --git a/ecc/bw6-633/fr/kzg/kzg.go b/ecc/bw6-633/fr/kzg/kzg.go index 7bd1bbdd8..a21ead073 100644 --- a/ecc/bw6-633/fr/kzg/kzg.go +++ b/ecc/bw6-633/fr/kzg/kzg.go @@ -77,7 +77,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { _, _, gen1Aff, gen2Aff := bw6633.Generators() srs.G1[0] = gen1Aff srs.G2[0] = gen2Aff - srs.G2[1].ScalarMultiplication(&gen2Aff, bAlpha) + srs.G2[1].ScalarMul(&gen2Aff, bAlpha) alphas := make([]fr.Element, size-1) alphas[0] = alpha @@ -87,7 +87,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { for i := 0; i < len(alphas); i++ { alphas[i].FromMont() } - g1s := bw6633.BatchScalarMultiplicationG1(&gen1Aff, alphas) + g1s := bw6633.BatchScalarMulG1(&gen1Aff, alphas) copy(srs.G1[1:], g1s) return &srs, nil @@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) { func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error { // [f(a)]G₁ - var claimedValueG1Aff bw6633.G1Affine + var claimedValueG1Aff bw6633.G1Jac var claimedValueBigInt big.Int proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt) + claimedValueG1Aff.ScalarMulUnconverted(&srs.G1[0], &claimedValueBigInt) // [f(α) - f(a)]G₁ - var fminusfaG1Jac, tmpG1Jac bw6633.G1Jac + var fminusfaG1Jac bw6633.G1Jac fminusfaG1Jac.FromAffine(commitment) - tmpG1Jac.FromAffine(&claimedValueG1Aff) - fminusfaG1Jac.SubAssign(&tmpG1Jac) + fminusfaG1Jac.SubAssign(&claimedValueG1Aff) // [-H(α)]G₁ var negH bw6633.G1Affine @@ -190,7 +189,7 @@ func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) point.ToBigIntRegular(&pointBigInt) genG2Jac.FromAffine(&srs.G2[0]) alphaG2Jac.FromAffine(&srs.G2[1]) - alphaMinusaG2Jac.ScalarMultiplication(&genG2Jac, &pointBigInt). + alphaMinusaG2Jac.ScalarMul(&genG2Jac, &pointBigInt). Neg(&alphaMinusaG2Jac). 
AddAssign(&alphaG2Jac) @@ -419,7 +418,7 @@ func BatchVerifyMultiPoints(digests []Digest, proofs []OpeningProof, points []fr var foldedEvalsCommit bw6633.G1Affine var foldedEvalsBigInt big.Int foldedEvals.ToBigIntRegular(&foldedEvalsBigInt) - foldedEvalsCommit.ScalarMultiplication(&srs.G1[0], &foldedEvalsBigInt) + foldedEvalsCommit.ScalarMul(&srs.G1[0], &foldedEvalsBigInt) // compute foldedDigests = ∑ᵢλᵢ[fᵢ(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁ foldedDigests.Sub(&foldedDigests, &foldedEvalsCommit) diff --git a/ecc/bw6-633/fr/kzg/kzg_test.go b/ecc/bw6-633/fr/kzg/kzg_test.go index 32acddce5..9ba4c39a6 100644 --- a/ecc/bw6-633/fr/kzg/kzg_test.go +++ b/ecc/bw6-633/fr/kzg/kzg_test.go @@ -130,7 +130,7 @@ func TestCommit(t *testing.T) { fx.ToBigIntRegular(&fxbi) var manualCommit bw6633.G1Affine manualCommit.Set(&testSRS.G1[0]) - manualCommit.ScalarMultiplication(&manualCommit, &fxbi) + manualCommit.ScalarMul(&manualCommit, &fxbi) // compare both results if !kzgCommit.Equal(&manualCommit) { diff --git a/ecc/bw6-633/fr/plookup/table.go b/ecc/bw6-633/fr/plookup/table.go index dd6b02ba7..7fd1f044d 100644 --- a/ecc/bw6-633/fr/plookup/table.go +++ b/ecc/bw6-633/fr/plookup/table.go @@ -209,9 +209,9 @@ func VerifyLookupTables(srs *kzg.SRS, proof ProofLookupTables) error { var blambda big.Int lambda.ToBigIntRegular(&blambda) for i := nbRows - 2; i >= 0; i-- { - comf.ScalarMultiplication(&comf, &blambda). + comf.ScalarMul(&comf, &blambda). Add(&comf, &proof.fs[i]) - comt.ScalarMultiplication(&comt, &blambda). + comt.ScalarMul(&comt, &blambda). Add(&comt, &proof.ts[i]) } diff --git a/ecc/bw6-633/g1.go b/ecc/bw6-633/g1.go index 49115911f..9340616d1 100644 --- a/ecc/bw6-633/g1.go +++ b/ecc/bw6-633/g1.go @@ -55,8 +55,8 @@ func (p *G1Affine) Set(a *G1Affine) *G1Affine { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G1Affine) ScalarMul(a *G1Affine, s *big.Int) *G1Affine { var _p G1Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -64,6 +64,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { return p } +// ScalarMulUnconverted computes and returns p = a ⋅ s +// Takes an affine point and returns a Jacobian point (useful for KZG) +func (p *G1Jac) ScalarMulUnconverted(a *G1Affine, s *big.Int) *G1Jac { + p.FromAffine(a) + p.mulGLV(p, s) + return p +} + // Add adds two point in affine coordinates. // This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { @@ -328,9 +336,9 @@ func (p *G1Jac) DoubleAssign() *G1Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { +func (p *G1Jac) ScalarMul(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } @@ -374,11 +382,11 @@ func (p *G1Jac) IsOnCurve() bool { func (p *G1Jac) IsInSubGroup() bool { var uP, u4P, u5P, q, r G1Jac - uP.ScalarMultiplication(p, &xGen) - u4P.ScalarMultiplication(&uP, &xGen). - ScalarMultiplication(&u4P, &xGen). - ScalarMultiplication(&u4P, &xGen) - u5P.ScalarMultiplication(&u4P, &xGen) + uP.ScalarMul(p, &xGen) + u4P.ScalarMul(&uP, &xGen). + ScalarMul(&u4P, &xGen). + ScalarMul(&u4P, &xGen) + u5P.ScalarMul(&u4P, &xGen) q.Set(p).SubAssign(&uP) r.phi(&q).SubAssign(&uP). AddAssign(&u4P). 
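The renamed test assertions above all lean on the same group-order invariant: for a point in the r-torsion subgroup, [r]P is the point at infinity, which Jacobian coordinates encode as Z = 0. A minimal standalone sketch against the bw6-633 package, assuming the renamed methods from this diff:

import (
	bw6633 "github.com/consensys/gnark-crypto/ecc/bw6-633"
	"github.com/consensys/gnark-crypto/ecc/bw6-633/fr"
)

func checkOrder() bool {
	_, _, g1Aff, _ := bw6633.Generators()

	var p, rP bw6633.G1Jac
	p.FromAffine(&g1Aff)

	// [r]P for a subgroup point is the point at infinity, i.e. Z == 0
	// in Jacobian form; this is exactly what the tests above assert.
	rP.ScalarMul(&p, fr.Modulus())

	return p.IsInSubGroup() && rP.Z.IsZero()
}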
@@ -519,20 +527,20 @@ func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { ht.SetInt64(7) v.Mul(&xGen, &xGen).Add(&v, &one).Mul(&v, &uPlusOne) - uP.ScalarMultiplication(a, &xGen).Neg(&uP) + uP.ScalarMul(a, &xGen).Neg(&uP) vP.Set(a).SubAssign(&uP). - ScalarMultiplication(&vP, &v) - wP.ScalarMultiplication(&vP, &uMinusOne).Neg(&wP). + ScalarMul(&vP, &v) + wP.ScalarMul(&vP, &uMinusOne).Neg(&wP). AddAssign(&uP) - L0.ScalarMultiplication(&wP, &d1) - tmp.ScalarMultiplication(&vP, &ht) + L0.ScalarMul(&wP, &d1) + tmp.ScalarMul(&vP, &ht) L0.AddAssign(&tmp) tmp.Double(a) L0.AddAssign(&tmp) - L1.Set(&uP).AddAssign(a).ScalarMultiplication(&L1, &d1) - tmp.ScalarMultiplication(&vP, &d2) + L1.Set(&uP).AddAssign(a).ScalarMul(&L1, &d1) + tmp.ScalarMul(&vP, &d2) L1.AddAssign(&tmp) - tmp.ScalarMultiplication(a, &ht) + tmp.ScalarMul(a, &ht) L1.AddAssign(&tmp) p.phi(&L1).AddAssign(&L0) @@ -964,10 +972,10 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) { } -// BatchScalarMultiplicationG1 multiplies the same base by all scalars +// BatchScalarMulG1 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affine { +func BatchScalarMulG1(base *G1Affine, scalars []fr.Element) []G1Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bw6-633/g1_test.go b/ecc/bw6-633/g1_test.go index ba715fa7d..2a0a6880a 100644 --- a/ecc/bw6-633/g1_test.go +++ b/ecc/bw6-633/g1_test.go @@ -110,7 +110,7 @@ func TestG1AffineIsOnCurve(t *testing.T) { var op1, op2 G1Jac op1 = fuzzG1Jac(&g1Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenFp(), @@ -353,12 +353,12 @@ func TestG1AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G1Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g1Gen, &rminusone) + op3.ScalarMul(&g1Gen, &rminusone) gneg.Neg(&g1Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g1Gen, &scalar) - op2.ScalarMultiplication(&g1Gen, &blindedScalar) + op1.ScalarMul(&g1Gen, &scalar) + op2.ScalarMul(&g1Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g1Infinity) && !op1.Equal(&g1Infinity) && gneg.Equal(&op3) @@ -422,7 +422,7 @@ func TestG1AffineCofactorCleaning(t *testing.T) { } -func TestG1AffineBatchScalarMultiplication(t *testing.T) { +func TestG1AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -438,7 +438,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BW6-633] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BW6-633] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -449,7 +449,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:]) + result := BatchScalarMulG1(&g1GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -508,7 +508,7 @@ func 
BenchmarkG1AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:using]) + _ = BatchScalarMulG1(&g1GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bw6-633/g2.go b/ecc/bw6-633/g2.go index 20d3c4b37..24167985c 100644 --- a/ecc/bw6-633/g2.go +++ b/ecc/bw6-633/g2.go @@ -50,8 +50,8 @@ func (p *G2Affine) Set(a *G2Affine) *G2Affine { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *G2Affine) ScalarMultiplication(a *G2Affine, s *big.Int) *G2Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G2Affine) ScalarMul(a *G2Affine, s *big.Int) *G2Affine { var _p G2Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -323,9 +323,9 @@ func (p *G2Jac) DoubleAssign() *G2Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { +func (p *G2Jac) ScalarMul(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } @@ -369,11 +369,11 @@ func (p *G2Jac) IsOnCurve() bool { func (p *G2Jac) IsInSubGroup() bool { var uP, u4P, u5P, q, r G2Jac - uP.ScalarMultiplication(p, &xGen) - u4P.ScalarMultiplication(&uP, &xGen). - ScalarMultiplication(&u4P, &xGen). - ScalarMultiplication(&u4P, &xGen) - u5P.ScalarMultiplication(&u4P, &xGen) + uP.ScalarMul(p, &xGen) + u4P.ScalarMul(&uP, &xGen). + ScalarMul(&u4P, &xGen). + ScalarMul(&u4P, &xGen) + u5P.ScalarMul(&u4P, &xGen) q.Set(p).SubAssign(&uP) r.phi(&q).SubAssign(&uP). AddAssign(&u4P). @@ -510,11 +510,11 @@ func (p *G2Jac) ClearCofactor(a *G2Jac) *G2Jac { d1.SetInt64(13) d3.SetInt64(5) // negative - uP.ScalarMultiplication(a, &xGen) // negative - u2P.ScalarMultiplication(&uP, &xGen) - u3P.ScalarMultiplication(&u2P, &xGen) // negative - u4P.ScalarMultiplication(&u3P, &xGen) - u5P.ScalarMultiplication(&u4P, &xGen) // negative + uP.ScalarMul(a, &xGen) // negative + u2P.ScalarMul(&uP, &xGen) + u3P.ScalarMul(&u2P, &xGen) // negative + u4P.ScalarMul(&u3P, &xGen) + u5P.ScalarMul(&u4P, &xGen) // negative vP.Set(&u2P).AddAssign(&uP). AddAssign(&u3P). Double(&vP). 
@@ -522,15 +522,15 @@ func (p *G2Jac) ClearCofactor(a *G2Jac) *G2Jac { AddAssign(a) wP.Set(&uP).SubAssign(&u4P).SubAssign(&u5P) xP.Set(a).AddAssign(&vP) - L0.Set(&uP).SubAssign(a).ScalarMultiplication(&L0, &d1) - tmp.ScalarMultiplication(&xP, &d3) + L0.Set(&uP).SubAssign(a).ScalarMul(&L0, &d1) + tmp.ScalarMul(&xP, &d3) L0.AddAssign(&tmp) - tmp.ScalarMultiplication(a, &ht) // negative + tmp.ScalarMul(a, &ht) // negative L0.SubAssign(&tmp) - L1.ScalarMultiplication(&wP, &d1) - tmp.ScalarMultiplication(&vP, &ht) + L1.ScalarMul(&wP, &d1) + tmp.ScalarMul(&vP, &ht) L1.AddAssign(&tmp) - tmp.ScalarMultiplication(a, &d3) + tmp.ScalarMul(a, &d3) L1.AddAssign(&tmp) p.phi(&L1).AddAssign(&L0) @@ -840,10 +840,10 @@ func (p *g2JacExtended) doubleMixed(q *G2Affine) *g2JacExtended { return p } -// BatchScalarMultiplicationG2 multiplies the same base by all scalars +// BatchScalarMulG2 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG2(base *G2Affine, scalars []fr.Element) []G2Affine { +func BatchScalarMulG2(base *G2Affine, scalars []fr.Element) []G2Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bw6-633/g2_test.go b/ecc/bw6-633/g2_test.go index dfc232a30..ab3cf38cf 100644 --- a/ecc/bw6-633/g2_test.go +++ b/ecc/bw6-633/g2_test.go @@ -110,7 +110,7 @@ func TestG2AffineIsOnCurve(t *testing.T) { var op1, op2 G2Jac op1 = fuzzG2Jac(&g2Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenFp(), @@ -353,12 +353,12 @@ func TestG2AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G2Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g2Gen, &rminusone) + op3.ScalarMul(&g2Gen, &rminusone) gneg.Neg(&g2Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g2Gen, &scalar) - op2.ScalarMultiplication(&g2Gen, &blindedScalar) + op1.ScalarMul(&g2Gen, &scalar) + op2.ScalarMul(&g2Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g2Infinity) && !op1.Equal(&g2Infinity) && gneg.Equal(&op3) @@ -422,7 +422,7 @@ func TestG2AffineCofactorCleaning(t *testing.T) { } -func TestG2AffineBatchScalarMultiplication(t *testing.T) { +func TestG2AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -438,7 +438,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BW6-633] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BW6-633] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -449,7 +449,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:]) + result := BatchScalarMulG2(&g2GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -508,7 +508,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG2(&g2GenAff, 
sampleScalars[:using]) + _ = BatchScalarMulG2(&g2GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bw6-633/marshal_test.go b/ecc/bw6-633/marshal_test.go index 469356726..c67248d94 100644 --- a/ecc/bw6-633/marshal_test.go +++ b/ecc/bw6-633/marshal_test.go @@ -55,9 +55,9 @@ func TestEncoder(t *testing.T) { inA = rand.Uint64() inB.SetRandom() inC.SetRandom() - inD.ScalarMultiplication(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) + inD.ScalarMul(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) // inE --> infinity - inF.ScalarMultiplication(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) + inF.ScalarMul(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) inG = make([]G1Affine, 2) inH = make([]G2Affine, 0) inG[1] = inD @@ -263,7 +263,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -283,7 +283,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) @@ -356,7 +356,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -376,7 +376,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) diff --git a/ecc/bw6-633/multiexp_test.go b/ecc/bw6-633/multiexp_test.go index d4e68b19c..8c7c72264 100644 --- a/ecc/bw6-633/multiexp_test.go +++ b/ecc/bw6-633/multiexp_test.go @@ -100,7 +100,7 @@ func TestMultiExpG1(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g1Gen, &finalScalar) + expected.ScalarMul(&g1Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -150,7 +150,7 @@ func TestMultiExpG1(t *testing.T) { var op1ScalarMul G1Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g1GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -249,7 +249,7 @@ func BenchmarkManyMultiExpG1Reference(b *testing.B) { func fillBenchBasesG1(samplePoints []G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() @@ -330,7 +330,7 @@ func TestMultiExpG2(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g2Gen, &finalScalar) + expected.ScalarMul(&g2Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -380,7 +380,7 @@ func TestMultiExpG2(t *testing.T) { var op1ScalarMul G2Affine 
finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g2GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -479,7 +479,7 @@ func BenchmarkManyMultiExpG2Reference(b *testing.B) { func fillBenchBasesG2(samplePoints []G2Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() diff --git a/ecc/bw6-633/pairing_test.go b/ecc/bw6-633/pairing_test.go index f8a0f84bb..dc79876a7 100644 --- a/ecc/bw6-633/pairing_test.go +++ b/ecc/bw6-633/pairing_test.go @@ -122,8 +122,8 @@ func TestPairing(t *testing.T) { b.ToBigIntRegular(&bbigint) ab.Mul(&abigint, &bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ = Pair([]G1Affine{g1GenAff}, []G2Affine{g2GenAff}) resa, _ = Pair([]G1Affine{ag1}, []G2Affine{g2GenAff}) @@ -187,8 +187,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) P0 := []G1Affine{g1GenAff} P1 := []G1Affine{ag1} @@ -230,8 +230,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) g1Inf.FromJacobian(&g1Infinity) g2Inf.FromJacobian(&g2Infinity) @@ -268,8 +268,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ := Pair([]G1Affine{ag1}, []G2Affine{bg2}) diff --git a/ecc/bw6-756/fr/kzg/kzg.go b/ecc/bw6-756/fr/kzg/kzg.go index 4a5c11260..7321c1d84 100644 --- a/ecc/bw6-756/fr/kzg/kzg.go +++ b/ecc/bw6-756/fr/kzg/kzg.go @@ -77,7 +77,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { _, _, gen1Aff, gen2Aff := bw6756.Generators() srs.G1[0] = gen1Aff srs.G2[0] = gen2Aff - srs.G2[1].ScalarMultiplication(&gen2Aff, bAlpha) + srs.G2[1].ScalarMul(&gen2Aff, bAlpha) alphas := make([]fr.Element, size-1) alphas[0] = alpha @@ -87,7 +87,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { for i := 0; i < len(alphas); i++ { alphas[i].FromMont() } - g1s := bw6756.BatchScalarMultiplicationG1(&gen1Aff, alphas) + g1s := bw6756.BatchScalarMulG1(&gen1Aff, alphas) copy(srs.G1[1:], g1s) return &srs, nil @@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) { func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error { // [f(a)]G₁ - var claimedValueG1Aff bw6756.G1Affine + var claimedValueG1Aff bw6756.G1Jac var claimedValueBigInt big.Int proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt) + claimedValueG1Aff.ScalarMulUnconverted(&srs.G1[0], &claimedValueBigInt) // [f(α) - f(a)]G₁ - var fminusfaG1Jac, tmpG1Jac bw6756.G1Jac + var fminusfaG1Jac 
bw6756.G1Jac fminusfaG1Jac.FromAffine(commitment) - tmpG1Jac.FromAffine(&claimedValueG1Aff) - fminusfaG1Jac.SubAssign(&tmpG1Jac) + fminusfaG1Jac.SubAssign(&claimedValueG1Aff) // [-H(α)]G₁ var negH bw6756.G1Affine @@ -190,7 +189,7 @@ func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) point.ToBigIntRegular(&pointBigInt) genG2Jac.FromAffine(&srs.G2[0]) alphaG2Jac.FromAffine(&srs.G2[1]) - alphaMinusaG2Jac.ScalarMultiplication(&genG2Jac, &pointBigInt). + alphaMinusaG2Jac.ScalarMul(&genG2Jac, &pointBigInt). Neg(&alphaMinusaG2Jac). AddAssign(&alphaG2Jac) @@ -419,7 +418,7 @@ func BatchVerifyMultiPoints(digests []Digest, proofs []OpeningProof, points []fr var foldedEvalsCommit bw6756.G1Affine var foldedEvalsBigInt big.Int foldedEvals.ToBigIntRegular(&foldedEvalsBigInt) - foldedEvalsCommit.ScalarMultiplication(&srs.G1[0], &foldedEvalsBigInt) + foldedEvalsCommit.ScalarMul(&srs.G1[0], &foldedEvalsBigInt) // compute foldedDigests = ∑ᵢλᵢ[fᵢ(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁ foldedDigests.Sub(&foldedDigests, &foldedEvalsCommit) diff --git a/ecc/bw6-756/fr/kzg/kzg_test.go b/ecc/bw6-756/fr/kzg/kzg_test.go index b644f7f0c..0a09eaf0c 100644 --- a/ecc/bw6-756/fr/kzg/kzg_test.go +++ b/ecc/bw6-756/fr/kzg/kzg_test.go @@ -130,7 +130,7 @@ func TestCommit(t *testing.T) { fx.ToBigIntRegular(&fxbi) var manualCommit bw6756.G1Affine manualCommit.Set(&testSRS.G1[0]) - manualCommit.ScalarMultiplication(&manualCommit, &fxbi) + manualCommit.ScalarMul(&manualCommit, &fxbi) // compare both results if !kzgCommit.Equal(&manualCommit) { diff --git a/ecc/bw6-756/fr/plookup/table.go b/ecc/bw6-756/fr/plookup/table.go index 318fe646b..e495c1591 100644 --- a/ecc/bw6-756/fr/plookup/table.go +++ b/ecc/bw6-756/fr/plookup/table.go @@ -209,9 +209,9 @@ func VerifyLookupTables(srs *kzg.SRS, proof ProofLookupTables) error { var blambda big.Int lambda.ToBigIntRegular(&blambda) for i := nbRows - 2; i >= 0; i-- { - comf.ScalarMultiplication(&comf, &blambda). + comf.ScalarMul(&comf, &blambda). Add(&comf, &proof.fs[i]) - comt.ScalarMultiplication(&comt, &blambda). + comt.ScalarMul(&comt, &blambda). Add(&comt, &proof.ts[i]) } diff --git a/ecc/bw6-756/g1.go b/ecc/bw6-756/g1.go index 179139b66..83f973c9a 100644 --- a/ecc/bw6-756/g1.go +++ b/ecc/bw6-756/g1.go @@ -55,8 +55,8 @@ func (p *G1Affine) Set(a *G1Affine) *G1Affine { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G1Affine) ScalarMul(a *G1Affine, s *big.Int) *G1Affine { var _p G1Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -64,6 +64,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { return p } +// ScalarMulUnconverted computes and returns p = a ⋅ s +// Takes an affine point and returns a Jacobian point (useful for KZG) +func (p *G1Jac) ScalarMulUnconverted(a *G1Affine, s *big.Int) *G1Jac { + p.FromAffine(a) + p.mulGLV(p, s) + return p +} + // Add adds two point in affine coordinates. 
// This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { @@ -328,9 +336,9 @@ func (p *G1Jac) DoubleAssign() *G1Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { +func (p *G1Jac) ScalarMul(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } @@ -379,13 +387,13 @@ func (p *G1Jac) IsInSubGroup() bool { var res, phip G1Jac phip.phi(p) - res.ScalarMultiplication(&phip, &xGen). + res.ScalarMul(&phip, &xGen). SubAssign(&phip). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). AddAssign(&phip) - phip.ScalarMultiplication(p, &xGen).AddAssign(p).AddAssign(&res) + phip.ScalarMul(p, &xGen).AddAssign(p).AddAssign(&res) return phip.IsOnCurve() && phip.Z.IsZero() @@ -515,9 +523,9 @@ func (p *G1Affine) ClearCofactor(a *G1Affine) *G1Affine { func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { var L0, L1, uP, u2P, u3P, tmp G1Jac - uP.ScalarMultiplication(a, &xGen) - u2P.ScalarMultiplication(&uP, &xGen) - u3P.ScalarMultiplication(&u2P, &xGen) + uP.ScalarMul(a, &xGen) + u2P.ScalarMul(&uP, &xGen) + u3P.ScalarMul(&u2P, &xGen) L0.Set(a).AddAssign(&u3P). SubAssign(&u2P) @@ -964,10 +972,10 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) { } -// BatchScalarMultiplicationG1 multiplies the same base by all scalars +// BatchScalarMulG1 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affine { +func BatchScalarMulG1(base *G1Affine, scalars []fr.Element) []G1Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bw6-756/g1_test.go b/ecc/bw6-756/g1_test.go index 27a3bbbca..1de9a64dd 100644 --- a/ecc/bw6-756/g1_test.go +++ b/ecc/bw6-756/g1_test.go @@ -110,7 +110,7 @@ func TestG1AffineIsOnCurve(t *testing.T) { var op1, op2 G1Jac op1 = fuzzG1Jac(&g1Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenFp(), @@ -353,12 +353,12 @@ func TestG1AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G1Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g1Gen, &rminusone) + op3.ScalarMul(&g1Gen, &rminusone) gneg.Neg(&g1Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g1Gen, &scalar) - op2.ScalarMultiplication(&g1Gen, &blindedScalar) + op1.ScalarMul(&g1Gen, &scalar) + op2.ScalarMul(&g1Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g1Infinity) && !op1.Equal(&g1Infinity) && gneg.Equal(&op3) @@ -422,7 +422,7 @@ func TestG1AffineCofactorCleaning(t *testing.T) { } -func TestG1AffineBatchScalarMultiplication(t *testing.T) { +func TestG1AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -438,7 +438,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BW6-756] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BW6-756] 
BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -449,7 +449,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:]) + result := BatchScalarMulG1(&g1GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -508,7 +508,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:using]) + _ = BatchScalarMulG1(&g1GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bw6-756/g2.go b/ecc/bw6-756/g2.go index fbcd9c4ee..5d5c8c25d 100644 --- a/ecc/bw6-756/g2.go +++ b/ecc/bw6-756/g2.go @@ -50,8 +50,8 @@ func (p *G2Affine) Set(a *G2Affine) *G2Affine { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *G2Affine) ScalarMultiplication(a *G2Affine, s *big.Int) *G2Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G2Affine) ScalarMul(a *G2Affine, s *big.Int) *G2Affine { var _p G2Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -323,9 +323,9 @@ func (p *G2Jac) DoubleAssign() *G2Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { +func (p *G2Jac) ScalarMul(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } @@ -374,13 +374,13 @@ func (p *G2Jac) IsInSubGroup() bool { var res, phip G2Jac phip.phi(p) - res.ScalarMultiplication(&phip, &xGen). + res.ScalarMul(&phip, &xGen). SubAssign(&phip). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). 
AddAssign(&phip) - phip.ScalarMultiplication(p, &xGen).AddAssign(p).AddAssign(&res) + phip.ScalarMul(p, &xGen).AddAssign(p).AddAssign(&res) return phip.IsOnCurve() && phip.Z.IsZero() @@ -511,9 +511,9 @@ func (p *G2Jac) ClearCofactor(a *G2Jac) *G2Jac { var L0, L1, uP, u2P, u3P, tmp G2Jac - uP.ScalarMultiplication(a, &xGen) - u2P.ScalarMultiplication(&uP, &xGen) - u3P.ScalarMultiplication(&u2P, &xGen) + uP.ScalarMul(a, &xGen) + u2P.ScalarMul(&uP, &xGen) + u3P.ScalarMul(&u2P, &xGen) // ht=-2, hy=0 // d1=1, d2=-1, d3=-1 @@ -834,10 +834,10 @@ func (p *g2JacExtended) doubleMixed(q *G2Affine) *g2JacExtended { return p } -// BatchScalarMultiplicationG2 multiplies the same base by all scalars +// BatchScalarMulG2 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG2(base *G2Affine, scalars []fr.Element) []G2Affine { +func BatchScalarMulG2(base *G2Affine, scalars []fr.Element) []G2Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bw6-756/g2_test.go b/ecc/bw6-756/g2_test.go index a0bb5b030..181945d6b 100644 --- a/ecc/bw6-756/g2_test.go +++ b/ecc/bw6-756/g2_test.go @@ -110,7 +110,7 @@ func TestG2AffineIsOnCurve(t *testing.T) { var op1, op2 G2Jac op1 = fuzzG2Jac(&g2Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenFp(), @@ -353,12 +353,12 @@ func TestG2AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G2Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g2Gen, &rminusone) + op3.ScalarMul(&g2Gen, &rminusone) gneg.Neg(&g2Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g2Gen, &scalar) - op2.ScalarMultiplication(&g2Gen, &blindedScalar) + op1.ScalarMul(&g2Gen, &scalar) + op2.ScalarMul(&g2Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g2Infinity) && !op1.Equal(&g2Infinity) && gneg.Equal(&op3) @@ -422,7 +422,7 @@ func TestG2AffineCofactorCleaning(t *testing.T) { } -func TestG2AffineBatchScalarMultiplication(t *testing.T) { +func TestG2AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -438,7 +438,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BW6-756] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BW6-756] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -449,7 +449,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:]) + result := BatchScalarMulG2(&g2GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -508,7 +508,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:using]) + _ = BatchScalarMulG2(&g2GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bw6-756/marshal_test.go b/ecc/bw6-756/marshal_test.go index 
ca8954523..403dc8c69 100644 --- a/ecc/bw6-756/marshal_test.go +++ b/ecc/bw6-756/marshal_test.go @@ -55,9 +55,9 @@ func TestEncoder(t *testing.T) { inA = rand.Uint64() inB.SetRandom() inC.SetRandom() - inD.ScalarMultiplication(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) + inD.ScalarMul(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) // inE --> infinity - inF.ScalarMultiplication(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) + inF.ScalarMul(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) inG = make([]G1Affine, 2) inH = make([]G2Affine, 0) inG[1] = inD @@ -263,7 +263,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -283,7 +283,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) @@ -356,7 +356,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -376,7 +376,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) diff --git a/ecc/bw6-756/multiexp_test.go b/ecc/bw6-756/multiexp_test.go index 584f1d296..f5b7f6a40 100644 --- a/ecc/bw6-756/multiexp_test.go +++ b/ecc/bw6-756/multiexp_test.go @@ -100,7 +100,7 @@ func TestMultiExpG1(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g1Gen, &finalScalar) + expected.ScalarMul(&g1Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -150,7 +150,7 @@ func TestMultiExpG1(t *testing.T) { var op1ScalarMul G1Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g1GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -249,7 +249,7 @@ func BenchmarkManyMultiExpG1Reference(b *testing.B) { func fillBenchBasesG1(samplePoints []G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() @@ -330,7 +330,7 @@ func TestMultiExpG2(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g2Gen, &finalScalar) + expected.ScalarMul(&g2Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -380,7 +380,7 @@ func TestMultiExpG2(t *testing.T) { var op1ScalarMul G2Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi) 
+ op1ScalarMul.ScalarMul(&g2GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -479,7 +479,7 @@ func BenchmarkManyMultiExpG2Reference(b *testing.B) { func fillBenchBasesG2(samplePoints []G2Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() diff --git a/ecc/bw6-756/pairing_test.go b/ecc/bw6-756/pairing_test.go index 5c111bbd4..632ceeb53 100644 --- a/ecc/bw6-756/pairing_test.go +++ b/ecc/bw6-756/pairing_test.go @@ -121,8 +121,8 @@ func TestPairing(t *testing.T) { b.ToBigIntRegular(&bbigint) ab.Mul(&abigint, &bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ = Pair([]G1Affine{g1GenAff}, []G2Affine{g2GenAff}) resa, _ = Pair([]G1Affine{ag1}, []G2Affine{g2GenAff}) @@ -186,8 +186,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) P0 := []G1Affine{g1GenAff} P1 := []G1Affine{ag1} @@ -229,8 +229,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) g1Inf.FromJacobian(&g1Infinity) g2Inf.FromJacobian(&g2Infinity) @@ -267,8 +267,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ := Pair([]G1Affine{ag1}, []G2Affine{bg2}) diff --git a/ecc/bw6-761/fr/kzg/kzg.go b/ecc/bw6-761/fr/kzg/kzg.go index 4d8b8e850..4c2c2483e 100644 --- a/ecc/bw6-761/fr/kzg/kzg.go +++ b/ecc/bw6-761/fr/kzg/kzg.go @@ -77,7 +77,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { _, _, gen1Aff, gen2Aff := bw6761.Generators() srs.G1[0] = gen1Aff srs.G2[0] = gen2Aff - srs.G2[1].ScalarMultiplication(&gen2Aff, bAlpha) + srs.G2[1].ScalarMul(&gen2Aff, bAlpha) alphas := make([]fr.Element, size-1) alphas[0] = alpha @@ -87,7 +87,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { for i := 0; i < len(alphas); i++ { alphas[i].FromMont() } - g1s := bw6761.BatchScalarMultiplicationG1(&gen1Aff, alphas) + g1s := bw6761.BatchScalarMulG1(&gen1Aff, alphas) copy(srs.G1[1:], g1s) return &srs, nil @@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) { func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error { // [f(a)]G₁ - var claimedValueG1Aff bw6761.G1Affine + var claimedValueG1Aff bw6761.G1Jac var claimedValueBigInt big.Int proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt) + claimedValueG1Aff.ScalarMulUnconverted(&srs.G1[0], &claimedValueBigInt) // [f(α) - f(a)]G₁ - var fminusfaG1Jac, tmpG1Jac bw6761.G1Jac + var fminusfaG1Jac bw6761.G1Jac fminusfaG1Jac.FromAffine(commitment) - tmpG1Jac.FromAffine(&claimedValueG1Aff) - fminusfaG1Jac.SubAssign(&tmpG1Jac) + fminusfaG1Jac.SubAssign(&claimedValueG1Aff) // 
[-H(α)]G₁ var negH bw6761.G1Affine @@ -190,7 +189,7 @@ func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) point.ToBigIntRegular(&pointBigInt) genG2Jac.FromAffine(&srs.G2[0]) alphaG2Jac.FromAffine(&srs.G2[1]) - alphaMinusaG2Jac.ScalarMultiplication(&genG2Jac, &pointBigInt). + alphaMinusaG2Jac.ScalarMul(&genG2Jac, &pointBigInt). Neg(&alphaMinusaG2Jac). AddAssign(&alphaG2Jac) @@ -419,7 +418,7 @@ func BatchVerifyMultiPoints(digests []Digest, proofs []OpeningProof, points []fr var foldedEvalsCommit bw6761.G1Affine var foldedEvalsBigInt big.Int foldedEvals.ToBigIntRegular(&foldedEvalsBigInt) - foldedEvalsCommit.ScalarMultiplication(&srs.G1[0], &foldedEvalsBigInt) + foldedEvalsCommit.ScalarMul(&srs.G1[0], &foldedEvalsBigInt) // compute foldedDigests = ∑ᵢλᵢ[fᵢ(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁ foldedDigests.Sub(&foldedDigests, &foldedEvalsCommit) diff --git a/ecc/bw6-761/fr/kzg/kzg_test.go b/ecc/bw6-761/fr/kzg/kzg_test.go index ec89a0510..a346e2c0a 100644 --- a/ecc/bw6-761/fr/kzg/kzg_test.go +++ b/ecc/bw6-761/fr/kzg/kzg_test.go @@ -130,7 +130,7 @@ func TestCommit(t *testing.T) { fx.ToBigIntRegular(&fxbi) var manualCommit bw6761.G1Affine manualCommit.Set(&testSRS.G1[0]) - manualCommit.ScalarMultiplication(&manualCommit, &fxbi) + manualCommit.ScalarMul(&manualCommit, &fxbi) // compare both results if !kzgCommit.Equal(&manualCommit) { diff --git a/ecc/bw6-761/fr/plookup/table.go b/ecc/bw6-761/fr/plookup/table.go index d5eaec0b6..ab92b2d10 100644 --- a/ecc/bw6-761/fr/plookup/table.go +++ b/ecc/bw6-761/fr/plookup/table.go @@ -209,9 +209,9 @@ func VerifyLookupTables(srs *kzg.SRS, proof ProofLookupTables) error { var blambda big.Int lambda.ToBigIntRegular(&blambda) for i := nbRows - 2; i >= 0; i-- { - comf.ScalarMultiplication(&comf, &blambda). + comf.ScalarMul(&comf, &blambda). Add(&comf, &proof.fs[i]) - comt.ScalarMultiplication(&comt, &blambda). + comt.ScalarMul(&comt, &blambda). Add(&comt, &proof.ts[i]) } diff --git a/ecc/bw6-761/g1.go b/ecc/bw6-761/g1.go index d2d3ca9a4..833adcd65 100644 --- a/ecc/bw6-761/g1.go +++ b/ecc/bw6-761/g1.go @@ -55,8 +55,8 @@ func (p *G1Affine) Set(a *G1Affine) *G1Affine { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G1Affine) ScalarMul(a *G1Affine, s *big.Int) *G1Affine { var _p G1Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -64,6 +64,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { return p } +// ScalarMulUnconverted computes and returns p = a ⋅ s +// Takes an affine point and returns a Jacobian point (useful for KZG) +func (p *G1Jac) ScalarMulUnconverted(a *G1Affine, s *big.Int) *G1Jac { + p.FromAffine(a) + p.mulGLV(p, s) + return p +} + // Add adds two point in affine coordinates. // This should rarely be used as it is very inefficient compared to Jacobian func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { @@ -328,9 +336,9 @@ func (p *G1Jac) DoubleAssign() *G1Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { +func (p *G1Jac) ScalarMul(a *G1Jac, s *big.Int) *G1Jac { return p.mulGLV(a, s) } @@ -379,13 +387,13 @@ func (p *G1Jac) IsInSubGroup() bool { var res, phip G1Jac phip.phi(p) - res.ScalarMultiplication(&phip, &xGen). + res.ScalarMul(&phip, &xGen). SubAssign(&phip). 
- ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). AddAssign(&phip) - phip.ScalarMultiplication(p, &xGen).AddAssign(p).AddAssign(&res) + phip.ScalarMul(p, &xGen).AddAssign(p).AddAssign(&res) return phip.IsOnCurve() && phip.Z.IsZero() @@ -516,9 +524,9 @@ func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { // https://eprint.iacr.org/2020/351.pdf var points [4]G1Jac points[0].Set(a) - points[1].ScalarMultiplication(a, &xGen) - points[2].ScalarMultiplication(&points[1], &xGen) - points[3].ScalarMultiplication(&points[2], &xGen) + points[1].ScalarMul(a, &xGen) + points[2].ScalarMul(&points[1], &xGen) + points[3].ScalarMul(&points[2], &xGen) var scalars [7]big.Int scalars[0].SetInt64(103) @@ -531,18 +539,18 @@ func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { scalars[6].SetInt64(130) var p1, p2, tmp G1Jac - p1.ScalarMultiplication(&points[3], &scalars[0]) - tmp.ScalarMultiplication(&points[2], &scalars[1]).Neg(&tmp) + p1.ScalarMul(&points[3], &scalars[0]) + tmp.ScalarMul(&points[2], &scalars[1]).Neg(&tmp) p1.AddAssign(&tmp) - tmp.ScalarMultiplication(&points[1], &scalars[2]).Neg(&tmp) + tmp.ScalarMul(&points[1], &scalars[2]).Neg(&tmp) p1.AddAssign(&tmp) - tmp.ScalarMultiplication(&points[0], &scalars[3]) + tmp.ScalarMul(&points[0], &scalars[3]) p1.AddAssign(&tmp) - p2.ScalarMultiplication(&points[2], &scalars[4]) - tmp.ScalarMultiplication(&points[1], &scalars[5]) + p2.ScalarMul(&points[2], &scalars[4]) + tmp.ScalarMul(&points[1], &scalars[5]) p2.AddAssign(&tmp) - tmp.ScalarMultiplication(&points[0], &scalars[6]) + tmp.ScalarMul(&points[0], &scalars[6]) p2.AddAssign(&tmp) p2.phi(&p2) @@ -975,10 +983,10 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) { } -// BatchScalarMultiplicationG1 multiplies the same base by all scalars +// BatchScalarMulG1 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affine { +func BatchScalarMulG1(base *G1Affine, scalars []fr.Element) []G1Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bw6-761/g1_test.go b/ecc/bw6-761/g1_test.go index 0deac0bf2..12ce35ab7 100644 --- a/ecc/bw6-761/g1_test.go +++ b/ecc/bw6-761/g1_test.go @@ -110,7 +110,7 @@ func TestG1AffineIsOnCurve(t *testing.T) { var op1, op2 G1Jac op1 = fuzzG1Jac(&g1Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenFp(), @@ -353,12 +353,12 @@ func TestG1AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G1Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g1Gen, &rminusone) + op3.ScalarMul(&g1Gen, &rminusone) gneg.Neg(&g1Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g1Gen, &scalar) - op2.ScalarMultiplication(&g1Gen, &blindedScalar) + op1.ScalarMul(&g1Gen, &scalar) + op2.ScalarMul(&g1Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g1Infinity) && !op1.Equal(&g1Infinity) && gneg.Equal(&op3) @@ -422,7 +422,7 @@ func TestG1AffineCofactorCleaning(t *testing.T) { } -func TestG1AffineBatchScalarMultiplication(t *testing.T) { +func TestG1AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -438,7 +438,7 @@ func 
TestG1AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - properties.Property("[BW6-761] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BW6-761] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -449,7 +449,7 @@ func TestG1AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:]) + result := BatchScalarMulG1(&g1GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -508,7 +508,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:using]) + _ = BatchScalarMulG1(&g1GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bw6-761/g2.go b/ecc/bw6-761/g2.go index a15998b95..d471f606a 100644 --- a/ecc/bw6-761/g2.go +++ b/ecc/bw6-761/g2.go @@ -50,8 +50,8 @@ func (p *G2Affine) Set(a *G2Affine) *G2Affine { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *G2Affine) ScalarMultiplication(a *G2Affine, s *big.Int) *G2Affine { +// ScalarMul computes and returns p = a ⋅ s +func (p *G2Affine) ScalarMul(a *G2Affine, s *big.Int) *G2Affine { var _p G2Jac _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -323,9 +323,9 @@ func (p *G2Jac) DoubleAssign() *G2Jac { return p } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // see https://www.iacr.org/archive/crypto2001/21390189.pdf -func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { +func (p *G2Jac) ScalarMul(a *G2Jac, s *big.Int) *G2Jac { return p.mulGLV(a, s) } @@ -374,13 +374,13 @@ func (p *G2Jac) IsInSubGroup() bool { var res, phip G2Jac phip.phi(p) - res.ScalarMultiplication(&phip, &xGen). + res.ScalarMul(&phip, &xGen). SubAssign(&phip). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). 
AddAssign(&phip) - phip.ScalarMultiplication(p, &xGen).AddAssign(p).AddAssign(&res) + phip.ScalarMul(p, &xGen).AddAssign(p).AddAssign(&res) return phip.IsOnCurve() && phip.Z.IsZero() @@ -511,9 +511,9 @@ func (p *G2Jac) ClearCofactor(a *G2Jac) *G2Jac { var points [4]G2Jac points[0].Set(a) - points[1].ScalarMultiplication(a, &xGen) - points[2].ScalarMultiplication(&points[1], &xGen) - points[3].ScalarMultiplication(&points[2], &xGen) + points[1].ScalarMul(a, &xGen) + points[2].ScalarMul(&points[1], &xGen) + points[3].ScalarMul(&points[2], &xGen) var scalars [7]big.Int scalars[0].SetInt64(103) @@ -526,18 +526,18 @@ func (p *G2Jac) ClearCofactor(a *G2Jac) *G2Jac { scalars[6].SetInt64(109) var p1, p2, tmp G2Jac - p1.ScalarMultiplication(&points[3], &scalars[0]) - tmp.ScalarMultiplication(&points[2], &scalars[1]).Neg(&tmp) + p1.ScalarMul(&points[3], &scalars[0]) + tmp.ScalarMul(&points[2], &scalars[1]).Neg(&tmp) p1.AddAssign(&tmp) - tmp.ScalarMultiplication(&points[1], &scalars[2]).Neg(&tmp) + tmp.ScalarMul(&points[1], &scalars[2]).Neg(&tmp) p1.AddAssign(&tmp) - tmp.ScalarMultiplication(&points[0], &scalars[3]) + tmp.ScalarMul(&points[0], &scalars[3]) p1.AddAssign(&tmp) - p2.ScalarMultiplication(&points[2], &scalars[4]) - tmp.ScalarMultiplication(&points[1], &scalars[5]).Neg(&tmp) + p2.ScalarMul(&points[2], &scalars[4]) + tmp.ScalarMul(&points[1], &scalars[5]).Neg(&tmp) p2.AddAssign(&tmp) - tmp.ScalarMultiplication(&points[0], &scalars[6]).Neg(&tmp) + tmp.ScalarMul(&points[0], &scalars[6]).Neg(&tmp) p2.AddAssign(&tmp) p2.phi(&p2).phi(&p2) @@ -848,10 +848,10 @@ func (p *g2JacExtended) doubleMixed(q *G2Affine) *g2JacExtended { return p } -// BatchScalarMultiplicationG2 multiplies the same base by all scalars +// BatchScalarMulG2 multiplies the same base by all scalars // and return resulting points in affine coordinates // uses a simple windowed-NAF like exponentiation algorithm -func BatchScalarMultiplicationG2(base *G2Affine, scalars []fr.Element) []G2Affine { +func BatchScalarMulG2(base *G2Affine, scalars []fr.Element) []G2Affine { // approximate cost in group ops is // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) diff --git a/ecc/bw6-761/g2_test.go b/ecc/bw6-761/g2_test.go index 3f1c02bd0..3a87d8c9e 100644 --- a/ecc/bw6-761/g2_test.go +++ b/ecc/bw6-761/g2_test.go @@ -110,7 +110,7 @@ func TestG2AffineIsOnCurve(t *testing.T) { var op1, op2 G2Jac op1 = fuzzG2Jac(&g2Gen, a) _r := fr.Modulus() - op2.ScalarMultiplication(&op1, _r) + op2.ScalarMul(&op1, _r) return op1.IsInSubGroup() && op2.Z.IsZero() }, GenFp(), @@ -353,12 +353,12 @@ func TestG2AffineOps(t *testing.T) { var scalar, blindedScalar, rminusone big.Int var op1, op2, op3, gneg G2Jac rminusone.SetUint64(1).Sub(r, &rminusone) - op3.ScalarMultiplication(&g2Gen, &rminusone) + op3.ScalarMul(&g2Gen, &rminusone) gneg.Neg(&g2Gen) s.ToBigIntRegular(&scalar) blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) - op1.ScalarMultiplication(&g2Gen, &scalar) - op2.ScalarMultiplication(&g2Gen, &blindedScalar) + op1.ScalarMul(&g2Gen, &scalar) + op2.ScalarMul(&g2Gen, &blindedScalar) return op1.Equal(&op2) && g.Equal(&g2Infinity) && !op1.Equal(&g2Infinity) && gneg.Equal(&op3) @@ -422,7 +422,7 @@ func TestG2AffineCofactorCleaning(t *testing.T) { } -func TestG2AffineBatchScalarMultiplication(t *testing.T) { +func TestG2AffineBatchScalarMul(t *testing.T) { parameters := gopter.DefaultTestParameters() if testing.Short() { @@ -438,7 +438,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { // size of the multiExps const nbSamples = 10 - 
properties.Property("[BW6-761] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll( + properties.Property("[BW6-761] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll( func(mixer fr.Element) bool { // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -449,7 +449,7 @@ func TestG2AffineBatchScalarMultiplication(t *testing.T) { FromMont() } - result := BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:]) + result := BatchScalarMulG2(&g2GenAff, sampleScalars[:]) if len(result) != len(sampleScalars) { return false @@ -508,7 +508,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) { b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { b.ResetTimer() for j := 0; j < b.N; j++ { - _ = BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:using]) + _ = BatchScalarMulG2(&g2GenAff, sampleScalars[:using]) } }) } diff --git a/ecc/bw6-761/marshal_test.go b/ecc/bw6-761/marshal_test.go index 51487badf..b42e130f2 100644 --- a/ecc/bw6-761/marshal_test.go +++ b/ecc/bw6-761/marshal_test.go @@ -55,9 +55,9 @@ func TestEncoder(t *testing.T) { inA = rand.Uint64() inB.SetRandom() inC.SetRandom() - inD.ScalarMultiplication(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) + inD.ScalarMul(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) // inE --> infinity - inF.ScalarMultiplication(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) + inF.ScalarMul(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) inG = make([]G1Affine, 2) inH = make([]G2Affine, 0) inG[1] = inD @@ -263,7 +263,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -283,7 +283,7 @@ func TestG1AffineSerialization(t *testing.T) { var start, end G1Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g1GenAff, &ab) + start.ScalarMul(&g1GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) @@ -356,7 +356,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.RawBytes() n, err := end.SetBytes(buf[:]) @@ -376,7 +376,7 @@ func TestG2AffineSerialization(t *testing.T) { var start, end G2Affine var ab big.Int a.ToBigIntRegular(&ab) - start.ScalarMultiplication(&g2GenAff, &ab) + start.ScalarMul(&g2GenAff, &ab) buf := start.Bytes() n, err := end.SetBytes(buf[:]) diff --git a/ecc/bw6-761/multiexp_test.go b/ecc/bw6-761/multiexp_test.go index d6901edc8..d5f020657 100644 --- a/ecc/bw6-761/multiexp_test.go +++ b/ecc/bw6-761/multiexp_test.go @@ -100,7 +100,7 @@ func TestMultiExpG1(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g1Gen, &finalScalar) + expected.ScalarMul(&g1Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -150,7 +150,7 @@ func TestMultiExpG1(t *testing.T) { var op1ScalarMul G1Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g1GenAff, &finalBigScalarBi) return 
op1ScalarMul.Equal(&op1MultiExp) }, @@ -249,7 +249,7 @@ func BenchmarkManyMultiExpG1Reference(b *testing.B) { func fillBenchBasesG1(samplePoints []G1Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() @@ -330,7 +330,7 @@ func TestMultiExpG2(t *testing.T) { // compute expected result with double and add var finalScalar, mixerBigInt big.Int finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) - expected.ScalarMultiplication(&g2Gen, &finalScalar) + expected.ScalarMul(&g2Gen, &finalScalar) // mixer ensures that all the words of a fpElement are set var sampleScalars [nbSamples]fr.Element @@ -380,7 +380,7 @@ func TestMultiExpG2(t *testing.T) { var op1ScalarMul G2Affine finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) finalBigScalar.ToBigIntRegular(&finalBigScalarBi) - op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi) + op1ScalarMul.ScalarMul(&g2GenAff, &finalBigScalarBi) return op1ScalarMul.Equal(&op1MultiExp) }, @@ -479,7 +479,7 @@ func BenchmarkManyMultiExpG2Reference(b *testing.B) { func fillBenchBasesG2(samplePoints []G2Affine) { var r big.Int r.SetString("340444420969191673093399857471996460938405", 10) - samplePoints[0].ScalarMultiplication(&samplePoints[0], &r) + samplePoints[0].ScalarMul(&samplePoints[0], &r) one := samplePoints[0].X one.SetOne() diff --git a/ecc/bw6-761/pairing_test.go b/ecc/bw6-761/pairing_test.go index 76bf81eb3..e850b467d 100644 --- a/ecc/bw6-761/pairing_test.go +++ b/ecc/bw6-761/pairing_test.go @@ -122,8 +122,8 @@ func TestPairing(t *testing.T) { b.ToBigIntRegular(&bbigint) ab.Mul(&abigint, &bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ = Pair([]G1Affine{g1GenAff}, []G2Affine{g2GenAff}) resa, _ = Pair([]G1Affine{ag1}, []G2Affine{g2GenAff}) @@ -187,8 +187,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) P0 := []G1Affine{g1GenAff} P1 := []G1Affine{ag1} @@ -230,8 +230,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) g1Inf.FromJacobian(&g1Infinity) g2Inf.FromJacobian(&g2Infinity) @@ -268,8 +268,8 @@ func TestMillerLoop(t *testing.T) { a.ToBigIntRegular(&abigint) b.ToBigIntRegular(&bbigint) - ag1.ScalarMultiplication(&g1GenAff, &abigint) - bg2.ScalarMultiplication(&g2GenAff, &bbigint) + ag1.ScalarMul(&g1GenAff, &abigint) + bg2.ScalarMul(&g2GenAff, &bbigint) res, _ := Pair([]G1Affine{ag1}, []G2Affine{bg2}) diff --git a/internal/generator/ecc/template/point.go.tmpl b/internal/generator/ecc/template/point.go.tmpl index 1274d5708..c10c7480c 100644 --- a/internal/generator/ecc/template/point.go.tmpl +++ b/internal/generator/ecc/template/point.go.tmpl @@ -52,8 +52,8 @@ func (p *{{ $TAffine }}) Set(a *{{ $TAffine }}) *{{ $TAffine }} { return p } -// ScalarMultiplication computes and returns p = a ⋅ s -func (p *{{ $TAffine }}) ScalarMultiplication(a *{{ $TAffine }}, s *big.Int) *{{ $TAffine }} { +// 
ScalarMul computes and returns p = a ⋅ s +func (p *{{ $TAffine }}) ScalarMul(a *{{ $TAffine }}, s *big.Int) *{{ $TAffine }} { var _p {{ $TJacobian }} _p.FromAffine(a) _p.mulGLV(&_p, s) @@ -61,6 +61,16 @@ func (p *{{ $TAffine }}) ScalarMultiplication(a *{{ $TAffine }}, s *big.Int) *{{ return p } +{{- if eq .PointName "g1"}} +// ScalarMulUnconverted computes and returns p = a ⋅ s +// Takes an affine point and returns a Jacobian point (useful for KZG) +func (p *{{ $TJacobian }}) ScalarMulUnconverted(a *{{ $TAffine }}, s *big.Int) *{{ $TJacobian }} { + p.FromAffine(a) + p.mulGLV(p, s) + return p +} +{{- end}} + // Add adds two point in affine coordinates. // This should rarely be used as it is very inefficient compared to Jacobian func (p *{{ $TAffine }}) Add(a, b *{{ $TAffine }}) *{{ $TAffine }} { @@ -335,9 +345,9 @@ func (p *{{ $TJacobian }}) DoubleAssign() *{{ $TJacobian }} { } -// ScalarMultiplication computes and returns p = a ⋅ s +// ScalarMul computes and returns p = a ⋅ s // {{- if .GLV}} see https://www.iacr.org/archive/crypto2001/21390189.pdf {{- else }} using 2-bits windowed exponentiation {{- end }} -func (p *{{ $TJacobian }}) ScalarMultiplication(a *{{ $TJacobian }}, s *big.Int) *{{ $TJacobian }} { +func (p *{{ $TJacobian }}) ScalarMul(a *{{ $TJacobian }}, s *big.Int) *{{ $TJacobian }} { {{- if .GLV}} return p.mulGLV(a, s) {{- else }} @@ -403,7 +413,7 @@ func (p *{{ $TJacobian }}) IsOnCurve() bool { func (p *{{ $TJacobian }}) IsInSubGroup() bool { var a, res G2Jac a.psi(p) - res.ScalarMultiplication(p, &fixedCoeff). + res.ScalarMul(p, &fixedCoeff). SubAssign(&a) return res.IsOnCurve() && res.Z.IsZero() @@ -421,13 +431,13 @@ func (p *{{ $TJacobian }}) IsOnCurve() bool { var res, phip {{ $TJacobian }} phip.phi(p) - res.ScalarMultiplication(&phip, &xGen). + res.ScalarMul(&phip, &xGen). SubAssign(&phip). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). AddAssign(&phip) - phip.ScalarMultiplication(p, &xGen).AddAssign(p).AddAssign(&res) + phip.ScalarMul(p, &xGen).AddAssign(p).AddAssign(&res) return phip.IsOnCurve() && phip.Z.IsZero() @@ -438,11 +448,11 @@ func (p *{{ $TJacobian }}) IsOnCurve() bool { func (p *{{ $TJacobian }}) IsInSubGroup() bool { var uP, u4P, u5P, q, r {{ $TJacobian }} - uP.ScalarMultiplication(p, &xGen) - u4P.ScalarMultiplication(&uP, &xGen). - ScalarMultiplication(&u4P, &xGen). - ScalarMultiplication(&u4P, &xGen) - u5P.ScalarMultiplication(&u4P, &xGen) + uP.ScalarMul(p, &xGen) + u4P.ScalarMul(&uP, &xGen). + ScalarMul(&u4P, &xGen). + ScalarMul(&u4P, &xGen) + u5P.ScalarMul(&u4P, &xGen) q.Set(p).SubAssign(&uP) r.phi(&q).SubAssign(&uP). AddAssign(&u4P). @@ -462,10 +472,10 @@ func (p *{{ $TJacobian }}) IsOnCurve() bool { var res {{ $TJacobian }} res.phi(p). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). - ScalarMultiplication(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). + ScalarMul(&res, &xGen). AddAssign(p) return res.IsOnCurve() && res.Z.IsZero() @@ -478,7 +488,7 @@ func (p *{{ $TJacobian }}) IsOnCurve() bool { func (p *{{ $TJacobian }}) IsInSubGroup() bool { var res, tmp {{ $TJacobian }} tmp.psi(p) - res.ScalarMultiplication(p, &xGen). + res.ScalarMul(p, &xGen). {{ if eq .Name "bls24-315"}} AddAssign(&tmp) {{ else }} @@ -501,8 +511,8 @@ func (p *{{ $TJacobian }}) IsOnCurve() bool { var res {{ $TJacobian }} res.phi(p). - ScalarMultiplication(&res, &xGen). 
@@ -335,9 +345,9 @@ func (p *{{ $TJacobian }}) DoubleAssign() *{{ $TJacobian }} {
 }
-// ScalarMultiplication computes and returns p = a ⋅ s
+// ScalarMul computes and returns p = a ⋅ s
 // {{- if .GLV}} see https://www.iacr.org/archive/crypto2001/21390189.pdf {{- else }} using 2-bits windowed exponentiation {{- end }}
-func (p *{{ $TJacobian }}) ScalarMultiplication(a *{{ $TJacobian }}, s *big.Int) *{{ $TJacobian }} {
+func (p *{{ $TJacobian }}) ScalarMul(a *{{ $TJacobian }}, s *big.Int) *{{ $TJacobian }} {
 	{{- if .GLV}}
 	return p.mulGLV(a, s)
 	{{- else }}
@@ -403,7 +413,7 @@ func (p *{{ $TJacobian }}) IsOnCurve() bool {
 func (p *{{ $TJacobian }}) IsInSubGroup() bool {
 	var a, res G2Jac
 	a.psi(p)
-	res.ScalarMultiplication(p, &fixedCoeff).
+	res.ScalarMul(p, &fixedCoeff).
 		SubAssign(&a)
 	return res.IsOnCurve() && res.Z.IsZero()
@@ -421,13 +431,13 @@ func (p *{{ $TJacobian }}) IsOnCurve() bool {
 	var res, phip {{ $TJacobian }}
 	phip.phi(p)
-	res.ScalarMultiplication(&phip, &xGen).
+	res.ScalarMul(&phip, &xGen).
 		SubAssign(&phip).
-		ScalarMultiplication(&res, &xGen).
-		ScalarMultiplication(&res, &xGen).
+		ScalarMul(&res, &xGen).
+		ScalarMul(&res, &xGen).
 		AddAssign(&phip)
-	phip.ScalarMultiplication(p, &xGen).AddAssign(p).AddAssign(&res)
+	phip.ScalarMul(p, &xGen).AddAssign(p).AddAssign(&res)
 	return phip.IsOnCurve() && phip.Z.IsZero()
@@ -438,11 +448,11 @@ func (p *{{ $TJacobian }}) IsOnCurve() bool {
 func (p *{{ $TJacobian }}) IsInSubGroup() bool {
 	var uP, u4P, u5P, q, r {{ $TJacobian }}
-	uP.ScalarMultiplication(p, &xGen)
-	u4P.ScalarMultiplication(&uP, &xGen).
-		ScalarMultiplication(&u4P, &xGen).
-		ScalarMultiplication(&u4P, &xGen)
-	u5P.ScalarMultiplication(&u4P, &xGen)
+	uP.ScalarMul(p, &xGen)
+	u4P.ScalarMul(&uP, &xGen).
+		ScalarMul(&u4P, &xGen).
+		ScalarMul(&u4P, &xGen)
+	u5P.ScalarMul(&u4P, &xGen)
 	q.Set(p).SubAssign(&uP)
 	r.phi(&q).SubAssign(&uP).
 		AddAssign(&u4P).
@@ -462,10 +472,10 @@ func (p *{{ $TJacobian }}) IsOnCurve() bool {
 	var res {{ $TJacobian }}
 	res.phi(p).
-		ScalarMultiplication(&res, &xGen).
-		ScalarMultiplication(&res, &xGen).
-		ScalarMultiplication(&res, &xGen).
-		ScalarMultiplication(&res, &xGen).
+		ScalarMul(&res, &xGen).
+		ScalarMul(&res, &xGen).
+		ScalarMul(&res, &xGen).
+		ScalarMul(&res, &xGen).
 		AddAssign(p)
 	return res.IsOnCurve() && res.Z.IsZero()
@@ -478,7 +488,7 @@ func (p *{{ $TJacobian }}) IsOnCurve() bool {
 func (p *{{ $TJacobian }}) IsInSubGroup() bool {
 	var res, tmp {{ $TJacobian }}
 	tmp.psi(p)
-	res.ScalarMultiplication(p, &xGen).
+	res.ScalarMul(p, &xGen).
 		{{ if eq .Name "bls24-315"}}
 		AddAssign(&tmp)
 		{{ else }}
@@ -501,8 +511,8 @@ func (p *{{ $TJacobian }}) IsOnCurve() bool {
 	var res {{ $TJacobian }}
 	res.phi(p).
-		ScalarMultiplication(&res, &xGen).
-		ScalarMultiplication(&res, &xGen).
+		ScalarMul(&res, &xGen).
+		ScalarMul(&res, &xGen).
 		AddAssign(p)
 	return res.IsOnCurve() && res.Z.IsZero()
@@ -516,7 +526,7 @@ func (p *{{ $TJacobian }}) IsOnCurve() bool {
 func (p *{{ $TJacobian }}) IsInSubGroup() bool {
 	var res, tmp {{ $TJacobian }}
 	tmp.psi(p)
-	res.ScalarMultiplication(p, &xGen).
+	res.ScalarMul(p, &xGen).
 		AddAssign(&tmp)
 	return res.IsOnCurve() && res.Z.IsZero()
@@ -527,7 +537,7 @@ func (p *{{ $TJacobian }}) IsOnCurve() bool {
 func (p *{{ $TJacobian }}) IsInSubGroup() bool {
 	var res, tmp {{ $TJacobian }}
 	tmp.psi(p)
-	res.ScalarMultiplication(p, &xGen).
+	res.ScalarMul(p, &xGen).
 		SubAssign(&tmp)
 	return res.IsOnCurve() && res.Z.IsZero()
@@ -695,22 +705,22 @@ func (p *{{$TJacobian}}) ClearCofactor(a *{{$TJacobian}}) *{{$TJacobian}} {
 	{{- if or (eq .Name "bls12-381") (eq .Name "bls24-315")}}
 	// cf https://eprint.iacr.org/2019/403.pdf, 5
 	var res {{$TJacobian}}
-	res.ScalarMultiplication(a, &xGen).AddAssign(a)
+	res.ScalarMul(a, &xGen).AddAssign(a)
 	p.Set(&res)
 	return p
 	{{else if or (eq .Name "bls12-377") (eq .Name "bls12-378") (eq .Name "bls24-317")}}
 	// cf https://eprint.iacr.org/2019/403.pdf, 5
 	var res {{$TJacobian}}
-	res.ScalarMultiplication(a, &xGen).Neg(&res).AddAssign(a)
+	res.ScalarMul(a, &xGen).Neg(&res).AddAssign(a)
 	p.Set(&res)
 	return p
 	{{else if eq .Name "bw6-761"}}
 	// https://eprint.iacr.org/2020/351.pdf
 	var points [4]{{$TJacobian}}
 	points[0].Set(a)
-	points[1].ScalarMultiplication(a, &xGen)
-	points[2].ScalarMultiplication(&points[1], &xGen)
-	points[3].ScalarMultiplication(&points[2], &xGen)
+	points[1].ScalarMul(a, &xGen)
+	points[2].ScalarMul(&points[1], &xGen)
+	points[3].ScalarMul(&points[2], &xGen)
 	var scalars [7]big.Int
 	scalars[0].SetInt64(103)
@@ -723,18 +733,18 @@ func (p *{{$TJacobian}}) ClearCofactor(a *{{$TJacobian}}) *{{$TJacobian}} {
 	scalars[6].SetInt64(130)
 	var p1, p2, tmp {{$TJacobian}}
-	p1.ScalarMultiplication(&points[3], &scalars[0])
-	tmp.ScalarMultiplication(&points[2], &scalars[1]).Neg(&tmp)
+	p1.ScalarMul(&points[3], &scalars[0])
+	tmp.ScalarMul(&points[2], &scalars[1]).Neg(&tmp)
 	p1.AddAssign(&tmp)
-	tmp.ScalarMultiplication(&points[1], &scalars[2]).Neg(&tmp)
+	tmp.ScalarMul(&points[1], &scalars[2]).Neg(&tmp)
 	p1.AddAssign(&tmp)
-	tmp.ScalarMultiplication(&points[0], &scalars[3])
+	tmp.ScalarMul(&points[0], &scalars[3])
 	p1.AddAssign(&tmp)
-	p2.ScalarMultiplication(&points[2], &scalars[4])
-	tmp.ScalarMultiplication(&points[1], &scalars[5])
+	p2.ScalarMul(&points[2], &scalars[4])
+	tmp.ScalarMul(&points[1], &scalars[5])
 	p2.AddAssign(&tmp)
-	tmp.ScalarMultiplication(&points[0], &scalars[6])
+	tmp.ScalarMul(&points[0], &scalars[6])
 	p2.AddAssign(&tmp)
 	p2.phi(&p2)
@@ -752,20 +762,20 @@ func (p *{{$TJacobian}}) ClearCofactor(a *{{$TJacobian}}) *{{$TJacobian}} {
 	ht.SetInt64(7)
 	v.Mul(&xGen, &xGen).Add(&v, &one).Mul(&v, &uPlusOne)
-	uP.ScalarMultiplication(a, &xGen).Neg(&uP)
+	uP.ScalarMul(a, &xGen).Neg(&uP)
 	vP.Set(a).SubAssign(&uP).
-		ScalarMultiplication(&vP, &v)
-	wP.ScalarMultiplication(&vP, &uMinusOne).Neg(&wP).
+		ScalarMul(&vP, &v)
+	wP.ScalarMul(&vP, &uMinusOne).Neg(&wP).
 		AddAssign(&uP)
-	L0.ScalarMultiplication(&wP, &d1)
-	tmp.ScalarMultiplication(&vP, &ht)
+	L0.ScalarMul(&wP, &d1)
+	tmp.ScalarMul(&vP, &ht)
 	L0.AddAssign(&tmp)
 	tmp.Double(a)
 	L0.AddAssign(&tmp)
-	L1.Set(&uP).AddAssign(a).ScalarMultiplication(&L1, &d1)
-	tmp.ScalarMultiplication(&vP, &d2)
+	L1.Set(&uP).AddAssign(a).ScalarMul(&L1, &d1)
+	tmp.ScalarMul(&vP, &d2)
 	L1.AddAssign(&tmp)
-	tmp.ScalarMultiplication(a, &ht)
+	tmp.ScalarMul(a, &ht)
 	L1.AddAssign(&tmp)
 	p.phi(&L1).AddAssign(&L0)
@@ -774,9 +784,9 @@ func (p *{{$TJacobian}}) ClearCofactor(a *{{$TJacobian}}) *{{$TJacobian}} {
 	{{else if eq .Name "bw6-756"}}
 	var L0, L1, uP, u2P, u3P, tmp G1Jac
-	uP.ScalarMultiplication(a, &xGen)
-	u2P.ScalarMultiplication(&uP, &xGen)
-	u3P.ScalarMultiplication(&u2P, &xGen)
+	uP.ScalarMul(a, &xGen)
+	u2P.ScalarMul(&uP, &xGen)
+	u3P.ScalarMul(&u2P, &xGen)
 	L0.Set(a).AddAssign(&u3P).
 		SubAssign(&u2P)
@@ -808,7 +818,7 @@ func (p *{{$TJacobian}}) ClearCofactor(a *{{$TJacobian}}) *{{$TJacobian}} {
 	// cf http://cacr.uwaterloo.ca/techreports/2011/cacr2011-26.pdf, 6.1
 	var points [4]{{$TJacobian}}
-	points[0].ScalarMultiplication(a, &xGen)
+	points[0].ScalarMul(a, &xGen)
 	points[1].Double(&points[0]).
 		AddAssign(&points[0]).
@@ -829,8 +839,8 @@ func (p *{{$TJacobian}}) ClearCofactor(a *{{$TJacobian}}) *{{$TJacobian}} {
 	{{else if eq .Name "bls12-381"}}
 	// https://eprint.iacr.org/2017/419.pdf, 4.1
 	var xg, xxg, res, t G2Jac
-	xg.ScalarMultiplication(a, &xGen).Neg(&xg)
-	xxg.ScalarMultiplication(&xg, &xGen).Neg(&xxg)
+	xg.ScalarMul(a, &xGen).Neg(&xg)
+	xxg.ScalarMul(&xg, &xGen).Neg(&xxg)
 	res.Set(&xxg).
 		SubAssign(&xg).
@@ -854,8 +864,8 @@ func (p *{{$TJacobian}}) ClearCofactor(a *{{$TJacobian}}) *{{$TJacobian}} {
 	{{else if or (eq .Name "bls12-377") (eq .Name "bls12-378")}}
 	// https://eprint.iacr.org/2017/419.pdf, 4.1
 	var xg, xxg, res, t G2Jac
-	xg.ScalarMultiplication(a, &xGen)
-	xxg.ScalarMultiplication(&xg, &xGen)
+	xg.ScalarMul(a, &xGen)
+	xxg.ScalarMul(&xg, &xGen)
 	res.Set(&xxg).
 		SubAssign(&xg).
@@ -881,16 +891,16 @@ func (p *{{$TJacobian}}) ClearCofactor(a *{{$TJacobian}}) *{{$TJacobian}} {
 	// multiply by (3x⁴-3)*cofacor
 	{{ if eq .Name "bls24-315"}}
 	var xg, xxg, xxxg, xxxxg, res, t G2Jac
-	xg.ScalarMultiplication(a, &xGen).Neg(&xg).SubAssign(a)
-	xxg.ScalarMultiplication(&xg, &xGen).Neg(&xxg)
-	xxxg.ScalarMultiplication(&xxg, &xGen).Neg(&xxxg)
-	xxxxg.ScalarMultiplication(&xxxg, &xGen).Neg(&xxxxg)
+	xg.ScalarMul(a, &xGen).Neg(&xg).SubAssign(a)
+	xxg.ScalarMul(&xg, &xGen).Neg(&xxg)
+	xxxg.ScalarMul(&xxg, &xGen).Neg(&xxxg)
+	xxxxg.ScalarMul(&xxxg, &xGen).Neg(&xxxxg)
 	{{ else }}
 	var xg, xxg, xxxg, xxxxg, res, t G2Jac
-	xg.ScalarMultiplication(a, &xGen).SubAssign(a)
-	xxg.ScalarMultiplication(&xg, &xGen)
-	xxxg.ScalarMultiplication(&xxg, &xGen)
-	xxxxg.ScalarMultiplication(&xxxg, &xGen)
+	xg.ScalarMul(a, &xGen).SubAssign(a)
+	xxg.ScalarMul(&xg, &xGen)
+	xxxg.ScalarMul(&xxg, &xGen)
+	xxxxg.ScalarMul(&xxxg, &xGen)
 	{{ end }}
 	res.Set(&xxxxg).
@@ -925,9 +935,9 @@ func (p *{{$TJacobian}}) ClearCofactor(a *{{$TJacobian}}) *{{$TJacobian}} {
 	var points [4]{{$TJacobian}}
 	points[0].Set(a)
-	points[1].ScalarMultiplication(a, &xGen)
-	points[2].ScalarMultiplication(&points[1], &xGen)
-	points[3].ScalarMultiplication(&points[2], &xGen)
+	points[1].ScalarMul(a, &xGen)
+	points[2].ScalarMul(&points[1], &xGen)
+	points[3].ScalarMul(&points[2], &xGen)
 	var scalars [7]big.Int
 	scalars[0].SetInt64(103)
@@ -940,18 +950,18 @@ func (p *{{$TJacobian}}) ClearCofactor(a *{{$TJacobian}}) *{{$TJacobian}} {
 	scalars[6].SetInt64(109)
 	var p1, p2, tmp {{$TJacobian}}
-	p1.ScalarMultiplication(&points[3], &scalars[0])
-	tmp.ScalarMultiplication(&points[2], &scalars[1]).Neg(&tmp)
+	p1.ScalarMul(&points[3], &scalars[0])
+	tmp.ScalarMul(&points[2], &scalars[1]).Neg(&tmp)
 	p1.AddAssign(&tmp)
-	tmp.ScalarMultiplication(&points[1], &scalars[2]).Neg(&tmp)
+	tmp.ScalarMul(&points[1], &scalars[2]).Neg(&tmp)
 	p1.AddAssign(&tmp)
-	tmp.ScalarMultiplication(&points[0], &scalars[3])
+	tmp.ScalarMul(&points[0], &scalars[3])
 	p1.AddAssign(&tmp)
-	p2.ScalarMultiplication(&points[2], &scalars[4])
-	tmp.ScalarMultiplication(&points[1], &scalars[5]).Neg(&tmp)
+	p2.ScalarMul(&points[2], &scalars[4])
+	tmp.ScalarMul(&points[1], &scalars[5]).Neg(&tmp)
 	p2.AddAssign(&tmp)
-	tmp.ScalarMultiplication(&points[0], &scalars[6]).Neg(&tmp)
+	tmp.ScalarMul(&points[0], &scalars[6]).Neg(&tmp)
 	p2.AddAssign(&tmp)
 	p2.phi(&p2).phi(&p2)
@@ -965,11 +975,11 @@ func (p *{{$TJacobian}}) ClearCofactor(a *{{$TJacobian}}) *{{$TJacobian}} {
 	d1.SetInt64(13)
 	d3.SetInt64(5) // negative
-	uP.ScalarMultiplication(a, &xGen) // negative
-	u2P.ScalarMultiplication(&uP, &xGen)
-	u3P.ScalarMultiplication(&u2P, &xGen) // negative
-	u4P.ScalarMultiplication(&u3P, &xGen)
-	u5P.ScalarMultiplication(&u4P, &xGen) // negative
+	uP.ScalarMul(a, &xGen) // negative
+	u2P.ScalarMul(&uP, &xGen)
+	u3P.ScalarMul(&u2P, &xGen) // negative
+	u4P.ScalarMul(&u3P, &xGen)
+	u5P.ScalarMul(&u4P, &xGen) // negative
 	vP.Set(&u2P).AddAssign(&uP).
 		AddAssign(&u3P).
 		Double(&vP).
@@ -977,15 +987,15 @@ func (p *{{$TJacobian}}) ClearCofactor(a *{{$TJacobian}}) *{{$TJacobian}} {
 		AddAssign(a)
 	wP.Set(&uP).SubAssign(&u4P).SubAssign(&u5P)
 	xP.Set(a).AddAssign(&vP)
-	L0.Set(&uP).SubAssign(a).ScalarMultiplication(&L0, &d1)
-	tmp.ScalarMultiplication(&xP, &d3)
+	L0.Set(&uP).SubAssign(a).ScalarMul(&L0, &d1)
+	tmp.ScalarMul(&xP, &d3)
 	L0.AddAssign(&tmp)
-	tmp.ScalarMultiplication(a, &ht) // negative
+	tmp.ScalarMul(a, &ht) // negative
 	L0.SubAssign(&tmp)
-	L1.ScalarMultiplication(&wP, &d1)
-	tmp.ScalarMultiplication(&vP, &ht)
+	L1.ScalarMul(&wP, &d1)
+	tmp.ScalarMul(&vP, &ht)
 	L1.AddAssign(&tmp)
-	tmp.ScalarMultiplication(a, &d3)
+	tmp.ScalarMul(a, &d3)
 	L1.AddAssign(&tmp)
 	p.phi(&L1).AddAssign(&L0)
@@ -995,9 +1005,9 @@ func (p *{{$TJacobian}}) ClearCofactor(a *{{$TJacobian}}) *{{$TJacobian}} {
 	var L0, L1, uP, u2P, u3P, tmp G2Jac
-	uP.ScalarMultiplication(a, &xGen)
-	u2P.ScalarMultiplication(&uP, &xGen)
-	u3P.ScalarMultiplication(&u2P, &xGen)
+	uP.ScalarMul(a, &xGen)
+	u2P.ScalarMul(&uP, &xGen)
+	u3P.ScalarMul(&u2P, &xGen)
 	// ht=-2, hy=0
 	// d1=1, d2=-1, d3=-1
@@ -1412,10 +1422,10 @@ func BatchJacobianToAffine{{ toUpper .PointName }}(points []{{ $TJacobian }}, re
 {{- end}}
-// BatchScalarMultiplication{{ toUpper .PointName }} multiplies the same base by all scalars
+// BatchScalarMul{{ toUpper .PointName }} multiplies the same base by all scalars
 // and return resulting points in affine coordinates
 // uses a simple windowed-NAF like exponentiation algorithm
-func BatchScalarMultiplication{{ toUpper .PointName }}(base *{{ $TAffine }}, scalars []fr.Element) []{{ $TAffine }} {
+func BatchScalarMul{{ toUpper .PointName }}(base *{{ $TAffine }}, scalars []fr.Element) []{{ $TAffine }} {
 	// approximate cost in group ops is
 	// cost = 2^{c-1} + n(scalar.nbBits+nbChunks)
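
Note: the same rename applies to the fixed-base batch routine BatchScalarMul{{ toUpper .PointName }}, which NewSRS uses to build the G1 powers of α. A sketch of its call shape for the generated bls12-377 code, assuming the renamed BatchScalarMulG1; the slice size and scalar values are illustrative, and the FromMont pre-processing mirrors what NewSRS does before the call.

package main

import (
	"fmt"

	bls12377 "github.com/consensys/gnark-crypto/ecc/bls12-377"
	"github.com/consensys/gnark-crypto/ecc/bls12-377/fr"
)

func main() {
	_, _, g1Aff, _ := bls12377.Generators()

	// scalars in regular (non-Montgomery) form, as the batch routine expects
	scalars := make([]fr.Element, 8)
	for i := range scalars {
		scalars[i].SetUint64(uint64(i + 1)).FromMont()
	}

	// points[i] = scalars[i] ⋅ g1Aff, all sharing one windowed precomputation
	points := bls12377.BatchScalarMulG1(&g1Aff, scalars)
	fmt.Println(len(points))
}
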
diff --git a/internal/generator/ecc/template/tests/marshal.go.tmpl b/internal/generator/ecc/template/tests/marshal.go.tmpl
index 2503b5d5b..395cf011f 100644
--- a/internal/generator/ecc/template/tests/marshal.go.tmpl
+++ b/internal/generator/ecc/template/tests/marshal.go.tmpl
@@ -45,9 +45,9 @@ func TestEncoder(t *testing.T) {
 	inA = rand.Uint64()
 	inB.SetRandom()
 	inC.SetRandom()
-	inD.ScalarMultiplication(&g1GenAff, new(big.Int).SetUint64(rand.Uint64()))
+	inD.ScalarMul(&g1GenAff, new(big.Int).SetUint64(rand.Uint64()))
 	// inE --> infinity
-	inF.ScalarMultiplication(&g2GenAff, new(big.Int).SetUint64(rand.Uint64()))
+	inF.ScalarMul(&g2GenAff, new(big.Int).SetUint64(rand.Uint64()))
 	inG = make([]G1Affine, 2)
 	inH = make([]G2Affine, 0)
 	inG[1] = inD
@@ -273,7 +273,7 @@ func Test{{ $.TAffine }}Serialization(t *testing.T) {
 			var start, end {{ $.TAffine }}
 			var ab big.Int
 			a.ToBigIntRegular(&ab)
-			start.ScalarMultiplication(&{{ toLower .PointName }}GenAff, &ab)
+			start.ScalarMul(&{{ toLower .PointName }}GenAff, &ab)
 			buf := start.RawBytes()
 			n, err := end.SetBytes(buf[:])
@@ -293,7 +293,7 @@ func Test{{ $.TAffine }}Serialization(t *testing.T) {
 			var start, end {{ $.TAffine }}
 			var ab big.Int
 			a.ToBigIntRegular(&ab)
-			start.ScalarMultiplication(&{{ toLower .PointName }}GenAff, &ab)
+			start.ScalarMul(&{{ toLower .PointName }}GenAff, &ab)
 			buf := start.Bytes()
 			n, err := end.SetBytes(buf[:])
diff --git a/internal/generator/ecc/template/tests/multiexp.go.tmpl b/internal/generator/ecc/template/tests/multiexp.go.tmpl
index 20b921052..6ae057541 100644
--- a/internal/generator/ecc/template/tests/multiexp.go.tmpl
+++ b/internal/generator/ecc/template/tests/multiexp.go.tmpl
@@ -100,7 +100,7 @@ func TestMultiExp{{toUpper $.PointName}}(t *testing.T) {
 		// compute expected result with double and add
 		var finalScalar,mixerBigInt big.Int
 		finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt))
-		expected.ScalarMultiplication(&{{ toLower $.PointName }}Gen, &finalScalar)
+		expected.ScalarMul(&{{ toLower $.PointName }}Gen, &finalScalar)
 		// mixer ensures that all the words of a fpElement are set
 		var sampleScalars [nbSamples]fr.Element
@@ -152,7 +152,7 @@ func TestMultiExp{{toUpper $.PointName}}(t *testing.T) {
 			var op1ScalarMul {{ $.TAffine }}
 			finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
 			finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
-			op1ScalarMul.ScalarMultiplication(&{{ toLower .PointName}}GenAff, &finalBigScalarBi)
+			op1ScalarMul.ScalarMul(&{{ toLower .PointName}}GenAff, &finalBigScalarBi)
 			return op1ScalarMul.Equal(&op1MultiExp)
 		},
@@ -256,7 +256,7 @@ func BenchmarkManyMultiExp{{ toUpper $.PointName }}Reference(b *testing.B) {
 func fillBenchBases{{ toUpper $.PointName }}(samplePoints []{{ $.TAffine }}) {
 	var r big.Int
 	r.SetString("340444420969191673093399857471996460938405", 10)
-	samplePoints[0].ScalarMultiplication(&samplePoints[0], &r)
+	samplePoints[0].ScalarMul(&samplePoints[0], &r)
 	one := samplePoints[0].X
 	one.SetOne()
diff --git a/internal/generator/ecc/template/tests/point.go.tmpl b/internal/generator/ecc/template/tests/point.go.tmpl
index 3727be291..632715e26 100644
--- a/internal/generator/ecc/template/tests/point.go.tmpl
+++ b/internal/generator/ecc/template/tests/point.go.tmpl
@@ -136,7 +136,7 @@ func Test{{ $TAffine }}IsOnCurve(t *testing.T) {
 			var op1, op2 {{ $TJacobian }}
 			op1 = fuzz{{ $TJacobian }}(&{{.PointName}}Gen, a)
 			_r := fr.Modulus()
-			op2.ScalarMultiplication(&op1, _r)
+			op2.ScalarMul(&op1, _r)
 			return op1.IsInSubGroup() && op2.Z.IsZero()
 		},
 		{{$fuzzer}},
@@ -393,12 +393,12 @@ func Test{{ $TAffine }}Ops(t *testing.T) {
 			var scalar, blindedScalar, rminusone big.Int
 			var op1, op2, op3, gneg {{ $TJacobian }}
 			rminusone.SetUint64(1).Sub(r, &rminusone)
-			op3.ScalarMultiplication(&{{.PointName}}Gen, &rminusone)
+			op3.ScalarMul(&{{.PointName}}Gen, &rminusone)
 			gneg.Neg(&{{.PointName}}Gen)
 			s.ToBigIntRegular(&scalar)
 			blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar)
-			op1.ScalarMultiplication(&{{.PointName}}Gen, &scalar)
-			op2.ScalarMultiplication(&{{.PointName}}Gen, &blindedScalar)
+			op1.ScalarMul(&{{.PointName}}Gen, &scalar)
+			op2.ScalarMul(&{{.PointName}}Gen, &blindedScalar)
 			return op1.Equal(&op2) && g.Equal(&{{.PointName}}Infinity) && !op1.Equal(&{{.PointName}}Infinity) && gneg.Equal(&op3)
@@ -479,7 +479,7 @@ func Test{{ $TAffine }}CofactorCleaning(t *testing.T) {
 }
 {{end}}
-func Test{{ $TAffine }}BatchScalarMultiplication(t *testing.T) {
+func Test{{ $TAffine }}BatchScalarMul(t *testing.T) {
 	parameters := gopter.DefaultTestParameters()
 	if testing.Short() {
@@ -495,7 +495,7 @@ func Test{{ $TAffine }}BatchScalarMultiplication(t *testing.T) {
 	// size of the multiExps
 	const nbSamples = 10
-	properties.Property("[{{ toUpper .Name }}] BatchScalarMultiplication should be consistent with individual scalar multiplications", prop.ForAll(
+	properties.Property("[{{ toUpper .Name }}] BatchScalarMul should be consistent with individual scalar multiplications", prop.ForAll(
 		func(mixer fr.Element) bool {
 			// mixer ensures that all the words of a fpElement are set
 			var sampleScalars [nbSamples]fr.Element
@@ -506,7 +506,7 @@ func Test{{ $TAffine }}BatchScalarMultiplication(t *testing.T) {
 					FromMont()
 			}
-			result := BatchScalarMultiplication{{ toUpper .PointName }}(&{{.PointName}}GenAff, sampleScalars[:])
+			result := BatchScalarMul{{ toUpper .PointName }}(&{{.PointName}}GenAff, sampleScalars[:])
 			if len(result) != len(sampleScalars) {
 				return false
@@ -565,7 +565,7 @@ func Benchmark{{ $TAffine }}BatchScalarMul(b *testing.B) {
 		b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) {
 			b.ResetTimer()
 			for j := 0; j < b.N; j++ {
-				_ = BatchScalarMultiplication{{ toUpper .PointName }}(&{{.PointName}}GenAff, sampleScalars[:using])
+				_ = BatchScalarMul{{ toUpper .PointName }}(&{{.PointName}}GenAff, sampleScalars[:using])
 			}
 		})
 	}
diff --git a/internal/generator/kzg/template/kzg.go.tmpl b/internal/generator/kzg/template/kzg.go.tmpl
index 81c8d87f9..ce70616eb 100644
--- a/internal/generator/kzg/template/kzg.go.tmpl
+++ b/internal/generator/kzg/template/kzg.go.tmpl
@@ -59,7 +59,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) {
 	_, _, gen1Aff, gen2Aff := {{ .CurvePackage }}.Generators()
 	srs.G1[0] = gen1Aff
 	srs.G2[0] = gen2Aff
-	srs.G2[1].ScalarMultiplication(&gen2Aff, bAlpha)
+	srs.G2[1].ScalarMul(&gen2Aff, bAlpha)
 	alphas := make([]fr.Element, size-1)
 	alphas[0] = alpha
@@ -69,7 +69,7 @@ func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) {
 	for i := 0; i < len(alphas); i++ {
 		alphas[i].FromMont()
 	}
-	g1s := {{ .CurvePackage }}.BatchScalarMultiplicationG1(&gen1Aff, alphas)
+	g1s := {{ .CurvePackage }}.BatchScalarMulG1(&gen1Aff, alphas)
 	copy(srs.G1[1:], g1s)
 	return &srs, nil
@@ -151,16 +151,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) {
 func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error {
 	// [f(a)]G₁
-	var claimedValueG1Aff {{ .CurvePackage }}.G1Affine
+	var claimedValueG1Aff {{ .CurvePackage }}.G1Jac
 	var claimedValueBigInt big.Int
 	proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt)
-	claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt)
+	claimedValueG1Aff.ScalarMulUnconverted(&srs.G1[0], &claimedValueBigInt)
 	// [f(α) - f(a)]G₁
-	var fminusfaG1Jac, tmpG1Jac {{ .CurvePackage }}.G1Jac
+	var fminusfaG1Jac {{ .CurvePackage }}.G1Jac
 	fminusfaG1Jac.FromAffine(commitment)
-	tmpG1Jac.FromAffine(&claimedValueG1Aff)
-	fminusfaG1Jac.SubAssign(&tmpG1Jac)
+	fminusfaG1Jac.SubAssign(&claimedValueG1Aff)
 	// [-H(α)]G₁
 	var negH {{ .CurvePackage }}.G1Affine
@@ -172,7 +171,7 @@ func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS)
 	point.ToBigIntRegular(&pointBigInt)
 	genG2Jac.FromAffine(&srs.G2[0])
 	alphaG2Jac.FromAffine(&srs.G2[1])
-	alphaMinusaG2Jac.ScalarMultiplication(&genG2Jac, &pointBigInt).
+	alphaMinusaG2Jac.ScalarMul(&genG2Jac, &pointBigInt).
 		Neg(&alphaMinusaG2Jac).
 		AddAssign(&alphaG2Jac)
@@ -401,7 +400,7 @@ func BatchVerifyMultiPoints(digests []Digest, proofs []OpeningProof, points []fr
 	var foldedEvalsCommit {{ .CurvePackage }}.G1Affine
 	var foldedEvalsBigInt big.Int
 	foldedEvals.ToBigIntRegular(&foldedEvalsBigInt)
-	foldedEvalsCommit.ScalarMultiplication(&srs.G1[0], &foldedEvalsBigInt)
+	foldedEvalsCommit.ScalarMul(&srs.G1[0], &foldedEvalsBigInt)
 	// compute foldedDigests = ∑ᵢλᵢ[fᵢ(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁
 	foldedDigests.Sub(&foldedDigests, &foldedEvalsCommit)
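
Note: the Verify hunk above is the motivation for ScalarMulUnconverted. Previously [f(a)]G₁ was computed into a temporary affine point and then converted back with FromAffine before the subtraction; with the Jacobian-returning variant that round-trip disappears. A reduced sketch of just that step for bls12-377 follows; the commitment and claimed value are placeholders, not a full KZG verification.

package main

import (
	"math/big"

	bls12377 "github.com/consensys/gnark-crypto/ecc/bls12-377"
	"github.com/consensys/gnark-crypto/ecc/bls12-377/fr"
)

func main() {
	_, _, g1Aff, _ := bls12377.Generators()

	// stand-in for the commitment [f(α)]G₁
	var commitment bls12377.G1Affine
	commitment.ScalarMul(&g1Aff, big.NewInt(7))

	// stand-in for the claimed evaluation f(a)
	var claimedValue fr.Element
	claimedValue.SetUint64(3)
	var claimedValueBigInt big.Int
	claimedValue.ToBigIntRegular(&claimedValueBigInt)

	// new path: affine base in, Jacobian result out, no temporary affine
	// point and no extra FromAffine before the subtraction
	var claimedValueG1 bls12377.G1Jac
	claimedValueG1.ScalarMulUnconverted(&g1Aff, &claimedValueBigInt)

	// [f(α) - f(a)]G₁, entirely in Jacobian coordinates
	var fminusfaG1 bls12377.G1Jac
	fminusfaG1.FromAffine(&commitment)
	fminusfaG1.SubAssign(&claimedValueG1)
}
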
diff --git a/internal/generator/kzg/template/kzg.test.go.tmpl b/internal/generator/kzg/template/kzg.test.go.tmpl
index 1f7ba7773..5e54309ad 100644
--- a/internal/generator/kzg/template/kzg.test.go.tmpl
+++ b/internal/generator/kzg/template/kzg.test.go.tmpl
@@ -112,7 +112,7 @@ func TestCommit(t *testing.T) {
 	fx.ToBigIntRegular(&fxbi)
 	var manualCommit {{ .CurvePackage }}.G1Affine
 	manualCommit.Set(&testSRS.G1[0])
-	manualCommit.ScalarMultiplication(&manualCommit, &fxbi)
+	manualCommit.ScalarMul(&manualCommit, &fxbi)
 	// compare both results
 	if !kzgCommit.Equal(&manualCommit) {
diff --git a/internal/generator/pairing/template/tests/pairing.go.tmpl b/internal/generator/pairing/template/tests/pairing.go.tmpl
index a8209b7ac..585b45101 100644
--- a/internal/generator/pairing/template/tests/pairing.go.tmpl
+++ b/internal/generator/pairing/template/tests/pairing.go.tmpl
@@ -118,8 +118,8 @@ func TestPairing(t *testing.T) {
 			b.ToBigIntRegular(&bbigint)
 			ab.Mul(&abigint, &bbigint)
-			ag1.ScalarMultiplication(&g1GenAff, &abigint)
-			bg2.ScalarMultiplication(&g2GenAff, &bbigint)
+			ag1.ScalarMul(&g1GenAff, &abigint)
+			bg2.ScalarMul(&g2GenAff, &bbigint)
 			res, _ = Pair([]G1Affine{g1GenAff}, []G2Affine{g2GenAff})
 			resa, _ = Pair([]G1Affine{ag1}, []G2Affine{g2GenAff})
@@ -187,8 +187,8 @@ func TestMillerLoop(t *testing.T) {
 			a.ToBigIntRegular(&abigint)
 			b.ToBigIntRegular(&bbigint)
-			ag1.ScalarMultiplication(&g1GenAff, &abigint)
-			bg2.ScalarMultiplication(&g2GenAff, &bbigint)
+			ag1.ScalarMul(&g1GenAff, &abigint)
+			bg2.ScalarMul(&g2GenAff, &bbigint)
 			P0 := []G1Affine{g1GenAff}
 			P1 := []G1Affine{ag1}
@@ -230,8 +230,8 @@ func TestMillerLoop(t *testing.T) {
 			a.ToBigIntRegular(&abigint)
 			b.ToBigIntRegular(&bbigint)
-			ag1.ScalarMultiplication(&g1GenAff, &abigint)
-			bg2.ScalarMultiplication(&g2GenAff, &bbigint)
+			ag1.ScalarMul(&g1GenAff, &abigint)
+			bg2.ScalarMul(&g2GenAff, &bbigint)
 			g1Inf.FromJacobian(&g1Infinity)
 			g2Inf.FromJacobian(&g2Infinity)
@@ -268,8 +268,8 @@ func TestMillerLoop(t *testing.T) {
 			a.ToBigIntRegular(&abigint)
 			b.ToBigIntRegular(&bbigint)
-			ag1.ScalarMultiplication(&g1GenAff, &abigint)
-			bg2.ScalarMultiplication(&g2GenAff, &bbigint)
+			ag1.ScalarMul(&g1GenAff, &abigint)
+			bg2.ScalarMul(&g2GenAff, &bbigint)
 			res, _ := Pair([]G1Affine{ag1}, []G2Affine{bg2})
diff --git a/internal/generator/plookup/template/table.go.tmpl b/internal/generator/plookup/template/table.go.tmpl
index b15a297e9..d55ecd98a 100644
--- a/internal/generator/plookup/template/table.go.tmpl
+++ b/internal/generator/plookup/template/table.go.tmpl
@@ -191,9 +191,9 @@ func VerifyLookupTables(srs *kzg.SRS, proof ProofLookupTables) error {
 	var blambda big.Int
 	lambda.ToBigIntRegular(&blambda)
 	for i := nbRows - 2; i >= 0; i-- {
-		comf.ScalarMultiplication(&comf, &blambda).
+		comf.ScalarMul(&comf, &blambda).
 			Add(&comf, &proof.fs[i])
-		comt.ScalarMultiplication(&comt, &blambda).
+		comt.ScalarMul(&comt, &blambda).
 			Add(&comt, &proof.ts[i])
 	}
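
Note: the plookup hunk above folds the per-row commitments with a Horner walk in λ, i.e. com = ∑ᵢ λⁱ ⋅ comᵢ, using the renamed affine ScalarMul/Add chain. A standalone sketch of that folding pattern for bls12-377; the commitments here are synthetic placeholders, only the folding shape mirrors VerifyLookupTables.

package main

import (
	"fmt"
	"math/big"

	bls12377 "github.com/consensys/gnark-crypto/ecc/bls12-377"
	"github.com/consensys/gnark-crypto/ecc/bls12-377/fr"
)

func main() {
	_, _, g1Aff, _ := bls12377.Generators()

	// synthetic commitments com_0 .. com_3
	coms := make([]bls12377.G1Affine, 4)
	for i := range coms {
		coms[i].ScalarMul(&g1Aff, big.NewInt(int64(i+1)))
	}

	// folding challenge λ, converted out of Montgomery form for big.Int use
	var lambda fr.Element
	lambda.SetUint64(5)
	var blambda big.Int
	lambda.ToBigIntRegular(&blambda)

	// Horner: com = ((com_3·λ + com_2)·λ + com_1)·λ + com_0
	com := coms[len(coms)-1]
	for i := len(coms) - 2; i >= 0; i-- {
		com.ScalarMul(&com, &blambda).
			Add(&com, &coms[i])
	}
	fmt.Println(com.IsOnCurve())
}
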