diff --git a/Cargo.lock b/Cargo.lock index 2065138b5a..04e9994ccb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -50,8 +50,9 @@ dependencies = [ [[package]] name = "acvm-backend-barretenberg" -version = "0.1.2" -source = "git+https://github.com/noir-lang/acvm-backend-barretenberg?rev=4358d3b9e8cd98d88a78dda3337e80e90668378e#4358d3b9e8cd98d88a78dda3337e80e90668378e" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa434008757491ec90e630796056de6f76114a04f533dd03c1a748c204503a31" dependencies = [ "acvm", "barretenberg-sys", @@ -62,6 +63,7 @@ dependencies = [ "reqwest", "rust-embed", "serde", + "serde-big-array", "thiserror", "wasmer", ] @@ -2748,6 +2750,15 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-big-array" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" +dependencies = [ + "serde", +] + [[package]] name = "serde_bytes" version = "0.11.9" diff --git a/Cargo.toml b/Cargo.toml index 5b6ff322f4..fea173505b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,6 +50,3 @@ toml = "0.7.2" url = "2.2.0" wasm-bindgen = { version = "0.2.83", features = ["serde-serialize"] } wasm-bindgen-test = "0.3.33" - -[patch.crates-io] -acvm-backend-barretenberg = { git = "https://github.com/noir-lang/acvm-backend-barretenberg", rev = "4358d3b9e8cd98d88a78dda3337e80e90668378e" } diff --git a/crates/nargo_cli/Cargo.toml b/crates/nargo_cli/Cargo.toml index 4b0c96e0eb..829f6498f9 100644 --- a/crates/nargo_cli/Cargo.toml +++ b/crates/nargo_cli/Cargo.toml @@ -38,7 +38,7 @@ color-eyre = "0.6.2" tokio = "1.0" # Backends -acvm-backend-barretenberg = { version = "0.1.2", default-features = false } +acvm-backend-barretenberg = { version = "0.2.0", default-features = false } [dev-dependencies] tempdir = "0.3.7" diff --git a/crates/nargo_cli/src/cli/codegen_verifier_cmd.rs b/crates/nargo_cli/src/cli/codegen_verifier_cmd.rs 
index 5f7358d6c2..b65d64bb91 100644 --- a/crates/nargo_cli/src/cli/codegen_verifier_cmd.rs +++ b/crates/nargo_cli/src/cli/codegen_verifier_cmd.rs @@ -1,6 +1,11 @@ use super::fs::{ - common_reference_string::get_common_reference_string, create_named_dir, - program::read_program_from_file, write_to_file, + common_reference_string::{ + read_cached_common_reference_string, update_common_reference_string, + write_cached_common_reference_string, + }, + create_named_dir, + program::read_program_from_file, + write_to_file, }; use super::NargoConfig; use crate::{ @@ -32,18 +37,25 @@ pub(crate) fn run( .circuit_name .map(|circuit_name| config.program_dir.join(TARGET_DIR).join(circuit_name)); + let common_reference_string = read_cached_common_reference_string(); + let (common_reference_string, preprocessed_program) = match circuit_build_path { Some(circuit_build_path) => { let program = read_program_from_file(circuit_build_path)?; - let common_reference_string = get_common_reference_string(backend, &program.bytecode) - .map_err(CliError::CommonReferenceStringError)?; + let common_reference_string = update_common_reference_string( + backend, + &common_reference_string, + &program.bytecode, + ) + .map_err(CliError::CommonReferenceStringError)?; (common_reference_string, program) } None => { let program = compile_circuit(backend, config.program_dir.as_ref(), &args.compile_options)?; - let common_reference_string = get_common_reference_string(backend, &program.circuit) - .map_err(CliError::CommonReferenceStringError)?; + let common_reference_string = + update_common_reference_string(backend, &common_reference_string, &program.circuit) + .map_err(CliError::CommonReferenceStringError)?; let program = preprocess_program(backend, &common_reference_string, program) .map_err(CliError::ProofSystemCompilerError)?; (common_reference_string, program) @@ -54,6 +66,8 @@ pub(crate) fn run( codegen_verifier(backend, &common_reference_string, &preprocessed_program.verification_key) 
.map_err(CliError::SmartContractError)?; + write_cached_common_reference_string(&common_reference_string); + let contract_dir = config.program_dir.join(CONTRACT_DIR); create_named_dir(&contract_dir, "contract"); let contract_path = contract_dir.join("plonk_vk").with_extension("sol"); diff --git a/crates/nargo_cli/src/cli/compile_cmd.rs b/crates/nargo_cli/src/cli/compile_cmd.rs index dc90617b89..8c63fab8cb 100644 --- a/crates/nargo_cli/src/cli/compile_cmd.rs +++ b/crates/nargo_cli/src/cli/compile_cmd.rs @@ -12,7 +12,10 @@ use crate::resolver::DependencyResolutionError; use crate::{constants::TARGET_DIR, errors::CliError, resolver::Resolver}; use super::fs::{ - common_reference_string::get_common_reference_string, + common_reference_string::{ + read_cached_common_reference_string, update_common_reference_string, + write_cached_common_reference_string, + }, program::{save_contract_to_file, save_program_to_file}, }; use super::NargoConfig; @@ -41,12 +44,15 @@ pub(crate) fn run( ) -> Result<(), CliError> { let circuit_dir = config.program_dir.join(TARGET_DIR); + let mut common_reference_string = read_cached_common_reference_string(); + // If contracts is set we're compiling every function in a 'contract' rather than just 'main'. if args.contracts { let mut driver = setup_driver(backend, &config.program_dir)?; let compiled_contracts = driver .compile_contracts(&args.compile_options) .map_err(|_| CliError::CompilationError)?; + // TODO: I wonder if it is incorrect for nargo-core to know anything about contracts. // As can be seen here, It seems like a leaky abstraction where ContractFunctions (essentially CompiledPrograms) // are compiled via nargo-core and then the PreprocessedContract is constructed here. 
@@ -54,9 +60,13 @@ pub(crate) fn run( let preprocessed_contracts: Result, CliError> = try_vecmap(compiled_contracts, |contract| { let preprocessed_contract_functions = try_vecmap(contract.functions, |func| { - let common_reference_string = - get_common_reference_string(backend, &func.bytecode) - .map_err(CliError::CommonReferenceStringError)?; + common_reference_string = update_common_reference_string( + backend, + &common_reference_string, + &func.bytecode, + ) + .map_err(CliError::CommonReferenceStringError)?; + preprocess_contract_function(backend, &common_reference_string, func) .map_err(CliError::ProofSystemCompilerError) })?; @@ -76,12 +86,17 @@ pub(crate) fn run( } } else { let program = compile_circuit(backend, &config.program_dir, &args.compile_options)?; - let common_reference_string = get_common_reference_string(backend, &program.circuit) - .map_err(CliError::CommonReferenceStringError)?; + common_reference_string = + update_common_reference_string(backend, &common_reference_string, &program.circuit) + .map_err(CliError::CommonReferenceStringError)?; + let preprocessed_program = preprocess_program(backend, &common_reference_string, program) .map_err(CliError::ProofSystemCompilerError)?; save_program_to_file(&preprocessed_program, &args.circuit_name, circuit_dir); } + + write_cached_common_reference_string(&common_reference_string); + Ok(()) } diff --git a/crates/nargo_cli/src/cli/fs/common_reference_string.rs b/crates/nargo_cli/src/cli/fs/common_reference_string.rs index a8bfdb50ec..15991f6d0c 100644 --- a/crates/nargo_cli/src/cli/fs/common_reference_string.rs +++ b/crates/nargo_cli/src/cli/fs/common_reference_string.rs @@ -9,34 +9,47 @@ const BACKEND_IDENTIFIER: &str = "acvm-backend-barretenberg"; const TRANSCRIPT_NAME: &str = "common-reference-string.bin"; fn common_reference_string_location() -> PathBuf { - let cache_dir = match env::var("BACKEND_CACHE_DIR") { + let cache_dir = match env::var("NARGO_BACKEND_CACHE_DIR") { Ok(cache_dir) => 
PathBuf::from(cache_dir), Err(_) => dirs::home_dir().unwrap().join(".nargo").join("backends"), }; cache_dir.join(BACKEND_IDENTIFIER).join(TRANSCRIPT_NAME) } -pub(crate) fn get_common_reference_string( +pub(crate) fn read_cached_common_reference_string() -> Vec { + let crs_path = common_reference_string_location(); + + // TODO: Implement checksum + match std::fs::read(crs_path) { + Ok(common_reference_string) => common_reference_string, + Err(_) => vec![], + } +} + +pub(crate) fn update_common_reference_string( backend: &B, + common_reference_string: &[u8], circuit: &Circuit, ) -> Result, B::Error> { use tokio::runtime::Builder; - let crs_path = common_reference_string_location(); - let runtime = Builder::new_current_thread().enable_all().build().unwrap(); // TODO: Implement retries - let crs = match std::fs::read(&crs_path) { - // If the read data is empty, we don't have a CRS and need to generate one - Ok(common_reference_string) if !common_reference_string.is_empty() => runtime - .block_on(backend.update_common_reference_string(common_reference_string, circuit))?, - Ok(_) | Err(_) => runtime.block_on(backend.generate_common_reference_string(circuit))?, + // If the read data is empty, we don't have a CRS and need to generate one + let fut = if common_reference_string.is_empty() { + backend.generate_common_reference_string(circuit) + } else { + backend.update_common_reference_string(common_reference_string.to_vec(), circuit) }; - create_named_dir(crs_path.parent().unwrap(), "crs"); + runtime.block_on(fut) +} - write_to_file(crs.as_slice(), &crs_path); +pub(crate) fn write_cached_common_reference_string(common_reference_string: &[u8]) { + let crs_path = common_reference_string_location(); + + create_named_dir(crs_path.parent().unwrap(), "crs"); - Ok(crs) + write_to_file(common_reference_string, &crs_path); } diff --git a/crates/nargo_cli/src/cli/prove_cmd.rs b/crates/nargo_cli/src/cli/prove_cmd.rs index 7589dd4bba..1238dbd9f8 100644 --- 
a/crates/nargo_cli/src/cli/prove_cmd.rs +++ b/crates/nargo_cli/src/cli/prove_cmd.rs @@ -11,7 +11,10 @@ use super::NargoConfig; use super::{ compile_cmd::compile_circuit, fs::{ - common_reference_string::get_common_reference_string, + common_reference_string::{ + read_cached_common_reference_string, update_common_reference_string, + write_cached_common_reference_string, + }, inputs::{read_inputs_from_file, write_inputs_to_file}, program::read_program_from_file, proof::save_proof_to_dir, @@ -73,23 +76,32 @@ pub(crate) fn prove_with_path>( check_proof: bool, compile_options: &CompileOptions, ) -> Result, CliError> { + let common_reference_string = read_cached_common_reference_string(); + let (common_reference_string, preprocessed_program) = match circuit_build_path { Some(circuit_build_path) => { let program = read_program_from_file(circuit_build_path)?; - let common_reference_string = get_common_reference_string(backend, &program.bytecode) - .map_err(CliError::CommonReferenceStringError)?; + let common_reference_string = update_common_reference_string( + backend, + &common_reference_string, + &program.bytecode, + ) + .map_err(CliError::CommonReferenceStringError)?; (common_reference_string, program) } None => { let program = compile_circuit(backend, program_dir.as_ref(), compile_options)?; - let common_reference_string = get_common_reference_string(backend, &program.circuit) - .map_err(CliError::CommonReferenceStringError)?; + let common_reference_string = + update_common_reference_string(backend, &common_reference_string, &program.circuit) + .map_err(CliError::CommonReferenceStringError)?; let program = preprocess_program(backend, &common_reference_string, program) .map_err(CliError::ProofSystemCompilerError)?; (common_reference_string, program) } }; + write_cached_common_reference_string(&common_reference_string); + let PreprocessedProgram { abi, bytecode, proving_key, verification_key, .. 
} = preprocessed_program; diff --git a/crates/nargo_cli/src/cli/verify_cmd.rs b/crates/nargo_cli/src/cli/verify_cmd.rs index 62333ff0bd..26eb39f3f8 100644 --- a/crates/nargo_cli/src/cli/verify_cmd.rs +++ b/crates/nargo_cli/src/cli/verify_cmd.rs @@ -1,7 +1,12 @@ use super::compile_cmd::compile_circuit; use super::fs::{ - common_reference_string::get_common_reference_string, inputs::read_inputs_from_file, - load_hex_data, program::read_program_from_file, + common_reference_string::{ + read_cached_common_reference_string, update_common_reference_string, + write_cached_common_reference_string, + }, + inputs::read_inputs_from_file, + load_hex_data, + program::read_program_from_file, }; use super::NargoConfig; use crate::{ @@ -58,23 +63,32 @@ fn verify_with_path>( circuit_build_path: Option

, compile_options: &CompileOptions, ) -> Result<(), CliError> { + let common_reference_string = read_cached_common_reference_string(); + let (common_reference_string, preprocessed_program) = match circuit_build_path { Some(circuit_build_path) => { let program = read_program_from_file(circuit_build_path)?; - let common_reference_string = get_common_reference_string(backend, &program.bytecode) - .map_err(CliError::CommonReferenceStringError)?; + let common_reference_string = update_common_reference_string( + backend, + &common_reference_string, + &program.bytecode, + ) + .map_err(CliError::CommonReferenceStringError)?; (common_reference_string, program) } None => { let program = compile_circuit(backend, program_dir.as_ref(), compile_options)?; - let common_reference_string = get_common_reference_string(backend, &program.circuit) - .map_err(CliError::CommonReferenceStringError)?; + let common_reference_string = + update_common_reference_string(backend, &common_reference_string, &program.circuit) + .map_err(CliError::CommonReferenceStringError)?; let program = preprocess_program(backend, &common_reference_string, program) .map_err(CliError::ProofSystemCompilerError)?; (common_reference_string, program) } }; + write_cached_common_reference_string(&common_reference_string); + let PreprocessedProgram { abi, bytecode, verification_key, .. } = preprocessed_program; // Load public inputs (if any) from `VERIFIER_INPUT_FILE`. 
diff --git a/crates/nargo_cli/tests/test_data/eddsa/Nargo.toml b/crates/nargo_cli/tests/test_data/eddsa/Nargo.toml new file mode 100644 index 0000000000..48db376fb1 --- /dev/null +++ b/crates/nargo_cli/tests/test_data/eddsa/Nargo.toml @@ -0,0 +1,5 @@ +[package] +authors = [""] +compiler_version = "0.3.2" + +[dependencies] \ No newline at end of file diff --git a/crates/nargo_cli/tests/test_data/eddsa/Prover.toml b/crates/nargo_cli/tests/test_data/eddsa/Prover.toml new file mode 100644 index 0000000000..53555202ca --- /dev/null +++ b/crates/nargo_cli/tests/test_data/eddsa/Prover.toml @@ -0,0 +1,3 @@ +_priv_key_a = 123 +_priv_key_b = 456 +msg = 789 diff --git a/crates/nargo_cli/tests/test_data/eddsa/src/main.nr b/crates/nargo_cli/tests/test_data/eddsa/src/main.nr new file mode 100644 index 0000000000..8de38011aa --- /dev/null +++ b/crates/nargo_cli/tests/test_data/eddsa/src/main.nr @@ -0,0 +1,55 @@ +use dep::std::compat; +use dep::std::ec::consts::te::baby_jubjub; +use dep::std::hash; +use dep::std::eddsa::eddsa_poseidon_verify; +use dep::std; + +fn main(msg: pub Field, _priv_key_a: Field, _priv_key_b: Field) { + // Skip this test for non-bn254 backends + if compat::is_bn254() { + let bjj = baby_jubjub(); + + let pub_key_a = bjj.curve.mul(_priv_key_a, bjj.curve.gen); + // let pub_key_b = bjj.curve.mul(_priv_key_b, bjj.curve.gen); + + // Manually computed as fields can't use modulo. Importantly the commitment is within + // the subgroup order. Note that choice of hash is flexible for this step.
+ // let r_a = hash::pedersen([_priv_key_a, msg])[0] % bjj.suborder; // modulus computed manually + let r_a = 1414770703199880747815475415092878800081323795074043628810774576767372531818; + // let r_b = hash::pedersen([_priv_key_b, msg])[0] % bjj.suborder; // modulus computed manually + let r_b = 571799555715456644614141527517766533395606396271089506978608487688924659618; + + let r8_a = bjj.curve.mul(r_a, bjj.base8); + let r8_b = bjj.curve.mul(r_b, bjj.base8); + + // let h_a: [Field; 6] = hash::poseidon::bn254::hash_5([ + // r8_a.x, + // r8_a.y, + // pub_key_a.x, + // pub_key_a.y, + // msg, + // ]); + + // let h_b: [Field; 6] = hash::poseidon::bn254::hash_5([ + // r8_b.x, + // r8_b.y, + // pub_key_b.x, + // pub_key_b.y, + // msg, + // ]); + + // let s_a = (r_a + _priv_key_a * h_a) % bjj.suborder; // modulus computed manually + let s_a = 30333430637424319196043722294837632681219980330991241982145549329256671548; + // let s_b = (r_b + _priv_key_b * h_b) % bjj.suborder; // modulus computed manually + let s_b = 1646085314320208098241070054368798527940102577261034947654839408482102287019; + + // User A verifies their signature over the message + assert(eddsa_poseidon_verify(pub_key_a.x, pub_key_a.y, s_a, r8_a.x, r8_a.y, msg)); + + // User B's signature over the message can't be used with user A's pub key + assert(!eddsa_poseidon_verify(pub_key_a.x, pub_key_a.y, s_b, r8_b.x, r8_b.y, msg)); + + // User A's signature over the message can't be used with another message + assert(!eddsa_poseidon_verify(pub_key_a.x, pub_key_a.y, s_a, r8_a.x, r8_a.y, msg + 1)); + } +} \ No newline at end of file diff --git a/crates/noirc_driver/src/lib.rs b/crates/noirc_driver/src/lib.rs index d82f6dd6f0..444f209a42 100644 --- a/crates/noirc_driver/src/lib.rs +++ b/crates/noirc_driver/src/lib.rs @@ -68,16 +68,7 @@ impl Default for CompileOptions { impl Driver { pub fn new(language: &Language, is_opcode_supported: Box bool>) -> Self { - let mut driver = - Driver { context: Context::default(), 
language: language.clone(), is_opcode_supported }; - - // We cannot pass in the real version of `is_opcode_supported` here as we cannot clone the boxed closure. - // TODO(#1102): remove the requirement for the `NodeInterner` to know about which opcodes the backend supports. - #[allow(deprecated)] - let default_is_opcode_supported = - Box::new(acvm::default_is_opcode_supported(language.clone())); - driver.context.def_interner.set_opcode_support(default_is_opcode_supported); - driver + Driver { context: Context::default(), language: language.clone(), is_opcode_supported } } // This is here for backwards compatibility diff --git a/crates/noirc_evaluator/src/ssa_refactor/ir/basic_block.rs b/crates/noirc_evaluator/src/ssa_refactor/ir/basic_block.rs index 30526bc296..c68efa4412 100644 --- a/crates/noirc_evaluator/src/ssa_refactor/ir/basic_block.rs +++ b/crates/noirc_evaluator/src/ssa_refactor/ir/basic_block.rs @@ -96,10 +96,8 @@ impl BasicBlock { /// Removes the given instruction from this block if present or panics otherwise. pub(crate) fn remove_instruction(&mut self, instruction: InstructionId) { - // Iterate in reverse here as an optimization since remove_instruction is most - // often called to remove instructions at the end of a block. let index = - self.instructions.iter().rev().position(|id| *id == instruction).unwrap_or_else(|| { + self.instructions.iter().position(|id| *id == instruction).unwrap_or_else(|| { panic!("remove_instruction: No such instruction {instruction:?} in block") }); self.instructions.remove(index); diff --git a/crates/noirc_evaluator/src/ssa_refactor/ir/dfg.rs b/crates/noirc_evaluator/src/ssa_refactor/ir/dfg.rs index fc15f3e216..f4f6004d41 100644 --- a/crates/noirc_evaluator/src/ssa_refactor/ir/dfg.rs +++ b/crates/noirc_evaluator/src/ssa_refactor/ir/dfg.rs @@ -269,6 +269,15 @@ impl DataFlowGraph { ) { self.blocks[block].set_terminator(terminator); } + + /// Replaces the value specified by the given ValueId with a new Value. 
+ /// + /// This is the preferred method to call for optimizations simplifying + /// values since other instructions referring to the same ValueId need + /// not be modified to refer to a new ValueId. + pub(crate) fn set_value(&mut self, value_id: ValueId, new_value: Value) { + self.values[value_id] = new_value; + } } impl std::ops::Index for DataFlowGraph { diff --git a/crates/noirc_evaluator/src/ssa_refactor/opt/mem2reg.rs b/crates/noirc_evaluator/src/ssa_refactor/opt/mem2reg.rs new file mode 100644 index 0000000000..a020230fda --- /dev/null +++ b/crates/noirc_evaluator/src/ssa_refactor/opt/mem2reg.rs @@ -0,0 +1,326 @@ +//! mem2reg implements a pass for promoting values stored in memory to values in registers where +//! possible. This is particularly important for converting our memory-based representation of +//! mutable variables into values that are easier to manipulate. +use std::collections::{BTreeMap, BTreeSet}; + +use crate::ssa_refactor::{ + ir::{ + basic_block::BasicBlockId, + constant::NumericConstantId, + dfg::DataFlowGraph, + instruction::{BinaryOp, Instruction, InstructionId}, + value::{Value, ValueId}, + }, + ssa_gen::Ssa, +}; + +impl Ssa { + /// Attempts to remove any load instructions that recover values that are already available in + /// scope, and attempts to remove stores that are subsequently redundant, as long as they are + /// not stores on memory that will be passed into a function call. + /// + /// This pass assumes that the whole program has been inlined into a single block, such that + /// we can be sure that store instructions cannot have side effects outside of this block + /// (apart from intrinsic function calls).
+ /// + /// This pass also assumes that constant folding has been run, such that all addresses given + /// as input to store/load instructions are represented as one of: + /// - a value that directly resolves to an allocate instruction + /// - a value that directly resolves to a binary add instruction which has an allocate + instruction and a numeric constant as its operands + pub(crate) fn mem2reg_final(mut self) -> Ssa { + let func = self.main_mut(); + assert_eq!(func.dfg.basic_blocks_iter().count(), 1); + let block_id = func.entry_block(); + PerBlockContext::new(&mut func.dfg, block_id).eliminate_store_load(); + self + } +} + +#[derive(PartialEq, PartialOrd, Eq, Ord)] +enum Address { + Zeroth(InstructionId), + Offset(InstructionId, NumericConstantId), +} + +impl Address { + fn alloc_id(&self) -> InstructionId { + match self { + Address::Zeroth(alloc_id) => *alloc_id, + Address::Offset(alloc_id, _) => *alloc_id, + } + } +} + +struct PerBlockContext<'dfg> { + dfg: &'dfg mut DataFlowGraph, + block_id: BasicBlockId, +} + +impl<'dfg> PerBlockContext<'dfg> { + fn new(dfg: &'dfg mut DataFlowGraph, block_id: BasicBlockId) -> Self { + PerBlockContext { dfg, block_id } + } + + // Attempts to remove redundant load & store instructions for constant addresses. Returns the + // count of remaining store instructions. + // + // This method assumes the entire program is now represented in a single block (minus any + // intrinsic function calls). Therefore we needn't be concerned with store instructions having + // an effect beyond the scope of this block. + fn eliminate_store_load(&mut self) -> u32 { + let mut store_count: u32 = 0; + let mut last_stores: BTreeMap = BTreeMap::new(); + let mut loads_to_substitute: Vec<(InstructionId, Value)> = Vec::new(); + let mut store_ids: Vec = Vec::new(); + let mut failed_substitutes: BTreeSet

= BTreeSet::new(); + let mut alloc_ids_in_calls: BTreeSet = BTreeSet::new(); + + let block = &self.dfg[self.block_id]; + for instruction_id in block.instructions() { + match &self.dfg[*instruction_id] { + Instruction::Store { address, value } => { + store_count += 1; + if let Some(address) = self.try_const_address(*address) { + // We can only track the address if it is a constant offset from an + // allocation. A previous constant folding pass should make such addresses + // possible to identify. + last_stores.insert(address, *value); + } + // TODO: Consider if it's worth falling back to storing addresses by their + // value id such that we can shallowly check for dynamic address reuse. + store_ids.push(*instruction_id); + } + Instruction::Load { address } => { + if let Some(address) = self.try_const_address(*address) { + if let Some(last_value) = last_stores.get(&address) { + let last_value = self.dfg[*last_value]; + loads_to_substitute.push((*instruction_id, last_value)); + } else { + failed_substitutes.insert(address); + } + } + } + Instruction::Call { arguments, .. } => { + for arg in arguments { + if let Some(address) = self.try_const_address(*arg) { + alloc_ids_in_calls.insert(address.alloc_id()); + } + } + } + _ => { + // Nothing to do + } + } + } + + // Substitute load result values + for (instruction_id, new_value) in &loads_to_substitute { + let result_value = *self + .dfg + .instruction_results(*instruction_id) + .first() + .expect("ICE: Load instructions should have single result"); + self.dfg.set_value(result_value, *new_value); + } + + // Delete load instructions + // TODO: should we let DCE do this instead? + let block = &mut self.dfg[self.block_id]; + for (instruction_id, _) in loads_to_substitute { + block.remove_instruction(instruction_id); + } + + // Scan for unused stores + let mut stores_to_remove: Vec = Vec::new(); + for instruction_id in store_ids { + let address = match &self.dfg[instruction_id] { + Instruction::Store { address, ..
} => *address, + _ => unreachable!("store_ids should contain only store instructions"), + }; + if let Some(address) = self.try_const_address(address) { + if !failed_substitutes.contains(&address) + && !alloc_ids_in_calls.contains(&address.alloc_id()) + { + stores_to_remove.push(instruction_id); + } + } + } + + // Delete unused stores + let block = &mut self.dfg[self.block_id]; + for instruction_id in stores_to_remove { + store_count -= 1; + block.remove_instruction(instruction_id); + } + + store_count + } + + // Attempts to normalize the given value into a const address + fn try_const_address(&self, value_id: ValueId) -> Option
{ + let value = &self.dfg[value_id]; + let instruction_id = match value { + Value::Instruction { instruction, .. } => *instruction, + _ => return None, + }; + let instruction = &self.dfg[instruction_id]; + match instruction { + Instruction::Allocate { .. } => Some(Address::Zeroth(instruction_id)), + Instruction::Binary(binary) => { + if binary.operator != BinaryOp::Add { + return None; + } + let lhs = &self.dfg[binary.lhs]; + let rhs = &self.dfg[binary.rhs]; + self.try_const_address_offset(lhs, rhs) + .or_else(|| self.try_const_address_offset(rhs, lhs)) + } + _ => None, + } + } + + // Tries val1 as an allocation instruction id and val2 as a constant offset + fn try_const_address_offset(&self, val1: &Value, val2: &Value) -> Option
{ + let alloc_id = match val1 { + Value::Instruction { instruction, .. } => match &self.dfg[*instruction] { + Instruction::Allocate { .. } => *instruction, + _ => return None, + }, + _ => return None, + }; + if let Value::NumericConstant { constant, .. } = val2 { + Some(Address::Offset(alloc_id, *constant)) + } else { + None + } + } +} + +#[cfg(test)] +mod tests { + use acvm::FieldElement; + + use crate::ssa_refactor::{ + ir::{ + instruction::{BinaryOp, Instruction, Intrinsic, TerminatorInstruction}, + map::Id, + types::Type, + }, + ssa_builder::FunctionBuilder, + }; + + use super::PerBlockContext; + + #[test] + fn test_simple() { + // func() { + // block0(): + // v0 = alloc 2 + // v1 = add v0, Field 1 + // store v1, Field 1 + // v2 = add v0, Field 1 + // v3 = load v1 + // return v3 + + let func_id = Id::test_new(0); + let mut builder = FunctionBuilder::new("func".into(), func_id); + let v0 = builder.insert_allocate(2); + let const_one = builder.field_constant(FieldElement::one()); + let v1 = builder.insert_binary(v0, BinaryOp::Add, const_one); + builder.insert_store(v1, const_one); + // v2 is created internally by builder.insert_load + let v3 = builder.insert_load(v0, const_one, Type::field()); + builder.terminate_with_return(vec![v3]); + + let mut ssa = builder.finish(); + + let mut func = ssa.functions.remove(&func_id).unwrap(); + let block_id = func.entry_block(); + + let mut mem2reg_context = PerBlockContext::new(&mut func.dfg, block_id); + let remaining_stores = mem2reg_context.eliminate_store_load(); + + assert_eq!(remaining_stores, 0); + + let block = &func.dfg[block_id]; + let load_count = block + .instructions() + .iter() + .filter(|instruction_id| matches!(func.dfg[**instruction_id], Instruction::Load { .. })) + .count(); + assert_eq!(load_count, 0); + let store_count = block + .instructions() + .iter() + .filter(|instruction_id| { + matches!(func.dfg[**instruction_id], Instruction::Store { .. 
}) + }) + .count(); + assert_eq!(store_count, 0); + let ret_val_id = match block.terminator().unwrap() { + TerminatorInstruction::Return { return_values } => return_values.first().unwrap(), + _ => unreachable!(), + }; + assert_eq!(func.dfg[*ret_val_id], func.dfg[const_one]); + } + + #[test] + fn test_simple_with_call() { + // func() { + // block0(): + // v0 = alloc 2 + // v1 = add v0, Field 1 + // store v1, Field 1 + // v2 = add v0, Field 1 + // v3 = load v1 + // v4 = call f0, v0 + // return v3 + + let func_id = Id::test_new(0); + let mut builder = FunctionBuilder::new("func".into(), func_id); + let v0 = builder.insert_allocate(2); + let const_one = builder.field_constant(FieldElement::one()); + let v1 = builder.insert_binary(v0, BinaryOp::Add, const_one); + builder.insert_store(v1, const_one); + // v2 is created internally by builder.insert_load + let v3 = builder.insert_load(v0, const_one, Type::field()); + let f0 = builder.import_intrinsic_id(Intrinsic::Println); + builder.insert_call(f0, vec![v0], vec![Type::Unit]); + builder.terminate_with_return(vec![v3]); + + let mut ssa = builder.finish(); + + let mut func = ssa.functions.remove(&func_id).unwrap(); + let block_id = func.entry_block(); + + let mut mem2reg_context = PerBlockContext::new(&mut func.dfg, block_id); + let remaining_stores = mem2reg_context.eliminate_store_load(); + + assert_eq!( + remaining_stores, 1, + "Store cannot be removed as it affects intrinsic function call" + ); + + let block = &func.dfg[block_id]; + let load_count = block + .instructions() + .iter() + .filter(|instruction_id| matches!(func.dfg[**instruction_id], Instruction::Load { .. })) + .count(); + assert_eq!(load_count, 0); + let store_count = block + .instructions() + .iter() + .filter(|instruction_id| { + matches!(func.dfg[**instruction_id], Instruction::Store { .. 
}) + }) + .count(); + assert_eq!(store_count, 1); + let ret_val_id = match block.terminator().unwrap() { + TerminatorInstruction::Return { return_values } => return_values.first().unwrap(), + _ => unreachable!(), + }; + assert_eq!(func.dfg[*ret_val_id], func.dfg[const_one]); + } +} diff --git a/crates/noirc_evaluator/src/ssa_refactor/opt/mod.rs b/crates/noirc_evaluator/src/ssa_refactor/opt/mod.rs index 46ca7d443b..f4b2a6ca68 100644 --- a/crates/noirc_evaluator/src/ssa_refactor/opt/mod.rs +++ b/crates/noirc_evaluator/src/ssa_refactor/opt/mod.rs @@ -4,3 +4,4 @@ //! simpler form until the IR only has a single function remaining with 1 block within it. //! Generally, these passes are also expected to minimize the final amount of instructions. mod inlining; +mod mem2reg; diff --git a/crates/noirc_evaluator/src/ssa_refactor/ssa_gen/program.rs b/crates/noirc_evaluator/src/ssa_refactor/ssa_gen/program.rs index 7f4b9a8dd2..ba98c65850 100644 --- a/crates/noirc_evaluator/src/ssa_refactor/ssa_gen/program.rs +++ b/crates/noirc_evaluator/src/ssa_refactor/ssa_gen/program.rs @@ -33,6 +33,11 @@ impl Ssa { pub(crate) fn main(&self) -> &Function { &self.functions[&self.main_id] } + + /// Returns the entry-point function of the program as a mutable reference + pub(crate) fn main_mut(&mut self) -> &mut Function { + self.functions.get_mut(&self.main_id).expect("ICE: Ssa should have a main function") + } } impl Display for Ssa { diff --git a/crates/noirc_frontend/src/ast/function.rs b/crates/noirc_frontend/src/ast/function.rs index ad3594a23a..971d42be1b 100644 --- a/crates/noirc_frontend/src/ast/function.rs +++ b/crates/noirc_frontend/src/ast/function.rs @@ -76,7 +76,6 @@ impl From for NoirFunction { let kind = match fd.attribute { Some(Attribute::Builtin(_)) => FunctionKind::Builtin, Some(Attribute::Foreign(_)) => FunctionKind::LowLevel, - Some(Attribute::Alternative(_)) => FunctionKind::Normal, Some(Attribute::Test) => FunctionKind::Normal, None => FunctionKind::Normal, }; diff --git 
a/crates/noirc_frontend/src/hir/mod.rs b/crates/noirc_frontend/src/hir/mod.rs index 2ebabf8686..9142d2515c 100644 --- a/crates/noirc_frontend/src/hir/mod.rs +++ b/crates/noirc_frontend/src/hir/mod.rs @@ -6,7 +6,6 @@ pub mod type_check; use crate::graph::{CrateGraph, CrateId}; use crate::node_interner::NodeInterner; -use acvm::acir::circuit::Opcode; use def_map::CrateDefMap; use fm::FileManager; use std::collections::HashMap; @@ -29,20 +28,14 @@ pub struct Context { pub type StorageSlot = u32; impl Context { - pub fn new( - file_manager: FileManager, - crate_graph: CrateGraph, - is_opcode_supported: Box bool>, - ) -> Context { - let mut ctx = Context { + pub fn new(file_manager: FileManager, crate_graph: CrateGraph) -> Context { + Context { def_interner: NodeInterner::default(), def_maps: HashMap::new(), crate_graph, file_manager, storage_slots: HashMap::new(), - }; - ctx.def_interner.set_opcode_support(is_opcode_supported); - ctx + } } /// Returns the CrateDefMap for a given CrateId. diff --git a/crates/noirc_frontend/src/hir/resolution/resolver.rs b/crates/noirc_frontend/src/hir/resolution/resolver.rs index d80bca9df1..60da0b6468 100644 --- a/crates/noirc_frontend/src/hir/resolution/resolver.rs +++ b/crates/noirc_frontend/src/hir/resolution/resolver.rs @@ -1149,23 +1149,7 @@ impl<'a> Resolver<'a> { let span = path.span(); let id = self.resolve_path(path)?; - if let Some(mut function) = TryFromModuleDefId::try_from(id) { - // Check if this is an unsupported low level opcode. If so, replace it with - // an alternative in the stdlib. 
- if let Some(meta) = self.interner.try_function_meta(&function) { - if meta.kind == crate::FunctionKind::LowLevel { - let attribute = meta.attributes.expect("all low level functions must contain an attribute which contains the opcode which it links to"); - let opcode = attribute.foreign().expect( - "ice: function marked as foreign, but attribute kind does not match this", - ); - if !self.interner.foreign(&opcode) { - if let Some(new_id) = self.interner.get_alt(opcode) { - function = new_id; - } - } - } - } - + if let Some(function) = TryFromModuleDefId::try_from(id) { return Ok(self.interner.function_definition_id(function)); } diff --git a/crates/noirc_frontend/src/lexer/token.rs b/crates/noirc_frontend/src/lexer/token.rs index fe0e3bf1f9..5ffa807a33 100644 --- a/crates/noirc_frontend/src/lexer/token.rs +++ b/crates/noirc_frontend/src/lexer/token.rs @@ -324,7 +324,6 @@ impl IntType { pub enum Attribute { Foreign(String), Builtin(String), - Alternative(String), Test, } @@ -333,7 +332,6 @@ impl fmt::Display for Attribute { match *self { Attribute::Foreign(ref k) => write!(f, "#[foreign({k})]"), Attribute::Builtin(ref k) => write!(f, "#[builtin({k})]"), - Attribute::Alternative(ref k) => write!(f, "#[alternative({k})]"), Attribute::Test => write!(f, "#[test]"), } } @@ -365,7 +363,6 @@ impl Attribute { let tok = match attribute_type { "foreign" => Token::Attribute(Attribute::Foreign(attribute_name.to_string())), "builtin" => Token::Attribute(Attribute::Builtin(attribute_name.to_string())), - "alternative" => Token::Attribute(Attribute::Alternative(attribute_name.to_string())), _ => { return Err(LexerErrorKind::MalformedFuncAttribute { span, found: word.to_owned() }) } @@ -401,7 +398,6 @@ impl AsRef for Attribute { match self { Attribute::Foreign(string) => string, Attribute::Builtin(string) => string, - Attribute::Alternative(string) => string, Attribute::Test => "", } } diff --git a/crates/noirc_frontend/src/main.rs b/crates/noirc_frontend/src/main.rs index 
023a1440e5..48c6c4b8b1 100644 --- a/crates/noirc_frontend/src/main.rs +++ b/crates/noirc_frontend/src/main.rs @@ -27,12 +27,7 @@ fn main() { let crate_id = crate_graph.add_crate_root(CrateType::Library, root_file_id); // initiate context with file manager and crate graph - let mut context = Context::new( - fm, - crate_graph, - #[allow(deprecated)] - Box::new(acvm::default_is_opcode_supported(acvm::Language::R1CS)), - ); + let mut context = Context::new(fm, crate_graph); // Now create the CrateDefMap // This is preamble for analysis diff --git a/crates/noirc_frontend/src/node_interner.rs b/crates/noirc_frontend/src/node_interner.rs index 61e742dc27..a318d36151 100644 --- a/crates/noirc_frontend/src/node_interner.rs +++ b/crates/noirc_frontend/src/node_interner.rs @@ -1,8 +1,5 @@ use std::collections::{BTreeMap, HashMap}; -use acvm::acir::circuit::opcodes::BlackBoxFuncCall; -use acvm::acir::circuit::Opcode; -use acvm::Language; use arena::{Arena, Index}; use fm::FileId; use iter_extended::vecmap; @@ -69,9 +66,6 @@ pub struct NodeInterner { next_type_variable_id: usize, - //used for fallback mechanism - is_opcode_supported: Box bool>, - delayed_type_checks: Vec, /// A map from a struct type and method name to a function id for the method. 
@@ -258,8 +252,6 @@ impl Default for NodeInterner { field_indices: HashMap::new(), next_type_variable_id: 0, globals: HashMap::new(), - #[allow(deprecated)] - is_opcode_supported: Box::new(acvm::default_is_opcode_supported(Language::R1CS)), delayed_type_checks: vec![], struct_methods: HashMap::new(), primitive_methods: HashMap::new(), @@ -396,17 +388,6 @@ impl NodeInterner { self.func_meta.insert(func_id, func_data); } - pub fn get_alt(&self, opcode: String) -> Option { - for (func_id, meta) in &self.func_meta { - if let Some(crate::token::Attribute::Alternative(name)) = &meta.attributes { - if *name == opcode { - return Some(*func_id); - } - } - } - None - } - pub fn push_definition( &mut self, name: String, @@ -580,19 +561,6 @@ impl NodeInterner { self.function_definition_ids[&function] } - pub fn set_opcode_support(&mut self, is_opcode_supported: Box bool>) { - self.is_opcode_supported = is_opcode_supported; - } - - #[allow(deprecated)] - pub fn foreign(&self, opcode: &str) -> bool { - let black_box_func_call = match acvm::acir::BlackBoxFunc::lookup(opcode) { - Some(black_box_func) => BlackBoxFuncCall::dummy(black_box_func), - None => return false, - }; - (self.is_opcode_supported)(&Opcode::BlackBoxFuncCall(black_box_func_call)) - } - pub fn push_delayed_type_check(&mut self, f: TypeCheckFn) { self.delayed_type_checks.push(f); } diff --git a/flake.nix b/flake.nix index 4191d1d11a..696c38623e 100644 --- a/flake.nix +++ b/flake.nix @@ -144,10 +144,10 @@ # which avoids exposing the entire Nix store to the static server it starts # The static server is moved to the background and killed after checks are completed # - # We also set the BACKEND_CACHE_DIR environment variable to the $TMP directory so we can successfully cache + # We also set the NARGO_BACKEND_CACHE_DIR environment variable to the $TMP directory so we can successfully cache # the transcript; which isn't possible with the default path because the Nix sandbox disabled $HOME preCheck = '' - export 
BACKEND_CACHE_DIR=$TMP + export NARGO_BACKEND_CACHE_DIR=$TMP cp ${pkgs.barretenberg-transcript00} . echo "Starting simple static server" ${pkgs.simple-http-server}/bin/simple-http-server --port ${toString port} --silent & diff --git a/noir_stdlib/src/compat.nr b/noir_stdlib/src/compat.nr new file mode 100644 index 0000000000..65ae22c5ab --- /dev/null +++ b/noir_stdlib/src/compat.nr @@ -0,0 +1,4 @@ +fn is_bn254() -> bool { + // bn254 truncates its curve order to 0 + 21888242871839275222246405745257275088548364400416034343698204186575808495617 == 0 +} diff --git a/noir_stdlib/src/ec.nr b/noir_stdlib/src/ec.nr index cc58b714de..59a0731b9a 100644 --- a/noir_stdlib/src/ec.nr +++ b/noir_stdlib/src/ec.nr @@ -5,6 +5,7 @@ mod tecurve; // Twisted Edwards curves mod swcurve; // Elliptic curves in Short Weierstraß form mod montcurve; // Montgomery curves +mod consts; // Commonly used curve presets // // Note that Twisted Edwards and Montgomery curves are (birationally) equivalent, so that // they may be freely converted between one another, whereas Short Weierstraß curves are @@ -120,9 +121,6 @@ mod montcurve; // Montgomery curves // **TODO: Support arrays of structs to make this work. -// TODO: Replace with built-in backend-dependent constant. -global N_BITS = 254; // Maximum number of bits in field element - // Field-dependent constant ZETA = a non-square element of Field // Required for Elligator 2 map // TODO: Replace with built-in constant. @@ -149,20 +147,6 @@ global C5 = 19103219067921713944291392827692070036145651957329286315305642004821 // out //} -// Converts Field element to little-endian bit array of length N_BITS -// TODO: Fix built-in to_le_bits(., N_BITS), which yields a 128-periodic bit array -fn to_bits(x: Field) -> [u1; N_BITS] { - let mut x = x; - let mut out = [0; N_BITS]; - for i in 0..N_BITS { - if x != 0 { - out[i] = x as u1; - x = (x - out[i] as Field)/2; - } - } - out -} - // TODO: Make this built-in. 
fn safe_inverse(x: Field) -> Field { if x == 0 { @@ -182,8 +166,10 @@ fn is_square(x: Field) -> bool { // Power function of two Field arguments of arbitrary size. // Adapted from std::field::pow_32. fn pow(x: Field, y: Field) -> Field { // As in tests with minor modifications + let N_BITS = crate::field::modulus_num_bits(); + let mut r = 1 as Field; - let b = to_bits(y); + let b = y.to_le_bits(N_BITS as u32); for i in 0..N_BITS { r *= r; diff --git a/noir_stdlib/src/ec/consts.nr b/noir_stdlib/src/ec/consts.nr new file mode 100644 index 0000000000..f4d67e7a92 --- /dev/null +++ b/noir_stdlib/src/ec/consts.nr @@ -0,0 +1 @@ +mod te; diff --git a/noir_stdlib/src/ec/consts/te.nr b/noir_stdlib/src/ec/consts/te.nr new file mode 100644 index 0000000000..8a5bdae512 --- /dev/null +++ b/noir_stdlib/src/ec/consts/te.nr @@ -0,0 +1,33 @@ +use crate::compat; +use crate::ec::tecurve::affine::Point as TEPoint; +use crate::ec::tecurve::affine::Curve as TECurve; + +struct BabyJubjub { + curve: TECurve, + base8: TEPoint, + suborder: Field, +} + +fn baby_jubjub() -> BabyJubjub { + assert(compat::is_bn254()); + + BabyJubjub { + // Baby Jubjub (ERC-2494) parameters in affine representation + curve: TECurve::new( + 168700, + 168696, + // G + TEPoint::new( + 995203441582195749578291179787384436505546430278305826713579947235728471134, + 5472060717959818805561601436314318772137091100104008585924551046643952123905, + ), + ), + // [8]G precalculated + base8: TEPoint::new( + 5299619240641551281634865583518297030282874472190772894086521144482721001553, + 16950150798460657717958625567821834550301663161624707787222815936182638968203, + ), + // The size of the group formed from multiplying the base field by 8. 
+ suborder: 2736030358979909402780800718157159386076813972158567259200215660948447373041, + } +} diff --git a/noir_stdlib/src/ec/swcurve.nr b/noir_stdlib/src/ec/swcurve.nr index eae4f375e4..1f22de5598 100644 --- a/noir_stdlib/src/ec/swcurve.nr +++ b/noir_stdlib/src/ec/swcurve.nr @@ -344,11 +344,18 @@ mod curvegroup { // Scalar multiplication (p + ... + p n times) fn mul(self, n: Field, p: Point) -> Point { - let n_as_bits = crate::ec::to_bits(n); // N_BITS-bit representation + let N_BITS = crate::field::modulus_num_bits(); + + // TODO: temporary workaround until issue 1354 is solved + let mut n_as_bits: [u1; 254] = [0; 254]; + let tmp = n.to_le_bits(N_BITS as u32); + for i in 0..254 { + n_as_bits[i] = tmp[i]; + } self.bit_mul(n_as_bits, p) } - + // Multi-scalar multiplication (n[0]*p[0] + ... + n[N]*p[N], where * denotes scalar multiplication) fn msm(self, n: [Field; N], p: [Point; N]) -> Point { let mut out = Point::zero(); diff --git a/noir_stdlib/src/ec/tecurve.nr b/noir_stdlib/src/ec/tecurve.nr index 8611e4270c..ff2c398a8a 100644 --- a/noir_stdlib/src/ec/tecurve.nr +++ b/noir_stdlib/src/ec/tecurve.nr @@ -364,17 +364,24 @@ mod curvegroup { self.add(out, out), if(bits[n - i - 1] == 0) {Point::zero()} else {p}); } - + out } // Scalar multiplication (p + ... + p n times) fn mul(self, n: Field, p: Point) -> Point { - let n_as_bits = crate::ec::to_bits(n); // N_BITS-bit representation + let N_BITS = crate::field::modulus_num_bits(); + + // TODO: temporary workaround until issue 1354 is solved + let mut n_as_bits: [u1; 254] = [0; 254]; + let tmp = n.to_le_bits(N_BITS as u32); + for i in 0..254 { + n_as_bits[i] = tmp[i]; + } self.bit_mul(n_as_bits, p) } - + // Multi-scalar multiplication (n[0]*p[0] + ... 
+ n[N]*p[N], where * denotes scalar multiplication) fn msm(self, n: [Field; N], p: [Point; N]) -> Point { let mut out = Point::zero(); diff --git a/noir_stdlib/src/eddsa.nr b/noir_stdlib/src/eddsa.nr new file mode 100644 index 0000000000..1db6182501 --- /dev/null +++ b/noir_stdlib/src/eddsa.nr @@ -0,0 +1,74 @@ +use crate::hash::poseidon; +use crate::ec::consts::te::baby_jubjub; +use crate::ec::tecurve::affine::Point as TEPoint; + +// Returns true if x is less than y +fn lt_bytes32(x: Field, y: Field) -> bool { + let x_bytes = x.to_le_bytes(32); + let y_bytes = y.to_le_bytes(32); + let mut x_is_lt = false; + let mut done = false; + for i in 0..32 { + if (!done) { + let x_byte = x_bytes[31 - i] as u8; + let y_byte = y_bytes[31 - i] as u8; + let bytes_match = x_byte == y_byte; + if !bytes_match { + x_is_lt = x_byte < y_byte; + done = true; + } + } + } + x_is_lt +} + +// Returns true if signature is valid +fn eddsa_poseidon_verify( + pub_key_x: Field, + pub_key_y: Field, + signature_s: Field, + signature_r8_x: Field, + signature_r8_y: Field, + message: Field, +) -> bool { + // Verifies by testing: + // S * B8 = R8 + H(R8, A, m) * A8 + + let bjj = baby_jubjub(); + + let pub_key = TEPoint::new(pub_key_x, pub_key_y); + assert(bjj.curve.contains(pub_key)); + + let signature_r8 = TEPoint::new(signature_r8_x, signature_r8_y); + assert(bjj.curve.contains(signature_r8)); + + // Ensure S < Subgroup Order + assert(lt_bytes32(signature_s, bjj.suborder)); + + // Calculate the h = H(R, A, msg) + let hash: Field = poseidon::bn254::hash_5([ + signature_r8_x, + signature_r8_y, + pub_key_x, + pub_key_y, + message, + ]); + + // Calculate second part of the right side: right2 = h*8*A + + // Multiply by 8 by doubling 3 times. This also ensures that the result is in the subgroup. 
+ let pub_key_mul_2 = bjj.curve.add(pub_key, pub_key); + let pub_key_mul_4 = bjj.curve.add(pub_key_mul_2, pub_key_mul_2); + let pub_key_mul_8 = bjj.curve.add(pub_key_mul_4, pub_key_mul_4); + + // We check that A8 is not zero. + assert(!pub_key_mul_8.is_zero()); + + // Compute the right side: R8 + h * A8 + let right = bjj.curve.add(signature_r8, bjj.curve.mul(hash, pub_key_mul_8)); + + // Calculate left side of equation left = S * B8 + let left = bjj.curve.mul(signature_s, bjj.base8); + + left.eq(right) +} diff --git a/noir_stdlib/src/lib.nr b/noir_stdlib/src/lib.nr index f0af06b97b..f6a60a6dee 100644 --- a/noir_stdlib/src/lib.nr +++ b/noir_stdlib/src/lib.nr @@ -3,6 +3,7 @@ mod array; mod merkle; mod schnorr; mod ecdsa_secp256k1; +mod eddsa; mod scalar_mul; mod sha256; mod sha512; @@ -10,6 +11,7 @@ mod field; mod ec; mod unsafe; mod collections; +mod compat; #[builtin(println)] fn println(_input : T) {} diff --git a/noir_stdlib/src/merkle.nr b/noir_stdlib/src/merkle.nr index a47ce86c94..9d01f84fea 100644 --- a/noir_stdlib/src/merkle.nr +++ b/noir_stdlib/src/merkle.nr @@ -13,7 +13,6 @@ fn check_membership(_root : Field, _leaf : Field, _index : Field, _hash_path: [F fn compute_merkle_root(_leaf : Field, _index : Field, _hash_path: [Field]) -> Field {} // Returns the root of the tree from the provided leaf and its hashpath, using pedersen hash -#[alternative(compute_merkle_root)] fn compute_root_from_leaf(leaf : Field, index : Field, hash_path: [Field]) -> Field { let n = hash_path.len(); let index_bits = index.to_le_bits(n as u32); diff --git a/noir_stdlib/src/sha256.nr b/noir_stdlib/src/sha256.nr index 83e08ada7e..cf72fec726 100644 --- a/noir_stdlib/src/sha256.nr +++ b/noir_stdlib/src/sha256.nr @@ -99,7 +99,6 @@ fn msg_u8_to_u32(msg: [u8; 64]) -> [u32; 16] } // SHA-256 hash function -#[alternative(sha256)] fn digest(msg: [u8; N]) -> [u8; 32] { let mut msg_block: [u8; 64] = [0; 64]; let mut h: [u32; 8] = 
[1779033703,3144134277,1013904242,2773480762,1359893119,2600822924,528734635,1541459225]; // Intermediate hash, starting with the canonical initial value