diff --git a/acvm-repo/acir/src/circuit/black_box_functions.rs b/acvm-repo/acir/src/circuit/black_box_functions.rs index d0ec7d02201..a129ca01f07 100644 --- a/acvm-repo/acir/src/circuit/black_box_functions.rs +++ b/acvm-repo/acir/src/circuit/black_box_functions.rs @@ -7,7 +7,9 @@ use serde::{Deserialize, Serialize}; use strum_macros::EnumIter; #[allow(clippy::upper_case_acronyms)] -#[derive(Clone, Debug, Hash, Copy, PartialEq, Eq, Serialize, Deserialize, EnumIter)] +#[derive( + Clone, Debug, Hash, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, EnumIter, +)] pub enum BlackBoxFunc { /// Ciphers (encrypts) the provided plaintext using AES128 in CBC mode, /// padding the input using PKCS#7. diff --git a/acvm-repo/brillig/src/opcodes.rs b/acvm-repo/brillig/src/opcodes.rs index 1cb31ca3d0a..13dde4c4b08 100644 --- a/acvm-repo/brillig/src/opcodes.rs +++ b/acvm-repo/brillig/src/opcodes.rs @@ -110,8 +110,8 @@ pub enum IntegerBitSize { U128, } -impl From for u32 { - fn from(bit_size: IntegerBitSize) -> u32 { +impl From for u8 { + fn from(bit_size: IntegerBitSize) -> u8 { match bit_size { IntegerBitSize::U1 => 1, IntegerBitSize::U8 => 8, @@ -123,10 +123,16 @@ impl From for u32 { } } -impl TryFrom for IntegerBitSize { +impl From for u32 { + fn from(bit_size: IntegerBitSize) -> u32 { + u8::from(bit_size) as u32 + } +} + +impl TryFrom for IntegerBitSize { type Error = &'static str; - fn try_from(value: u32) -> Result { + fn try_from(value: u8) -> Result { match value { 1 => Ok(IntegerBitSize::U1), 8 => Ok(IntegerBitSize::U8), @@ -159,15 +165,15 @@ pub enum BitSize { } impl BitSize { - pub fn to_u32(self) -> u32 { + pub fn to_u8(self) -> u8 { match self { - BitSize::Field => F::max_num_bits(), + BitSize::Field => F::max_num_bits().try_into().unwrap(), BitSize::Integer(bit_size) => bit_size.into(), } } - pub fn try_from_u32(value: u32) -> Result { - if value == F::max_num_bits() { + pub fn try_from_u8(value: u8) -> Result { + if value as u32 == F::max_num_bits() { Ok(BitSize::Field) } else { Ok(BitSize::Integer(IntegerBitSize::try_from(value)?)) diff --git a/compiler/noirc_driver/src/abi_gen.rs b/compiler/noirc_driver/src/abi_gen.rs index 625a35c8d15..2ca0b8ee96d 100644 --- a/compiler/noirc_driver/src/abi_gen.rs +++ b/compiler/noirc_driver/src/abi_gen.rs @@ -87,7 +87,7 @@ pub(super) fn abi_type_from_hir_type(context: &Context, typ: &Type) -> AbiType { Signedness::Signed => Sign::Signed, }; - AbiType::Integer { sign, width: (*bit_width).into() } + AbiType::Integer { sign, width: bit_width.bit_size() as u32 } } Type::TypeVariable(binding) => { if binding.is_integer() || binding.is_integer_or_field() { diff --git a/compiler/noirc_evaluator/src/acir/acir_variable.rs b/compiler/noirc_evaluator/src/acir/acir_variable.rs index bb277751b9e..53a0db4548d 100644 --- a/compiler/noirc_evaluator/src/acir/acir_variable.rs +++ b/compiler/noirc_evaluator/src/acir/acir_variable.rs @@ -51,12 +51,12 @@ impl AcirType { } /// Returns the bit size of the underlying type - pub(crate) fn bit_size(&self) -> u32 { + pub(crate) fn bit_size(&self) -> u8 { match self { AcirType::NumericType(numeric_type) => match numeric_type { NumericType::Signed { bit_size } => *bit_size, NumericType::Unsigned { bit_size } => *bit_size, - NumericType::NativeField => F::max_num_bits(), + NumericType::NativeField => F::max_num_bits() as u8, }, AcirType::Array(_, _) => unreachable!("cannot fetch bit size of array type"), } @@ -68,7 +68,7 @@ impl AcirType { } /// Returns an unsigned type of the specified bit size - pub(crate) fn 
unsigned(bit_size: u32) -> Self { + pub(crate) fn unsigned(bit_size: u8) -> Self { AcirType::NumericType(NumericType::Unsigned { bit_size }) } @@ -112,9 +112,7 @@ impl From for AcirType { pub(crate) struct AcirContext> { blackbox_solver: B, - /// Two-way map that links `AcirVar` to `AcirVarData`. - /// - /// The vars object is an instance of the `TwoWayMap`, which provides a bidirectional mapping between `AcirVar` and `AcirVarData`. + /// Map that links `AcirVar` to `AcirVarData`. vars: HashMap>, constant_witnesses: HashMap, @@ -772,7 +770,7 @@ impl> AcirContext { &mut self, lhs: AcirVar, rhs: AcirVar, - bit_size: u32, + bit_size: u8, predicate: AcirVar, ) -> Result<(AcirVar, AcirVar), RuntimeError> { let zero = self.add_constant(F::zero()); @@ -831,7 +829,7 @@ impl> AcirContext { let mut max_rhs_bits = bit_size; // when rhs is constant, we can better estimate the maximum bit sizes if let Some(rhs_const) = rhs_expr.to_const() { - max_rhs_bits = rhs_const.num_bits(); + max_rhs_bits = rhs_const.num_bits().try_into().unwrap(); if max_rhs_bits != 0 { if max_rhs_bits > bit_size { return Ok((zero, zero)); @@ -895,7 +893,7 @@ impl> AcirContext { // Avoids overflow: 'q*b+r < 2^max_q_bits*2^max_rhs_bits' let mut avoid_overflow = false; - if max_q_bits + max_rhs_bits >= F::max_num_bits() - 1 { + if ((max_q_bits + max_rhs_bits) as u32) >= F::max_num_bits() - 1 { // q*b+r can overflow; we avoid this when b is constant if rhs_expr.is_const() { avoid_overflow = true; @@ -928,7 +926,7 @@ impl> AcirContext { r_predicate, max_r_predicate, predicate, - rhs_const.num_bits(), + rhs_const.num_bits().try_into().unwrap(), )?; } } @@ -952,7 +950,7 @@ impl> AcirContext { lhs: AcirVar, rhs: AcirVar, offset: AcirVar, - bits: u32, + bits: u8, ) -> Result<(), RuntimeError> { #[allow(unused_qualifications)] const fn num_bits() -> usize { @@ -964,7 +962,7 @@ impl> AcirContext { } assert!( - bits < F::max_num_bits(), + (bits as u32) < F::max_num_bits(), "range check with bit size of the prime field is not implemented yet" ); @@ -982,13 +980,13 @@ impl> AcirContext { }; // we now have lhs+offset <= rhs <=> lhs_offset <= rhs_offset - let bit_size = bit_size_u128(rhs_offset); + let bit_size = bit_size_u128(rhs_offset) as u8; // r = 2^bit_size - rhs_offset -1, is of bit size 'bit_size' by construction let r = (1_u128 << bit_size) - rhs_offset - 1; // however, since it is a constant, we can compute it's actual bit size let r_bit_size = bit_size_u128(r); // witness = lhs_offset + r - assert!(bits + r_bit_size < F::max_num_bits()); //we need to ensure lhs_offset + r does not overflow + assert!(bits as u32 + r_bit_size < F::max_num_bits()); //we need to ensure lhs_offset + r does not overflow let r_var = self.add_constant(r); let aor = self.add_var(lhs_offset, r_var)?; @@ -1010,7 +1008,7 @@ impl> AcirContext { &mut self, lhs: AcirVar, leading: AcirVar, - max_bit_size: u32, + max_bit_size: u8, ) -> Result { let max_power_of_two = self.add_constant(F::from(2_u128).pow(&F::from(max_bit_size as u128 - 1))); @@ -1029,7 +1027,7 @@ impl> AcirContext { &mut self, lhs: AcirVar, rhs: AcirVar, - bit_size: u32, + bit_size: u8, ) -> Result<(AcirVar, AcirVar), RuntimeError> { // We derive the signed division from the unsigned euclidean division. // note that this is not euclidean division! 
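// A minimal standalone sketch (assumed helper, not the ACIR implementation) of
// deriving signed division from unsigned division on two's-complement values:
// divide the magnitudes, then reapply the signs. The quotient is negated when
// the operand signs differ, and the remainder takes the dividend's sign, i.e.
// this is truncating division rather than Euclidean division, matching the
// note above.
fn signed_div_rem(lhs: i64, rhs: i64) -> (i64, i64) {
    assert!(rhs != 0 && lhs != i64::MIN);
    let q_mag = (lhs.unsigned_abs() / rhs.unsigned_abs()) as i64;
    let r_mag = (lhs.unsigned_abs() % rhs.unsigned_abs()) as i64;
    let quotient = if (lhs < 0) != (rhs < 0) { -q_mag } else { q_mag };
    let remainder = if lhs < 0 { -r_mag } else { r_mag };
    // Invariant: lhs == quotient * rhs + remainder, e.g. -7 / 2 == -3 rem -1.
    (quotient, remainder)
}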
@@ -1086,7 +1084,7 @@ impl> AcirContext { lhs: AcirVar, rhs: AcirVar, typ: AcirType, - bit_size: u32, + bit_size: u8, predicate: AcirVar, ) -> Result { let numeric_type = match typ { @@ -1115,7 +1113,7 @@ impl> AcirContext { // If `variable` is constant then we don't need to add a constraint. // We _do_ add a constraint if `variable` would fail the range check however so that we throw an error. if let Some(constant) = self.var_to_expression(variable)?.to_const() { - if constant.num_bits() <= *bit_size { + if constant.num_bits() <= *bit_size as u32 { return Ok(variable); } } @@ -1143,8 +1141,8 @@ impl> AcirContext { pub(crate) fn truncate_var( &mut self, lhs: AcirVar, - rhs: u32, - max_bit_size: u32, + rhs: u8, + max_bit_size: u8, ) -> Result { // 2^{rhs} let divisor = self.add_constant(F::from(2_u128).pow(&F::from(rhs as u128))); @@ -1166,7 +1164,7 @@ impl> AcirContext { &mut self, lhs: AcirVar, rhs: AcirVar, - bit_count: u32, + bit_count: u8, ) -> Result { let pow_last = self.add_constant(F::from(1_u128 << (bit_count - 1))); let pow = self.add_constant(F::from(1_u128 << (bit_count))); @@ -1214,7 +1212,7 @@ impl> AcirContext { &mut self, lhs: AcirVar, rhs: AcirVar, - max_bits: u32, + max_bits: u8, ) -> Result { // Returns a `Witness` that is constrained to be: // - `1` if lhs >= rhs @@ -1238,7 +1236,7 @@ impl> AcirContext { // Ensure that 2^{max_bits + 1} is less than the field size // // TODO: perhaps this should be a user error, instead of an assert - assert!(max_bits + 1 < F::max_num_bits()); + assert!(max_bits as u32 + 1 < F::max_num_bits()); let two_max_bits = self.add_constant(F::from(2_u128).pow(&F::from(max_bits as u128))); let diff = self.sub_var(lhs, rhs)?; @@ -1282,7 +1280,7 @@ impl> AcirContext { &mut self, lhs: AcirVar, rhs: AcirVar, - bit_size: u32, + bit_size: u8, ) -> Result { // Flip the result of calling more than equal method to // compute less than. 
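// Illustrative sketch, over plain integers, of the bounded comparison trick
// used by the "more than or equal" constraint above: for values fitting in
// `max_bits` bits, `2^max_bits + (lhs - rhs)` fits in `max_bits + 1` bits and
// its top bit is 1 exactly when `lhs >= rhs`.
fn more_than_eq(lhs: u128, rhs: u128, max_bits: u8) -> bool {
    assert!(max_bits < 127);
    assert!(lhs < (1u128 << max_bits) && rhs < (1u128 << max_bits));
    // No underflow: rhs < 2^max_bits, so 2^max_bits + lhs - rhs > 0.
    let shifted_diff = (1u128 << max_bits) + lhs - rhs;
    (shifted_diff >> max_bits) & 1 == 1
}

// "Less than" is then just the flipped result, as the comment above says.
fn less_than(lhs: u128, rhs: u128, max_bits: u8) -> bool {
    !more_than_eq(lhs, rhs, max_bits)
}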
@@ -1518,7 +1516,7 @@ impl> AcirContext { match self.vars[&input].as_constant() { Some(constant) if allow_constant_inputs => { single_val_witnesses.push( - FunctionInput::constant(*constant, num_bits).map_err( + FunctionInput::constant(*constant, num_bits as u32).map_err( |invalid_input_bit_size| { RuntimeError::InvalidBlackBoxInputBitSize { value: invalid_input_bit_size.value, @@ -1533,7 +1531,7 @@ impl> AcirContext { _ => { let witness_var = self.get_or_create_witness_var(input)?; let witness = self.var_to_witness(witness_var)?; - single_val_witnesses.push(FunctionInput::witness(witness, num_bits)); + single_val_witnesses.push(FunctionInput::witness(witness, num_bits as u32)); } } } @@ -1604,6 +1602,7 @@ impl> AcirContext { let input_expr = self.var_to_expression(input_var)?; let bit_size = u32::BITS - (radix - 1).leading_zeros(); + let bit_size: u8 = bit_size.try_into().unwrap(); let limbs = self.acir_ir.radix_le_decompose(&input_expr, radix, limb_count, bit_size)?; let mut limb_vars = vecmap(limbs, |witness| { diff --git a/compiler/noirc_evaluator/src/acir/generated_acir.rs b/compiler/noirc_evaluator/src/acir/generated_acir.rs index a2b161688c0..c943748343b 100644 --- a/compiler/noirc_evaluator/src/acir/generated_acir.rs +++ b/compiler/noirc_evaluator/src/acir/generated_acir.rs @@ -359,11 +359,11 @@ impl GeneratedAcir { input_expr: &Expression, radix: u32, limb_count: u32, - bit_size: u32, + bit_size: u8, ) -> Result, RuntimeError> { let radix_big = BigUint::from(radix); assert_eq!( - BigUint::from(2u128).pow(bit_size), + BigUint::from(2u128).pow(bit_size as u32), radix_big, "ICE: Radix must be a power of 2" ); @@ -574,11 +574,11 @@ impl GeneratedAcir { pub(crate) fn range_constraint( &mut self, witness: Witness, - num_bits: u32, + num_bits: u8, ) -> Result<(), RuntimeError> { // We class this as an error because users should instead // do `as Field`. - if num_bits >= F::max_num_bits() { + if num_bits as u32 >= F::max_num_bits() { return Err(RuntimeError::InvalidRangeConstraint { num_bits: F::max_num_bits(), call_stack: self.call_stack.clone(), @@ -586,7 +586,7 @@ impl GeneratedAcir { }; let constraint = AcirOpcode::BlackBoxFuncCall(BlackBoxFuncCall::RANGE { - input: FunctionInput::witness(witness, num_bits), + input: FunctionInput::witness(witness, num_bits as u32), }); self.push_opcode(constraint); diff --git a/compiler/noirc_evaluator/src/acir/mod.rs b/compiler/noirc_evaluator/src/acir/mod.rs index 95e0dd12132..e9e6371b0d2 100644 --- a/compiler/noirc_evaluator/src/acir/mod.rs +++ b/compiler/noirc_evaluator/src/acir/mod.rs @@ -41,10 +41,9 @@ use crate::ssa::{ Binary, BinaryOp, ConstrainError, Instruction, InstructionId, Intrinsic, TerminatorInstruction, }, - map::Id, printer::try_to_extract_string_from_error_payload, types::{NumericType, Type}, - value::{Value, ValueId}, + value::Value, }, ssa_gen::Ssa, }; @@ -161,7 +160,7 @@ struct Context<'a> { /// AcirVar per SSA value. Before creating an `AcirVar` /// for an SSA value, we check this map. If an `AcirVar` /// already exists for this Value, we return the `AcirVar`. - ssa_values: HashMap, AcirValue>, + ssa_values: HashMap, /// The `AcirVar` that describes the condition belonging to the most recently invoked /// `SideEffectsEnabled` instruction. @@ -181,13 +180,13 @@ struct Context<'a> { /// Maps SSA values to BlockId /// A BlockId is an ACIR structure which identifies a memory block /// Each acir memory block corresponds to a different SSA array. 
- memory_blocks: HashMap, BlockId>, + memory_blocks: HashMap, /// Maps SSA values to a BlockId used internally /// A BlockId is an ACIR structure which identifies a memory block /// Each memory blocks corresponds to a different SSA value /// which utilizes this internal memory for ACIR generation. - internal_memory_blocks: HashMap, BlockId>, + internal_memory_blocks: HashMap, /// Maps an internal memory block to its length /// @@ -421,7 +420,8 @@ impl<'a> Context<'a> { ) -> Result, RuntimeError> { let dfg = &main_func.dfg; let entry_block = &dfg[main_func.entry_block()]; - let input_witness = self.convert_ssa_block_params(entry_block.parameters(), dfg)?; + let main_parameters = dfg.block_parameters(main_func.entry_block()); + let input_witness = self.convert_ssa_block_params(main_parameters, dfg)?; let num_return_witnesses = self.get_num_return_witnesses(entry_block.unwrap_terminator(), dfg); @@ -491,7 +491,7 @@ impl<'a> Context<'a> { } // Initialize call_data - let call_data_arrays: Vec = + let call_data_arrays: Vec = self.data_bus.call_data.iter().map(|cd| cd.array_id).collect(); for call_data_array in call_data_arrays { self.ensure_array_is_initialized(call_data_array, dfg)?; @@ -506,11 +506,14 @@ impl<'a> Context<'a> { ) -> Result, RuntimeError> { let dfg = &main_func.dfg; - let inputs = try_vecmap(dfg[main_func.entry_block()].parameters(), |param_id| { - let typ = dfg.type_of_value(*param_id); + let main_parameters = dfg.block_parameters(main_func.entry_block()); + let inputs = try_vecmap(main_parameters, |param| { + let typ = dfg.type_of_value(param); self.create_value_from_type(&typ, &mut |this, _| Ok(this.acir_context.add_variable())) })?; - let arguments = self.gen_brillig_parameters(dfg[main_func.entry_block()].parameters(), dfg); + + let main_parameters = dfg.block_parameters(main_func.entry_block()); + let arguments = self.gen_brillig_parameters(main_parameters, dfg); let witness_inputs = self.acir_context.extract_witness(&inputs); @@ -559,18 +562,18 @@ impl<'a> Context<'a> { /// Adds and binds `AcirVar`s for each numeric block parameter or block parameter array element. fn convert_ssa_block_params( &mut self, - params: &[ValueId], + params: impl ExactSizeIterator, dfg: &DataFlowGraph, ) -> Result, RuntimeError> { // The first witness (if any) is the next one let start_witness = self.acir_context.current_witness_index().0; for param_id in params { - let typ = dfg.type_of_value(*param_id); + let typ = dfg.type_of_value(param_id); let value = self.convert_ssa_block_param(&typ)?; match &value { AcirValue::Var(_, _) => (), AcirValue::Array(_) => { - let block_id = self.block_id(param_id); + let block_id = self.block_id(¶m_id); let len = if matches!(typ, Type::Array(_, _)) { typ.flattened_size() as usize } else { @@ -587,7 +590,7 @@ impl<'a> Context<'a> { "The dynamic array type is created in Acir gen and therefore cannot be a block parameter" ), } - self.ssa_values.insert(*param_id, value); + self.ssa_values.insert(param_id, value); } let end_witness = self.acir_context.current_witness_index().0; let witnesses = (start_witness..=end_witness).map(Witness::from).collect(); @@ -625,7 +628,7 @@ impl<'a> Context<'a> { /// Get the BlockId corresponding to the ValueId /// If there is no matching BlockId, we create a new one. 
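// Self-contained sketch (types assumed for illustration) of the get-or-create
// pattern described above, with the memory-block map keyed directly by a
// copyable, hashable SSA value instead of a separate ValueId:
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct SsaValue(u32);

#[derive(Clone, Copy)]
struct BlockId(u32);

#[derive(Default)]
struct MemoryBlocks {
    blocks: HashMap<SsaValue, BlockId>,
    max_block_id: u32,
}

impl MemoryBlocks {
    fn block_id(&mut self, value: SsaValue) -> BlockId {
        if let Some(block_id) = self.blocks.get(&value) {
            return *block_id;
        }
        let block_id = BlockId(self.max_block_id);
        self.max_block_id += 1;
        self.blocks.insert(value, block_id);
        block_id
    }
}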
- fn block_id(&mut self, value: &ValueId) -> BlockId { + fn block_id(&mut self, value: &Value) -> BlockId { if let Some(block_id) = self.memory_blocks.get(value) { return *block_id; } @@ -640,7 +643,7 @@ impl<'a> Context<'a> { /// This is useful for referencing information that can /// only be computed dynamically, such as the type structure /// of non-homogenous arrays. - fn internal_block_id(&mut self, value: &ValueId) -> BlockId { + fn internal_block_id(&mut self, value: &Value) -> BlockId { if let Some(block_id) = self.internal_memory_blocks.get(value) { return *block_id; } @@ -728,13 +731,12 @@ impl<'a> Context<'a> { self.define_result_var(dfg, instruction_id, acir_var); } Instruction::Call { .. } => { - let result_ids = dfg.instruction_results(instruction_id); warnings.extend(self.convert_ssa_call( instruction, dfg, ssa, brillig, - result_ids, + instruction_id, )?); } Instruction::Not(value_id) => { @@ -757,7 +759,7 @@ impl<'a> Context<'a> { Instruction::ArrayGet { .. } | Instruction::ArraySet { .. } => { self.handle_array_operation(instruction_id, dfg)?; } - Instruction::Allocate => { + Instruction::Allocate { .. } => { return Err(RuntimeError::UnknownReference { call_stack: self.acir_context.get_call_stack().clone(), }); @@ -785,7 +787,7 @@ impl<'a> Context<'a> { Instruction::MakeArray { elements, typ: _ } => { let elements = elements.iter().map(|element| self.convert_value(*element, dfg)); let value = AcirValue::Array(elements.collect()); - let result = dfg.instruction_results(instruction_id)[0]; + let result = Value::instruction_result(instruction_id, 0); self.ssa_values.insert(result, value); } } @@ -800,29 +802,28 @@ impl<'a> Context<'a> { dfg: &DataFlowGraph, ssa: &Ssa, brillig: &Brillig, - result_ids: &[ValueId], + instruction_id: InstructionId, ) -> Result, RuntimeError> { let mut warnings = Vec::new(); match instruction { - Instruction::Call { func, arguments } => { - let function_value = &dfg[*func]; - match function_value { + Instruction::Call { func, arguments, result_types: _ } => { + match *func { Value::Function(id) => { - let func = &ssa.functions[id]; + let func = &ssa.functions[&id]; match func.runtime() { RuntimeType::Acir(inline_type) => { assert!(!matches!(inline_type, InlineType::Inline), "ICE: Got an ACIR function named {} that should have already been inlined", func.name()); let inputs = vecmap(arguments, |arg| self.convert_value(*arg, dfg)); - let output_count = result_ids - .iter() - .map(|result_id| { - dfg.type_of_value(*result_id).flattened_size() as usize + let output_count = dfg + .instruction_results(instruction_id) + .map(|result| { + dfg.type_of_value(result).flattened_size() as usize }) .sum(); - let Some(acir_function_id) = ssa.get_entry_point_index(id) else { + let Some(acir_function_id) = ssa.get_entry_point_index(&id) else { unreachable!("Expected an associated final index for call to acir function {id} with args {arguments:?}"); }; @@ -833,35 +834,39 @@ impl<'a> Context<'a> { self.current_side_effects_enabled_var, )?; + let results = dfg.instruction_results(instruction_id); let output_values = - self.convert_vars_to_values(output_vars, dfg, result_ids); + self.convert_vars_to_values(output_vars, dfg, results); - self.handle_ssa_call_outputs(result_ids, output_values, dfg)?; + let results = dfg.instruction_results(instruction_id); + self.handle_ssa_call_outputs(results, output_values, dfg)?; } RuntimeType::Brillig(_) => { - // Check that we are not attempting to return a slice from - // an unconstrained runtime to a constrained runtime - for 
result_id in result_ids { - if dfg.type_of_value(*result_id).contains_slice_element() { - return Err( - RuntimeError::UnconstrainedSliceReturnToConstrained { - call_stack: self.acir_context.get_call_stack(), - }, - ); - } - } let inputs = vecmap(arguments, |arg| self.convert_value(*arg, dfg)); + let arguments = arguments.iter().copied(); let arguments = self.gen_brillig_parameters(arguments, dfg); - let outputs: Vec = vecmap(result_ids, |result_id| { - dfg.type_of_value(*result_id).into() - }); + let results = dfg.instruction_results(instruction_id); + let outputs = try_vecmap(results, |result_id| { + let typ = dfg.type_of_value(result_id); + + // Check that we are not attempting to return a slice from + // an unconstrained runtime to a constrained runtime + if typ.contains_slice_element() { + let call_stack = self.acir_context.get_call_stack(); + Err(RuntimeError::UnconstrainedSliceReturnToConstrained { + call_stack, + }) + } else { + Ok(typ.into()) + } + })?; // Check whether we have already generated Brillig for this function // If we have, re-use the generated code to set-up the Brillig call. let output_values = if let Some(generated_pointer) = self .shared_context - .generated_brillig_pointer(*id, arguments.clone()) + .generated_brillig_pointer(id, arguments.clone()) { let code = self .shared_context @@ -891,7 +896,7 @@ impl<'a> Context<'a> { None, )?; self.shared_context.insert_generated_brillig( - *id, + id, arguments, generated_pointer, code, @@ -899,10 +904,10 @@ impl<'a> Context<'a> { output_values }; + let results = dfg.instruction_results(instruction_id); // Compiler sanity check - assert_eq!(result_ids.len(), output_values.len(), "ICE: The number of Brillig output values should match the result ids in SSA"); - - self.handle_ssa_call_outputs(result_ids, output_values, dfg)?; + assert_eq!(results.len(), output_values.len(), "ICE: The number of Brillig output values should match the result ids in SSA"); + self.handle_ssa_call_outputs(results, output_values, dfg)?; } } } @@ -915,13 +920,18 @@ impl<'a> Context<'a> { call_stack: self.acir_context.get_call_stack(), })); } - let outputs = self - .convert_ssa_intrinsic_call(*intrinsic, arguments, dfg, result_ids)?; + let outputs = self.convert_ssa_intrinsic_call( + intrinsic, + arguments, + dfg, + instruction_id, + )?; // Issue #1438 causes this check to fail with intrinsics that return 0 // results but the ssa form instead creates 1 unit result value. // assert_eq!(result_ids.len(), outputs.len()); - self.handle_ssa_call_outputs(result_ids, outputs, dfg)?; + let results = dfg.instruction_results(instruction_id); + self.handle_ssa_call_outputs(results, outputs, dfg)?; } Value::ForeignFunction(_) => { // TODO: Remove this once elaborator is default frontend. This is now caught by a lint inside the frontend. 
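// Standalone sketch of the fallible-map shape used above; the compiler's
// `try_vecmap` helper is assumed to behave like this stand-in. The old
// "validate in one loop, convert in another" code becomes a single map that
// fails early when a result type contains a slice.
fn try_vecmap<T, U, E>(
    items: impl IntoIterator<Item = T>,
    f: impl FnMut(T) -> Result<U, E>,
) -> Result<Vec<U>, E> {
    items.into_iter().map(f).collect()
}

fn brillig_output_types(result_types: &[&str]) -> Result<Vec<String>, String> {
    try_vecmap(result_types.iter().copied(), |typ| {
        if typ.contains("slice") {
            // Mirrors the check above: slices cannot be returned from an
            // unconstrained (Brillig) call back into a constrained context.
            Err(format!("unconstrained slice return to constrained: {typ}"))
        } else {
            Ok(typ.to_string())
        }
    })
}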
@@ -929,7 +939,7 @@ impl<'a> Context<'a> { call_stack: self.acir_context.get_call_stack(), }); } - _ => unreachable!("expected calling a function but got {function_value:?}"), + other => unreachable!("expected calling a function but got {other:?}"), } } _ => unreachable!("expected calling a call instruction"), @@ -939,13 +949,13 @@ impl<'a> Context<'a> { fn handle_ssa_call_outputs( &mut self, - result_ids: &[ValueId], + results: impl ExactSizeIterator, output_values: Vec, dfg: &DataFlowGraph, ) -> Result<(), RuntimeError> { - for (result_id, output) in result_ids.iter().zip(output_values) { + for (result, output) in results.zip(output_values) { if let AcirValue::Array(_) = &output { - let array_id = dfg.resolve(*result_id); + let array_id = dfg.resolve(result); let block_id = self.block_id(&array_id); let array_typ = dfg.type_of_value(array_id); let len = if matches!(array_typ, Type::Array(_, _)) { @@ -958,43 +968,36 @@ impl<'a> Context<'a> { // Do nothing for AcirValue::DynamicArray and AcirValue::Var // A dynamic array returned from a function call should already be initialized // and a single variable does not require any extra initialization. - self.ssa_values.insert(*result_id, output); + self.ssa_values.insert(result, output); } Ok(()) } fn gen_brillig_parameters( &self, - values: &[ValueId], + parameters: impl ExactSizeIterator, dfg: &DataFlowGraph, ) -> Vec { - values - .iter() - .map(|&value_id| { - let typ = dfg.type_of_value(value_id); - if let Type::Slice(item_types) = typ { - let len = match self - .ssa_values - .get(&value_id) - .expect("ICE: Unknown slice input to brillig") + let parameters = parameters.map(|value| { + let typ = dfg.type_of_value(value); + if let Type::Slice(item_types) = typ { + let len = + match self.ssa_values.get(&value).expect("ICE: Unknown slice input to brillig") { AcirValue::DynamicArray(AcirDynamicArray { len, .. }) => *len, AcirValue::Array(array) => array.len(), _ => unreachable!("ICE: Slice value is not an array"), }; - BrilligParameter::Slice( - item_types - .iter() - .map(BrilligFunctionContext::ssa_type_to_parameter) - .collect(), - len / item_types.len(), - ) - } else { - BrilligFunctionContext::ssa_type_to_parameter(&typ) - } - }) - .collect() + BrilligParameter::Slice( + item_types.iter().map(BrilligFunctionContext::ssa_type_to_parameter).collect(), + len / item_types.len(), + ) + } else { + BrilligFunctionContext::ssa_type_to_parameter(&typ) + } + }); + parameters.collect() } /// Handles an ArrayGet or ArraySet instruction. 
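// Illustrative arithmetic only, matching the division in the slice branch of
// gen_brillig_parameters above (assumed flattened layout): a slice is stored
// as flat memory, so the item count handed to Brillig is the flattened length
// divided by the number of fields in one item.
fn slice_item_count(flattened_len: usize, fields_per_item: usize) -> usize {
    assert!(fields_per_item > 0 && flattened_len % fields_per_item == 0);
    flattened_len / fields_per_item
}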
@@ -1009,7 +1012,7 @@ impl<'a> Context<'a> { // Pass the instruction between array methods rather than the internal fields themselves let (array, index, store_value) = match dfg[instruction] { - Instruction::ArrayGet { array, index } => (array, index, None), + Instruction::ArrayGet { array, index, result_type: _ } => (array, index, None), Instruction::ArraySet { array, index, value, mutable } => { mutable_array_set = mutable; (array, index, Some(value)) @@ -1046,7 +1049,7 @@ impl<'a> Context<'a> { let is_simple_array = dfg.instruction_results(instruction).len() == 1 && can_omit_element_sizes_array(&array_typ); let offset = if is_simple_array { - let result_type = dfg.type_of_value(dfg.instruction_results(instruction)[0]); + let result_type = dfg.type_of_value(Value::instruction_result(instruction, 0)); match array_typ { Type::Array(item_type, _) | Type::Slice(item_type) => item_type .iter() @@ -1078,9 +1081,9 @@ impl<'a> Context<'a> { &mut self, instruction: InstructionId, dfg: &DataFlowGraph, - array: ValueId, - index: ValueId, - store_value: Option, + array: Value, + index: Value, + store_value: Option, ) -> Result { let array_id = dfg.resolve(array); let array_typ = dfg.type_of_value(array_id); @@ -1102,7 +1105,7 @@ impl<'a> Context<'a> { // `AcirValue::Array` supports reading/writing to constant indices at compile-time in some cases. if let Some(constant_index) = dfg.get_numeric_constant(index) { let store_value = store_value.map(|value| self.convert_value(value, dfg)); - self.handle_constant_index(instruction, dfg, array, constant_index, store_value) + self.handle_constant_index(instruction, array, constant_index, store_value) } else { Ok(false) } @@ -1116,7 +1119,6 @@ impl<'a> Context<'a> { fn handle_constant_index( &mut self, instruction: InstructionId, - dfg: &DataFlowGraph, array: im::Vector, index: FieldElement, store_value: Option, @@ -1145,7 +1147,7 @@ impl<'a> Context<'a> { if side_effects_always_enabled { // If we know that this write will always occur then we can perform it at compile time. let value = AcirValue::Array(array.update(index, store_value)); - self.define_result(dfg, instruction, value); + self.define_result(instruction, value); Ok(true) } else { // If a predicate is applied however we must wait until runtime. @@ -1156,7 +1158,7 @@ impl<'a> Context<'a> { // as if the predicate were true. This is as if the predicate were to resolve to false then // the result should not affect the rest of circuit execution. let value = array[index].clone(); - self.define_result(dfg, instruction, value); + self.define_result(instruction, value); Ok(true) } } @@ -1172,10 +1174,10 @@ impl<'a> Context<'a> { /// It is a dummy value because in the case of a false predicate, the value stored at the requested index will be itself. 
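// Sketch of the "dummy value" rule in the comment above, in plain arithmetic
// with assumed names: the value actually written is a blend of the requested
// value and the value already stored at the index, so a false predicate turns
// the write into a no-op.
fn predicated_store(predicate: u64, requested: u64, current: u64) -> u64 {
    debug_assert!(predicate <= 1);
    // In the circuit this is the same select identity over field elements:
    // predicate * requested + (1 - predicate) * current.
    predicate * requested + (1 - predicate) * current
}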
fn convert_array_operation_inputs( &mut self, - array_id: ValueId, + array_id: Value, dfg: &DataFlowGraph, - index: ValueId, - store_value: Option, + index: Value, + store_value: Option, offset: usize, ) -> Result<(AcirVar, Option), RuntimeError> { let array_typ = dfg.type_of_value(array_id); @@ -1320,14 +1322,14 @@ impl<'a> Context<'a> { fn array_get( &mut self, instruction: InstructionId, - array: ValueId, + array: Value, mut var_index: AcirVar, dfg: &DataFlowGraph, mut index_side_effect: bool, ) -> Result { let block_id = self.ensure_array_is_initialized(array, dfg)?; - let results = dfg.instruction_results(instruction); - let res_typ = dfg.type_of_value(results[0]); + let result = Value::instruction_result(instruction, 0); + let res_typ = dfg.type_of_value(result); // Get operations to call-data parameters are replaced by a get to the call-data-bus array let call_data = self.data_bus.call_data.iter().find(|cd| cd.index_map.contains_key(&array)).cloned(); @@ -1338,7 +1340,7 @@ impl<'a> Context<'a> { .add_constant(FieldElement::from(call_data.index_map[&array] as i128)); let mut current_index = self.acir_context.add_var(bus_index, var_index)?; let result = self.get_from_call_data(&mut current_index, call_data_block, &res_typ)?; - self.define_result(dfg, instruction, result.clone()); + self.define_result(instruction, result.clone()); return Ok(result); } // Compiler sanity check @@ -1369,7 +1371,7 @@ impl<'a> Context<'a> { } } - self.define_result(dfg, instruction, value.clone()); + self.define_result(instruction, value.clone()); Ok(value) } @@ -1453,17 +1455,15 @@ impl<'a> Context<'a> { // Since array_set creates a new array, we create a new block ID for this // array, unless map_array is true. In that case, we operate directly on block_id // and we do not create a new block ID. - let result_id = dfg - .instruction_results(instruction) - .first() - .expect("Array set does not have one result"); + let result = Value::instruction_result(instruction, 0); + let result_block_id; if mutate_array { - self.memory_blocks.insert(*result_id, block_id); + self.memory_blocks.insert(result, block_id); result_block_id = block_id; } else { // Initialize the new array with the values from the old array - result_block_id = self.block_id(result_id); + result_block_id = self.block_id(&result); self.copy_dynamic_array(block_id, result_block_id, array_len)?; } @@ -1486,7 +1486,7 @@ impl<'a> Context<'a> { value_types, element_type_sizes, }); - self.define_result(dfg, instruction, result_value); + self.define_result(instruction, result_value); Ok(()) } @@ -1524,7 +1524,7 @@ impl<'a> Context<'a> { fn ensure_array_is_initialized( &mut self, - array: ValueId, + array: Value, dfg: &DataFlowGraph, ) -> Result { // Use the SSA ID to get or create its block ID @@ -1534,8 +1534,7 @@ impl<'a> Context<'a> { // if not, we initialize it using the values from SSA let already_initialized = self.initialized_arrays.contains(&block_id); if !already_initialized { - let value = &dfg[array]; - match value { + match array { Value::Instruction { .. 
} => { let value = self.convert_value(array, dfg); let array_typ = dfg.type_of_value(array); @@ -1562,11 +1561,11 @@ impl<'a> Context<'a> { fn init_element_type_sizes_array( &mut self, array_typ: &Type, - array_id: ValueId, + array: Value, supplied_acir_value: Option<&AcirValue>, dfg: &DataFlowGraph, ) -> Result { - let element_type_sizes = self.internal_block_id(&array_id); + let element_type_sizes = self.internal_block_id(&array); // Check whether an internal type sizes array has already been initialized // Need to look into how to optimize for slices as this could lead to different element type sizes // for different slices that do not have consistent sizes @@ -1578,11 +1577,11 @@ impl<'a> Context<'a> { flat_elem_type_sizes.push(0); match array_typ { Type::Array(_, _) | Type::Slice(_) => { - match &dfg[array_id] { + match array { Value::Instruction { .. } | Value::Param { .. } => { // An instruction representing the slice means it has been processed previously during ACIR gen. // Use the previously defined result of an array operation to fetch the internal type information. - let array_acir_value = &self.convert_value(array_id, dfg); + let array_acir_value = &self.convert_value(array, dfg); let array_acir_value = supplied_acir_value.unwrap_or(array_acir_value); match array_acir_value { AcirValue::DynamicArray(AcirDynamicArray { @@ -1593,7 +1592,7 @@ impl<'a> Context<'a> { if self.initialized_arrays.contains(inner_elem_type_sizes) { let type_sizes_array_len = *self.internal_mem_block_lengths.get(inner_elem_type_sizes).ok_or_else(|| InternalError::General { - message: format!("Array {array_id}'s inner element type sizes array does not have a tracked length"), + message: format!("Array {array}'s inner element type sizes array does not have a tracked length"), call_stack: self.acir_context.get_call_stack(), } )?; @@ -1607,7 +1606,7 @@ impl<'a> Context<'a> { return Ok(element_type_sizes); } else { return Err(InternalError::General { - message: format!("Array {array_id}'s inner element type sizes array should be initialized"), + message: format!("Array {array}'s inner element type sizes array should be initialized"), call_stack: self.acir_context.get_call_stack(), } .into()); @@ -1635,7 +1634,7 @@ impl<'a> Context<'a> { _ => { return Err(InternalError::Unexpected { expected: "array or instruction".to_owned(), - found: format!("{:?}", &dfg[array_id]), + found: format!("{array}"), call_stack: self.acir_context.get_call_stack(), } .into()) @@ -1689,7 +1688,7 @@ impl<'a> Context<'a> { fn get_flattened_index( &mut self, array_typ: &Type, - array_id: ValueId, + array_id: Value, var_index: AcirVar, dfg: &DataFlowGraph, ) -> Result { @@ -1708,18 +1707,18 @@ impl<'a> Context<'a> { } } - fn flattened_slice_size(&mut self, array_id: ValueId, dfg: &DataFlowGraph) -> usize { + fn flattened_slice_size(&mut self, array: Value, dfg: &DataFlowGraph) -> usize { let mut size = 0; - match &dfg[array_id] { + match array { Value::NumericConstant { .. } => { size += 1; } Value::Instruction { .. } => { - let array_acir_value = self.convert_value(array_id, dfg); + let array_acir_value = self.convert_value(array, dfg); size += Self::flattened_value_size(&array_acir_value); } Value::Param { .. 
} => { - let array_acir_value = self.convert_value(array_id, dfg); + let array_acir_value = self.convert_value(array, dfg); size += Self::flattened_value_size(&array_acir_value); } _ => { @@ -1775,14 +1774,9 @@ impl<'a> Context<'a> { } /// Remember the result of an instruction returning a single value - fn define_result( - &mut self, - dfg: &DataFlowGraph, - instruction: InstructionId, - result: AcirValue, - ) { - let result_ids = dfg.instruction_results(instruction); - self.ssa_values.insert(result_ids[0], result); + fn define_result(&mut self, instruction: InstructionId, result: AcirValue) { + let ssa_result = Value::instruction_result(instruction, 0); + self.ssa_values.insert(ssa_result, result); } /// Remember the result of instruction returning a single numeric value @@ -1792,9 +1786,9 @@ impl<'a> Context<'a> { instruction: InstructionId, result: AcirVar, ) { - let result_ids = dfg.instruction_results(instruction); - let typ = dfg.type_of_value(result_ids[0]).into(); - self.define_result(dfg, instruction, AcirValue::Var(result, typ)); + let ssa_result = Value::instruction_result(instruction, 0); + let typ = dfg.type_of_value(ssa_result).into(); + self.define_result(instruction, AcirValue::Var(result, typ)); } /// Converts an SSA terminator's return values into their ACIR representations @@ -1864,17 +1858,17 @@ impl<'a> Context<'a> { /// It is not safe to call this function on value ids that represent addresses. Instructions /// involving such values are evaluated via a separate path and stored in /// `ssa_value_to_array_address` instead. - fn convert_value(&mut self, value_id: ValueId, dfg: &DataFlowGraph) -> AcirValue { - let value_id = dfg.resolve(value_id); - let value = &dfg[value_id]; - if let Some(acir_value) = self.ssa_values.get(&value_id) { + fn convert_value(&mut self, value: Value, dfg: &DataFlowGraph) -> AcirValue { + let value = dfg.resolve(value); + if let Some(acir_value) = self.ssa_values.get(&value) { return acir_value.clone(); } let acir_value = match value { Value::NumericConstant { constant, typ } => { - let typ = AcirType::from(Type::Numeric(*typ)); - AcirValue::Var(self.acir_context.add_constant(*constant), typ) + let typ = AcirType::from(Type::Numeric(typ)); + let constant = dfg[constant]; + AcirValue::Var(self.acir_context.add_constant(constant), typ) } Value::Intrinsic(..) => todo!(), Value::Function(function_id) => { @@ -1888,16 +1882,16 @@ impl<'a> Context<'a> { "Oracle calls directly in constrained functions are not yet available." ), Value::Instruction { .. } | Value::Param { .. } => { - unreachable!("ICE: Should have been in cache {value_id} {value:?}") + unreachable!("ICE: Should have been in cache {value}") } }; - self.ssa_values.insert(value_id, acir_value.clone()); + self.ssa_values.insert(value, acir_value.clone()); acir_value } fn convert_numeric_value( &mut self, - value_id: ValueId, + value_id: Value, dfg: &DataFlowGraph, ) -> Result { match self.convert_value(value_id, dfg) { @@ -1931,11 +1925,11 @@ impl<'a> Context<'a> { // Conservative max bit size that is small enough such that two operands can be // multiplied and still fit within the field modulus. This is necessary for the // truncation technique: result % 2^bit_size to be valid. 
- let max_integer_bit_size = FieldElement::max_num_bits() / 2; + let max_integer_bit_size = (FieldElement::max_num_bits() / 2) as u8; if *bit_size > max_integer_bit_size { return Err(RuntimeError::UnsupportedIntegerSize { - num_bits: *bit_size, - max_num_bits: max_integer_bit_size, + num_bits: *bit_size as u32, + max_num_bits: max_integer_bit_size as u32, call_stack: self.acir_context.get_call_stack(), }); } @@ -1999,9 +1993,9 @@ impl<'a> Context<'a> { fn check_unsigned_overflow( &mut self, result: AcirVar, - bit_size: u32, - lhs: ValueId, - rhs: ValueId, + bit_size: u8, + lhs: Value, + rhs: Value, dfg: &DataFlowGraph, op: BinaryOp, ) -> Result<(), RuntimeError> { @@ -2094,21 +2088,22 @@ impl<'a> Context<'a> { /// Returns an `AcirVar`that is constrained to be result of the truncation. fn convert_ssa_truncate( &mut self, - value_id: ValueId, - bit_size: u32, - max_bit_size: u32, + value_id: Value, + bit_size: u8, + max_bit_size: u8, dfg: &DataFlowGraph, ) -> Result { let mut var = self.convert_numeric_value(value_id, dfg)?; - match &dfg[value_id] { + match value_id { Value::Instruction { instruction, .. } => { if matches!( - &dfg[*instruction], + &dfg[instruction], Instruction::Binary(Binary { operator: BinaryOp::Sub, .. }) ) { // Subtractions must first have the integer modulus added before truncation can be // applied. This is done in order to prevent underflow. - let integer_modulus = self.acir_context.add_constant(2_u128.pow(bit_size)); + let integer_modulus = + self.acir_context.add_constant(2_u128.pow(bit_size as u32)); var = self.acir_context.add_var(var, integer_modulus)?; } } @@ -2130,16 +2125,17 @@ impl<'a> Context<'a> { fn convert_ssa_intrinsic_call( &mut self, intrinsic: Intrinsic, - arguments: &[ValueId], + arguments: &[Value], dfg: &DataFlowGraph, - result_ids: &[ValueId], + instruction_id: InstructionId, ) -> Result, RuntimeError> { match intrinsic { Intrinsic::Hint(Hint::BlackBox) => { // Identity function; at the ACIR level this is a no-op, it only affects the SSA. + let results = dfg.instruction_results(instruction_id); assert_eq!( arguments.len(), - result_ids.len(), + results.len(), "ICE: BlackBox input and output lengths should match." 
); Ok(arguments.iter().map(|v| self.convert_value(*v, dfg)).collect()) @@ -2168,13 +2164,15 @@ impl<'a> Context<'a> { let inputs = vecmap(&arguments_no_slice_len, |arg| self.convert_value(*arg, dfg)); - let output_count = result_ids.iter().fold(0usize, |sum, result_id| { - sum + dfg.try_get_array_length(*result_id).unwrap_or(1) as usize + let results = dfg.instruction_results(instruction_id); + let output_count = results.fold(0usize, |sum, result| { + sum + dfg.try_get_array_length(result).unwrap_or(1) as usize }); let vars = self.acir_context.black_box_function(black_box, inputs, output_count)?; - Ok(self.convert_vars_to_values(vars, dfg, result_ids)) + let results = dfg.instruction_results(instruction_id); + Ok(self.convert_vars_to_values(vars, dfg, results)) } Intrinsic::ApplyRangeConstraint => { unreachable!("ICE: `Intrinsic::ApplyRangeConstraint` calls should be transformed into an `Instruction::RangeCheck`"); @@ -2183,8 +2181,8 @@ impl<'a> Context<'a> { let field = self.convert_value(arguments[0], dfg).into_var()?; let radix = self.convert_value(arguments[1], dfg).into_var()?; - let Type::Array(result_type, array_length) = dfg.type_of_value(result_ids[0]) - else { + let result = Value::instruction_result(instruction_id, 0); + let Type::Array(result_type, array_length) = dfg.type_of_value(result) else { unreachable!("ICE: ToRadix result must be an array"); }; @@ -2200,9 +2198,9 @@ impl<'a> Context<'a> { } Intrinsic::ToBits(endian) => { let field = self.convert_value(arguments[0], dfg).into_var()?; + let result = Value::instruction_result(instruction_id, 0); - let Type::Array(result_type, array_length) = dfg.type_of_value(result_ids[0]) - else { + let Type::Array(result_type, array_length) = dfg.type_of_value(result) else { unreachable!("ICE: ToRadix result must be an array"); }; @@ -2224,7 +2222,8 @@ impl<'a> Context<'a> { let block_id = self.ensure_array_is_initialized(slice_contents, dfg)?; assert!(!slice_typ.is_nested_slice(), "ICE: Nested slice used in ACIR generation"); - let result_block_id = self.block_id(&result_ids[1]); + let slice_result = Value::instruction_result(instruction_id, 1); + let result_block_id = self.block_id(&slice_result); let acir_value = self.convert_value(slice_contents, dfg); let array_len = if !slice_typ.contains_slice_element() { @@ -2290,7 +2289,7 @@ impl<'a> Context<'a> { let new_slice_length = self.acir_context.add_var(slice_length, one)?; let new_slice_val = AcirValue::Array(new_slice); - let result_block_id = self.block_id(&result_ids[1]); + let result_block_id = self.block_id(&Value::instruction_result(instruction_id, 1)); self.initialize_array(result_block_id, new_elem_size, Some(new_slice_val.clone()))?; // The previous slice length represents the index we want to write into. let mut var_index = slice_length; @@ -2360,7 +2359,7 @@ impl<'a> Context<'a> { let new_slice_val = AcirValue::Array(new_slice.clone()); - let result_block_id = self.block_id(&result_ids[1]); + let result_block_id = self.block_id(&Value::instruction_result(instruction_id, 1)); self.initialize_array( result_block_id, new_slice_size, @@ -2410,12 +2409,11 @@ impl<'a> Context<'a> { let block_id = self.ensure_array_is_initialized(slice_contents, dfg)?; assert!(!slice_typ.is_nested_slice(), "ICE: Nested slice used in ACIR generation"); - let mut popped_elements = Vec::new(); - for res in &result_ids[2..] 
{ - let elem = - self.array_get_value(&dfg.type_of_value(*res), block_id, &mut var_index)?; - popped_elements.push(elem); - } + let ssa_results = dfg.instruction_results(instruction_id); + + let mut popped_elements = try_vecmap(ssa_results.skip(2), |res| { + self.array_get_value(&dfg.type_of_value(res), block_id, &mut var_index) + })?; let slice = self.convert_value(slice_contents, dfg); let mut new_slice = im::Vector::new(); @@ -2449,19 +2447,20 @@ impl<'a> Context<'a> { let element_size = slice_typ.element_size(); - let mut popped_elements: Vec = Vec::new(); let mut popped_elements_size = 0; let mut var_index = self.acir_context.add_constant(FieldElement::zero()); + // Fetch the values we are popping off of the slice. // In the case of non-nested slice the logic is simple as we do not // need to account for the internal slice sizes or flattening the index. - for res in &result_ids[..element_size] { + let ssa_results = dfg.instruction_results(instruction_id); + let mut popped_elements = try_vecmap(ssa_results.take(element_size), |res| { let element = - self.array_get_value(&dfg.type_of_value(*res), block_id, &mut var_index)?; + self.array_get_value(&dfg.type_of_value(res), block_id, &mut var_index)?; let elem_size = Self::flattened_value_size(&element); popped_elements_size += elem_size; - popped_elements.push(element); - } + Ok::<_, RuntimeError>(element) + })?; // It is expected that the `popped_elements_size` is the flattened size of the elements, // as the input slice should be a dynamic array which is represented by flat memory. @@ -2522,7 +2521,8 @@ impl<'a> Context<'a> { // the flattened element arguments. // 3. If we are above the max insertion index we should insert the previous value from the original slice, // as during an insertion we want to shift all elements after the insertion up an index. - let result_block_id = self.block_id(&result_ids[1]); + let slice_result = Value::instruction_result(instruction_id, 1); + let result_block_id = self.block_id(&slice_result); self.initialize_array(result_block_id, slice_size, None)?; let mut current_insert_index = 0; for i in 0..slice_size { @@ -2652,18 +2652,19 @@ impl<'a> Context<'a> { // Fetch the values we are remove from the slice. // As we fetch the values we can determine the size of the removed values // which we will later use for writing the correct resulting slice. - let mut popped_elements = Vec::new(); let mut popped_elements_size = 0; // Set a temp index just for fetching from the original slice as `array_get_value` mutates // the index internally. let mut temp_index = flat_user_index; - for res in &result_ids[2..(2 + element_size)] { + + let results = dfg.instruction_results(instruction_id); + let mut popped_elements = try_vecmap(results.skip(2).take(element_size), |res| { let element = - self.array_get_value(&dfg.type_of_value(*res), block_id, &mut temp_index)?; + self.array_get_value(&dfg.type_of_value(res), block_id, &mut temp_index)?; let elem_size = Self::flattened_value_size(&element); popped_elements_size += elem_size; - popped_elements.push(element); - } + Ok::<_, RuntimeError>(element) + })?; // Go through the entire slice argument and determine what value should be written to the new slice. // 1. If the current index is greater than the removal index we must write the next value @@ -2671,7 +2672,7 @@ impl<'a> Context<'a> { // 2. At the end of the slice reading from the next value of the original slice // can lead to a potential out of bounds error. 
In this case we just fetch from the original slice // at the current index. As we are decreasing the slice in length, this is a safe operation. - let result_block_id = self.block_id(&result_ids[1]); + let result_block_id = self.block_id(&Value::instruction_result(instruction_id, 1)); self.initialize_array( result_block_id, slice_size, @@ -2747,10 +2748,11 @@ impl<'a> Context<'a> { Intrinsic::AsWitness => { let arg = arguments[0]; let input = self.convert_value(arg, dfg).into_var()?; + let results = dfg.instruction_results(instruction_id); Ok(self .acir_context .get_or_create_witness_var(input) - .map(|val| self.convert_vars_to_values(vec![val], dfg, result_ids))?) + .map(|val| self.convert_vars_to_values(vec![val], dfg, results))?) } Intrinsic::ArrayAsStrUnchecked => Ok(vec![self.convert_value(arguments[0], dfg)]), Intrinsic::AssertConstant => { @@ -2818,12 +2820,13 @@ impl<'a> Context<'a> { &self, vars: Vec, dfg: &DataFlowGraph, - result_ids: &[ValueId], + results: impl ExactSizeIterator, ) -> Vec { let mut vars = vars.into_iter(); - let mut values: Vec = Vec::new(); - for result in result_ids { - let result_type = dfg.type_of_value(*result); + let mut values: Vec = Vec::with_capacity(results.len()); + + for result in results { + let result_type = dfg.type_of_value(result); if let Type::Slice(elements_type) = result_type { let error = "ICE - cannot get slice length when converting slice to AcirValue"; let len = values.last().expect(error).borrow_var().expect(error); @@ -2907,6 +2910,7 @@ mod test { instruction::BinaryOp, map::Id, types::{NumericType, Type}, + value::Value, }, }, }; @@ -2938,7 +2942,7 @@ mod test { let foo_v1 = builder.add_parameter(Type::field()); let foo_equality_check = builder.insert_binary(foo_v0, BinaryOp::Eq, foo_v1); - let zero = builder.numeric_constant(0u128, NumericType::unsigned(1)); + let zero = builder.constant(0u128.into(), NumericType::unsigned(1)); builder.insert_constrain(foo_equality_check, zero, None); builder.terminate_with_return(vec![foo_v0]); } @@ -2985,12 +2989,13 @@ mod test { let main_v1 = builder.add_parameter(Type::field()); let foo_id = Id::test_new(1); - let foo = builder.import_function(foo_id); - let main_call1_results = - builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); - let main_call2_results = - builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); - builder.insert_constrain(main_call1_results[0], main_call2_results[0], None); + let foo = Value::Function(foo_id); + let main_call1_result = + builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).next().unwrap(); + let main_call2_result = + builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).next().unwrap(); + + builder.insert_constrain(main_call1_result, main_call2_result, None); builder.terminate_with_return(vec![]); build_basic_foo_with_return(&mut builder, foo_id, false, inline_type); @@ -3089,13 +3094,16 @@ mod test { let main_v1 = builder.add_parameter(Type::field()); let foo_id = Id::test_new(1); - let foo = builder.import_function(foo_id); - let main_call1_results = - builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); - let main_call2_results = builder - .insert_call(foo, vec![main_call1_results[0], main_v1], vec![Type::field()]) - .to_vec(); - builder.insert_constrain(main_call1_results[0], main_call2_results[0], None); + let foo = Value::Function(foo_id); + let main_call1_result = + builder.insert_call(foo, vec![main_v0, main_v1], 
vec![Type::field()]).next().unwrap(); + + let main_call2_result = builder + .insert_call(foo, vec![main_call1_result, main_v1], vec![Type::field()]) + .next() + .unwrap(); + + builder.insert_constrain(main_call1_result, main_call2_result, None); builder.terminate_with_return(vec![]); build_basic_foo_with_return(&mut builder, foo_id, false, inline_type); @@ -3171,14 +3179,18 @@ mod test { let main_v1 = builder.add_parameter(Type::field()); let func_with_nested_foo_call_id = Id::test_new(1); - let func_with_nested_foo_call = builder.import_function(func_with_nested_foo_call_id); - let main_call1_results = builder + let func_with_nested_foo_call = Value::Function(func_with_nested_foo_call_id); + let main_call1_result = builder .insert_call(func_with_nested_foo_call, vec![main_v0, main_v1], vec![Type::field()]) - .to_vec(); - let main_call2_results = builder + .next() + .unwrap(); + + let main_call2_result = builder .insert_call(func_with_nested_foo_call, vec![main_v0, main_v1], vec![Type::field()]) - .to_vec(); - builder.insert_constrain(main_call1_results[0], main_call2_results[0], None); + .next() + .unwrap(); + + builder.insert_constrain(main_call1_result, main_call2_result, None); builder.terminate_with_return(vec![]); builder.new_function( @@ -3189,15 +3201,15 @@ mod test { let func_with_nested_call_v0 = builder.add_parameter(Type::field()); let func_with_nested_call_v1 = builder.add_parameter(Type::field()); - let two = builder.field_constant(2u128); + let two = builder.field_constant(2u128.into()); let v0_plus_two = builder.insert_binary(func_with_nested_call_v0, BinaryOp::Add, two); let foo_id = Id::test_new(2); - let foo_call = builder.import_function(foo_id); + let foo_call = Value::Function(foo_id); let foo_call = builder .insert_call(foo_call, vec![v0_plus_two, func_with_nested_call_v1], vec![Type::field()]) - .to_vec(); - builder.terminate_with_return(vec![foo_call[0]]); + .collect(); + builder.terminate_with_return(foo_call); build_basic_foo_with_return(&mut builder, foo_id, false, inline_type); @@ -3307,18 +3319,18 @@ mod test { let main_v1 = builder.add_parameter(Type::field()); let foo_id = Id::test_new(1); - let foo = builder.import_function(foo_id); + let foo = Value::Function(foo_id); let bar_id = Id::test_new(2); - let bar = builder.import_function(bar_id); + let bar = Value::Function(bar_id); // Insert multiple calls to the same Brillig function - builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); - builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); - builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); + builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]); + builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]); + builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]); // Interleave a call to a separate Brillig function to make sure that we can call multiple separate Brillig functions - builder.insert_call(bar, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); - builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); - builder.insert_call(bar, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); + builder.insert_call(bar, vec![main_v0, main_v1], vec![Type::field()]); + builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]); + builder.insert_call(bar, vec![main_v0, main_v1], vec![Type::field()]); builder.terminate_with_return(vec![]); build_basic_foo_with_return(&mut builder, foo_id, true, 
InlineType::default()); @@ -3382,7 +3394,7 @@ mod test { // Call the same primitive operation again let v1_div_v2 = builder.insert_binary(main_v1, BinaryOp::Div, main_v2); - let one = builder.numeric_constant(1u128, NumericType::unsigned(32)); + let one = builder.constant(1u128.into(), NumericType::unsigned(32)); builder.insert_constrain(v1_div_v2, one, None); builder.terminate_with_return(vec![]); @@ -3443,19 +3455,19 @@ mod test { let main_v2 = builder.add_parameter(Type::unsigned(32)); let foo_id = Id::test_new(1); - let foo = builder.import_function(foo_id); + let foo = Value::Function(foo_id); // Call a primitive operation that uses Brillig let v0_div_v1 = builder.insert_binary(main_v0, BinaryOp::Div, main_v1); builder.insert_constrain(v0_div_v1, main_v2, None); // Insert multiple calls to the same Brillig function - builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); - builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); + builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]); + builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]); // Call the same primitive operation again let v1_div_v2 = builder.insert_binary(main_v1, BinaryOp::Div, main_v2); - let one = builder.numeric_constant(1u128, NumericType::unsigned(32)); + let one = builder.constant(1u128.into(), NumericType::unsigned(32)); builder.insert_constrain(v1_div_v2, one, None); builder.terminate_with_return(vec![]); @@ -3526,22 +3538,22 @@ mod test { let main_v2 = builder.add_parameter(Type::unsigned(32)); let foo_id = Id::test_new(1); - let foo = builder.import_function(foo_id); + let foo = Value::Function(foo_id); let bar_id = Id::test_new(2); - let bar = builder.import_function(bar_id); + let bar = Value::Function(bar_id); // Call a primitive operation that uses Brillig let v0_div_v1 = builder.insert_binary(main_v0, BinaryOp::Div, main_v1); builder.insert_constrain(v0_div_v1, main_v2, None); // Insert multiple calls to the same Brillig function - builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); - builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); - builder.insert_call(bar, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); + builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]); + builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]); + builder.insert_call(bar, vec![main_v0, main_v1], vec![Type::field()]); // Call the same primitive operation again let v1_div_v2 = builder.insert_binary(main_v1, BinaryOp::Div, main_v2); - let one = builder.numeric_constant(1u128, NumericType::unsigned(32)); + let one = builder.constant(1u128.into(), NumericType::unsigned(32)); builder.insert_constrain(v1_div_v2, one, None); builder.terminate_with_return(vec![]); diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs index 5bcddc21275..4e33798cec9 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs @@ -17,7 +17,7 @@ use crate::ssa::ir::{ Binary, BinaryOp, Endian, Instruction, InstructionId, Intrinsic, TerminatorInstruction, }, types::{NumericType, Type}, - value::{Value, ValueId}, + value::Value, }; use acvm::acir::brillig::{MemoryAddress, ValueOrArray}; use acvm::{acir::AcirField, FieldElement}; @@ -41,7 +41,7 @@ pub(crate) struct BrilligBlock<'block> { /// Tracks the 
available variable during the codegen of the block pub(crate) variables: BlockVariables, /// For each instruction, the set of values that are not used anymore after it. - pub(crate) last_uses: HashMap>, + pub(crate) last_uses: HashMap>, } impl<'block> BrilligBlock<'block> { @@ -146,11 +146,10 @@ impl<'block> BrilligBlock<'block> { arguments, call_stack: _, } => { - let target_block = &dfg[*destination_block]; - for (src, dest) in arguments.iter().zip(target_block.parameters()) { + for (src, dest) in arguments.iter().zip(dfg.block_parameters(*destination_block)) { // Destinations are block parameters so they should have been allocated previously. let destination = - self.variables.get_allocation(self.function_context, *dest, dfg); + self.variables.get_allocation(self.function_context, dest, dfg); let source = self.convert_ssa_value(*src, dfg); self.brillig_context .mov_instruction(destination.extract_register(), source.extract_register()); @@ -160,8 +159,8 @@ impl<'block> BrilligBlock<'block> { ); } TerminatorInstruction::Return { return_values, .. } => { - let return_registers = vecmap(return_values, |value_id| { - self.convert_ssa_value(*value_id, dfg).extract_register() + let return_registers = vecmap(return_values, |value| { + self.convert_ssa_value(*value, dfg).extract_register() }); self.brillig_context.codegen_return(&return_registers); } @@ -175,12 +174,7 @@ impl<'block> BrilligBlock<'block> { // the block parameters need to be defined/allocated before the given block. Variable liveness provides when the block parameters are defined. // For the entry block, the defined block params will be the params of the function + any extra params of blocks it's the immediate dominator of. for param_id in self.function_context.liveness.defined_block_params(&self.block_id) { - let value = &dfg[param_id]; - let param_type = match value { - Value::Param { typ, .. } => typ, - _ => unreachable!("ICE: Only Param type values should appear in block parameters"), - }; - match param_type { + match dfg.type_of_value(param_id) { // Simple parameters and arrays are passed as already filled registers // In the case of arrays, the values should already be in memory and the register should // Be a valid pointer to the array. @@ -215,7 +209,7 @@ impl<'block> BrilligBlock<'block> { let result_var = self.variables.define_single_addr_variable( self.function_context, self.brillig_context, - dfg.instruction_results(instruction_id)[0], + Value::instruction_result(instruction_id, 0), dfg, ); self.convert_ssa_binary(binary, dfg, result_var); @@ -279,8 +273,8 @@ impl<'block> BrilligBlock<'block> { self.brillig_context.deallocate_single_addr(condition); } } - Instruction::Allocate => { - let result_value = dfg.instruction_results(instruction_id)[0]; + Instruction::Allocate { .. 
} => { + let result_value = Value::instruction_result(instruction_id, 0); let pointer = self.variables.define_single_addr_variable( self.function_context, self.brillig_context, @@ -296,11 +290,11 @@ impl<'block> BrilligBlock<'block> { self.brillig_context .store_instruction(address_var.address, source_variable.extract_register()); } - Instruction::Load { address } => { + Instruction::Load { address, result_type: _ } => { let target_variable = self.variables.define_variable( self.function_context, self.brillig_context, - dfg.instruction_results(instruction_id)[0], + Value::instruction_result(instruction_id, 0), dfg, ); @@ -314,35 +308,37 @@ impl<'block> BrilligBlock<'block> { let result_register = self.variables.define_single_addr_variable( self.function_context, self.brillig_context, - dfg.instruction_results(instruction_id)[0], + Value::instruction_result(instruction_id, 0), dfg, ); self.brillig_context.not_instruction(condition_register, result_register); } - Instruction::Call { func, arguments } => match &dfg[*func] { + Instruction::Call { func, arguments, result_types: _ } => match func { Value::ForeignFunction(func_name) => { - let result_ids = dfg.instruction_results(instruction_id); + let result_ids = dfg.instruction_results(instruction_id).collect::>(); - let input_values = vecmap(arguments, |value_id| { - let variable = self.convert_ssa_value(*value_id, dfg); + let input_values = vecmap(arguments, |value| { + let variable = self.convert_ssa_value(*value, dfg); self.brillig_context.variable_to_value_or_array(variable) }); - let input_value_types = vecmap(arguments, |value_id| { - let value_type = dfg.type_of_value(*value_id); + let input_value_types = vecmap(arguments, |value| { + let value_type = dfg.type_of_value(*value); type_to_heap_value_type(&value_type) }); - let output_variables = vecmap(result_ids, |value_id| { - self.allocate_external_call_result(*value_id, dfg) + let output_variables = vecmap(&result_ids, |value| { + self.allocate_external_call_result(*value, dfg) }); let output_values = vecmap(&output_variables, |variable| { self.brillig_context.variable_to_value_or_array(*variable) }); - let output_value_types = vecmap(result_ids, |value_id| { - let value_type = dfg.type_of_value(*value_id); + let output_value_types = vecmap(&result_ids, |value| { + let value_type = dfg.type_of_value(*value); type_to_heap_value_type(&value_type) }); + + let func_name = dfg[*func_name].to_owned(); self.brillig_context.foreign_call_instruction( - func_name.to_owned(), + func_name, &input_values, &input_value_types, &output_values, @@ -376,7 +372,8 @@ impl<'block> BrilligBlock<'block> { // Update the dynamic slice length maintained in SSA if let ValueOrArray::MemoryAddress(len_index) = output_values[i - 1] { - let element_size = dfg[result_ids[i]].get_type().element_size(); + let element_size = + dfg.type_of_value(result_ids[i]).element_size(); self.brillig_context .mov_instruction(len_index, heap_vector.size); self.brillig_context.codegen_usize_op_in_place( @@ -408,7 +405,7 @@ impl<'block> BrilligBlock<'block> { let result_variable = self.variables.define_single_addr_variable( self.function_context, self.brillig_context, - dfg.instruction_results(instruction_id)[0], + Value::instruction_result(instruction_id, 0), dfg, ); let param_id = arguments[0]; @@ -430,18 +427,18 @@ impl<'block> BrilligBlock<'block> { } Intrinsic::AsSlice => { let source_variable = self.convert_ssa_value(arguments[0], dfg); - let result_ids = dfg.instruction_results(instruction_id); + let destination_len_variable = 
self.variables.define_single_addr_variable( self.function_context, self.brillig_context, - result_ids[0], + Value::instruction_result(instruction_id, 0), dfg, ); let destination_variable = self.variables.define_variable( self.function_context, self.brillig_context, - result_ids[1], + Value::instruction_result(instruction_id, 1), dfg, ); let destination_vector = destination_variable.extract_vector(); @@ -491,14 +488,12 @@ impl<'block> BrilligBlock<'block> { | Intrinsic::SliceRemove => { self.convert_ssa_slice_intrinsic_call( dfg, - &dfg[dfg.resolve(*func)], + dfg.resolve(*func), instruction_id, arguments, ); } Intrinsic::ToBits(endianness) => { - let results = dfg.instruction_results(instruction_id); - let source = self.convert_ssa_single_addr_value(arguments[0], dfg); let target_array = self @@ -506,7 +501,7 @@ impl<'block> BrilligBlock<'block> { .define_variable( self.function_context, self.brillig_context, - results[0], + Value::instruction_result(instruction_id, 0), dfg, ) .extract_array(); @@ -527,8 +522,6 @@ impl<'block> BrilligBlock<'block> { } Intrinsic::ToRadix(endianness) => { - let results = dfg.instruction_results(instruction_id); - let source = self.convert_ssa_single_addr_value(arguments[0], dfg); let radix = self.convert_ssa_single_addr_value(arguments[1], dfg); @@ -537,7 +530,7 @@ impl<'block> BrilligBlock<'block> { .define_variable( self.function_context, self.brillig_context, - results[0], + Value::instruction_result(instruction_id, 0), dfg, ) .extract_array(); @@ -551,8 +544,9 @@ impl<'block> BrilligBlock<'block> { ); } Intrinsic::Hint(Hint::BlackBox) => { - let result_ids = dfg.instruction_results(instruction_id); - self.convert_ssa_identity_call(arguments, dfg, result_ids); + let result_ids = + dfg.instruction_results(instruction_id).collect::>(); + self.convert_ssa_identity_call(arguments, dfg, &result_ids); } Intrinsic::BlackBox(bb_func) => { // Slices are represented as a tuple of (length, slice contents). 
@@ -584,7 +578,7 @@ impl<'block> BrilligBlock<'block> { }); let function_results = dfg.instruction_results(instruction_id); let function_results = vecmap(function_results, |result| { - self.allocate_external_call_result(*result, dfg) + self.allocate_external_call_result(result, dfg) }); convert_black_box_call( self.brillig_context, @@ -598,17 +592,16 @@ impl<'block> BrilligBlock<'block> { Intrinsic::AsWitness => (), Intrinsic::FieldLessThan => { let lhs = self.convert_ssa_single_addr_value(arguments[0], dfg); - assert!(lhs.bit_size == FieldElement::max_num_bits()); + assert_eq!(lhs.bit_size as u32, FieldElement::max_num_bits()); let rhs = self.convert_ssa_single_addr_value(arguments[1], dfg); - assert!(rhs.bit_size == FieldElement::max_num_bits()); + assert_eq!(rhs.bit_size as u32, FieldElement::max_num_bits()); - let results = dfg.instruction_results(instruction_id); let destination = self .variables .define_variable( self.function_context, self.brillig_context, - results[0], + Value::instruction_result(instruction_id, 0), dfg, ) .extract_single_addr(); @@ -623,7 +616,7 @@ impl<'block> BrilligBlock<'block> { } Intrinsic::ArrayRefCount | Intrinsic::SliceRefCount => { let array = self.convert_ssa_value(arguments[0], dfg); - let result = dfg.instruction_results(instruction_id)[0]; + let result = Value::instruction_result(instruction_id, 0); let destination = self.variables.define_variable( self.function_context, @@ -642,20 +635,19 @@ impl<'block> BrilligBlock<'block> { | Intrinsic::AssertConstant | Intrinsic::StaticAssert | Intrinsic::ArrayAsStrUnchecked => { - unreachable!("unsupported function call type {:?}", dfg[*func]) + unreachable!("unsupported function call type {func}") } } } Value::Instruction { .. } | Value::Param { .. } | Value::NumericConstant { .. } => { - unreachable!("unsupported function call type {:?}", dfg[*func]) + unreachable!("unsupported function call type {func:?}") } }, Instruction::Truncate { value, bit_size, .. 
} => { - let result_ids = dfg.instruction_results(instruction_id); let destination_register = self.variables.define_single_addr_variable( self.function_context, self.brillig_context, - result_ids[0], + Value::instruction_result(instruction_id, 0), dfg, ); let source_register = self.convert_ssa_single_addr_value(*value, dfg); @@ -666,22 +658,20 @@ impl<'block> BrilligBlock<'block> { ); } Instruction::Cast(value, _) => { - let result_ids = dfg.instruction_results(instruction_id); let destination_variable = self.variables.define_single_addr_variable( self.function_context, self.brillig_context, - result_ids[0], + Value::instruction_result(instruction_id, 0), dfg, ); let source_variable = self.convert_ssa_single_addr_value(*value, dfg); self.convert_cast(destination_variable, source_variable); } - Instruction::ArrayGet { array, index } => { - let result_ids = dfg.instruction_results(instruction_id); + Instruction::ArrayGet { array, index, result_type: _ } => { let destination_variable = self.variables.define_variable( self.function_context, self.brillig_context, - result_ids[0], + Value::instruction_result(instruction_id, 0), dfg, ); @@ -709,11 +699,10 @@ impl<'block> BrilligBlock<'block> { let index_register = self.convert_ssa_single_addr_value(*index, dfg); let value_variable = self.convert_ssa_value(*value, dfg); - let result_ids = dfg.instruction_results(instruction_id); let destination_variable = self.variables.define_variable( self.function_context, self.brillig_context, - result_ids[0], + Value::instruction_result(instruction_id, 0), dfg, ); @@ -736,15 +725,16 @@ impl<'block> BrilligBlock<'block> { // Cast original value to field let left = SingleAddrVariable { address: self.brillig_context.allocate_register(), - bit_size: FieldElement::max_num_bits(), + bit_size: FieldElement::max_num_bits() as u8, }; self.convert_cast(left, value); // Create a field constant with the max - let max = BigUint::from(2_u128).pow(*max_bit_size) - BigUint::from(1_u128); + let max = + BigUint::from(2_u128).pow(*max_bit_size as u32) - BigUint::from(1_u128); let right = self.brillig_context.make_constant_instruction( FieldElement::from_be_bytes_reduce(&max.to_bytes_be()), - FieldElement::max_num_bits(), + FieldElement::max_num_bits() as u8, ); // Check if lte max @@ -801,12 +791,13 @@ impl<'block> BrilligBlock<'block> { unreachable!("IfElse instructions should not be possible in brillig") } Instruction::MakeArray { elements: array, typ } => { - let value_id = dfg.instruction_results(instruction_id)[0]; - if !self.variables.is_allocated(&value_id) { + let value = Value::instruction_result(instruction_id, 0); + + if !self.variables.is_allocated(&value) { let new_variable = self.variables.define_variable( self.function_context, self.brillig_context, - value_id, + value, dfg, ); @@ -857,9 +848,9 @@ impl<'block> BrilligBlock<'block> { fn convert_ssa_function_call( &mut self, func_id: FunctionId, - arguments: &[ValueId], + arguments: &[Value], dfg: &DataFlowGraph, - result_ids: &[ValueId], + result_ids: impl ExactSizeIterator, ) { let argument_variables = vecmap(arguments, |argument_id| self.convert_ssa_value(*argument_id, dfg)); @@ -867,7 +858,7 @@ impl<'block> BrilligBlock<'block> { self.variables.define_variable( self.function_context, self.brillig_context, - *result_id, + result_id, dfg, ) }); @@ -877,9 +868,9 @@ impl<'block> BrilligBlock<'block> { /// Copy the input arguments to the results. 
fn convert_ssa_identity_call( &mut self, - arguments: &[ValueId], + arguments: &[Value], dfg: &DataFlowGraph, - result_ids: &[ValueId], + result_ids: &[Value], ) { let argument_variables = vecmap(arguments, |argument_id| self.convert_ssa_value(*argument_id, dfg)); @@ -982,21 +973,23 @@ impl<'block> BrilligBlock<'block> { fn convert_ssa_slice_intrinsic_call( &mut self, dfg: &DataFlowGraph, - intrinsic: &Value, + intrinsic: Value, instruction_id: InstructionId, - arguments: &[ValueId], + arguments: &[Value], ) { let slice_id = arguments[1]; let element_size = dfg.type_of_value(slice_id).element_size(); let source_vector = self.convert_ssa_value(slice_id, dfg).extract_vector(); - let results = dfg.instruction_results(instruction_id); match intrinsic { Value::Intrinsic(Intrinsic::SlicePushBack) => { + let target_len = Value::instruction_result(instruction_id, 0); + let target_variable = Value::instruction_result(instruction_id, 1); + let target_len = match self.variables.define_variable( self.function_context, self.brillig_context, - results[0], + target_len, dfg, ) { BrilligVariable::SingleAddr(register_index) => register_index, @@ -1006,7 +999,7 @@ impl<'block> BrilligBlock<'block> { let target_variable = self.variables.define_variable( self.function_context, self.brillig_context, - results[1], + target_variable, dfg, ); @@ -1025,10 +1018,13 @@ impl<'block> BrilligBlock<'block> { self.slice_push_back_operation(target_vector, source_vector, &item_values); } Value::Intrinsic(Intrinsic::SlicePushFront) => { + let target_len = Value::instruction_result(instruction_id, 0); + let target_variable = Value::instruction_result(instruction_id, 1); + let target_len = match self.variables.define_variable( self.function_context, self.brillig_context, - results[0], + target_len, dfg, ) { BrilligVariable::SingleAddr(register_index) => register_index, @@ -1038,7 +1034,7 @@ impl<'block> BrilligBlock<'block> { let target_variable = self.variables.define_variable( self.function_context, self.brillig_context, - results[1], + target_variable, dfg, ); let target_vector = target_variable.extract_vector(); @@ -1056,6 +1052,7 @@ impl<'block> BrilligBlock<'block> { self.slice_push_front_operation(target_vector, source_vector, &item_values); } Value::Intrinsic(Intrinsic::SlicePopBack) => { + let results = dfg.instruction_results(instruction_id).collect::>(); let target_len = match self.variables.define_variable( self.function_context, self.brillig_context, @@ -1094,6 +1091,7 @@ impl<'block> BrilligBlock<'block> { self.slice_pop_back_operation(target_vector, source_vector, &pop_variables); } Value::Intrinsic(Intrinsic::SlicePopFront) => { + let results = dfg.instruction_results(instruction_id).collect::>(); let target_len = match self.variables.define_variable( self.function_context, self.brillig_context, @@ -1131,17 +1129,19 @@ impl<'block> BrilligBlock<'block> { self.slice_pop_front_operation(target_vector, source_vector, &pop_variables); } Value::Intrinsic(Intrinsic::SliceInsert) => { + let target_len = Value::instruction_result(instruction_id, 0); + let target_id = Value::instruction_result(instruction_id, 1); + let target_len = match self.variables.define_variable( self.function_context, self.brillig_context, - results[0], + target_len, dfg, ) { BrilligVariable::SingleAddr(register_index) => register_index, _ => unreachable!("ICE: first value of a slice must be a register index"), }; - let target_id = results[1]; let target_variable = self.variables.define_variable( self.function_context, self.brillig_context, @@ 
-1180,6 +1180,7 @@ impl<'block> BrilligBlock<'block> { self.brillig_context.deallocate_single_addr(converted_index); } Value::Intrinsic(Intrinsic::SliceRemove) => { + let results = dfg.instruction_results(instruction_id).collect::>(); let target_len = match self.variables.define_variable( self.function_context, self.brillig_context, @@ -1254,7 +1255,7 @@ impl<'block> BrilligBlock<'block> { fn update_slice_length( &mut self, target_len: MemoryAddress, - source_value: ValueId, + source_value: Value, dfg: &DataFlowGraph, binary_op: BrilligBinaryOp, ) { @@ -1281,8 +1282,8 @@ impl<'block> BrilligBlock<'block> { result_variable: SingleAddrVariable, ) { let binary_type = type_of_binary_operation( - dfg[binary.lhs].get_type().as_ref(), - dfg[binary.rhs].get_type().as_ref(), + &dfg.type_of_value(binary.lhs), + &dfg.type_of_value(binary.rhs), binary.operator, ); @@ -1480,7 +1481,7 @@ impl<'block> BrilligBlock<'block> { let max_lhs_bits = dfg.get_value_max_num_bits(binary.lhs); let max_rhs_bits = dfg.get_value_max_num_bits(binary.rhs); - if bit_size == FieldElement::max_num_bits() { + if bit_size as u32 == FieldElement::max_num_bits() { return; } @@ -1562,43 +1563,43 @@ impl<'block> BrilligBlock<'block> { } } - fn initialize_constants(&mut self, constants: &[ValueId], dfg: &DataFlowGraph) { + fn initialize_constants(&mut self, constants: &[Value], dfg: &DataFlowGraph) { for &constant_id in constants { self.convert_ssa_value(constant_id, dfg); } } /// Converts an SSA `ValueId` into a `RegisterOrMemory`. Initializes if necessary. - fn convert_ssa_value(&mut self, value_id: ValueId, dfg: &DataFlowGraph) -> BrilligVariable { - let value_id = dfg.resolve(value_id); - let value = &dfg[value_id]; + fn convert_ssa_value(&mut self, value: Value, dfg: &DataFlowGraph) -> BrilligVariable { + let value = dfg.resolve(value); match value { Value::Param { .. } | Value::Instruction { .. } => { // All block parameters and instruction results should have already been // converted to registers so we fetch from the cache. - self.variables.get_allocation(self.function_context, value_id, dfg) + self.variables.get_allocation(self.function_context, value, dfg) } Value::NumericConstant { constant, .. } => { // Constants might have been converted previously or not, so we get or create and // (re)initialize the value inside. 
- if self.variables.is_allocated(&value_id) { - self.variables.get_allocation(self.function_context, value_id, dfg) + if self.variables.is_allocated(&value) { + self.variables.get_allocation(self.function_context, value, dfg) } else { let new_variable = self.variables.define_variable( self.function_context, self.brillig_context, - value_id, + value, dfg, ); + let constant = dfg[constant]; self.brillig_context - .const_instruction(new_variable.extract_single_addr(), *constant); + .const_instruction(new_variable.extract_single_addr(), constant); new_variable } } - Value::Function(_) => { + Value::Function(function_id) => { // For the debugger instrumentation we want to allow passing // around values representing function pointers, even though // there is no interaction with the function possible given that @@ -1606,13 +1607,13 @@ impl<'block> BrilligBlock<'block> { let new_variable = self.variables.define_variable( self.function_context, self.brillig_context, - value_id, + value, dfg, ); self.brillig_context.const_instruction( new_variable.extract_single_addr(), - value_id.to_u32().into(), + function_id.to_u32().into(), ); new_variable } @@ -1624,7 +1625,7 @@ impl<'block> BrilligBlock<'block> { fn initialize_constant_array( &mut self, - data: &im::Vector, + data: &im::Vector, typ: &Type, dfg: &DataFlowGraph, pointer: MemoryAddress, @@ -1665,7 +1666,7 @@ impl<'block> BrilligBlock<'block> { fn initialize_constant_array_runtime( &mut self, item_types: Arc>, - item_to_repeat: Vec, + item_to_repeat: Vec, item_count: usize, pointer: MemoryAddress, dfg: &DataFlowGraph, @@ -1746,7 +1747,7 @@ impl<'block> BrilligBlock<'block> { fn initialize_constant_array_comptime( &mut self, - data: &im::Vector>, + data: &im::Vector, dfg: &DataFlowGraph, pointer: MemoryAddress, ) { @@ -1778,20 +1779,19 @@ impl<'block> BrilligBlock<'block> { /// Converts an SSA `ValueId` into a `MemoryAddress`. Initializes if necessary. fn convert_ssa_single_addr_value( &mut self, - value_id: ValueId, + value: Value, dfg: &DataFlowGraph, ) -> SingleAddrVariable { - let variable = self.convert_ssa_value(value_id, dfg); + let variable = self.convert_ssa_value(value, dfg); variable.extract_single_addr() } fn allocate_external_call_result( &mut self, - result: ValueId, + result: Value, dfg: &DataFlowGraph, ) -> BrilligVariable { - let typ = dfg[result].get_type(); - match typ.as_ref() { + match dfg.type_of_value(result) { Type::Numeric(_) => self.variables.define_variable( self.function_context, self.brillig_context, @@ -1799,7 +1799,7 @@ impl<'block> BrilligBlock<'block> { dfg, ), - Type::Array(..) => { + typ @ Type::Array(..) => { let variable = self.variables.define_variable( self.function_context, self.brillig_context, @@ -1807,7 +1807,7 @@ impl<'block> BrilligBlock<'block> { dfg, ); let array = variable.extract_array(); - self.allocate_foreign_call_result_array(typ.as_ref(), array); + self.allocate_foreign_call_result_array(&typ, array); variable } @@ -1827,9 +1827,7 @@ impl<'block> BrilligBlock<'block> { variable } - _ => { - unreachable!("ICE: unsupported return type for black box call {typ:?}") - } + typ => unreachable!("ICE: unsupported return type for black box call {typ:?}"), } } @@ -1872,7 +1870,7 @@ impl<'block> BrilligBlock<'block> { /// So we divide the length by the number of subitems in an item to get the user-facing length. 
fn convert_ssa_array_len( &mut self, - array_id: ValueId, + array_id: Value, result_register: MemoryAddress, dfg: &DataFlowGraph, ) { diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs index bf0a1bc7347..8bb9157fd1e 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs @@ -13,7 +13,7 @@ use crate::{ ssa::ir::{ dfg::DataFlowGraph, types::{CompositeType, Type}, - value::ValueId, + value::Value, }, }; @@ -21,12 +21,12 @@ use super::brillig_fn::FunctionContext; #[derive(Debug, Default)] pub(crate) struct BlockVariables { - available_variables: HashSet, + available_variables: HashSet, } impl BlockVariables { /// Creates a BlockVariables instance. It uses the variables that are live in to the block and the global available variables (block parameters) - pub(crate) fn new(live_in: HashSet) -> Self { + pub(crate) fn new(live_in: HashSet) -> Self { BlockVariables { available_variables: live_in } } @@ -52,17 +52,18 @@ impl BlockVariables { &mut self, function_context: &mut FunctionContext, brillig_context: &mut BrilligContext, - value_id: ValueId, + value: Value, dfg: &DataFlowGraph, ) -> BrilligVariable { - let value_id = dfg.resolve(value_id); - let variable = allocate_value(value_id, brillig_context, dfg); + let value = dfg.resolve(value); + + let variable = allocate_value(value, brillig_context, dfg); - if function_context.ssa_value_allocations.insert(value_id, variable).is_some() { - unreachable!("ICE: ValueId {value_id:?} was already in cache"); + if function_context.ssa_value_allocations.insert(value, variable).is_some() { + unreachable!("ICE: Value {value:?} was already in cache"); } - self.available_variables.insert(value_id); + self.available_variables.insert(value); variable } @@ -72,7 +73,7 @@ impl BlockVariables { &mut self, function_context: &mut FunctionContext, brillig_context: &mut BrilligContext, - value: ValueId, + value: Value, dfg: &DataFlowGraph, ) -> SingleAddrVariable { let variable = self.define_variable(function_context, brillig_context, value, dfg); @@ -82,7 +83,7 @@ impl BlockVariables { /// Removes a variable so it's not used anymore within this block. pub(crate) fn remove_variable( &mut self, - value_id: &ValueId, + value_id: &Value, function_context: &mut FunctionContext, brillig_context: &mut BrilligContext, ) { @@ -95,7 +96,7 @@ impl BlockVariables { } /// Checks if a variable is allocated. - pub(crate) fn is_allocated(&self, value_id: &ValueId) -> bool { + pub(crate) fn is_allocated(&self, value_id: &Value) -> bool { self.available_variables.contains(value_id) } @@ -103,7 +104,7 @@ impl BlockVariables { pub(crate) fn get_allocation( &mut self, function_context: &FunctionContext, - value_id: ValueId, + value_id: Value, dfg: &DataFlowGraph, ) -> BrilligVariable { let value_id = dfg.resolve(value_id); @@ -127,7 +128,7 @@ pub(crate) fn compute_array_length(item_typ: &CompositeType, elem_count: usize) /// For a given value_id, allocates the necessary registers to hold it. 
pub(crate) fn allocate_value( - value_id: ValueId, + value_id: Value, brillig_context: &mut BrilligContext, dfg: &DataFlowGraph, ) -> BrilligVariable { diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs index 3dea7b3e7f5..fca1b722dc0 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs @@ -10,7 +10,7 @@ use crate::{ function::{Function, FunctionId}, post_order::PostOrder, types::Type, - value::ValueId, + value::Value, }, }; use fxhash::FxHashMap as HashMap; @@ -20,7 +20,7 @@ use super::{constant_allocation::ConstantAllocation, variable_liveness::Variable pub(crate) struct FunctionContext { pub(crate) function_id: FunctionId, /// Map from SSA values its allocation. Since values can be only defined once in SSA form, we insert them here on when we allocate them at their definition. - pub(crate) ssa_value_allocations: HashMap, + pub(crate) ssa_value_allocations: HashMap, /// The block ids of the function in reverse post order. pub(crate) blocks: Vec, /// Liveness information for each variable in the function. diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs index 61ca20be2f5..6fc06d277f7 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs @@ -4,14 +4,8 @@ use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet}; use crate::ssa::ir::{ - basic_block::BasicBlockId, - cfg::ControlFlowGraph, - dfg::DataFlowGraph, - dom::DominatorTree, - function::Function, - instruction::InstructionId, - post_order::PostOrder, - value::{Value, ValueId}, + basic_block::BasicBlockId, cfg::ControlFlowGraph, dfg::DataFlowGraph, dom::DominatorTree, + function::Function, instruction::InstructionId, post_order::PostOrder, value::Value, }; use super::variable_liveness::{collect_variables_of_value, variables_used_in_instruction}; @@ -23,8 +17,8 @@ pub(crate) enum InstructionLocation { } pub(crate) struct ConstantAllocation { - constant_usage: HashMap>>, - allocation_points: HashMap>>, + constant_usage: HashMap>>, + allocation_points: HashMap>>, dominator_tree: DominatorTree, blocks_within_loops: HashSet, } @@ -47,7 +41,7 @@ impl ConstantAllocation { instance } - pub(crate) fn allocated_in_block(&self, block_id: BasicBlockId) -> Vec { + pub(crate) fn allocated_in_block(&self, block_id: BasicBlockId) -> Vec { self.allocation_points.get(&block_id).map_or(Vec::default(), |allocations| { allocations.iter().flat_map(|(_, constants)| constants.iter()).copied().collect() }) @@ -57,7 +51,7 @@ impl ConstantAllocation { &self, block_id: BasicBlockId, location: InstructionLocation, - ) -> Vec { + ) -> Vec { self.allocation_points.get(&block_id).map_or(Vec::default(), |allocations| { allocations.get(&location).map_or(Vec::default(), |constants| constants.clone()) }) @@ -65,7 +59,7 @@ impl ConstantAllocation { fn collect_constant_usage(&mut self, func: &Function) { let mut record_if_constant = - |block_id: BasicBlockId, value_id: ValueId, location: InstructionLocation| { + |block_id: BasicBlockId, value_id: Value, location: InstructionLocation| { if is_constant_value(value_id, &func.dfg) { self.constant_usage .entry(value_id) @@ -126,7 +120,7 @@ impl ConstantAllocation { fn decide_allocation_point( &self, - constant_id: ValueId, + 
constant_id: Value, blocks_where_is_used: &[BasicBlockId], func: &Function, ) -> BasicBlockId { @@ -164,8 +158,8 @@ impl ConstantAllocation { } } -pub(crate) fn is_constant_value(id: ValueId, dfg: &DataFlowGraph) -> bool { - matches!(&dfg[dfg.resolve(id)], Value::NumericConstant { .. }) +pub(crate) fn is_constant_value(id: Value, dfg: &DataFlowGraph) -> bool { + matches!(dfg.resolve(id), Value::NumericConstant { .. }) } /// For a given function, finds all the blocks that are within loops diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs index d6851a9ecf9..55b144aaffe 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs @@ -9,7 +9,7 @@ use crate::ssa::ir::{ function::Function, instruction::{Instruction, InstructionId}, post_order::PostOrder, - value::{Value, ValueId}, + value::Value, }; use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet}; @@ -45,16 +45,12 @@ fn find_back_edges( } /// Collects the underlying variables inside a value id. It might be more than one, for example in constant arrays that are constructed with multiple vars. -pub(crate) fn collect_variables_of_value( - value_id: ValueId, - dfg: &DataFlowGraph, -) -> Option { - let value_id = dfg.resolve(value_id); - let value = &dfg[value_id]; +pub(crate) fn collect_variables_of_value(value: Value, dfg: &DataFlowGraph) -> Option { + let value = dfg.resolve(value); match value { Value::Instruction { .. } | Value::Param { .. } | Value::NumericConstant { .. } => { - Some(value_id) + Some(value) } // Functions are not variables in a defunctionalized SSA. Only constant function values should appear. Value::ForeignFunction(_) | Value::Function(_) | Value::Intrinsic(..) => None, @@ -75,7 +71,11 @@ pub(crate) fn variables_used_in_instruction( used } -fn variables_used_in_block(block: &BasicBlock, dfg: &DataFlowGraph) -> Variables { +fn variables_used_in_block( + block: &BasicBlock, + block_id: BasicBlockId, + dfg: &DataFlowGraph, +) -> Variables { let mut used: Variables = block .instructions() .iter() @@ -86,7 +86,7 @@ fn variables_used_in_block(block: &BasicBlock, dfg: &DataFlowGraph) -> Variables .collect(); // We consider block parameters used, so they live up to the block that owns them. - used.extend(block.parameters().iter()); + used.extend(dfg.block_parameters(block_id)); if let Some(terminator) = block.terminator() { terminator.for_each_value(|value_id| { @@ -97,14 +97,15 @@ fn variables_used_in_block(block: &BasicBlock, dfg: &DataFlowGraph) -> Variables used } -type Variables = HashSet; +type Variables = HashSet; fn compute_used_before_def( block: &BasicBlock, + block_id: BasicBlockId, dfg: &DataFlowGraph, defined_in_block: &Variables, ) -> Variables { - variables_used_in_block(block, dfg) + variables_used_in_block(block, block_id, dfg) .into_iter() .filter(|id| !defined_in_block.contains(id)) .collect() @@ -122,7 +123,7 @@ pub(crate) struct VariableLiveness { /// The variables that stop being alive after each specific instruction last_uses: HashMap, /// The list of block params the given block is defining. The order matters for the entry block, so it's a vec. - param_definitions: HashMap>, + param_definitions: HashMap>, } impl VariableLiveness { @@ -172,7 +173,7 @@ impl VariableLiveness { /// Retrieves the list of block params the given block is defining. 
/// Block params are defined before the block that owns them (since they are used by the predecessor blocks). They must be defined in the immediate dominator. /// This is the last point where the block param can be allocated without it being allocated in different places in different branches. - pub(crate) fn defined_block_params(&self, block_id: &BasicBlockId) -> Vec { + pub(crate) fn defined_block_params(&self, block_id: &BasicBlockId) -> Vec { self.param_definitions.get(block_id).cloned().unwrap_or_default() } @@ -182,13 +183,13 @@ impl VariableLiveness { reverse_post_order.extend_from_slice(self.post_order.as_slice()); reverse_post_order.reverse(); for block in reverse_post_order { - let params = func.dfg[block].parameters(); + let params = func.dfg.block_parameters(block); // If it has no dominator, it's the entry block let dominator_block = self.dominator_tree.immediate_dominator(block).unwrap_or(func.entry_block()); let definitions_for_the_dominator = self.param_definitions.entry(dominator_block).or_default(); - definitions_for_the_dominator.extend(params.iter()); + definitions_for_the_dominator.extend(params); } } @@ -215,9 +216,8 @@ impl VariableLiveness { defined.extend(constants.allocated_in_block(block_id)); - let block: &BasicBlock = &func.dfg[block_id]; - - let used_before_def = compute_used_before_def(block, &func.dfg, &defined); + let block = &func.dfg[block_id]; + let used_before_def = compute_used_before_def(block, block_id, &func.dfg, &defined); let mut live_out = HashSet::default(); @@ -248,9 +248,8 @@ impl VariableLiveness { } for instruction_id in block.instructions() { - let result_values = dfg.instruction_results(*instruction_id); - for result_value in result_values { - defined_vars.insert(dfg.resolve(*result_value)); + for result_value in dfg.instruction_results(*instruction_id) { + defined_vars.insert(dfg.resolve(result_value)); } } @@ -372,7 +371,7 @@ mod test { let v3 = builder.insert_allocate(Type::field()); - let zero = builder.field_constant(0u128); + let zero = builder.field_constant(0u128.into()); builder.insert_store(v3, zero); let v4 = builder.insert_binary(v0, BinaryOp::Eq, zero); @@ -381,7 +380,7 @@ mod test { builder.switch_to_block(b2); - let twenty_seven = builder.field_constant(27u128); + let twenty_seven = builder.field_constant(27u128.into()); let v7 = builder.insert_binary(v0, BinaryOp::Add, twenty_seven); builder.insert_store(v3, v7); @@ -487,7 +486,7 @@ mod test { let v3 = builder.insert_allocate(Type::field()); - let zero = builder.field_constant(0u128); + let zero = builder.field_constant(0u128.into()); builder.insert_store(v3, zero); builder.terminate_with_jmp(b1, vec![zero]); @@ -515,7 +514,7 @@ mod test { builder.switch_to_block(b5); - let twenty_seven = builder.field_constant(27u128); + let twenty_seven = builder.field_constant(27u128.into()); let v10 = builder.insert_binary(v7, BinaryOp::Eq, twenty_seven); let v11 = builder.insert_not(v10); @@ -534,7 +533,7 @@ mod test { builder.switch_to_block(b8); - let one = builder.field_constant(1u128); + let one = builder.field_constant(1u128.into()); let v15 = builder.insert_binary(v7, BinaryOp::Add, one); builder.terminate_with_jmp(b4, vec![v15]); @@ -621,8 +620,8 @@ mod test { builder.terminate_with_jmpif(v0, b1, b2); builder.switch_to_block(b1); - let twenty_seven = builder.field_constant(27_u128); - let twenty_nine = builder.field_constant(29_u128); + let twenty_seven = builder.field_constant(27_u128.into()); + let twenty_nine = builder.field_constant(29_u128.into()); 
builder.terminate_with_jmp(b3, vec![twenty_seven, twenty_nine]); builder.switch_to_block(b3); @@ -631,8 +630,8 @@ mod test { builder.terminate_with_return(vec![v1]); builder.switch_to_block(b2); - let twenty_eight = builder.field_constant(28_u128); - let forty = builder.field_constant(40_u128); + let twenty_eight = builder.field_constant(28_u128.into()); + let forty = builder.field_constant(40_u128.into()); builder.terminate_with_jmp(b3, vec![twenty_eight, forty]); let ssa = builder.finish(); diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir.rs index 3c100d229a6..4a01e655c7a 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir.rs @@ -42,7 +42,7 @@ use super::ProcedureId; /// The Brillig VM does not apply a limit to the memory address space, /// As a convention, we use 32 bits. This means that we assume that /// memory has 2^32 memory slots. -pub(crate) const BRILLIG_MEMORY_ADDRESSING_BIT_SIZE: u32 = 32; +pub(crate) const BRILLIG_MEMORY_ADDRESSING_BIT_SIZE: u8 = 32; // Registers reserved in runtime for special purposes. pub(crate) enum ReservedRegisters { diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs index 3654a95a03f..973d4fdf5ea 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs @@ -11,7 +11,7 @@ use super::procedures::ProcedureId; #[derive(Debug, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)] pub(crate) enum BrilligParameter { /// A single address parameter or return value. Holds the bit size of the parameter. - SingleAddr(u32), + SingleAddr(u8), /// An array parameter or return value. Holds the type of an array item and its size. Array(Vec, usize), /// A slice parameter or return value. Holds the type of a slice item. 
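Note on the width narrowing above: with BRILLIG_MEMORY_ADDRESSING_BIT_SIZE and BrilligParameter::SingleAddr now carrying u8 instead of u32, call sites that start from a u32 width (such as FieldElement::max_num_bits()) need an explicit, checked conversion. A minimal sketch of such a call site, assuming only the u8-width definitions visible in this diff; the helper name field_param is illustrative and not part of the patch:

    use acvm::{acir::AcirField, FieldElement};

    // `BrilligParameter` is the crate-internal enum declared in artifact.rs above.
    // Build a single-address Brillig parameter wide enough for a field element.
    // `max_num_bits()` still returns a u32 (e.g. 254 for BN254), so it is narrowed
    // with a checked conversion rather than an `as` cast.
    fn field_param() -> BrilligParameter {
        let width: u8 = FieldElement::max_num_bits()
            .try_into()
            .expect("field bit size must fit in a u8");
        BrilligParameter::SingleAddr(width)
    }
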
diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/brillig_variable.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/brillig_variable.rs index 0bb18448670..0ebb2b3238e 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/brillig_variable.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/brillig_variable.rs @@ -12,11 +12,11 @@ use super::BRILLIG_MEMORY_ADDRESSING_BIT_SIZE; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Copy)] pub(crate) struct SingleAddrVariable { pub(crate) address: MemoryAddress, - pub(crate) bit_size: u32, + pub(crate) bit_size: u8, } impl SingleAddrVariable { - pub(crate) fn new(address: MemoryAddress, bit_size: u32) -> Self { + pub(crate) fn new(address: MemoryAddress, bit_size: u8) -> Self { SingleAddrVariable { address, bit_size } } @@ -25,7 +25,7 @@ impl SingleAddrVariable { } pub(crate) fn new_field(address: MemoryAddress) -> Self { - SingleAddrVariable { address, bit_size: FieldElement::max_num_bits() } + SingleAddrVariable { address, bit_size: FieldElement::max_num_bits() as u8 } } } @@ -84,7 +84,7 @@ impl BrilligVariable { pub(crate) fn type_to_heap_value_type(typ: &Type) -> HeapValueType { match typ { Type::Numeric(_) | Type::Reference(_) | Type::Function => HeapValueType::Simple( - BitSize::try_from_u32::(get_bit_size_from_ssa_type(typ)).unwrap(), + BitSize::try_from_u8::(get_bit_size_from_ssa_type(typ)).unwrap(), ), Type::Array(elem_type, size) => HeapValueType::Array { value_types: elem_type.as_ref().iter().map(type_to_heap_value_type).collect(), @@ -96,7 +96,7 @@ pub(crate) fn type_to_heap_value_type(typ: &Type) -> HeapValueType { } } -pub(crate) fn get_bit_size_from_ssa_type(typ: &Type) -> u32 { +pub(crate) fn get_bit_size_from_ssa_type(typ: &Type) -> u8 { match typ { Type::Reference(_) => BRILLIG_MEMORY_ADDRESSING_BIT_SIZE, // NB. function references are converted to a constant when diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_intrinsic.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_intrinsic.rs index ba89823ef13..1b8a549a63d 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_intrinsic.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_intrinsic.rs @@ -18,7 +18,7 @@ impl BrilligContext< &mut self, destination_of_truncated_value: SingleAddrVariable, value_to_truncate: SingleAddrVariable, - bit_size: u32, + bit_size: u8, ) { assert!( bit_size <= value_to_truncate.bit_size, @@ -72,7 +72,7 @@ impl BrilligContext< little_endian: bool, output_bits: bool, // If true will generate bit limbs, if false will generate byte limbs ) { - assert!(source_field.bit_size == F::max_num_bits()); + assert!(source_field.bit_size as u32 == F::max_num_bits()); assert!(radix.bit_size == 32); self.codegen_initialize_array(target_array); diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/debug_show.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/debug_show.rs index ef1b5432128..8e26e465b81 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/debug_show.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/debug_show.rs @@ -22,7 +22,7 @@ macro_rules! default_to_string_impl { )*) } -default_to_string_impl! { str usize u32 } +default_to_string_impl! 
{ str usize u32 u8 } impl DebugToString for MemoryAddress { fn debug_to_string(&self) -> String { @@ -131,7 +131,7 @@ impl DebugShow { &self, destination: MemoryAddress, source: MemoryAddress, - bit_size: u32, + bit_size: u8, ) { debug_println!( self.enable_debug_trace, @@ -171,7 +171,7 @@ impl DebugShow { pub(crate) fn not_instruction( &self, condition: MemoryAddress, - bit_size: u32, + bit_size: u8, result: MemoryAddress, ) { debug_println!(self.enable_debug_trace, " i{}_NOT {} = !{}", bit_size, result, condition); diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs index 2dbee48b277..1e3f7c4af1d 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs @@ -157,7 +157,7 @@ impl BrilligContext { 0, ); - fn flat_bit_sizes(param: &BrilligParameter) -> Box + '_> { + fn flat_bit_sizes(param: &BrilligParameter) -> Box + '_> { match param { BrilligParameter::SingleAddr(bit_size) => Box::new(std::iter::once(*bit_size)), BrilligParameter::Array(item_types, item_count) @@ -169,7 +169,7 @@ impl BrilligContext { for (i, bit_size) in arguments.iter().flat_map(flat_bit_sizes).enumerate() { // Calldatacopy tags everything with field type, so when downcast when necessary - if bit_size < F::max_num_bits() { + if (bit_size as u32) < F::max_num_bits() { self.cast_instruction( SingleAddrVariable::new( MemoryAddress::direct(Self::calldata_start_offset() + i), diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/instructions.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/instructions.rs index 2bf5364414c..bdd3342615f 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/instructions.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/instructions.rs @@ -82,7 +82,7 @@ impl BrilligContext< result: SingleAddrVariable, operation: BrilligBinaryOp, ) { - let is_field_op = lhs.bit_size == FieldElement::max_num_bits(); + let is_field_op = lhs.bit_size as u32 == FieldElement::max_num_bits(); let expected_result_bit_size = Self::binary_result_bit_size(operation, lhs.bit_size); assert!( result.bit_size == expected_result_bit_size, @@ -151,7 +151,7 @@ impl BrilligContext< self.deallocate_single_addr(scratch_var_j); } - fn binary_result_bit_size(operation: BrilligBinaryOp, arguments_bit_size: u32) -> u32 { + fn binary_result_bit_size(operation: BrilligBinaryOp, arguments_bit_size: u8) -> u8 { match operation { BrilligBinaryOp::Equals | BrilligBinaryOp::LessThan @@ -337,7 +337,7 @@ impl BrilligContext< self.push_opcode(BrilligOpcode::Cast { destination: destination.address, source: source.address, - bit_size: BitSize::try_from_u32::(destination.bit_size).unwrap(), + bit_size: BitSize::try_from_u8::(destination.bit_size).unwrap(), }); } @@ -351,16 +351,16 @@ impl BrilligContext< pub(crate) fn indirect_const_instruction( &mut self, result_pointer: MemoryAddress, - bit_size: u32, + bit_size: u8, constant: F, ) { self.debug_show.indirect_const_instruction(result_pointer, constant); self.constant(result_pointer, bit_size, constant, true); } - fn constant(&mut self, result: MemoryAddress, bit_size: u32, constant: F, indirect: bool) { + fn constant(&mut self, result: MemoryAddress, bit_size: u8, constant: F, indirect: bool) { assert!( - bit_size >= constant.num_bits(), + bit_size >= constant.num_bits().try_into().unwrap(), "Constant {} does not fit in bit size {}", constant, bit_size @@ -369,13 +369,13 @@ impl BrilligContext< 
self.push_opcode(BrilligOpcode::IndirectConst { destination_pointer: result, value: constant, - bit_size: BitSize::try_from_u32::(bit_size).unwrap(), + bit_size: BitSize::try_from_u8::(bit_size).unwrap(), }); } else { self.push_opcode(BrilligOpcode::Const { destination: result, value: constant, - bit_size: BitSize::try_from_u32::(bit_size).unwrap(), + bit_size: BitSize::try_from_u8::(bit_size).unwrap(), }); } } @@ -388,7 +388,7 @@ impl BrilligContext< pub(crate) fn make_constant_instruction( &mut self, constant: F, - bit_size: u32, + bit_size: u8, ) -> SingleAddrVariable { let var = SingleAddrVariable::new(self.allocate_register(), bit_size); self.const_instruction(var, constant); diff --git a/compiler/noirc_evaluator/src/ssa.rs b/compiler/noirc_evaluator/src/ssa.rs index 9377cadb260..39aa0fa7042 100644 --- a/compiler/noirc_evaluator/src/ssa.rs +++ b/compiler/noirc_evaluator/src/ssa.rs @@ -105,6 +105,12 @@ pub(crate) fn optimize_into_acir( &options.emit_ssa, )?; + eprintln!("Value size: {}", std::mem::size_of::() * 8); + eprintln!("Field size: {}", std::mem::size_of::() * 8); + eprintln!("NumTy size: {}", std::mem::size_of::() * 8); + eprintln!("u64 size: {}", std::mem::size_of::() * 8); + eprintln!("Intri size: {}", std::mem::size_of::() * 8); + let mut ssa = optimize_all(builder, options)?; let mut ssa_level_warnings = vec![]; @@ -175,7 +181,7 @@ fn optimize_all(builder: SsaBuilder, options: &SsaEvaluatorOptions) -> Result Result = + let all_brillig_generated_values: HashSet = context.brillig_return_to_argument.keys().copied().collect(); let connected_sets_indices = @@ -98,10 +98,10 @@ struct DependencyContext { visited_blocks: HashSet, block_queue: Vec, // Map keeping track of values stored at memory locations - memory_slots: HashMap, + memory_slots: HashMap, // Map of values resulting from array get instructions // to the actual array values - array_elements: HashMap, + array_elements: HashMap, // Map of brillig call ids to sets of the value ids descending // from their arguments and results tainted: HashMap, @@ -113,22 +113,22 @@ struct DependencyContext { #[derive(Clone, Debug)] struct BrilligTaintedIds { // Argument descendant value ids - arguments: HashSet, + arguments: HashSet, // Results status results: Vec, // Initial result value ids - root_results: HashSet, + root_results: HashSet, } #[derive(Clone, Debug)] enum ResultStatus { // Keep track of descendants until found constrained - Unconstrained { descendants: HashSet }, + Unconstrained { descendants: HashSet }, Constrained, } impl BrilligTaintedIds { - fn new(arguments: &[ValueId], results: &[ValueId]) -> Self { + fn new(arguments: &[Value], results: &[Value]) -> Self { BrilligTaintedIds { arguments: HashSet::from_iter(arguments.iter().copied()), results: results @@ -143,7 +143,7 @@ impl BrilligTaintedIds { /// (for arguments one set is enough, for results we keep them /// separate as the forthcoming check considers the call covered /// if all the results were properly covered) - fn update_children(&mut self, parents: &HashSet, children: &[ValueId]) { + fn update_children(&mut self, parents: &HashSet, children: &[Value]) { if self.arguments.intersection(parents).next().is_some() { self.arguments.extend(children); } @@ -173,7 +173,7 @@ impl BrilligTaintedIds { /// along the way to take them into final consideration /// Generally, a valid partial constraint should link up a result descendant /// and an argument descendant, although there are also edge cases mentioned below. 
- fn store_partial_constraints(&mut self, constrained_values: &HashSet) { + fn store_partial_constraints(&mut self, constrained_values: &HashSet) { let mut results_involved: Vec = vec![]; // For a valid partial constraint, a value descending from @@ -243,9 +243,9 @@ impl DependencyContext { }); // Collect non-constant instruction results - for value_id in function.dfg.instruction_results(*instruction).iter() { - if function.dfg.get_numeric_constant(*value_id).is_none() { - results.push(function.dfg.resolve(*value_id)); + for value_id in function.dfg.instruction_results(*instruction) { + if function.dfg.get_numeric_constant(value_id).is_none() { + results.push(function.dfg.resolve(value_id)); } } @@ -256,7 +256,7 @@ impl DependencyContext { Instruction::Store { address, value } => { self.memory_slots.insert(*address, function.dfg.resolve(*value)); } - Instruction::Load { address } => { + Instruction::Load { address, result_type: _ } => { // Recall the value stored at address as parent for the results if let Some(value_id) = self.memory_slots.get(address) { self.update_children(&[*value_id], &results); @@ -282,7 +282,7 @@ impl DependencyContext { // as .for_each_value() used previously also includes func_id arguments.remove(0); - match &function.dfg[*func_id] { + match *func_id { Value::Intrinsic(intrinsic) => match intrinsic { Intrinsic::ApplyRangeConstraint | Intrinsic::AssertConstant => { // Consider these intrinsic arguments constrained @@ -314,7 +314,7 @@ impl DependencyContext { self.update_children(&arguments, &results); } }, - Value::Function(callee) => match all_functions[callee].runtime() { + Value::Function(callee) => match all_functions[&callee].runtime() { RuntimeType::Brillig(_) => { // Record arguments/results for each Brillig call for the check self.tainted.insert( @@ -394,7 +394,7 @@ impl DependencyContext { } /// Update sets of value ids that can be traced back to the Brillig calls being tracked - fn update_children(&mut self, parents: &[ValueId], children: &[ValueId]) { + fn update_children(&mut self, parents: &[Value], children: &[Value]) { let parents: HashSet<_> = HashSet::from_iter(parents.iter().copied()); for (_, tainted_ids) in self.tainted.iter_mut() { tainted_ids.update_children(&parents, children); @@ -403,7 +403,7 @@ impl DependencyContext { /// Check if any of the recorded Brillig calls have been properly constrained /// by given values after recording partial constraints, if so stop tracking them - fn clear_constrained(&mut self, constrained_values: &[ValueId], function: &Function) { + fn clear_constrained(&mut self, constrained_values: &[Value], function: &Function) { // Remove numeric constants let constrained_values = constrained_values.iter().filter(|v| function.dfg.get_numeric_constant(**v).is_none()); @@ -433,9 +433,9 @@ impl DependencyContext { struct Context { visited_blocks: HashSet, block_queue: Vec, - value_sets: Vec>, - brillig_return_to_argument: HashMap>, - brillig_return_to_instruction_id: HashMap, + value_sets: Vec>, + brillig_return_to_argument: HashMap>, + brillig_return_to_instruction_id: HashMap, } impl Context { @@ -469,10 +469,9 @@ impl Context { ) -> HashSet { let variable_parameters_and_return_values = function .parameters() - .iter() - .chain(function.returns()) - .filter(|id| function.dfg.get_numeric_constant(**id).is_none()) - .map(|value_id| function.dfg.resolve(*value_id)); + .chain(function.returns().iter().copied()) + .filter(|value| function.dfg.get_numeric_constant(*value).is_none()) + .map(|value| function.dfg.resolve(value)); 
let mut connected_sets_indices: HashSet = HashSet::new(); @@ -491,8 +490,8 @@ impl Context { /// Find which Brillig calls separate this set from others and return bug warnings about them fn find_disconnecting_brillig_calls_with_results_in_set( &self, - current_set: &HashSet, - all_brillig_generated_values: &HashSet, + current_set: &HashSet, + all_brillig_generated_values: &HashSet, function: &Function, ) -> Vec { let mut warnings = Vec::new(); @@ -502,7 +501,7 @@ impl Context { // Go through all Brillig outputs in the set for brillig_output_in_set in intersection { // Get the inputs that correspond to the output - let inputs: HashSet = + let inputs: HashSet = self.brillig_return_to_argument[&brillig_output_in_set].iter().copied().collect(); // Check if any of them are not in the set @@ -540,9 +539,9 @@ impl Context { } }); // And non-constant results - for value_id in function.dfg.instruction_results(*instruction).iter() { - if function.dfg.get_numeric_constant(*value_id).is_none() { - instruction_arguments_and_results.insert(function.dfg.resolve(*value_id)); + for value_id in function.dfg.instruction_results(*instruction) { + if function.dfg.get_numeric_constant(value_id).is_none() { + instruction_arguments_and_results.insert(function.dfg.resolve(value_id)); } } @@ -562,8 +561,8 @@ impl Context { self.value_sets.push(instruction_arguments_and_results); } - Instruction::Call { func: func_id, arguments: argument_ids } => { - match &function.dfg[*func_id] { + Instruction::Call { func: func_id, arguments: argument_ids, result_types: _ } => { + match *func_id { Value::Intrinsic(intrinsic) => match intrinsic { Intrinsic::ApplyRangeConstraint | Intrinsic::AssertConstant @@ -591,21 +590,19 @@ impl Context { self.value_sets.push(instruction_arguments_and_results); } }, - Value::Function(callee) => match all_functions[callee].runtime() { + Value::Function(callee) => match all_functions[&callee].runtime() { RuntimeType::Brillig(_) => { // For calls to Brillig functions we memorize the mapping of results to argument ValueId's and InstructionId's // The latter are needed to produce the callstack later - for result in - function.dfg.instruction_results(*instruction).iter().filter( - |value_id| { - function.dfg.get_numeric_constant(**value_id).is_none() - }, - ) - { + for result in function.dfg.instruction_results(*instruction).filter( + |value_id| { + function.dfg.get_numeric_constant(*value_id).is_none() + }, + ) { self.brillig_return_to_argument - .insert(*result, argument_ids.clone()); + .insert(result, argument_ids.clone()); self.brillig_return_to_instruction_id - .insert(*result, *instruction); + .insert(result, *instruction); } } RuntimeType::Acir(..) 
=> { @@ -636,15 +633,15 @@ impl Context { /// Merge all small sets into larger ones based on whether the sets intersect or not /// /// If two small sets have a common ValueId, we merge them into one - fn merge_sets(current: &[HashSet]) -> Vec> { + fn merge_sets(current: &[HashSet]) -> Vec> { let mut new_set_id: usize = 0; - let mut updated_sets: HashMap> = HashMap::new(); - let mut value_dictionary: HashMap = HashMap::new(); - let mut parsed_value_set: HashSet = HashSet::new(); + let mut updated_sets: HashMap> = HashMap::new(); + let mut value_dictionary: HashMap = HashMap::new(); + let mut parsed_value_set: HashSet = HashSet::new(); for set in current.iter() { // Check if the set has any of the ValueIds we've encountered at previous iterations - let intersection: HashSet = + let intersection: HashSet = set.intersection(&parsed_value_set).copied().collect(); parsed_value_set.extend(set.iter()); @@ -700,7 +697,7 @@ impl Context { /// Parallel version of merge_sets /// The sets are merged by chunks, and then the chunks are merged together - fn merge_sets_par(sets: &[HashSet]) -> Vec> { + fn merge_sets_par(sets: &[HashSet]) -> Vec> { let mut sets = sets.to_owned(); let mut len = sets.len(); let mut prev_len = len + 1; diff --git a/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs b/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs index 068fff7d284..7724a0ab4cb 100644 --- a/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs +++ b/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs @@ -1,12 +1,8 @@ use std::{collections::BTreeMap, sync::Arc}; -use crate::ssa::ir::{ - function::RuntimeType, - types::{NumericType, Type}, - value::ValueId, -}; -use acvm::FieldElement; +use crate::ssa::ir::{function::RuntimeType, types::Type, value::Value}; use fxhash::FxHashMap as HashMap; +use iter_extended::vecmap; use noirc_frontend::ast; use noirc_frontend::hir_def::function::FunctionSignature; use serde::{Deserialize, Serialize}; @@ -23,10 +19,10 @@ pub(crate) enum DatabusVisibility { /// replacing public inputs #[derive(Clone, Debug)] pub(crate) struct DataBusBuilder { - pub(crate) values: im::Vector, + pub(crate) values: im::Vector, index: usize, - pub(crate) map: HashMap, - pub(crate) databus: Option, + pub(crate) map: HashMap, + pub(crate) databus: Option, call_data_id: Option, } @@ -63,19 +59,19 @@ impl DataBusBuilder { pub(crate) struct CallData { /// The id to this calldata assigned by the user pub(crate) call_data_id: u32, - pub(crate) array_id: ValueId, - pub(crate) index_map: HashMap, + pub(crate) array_id: Value, + pub(crate) index_map: HashMap, } #[derive(Clone, Default, Debug, Serialize, Deserialize)] pub(crate) struct DataBus { pub(crate) call_data: Vec, - pub(crate) return_data: Option, + pub(crate) return_data: Option, } impl DataBus { /// Updates the databus values with the provided function - pub(crate) fn map_values(&self, mut f: impl FnMut(ValueId) -> ValueId) -> DataBus { + pub(crate) fn map_values(&self, mut f: impl FnMut(Value) -> Value) -> DataBus { let call_data = self .call_data .iter() @@ -95,7 +91,7 @@ impl DataBus { } /// Updates the databus values in place with the provided function - pub(crate) fn map_values_mut(&mut self, mut f: impl FnMut(ValueId) -> ValueId) { + pub(crate) fn map_values_mut(&mut self, mut f: impl FnMut(Value) -> Value) { for cd in self.call_data.iter_mut() { cd.array_id = f(cd.array_id); @@ -108,7 +104,7 @@ impl DataBus { } } - pub(crate) fn call_data_array(&self) -> Vec<(u32, ValueId)> { + pub(crate) fn 
call_data_array(&self) -> Vec<(u32, Value)> { self.call_data.iter().map(|cd| (cd.call_data_id, cd.array_id)).collect() } /// Construct a databus from call_data and return_data data bus builders @@ -131,9 +127,9 @@ impl DataBus { impl FunctionBuilder { /// Insert a value into a data bus builder - fn add_to_data_bus(&mut self, value: ValueId, databus: &mut DataBusBuilder) { + fn add_to_data_bus(&mut self, value: Value, databus: &mut DataBusBuilder) { assert!(databus.databus.is_none(), "initializing finalized call data"); - let typ = self.current_function.dfg[value].get_type().into_owned(); + let typ = self.current_function.dfg.type_of_value(value); match typ { Type::Numeric(_) => { databus.values.push_back(value); @@ -146,10 +142,8 @@ impl FunctionBuilder { for _i in 0..len { for subitem_typ in typ.iter() { // load each element of the array, and add it to the databus - let length_type = NumericType::length_type(); - let index_var = FieldElement::from(index as i128); let index_var = - self.current_function.dfg.make_constant(index_var, length_type); + self.current_function.dfg.length_constant((index as i128).into()); let element = self.insert_array_get(value, index_var, subitem_typ.clone()); index += match subitem_typ { Type::Array(_, _) | Type::Slice(_) => subitem_typ.element_size(), @@ -171,7 +165,7 @@ impl FunctionBuilder { /// Create a data bus builder from a list of values pub(crate) fn initialize_data_bus( &mut self, - values: &[ValueId], + values: &[Value], mut databus: DataBusBuilder, call_data_id: Option, ) -> DataBusBuilder { @@ -203,14 +197,16 @@ impl FunctionBuilder { ) -> Vec { //filter parameters of the first block that have call-data visibility let first_block = self.current_function.entry_block(); - let params = self.current_function.dfg[first_block].parameters(); + let params = self.current_function.dfg.block_parameters(first_block); // Reshape the is_params_databus to map to the SSA-level parameters let is_params_databus = self.deflatten_databus_visibilities(params, flattened_databus_visibilities); - let mut databus_param: BTreeMap> = BTreeMap::new(); - for (param, databus_attribute) in params.iter().zip(is_params_databus) { + let params = self.current_function.dfg.block_parameters(first_block); + let mut databus_param: BTreeMap> = BTreeMap::new(); + + for (param, databus_attribute) in params.zip(is_params_databus) { match databus_attribute { DatabusVisibility::None | DatabusVisibility::ReturnData => continue, DatabusVisibility::CallData(call_data_id) => { @@ -225,30 +221,24 @@ impl FunctionBuilder { } } // create the call-data-bus from the filtered lists - let mut result = Vec::new(); - for id in databus_param.keys() { - let builder = DataBusBuilder::new(); - let call_databus = self.initialize_data_bus(&databus_param[id], builder, Some(*id)); - result.push(call_databus); - } - result + vecmap(databus_param.keys(), |id| { + self.initialize_data_bus(&databus_param[id], DataBusBuilder::new(), Some(*id)) + }) } /// This function takes the flattened databus visibilities and generates the databus visibility for each ssa parameter /// asserting that an ssa parameter is not assigned two different databus visibilities fn deflatten_databus_visibilities( &self, - ssa_params: &[ValueId], + ssa_params: impl ExactSizeIterator, mut flattened_params_databus_visibility: Vec, ) -> Vec { - let ssa_param_sizes: Vec = ssa_params - .iter() - .map(|ssa_param| { - self.current_function.dfg[*ssa_param].get_type().flattened_size() as usize - }) - .collect(); + let ssa_params_len = ssa_params.len(); + 
let ssa_param_sizes = ssa_params.map(|ssa_param| { + self.current_function.dfg.type_of_value(ssa_param).flattened_size() as usize + }); - let mut is_ssa_params_databus = Vec::with_capacity(ssa_params.len()); + let mut is_ssa_params_databus = Vec::with_capacity(ssa_param_sizes.len()); for size in ssa_param_sizes { let visibilities: Vec = flattened_params_databus_visibility.drain(0..size).collect(); @@ -260,8 +250,7 @@ impl FunctionBuilder { is_ssa_params_databus.push(visibility); } - assert_eq!(is_ssa_params_databus.len(), ssa_params.len()); - + assert_eq!(is_ssa_params_databus.len(), ssa_params_len); is_ssa_params_databus } } diff --git a/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs b/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs index 855034cedd2..f761ff162f6 100644 --- a/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs @@ -1,6 +1,6 @@ pub(crate) mod data_bus; -use std::{borrow::Cow, collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; use acvm::{acir::circuit::ErrorSelector, FieldElement}; use noirc_errors::Location; @@ -12,16 +12,18 @@ use crate::ssa::ir::{ function::{Function, FunctionId}, instruction::{Binary, BinaryOp, Instruction, TerminatorInstruction}, types::Type, - value::{Value, ValueId}, + value::Value, }; use super::{ ir::{ basic_block::BasicBlock, call_stack::{CallStack, CallStackId}, - dfg::InsertInstructionResult, function::RuntimeType, - instruction::{ConstrainError, InstructionId, Intrinsic}, + instruction::insert_result::InsertInstructionResult, + instruction::{ + insert_result::InsertInstructionResultIter, ConstrainError, InstructionId, Intrinsic, + }, types::NumericType, }, ssa_gen::Ssa, @@ -118,32 +120,13 @@ impl FunctionBuilder { /// Add a parameter to the current function with the given parameter type. /// Returns the newly-added parameter. - pub(crate) fn add_parameter(&mut self, typ: Type) -> ValueId { + pub(crate) fn add_parameter(&mut self, typ: Type) -> Value { let entry = self.current_function.entry_block(); self.current_function.dfg.add_block_parameter(entry, typ) } - /// Insert a numeric constant into the current function - pub(crate) fn numeric_constant( - &mut self, - value: impl Into, - typ: NumericType, - ) -> ValueId { - self.current_function.dfg.make_constant(value.into(), typ) - } - - /// Insert a numeric constant into the current function of type Field - pub(crate) fn field_constant(&mut self, value: impl Into) -> ValueId { - self.numeric_constant(value.into(), NumericType::NativeField) - } - - /// Insert a numeric constant into the current function of type Type::length_type() - pub(crate) fn length_constant(&mut self, value: impl Into) -> ValueId { - self.numeric_constant(value.into(), NumericType::length_type()) - } - /// Returns the type of the given value. - pub(crate) fn type_of_value(&self, value: ValueId) -> Type { + pub(crate) fn type_of_value(&self, value: Value) -> Type { self.current_function.dfg.type_of_value(value) } @@ -155,12 +138,15 @@ impl FunctionBuilder { /// Adds a parameter with the given type to the given block. /// Returns the newly-added parameter. - pub(crate) fn add_block_parameter(&mut self, block: BasicBlockId, typ: Type) -> ValueId { + pub(crate) fn add_block_parameter(&mut self, block: BasicBlockId, typ: Type) -> Value { self.current_function.dfg.add_block_parameter(block, typ) } /// Returns the parameters of the given block in the current function. 
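The grouping performed by `deflatten_databus_visibilities` is easier to see in isolation. Below is a minimal, self-contained sketch of the same idea under illustrative names (`Visibility` stands in for the crate's `DatabusVisibility`, and plain sizes stand in for the DFG's flattened type sizes): the flat visibility list is split into one chunk per SSA parameter, and every entry in a chunk must agree.

```rust
// Illustrative stand-in for `DatabusVisibility`.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Visibility {
    None,
    CallData(u32),
    ReturnData,
}

// Split the flattened visibilities into one chunk per SSA parameter (given its flattened
// size) and assert that no parameter mixes two different visibilities.
fn deflatten(sizes: &[usize], mut flat: Vec<Visibility>) -> Vec<Visibility> {
    let mut per_param = Vec::with_capacity(sizes.len());
    for &size in sizes {
        let chunk: Vec<Visibility> = flat.drain(0..size).collect();
        let first = chunk[0];
        assert!(chunk.iter().all(|v| *v == first), "inconsistent databus visibility");
        per_param.push(first);
    }
    per_param
}

fn main() {
    // One parameter of flattened size 3 followed by a scalar parameter.
    let flat = vec![
        Visibility::CallData(0),
        Visibility::CallData(0),
        Visibility::CallData(0),
        Visibility::None,
    ];
    assert_eq!(deflatten(&[3, 1], flat), vec![Visibility::CallData(0), Visibility::None]);
}
```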
- pub(crate) fn block_parameters(&self, block: BasicBlockId) -> &[ValueId] { + pub(crate) fn block_parameters( + &self, + block: BasicBlockId, + ) -> impl ExactSizeIterator { self.current_function.dfg.block_parameters(block) } @@ -168,13 +154,11 @@ impl FunctionBuilder { pub(crate) fn insert_instruction( &mut self, instruction: Instruction, - ctrl_typevars: Option>, ) -> InsertInstructionResult { let block = self.current_block(); self.current_function.dfg.insert_instruction_and_results( instruction, block, - ctrl_typevars, self.call_stack, ) } @@ -194,9 +178,8 @@ impl FunctionBuilder { /// Insert an allocate instruction at the end of the current block, allocating the /// given amount of field elements. Returns the result of the allocate instruction, /// which is always a Reference to the allocated data. - pub(crate) fn insert_allocate(&mut self, element_type: Type) -> ValueId { - let reference_type = Type::Reference(Arc::new(element_type)); - self.insert_instruction(Instruction::Allocate, Some(vec![reference_type])).first() + pub(crate) fn insert_allocate(&mut self, element_type: Type) -> Value { + self.insert_instruction(Instruction::Allocate { element_type }).first() } pub(crate) fn set_location(&mut self, location: Location) -> &mut FunctionBuilder { @@ -217,25 +200,20 @@ impl FunctionBuilder { /// which should point to a previous Allocate instruction. Note that this is limited to loading /// a single value. Loading multiple values (such as a tuple) will require multiple loads. /// Returns the element that was loaded. - pub(crate) fn insert_load(&mut self, address: ValueId, type_to_load: Type) -> ValueId { - self.insert_instruction(Instruction::Load { address }, Some(vec![type_to_load])).first() + pub(crate) fn insert_load(&mut self, address: Value, result_type: Type) -> Value { + self.insert_instruction(Instruction::Load { address, result_type }).first() } /// Insert a Store instruction at the end of the current block, storing the given element /// at the given address. Expects that the address points somewhere /// within a previous Allocate instruction. - pub(crate) fn insert_store(&mut self, address: ValueId, value: ValueId) { - self.insert_instruction(Instruction::Store { address, value }, None); + pub(crate) fn insert_store(&mut self, address: Value, value: Value) { + self.insert_instruction(Instruction::Store { address, value }); } /// Insert a binary instruction at the end of the current block. /// Returns the result of the binary instruction. - pub(crate) fn insert_binary( - &mut self, - lhs: ValueId, - operator: BinaryOp, - rhs: ValueId, - ) -> ValueId { + pub(crate) fn insert_binary(&mut self, lhs: Value, operator: BinaryOp, rhs: Value) -> Value { let lhs_type = self.type_of_value(lhs); let rhs_type = self.type_of_value(rhs); if operator != BinaryOp::Shl && operator != BinaryOp::Shr { @@ -245,126 +223,114 @@ impl FunctionBuilder { ); } let instruction = Instruction::Binary(Binary { lhs, rhs, operator }); - self.insert_instruction(instruction, None).first() + self.insert_instruction(instruction).first() } /// Insert a not instruction at the end of the current block. /// Returns the result of the instruction. - pub(crate) fn insert_not(&mut self, rhs: ValueId) -> ValueId { - self.insert_instruction(Instruction::Not(rhs), None).first() + pub(crate) fn insert_not(&mut self, rhs: Value) -> Value { + self.insert_instruction(Instruction::Not(rhs)).first() } /// Insert a cast instruction at the end of the current block. /// Returns the result of the cast instruction. 
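The removal of the `ctrl_typevars` parameter above follows from instructions now carrying their own result types. A minimal sketch of that shape, using simplified stand-in types rather than the crate's real `Instruction`/`Type` definitions (the `Allocate` case returning a reference type is an assumption of this sketch, mirroring what the old `insert_allocate` constructed):

```rust
#[derive(Clone, Debug, PartialEq)]
enum Type {
    Field,
    Reference(Box<Type>),
}

// Stand-in instruction set; addresses and values are plain u32 ids here.
#[derive(Clone, Debug)]
enum Instruction {
    Allocate { element_type: Type },
    Load { address: u32, result_type: Type },
    Store { address: u32, value: u32 },
}

impl Instruction {
    // With the type stored inline, the result type is answerable from the instruction
    // alone instead of from caller-provided `ctrl_typevars`.
    fn result_type(&self) -> Option<Type> {
        match self {
            // Assumption for this sketch: an allocation results in a reference to its
            // element type.
            Instruction::Allocate { element_type } => {
                Some(Type::Reference(Box::new(element_type.clone())))
            }
            Instruction::Load { result_type, .. } => Some(result_type.clone()),
            Instruction::Store { .. } => None,
        }
    }
}

fn main() {
    let load = Instruction::Load { address: 0, result_type: Type::Field };
    assert_eq!(load.result_type(), Some(Type::Field));
}
```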
- pub(crate) fn insert_cast(&mut self, value: ValueId, typ: NumericType) -> ValueId { - self.insert_instruction(Instruction::Cast(value, typ), None).first() + pub(crate) fn insert_cast(&mut self, value: Value, typ: NumericType) -> Value { + self.insert_instruction(Instruction::Cast(value, typ)).first() } /// Insert a truncate instruction at the end of the current block. /// Returns the result of the truncate instruction. pub(crate) fn insert_truncate( &mut self, - value: ValueId, - bit_size: u32, - max_bit_size: u32, - ) -> ValueId { - self.insert_instruction(Instruction::Truncate { value, bit_size, max_bit_size }, None) - .first() + value: Value, + bit_size: u8, + max_bit_size: u8, + ) -> Value { + self.insert_instruction(Instruction::Truncate { value, bit_size, max_bit_size }).first() } /// Insert a constrain instruction at the end of the current block. pub(crate) fn insert_constrain( &mut self, - lhs: ValueId, - rhs: ValueId, + lhs: Value, + rhs: Value, assert_message: Option, ) { - self.insert_instruction(Instruction::Constrain(lhs, rhs, assert_message), None); + self.insert_instruction(Instruction::Constrain(lhs, rhs, assert_message)); } /// Insert a [`Instruction::RangeCheck`] instruction at the end of the current block. pub(crate) fn insert_range_check( &mut self, - value: ValueId, - max_bit_size: u32, + value: Value, + max_bit_size: u8, assert_message: Option, ) { - self.insert_instruction( - Instruction::RangeCheck { value, max_bit_size, assert_message }, - None, - ); + self.insert_instruction(Instruction::RangeCheck { value, max_bit_size, assert_message }); } /// Insert a call instruction at the end of the current block and return /// the results of the call. pub(crate) fn insert_call( &mut self, - func: ValueId, - arguments: Vec, + func: Value, + arguments: Vec, result_types: Vec, - ) -> Cow<[ValueId]> { - self.insert_instruction(Instruction::Call { func, arguments }, Some(result_types)).results() + ) -> InsertInstructionResultIter { + let call = Instruction::Call { func, arguments, result_types }; + self.insert_instruction(call).results() } /// Insert an instruction to extract an element from an array pub(crate) fn insert_array_get( &mut self, - array: ValueId, - index: ValueId, + array: Value, + index: Value, element_type: Type, - ) -> ValueId { - let element_type = Some(vec![element_type]); - self.insert_instruction(Instruction::ArrayGet { array, index }, element_type).first() + ) -> Value { + let get = Instruction::ArrayGet { array, index, result_type: element_type }; + self.insert_instruction(get).first() } /// Insert an instruction to create a new array with the given index replaced with a new value - pub(crate) fn insert_array_set( - &mut self, - array: ValueId, - index: ValueId, - value: ValueId, - ) -> ValueId { - self.insert_instruction(Instruction::ArraySet { array, index, value, mutable: false }, None) + pub(crate) fn insert_array_set(&mut self, array: Value, index: Value, value: Value) -> Value { + self.insert_instruction(Instruction::ArraySet { array, index, value, mutable: false }) .first() } pub(crate) fn insert_mutable_array_set( &mut self, - array: ValueId, - index: ValueId, - value: ValueId, - ) -> ValueId { - self.insert_instruction(Instruction::ArraySet { array, index, value, mutable: true }, None) + array: Value, + index: Value, + value: Value, + ) -> Value { + self.insert_instruction(Instruction::ArraySet { array, index, value, mutable: true }) .first() } /// Insert an instruction to increment an array's reference count. 
This only has an effect /// in unconstrained code where arrays are reference counted and copy on write. - pub(crate) fn insert_inc_rc(&mut self, value: ValueId) { - self.insert_instruction(Instruction::IncrementRc { value }, None); + pub(crate) fn insert_inc_rc(&mut self, value: Value) { + self.insert_instruction(Instruction::IncrementRc { value }); } /// Insert an instruction to decrement an array's reference count. This only has an effect /// in unconstrained code where arrays are reference counted and copy on write. - pub(crate) fn insert_dec_rc(&mut self, value: ValueId) { - self.insert_instruction(Instruction::DecrementRc { value }, None); + pub(crate) fn insert_dec_rc(&mut self, value: Value) { + self.insert_instruction(Instruction::DecrementRc { value }); } /// Insert an enable_side_effects_if instruction. These are normally only automatically /// inserted during the flattening pass when branching is removed. - pub(crate) fn insert_enable_side_effects_if(&mut self, condition: ValueId) { - self.insert_instruction(Instruction::EnableSideEffectsIf { condition }, None); + pub(crate) fn insert_enable_side_effects_if(&mut self, condition: Value) { + self.insert_instruction(Instruction::EnableSideEffectsIf { condition }); } /// Insert a `make_array` instruction to create a new array or slice. /// Returns the new array value. Expects `typ` to be an array or slice type. - pub(crate) fn insert_make_array( - &mut self, - elements: im::Vector, - typ: Type, - ) -> ValueId { + pub(crate) fn insert_make_array(&mut self, elements: im::Vector, typ: Type) -> Value { assert!(matches!(typ, Type::Array(..) | Type::Slice(_))); - self.insert_instruction(Instruction::MakeArray { elements, typ }, None).first() + self.insert_instruction(Instruction::MakeArray { elements, typ }).first() } /// Terminates the current block with the given terminator instruction @@ -377,16 +343,11 @@ impl FunctionBuilder { /// Terminate the current block with a jmp instruction to jmp to the given /// block with the given arguments. - pub(crate) fn terminate_with_jmp( - &mut self, - destination: BasicBlockId, - arguments: Vec, - ) { - let call_stack = self.call_stack; + pub(crate) fn terminate_with_jmp(&mut self, destination: BasicBlockId, arguments: Vec) { self.terminate_block_with(TerminatorInstruction::Jmp { destination, arguments, - call_stack, + call_stack: self.call_stack, }); } @@ -394,7 +355,7 @@ impl FunctionBuilder { /// block with the given arguments. pub(crate) fn terminate_with_jmpif( &mut self, - condition: ValueId, + condition: Value, then_destination: BasicBlockId, else_destination: BasicBlockId, ) { @@ -408,47 +369,52 @@ impl FunctionBuilder { } /// Terminate the current block with a return instruction - pub(crate) fn terminate_with_return(&mut self, return_values: Vec) { + pub(crate) fn terminate_with_return(&mut self, return_values: Vec) { let call_stack = self.call_stack; self.terminate_block_with(TerminatorInstruction::Return { return_values, call_stack }); } - /// Returns a ValueId pointing to the given function or imports the function - /// into the current function if it was not already, and returns that ID. - pub(crate) fn import_function(&mut self, function: FunctionId) -> ValueId { - self.current_function.dfg.import_function(function) - } - - /// Returns a ValueId pointing to the given oracle/foreign function or imports the oracle + /// Returns a Value pointing to the given oracle/foreign function or imports the oracle /// into the current function if it was not already, and returns that ID. 
- pub(crate) fn import_foreign_function(&mut self, function: &str) -> ValueId { + pub(crate) fn import_foreign_function(&mut self, function: &str) -> Value { self.current_function.dfg.import_foreign_function(function) } /// Retrieve a value reference to the given intrinsic operation. /// Returns None if there is no intrinsic matching the given name. - pub(crate) fn import_intrinsic(&mut self, name: &str) -> Option { - Intrinsic::lookup(name).map(|intrinsic| self.import_intrinsic_id(intrinsic)) + pub(crate) fn import_intrinsic(&mut self, name: &str) -> Option { + Intrinsic::lookup(name).map(Value::Intrinsic) } - /// Retrieve a value reference to the given intrinsic operation. - pub(crate) fn import_intrinsic_id(&mut self, intrinsic: Intrinsic) -> ValueId { - self.current_function.dfg.import_intrinsic(intrinsic) - } - - pub(crate) fn get_intrinsic_from_value(&mut self, value: ValueId) -> Option { - match self.current_function.dfg[value] { + pub(crate) fn get_intrinsic_from_value(&mut self, value: Value) -> Option { + match value { Value::Intrinsic(intrinsic) => Some(intrinsic), _ => None, } } + pub(crate) fn constant(&mut self, value: FieldElement, typ: NumericType) -> Value { + self.current_function.dfg.constant(value, typ) + } + + pub(crate) fn field_constant(&mut self, value: FieldElement) -> Value { + self.current_function.dfg.constant(value, NumericType::NativeField) + } + + pub(crate) fn length_constant(&mut self, value: FieldElement) -> Value { + self.current_function.dfg.constant(value, NumericType::length_type()) + } + + pub(crate) fn bool_constant(&mut self, value: bool) -> Value { + self.current_function.dfg.constant(value.into(), NumericType::bool()) + } + /// Insert instructions to increment the reference count of any array(s) stored /// within the given value. If the given value is not an array and does not contain /// any arrays, this does nothing. /// /// Returns whether a reference count instruction was issued. - pub(crate) fn increment_array_reference_count(&mut self, value: ValueId) -> bool { + pub(crate) fn increment_array_reference_count(&mut self, value: Value) -> bool { self.update_array_reference_count(value, true) } @@ -457,7 +423,7 @@ impl FunctionBuilder { /// any arrays, this does nothing. /// /// Returns whether a reference count instruction was issued. - pub(crate) fn decrement_array_reference_count(&mut self, value: ValueId) -> bool { + pub(crate) fn decrement_array_reference_count(&mut self, value: Value) -> bool { self.update_array_reference_count(value, false) } @@ -466,7 +432,7 @@ impl FunctionBuilder { /// are ignored outside of unconstrained code. /// /// Returns whether a reference count instruction was issued. 
- fn update_array_reference_count(&mut self, value: ValueId, increment: bool) -> bool { + fn update_array_reference_count(&mut self, value: Value, increment: bool) -> bool { match self.type_of_value(value) { Type::Numeric(_) => false, Type::Function => false, @@ -498,14 +464,6 @@ impl FunctionBuilder { } } -impl std::ops::Index for FunctionBuilder { - type Output = Value; - - fn index(&self, id: ValueId) -> &Self::Output { - &self.current_function.dfg[id] - } -} - impl std::ops::Index for FunctionBuilder { type Output = Instruction; @@ -526,12 +484,13 @@ impl std::ops::Index for FunctionBuilder { mod tests { use std::sync::Arc; - use acvm::{acir::AcirField, FieldElement}; + use acvm::FieldElement; use crate::ssa::ir::{ instruction::{Endian, Intrinsic}, map::Id, - types::{NumericType, Type}, + types::Type, + value::Value, }; use super::FunctionBuilder; @@ -543,17 +502,17 @@ mod tests { // let bits: [u1; 8] = x.to_le_bits(); let func_id = Id::test_new(0); let mut builder = FunctionBuilder::new("func".into(), func_id); - let one = builder.numeric_constant(FieldElement::one(), NumericType::bool()); - let zero = builder.numeric_constant(FieldElement::zero(), NumericType::bool()); + let one = builder.bool_constant(true); + let zero = builder.bool_constant(false); - let to_bits_id = builder.import_intrinsic_id(Intrinsic::ToBits(Endian::Little)); + let to_bits_id = Value::Intrinsic(Intrinsic::ToBits(Endian::Little)); let input = builder.field_constant(FieldElement::from(7_u128)); let length = builder.field_constant(FieldElement::from(8_u128)); let result_types = vec![Type::Array(Arc::new(vec![Type::bool()]), 8)]; - let call_results = - builder.insert_call(to_bits_id, vec![input, length], result_types).into_owned(); + let mut call_results = builder.insert_call(to_bits_id, vec![input, length], result_types); - let slice = builder.current_function.dfg.get_array_constant(call_results[0]).unwrap().0; + let first_result = call_results.next().unwrap(); + let slice = builder.current_function.dfg.get_array_constant(first_result).unwrap().0; assert_eq!(slice[0], one); assert_eq!(slice[1], one); assert_eq!(slice[2], one); diff --git a/compiler/noirc_evaluator/src/ssa/ir/basic_block.rs b/compiler/noirc_evaluator/src/ssa/ir/basic_block.rs index b2a923c6a51..00ae6f4ddbd 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/basic_block.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/basic_block.rs @@ -2,7 +2,8 @@ use super::{ call_stack::CallStackId, instruction::{InstructionId, TerminatorInstruction}, map::Id, - value::ValueId, + types::Type, + value::Value, }; use serde::{Deserialize, Serialize}; @@ -14,8 +15,8 @@ use serde::{Deserialize, Serialize}; /// block, then all instructions are executed. ie single-entry single-exit. #[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)] pub(crate) struct BasicBlock { - /// Parameters to the basic block. - parameters: Vec, + /// Types of each parameter to this block + parameter_types: Vec, /// Instructions in the basic block. instructions: Vec, @@ -31,33 +32,38 @@ pub(crate) struct BasicBlock { pub(crate) type BasicBlockId = Id; impl BasicBlock { - /// Create a new BasicBlock with the given parameters. - /// Parameters can also be added later via BasicBlock::add_parameter + /// Create a new BasicBlock with no parameters. 
+ /// Parameters can be added later via BasicBlock::add_parameter pub(crate) fn new() -> Self { - Self { parameters: Vec::new(), instructions: Vec::new(), terminator: None } + Self { parameter_types: Vec::new(), instructions: Vec::new(), terminator: None } } - /// Returns the parameters of this block - pub(crate) fn parameters(&self) -> &[ValueId] { - &self.parameters + /// Retrieve the type of the given parameter + pub(crate) fn type_of_parameter(&self, parameter_index: u16) -> &Type { + &self.parameter_types[parameter_index as usize] } - /// Removes all the parameters of this block - pub(crate) fn take_parameters(&mut self) -> Vec { - std::mem::take(&mut self.parameters) + /// Adds a parameter to this BasicBlock. + pub(crate) fn add_parameter(&mut self, typ: Type) { + self.parameter_types.push(typ); } - /// Adds a parameter to this BasicBlock. - /// Expects that the ValueId given should refer to a Value::Param - /// instance with its position equal to self.parameters.len(). - pub(crate) fn add_parameter(&mut self, parameter: ValueId) { - self.parameters.push(parameter); + pub(crate) fn parameter_types(&self) -> &[Type] { + &self.parameter_types + } + + pub(crate) fn parameter_types_mut(&mut self) -> &mut Vec { + &mut self.parameter_types + } + + pub(crate) fn parameter_count(&self) -> usize { + self.parameter_types.len() } /// Replace this block's current parameters with that of the given Vec. /// This does not perform any checks that any previous parameters were unused. - pub(crate) fn set_parameters(&mut self, parameters: Vec) { - self.parameters = parameters; + pub(crate) fn set_parameters(&mut self, types: Vec) { + self.parameter_types = types; } /// Insert an instruction at the end of this block @@ -131,7 +137,7 @@ impl BasicBlock { /// Return the jmp arguments, if any, of this block's TerminatorInstruction. /// /// If this block has no terminator, or a Return terminator this will be empty. - pub(crate) fn terminator_arguments(&self) -> &[ValueId] { + pub(crate) fn jmp_arguments(&self) -> &[Value] { match &self.terminator { Some(TerminatorInstruction::Jmp { arguments, .. }) => arguments, _ => &[], diff --git a/compiler/noirc_evaluator/src/ssa/ir/dfg.rs b/compiler/noirc_evaluator/src/ssa/ir/dfg.rs index 27eeaa0e15b..9bf935a3784 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/dfg.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/dfg.rs @@ -1,26 +1,22 @@ -use std::borrow::Cow; - use crate::ssa::{function_builder::data_bus::DataBus, ir::instruction::SimplifyResult}; use super::{ basic_block::{BasicBlock, BasicBlockId}, call_stack::{CallStack, CallStackHelper, CallStackId}, - function::FunctionId, instruction::{ - Instruction, InstructionId, InstructionResultType, Intrinsic, TerminatorInstruction, + insert_result::InsertInstructionResult, Instruction, InstructionId, InstructionResultType, + TerminatorInstruction, }, - map::DenseMap, + map::{DenseMap, ForeignFunctions, UniqueMap}, types::{NumericType, Type}, - value::{Value, ValueId}, + value::{FieldElementId, ForeignFunctionId, Value}, }; use acvm::{acir::AcirField, FieldElement}; use fxhash::FxHashMap as HashMap; -use iter_extended::vecmap; use noirc_errors::Location; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use serde_with::DisplayFromStr; /// The DataFlowGraph contains most of the actual data in a function including /// its blocks, instructions, and values. 
This struct is largely responsible for @@ -32,54 +28,22 @@ pub(crate) struct DataFlowGraph { /// All of the instructions in a function instructions: DenseMap, - /// Stores the results for a particular instruction. - /// - /// An instruction may return multiple values - /// and for this, we will also use the cranelift strategy - /// to fetch them via indices. - /// - /// Currently, we need to define them in a better way - /// Call instructions require the func signature, but - /// other instructions may need some more reading on my part - #[serde_as(as = "HashMap")] - results: HashMap>, - - /// Storage for all of the values defined in this - /// function. - values: DenseMap, - - /// Each constant is unique, attempting to insert the same constant - /// twice will return the same ValueId. - #[serde(skip)] - constants: HashMap<(FieldElement, NumericType), ValueId>, - - /// Contains each function that has been imported into the current function. - /// A unique `ValueId` for each function's [`Value::Function`] is stored so any given FunctionId - /// will always have the same ValueId within this function. - #[serde(skip)] - functions: HashMap, - - /// Contains each intrinsic that has been imported into the current function. - /// This map is used to ensure that the ValueId for any given intrinsic is always - /// represented by only 1 ValueId within this function. - #[serde(skip)] - intrinsics: HashMap, - /// Contains each foreign function that has been imported into the current function. - /// This map is used to ensure that the ValueId for any given foreign function is always - /// represented by only 1 ValueId within this function. + /// This map is used to ensure that the Value for any given foreign function is always + /// represented by only 1 Value within this function. #[serde(skip)] - foreign_functions: HashMap, + foreign_functions: ForeignFunctions, /// All blocks in a function blocks: DenseMap, - /// Debugging information about which `ValueId`s have had their underlying `Value` substituted - /// for that of another. In theory this information is purely used for printing the SSA, - /// and has no material effect on the SSA itself, however in practice the IDs can get out of - /// sync and may need this resolution before they can be compared. + /// Debugging information about which Values are substituted for another. + #[serde(skip)] + replaced_values: HashMap, + + /// Each FieldElement is assigned a unique id #[serde(skip)] - replaced_value_ids: HashMap, + numeric_constants: UniqueMap, /// Source location of each instruction for debugging and issuing errors. 
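Since `numeric_constants` replaces the old per-function `constants` and `values` maps, a short sketch of the interning pattern may help. `UniqueMap` and `FieldElementId` are defined elsewhere in this PR, so the code below is a simplified model rather than the real types, with `u128` standing in for `FieldElement`:

```rust
use std::collections::HashMap;
use std::hash::Hash;

// Each distinct constant is stored once and referred to by a small copyable id, so
// values that embed a constant can be passed around by value cheaply.
#[derive(Default)]
struct UniqueMap<T: Hash + Eq + Clone> {
    items: Vec<T>,
    ids: HashMap<T, u32>,
}

impl<T: Hash + Eq + Clone> UniqueMap<T> {
    fn get_or_insert(&mut self, item: &T) -> u32 {
        if let Some(id) = self.ids.get(item) {
            return *id;
        }
        let id = self.items.len() as u32;
        self.items.push(item.clone());
        self.ids.insert(item.clone(), id);
        id
    }
}

fn main() {
    let mut constants = UniqueMap::<u128>::default();
    let a = constants.get_or_insert(&42);
    let b = constants.get_or_insert(&42);
    assert_eq!(a, b); // the same constant always resolves to the same id
}
```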
/// @@ -117,14 +81,8 @@ impl DataFlowGraph { block: BasicBlockId, ) -> BasicBlockId { let new_block = self.make_block(); - let parameters = self.blocks[block].parameters(); - - let parameters = vecmap(parameters.iter().enumerate(), |(position, param)| { - let typ = self.values[*param].get_type().into_owned(); - self.values.insert(Value::Param { block: new_block, position, typ }) - }); - - self.blocks[new_block].set_parameters(parameters); + let parameter_types = self.blocks[block].parameter_types().to_vec(); + self.blocks[new_block].set_parameters(parameter_types); new_block } @@ -138,41 +96,29 @@ impl DataFlowGraph { self.blocks.iter() } - /// Iterate over every Value in this DFG in no particular order, including unused Values - pub(crate) fn values_iter(&self) -> impl ExactSizeIterator { - self.values.iter() - } - - /// Returns the parameters of the given block - pub(crate) fn block_parameters(&self, block: BasicBlockId) -> &[ValueId] { - self.blocks[block].parameters() + /// Iterate over the parameters of a block + pub(crate) fn block_parameters( + &self, + block: BasicBlockId, + ) -> impl ExactSizeIterator { + let parameter_count = self[block].parameter_types().len().try_into().unwrap(); + (0..parameter_count).map(move |position| Value::Param { block, position }) } /// Inserts a new instruction into the DFG. /// This does not add the instruction to the block. - /// Returns the id of the new instruction and its results. - /// - /// Populates the instruction's results with the given ctrl_typevars if the instruction - /// is a Load, Call, or Intrinsic. Otherwise the instruction's results will be known - /// by the instruction itself and None can safely be passed for this parameter. - pub(crate) fn make_instruction( - &mut self, - instruction_data: Instruction, - ctrl_typevars: Option>, - ) -> InstructionId { - let id = self.instructions.insert(instruction_data); - self.make_instruction_results(id, ctrl_typevars); - id + /// Returns the id of the new instruction. 
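The reworked `block_parameters` above no longer returns stored ids; because blocks only keep their parameter types, parameter values can be synthesized on demand from `(block, position)`. A compact model of that idea, with stand-in types for the crate's `Type` and `Value`:

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct BasicBlockId(u32);

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Value {
    Param { block: BasicBlockId, position: u16 },
}

struct BasicBlock {
    // Only the types are stored; the parameter values themselves are derived.
    parameter_types: Vec<&'static str>,
}

fn block_parameters(id: BasicBlockId, block: &BasicBlock) -> impl ExactSizeIterator<Item = Value> {
    let count = block.parameter_types.len() as u16;
    (0..count).map(move |position| Value::Param { block: id, position })
}

fn main() {
    let block = BasicBlock { parameter_types: vec!["Field", "u32"] };
    let params: Vec<Value> = block_parameters(BasicBlockId(0), &block).collect();
    assert_eq!(params.len(), 2);
    assert_eq!(params[0], Value::Param { block: BasicBlockId(0), position: 0 });
}
```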
+ pub(crate) fn make_instruction(&mut self, instruction_data: Instruction) -> InstructionId { + self.instructions.insert(instruction_data) } fn insert_instruction_without_simplification( &mut self, instruction_data: Instruction, block: BasicBlockId, - ctrl_typevars: Option>, call_stack: CallStackId, ) -> InstructionId { - let id = self.make_instruction(instruction_data, ctrl_typevars); + let id = self.make_instruction(instruction_data); self.blocks[block].insert_instruction(id); self.locations.insert(id, call_stack); id @@ -182,17 +128,13 @@ impl DataFlowGraph { &mut self, instruction_data: Instruction, block: BasicBlockId, - ctrl_typevars: Option>, call_stack: CallStackId, ) -> InsertInstructionResult { - let id = self.insert_instruction_without_simplification( - instruction_data, - block, - ctrl_typevars, - call_stack, - ); + let result_count = instruction_data.result_count(); + let id = + self.insert_instruction_without_simplification(instruction_data, block, call_stack); - InsertInstructionResult::Results(id, self.instruction_results(id)) + InsertInstructionResult::Results { id, result_count } } /// Inserts a new instruction at the end of the given block and returns its results @@ -200,10 +142,9 @@ impl DataFlowGraph { &mut self, instruction: Instruction, block: BasicBlockId, - ctrl_typevars: Option>, call_stack: CallStackId, ) -> InsertInstructionResult { - match instruction.simplify(self, block, ctrl_typevars.clone(), call_stack) { + match instruction.simplify(self, block, call_stack) { SimplifyResult::SimplifiedTo(simplification) => { InsertInstructionResult::SimplifiedTo(simplification) } @@ -211,12 +152,7 @@ impl DataFlowGraph { InsertInstructionResult::SimplifiedToMultiple(simplification) } SimplifyResult::Remove => InsertInstructionResult::InstructionRemoved, - result @ (SimplifyResult::SimplifiedToInstruction(_) - | SimplifyResult::SimplifiedToInstructionMultiple(_) - | SimplifyResult::None) => { - let mut instructions = result.instructions().unwrap_or(vec![instruction]); - assert!(!instructions.is_empty(), "`SimplifyResult::SimplifiedToInstructionMultiple` must not return empty vector"); - + SimplifyResult::SimplifiedToInstructionMultiple(instructions) => { if instructions.len() > 1 { // There's currently no way to pass results from one instruction in `instructions` on to the next. // We then restrict this to only support multiple instructions if they're all `Instruction::Constrain` @@ -227,168 +163,86 @@ impl DataFlowGraph { ); } - // Pull off the last instruction as we want to return its results. 
- let last_instruction = instructions.pop().expect("`instructions` can't be empty"); + let mut last_id = None; + let mut last_count = 0; + for instruction in instructions { - self.insert_instruction_without_simplification( - instruction, - block, - ctrl_typevars.clone(), - call_stack, - ); + last_count = instruction.result_count(); + let id = self.make_instruction(instruction); + self.blocks[block].insert_instruction(id); + self.locations.insert(id, call_stack); + last_id = Some(id); } - self.insert_instruction_and_results_without_simplification( - last_instruction, - block, - ctrl_typevars, - call_stack, - ) + + let id = last_id.expect("There should be at least 1 simplified instruction"); + InsertInstructionResult::Results { id, result_count: last_count } } - } - } + result @ (SimplifyResult::SimplifiedToInstruction(_) | SimplifyResult::None) => { + let instruction = result.instruction().unwrap_or(instruction); + let result_count = instruction.result_count(); - /// Insert a value into the dfg's storage and return an id to reference it. - /// Until the value is used in an instruction it is unreachable. - pub(crate) fn make_value(&mut self, value: Value) -> ValueId { - self.values.insert(value) + let id = self.make_instruction(instruction); + self.blocks[block].insert_instruction(id); + self.locations.insert(id, call_stack); + + InsertInstructionResult::Results { id, result_count } + } + } } /// Set the value of value_to_replace to refer to the value referred to by new_value. /// /// This is the preferred method to call for optimizations simplifying - /// values since other instructions referring to the same ValueId need - /// not be modified to refer to a new ValueId. - pub(crate) fn set_value_from_id(&mut self, value_to_replace: ValueId, new_value: ValueId) { + /// values since other instructions referring to the same Value need + /// not be modified to refer to a new Value. + pub(crate) fn replace_value(&mut self, value_to_replace: Value, new_value: Value) { if value_to_replace != new_value { - self.replaced_value_ids.insert(value_to_replace, self.resolve(new_value)); - let new_value = self.values[new_value].clone(); - self.values[value_to_replace] = new_value; - } - } - - /// Set the type of value_id to the target_type. - pub(crate) fn set_type_of_value(&mut self, value_id: ValueId, target_type: Type) { - let value = &mut self.values[value_id]; - match value { - Value::Instruction { typ, .. } | Value::Param { typ, .. } => { - *typ = target_type; - } - Value::NumericConstant { typ, .. } => { - *typ = target_type.unwrap_numeric(); - } - _ => { - unreachable!("ICE: Cannot set type of {:?}", value); - } + self.replaced_values.insert(value_to_replace, self.resolve(new_value)); } } /// If `original_value_id`'s underlying `Value` has been substituted for that of another - /// `ValueId`, this function will return the `ValueId` from which the substitution was taken. - /// If `original_value_id`'s underlying `Value` has not been substituted, the same `ValueId` + /// `Value`, this function will return the `Value` from which the substitution was taken. + /// If `original_value_id`'s underlying `Value` has not been substituted, the same `Value` /// is returned. 
- pub(crate) fn resolve(&self, original_value_id: ValueId) -> ValueId { - match self.replaced_value_ids.get(&original_value_id) { + pub(crate) fn resolve(&self, original_value_id: Value) -> Value { + match self.replaced_values.get(&original_value_id) { Some(id) => self.resolve(*id), None => original_value_id, } } - /// Creates a new constant value, or returns the Id to an existing one if - /// one already exists. - pub(crate) fn make_constant(&mut self, constant: FieldElement, typ: NumericType) -> ValueId { - if let Some(id) = self.constants.get(&(constant, typ)) { - return *id; - } - let id = self.values.insert(Value::NumericConstant { constant, typ }); - self.constants.insert((constant, typ), id); - id - } - - /// Gets or creates a ValueId for the given FunctionId. - pub(crate) fn import_function(&mut self, function: FunctionId) -> ValueId { - if let Some(existing) = self.functions.get(&function) { - return *existing; - } - self.values.insert(Value::Function(function)) - } - - /// Gets or creates a ValueId for the given FunctionId. - pub(crate) fn import_foreign_function(&mut self, function: &str) -> ValueId { - if let Some(existing) = self.foreign_functions.get(function) { - return *existing; - } - self.values.insert(Value::ForeignFunction(function.to_owned())) - } - - /// Gets or creates a ValueId for the given Intrinsic. - pub(crate) fn import_intrinsic(&mut self, intrinsic: Intrinsic) -> ValueId { - if let Some(existing) = self.get_intrinsic(intrinsic) { - return *existing; - } - let intrinsic_value_id = self.values.insert(Value::Intrinsic(intrinsic)); - self.intrinsics.insert(intrinsic, intrinsic_value_id); - intrinsic_value_id - } - - pub(crate) fn get_intrinsic(&self, intrinsic: Intrinsic) -> Option<&ValueId> { - self.intrinsics.get(&intrinsic) - } - - /// Attaches results to the instruction, clearing any previous results. - /// - /// This does not normally need to be called manually as it is called within - /// make_instruction automatically. - /// - /// Returns the results of the instruction - pub(crate) fn make_instruction_results( - &mut self, - instruction_id: InstructionId, - ctrl_typevars: Option>, - ) { - let result_types = self.instruction_result_types(instruction_id, ctrl_typevars); - let results = vecmap(result_types.into_iter().enumerate(), |(position, typ)| { - let instruction = instruction_id; - self.values.insert(Value::Instruction { typ, position, instruction }) - }); - - self.results.insert(instruction_id, results); + /// Gets or creates a Value for the given FunctionId. + pub(crate) fn import_foreign_function(&mut self, function: &str) -> Value { + Value::ForeignFunction(self.foreign_functions.get_or_insert(function)) } - /// Return the result types of this instruction. - /// - /// In the case of Load, Call, and Intrinsic, the function's result - /// type may be unknown. In this case, the given ctrl_typevars are returned instead. - /// ctrl_typevars is taken in as an Option since it is common to omit them when getting - /// the type of an instruction that does not require them. Compared to passing an empty Vec, - /// Option has the benefit of panicking if it is accidentally used for a Call instruction, - /// rather than silently returning the empty Vec and continuing. 
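The `replaced_values` map and the recursive `resolve` above form a simple substitution chain. A minimal sketch with plain integers standing in for values:

```rust
use std::collections::HashMap;

// Follow the substitution chain until reaching a value that was never replaced.
fn resolve(replaced: &HashMap<u32, u32>, mut value: u32) -> u32 {
    while let Some(next) = replaced.get(&value) {
        value = *next;
    }
    value
}

fn main() {
    let mut replaced = HashMap::new();
    replaced.insert(3, 7); // v3 was simplified to v7
    replaced.insert(7, 9); // v7 was later simplified to v9
    assert_eq!(resolve(&replaced, 3), 9);
    assert_eq!(resolve(&replaced, 5), 5); // untouched values resolve to themselves
}
```

Note that `replace_value` above already stores `self.resolve(new_value)`, which keeps these chains short in practice.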
- fn instruction_result_types( - &self, - instruction_id: InstructionId, - ctrl_typevars: Option>, - ) -> Vec { - let instruction = &self.instructions[instruction_id]; - match instruction.result_type() { - InstructionResultType::Known(typ) => vec![typ], - InstructionResultType::Operand(value) => vec![self.type_of_value(value)], - InstructionResultType::None => vec![], - InstructionResultType::Unknown => { - ctrl_typevars.expect("Control typevars required but not given") + /// Returns the type of a given value + pub(crate) fn type_of_value(&self, value: Value) -> Type { + match value { + Value::Instruction { instruction, position } => { + match self[instruction].result_type() { + // How expensive is this recursive call? Maybe we should store types + InstructionResultType::Operand(value) => self.type_of_value(value), + InstructionResultType::Known(typ) => typ, + InstructionResultType::None => unreachable!("Instruction has no results"), + InstructionResultType::Multiple(types) => types[position as usize].clone(), + } } + Value::Param { block, position } => self[block].type_of_parameter(position).clone(), + Value::NumericConstant { typ, .. } => Type::Numeric(typ), + Value::Function(_) => Type::Function, + Value::Intrinsic(_) => Type::Function, + Value::ForeignFunction(_) => Type::Function, } } - /// Returns the type of a given value - pub(crate) fn type_of_value(&self, value: ValueId) -> Type { - self.values[value].get_type().into_owned() - } - /// Returns the maximum possible number of bits that `value` can potentially be. /// /// Should `value` be a numeric constant then this function will return the exact number of bits required, /// otherwise it will return the minimum number of bits based on type information. - pub(crate) fn get_value_max_num_bits(&self, value: ValueId) -> u32 { - match self[value] { + pub(crate) fn get_value_max_num_bits(&self, value: Value) -> u8 { + match value { Value::Instruction { instruction, .. } => { if let Instruction::Cast(original_value, _) = self[instruction] { self.type_of_value(original_value).bit_size() @@ -397,64 +251,39 @@ impl DataFlowGraph { } } - Value::NumericConstant { constant, .. } => constant.num_bits(), + Value::NumericConstant { constant, .. } => { + self[constant].num_bits().try_into().unwrap() + } _ => self.type_of_value(value).bit_size(), } } /// True if the type of this value is Type::Reference. /// Using this method over type_of_value avoids cloning the value's type. - pub(crate) fn value_is_reference(&self, value: ValueId) -> bool { - matches!(self.values[value].get_type().as_ref(), Type::Reference(_)) - } - - /// Replaces an instruction result with a fresh id. - pub(crate) fn replace_result( - &mut self, - instruction_id: InstructionId, - prev_value_id: ValueId, - ) -> ValueId { - let typ = self.type_of_value(prev_value_id); - let results = self.results.get_mut(&instruction_id).unwrap(); - let res_position = results - .iter() - .position(|&id| id == prev_value_id) - .expect("Result id not found while replacing"); - - let value_id = self.values.insert(Value::Instruction { - typ, - position: res_position, - instruction: instruction_id, - }); - - // Replace the value in list of results for this instruction - results[res_position] = value_id; - value_id - } - - /// Returns the number of instructions - /// inserted into functions. 
- pub(crate) fn num_instructions(&self) -> usize { - self.instructions.len() + pub(crate) fn value_is_reference(&self, value: Value) -> bool { + matches!(self.type_of_value(value), Type::Reference(_)) } /// Returns all of result values which are attached to this instruction. - pub(crate) fn instruction_results(&self, instruction_id: InstructionId) -> &[ValueId] { - self.results.get(&instruction_id).expect("expected a list of Values").as_slice() + pub(crate) fn instruction_results( + &self, + instruction: InstructionId, + ) -> impl ExactSizeIterator { + let result_count: u16 = self[instruction].result_count().try_into().unwrap(); + (0..result_count).map(move |position| Value::Instruction { instruction, position }) } /// Add a parameter to the given block - pub(crate) fn add_block_parameter(&mut self, block_id: BasicBlockId, typ: Type) -> ValueId { + pub(crate) fn add_block_parameter(&mut self, block_id: BasicBlockId, typ: Type) -> Value { let block = &mut self.blocks[block_id]; - let position = block.parameters().len(); - let parameter = self.values.insert(Value::Param { block: block_id, position, typ }); - block.add_parameter(parameter); - parameter + let position = block.parameter_types().len().try_into().unwrap(); + block.add_parameter(typ); + Value::Param { block: block_id, position } } /// Returns the field element represented by this value if it is a numeric constant. /// Returns None if the given value is not a numeric constant. - pub(crate) fn get_numeric_constant(&self, value: ValueId) -> Option { + pub(crate) fn get_numeric_constant(&self, value: Value) -> Option { self.get_numeric_constant_with_type(value).map(|(value, _typ)| value) } @@ -462,19 +291,42 @@ impl DataFlowGraph { /// Returns None if the given value is not a numeric constant. pub(crate) fn get_numeric_constant_with_type( &self, - value: ValueId, + value: Value, ) -> Option<(FieldElement, NumericType)> { - match &self.values[self.resolve(value)] { - Value::NumericConstant { constant, typ } => Some((*constant, *typ)), + match self.resolve(value) { + Value::NumericConstant { constant, typ } => Some((self[constant], typ)), _ => None, } } - /// Returns the Value::Array associated with this ValueId if it refers to an array constant. + pub(crate) fn constant(&mut self, constant: FieldElement, typ: NumericType) -> Value { + self.constant_by_ref(&constant, typ) + } + + /// The same as `self.constant` but avoids copying the given FieldElement + /// unless the underlying map doesn't contain it already. + pub(crate) fn constant_by_ref(&mut self, constant: &FieldElement, typ: NumericType) -> Value { + let constant = self.numeric_constants.get_or_insert(constant); + Value::NumericConstant { constant, typ } + } + + pub(crate) fn field_constant(&mut self, constant: FieldElement) -> Value { + self.constant(constant, NumericType::NativeField) + } + + pub(crate) fn length_constant(&mut self, constant: FieldElement) -> Value { + self.constant(constant, NumericType::length_type()) + } + + pub(crate) fn bool_constant(&mut self, constant: bool) -> Value { + self.constant(constant.into(), NumericType::bool()) + } + + /// Returns the Value::Array associated with this Value if it refers to an array constant. /// Otherwise, this returns None. - pub(crate) fn get_array_constant(&self, value: ValueId) -> Option<(im::Vector, Type)> { - match &self.values[self.resolve(value)] { - Value::Instruction { instruction, .. 
} => match &self.instructions[*instruction] { + pub(crate) fn get_array_constant(&self, value: Value) -> Option<(im::Vector, Type)> { + match self.resolve(value) { + Value::Instruction { instruction, .. } => match &self.instructions[instruction] { Instruction::MakeArray { elements, typ } => Some((elements.clone(), typ.clone())), _ => None, }, @@ -485,7 +337,7 @@ impl DataFlowGraph { /// If this value is an array, return the length of the array as indicated by its type. /// Otherwise, return None. - pub(crate) fn try_get_array_length(&self, value: ValueId) -> Option { + pub(crate) fn try_get_array_length(&self, value: Value) -> Option { match self.type_of_value(value) { Type::Array(_, length) => Some(length), _ => None, @@ -493,13 +345,14 @@ impl DataFlowGraph { } /// A constant index less than the array length is safe - pub(crate) fn is_safe_index(&self, index: ValueId, array: ValueId) -> bool { + pub(crate) fn is_safe_index(&self, index: Value, array: Value) -> bool { #[allow(clippy::match_like_matches_macro)] match (self.type_of_value(array), self.get_numeric_constant(index)) { (Type::Array(_, len), Some(index)) if index.to_u128() < (len as u128) => true, _ => false, } } + /// Sets the terminator instruction for the given basic block pub(crate) fn set_block_terminator( &mut self, @@ -545,27 +398,27 @@ impl DataFlowGraph { self.call_stack_data.get_call_stack(call_stack) } - pub(crate) fn get_value_call_stack(&self, value: ValueId) -> CallStack { - match &self.values[self.resolve(value)] { - Value::Instruction { instruction, .. } => self.get_instruction_call_stack(*instruction), + pub(crate) fn get_value_call_stack(&self, value: Value) -> CallStack { + match self.resolve(value) { + Value::Instruction { instruction, .. } => self.get_instruction_call_stack(instruction), _ => CallStack::new(), } } - pub(crate) fn get_value_call_stack_id(&self, value: ValueId) -> CallStackId { - match &self.values[self.resolve(value)] { + pub(crate) fn get_value_call_stack_id(&self, value: Value) -> CallStackId { + match self.resolve(value) { Value::Instruction { instruction, .. } => { - self.get_instruction_call_stack_id(*instruction) + self.get_instruction_call_stack_id(instruction) } _ => CallStackId::root(), } } - /// True if the given ValueId refers to a (recursively) constant value - pub(crate) fn is_constant(&self, argument: ValueId) -> bool { - match &self[self.resolve(argument)] { + /// True if the given Value refers to a (recursively) constant value + pub(crate) fn is_constant(&self, argument: Value) -> bool { + match self.resolve(argument) { Value::Param { .. } => false, - Value::Instruction { instruction, .. } => match &self[*instruction] { + Value::Instruction { instruction, .. } => match &self[instruction] { Instruction::MakeArray { elements, .. 
} => { elements.iter().all(|element| self.is_constant(*element)) } @@ -576,7 +429,7 @@ impl DataFlowGraph { } /// True that the input is a non-zero `Value::NumericConstant` - pub(crate) fn is_constant_true(&self, argument: ValueId) -> bool { + pub(crate) fn is_constant_true(&self, argument: Value) -> bool { if let Some(constant) = self.get_numeric_constant(argument) { !constant.is_zero() } else { @@ -598,13 +451,6 @@ impl std::ops::IndexMut for DataFlowGraph { } } -impl std::ops::Index for DataFlowGraph { - type Output = Value; - fn index(&self, id: ValueId) -> &Self::Output { - &self.values[id] - } -} - impl std::ops::Index for DataFlowGraph { type Output = BasicBlock; fn index(&self, id: BasicBlockId) -> &Self::Output { @@ -619,71 +465,19 @@ impl std::ops::IndexMut for DataFlowGraph { } } -// The result of calling DataFlowGraph::insert_instruction can -// be a list of results or a single ValueId if the instruction was simplified -// to an existing value. -#[derive(Debug)] -pub(crate) enum InsertInstructionResult<'dfg> { - /// Results is the standard case containing the instruction id and the results of that instruction. - Results(InstructionId, &'dfg [ValueId]), - SimplifiedTo(ValueId), - SimplifiedToMultiple(Vec), - InstructionRemoved, -} +impl std::ops::Index for DataFlowGraph { + type Output = String; -impl<'dfg> InsertInstructionResult<'dfg> { - /// Retrieve the first (and expected to be the only) result. - pub(crate) fn first(&self) -> ValueId { - match self { - InsertInstructionResult::SimplifiedTo(value) => *value, - InsertInstructionResult::SimplifiedToMultiple(values) => values[0], - InsertInstructionResult::Results(_, results) => { - assert_eq!(results.len(), 1); - results[0] - } - InsertInstructionResult::InstructionRemoved => { - panic!("Instruction was removed, no results") - } - } - } - - /// Return all the results contained in the internal results array. - /// This is used for instructions returning multiple results like function calls. 
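The enum being deleted from `dfg.rs` here carried a borrowed slice of result ids; its replacement in `instruction::insert_result` (introduced elsewhere in this PR) only needs the instruction id and a result count, because results are now identified by position. The code below is a simplified model of that shape, not the crate's actual definition (the real enum also has a `SimplifiedToMultiple` variant):

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Value {
    InstructionResult { instruction: u32, position: u16 },
}

enum InsertInstructionResult {
    SimplifiedTo(Value),
    Results { id: u32, result_count: u16 },
    InstructionRemoved,
}

impl InsertInstructionResult {
    // Retrieve the first (and expected to be the only) result.
    fn first(&self) -> Value {
        match self {
            InsertInstructionResult::SimplifiedTo(value) => *value,
            InsertInstructionResult::Results { id, result_count } => {
                assert_eq!(*result_count, 1);
                Value::InstructionResult { instruction: *id, position: 0 }
            }
            InsertInstructionResult::InstructionRemoved => {
                panic!("Instruction was removed, no results")
            }
        }
    }

    // Materialize every result from the id and the count; no stored slice is needed.
    fn results(&self) -> Vec<Value> {
        match self {
            InsertInstructionResult::SimplifiedTo(value) => vec![*value],
            InsertInstructionResult::Results { id, result_count } => (0..*result_count)
                .map(|position| Value::InstructionResult { instruction: *id, position })
                .collect(),
            InsertInstructionResult::InstructionRemoved => Vec::new(),
        }
    }
}

fn main() {
    let inserted = InsertInstructionResult::Results { id: 17, result_count: 1 };
    assert_eq!(inserted.first(), inserted.results()[0]);
}
```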
- pub(crate) fn results(self) -> Cow<'dfg, [ValueId]> { - match self { - InsertInstructionResult::Results(_, results) => Cow::Borrowed(results), - InsertInstructionResult::SimplifiedTo(result) => Cow::Owned(vec![result]), - InsertInstructionResult::SimplifiedToMultiple(results) => Cow::Owned(results), - InsertInstructionResult::InstructionRemoved => Cow::Owned(vec![]), - } - } - - /// Returns the amount of ValueIds contained - pub(crate) fn len(&self) -> usize { - match self { - InsertInstructionResult::SimplifiedTo(_) => 1, - InsertInstructionResult::SimplifiedToMultiple(results) => results.len(), - InsertInstructionResult::Results(_, results) => results.len(), - InsertInstructionResult::InstructionRemoved => 0, - } + fn index(&self, id: ForeignFunctionId) -> &Self::Output { + &self.foreign_functions[id] } } -impl<'dfg> std::ops::Index for InsertInstructionResult<'dfg> { - type Output = ValueId; +impl std::ops::Index for DataFlowGraph { + type Output = FieldElement; - fn index(&self, index: usize) -> &Self::Output { - match self { - InsertInstructionResult::Results(_, results) => &results[index], - InsertInstructionResult::SimplifiedTo(result) => { - assert_eq!(index, 0); - result - } - InsertInstructionResult::SimplifiedToMultiple(results) => &results[index], - InsertInstructionResult::InstructionRemoved => { - panic!("Cannot index into InsertInstructionResult::InstructionRemoved") - } - } + fn index(&self, id: FieldElementId) -> &Self::Output { + &self.numeric_constants[id] } } @@ -695,8 +489,8 @@ mod tests { #[test] fn make_instruction() { let mut dfg = DataFlowGraph::default(); - let ins = Instruction::Allocate; - let ins_id = dfg.make_instruction(ins, Some(vec![Type::field()])); + let ins = Instruction::Allocate { element_type: Type::field() }; + let ins_id = dfg.make_instruction(ins); let results = dfg.instruction_results(ins_id); assert_eq!(results.len(), 1); diff --git a/compiler/noirc_evaluator/src/ssa/ir/function.rs b/compiler/noirc_evaluator/src/ssa/ir/function.rs index 6413107c04a..63a8dd58887 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/function.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/function.rs @@ -9,7 +9,7 @@ use super::dfg::DataFlowGraph; use super::instruction::TerminatorInstruction; use super::map::Id; use super::types::Type; -use super::value::ValueId; +use super::value::Value; #[derive(Clone, Copy, PartialEq, Eq, Debug, Hash, Serialize, Deserialize)] pub(crate) enum RuntimeType { @@ -143,23 +143,20 @@ impl Function { /// Returns the parameters of this function. /// The parameters will always match that of this function's entry block. - pub(crate) fn parameters(&self) -> &[ValueId] { + pub(crate) fn parameters(&self) -> impl ExactSizeIterator { self.dfg.block_parameters(self.entry_block) } /// Returns the return types of this function. - pub(crate) fn returns(&self) -> &[ValueId] { + pub(crate) fn returns(&self) -> &[Value] { let blocks = self.reachable_blocks(); - let mut function_return_values = None; for block in blocks { let terminator = self.dfg[block].terminator(); if let Some(TerminatorInstruction::Return { return_values, .. }) = terminator { - function_return_values = Some(return_values); - break; + return return_values; } } - function_return_values - .expect("Expected a return instruction, as function construction is finished") + panic!("Expected a return instruction, as function construction is finished") } /// Collects all the reachable blocks of this function. 
@@ -180,7 +177,7 @@ impl Function { } pub(crate) fn signature(&self) -> Signature { - let params = vecmap(self.parameters(), |param| self.dfg.type_of_value(*param)); + let params = vecmap(self.parameters(), |param| self.dfg.type_of_value(param)); let returns = vecmap(self.returns(), |ret| self.dfg.type_of_value(*ret)); Signature { params, returns } } diff --git a/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs b/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs index 9e4557e06a6..0aeda411dcf 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs @@ -5,10 +5,10 @@ use crate::ssa::ir::types::Type; use super::{ basic_block::BasicBlockId, call_stack::CallStackId, - dfg::InsertInstructionResult, function::Function, + instruction::insert_result::InsertInstructionResult, instruction::{Instruction, InstructionId}, - value::ValueId, + value::Value, }; use fxhash::FxHashMap as HashMap; @@ -18,7 +18,7 @@ use fxhash::FxHashMap as HashMap; pub(crate) struct FunctionInserter<'f> { pub(crate) function: &'f mut Function, - values: HashMap, + values: HashMap, /// Map containing repeat array constants so that we do not initialize a new /// array unnecessarily. An extra tuple field is included as part of the key to @@ -34,7 +34,7 @@ pub(crate) struct FunctionInserter<'f> { pre_loop: Option, } -pub(crate) type ArrayCache = HashMap, HashMap>; +pub(crate) type ArrayCache = HashMap, HashMap>; impl<'f> FunctionInserter<'f> { pub(crate) fn new(function: &'f mut Function) -> FunctionInserter<'f> { @@ -44,7 +44,7 @@ impl<'f> FunctionInserter<'f> { /// Resolves a ValueId to its new, updated value. /// If there is no updated value for this id, this returns the same /// ValueId that was passed in. - pub(crate) fn resolve(&mut self, mut value: ValueId) -> ValueId { + pub(crate) fn resolve(&mut self, mut value: Value) -> Value { value = self.function.dfg.resolve(value); match self.values.get(&value) { Some(value) => self.resolve(*value), @@ -53,21 +53,23 @@ impl<'f> FunctionInserter<'f> { } /// Insert a key, value pair if the key isn't already present in the map - pub(crate) fn try_map_value(&mut self, key: ValueId, value: ValueId) { + pub(crate) fn try_map_value(&mut self, key: Value, value: Value) { if key == value { // This case is technically not needed since try_map_value isn't meant to change // existing entries, but we should never have a value in the map referring to itself anyway. self.values.remove(&key); } else { + assert!(!key.is_constant()); self.values.entry(key).or_insert(value); } } /// Insert a key, value pair in the map - pub(crate) fn map_value(&mut self, key: ValueId, value: ValueId) { + pub(crate) fn map_value(&mut self, key: Value, value: Value) { if key == value { self.values.remove(&key); } else { + assert!(!key.is_constant()); self.values.insert(key, value); } } @@ -105,7 +107,7 @@ impl<'f> FunctionInserter<'f> { let (instruction, location) = self.map_instruction(id); match self.push_instruction_value(instruction, id, block, location) { - InsertInstructionResult::Results(new_id, _) => Some(new_id), + InsertInstructionResult::Results { id: new_id, .. 
} => Some(new_id), _ => None, } } @@ -118,11 +120,7 @@ impl<'f> FunctionInserter<'f> { call_stack: CallStackId, ) -> InsertInstructionResult { let results = self.function.dfg.instruction_results(id); - let results = vecmap(results, |id| self.function.dfg.resolve(*id)); - - let ctrl_typevars = instruction - .requires_ctrl_typevars() - .then(|| vecmap(&results, |result| self.function.dfg.type_of_value(*result))); + let results = vecmap(results, |id| self.function.dfg.resolve(id)); // Large arrays can lead to OOM panics if duplicated from being unrolled in loops. // To prevent this, try to reuse the same ID for identical arrays instead of inserting @@ -148,12 +146,8 @@ impl<'f> FunctionInserter<'f> { None }; - let new_results = self.function.dfg.insert_instruction_and_results( - instruction, - block, - ctrl_typevars, - call_stack, - ); + let new_results = + self.function.dfg.insert_instruction_and_results(instruction, block, call_stack); // Cache an array in the fresh_array_cache if array caching is enabled. // The fresh cache isn't used for deduplication until an external pass confirms we @@ -167,22 +161,22 @@ impl<'f> FunctionInserter<'f> { new_results } - fn get_cached_array(&self, elements: &im::Vector, typ: &Type) -> Option { + fn get_cached_array(&self, elements: &im::Vector, typ: &Type) -> Option { self.array_cache.as_ref()?.get(elements)?.get(typ).copied() } fn cache_array( arrays: &mut Option, - elements: im::Vector, + elements: im::Vector, typ: Type, - result_id: ValueId, + result_id: Value, ) { if let Some(arrays) = arrays { arrays.entry(elements).or_default().insert(typ, result_id); } } - fn array_is_constant(&self, elements: &im::Vector) -> bool { + fn array_is_constant(&self, elements: &im::Vector) -> bool { elements.iter().all(|element| self.function.dfg.is_constant(*element)) } @@ -206,35 +200,41 @@ impl<'f> FunctionInserter<'f> { /// Modify the values HashMap to remember the mapping between an instruction result's previous /// ValueId (from the source_function) and its new ValueId in the destination function. 
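The new `assert!(!key.is_constant())` guards above, and the matching checks in `insert_new_instruction_results` below, exist because constants are now self-describing values rather than per-function ids, so they never need renaming when instructions are re-inserted. A small sketch of that rule with stand-in types:

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Value {
    NumericConstant(u128),
    InstructionResult { instruction: u32, position: u16 },
}

impl Value {
    fn is_constant(&self) -> bool {
        matches!(self, Value::NumericConstant(_))
    }
}

// Only non-constant values go into the old -> new map; a constant already *is* its value.
fn remember(values: &mut HashMap<Value, Value>, old: Value, new: Value) {
    if !old.is_constant() && old != new {
        values.insert(old, new);
    }
}

fn main() {
    let mut values = HashMap::new();
    remember(
        &mut values,
        Value::InstructionResult { instruction: 1, position: 0 },
        Value::InstructionResult { instruction: 8, position: 0 },
    );
    remember(&mut values, Value::NumericConstant(5), Value::NumericConstant(5));
    assert_eq!(values.len(), 1); // the constant was not recorded
}
```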
pub(crate) fn insert_new_instruction_results( - values: &mut HashMap, - old_results: &[ValueId], + values: &mut HashMap, + old_results: &[Value], new_results: &InsertInstructionResult, ) { - assert_eq!(old_results.len(), new_results.len()); + assert_eq!(old_results.len(), new_results.len() as usize); match new_results { InsertInstructionResult::SimplifiedTo(new_result) => { - values.insert(old_results[0], *new_result); + if !old_results[0].is_constant() { + values.insert(old_results[0], *new_result); + } } InsertInstructionResult::SimplifiedToMultiple(new_results) => { for (old_result, new_result) in old_results.iter().zip(new_results) { - values.insert(*old_result, *new_result); + if !old_result.is_constant() { + values.insert(*old_result, *new_result); + } } } - InsertInstructionResult::Results(_, new_results) => { - for (old_result, new_result) in old_results.iter().zip(*new_results) { - values.insert(*old_result, *new_result); + InsertInstructionResult::Results { id, result_count: _ } => { + for (i, old_result) in old_results.iter().enumerate() { + if !old_result.is_constant() { + values.insert(*old_result, Value::instruction_result(*id, i as u16)); + } } } InsertInstructionResult::InstructionRemoved => (), } } - pub(crate) fn remember_block_params(&mut self, block: BasicBlockId, new_values: &[ValueId]) { + pub(crate) fn remember_block_params(&mut self, block: BasicBlockId, new_values: &[Value]) { let old_parameters = self.function.dfg.block_parameters(block); - for (param, new_param) in old_parameters.iter().zip(new_values) { - self.values.entry(*param).or_insert(*new_param); + for (param, new_param) in old_parameters.zip(new_values) { + self.values.entry(param).or_insert(*new_param); } } @@ -246,9 +246,9 @@ impl<'f> FunctionInserter<'f> { let old_parameters = self.function.dfg.block_parameters(block); let new_parameters = self.function.dfg.block_parameters(new_block); - for (param, new_param) in old_parameters.iter().zip(new_parameters) { + for (param, new_param) in old_parameters.zip(new_parameters) { // Don't overwrite any existing entries to avoid overwriting the induction variable - self.values.entry(*param).or_insert(*new_param); + self.values.entry(param).or_insert(new_param); } } } diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs index fccd3b87d3a..a6da60fc4c1 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs @@ -1,5 +1,8 @@ use serde::{Deserialize, Serialize}; -use std::hash::{Hash, Hasher}; +use std::{ + hash::{Hash, Hasher}, + sync::Arc, +}; use acvm::{ acir::AcirField, @@ -19,13 +22,14 @@ use super::{ function::Function, map::Id, types::{NumericType, Type}, - value::{Value, ValueId}, + value::Value, }; mod binary; mod call; mod cast; mod constrain; +pub(crate) mod insert_result; pub(crate) use binary::{Binary, BinaryOp}; use call::simplify_call; @@ -40,13 +44,15 @@ use constrain::decompose_constrain; /// placement within a block. pub(crate) type InstructionId = Id; +pub(crate) type BitSize = u8; + /// These are similar to built-ins in other languages. /// These can be classified under two categories: /// - Opcodes which the IR knows the target machine has /// special support for. (LowLevel) /// - Opcodes which have no function definition in the /// source code and must be processed by the IR. 
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] pub(crate) enum Intrinsic { ArrayLen, ArrayAsStrUnchecked, @@ -221,14 +227,14 @@ impl Intrinsic { } /// The endian-ness of bits when encoding values as bits in e.g. ToBits or ToRadix -#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] pub(crate) enum Endian { Big, Little, } /// Compiler hints. -#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] pub(crate) enum Hint { /// Hint to the compiler to treat the call as having potential side effects, /// so that the value passed to it can survive SSA passes without being @@ -245,33 +251,33 @@ pub(crate) enum Instruction { Binary(Binary), /// Converts `Value` into the given NumericType - Cast(ValueId, NumericType), + Cast(Value, NumericType), /// Computes a bit wise not - Not(ValueId), + Not(Value), /// Truncates `value` to `bit_size` - Truncate { value: ValueId, bit_size: u32, max_bit_size: u32 }, + Truncate { value: Value, bit_size: BitSize, max_bit_size: BitSize }, /// Constrains two values to be equal to one another. - Constrain(ValueId, ValueId, Option), + Constrain(Value, Value, Option), /// Range constrain `value` to `max_bit_size` - RangeCheck { value: ValueId, max_bit_size: u32, assert_message: Option }, + RangeCheck { value: Value, max_bit_size: BitSize, assert_message: Option }, /// Performs a function call with a list of its arguments. - Call { func: ValueId, arguments: Vec }, + Call { func: Value, arguments: Vec, result_types: Vec }, /// Allocates a region of memory. Note that this is not concerned with /// the type of memory, the type of element is determined when loading this memory. /// This is used for representing mutable variables and references. - Allocate, + Allocate { element_type: Type }, /// Loads a value from memory. - Load { address: ValueId }, + Load { address: Value, result_type: Type }, /// Writes a value to memory. - Store { address: ValueId, value: ValueId }, + Store { address: Value, value: Value }, /// Provides a context for all instructions that follow up until the next /// `EnableSideEffectsIf` is encountered, for stating a condition that determines whether @@ -290,29 +296,29 @@ pub(crate) enum Instruction { /// This instruction is only emitted after the cfg flattening pass, and is used to annotate /// instruction regions with a condition that corresponds to their position in the CFG's /// if-branching structure. - EnableSideEffectsIf { condition: ValueId }, + EnableSideEffectsIf { condition: Value }, /// Retrieve a value from an array at the given index - ArrayGet { array: ValueId, index: ValueId }, + ArrayGet { array: Value, index: Value, result_type: Type }, /// Creates a new array with the new value at the given index. All other elements are identical /// to those in the given array. This will not modify the original array unless `mutable` is /// set. This flag is off by default and only enabled when optimizations determine it is safe. - ArraySet { array: ValueId, index: ValueId, value: ValueId, mutable: bool }, + ArraySet { array: Value, index: Value, value: Value, mutable: bool }, /// An instruction to increment the reference count of a value. 
/// /// This currently only has an effect in Brillig code where array sharing and copy on write is /// implemented via reference counting. In ACIR code this is done with im::Vector and these /// IncrementRc instructions are ignored. - IncrementRc { value: ValueId }, + IncrementRc { value: Value }, /// An instruction to decrement the reference count of a value. /// /// This currently only has an effect in Brillig code where array sharing and copy on write is /// implemented via reference counting. In ACIR code this is done with im::Vector and these /// DecrementRc instructions are ignored. - DecrementRc { value: ValueId }, + DecrementRc { value: Value }, /// Merge two values returned from opposite branches of a conditional into one. /// @@ -323,23 +329,18 @@ pub(crate) enum Instruction { /// else_value /// } /// ``` - IfElse { - then_condition: ValueId, - then_value: ValueId, - else_condition: ValueId, - else_value: ValueId, - }, + IfElse { then_condition: Value, then_value: Value, else_condition: Value, else_value: Value }, /// Creates a new array or slice. /// /// `typ` should be an array or slice type with an element type /// matching each of the `elements` values' types. - MakeArray { elements: im::Vector, typ: Type }, + MakeArray { elements: im::Vector, typ: Type }, } impl Instruction { /// Returns a binary instruction with the given operator, lhs, and rhs - pub(crate) fn binary(operator: BinaryOp, lhs: ValueId, rhs: ValueId) -> Instruction { + pub(crate) fn binary(operator: BinaryOp, lhs: Value, rhs: Value) -> Instruction { Instruction::Binary(Binary { lhs, operator, rhs }) } @@ -347,6 +348,17 @@ impl Instruction { pub(crate) fn result_type(&self) -> InstructionResultType { match self { Instruction::Binary(binary) => binary.result_type(), + Instruction::Load { result_type: typ, .. } + | Instruction::ArrayGet { result_type: typ, .. } => { + InstructionResultType::Known(typ.clone()) + } + Instruction::Call { result_types, .. } => { + InstructionResultType::Multiple(result_types.clone()) + } + Instruction::Allocate { element_type } => { + let typ = Type::Reference(Arc::new(element_type.clone())); + InstructionResultType::Known(typ) + } Instruction::Cast(_, typ) => InstructionResultType::Known(Type::Numeric(*typ)), Instruction::MakeArray { typ, .. } => InstructionResultType::Known(typ.clone()), Instruction::Not(value) @@ -361,19 +373,9 @@ impl Instruction { | Instruction::DecrementRc { .. } | Instruction::RangeCheck { .. } | Instruction::EnableSideEffectsIf { .. } => InstructionResultType::None, - Instruction::Allocate { .. } - | Instruction::Load { .. } - | Instruction::ArrayGet { .. } - | Instruction::Call { .. } => InstructionResultType::Unknown, } } - /// True if this instruction requires specifying the control type variables when - /// inserting this instruction into a DataFlowGraph. - pub(crate) fn requires_ctrl_typevars(&self) -> bool { - matches!(self.result_type(), InstructionResultType::Unknown) - } - /// Indicates if the instruction has a side effect, ie. it can fail, or it interacts with memory. /// /// This is similar to `can_be_deduplicated`, but it doesn't depend on whether the caller takes @@ -384,13 +386,13 @@ impl Instruction { match self { // These either have side-effects or interact with memory EnableSideEffectsIf { .. } - | Allocate + | Allocate { .. } | Load { .. } | Store { .. } | IncrementRc { .. } | DecrementRc { .. } => true, - Call { func, .. } => match dfg[*func] { + Call { func, .. 
} => match *func { Value::Intrinsic(intrinsic) => intrinsic.has_side_effects(), _ => true, // Be conservative and assume other functions can have side effects. }, @@ -444,13 +446,13 @@ impl Instruction { match self { // These either have side-effects or interact with memory EnableSideEffectsIf { .. } - | Allocate + | Allocate { .. } | Load { .. } | Store { .. } | IncrementRc { .. } | DecrementRc { .. } => false, - Call { func, .. } => match function.dfg[*func] { + Call { func, .. } => match *func { Value::Intrinsic(intrinsic) => { intrinsic.can_be_deduplicated(deduplicate_with_predicate) } @@ -499,7 +501,7 @@ impl Instruction { Cast(_, _) | Not(_) | Truncate { .. } - | Allocate + | Allocate { .. } | Load { .. } | ArrayGet { .. } | IfElse { .. } @@ -525,7 +527,7 @@ impl Instruction { | RangeCheck { .. } => false, // Some `Intrinsic`s have side effects so we must check what kind of `Call` this is. - Call { func, .. } => match function.dfg[*func] { + Call { func, .. } => match *func { // Explicitly allows removal of unused ec operations, even if they can fail Value::Intrinsic(Intrinsic::BlackBox(BlackBoxFunc::MultiScalarMul)) | Value::Intrinsic(Intrinsic::BlackBox(BlackBoxFunc::EmbeddedCurveAdd)) => true, @@ -569,14 +571,14 @@ impl Instruction { } } - Instruction::ArrayGet { array, index } => { + Instruction::ArrayGet { array, index, result_type: _ } => { // `ArrayGet`s which read from "known good" indices from an array should not need a predicate. !dfg.is_safe_index(*index, *array) } Instruction::EnableSideEffectsIf { .. } | Instruction::ArraySet { .. } => true, - Instruction::Call { func, .. } => match dfg[*func] { + Instruction::Call { func, .. } => match *func { Value::Function(_) => true, Value::Intrinsic(intrinsic) => { matches!(intrinsic, Intrinsic::SliceInsert | Intrinsic::SliceRemove) @@ -588,7 +590,7 @@ impl Instruction { | Instruction::Truncate { .. } | Instruction::Constrain(_, _, _) | Instruction::RangeCheck { .. } - | Instruction::Allocate + | Instruction::Allocate { .. } | Instruction::Load { .. } | Instruction::Store { .. } | Instruction::IfElse { .. } @@ -598,10 +600,10 @@ impl Instruction { } } - /// Maps each ValueId inside this instruction to a new ValueId, returning the new instruction. + /// Maps each Value inside this instruction to a new Value, returning the new instruction. /// Note that the returned instruction is fresh and will not have an assigned InstructionId /// until it is manually inserted in a DataFlowGraph later. 
- pub(crate) fn map_values(&self, mut f: impl FnMut(ValueId) -> ValueId) -> Instruction { + pub(crate) fn map_values(&self, mut f: impl FnMut(Value) -> Value) -> Instruction { match self { Instruction::Binary(binary) => Instruction::Binary(Binary { lhs: f(binary.lhs), @@ -631,21 +633,28 @@ impl Instruction { }); Instruction::Constrain(lhs, rhs, assert_message) } - Instruction::Call { func, arguments } => Instruction::Call { + Instruction::Call { func, arguments, result_types } => Instruction::Call { func: f(*func), arguments: vecmap(arguments.iter().copied(), f), + result_types: result_types.clone(), }, - Instruction::Allocate => Instruction::Allocate, - Instruction::Load { address } => Instruction::Load { address: f(*address) }, + Instruction::Allocate { element_type } => { + Instruction::Allocate { element_type: element_type.clone() } + } + Instruction::Load { address, result_type } => { + Instruction::Load { address: f(*address), result_type: result_type.clone() } + } Instruction::Store { address, value } => { Instruction::Store { address: f(*address), value: f(*value) } } Instruction::EnableSideEffectsIf { condition } => { Instruction::EnableSideEffectsIf { condition: f(*condition) } } - Instruction::ArrayGet { array, index } => { - Instruction::ArrayGet { array: f(*array), index: f(*index) } - } + Instruction::ArrayGet { array, index, result_type } => Instruction::ArrayGet { + array: f(*array), + index: f(*index), + result_type: result_type.clone(), + }, Instruction::ArraySet { array, index, value, mutable } => Instruction::ArraySet { array: f(*array), index: f(*index), @@ -676,8 +685,8 @@ impl Instruction { } } - /// Maps each ValueId inside this instruction to a new ValueId in place. - pub(crate) fn map_values_mut(&mut self, mut f: impl FnMut(ValueId) -> ValueId) { + /// Maps each Value inside this instruction to a new Value in place. + pub(crate) fn map_values_mut(&mut self, mut f: impl FnMut(Value) -> Value) { match self { Instruction::Binary(binary) => { binary.lhs = f(binary.lhs); @@ -697,14 +706,14 @@ impl Instruction { } } } - Instruction::Call { func, arguments } => { + Instruction::Call { func, arguments, result_types: _ } => { *func = f(*func); for argument in arguments { *argument = f(*argument); } } - Instruction::Allocate => (), - Instruction::Load { address } => *address = f(*address), + Instruction::Allocate { element_type: _ } => (), + Instruction::Load { address, result_type: _ } => *address = f(*address), Instruction::Store { address, value } => { *address = f(*address); *value = f(*value); @@ -712,7 +721,7 @@ impl Instruction { Instruction::EnableSideEffectsIf { condition } => { *condition = f(*condition); } - Instruction::ArrayGet { array, index } => { + Instruction::ArrayGet { array, index, result_type: _ } => { *array = f(*array); *index = f(*index); } @@ -741,13 +750,13 @@ impl Instruction { } /// Applies a function to each input value this instruction holds. - pub(crate) fn for_each_value(&self, mut f: impl FnMut(ValueId) -> T) { + pub(crate) fn for_each_value(&self, mut f: impl FnMut(Value) -> T) { match self { Instruction::Binary(binary) => { f(binary.lhs); f(binary.rhs); } - Instruction::Call { func, arguments } => { + Instruction::Call { func, arguments, result_types: _ } => { f(*func); for argument in arguments { f(*argument); @@ -756,7 +765,7 @@ impl Instruction { Instruction::Cast(value, _) | Instruction::Not(value) | Instruction::Truncate { value, .. 
} - | Instruction::Load { address: value } => { + | Instruction::Load { address: value, result_type: _ } => { f(*value); } Instruction::Constrain(lhs, rhs, assert_error) => { @@ -774,7 +783,7 @@ impl Instruction { f(*value); } Instruction::Allocate { .. } => (), - Instruction::ArrayGet { array, index } => { + Instruction::ArrayGet { array, index, result_type: _ } => { f(*array); f(*index); } @@ -805,6 +814,54 @@ impl Instruction { } } + /// Iterate over the types in this instruction, if any. + /// Note that this skips over any `NumericType`s. + pub(crate) fn for_each_type(&self, mut f: impl FnMut(&Type)) { + match self { + Instruction::Binary(_) + | Instruction::Cast(_, _) + | Instruction::Not(_) + | Instruction::Truncate { .. } + | Instruction::Constrain(..) + | Instruction::RangeCheck { .. } + | Instruction::ArraySet { .. } + | Instruction::IncrementRc { .. } + | Instruction::DecrementRc { .. } + | Instruction::IfElse { .. } + | Instruction::Store { .. } + | Instruction::EnableSideEffectsIf { .. } => (), + Instruction::Allocate { element_type } => f(element_type), + Instruction::Load { result_type, .. } => f(result_type), + Instruction::ArrayGet { result_type, .. } => f(result_type), + Instruction::MakeArray { typ, .. } => f(typ), + Instruction::Call { result_types, .. } => result_types.iter().for_each(f), + } + } + + /// Mutate the types on this instruction. + /// Note that `NumericType`s are skipped over. + pub(crate) fn map_types_mut(&mut self, mut f: impl FnMut(&mut Type)) { + match self { + Instruction::Binary(_) + | Instruction::Cast(_, _) + | Instruction::Not(_) + | Instruction::Truncate { .. } + | Instruction::Constrain(..) + | Instruction::RangeCheck { .. } + | Instruction::ArraySet { .. } + | Instruction::IncrementRc { .. } + | Instruction::DecrementRc { .. } + | Instruction::IfElse { .. } + | Instruction::Store { .. } + | Instruction::EnableSideEffectsIf { .. } => (), + Instruction::Allocate { element_type } => f(element_type), + Instruction::Load { result_type, .. } => f(result_type), + Instruction::ArrayGet { result_type, .. } => f(result_type), + Instruction::MakeArray { typ, .. } => f(typ), + Instruction::Call { result_types, .. } => result_types.iter_mut().for_each(f), + } + } + /// Try to simplify this instruction. If the instruction can be simplified to a known value, /// that value is returned. Otherwise None is returned. /// @@ -814,7 +871,6 @@ impl Instruction { &self, dfg: &mut DataFlowGraph, block: BasicBlockId, - ctrl_typevars: Option>, call_stack: CallStackId, ) -> SimplifyResult { use SimplifyResult::*; @@ -822,18 +878,19 @@ impl Instruction { Instruction::Binary(binary) => binary.simplify(dfg), Instruction::Cast(value, typ) => simplify_cast(*value, *typ, dfg), Instruction::Not(value) => { - match &dfg[dfg.resolve(*value)] { + match dfg.resolve(*value) { // Limit optimizing ! on constants to only booleans. If we tried it on fields, // there is no Not on FieldElement, so we'd need to convert between u128. This // would be incorrect however since the extra bits on the field would not be flipped. Value::NumericConstant { constant, typ } if typ.is_unsigned() => { // As we're casting to a `u128`, we need to clear out any upper bits that the NOT fills. + let constant = &dfg[constant]; let value = !constant.to_u128() % (1 << typ.bit_size()); - SimplifiedTo(dfg.make_constant(value.into(), *typ)) + SimplifiedTo(dfg.constant(value.into(), typ)) } Value::Instruction { instruction, .. 
} => { // !!v => v - if let Instruction::Not(value) = &dfg[*instruction] { + if let Instruction::Not(value) = &dfg[instruction] { SimplifiedTo(*value) } else { None @@ -850,7 +907,7 @@ impl Instruction { SimplifiedToInstructionMultiple(constraints) } } - Instruction::ArrayGet { array, index } => { + Instruction::ArrayGet { array, index, result_type: _ } => { if let Some(index) = dfg.get_numeric_constant(*index) { try_optimize_array_get_from_previous_set(dfg, *array, index) } else { @@ -868,12 +925,8 @@ impl Instruction { let elements = array.update(index, *value); let typ = dfg.type_of_value(*array_id); let instruction = Instruction::MakeArray { elements, typ }; - let new_array = dfg.insert_instruction_and_results( - instruction, - block, - Option::None, - call_stack, - ); + let new_array = + dfg.insert_instruction_and_results(instruction, block, call_stack); return SimplifiedTo(new_array.first()); } } @@ -885,11 +938,11 @@ impl Instruction { return SimplifiedTo(*value); } if let Some((numeric_constant, typ)) = dfg.get_numeric_constant_with_type(*value) { - let integer_modulus = 2_u128.pow(*bit_size); + let integer_modulus = 2_u128.pow(*bit_size as u32); let truncated = numeric_constant.to_u128() % integer_modulus; - SimplifiedTo(dfg.make_constant(truncated.into(), typ)) - } else if let Value::Instruction { instruction, .. } = &dfg[dfg.resolve(*value)] { - match &dfg[*instruction] { + SimplifiedTo(dfg.constant(truncated.into(), typ)) + } else if let Value::Instruction { instruction, .. } = dfg.resolve(*value) { + match &dfg[instruction] { Instruction::Truncate { bit_size: src_bit_size, .. } => { // If we're truncating the value to fit into the same or larger bit size then this is a noop. if src_bit_size <= bit_size && src_bit_size <= max_bit_size { @@ -911,7 +964,8 @@ impl Instruction { let divisor = dfg .get_numeric_constant(*rhs) .expect("rhs is checked to be constant."); - let divisor_bits = divisor.num_bits(); + + let divisor_bits: BitSize = divisor.num_bits().try_into().unwrap(); // 2^{max_quotient_bits} = 2^{max_numerator_bits} / 2^{divisor_bits} // => max_quotient_bits = max_numerator_bits - divisor_bits @@ -931,8 +985,8 @@ impl Instruction { None } } - Instruction::Call { func, arguments } => { - simplify_call(*func, arguments, dfg, block, ctrl_typevars, call_stack) + Instruction::Call { func, arguments, result_types } => { + simplify_call(*func, arguments, result_types, dfg, block, call_stack) } Instruction::EnableSideEffectsIf { condition } => { if let Some(last) = dfg[block].instructions().last().copied() { @@ -994,6 +1048,29 @@ impl Instruction { Instruction::MakeArray { .. } => None, } } + + /// Returns the number of results this instruction produces + pub(crate) fn result_count(&self) -> u32 { + match self { + Instruction::Constrain(..) + | Instruction::RangeCheck { .. } + | Instruction::Store { .. } + | Instruction::EnableSideEffectsIf { .. } + | Instruction::IncrementRc { .. } + | Instruction::DecrementRc { .. } => 0, + Instruction::Binary(_) + | Instruction::Cast(_, _) + | Instruction::Not(_) + | Instruction::Truncate { .. } + | Instruction::Allocate { .. } + | Instruction::Load { .. } + | Instruction::ArrayGet { .. } + | Instruction::ArraySet { .. } + | Instruction::IfElse { .. } + | Instruction::MakeArray { .. } => 1, + Instruction::Call { result_types, .. } => result_types.len() as u32, + } + } } /// Given a chain of operations like: @@ -1013,7 +1090,7 @@ impl Instruction { /// - If the array value is from a previous array-set, we recur. 
fn try_optimize_array_get_from_previous_set( dfg: &DataFlowGraph, - mut array_id: Id, + mut the_array: Value, target_index: FieldElement, ) -> SimplifyResult { let mut elements = None; @@ -1021,16 +1098,16 @@ fn try_optimize_array_get_from_previous_set( // Arbitrary number of maximum tries just to prevent this optimization from taking too long. let max_tries = 5; for _ in 0..max_tries { - match &dfg[array_id] { + match the_array { Value::Instruction { instruction, .. } => { - match &dfg[*instruction] { + match &dfg[instruction] { Instruction::ArraySet { array, index, value, .. } => { if let Some(constant) = dfg.get_numeric_constant(*index) { if constant == target_index { return SimplifyResult::SimplifiedTo(*value); } - array_id = *array; // recur + the_array = *array; // recur } else { return SimplifyResult::None; } @@ -1084,13 +1161,13 @@ fn try_optimize_array_get_from_previous_set( /// - If they are not equal, recur marking the current `array_set` array as the new array id to use in the checks fn try_optimize_array_set_from_previous_get( dfg: &DataFlowGraph, - mut array_id: ValueId, - target_index: ValueId, - target_value: ValueId, + mut array_id: Value, + target_index: Value, + target_value: Value, ) -> SimplifyResult { - let array_from_get = match &dfg[target_value] { - Value::Instruction { instruction, .. } => match &dfg[*instruction] { - Instruction::ArrayGet { array, index } => { + let array_from_get = match target_value { + Value::Instruction { instruction, .. } => match &dfg[instruction] { + Instruction::ArrayGet { array, index, result_type: _ } => { if *array == array_id && *index == target_index { // If array and index match from the value, we can immediately simplify return SimplifyResult::SimplifiedTo(array_id); @@ -1121,8 +1198,8 @@ fn try_optimize_array_set_from_previous_get( // Arbitrary number of maximum tries just to prevent this optimization from taking too long. let max_tries = 5; for _ in 0..max_tries { - match &dfg[array_id] { - Value::Instruction { instruction, .. } => match &dfg[*instruction] { + match array_id { + Value::Instruction { instruction, .. } => match &dfg[instruction] { Instruction::ArraySet { array, index, .. } => { let Some(index) = dfg.get_numeric_constant(*index) else { return SimplifyResult::None; @@ -1168,7 +1245,7 @@ pub(crate) enum ConstrainError { StaticString(String), // These errors are handled by the program as data. // We use a boolean to indicate if the error is a string for printing purposes. - Dynamic(ErrorSelector, /* is_string */ bool, Vec), + Dynamic(ErrorSelector, /* is_string */ bool, Vec), } impl From for ConstrainError { @@ -1186,14 +1263,13 @@ impl From for Box { /// The possible return values for Instruction::return_types pub(crate) enum InstructionResultType { /// The result type of this instruction matches that of this operand - Operand(ValueId), + Operand(Value), /// The result type of this instruction is known to be this type - independent of its operands. Known(Type), - /// The result type of this function is unknown and separate from its operand types. - /// This occurs for function calls and load operations. - Unknown, + /// Function calls are a special case, they may return multiple values + Multiple(Vec), /// This instruction does not return any results. None, @@ -1214,7 +1290,7 @@ pub(crate) enum TerminatorInstruction { /// If the condition is true: jump to the specified `then_destination`. /// Otherwise, jump to the specified `else_destination`. 
JmpIf { - condition: ValueId, + condition: Value, then_destination: BasicBlockId, else_destination: BasicBlockId, call_stack: CallStackId, @@ -1225,7 +1301,7 @@ pub(crate) enum TerminatorInstruction { /// Jumps to specified `destination` with `arguments`. /// The CallStack here is expected to be used to issue an error when the start range of /// a for loop cannot be deduced at compile-time. - Jmp { destination: BasicBlockId, arguments: Vec, call_stack: CallStackId }, + Jmp { destination: BasicBlockId, arguments: Vec, call_stack: CallStackId }, /// Return from the current function with the given return values. /// @@ -1234,15 +1310,12 @@ pub(crate) enum TerminatorInstruction { /// unconditionally jump to a single exit block with the return values /// as the block arguments. Then the exit block can terminate in a return /// instruction returning these values. - Return { return_values: Vec, call_stack: CallStackId }, + Return { return_values: Vec, call_stack: CallStackId }, } impl TerminatorInstruction { - /// Map each ValueId in this terminator to a new value. - pub(crate) fn map_values( - &self, - mut f: impl FnMut(ValueId) -> ValueId, - ) -> TerminatorInstruction { + /// Map each Value in this terminator to a new value. + pub(crate) fn map_values(&self, mut f: impl FnMut(Value) -> Value) -> TerminatorInstruction { use TerminatorInstruction::*; match self { JmpIf { condition, then_destination, else_destination, call_stack } => JmpIf { @@ -1263,8 +1336,8 @@ impl TerminatorInstruction { } } - /// Mutate each ValueId to a new ValueId using the given mapping function - pub(crate) fn map_values_mut(&mut self, mut f: impl FnMut(ValueId) -> ValueId) { + /// Mutate each Value to a new Value using the given mapping function + pub(crate) fn map_values_mut(&mut self, mut f: impl FnMut(Value) -> Value) { use TerminatorInstruction::*; match self { JmpIf { condition, .. } => { @@ -1284,7 +1357,7 @@ impl TerminatorInstruction { } /// Apply a function to each value - pub(crate) fn for_each_value(&self, mut f: impl FnMut(ValueId) -> T) { + pub(crate) fn for_each_value(&self, mut f: impl FnMut(Value) -> T) { use TerminatorInstruction::*; match self { JmpIf { condition, .. } => { @@ -1339,12 +1412,12 @@ impl TerminatorInstruction { /// should be simplified. pub(crate) enum SimplifyResult { /// Replace this function's result with the given value - SimplifiedTo(ValueId), + SimplifiedTo(Value), /// Replace this function's results with the given values /// Used for when there are multiple return values from /// a function such as a tuple - SimplifiedToMultiple(Vec), + SimplifiedToMultiple(Vec), /// Replace this function with an simpler but equivalent instruction. 
SimplifiedToInstruction(Instruction), @@ -1361,10 +1434,14 @@ pub(crate) enum SimplifyResult { } impl SimplifyResult { - pub(crate) fn instructions(self) -> Option> { + pub(crate) fn instruction(self) -> Option { match self { - SimplifyResult::SimplifiedToInstruction(instruction) => Some(vec![instruction]), - SimplifyResult::SimplifiedToInstructionMultiple(instructions) => Some(instructions), + SimplifyResult::SimplifiedToInstruction(instruction) => Some(instruction), + SimplifyResult::SimplifiedToInstructionMultiple(mut instructions) + if instructions.len() == 1 => + { + Some(instructions.pop().unwrap()) + } _ => None, } } diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/binary.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/binary.rs index 81f2f3b1e01..16f8cab9afe 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/binary.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/binary.rs @@ -2,7 +2,7 @@ use acvm::{acir::AcirField, FieldElement}; use serde::{Deserialize, Serialize}; use super::{ - DataFlowGraph, Instruction, InstructionResultType, NumericType, SimplifyResult, Type, ValueId, + DataFlowGraph, Instruction, InstructionResultType, NumericType, SimplifyResult, Type, Value, }; /// Binary Operations allowed in the IR. @@ -68,9 +68,9 @@ impl std::fmt::Display for BinaryOp { #[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)] pub(crate) struct Binary { /// Left hand side of the binary operation - pub(crate) lhs: ValueId, + pub(crate) lhs: Value, /// Right hand side of the binary operation - pub(crate) rhs: ValueId, + pub(crate) rhs: Value, /// The binary operation to apply pub(crate) operator: BinaryOp, } @@ -93,7 +93,7 @@ impl Binary { if let (Some(lhs), Some(rhs)) = (lhs, rhs) { return match eval_constant_binary_op(lhs, rhs, self.operator, operand_type) { Some((result, result_type)) => { - let value = dfg.make_constant(result, result_type); + let value = dfg.constant(result, result_type); SimplifyResult::SimplifiedTo(value) } None => SimplifyResult::None, @@ -128,7 +128,7 @@ impl Binary { return SimplifyResult::SimplifiedTo(self.lhs); } if lhs_is_zero || rhs_is_zero { - let zero = dfg.make_constant(FieldElement::zero(), operand_type); + let zero = dfg.constant(FieldElement::zero(), operand_type); return SimplifyResult::SimplifiedTo(zero); } if dfg.resolve(self.lhs) == dfg.resolve(self.rhs) @@ -145,7 +145,7 @@ impl Binary { } BinaryOp::Mod => { if rhs_is_one { - let zero = dfg.make_constant(FieldElement::zero(), operand_type); + let zero = dfg.constant(FieldElement::zero(), operand_type); return SimplifyResult::SimplifiedTo(zero); } if operand_type.is_unsigned() { @@ -154,11 +154,10 @@ impl Binary { if let Some(modulus) = rhs { let modulus = modulus.to_u128(); if modulus.is_power_of_two() { - let bit_size = modulus.ilog2(); return SimplifyResult::SimplifiedToInstruction( Instruction::Truncate { value: self.lhs, - bit_size, + bit_size: modulus.ilog2().try_into().unwrap(), max_bit_size: operand_type.bit_size(), }, ); @@ -168,8 +167,7 @@ impl Binary { } BinaryOp::Eq => { if dfg.resolve(self.lhs) == dfg.resolve(self.rhs) { - let one = dfg.make_constant(FieldElement::one(), NumericType::bool()); - return SimplifyResult::SimplifiedTo(one); + return SimplifyResult::SimplifiedTo(dfg.bool_constant(true)); } if operand_type == NumericType::bool() { @@ -191,27 +189,24 @@ impl Binary { } BinaryOp::Lt => { if dfg.resolve(self.lhs) == dfg.resolve(self.rhs) { - let zero = dfg.make_constant(FieldElement::zero(), NumericType::bool()); - return 
SimplifyResult::SimplifiedTo(zero); + return SimplifyResult::SimplifiedTo(dfg.bool_constant(false)); } if operand_type.is_unsigned() { if rhs_is_zero { // Unsigned values cannot be less than zero. - let zero = dfg.make_constant(FieldElement::zero(), NumericType::bool()); - return SimplifyResult::SimplifiedTo(zero); + return SimplifyResult::SimplifiedTo(dfg.bool_constant(false)); } else if rhs_is_one { - let zero = dfg.make_constant(FieldElement::zero(), operand_type); return SimplifyResult::SimplifiedToInstruction(Instruction::binary( BinaryOp::Eq, self.lhs, - zero, + dfg.constant(FieldElement::zero(), operand_type), )); } } } BinaryOp::And => { if lhs_is_zero || rhs_is_zero { - let zero = dfg.make_constant(FieldElement::zero(), operand_type); + let zero = dfg.constant(FieldElement::zero(), operand_type); return SimplifyResult::SimplifiedTo(zero); } if dfg.resolve(self.lhs) == dfg.resolve(self.rhs) { @@ -234,7 +229,7 @@ impl Binary { let bitmask_plus_one = bitmask.to_u128() + 1; if bitmask_plus_one.is_power_of_two() { let value = if lhs.is_some() { self.rhs } else { self.lhs }; - let num_bits = bitmask_plus_one.ilog2(); + let num_bits = bitmask_plus_one.ilog2() as u8; return SimplifyResult::SimplifiedToInstruction( Instruction::Truncate { value, @@ -257,7 +252,7 @@ impl Binary { return SimplifyResult::SimplifiedTo(self.lhs); } if operand_type == NumericType::bool() && (lhs_is_one || rhs_is_one) { - let one = dfg.make_constant(FieldElement::one(), operand_type); + let one = dfg.constant(FieldElement::one(), operand_type); return SimplifyResult::SimplifiedTo(one); } if dfg.resolve(self.lhs) == dfg.resolve(self.rhs) { @@ -272,7 +267,7 @@ impl Binary { return SimplifyResult::SimplifiedTo(self.lhs); } if dfg.resolve(self.lhs) == dfg.resolve(self.rhs) { - let zero = dfg.make_constant(FieldElement::zero(), operand_type); + let zero = dfg.constant(FieldElement::zero(), operand_type); return SimplifyResult::SimplifiedTo(zero); } } @@ -282,7 +277,7 @@ impl Binary { if let Some(rhs_const) = rhs { if rhs_const >= FieldElement::from(operand_type.bit_size() as u128) { // Shifting by the full width of the operand type, any `lhs` goes to zero. - let zero = dfg.make_constant(FieldElement::zero(), operand_type); + let zero = dfg.constant(FieldElement::zero(), operand_type); return SimplifyResult::SimplifiedTo(zero); } return SimplifyResult::None; @@ -365,7 +360,7 @@ fn eval_constant_binary_op( /// Values in the range `[0, 2^(bit_size-1))` are interpreted as positive integers /// /// Values in the range `[2^(bit_size-1), 2^bit_size)` are interpreted as negative integers. -fn try_convert_field_element_to_signed_integer(field: FieldElement, bit_size: u32) -> Option { +fn try_convert_field_element_to_signed_integer(field: FieldElement, bit_size: u8) -> Option { let unsigned_int = truncate(field.try_into_u128()?, bit_size); let max_positive_value = 1 << (bit_size - 1); @@ -381,7 +376,7 @@ fn try_convert_field_element_to_signed_integer(field: FieldElement, bit_size: u3 Some(signed_int) } -fn convert_signed_integer_to_field_element(int: i128, bit_size: u32) -> FieldElement { +fn convert_signed_integer_to_field_element(int: i128, bit_size: u8) -> FieldElement { if int >= 0 { FieldElement::from(int) } else { @@ -391,7 +386,7 @@ fn convert_signed_integer_to_field_element(int: i128, bit_size: u32) -> FieldEle } } -fn truncate(int: u128, bit_size: u32) -> u128 { +fn truncate(int: u128, bit_size: u8) -> u128 { let max = 1 << bit_size; int % max } @@ -460,7 +455,7 @@ mod test { proptest! 
{ #[test] - fn signed_int_roundtrip(int: i128, bit_size in 1u32..=64) { + fn signed_int_roundtrip(int: i128, bit_size in 1u8..=64) { let int = int % (1i128 << (bit_size - 1)); let int_as_field = convert_signed_integer_to_field_element(int, bit_size); diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs index 6da4c7702c8..3b8eab3fb0f 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs @@ -15,9 +15,8 @@ use crate::ssa::{ call_stack::CallStackId, dfg::DataFlowGraph, instruction::Intrinsic, - map::Id, types::{NumericType, Type}, - value::{Value, ValueId}, + value::Value, }, opt::flatten_cfg::value_merger::ValueMerger, }; @@ -34,19 +33,19 @@ mod blackbox; /// to the slice length, which requires inserting a binary instruction. This update instruction /// must be inserted into the same block that the call itself is being simplified into. pub(super) fn simplify_call( - func: ValueId, - arguments: &[ValueId], + func: Value, + arguments: &[Value], + return_types: &[Type], dfg: &mut DataFlowGraph, block: BasicBlockId, - ctrl_typevars: Option>, call_stack: CallStackId, ) -> SimplifyResult { - let intrinsic = match &dfg[func] { - Value::Intrinsic(intrinsic) => *intrinsic, + let intrinsic = match func { + Value::Intrinsic(intrinsic) => intrinsic, _ => return SimplifyResult::None, }; - let return_type = ctrl_typevars.and_then(|return_types| return_types.first().cloned()); + let return_type = return_types.get(0); let constant_args: Option> = arguments.iter().map(|value_id| dfg.get_numeric_constant(*value_id)).collect(); @@ -54,10 +53,10 @@ pub(super) fn simplify_call( let simplified_result = match intrinsic { Intrinsic::ToBits(endian) => { // TODO: simplify to a range constraint if `limb_count == 1` - if let (Some(constant_args), Some(return_type)) = (constant_args, return_type.clone()) { + if let (Some(constant_args), Some(return_type)) = (constant_args, return_type) { let field = constant_args[0]; let limb_count = if let Type::Array(_, array_len) = return_type { - array_len + *array_len } else { unreachable!("ICE: Intrinsic::ToRadix return type must be array") }; @@ -76,11 +75,11 @@ pub(super) fn simplify_call( } Intrinsic::ToRadix(endian) => { // TODO: simplify to a range constraint if `limb_count == 1` - if let (Some(constant_args), Some(return_type)) = (constant_args, return_type.clone()) { + if let (Some(constant_args), Some(return_type)) = (constant_args, return_type) { let field = constant_args[0]; let radix = constant_args[1].to_u128() as u32; let limb_count = if let Type::Array(_, array_len) = return_type { - array_len + *array_len } else { unreachable!("ICE: Intrinsic::ToRadix return type must be array") }; @@ -100,7 +99,7 @@ pub(super) fn simplify_call( Intrinsic::ArrayLen => { if let Some(length) = dfg.try_get_array_length(arguments[0]) { let length = FieldElement::from(length as u128); - SimplifyResult::SimplifiedTo(dfg.make_constant(length, NumericType::length_type())) + SimplifyResult::SimplifiedTo(dfg.length_constant(length)) } else if matches!(dfg.type_of_value(arguments[1]), Type::Slice(_)) { SimplifyResult::SimplifiedTo(arguments[0]) } else { @@ -122,8 +121,7 @@ pub(super) fn simplify_call( "expected array length to be multiple of its elements size" ); let slice_length_value = array.len() / elements_size; - let slice_length = - dfg.make_constant(slice_length_value.into(), NumericType::length_type()); + let slice_length = 
dfg.length_constant(slice_length_value.into()); let new_slice = make_array(dfg, array, Type::Slice(inner_element_types), block, call_stack); SimplifyResult::SimplifiedToMultiple(vec![slice_length, new_slice]) @@ -311,7 +309,7 @@ pub(super) fn simplify_call( let value = arguments[0]; let max_bit_size = dfg.get_numeric_constant(arguments[1]); if let Some(max_bit_size) = max_bit_size { - let max_bit_size = max_bit_size.to_u128() as u32; + let max_bit_size = max_bit_size.to_u128().try_into().unwrap(); let max_potential_bits = dfg.get_value_max_num_bits(value); if max_potential_bits < max_bit_size { SimplifyResult::Remove @@ -328,13 +326,13 @@ pub(super) fn simplify_call( } Intrinsic::Hint(Hint::BlackBox) => SimplifyResult::None, Intrinsic::BlackBox(bb_func) => { - simplify_black_box_func(bb_func, arguments, dfg, block, call_stack) + simplify_black_box_func(bb_func, arguments, return_types, dfg, block, call_stack) } Intrinsic::AsWitness => SimplifyResult::None, Intrinsic::IsUnconstrained => SimplifyResult::None, Intrinsic::DerivePedersenGenerators => { - if let Some(Type::Array(_, len)) = return_type.clone() { - simplify_derive_generators(dfg, arguments, len, block, call_stack) + if let Some(Type::Array(_, len)) = return_type { + simplify_derive_generators(dfg, arguments, *len, block, call_stack) } else { unreachable!("Derive Pedersen Generators must return an array"); } @@ -343,7 +341,7 @@ pub(super) fn simplify_call( if let Some(constants) = constant_args { let lhs = constants[0]; let rhs = constants[1]; - let result = dfg.make_constant((lhs < rhs).into(), NumericType::bool()); + let result = dfg.bool_constant(lhs < rhs); SimplifyResult::SimplifiedTo(result) } else { SimplifyResult::None @@ -358,7 +356,7 @@ pub(super) fn simplify_call( { assert_eq!( dfg.type_of_value(*result), - expected_types, + *expected_types, "Simplification should not alter return type" ); } @@ -374,35 +372,34 @@ pub(super) fn simplify_call( /// This is because the slice length holds the user length (length as displayed by a `.len()` call), /// and not a flattened length used internally to represent arrays of tuples. 
fn update_slice_length( - slice_len: ValueId, + slice_len: Value, dfg: &mut DataFlowGraph, operator: BinaryOp, block: BasicBlockId, -) -> ValueId { - let one = dfg.make_constant(FieldElement::one(), NumericType::length_type()); +) -> Value { + let one = dfg.length_constant(FieldElement::one()); let instruction = Instruction::Binary(Binary { lhs: slice_len, operator, rhs: one }); let call_stack = dfg.get_value_call_stack_id(slice_len); - dfg.insert_instruction_and_results(instruction, block, None, call_stack).first() + dfg.insert_instruction_and_results(instruction, block, call_stack).first() } fn simplify_slice_push_back( - mut slice: im::Vector, + mut slice: im::Vector, element_type: Type, - arguments: &[ValueId], + arguments: &[Value], dfg: &mut DataFlowGraph, block: BasicBlockId, call_stack: CallStackId, ) -> SimplifyResult { // The capacity must be an integer so that we can compare it against the slice length - let capacity = dfg.make_constant((slice.len() as u128).into(), NumericType::length_type()); + let capacity = dfg.length_constant((slice.len() as u128).into()); let len_equals_capacity_instr = Instruction::Binary(Binary { lhs: arguments[0], operator: BinaryOp::Eq, rhs: capacity }); - let len_equals_capacity = dfg - .insert_instruction_and_results(len_equals_capacity_instr, block, None, call_stack) - .first(); + let len_equals_capacity = + dfg.insert_instruction_and_results(len_equals_capacity_instr, block, call_stack).first(); let len_not_equals_capacity_instr = Instruction::Not(len_equals_capacity); let len_not_equals_capacity = dfg - .insert_instruction_and_results(len_not_equals_capacity_instr, block, None, call_stack) + .insert_instruction_and_results(len_not_equals_capacity_instr, block, call_stack) .first(); let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Add, block); @@ -421,9 +418,8 @@ fn simplify_slice_push_back( mutable: false, }; - let set_last_slice_value = dfg - .insert_instruction_and_results(set_last_slice_value_instr, block, None, call_stack) - .first(); + let set_last_slice_value = + dfg.insert_instruction_and_results(set_last_slice_value_instr, block, call_stack).first(); let mut slice_sizes = HashMap::default(); slice_sizes.insert(set_last_slice_value, slice_size / element_size); @@ -445,7 +441,7 @@ fn simplify_slice_push_back( fn simplify_slice_pop_back( slice_type: Type, - arguments: &[ValueId], + arguments: &[Value], dfg: &mut DataFlowGraph, block: BasicBlockId, call_stack: CallStackId, @@ -456,30 +452,27 @@ fn simplify_slice_pop_back( let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Sub, block); - let element_size = - dfg.make_constant((element_count as u128).into(), NumericType::length_type()); + let element_size = dfg.length_constant((element_count as u128).into()); let flattened_len_instr = Instruction::binary(BinaryOp::Mul, arguments[0], element_size); let mut flattened_len = - dfg.insert_instruction_and_results(flattened_len_instr, block, None, call_stack).first(); + dfg.insert_instruction_and_results(flattened_len_instr, block, call_stack).first(); flattened_len = update_slice_length(flattened_len, dfg, BinaryOp::Sub, block); // We must pop multiple elements in the case of a slice of tuples // Iterating through element types in reverse here since we're popping from the end - for element_type in element_types.iter().rev() { + for result_type in element_types.iter().rev() { + let result_type = result_type.clone(); let get_last_elem_instr = - Instruction::ArrayGet { array: arguments[1], index: flattened_len 
}; + Instruction::ArrayGet { array: arguments[1], index: flattened_len, result_type }; - let element_type = Some(vec![element_type.clone()]); - let get_last_elem = dfg - .insert_instruction_and_results(get_last_elem_instr, block, element_type, call_stack) - .first(); - results.push_front(get_last_elem); + let get_last_elem = + dfg.insert_instruction_and_results(get_last_elem_instr, block, call_stack).first(); + results.push_front(get_last_elem); flattened_len = update_slice_length(flattened_len, dfg, BinaryOp::Sub, block); } results.push_front(arguments[1]); - results.push_front(new_slice_length); SimplifyResult::SimplifiedToMultiple(results.into()) } @@ -488,7 +481,8 @@ fn simplify_slice_pop_back( /// that value is returned. Otherwise [`SimplifyResult::None`] is returned. fn simplify_black_box_func( bb_func: BlackBoxFunc, - arguments: &[ValueId], + arguments: &[Value], + result_types: &[Type], dfg: &mut DataFlowGraph, block: BasicBlockId, call_stack: CallStackId, @@ -555,7 +549,7 @@ fn simplify_black_box_func( ), BlackBoxFunc::MultiScalarMul => { - blackbox::simplify_msm(dfg, solver, arguments, block, call_stack) + blackbox::simplify_msm(dfg, solver, arguments, result_types, block, call_stack) } BlackBoxFunc::EmbeddedCurveAdd => { blackbox::simplify_ec_add(dfg, solver, arguments, block, call_stack) @@ -591,9 +585,9 @@ fn make_constant_array( typ: NumericType, block: BasicBlockId, call_stack: CallStackId, -) -> ValueId { +) -> Value { let result_constants: im::Vector<_> = - results.map(|element| dfg.make_constant(element, typ)).collect(); + results.map(|element| dfg.constant(element, typ)).collect(); let typ = Type::Array(Arc::new(vec![Type::Numeric(typ)]), result_constants.len() as u32); make_array(dfg, result_constants, typ, block, call_stack) @@ -601,13 +595,13 @@ fn make_constant_array( fn make_array( dfg: &mut DataFlowGraph, - elements: im::Vector, + elements: im::Vector, typ: Type, block: BasicBlockId, call_stack: CallStackId, -) -> ValueId { +) -> Value { let instruction = Instruction::MakeArray { elements, typ }; - dfg.insert_instruction_and_results(instruction, block, None, call_stack).first() + dfg.insert_instruction_and_results(instruction, block, call_stack).first() } /// Returns a slice (represented by a tuple (len, slice)) of constants corresponding to the limbs of the radix decomposition. 
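// [Editorial note, not part of the patch] The doc comment above describes producing the limbs of
// a radix decomposition. Below is a minimal, self-contained sketch of the same per-limb
// arithmetic, using a plain u128 in place of FieldElement/BigUint; the helper name
// `to_radix_limbs` is hypothetical and purely illustrative of what `constant_to_radix` computes.
fn to_radix_limbs(mut value: u128, radix: u128, limb_count: usize) -> Vec<u128> {
    let mut limbs = Vec::with_capacity(limb_count);
    for _ in 0..limb_count {
        limbs.push(value % radix); // emit the next least-significant limb
        value /= radix;
    }
    limbs
}

#[test]
fn to_radix_limbs_example() {
    // 65539 = 1*256^2 + 0*256 + 3, so its base-256 limbs (least significant first) are [3, 0, 1, 0].
    assert_eq!(to_radix_limbs(65539, 256, 4), vec![3, 0, 1, 0]);
}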
@@ -616,7 +610,7 @@ fn constant_to_radix( field: FieldElement, radix: u32, limb_count: u32, - mut make_array: impl FnMut(Vec) -> ValueId, + mut make_array: impl FnMut(Vec) -> Value, ) -> SimplifyResult { let bit_size = u32::BITS - (radix - 1).leading_zeros(); let radix_big = BigUint::from(radix); @@ -642,25 +636,21 @@ fn constant_to_radix( } } -fn to_u8_vec(dfg: &DataFlowGraph, values: im::Vector>) -> Vec { - values - .iter() - .map(|id| { - let field = dfg - .get_numeric_constant(*id) - .expect("value id from array should point at constant"); - *field.to_be_bytes().last().unwrap() - }) - .collect() +fn to_u8_vec(dfg: &DataFlowGraph, values: im::Vector) -> Vec { + vecmap(values, |value| { + let field = + dfg.get_numeric_constant(value).expect("value id from array should point at constant"); + *field.to_be_bytes().last().unwrap() + }) } -fn array_is_constant(dfg: &DataFlowGraph, values: &im::Vector>) -> bool { +fn array_is_constant(dfg: &DataFlowGraph, values: &im::Vector) -> bool { values.iter().all(|value| dfg.get_numeric_constant(*value).is_some()) } fn simplify_hash( dfg: &mut DataFlowGraph, - arguments: &[ValueId], + arguments: &[Value], hash_function: fn(&[u8]) -> Result<[u8; 32], BlackBoxResolutionError>, block: BasicBlockId, call_stack: CallStackId, @@ -690,7 +680,7 @@ type ECDSASignatureVerifier = fn( ) -> Result; fn simplify_signature( dfg: &mut DataFlowGraph, - arguments: &[ValueId], + arguments: &[Value], signature_verifier: ECDSASignatureVerifier, ) -> SimplifyResult { match ( @@ -723,7 +713,7 @@ fn simplify_signature( signature_verifier(&hashed_message, &public_key_x, &public_key_y, &signature) .expect("Rust solvable black box function should not fail"); - let valid_signature = dfg.make_constant(valid_signature.into(), NumericType::bool()); + let valid_signature = dfg.bool_constant(valid_signature); SimplifyResult::SimplifiedTo(valid_signature) } _ => SimplifyResult::None, @@ -732,7 +722,7 @@ fn simplify_signature( fn simplify_derive_generators( dfg: &mut DataFlowGraph, - arguments: &[ValueId], + arguments: &[Value], num_generators: u32, block: BasicBlockId, call_stack: CallStackId, @@ -753,15 +743,15 @@ fn simplify_derive_generators( num_generators, starting_index.try_to_u32().expect("argument is declared as u32"), ); - let is_infinite = dfg.make_constant(FieldElement::zero(), NumericType::bool()); + let is_infinite = dfg.bool_constant(false); let mut results = Vec::new(); for gen in generators { let x_big: BigUint = gen.x.into(); let x = FieldElement::from_be_bytes_reduce(&x_big.to_bytes_be()); let y_big: BigUint = gen.y.into(); let y = FieldElement::from_be_bytes_reduce(&y_big.to_bytes_be()); - results.push(dfg.make_constant(x, NumericType::NativeField)); - results.push(dfg.make_constant(y, NumericType::NativeField)); + results.push(dfg.field_constant(x)); + results.push(dfg.field_constant(y)); results.push(is_infinite); } let len = results.len() as u32; diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs index ffacf6fe8b5..7a71e7d992b 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs @@ -10,7 +10,7 @@ use crate::ssa::ir::{ dfg::DataFlowGraph, instruction::{Instruction, Intrinsic, SimplifyResult}, types::Type, - value::ValueId, + value::Value, }; use super::{array_is_constant, make_constant_array, to_u8_vec}; @@ -18,7 +18,7 @@ use super::{array_is_constant, make_constant_array, 
to_u8_vec}; pub(super) fn simplify_ec_add( dfg: &mut DataFlowGraph, solver: impl BlackBoxFunctionSolver, - arguments: &[ValueId], + arguments: &[Value], block: BasicBlockId, call_stack: CallStackId, ) -> SimplifyResult { @@ -49,17 +49,16 @@ pub(super) fn simplify_ec_add( return SimplifyResult::None; }; - let result_x = dfg.make_constant(result_x, NumericType::NativeField); - let result_y = dfg.make_constant(result_y, NumericType::NativeField); - let result_is_infinity = - dfg.make_constant(result_is_infinity, NumericType::NativeField); + let result_x = dfg.field_constant(result_x); + let result_y = dfg.field_constant(result_y); + // TODO: Should this be a bool? + let result_is_infinity = dfg.field_constant(result_is_infinity); let typ = Type::Array(Arc::new(vec![Type::field()]), 3); let elements = im::vector![result_x, result_y, result_is_infinity]; let instruction = Instruction::MakeArray { elements, typ }; - let result_array = - dfg.insert_instruction_and_results(instruction, block, None, call_stack); + let result_array = dfg.insert_instruction_and_results(instruction, block, call_stack); SimplifyResult::SimplifiedTo(result_array.first()) } @@ -70,7 +69,8 @@ pub(super) fn simplify_ec_add( pub(super) fn simplify_msm( dfg: &mut DataFlowGraph, solver: impl BlackBoxFunctionSolver, - arguments: &[ValueId], + arguments: &[Value], + result_types: &[Type], block: BasicBlockId, call_stack: CallStackId, ) -> SimplifyResult { @@ -145,16 +145,17 @@ pub(super) fn simplify_msm( // If there are no variable term, we can directly return the constant result if var_scalars.is_empty() { - let result_x = dfg.make_constant(result_x, NumericType::NativeField); - let result_y = dfg.make_constant(result_y, NumericType::NativeField); - let result_is_infinity = - dfg.make_constant(result_is_infinity, NumericType::NativeField); + let result_x = dfg.field_constant(result_x); + let result_y = dfg.field_constant(result_y); + + // TODO: Is this correct? Seems this is meant for var_points not var_scalars + let result_is_infinity = dfg.field_constant(result_is_infinity); let elements = im::vector![result_x, result_y, result_is_infinity]; let typ = Type::Array(Arc::new(vec![Type::field()]), 3); let instruction = Instruction::MakeArray { elements, typ }; let result_array = - dfg.insert_instruction_and_results(instruction, block, None, call_stack); + dfg.insert_instruction_and_results(instruction, block, call_stack); return SimplifyResult::SimplifiedTo(result_array.first()); } @@ -163,17 +164,17 @@ pub(super) fn simplify_msm( return SimplifyResult::None; } // Add the constant part back to the non-constant part, if it is not null - let one = dfg.make_constant(FieldElement::one(), NumericType::NativeField); - let zero = dfg.make_constant(FieldElement::zero(), NumericType::NativeField); + let one = dfg.field_constant(FieldElement::one()); + let zero = dfg.field_constant(FieldElement::zero()); if result_is_infinity.is_zero() { var_scalars.push(one); var_scalars.push(zero); - let result_x = dfg.make_constant(result_x, NumericType::NativeField); - let result_y = dfg.make_constant(result_y, NumericType::NativeField); + let result_x = dfg.field_constant(result_x); + let result_y = dfg.field_constant(result_y); // Pushing a bool here is intentional, multi_scalar_mul takes two arguments: // `points: [(Field, Field, bool); N]` and `scalars: [(Field, Field); N]`. 
- let result_is_infinity = dfg.make_constant(result_is_infinity, NumericType::bool()); + let result_is_infinity = dfg.bool_constant(false); var_points.push(result_x); var_points.push(result_y); @@ -182,16 +183,16 @@ pub(super) fn simplify_msm( // Construct the simplified MSM expression let typ = Type::Array(Arc::new(vec![Type::field()]), var_scalars.len() as u32); let scalars = Instruction::MakeArray { elements: var_scalars.into(), typ }; - let scalars = - dfg.insert_instruction_and_results(scalars, block, None, call_stack).first(); + let scalars = dfg.insert_instruction_and_results(scalars, block, call_stack).first(); let typ = Type::Array(Arc::new(vec![Type::field()]), var_points.len() as u32); let points = Instruction::MakeArray { elements: var_points.into(), typ }; - let points = - dfg.insert_instruction_and_results(points, block, None, call_stack).first(); - let msm = dfg.import_intrinsic(Intrinsic::BlackBox(BlackBoxFunc::MultiScalarMul)); + let points = dfg.insert_instruction_and_results(points, block, call_stack).first(); + let msm = Value::Intrinsic(Intrinsic::BlackBox(BlackBoxFunc::MultiScalarMul)); + SimplifyResult::SimplifiedToInstruction(Instruction::Call { func: msm, arguments: vec![points, scalars], + result_types: result_types.to_vec(), }) } _ => SimplifyResult::None, @@ -201,7 +202,7 @@ pub(super) fn simplify_msm( pub(super) fn simplify_poseidon2_permutation( dfg: &mut DataFlowGraph, solver: impl BlackBoxFunctionSolver, - arguments: &[ValueId], + arguments: &[Value], block: BasicBlockId, call_stack: CallStackId, ) -> SimplifyResult { @@ -235,7 +236,7 @@ pub(super) fn simplify_poseidon2_permutation( pub(super) fn simplify_hash( dfg: &mut DataFlowGraph, - arguments: &[ValueId], + arguments: &[Value], hash_function: fn(&[u8]) -> Result<[u8; 32], BlackBoxResolutionError>, block: BasicBlockId, call_stack: CallStackId, @@ -266,7 +267,7 @@ type ECDSASignatureVerifier = fn( pub(super) fn simplify_signature( dfg: &mut DataFlowGraph, - arguments: &[ValueId], + arguments: &[Value], signature_verifier: ECDSASignatureVerifier, ) -> SimplifyResult { match ( @@ -299,7 +300,7 @@ pub(super) fn simplify_signature( signature_verifier(&hashed_message, &public_key_x, &public_key_y, &signature) .expect("Rust solvable black box function should not fail"); - let valid_signature = dfg.make_constant(valid_signature.into(), NumericType::bool()); + let valid_signature = dfg.bool_constant(valid_signature); SimplifyResult::SimplifiedTo(valid_signature) } _ => SimplifyResult::None, diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/cast.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/cast.rs index ee2ab43aa5d..152f564925d 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/cast.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/cast.rs @@ -1,20 +1,20 @@ use acvm::{acir::AcirField, FieldElement}; use num_bigint::BigUint; -use super::{DataFlowGraph, Instruction, NumericType, SimplifyResult, Type, Value, ValueId}; +use super::{DataFlowGraph, Instruction, NumericType, SimplifyResult, Type, Value}; /// Try to simplify this cast instruction. If the instruction can be simplified to a known value, /// that value is returned. Otherwise None is returned. pub(super) fn simplify_cast( - value: ValueId, + value: Value, dst_typ: NumericType, dfg: &mut DataFlowGraph, ) -> SimplifyResult { use SimplifyResult::*; let value = dfg.resolve(value); - if let Value::Instruction { instruction, .. 
} = &dfg[value] { - if let Instruction::Cast(original_value, _) = &dfg[*instruction] { + if let Value::Instruction { instruction, .. } = value { + if let Instruction::Cast(original_value, _) = &dfg[instruction] { return SimplifiedToInstruction(Instruction::Cast(*original_value, dst_typ)); } } @@ -31,7 +31,7 @@ pub(super) fn simplify_cast( NumericType::NativeField, ) => { // Unsigned/Signed -> Field: redefine same constant as Field - SimplifiedTo(dfg.make_constant(constant, dst_typ)) + SimplifiedTo(dfg.constant(constant, dst_typ)) } ( NumericType::NativeField @@ -40,11 +40,11 @@ pub(super) fn simplify_cast( NumericType::Unsigned { bit_size }, ) => { // Field/Unsigned -> unsigned: truncate - let integer_modulus = BigUint::from(2u128).pow(bit_size); + let integer_modulus = BigUint::from(2u128).pow(bit_size as u32); let constant: BigUint = BigUint::from_bytes_be(&constant.to_be_bytes()); let truncated = constant % integer_modulus; let truncated = FieldElement::from_be_bytes_reduce(&truncated.to_bytes_be()); - SimplifiedTo(dfg.make_constant(truncated, dst_typ)) + SimplifiedTo(dfg.constant(truncated, dst_typ)) } ( NumericType::NativeField @@ -54,10 +54,10 @@ pub(super) fn simplify_cast( ) => { // Field/Unsigned -> signed // We only simplify to signed when we are below the maximum signed integer of the destination type. - let integer_modulus = BigUint::from(2u128).pow(bit_size - 1); + let integer_modulus = BigUint::from(2u128).pow(bit_size as u32 - 1); let constant_uint: BigUint = BigUint::from_bytes_be(&constant.to_be_bytes()); if constant_uint < integer_modulus { - SimplifiedTo(dfg.make_constant(constant, dst_typ)) + SimplifiedTo(dfg.constant(constant, dst_typ)) } else { None } diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/constrain.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/constrain.rs index 5ae6a642a57..d14d5c90c47 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/constrain.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/constrain.rs @@ -2,13 +2,13 @@ use acvm::{acir::AcirField, FieldElement}; use crate::ssa::ir::types::NumericType; -use super::{Binary, BinaryOp, ConstrainError, DataFlowGraph, Instruction, Type, Value, ValueId}; +use super::{Binary, BinaryOp, ConstrainError, DataFlowGraph, Instruction, Type, Value}; /// Try to decompose this constrain instruction. This constraint will be broken down such that it instead constrains /// all the values which are used to compute the values which were being constrained. pub(super) fn decompose_constrain( - lhs: ValueId, - rhs: ValueId, + lhs: Value, + rhs: Value, msg: &Option, dfg: &mut DataFlowGraph, ) -> Vec { @@ -19,12 +19,13 @@ pub(super) fn decompose_constrain( // Remove trivial case `assert_eq(x, x)` Vec::new() } else { - match (&dfg[lhs], &dfg[rhs]) { + match (lhs, rhs) { (Value::NumericConstant { constant, typ }, Value::Instruction { instruction, .. }) | (Value::Instruction { instruction, .. }, Value::NumericConstant { constant, typ }) - if *typ == NumericType::bool() => + if typ == NumericType::bool() => { - match dfg[*instruction] { + let constant = dfg[constant]; + match dfg[instruction] { Instruction::Binary(Binary { lhs, rhs, operator: BinaryOp::Eq }) if constant.is_one() => { @@ -62,14 +63,11 @@ pub(super) fn decompose_constrain( // // Note that this doesn't remove the value `v2` as it may be used in other instructions, but it // will likely be removed through dead instruction elimination. 
- let one = FieldElement::one(); - let one = dfg.make_constant(one, NumericType::bool()); + let one = dfg.bool_constant(true); - [ - decompose_constrain(lhs, one, msg, dfg), - decompose_constrain(rhs, one, msg, dfg), - ] - .concat() + let mut result = decompose_constrain(lhs, one, msg, dfg); + result.append(&mut decompose_constrain(rhs, one, msg, dfg)); + result } Instruction::Binary(Binary { lhs, rhs, operator: BinaryOp::Or }) @@ -91,13 +89,11 @@ pub(super) fn decompose_constrain( // Note that this doesn't remove the value `v2` as it may be used in other instructions, but it // will likely be removed through dead instruction elimination. let zero = FieldElement::zero(); - let zero = dfg.make_constant(zero, dfg.type_of_value(lhs).unwrap_numeric()); + let zero = dfg.constant(zero, dfg.type_of_value(lhs).unwrap_numeric()); - [ - decompose_constrain(lhs, zero, msg, dfg), - decompose_constrain(rhs, zero, msg, dfg), - ] - .concat() + let mut result = decompose_constrain(lhs, zero, msg, dfg); + result.append(&mut decompose_constrain(rhs, zero, msg, dfg)); + result } Instruction::Not(value) => { @@ -113,9 +109,7 @@ pub(super) fn decompose_constrain( // // Note that this doesn't remove the value `v1` as it may be used in other instructions, but it // will likely be removed through dead instruction elimination. - let reversed_constant = FieldElement::from(!constant.is_one()); - let reversed_constant = - dfg.make_constant(reversed_constant, NumericType::bool()); + let reversed_constant = dfg.bool_constant(!constant.is_one()); decompose_constrain(value, reversed_constant, msg, dfg) } @@ -127,7 +121,7 @@ pub(super) fn decompose_constrain( Value::Instruction { instruction: instruction_lhs, .. }, Value::Instruction { instruction: instruction_rhs, .. }, ) => { - match (&dfg[*instruction_lhs], &dfg[*instruction_rhs]) { + match (&dfg[instruction_lhs], &dfg[instruction_rhs]) { // Casting two values just to enforce an equality on them. // // This is equivalent to enforcing equality on the original values. diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/insert_result.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/insert_result.rs new file mode 100644 index 00000000000..4ff0ca46d58 --- /dev/null +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/insert_result.rs @@ -0,0 +1,88 @@ +use super::InstructionId; +use crate::ssa::ir::value::Value; + +// The result of calling DataFlowGraph::insert_instruction can +// be a list of results or a single ValueId if the instruction was simplified +// to an existing value. +#[derive(Debug)] +pub(crate) enum InsertInstructionResult { + Results { id: InstructionId, result_count: u32 }, + SimplifiedTo(Value), + SimplifiedToMultiple(Vec), + InstructionRemoved, +} + +impl InsertInstructionResult { + /// Retrieve the first (and expected to be the only) result. + pub(crate) fn first(&self) -> Value { + match self { + InsertInstructionResult::SimplifiedTo(value) => *value, + InsertInstructionResult::SimplifiedToMultiple(values) => values[0], + InsertInstructionResult::Results { id, result_count } => { + assert_eq!(*result_count, 1); + Value::instruction_result(*id, 0) + } + InsertInstructionResult::InstructionRemoved => { + panic!("Instruction was removed, no results") + } + } + } + + /// Return all the results contained in the internal results array. + /// This is used for instructions returning multiple results like function calls. 
+ pub(crate) fn results(self) -> InsertInstructionResultIter { + InsertInstructionResultIter { results: self, index: 0 } + } + + /// Returns the amount of ValueIds contained + pub(crate) fn len(&self) -> u32 { + match self { + InsertInstructionResult::SimplifiedTo(_) => 1, + InsertInstructionResult::SimplifiedToMultiple(results) => results.len() as u32, + InsertInstructionResult::Results { result_count, .. } => *result_count, + InsertInstructionResult::InstructionRemoved => 0, + } + } +} + +pub(crate) struct InsertInstructionResultIter { + results: InsertInstructionResult, + index: u16, +} + +impl Iterator for InsertInstructionResultIter { + type Item = Value; + + fn next(&mut self) -> Option { + use InsertInstructionResult::*; + match &self.results { + Results { id, result_count } if (self.index as u32) < *result_count => { + let result = Value::Instruction { instruction: *id, position: self.index }; + self.index += 1; + Some(result) + } + SimplifiedTo(value) if self.index == 0 => { + self.index += 1; + Some(*value) + } + SimplifiedToMultiple(results) if self.index < results.len() as u16 => { + let result = results[self.index as usize]; + self.index += 1; + Some(result) + } + InstructionRemoved | Results { .. } | SimplifiedTo(..) | SimplifiedToMultiple(_) => { + None + } + } + } + + fn size_hint(&self) -> (usize, Option) { + (0, Some(self.len())) + } +} + +impl ExactSizeIterator for InsertInstructionResultIter { + fn len(&self) -> usize { + (self.results.len() - self.index as u32) as usize + } +} diff --git a/compiler/noirc_evaluator/src/ssa/ir/map.rs b/compiler/noirc_evaluator/src/ssa/ir/map.rs index 0fb02f19b14..ccb2515236f 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/map.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/map.rs @@ -1,6 +1,8 @@ +use acvm::FieldElement; use fxhash::FxHashMap as HashMap; use serde::{Deserialize, Serialize}; use std::{ + borrow::Borrow, collections::BTreeMap, hash::Hash, str::FromStr, @@ -8,6 +10,8 @@ use std::{ }; use thiserror::Error; +use super::value::ForeignFunctionId; + /// A unique ID corresponding to a value of type T. /// This type can be used to retrieve a value of type T from /// either a DenseMap or SparseMap. @@ -100,15 +104,15 @@ impl std::fmt::Display for Id { } } -impl std::fmt::Display for Id { +impl std::fmt::Display for Id { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "v{}", self.index) + write!(f, "f{}", self.index) } } -impl std::fmt::Display for Id { +impl std::fmt::Display for Id { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "f{}", self.index) + write!(f, "ff{}", self.index) } } @@ -118,6 +122,12 @@ impl std::fmt::Display for Id { } } +impl std::fmt::Display for Id { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "n{}", self.index) + } +} + #[derive(Error, Debug)] pub(crate) enum IdDisplayFromStrErr { #[error("Invalid id when deserializing SSA: {0}")] @@ -304,13 +314,13 @@ impl std::ops::IndexMut> for SparseMap { /// This is accomplished by keeping the map bijective - for every /// value there is exactly one key and vice-versa. Any duplicate values /// are prevented in the call to insert. -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct TwoWayMap { key_to_value: HashMap, value_to_key: HashMap, } -impl TwoWayMap { +impl TwoWayMap { /// Returns the number of elements in the map. 
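// --- Illustrative aside (not part of the patch) ---
// A sketch of how a caller consumes the InsertInstructionResult defined above. `first()`
// suits single-result instructions; `results()` returns the iterator and handles every
// variant uniformly (InstructionRemoved simply yields nothing). The parameter types are
// the ones used by insert_instruction_and_results elsewhere in this PR; the function name
// is made up for the example.
fn insert_and_collect_sketch(
    dfg: &mut DataFlowGraph,
    block: BasicBlockId,
    call_stack: CallStackId,
    instruction: Instruction,
) -> Vec<Value> {
    let inserted = dfg.insert_instruction_and_results(instruction, block, call_stack);
    // Collecting through the iterator covers Results, SimplifiedTo,
    // SimplifiedToMultiple and InstructionRemoved alike.
    inserted.results().collect()
}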
     pub(crate) fn len(&self) -> usize {
         self.key_to_value.len()
@@ -329,7 +339,11 @@ impl TwoWayMap {
         key
     }
 
-    pub(crate) fn get(&self, key: &K) -> Option<&V> {
+    pub(crate) fn get<Q>(&self, key: &Q) -> Option<&V>
+    where
+        K: Borrow<Q>,
+        Q: ?Sized + Hash + Eq,
+    {
         self.key_to_value.get(key)
     }
 }
@@ -388,3 +402,81 @@ impl Default for AtomicCounter {
         Self { next: Default::default(), _marker: Default::default() }
     }
 }
+
+#[derive(Debug, Default, Clone)]
+pub(crate) struct ForeignFunctions {
+    map: TwoWayMap<String, ForeignFunctionId>,
+}
+
+impl ForeignFunctions {
+    /// Returns an existing id for the given element, or creates a new
+    /// one if it doesn't already exist.
+    pub(crate) fn get_or_insert(&mut self, element: &str) -> ForeignFunctionId {
+        if let Some(existing) = self.map.get(element) {
+            return *existing;
+        }
+
+        let id = Id::new(self.map.len().try_into().unwrap());
+        self.map.insert(element.to_string(), id);
+        id
+    }
+}
+
+impl std::ops::Index<ForeignFunctionId> for ForeignFunctions {
+    type Output = String;
+
+    fn index(&self, index: ForeignFunctionId) -> &Self::Output {
+        &self.map.value_to_key[&index]
+    }
+}
+
+/// A UniqueMap is a map which keeps each T unique.
+/// It can be used as an interner where each equal T needs to be given the same id.
+#[derive(Debug, Clone)]
+pub(crate) struct UniqueMap<T> {
+    key_to_value: HashMap<Id<T>, T>,
+    value_to_key: HashMap<T, Id<T>>,
+}
+
+impl<T: Eq + Hash> UniqueMap<T> {
+    /// Adds an element to the map.
+    /// Returns the identifier/reference to that element.
+    pub(crate) fn get_or_insert(&mut self, value: &T) -> Id<T>
+    where
+        T: Clone,
+    {
+        if let Some(existing) = self.value_to_key.get(value) {
+            return *existing;
+        }
+
+        let key = Id::new(self.value_to_key.len() as u32);
+        self.key_to_value.insert(key, value.clone());
+        self.value_to_key.insert(value.clone(), key);
+        key
+    }
+}
+
+impl<T> Default for UniqueMap<T> {
+    fn default() -> Self {
+        Self { key_to_value: HashMap::default(), value_to_key: HashMap::default() }
+    }
+}
+
+// Note that there is no impl for IndexMut,
+// if we allowed mutable access to map elements they may be
+// mutated such that elements are no longer unique
+impl<T> std::ops::Index<Id<T>> for UniqueMap<T> {
+    type Output = T;
+
+    fn index(&self, id: Id<T>) -> &Self::Output {
+        &self.key_to_value[&id]
+    }
+}
+
+impl<'a, T: Eq + Hash> std::ops::Index<&'a T> for UniqueMap<T> {
+    type Output = Id<T>;
+
+    fn index(&self, value: &'a T) -> &Self::Output {
+        &self.value_to_key[value]
+    }
+}
diff --git a/compiler/noirc_evaluator/src/ssa/ir/printer.rs b/compiler/noirc_evaluator/src/ssa/ir/printer.rs
index 29e79728303..e9f36dbab32 100644
--- a/compiler/noirc_evaluator/src/ssa/ir/printer.rs
+++ b/compiler/noirc_evaluator/src/ssa/ir/printer.rs
@@ -12,7 +12,7 @@ use super::{
     dfg::DataFlowGraph,
     function::Function,
     instruction::{ConstrainError, Instruction, InstructionId, TerminatorInstruction},
-    value::{Value, ValueId},
+    value::Value,
 };
 
 /// Helper function for Function's Display impl to pretty-print the function with the given formatter.
@@ -32,7 +32,8 @@ pub(crate) fn display_block(
 ) -> Result {
     let block = &function.dfg[block_id];
 
-    writeln!(f, "  {}({}):", block_id, value_list_with_types(function, block.parameters()))?;
+    let parameters = function.dfg.block_parameters(block_id);
+    writeln!(f, "  {}({}):", block_id, value_list_with_types(function, parameters))?;
 
     for instruction in block.instructions() {
         display_instruction(function, *instruction, f)?;
@@ -43,33 +44,36 @@ pub(crate) fn display_block(
 /// Specialize displaying value ids so that if they refer to a numeric
 /// constant or a function we print those directly.
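// --- Illustrative aside (not part of the patch) ---
// How the two new interners behave, using only the API shown above. The concrete values
// ("println", "hello") are arbitrary examples.
fn interner_sketch() {
    let mut foreign = ForeignFunctions::default();
    let a = foreign.get_or_insert("println");
    let b = foreign.get_or_insert("println");
    assert_eq!(a, b);                  // duplicate names share one ForeignFunctionId
    assert_eq!(foreign[a], "println"); // Index<ForeignFunctionId> recovers the name

    let mut strings: UniqueMap<String> = UniqueMap::default();
    let hello = strings.get_or_insert(&"hello".to_string());
    assert_eq!(strings[hello], "hello");              // Index<Id<T>> returns the value
    assert_eq!(strings[&"hello".to_string()], hello); // Index<&T> returns the id
}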
-fn value(function: &Function, id: ValueId) -> String { - let id = function.dfg.resolve(id); - match &function.dfg[id] { - Value::NumericConstant { constant, typ } => { - format!("{typ} {constant}") - } - Value::Function(id) => id.to_string(), - Value::Intrinsic(intrinsic) => intrinsic.to_string(), - Value::Param { .. } | Value::Instruction { .. } | Value::ForeignFunction(_) => { - id.to_string() - } - } +fn value(function: &Function, value: Value) -> String { + function.dfg.resolve(value).to_string() } /// Display each value along with its type. E.g. `v0: Field, v1: u64, v2: u1` -fn value_list_with_types(function: &Function, values: &[ValueId]) -> String { - vecmap(values, |id| { - let value = value(function, *id); - let typ = function.dfg.type_of_value(*id); +fn value_list_with_types( + function: &Function, + values: impl ExactSizeIterator, +) -> String { + vecmap(values, |v| { + let value = value(function, v); + let typ = function.dfg.type_of_value(v); format!("{value}: {typ}") }) .join(", ") } /// Display each value separated by a comma -fn value_list(function: &Function, values: &[ValueId]) -> String { - vecmap(values, |id| value(function, *id)).join(", ") +fn value_list(function: &Function, values: impl ExactSizeIterator) -> String { + vecmap(values, |v| value(function, v)).join(", ") +} + +fn type_list(types: &[Type]) -> String { + if types.is_empty() { + String::new() + } else if types.len() == 1 { + format!(" -> {}", &types[0]) + } else { + format!(" -> ({})", vecmap(types, ToString::to_string).join(", ")) + } } /// Display a terminator instruction @@ -80,7 +84,12 @@ pub(crate) fn display_terminator( ) -> Result { match terminator { Some(TerminatorInstruction::Jmp { destination, arguments, call_stack: _ }) => { - writeln!(f, " jmp {}({})", destination, value_list(function, arguments)) + writeln!( + f, + " jmp {}({})", + destination, + value_list(function, arguments.iter().copied()) + ) } Some(TerminatorInstruction::JmpIf { condition, @@ -100,7 +109,7 @@ pub(crate) fn display_terminator( if return_values.is_empty() { writeln!(f, " return") } else { - writeln!(f, " return {}", value_list(function, return_values)) + writeln!(f, " return {}", value_list(function, return_values.iter().copied())) } } None => writeln!(f, " (no terminator instruction)"), @@ -117,17 +126,16 @@ pub(crate) fn display_instruction( write!(f, " ")?; let results = function.dfg.instruction_results(instruction); - if !results.is_empty() { + if results.len() != 0 { write!(f, "{} = ", value_list(function, results))?; } - display_instruction_inner(function, &function.dfg[instruction], results, f) + display_instruction_inner(function, &function.dfg[instruction], f) } fn display_instruction_inner( function: &Function, instruction: &Instruction, - results: &[ValueId], f: &mut Formatter, ) -> Result { let show = |id| value(function, id); @@ -150,15 +158,15 @@ fn display_instruction_inner( writeln!(f) } } - Instruction::Call { func, arguments } => { - let arguments = value_list(function, arguments); - writeln!(f, "call {}({}){}", show(*func), arguments, result_types(function, results)) + Instruction::Call { func, arguments, result_types } => { + let arguments = value_list(function, arguments.iter().copied()); + writeln!(f, "call {}({}){}", show(*func), arguments, type_list(result_types)) } - Instruction::Allocate => { - writeln!(f, "allocate{}", result_types(function, results)) + Instruction::Allocate { element_type } => { + writeln!(f, "allocate -> &mut {element_type}") } - Instruction::Load { address } => { - 
writeln!(f, "load {}{}", show(*address), result_types(function, results)) + Instruction::Load { address, result_type } => { + writeln!(f, "load {} -> {result_type}", show(*address)) } Instruction::Store { address, value } => { writeln!(f, "store {} at {}", show(*value), show(*address)) @@ -166,14 +174,8 @@ fn display_instruction_inner( Instruction::EnableSideEffectsIf { condition } => { writeln!(f, "enable_side_effects {}", show(*condition)) } - Instruction::ArrayGet { array, index } => { - writeln!( - f, - "array_get {}, index {}{}", - show(*array), - show(*index), - result_types(function, results) - ) + Instruction::ArrayGet { array, index, result_type } => { + writeln!(f, "array_get {}, index {} -> {result_type}", show(*array), show(*index),) } Instruction::ArraySet { array, index, value, mutable } => { let array = show(*array); @@ -238,7 +240,7 @@ fn display_instruction_inner( } } -fn try_byte_array_to_string(elements: &Vector, function: &Function) -> Option { +fn try_byte_array_to_string(elements: &Vector, function: &Function) -> Option { let mut string = String::new(); for element in elements { let element = function.dfg.get_numeric_constant(*element)?; @@ -257,21 +259,10 @@ fn try_byte_array_to_string(elements: &Vector, function: &Function) -> Some(string) } -fn result_types(function: &Function, results: &[ValueId]) -> String { - let types = vecmap(results, |result| function.dfg.type_of_value(*result).to_string()); - if types.is_empty() { - String::new() - } else if types.len() == 1 { - format!(" -> {}", types[0]) - } else { - format!(" -> ({})", types.join(", ")) - } -} - /// Tries to extract a constant string from an error payload. pub(crate) fn try_to_extract_string_from_error_payload( is_string_type: bool, - values: &[ValueId], + values: &[Value], dfg: &DataFlowGraph, ) -> Option { (is_string_type && (values.len() == 1)) @@ -307,7 +298,7 @@ fn display_constrain_error( { writeln!(f, ", {constant_string:?}") } else { - writeln!(f, ", data {}", value_list(function, values)) + writeln!(f, ", data {}", value_list(function, values.iter().copied())) } } } diff --git a/compiler/noirc_evaluator/src/ssa/ir/types.rs b/compiler/noirc_evaluator/src/ssa/ir/types.rs index 0dd7fd92ee5..5da8b6da0ca 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/types.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/types.rs @@ -16,27 +16,27 @@ use crate::ssa::ssa_gen::SSA_WORD_SIZE; /// is reasonable. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd, Serialize, Deserialize)] pub enum NumericType { - Signed { bit_size: u32 }, - Unsigned { bit_size: u32 }, + Signed { bit_size: u8 }, + Unsigned { bit_size: u8 }, NativeField, } impl NumericType { /// Returns the bit size of the provided numeric type. 
- pub(crate) fn bit_size(self: &NumericType) -> u32 { + pub(crate) fn bit_size(self: &NumericType) -> u8 { match self { - NumericType::NativeField => FieldElement::max_num_bits(), + NumericType::NativeField => FieldElement::max_num_bits().try_into().unwrap(), NumericType::Unsigned { bit_size } | NumericType::Signed { bit_size } => *bit_size, } } /// Creates a NumericType::Signed type - pub(crate) fn signed(bit_size: u32) -> NumericType { + pub(crate) fn signed(bit_size: u8) -> NumericType { NumericType::Signed { bit_size } } /// Creates a NumericType::Unsigned type - pub(crate) fn unsigned(bit_size: u32) -> NumericType { + pub(crate) fn unsigned(bit_size: u8) -> NumericType { NumericType::Unsigned { bit_size } } @@ -65,6 +65,7 @@ impl NumericType { ) -> Option { match self { NumericType::Unsigned { bit_size } => { + let bit_size = bit_size as u32; let max = 2u128.pow(bit_size) - 1; if negative { return Some(format!("0..={}", max)); @@ -76,6 +77,7 @@ impl NumericType { } } NumericType::Signed { bit_size } => { + let bit_size = bit_size as u32; let min = 2u128.pow(bit_size - 1); let max = 2u128.pow(bit_size - 1) - 1; let target_max = if negative { min } else { max }; @@ -120,12 +122,12 @@ impl Type { } /// Create a new signed integer type with the given amount of bits. - pub(crate) fn signed(bit_size: u32) -> Type { + pub(crate) fn signed(bit_size: u8) -> Type { Type::Numeric(NumericType::Signed { bit_size }) } /// Create a new unsigned integer type with the given amount of bits. - pub(crate) fn unsigned(bit_size: u32) -> Type { + pub(crate) fn unsigned(bit_size: u8) -> Type { Type::Numeric(NumericType::Unsigned { bit_size }) } @@ -151,7 +153,7 @@ impl Type { /// Creates the type of an array's length. pub(crate) fn length_type() -> Type { - Type::unsigned(SSA_WORD_SIZE) + Type::Numeric(NumericType::length_type()) } /// Returns the inner NumericType if this is one, or panics otherwise @@ -167,7 +169,7 @@ impl Type { /// # Panics /// /// Panics if `self` is not a [`Type::Numeric`] - pub(crate) fn bit_size(&self) -> u32 { + pub(crate) fn bit_size(&self) -> u8 { match self { Type::Numeric(numeric_type) => numeric_type.bit_size(), other => panic!("bit_size: Expected numeric type, found {other}"), diff --git a/compiler/noirc_evaluator/src/ssa/ir/value.rs b/compiler/noirc_evaluator/src/ssa/ir/value.rs index ec7a8e25246..94cfa2d5b61 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/value.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/value.rs @@ -1,5 +1,3 @@ -use std::borrow::Cow; - use acvm::FieldElement; use serde::{Deserialize, Serialize}; @@ -9,14 +7,12 @@ use super::{ function::FunctionId, instruction::{InstructionId, Intrinsic}, map::Id, - types::{NumericType, Type}, + types::NumericType, }; -pub(crate) type ValueId = Id; - /// Value is the most basic type allowed in the IR. /// Transition Note: A Id is similar to `NodeId` in our previous IR. -#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone, PartialOrd, Ord, Serialize, Deserialize)] pub(crate) enum Value { /// This value was created due to an instruction /// @@ -27,16 +23,16 @@ pub(crate) enum Value { /// Example, if you add two numbers together, then the resulting /// value would have position `0`, the typ would be the type /// of the operands, and the instruction would map to an add instruction. 
- Instruction { instruction: InstructionId, position: usize, typ: Type }, + Instruction { instruction: InstructionId, position: u16 }, /// This Value originates from a block parameter. Since function parameters /// are also represented as block parameters, this includes function parameters as well. /// /// position -- the index of this Value in the block parameters list - Param { block: BasicBlockId, position: usize, typ: Type }, + Param { block: BasicBlockId, position: u16 }, /// This Value originates from a numeric constant - NumericConstant { constant: FieldElement, typ: NumericType }, + NumericConstant { constant: FieldElementId, typ: NumericType }, /// This Value refers to a function in the IR. /// Functions always have the type Type::Function. @@ -52,18 +48,62 @@ pub(crate) enum Value { /// This Value refers to an external function in the IR. /// ForeignFunction's always have the type Type::Function and have similar semantics to Function, /// other than generating different backend operations and being only accessible through Brillig. - ForeignFunction(String), + ForeignFunction(ForeignFunctionId), } +pub(crate) struct ForeignFunction(pub(crate) String); +pub(crate) type ForeignFunctionId = Id; + +pub(crate) type FieldElementId = Id; + impl Value { - /// Retrieves the type of this Value - pub(crate) fn get_type(&self) -> Cow { + pub(crate) fn block_param(block: BasicBlockId, position: u16) -> Self { + Self::Param { block, position } + } + + pub(crate) fn instruction_result(instruction: InstructionId, position: u16) -> Self { + Self::Instruction { instruction, position } + } + + #[cfg(test)] + pub(crate) fn test_instruction_result(instruction: u32, position: u16) -> Self { + Self::Instruction { instruction: Id::test_new(instruction), position } + } + + /// Return the instruction id associated with this value. + /// Panics if this is not a Value::Instruction + pub(crate) fn instruction_id(&self) -> InstructionId { + match self { + Value::Instruction { instruction, .. } => *instruction, + other => panic!("Expected Value::Instruction, found {other}"), + } + } + + /// True if this is a constant value like an integer or function. + /// False if this is an instruction result or parameter. + pub(crate) fn is_constant(&self) -> bool { + use Value::*; + matches!(self, NumericConstant { .. } | Function(_) | Intrinsic(_) | ForeignFunction(_)) + } +} + +impl std::fmt::Display for Value { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Value::Instruction { typ, .. } | Value::Param { typ, .. } => Cow::Borrowed(typ), - Value::NumericConstant { typ, .. } => Cow::Owned(Type::Numeric(*typ)), - Value::Function { .. } | Value::Intrinsic { .. } | Value::ForeignFunction { .. 
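// --- Illustrative aside (not part of the patch) ---
// Value is now a small Copy enum rather than an Id into the DFG, so results are named
// structurally by (instruction, position). test_instruction_result is the cfg(test)
// constructor added above; the instruction id 7 is arbitrary.
#[cfg(test)]
fn value_identity_sketch() {
    let first = Value::test_instruction_result(7, 0);
    let second = Value::test_instruction_result(7, 1);
    assert_ne!(first, second);                                    // distinguished by position
    assert_eq!(first.instruction_id(), second.instruction_id()); // same defining instruction
    assert!(!first.is_constant());                                // results are never constants
}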
} => { - Cow::Owned(Type::Function) + Value::Instruction { instruction, position } => { + // Because these are so common, we don't show the `:0` suffix since + // most instructions only have 1 result + if *position == 0 { + write!(f, "{instruction}") + } else { + write!(f, "{instruction}.{position}") + } } + Value::Param { block, position } => write!(f, "{block}.{position}"), + Value::NumericConstant { constant, typ } => write!(f, "{typ} {constant}"), + Value::Function(id) => write!(f, "{id}"), + Value::Intrinsic(intrinsic) => write!(f, "{intrinsic}"), + Value::ForeignFunction(id) => write!(f, "{id}"), } } } diff --git a/compiler/noirc_evaluator/src/ssa/opt/array_set.rs b/compiler/noirc_evaluator/src/ssa/opt/array_set.rs index 09339cf0797..0a1d33435f6 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/array_set.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/array_set.rs @@ -7,7 +7,7 @@ use crate::ssa::{ function::{Function, RuntimeType}, instruction::{Instruction, InstructionId, TerminatorInstruction}, types::Type::{Array, Slice}, - value::ValueId, + value::Value, }, ssa_gen::Ssa, }; @@ -57,12 +57,12 @@ impl Function { struct Context<'f> { dfg: &'f DataFlowGraph, - array_to_last_use: HashMap, + array_to_last_use: HashMap, instructions_that_can_be_made_mutable: HashSet, // Mapping of an array that comes from a load and whether the address // it was loaded from is a reference parameter passed to the block. - arrays_from_load: HashMap, - inner_nested_arrays: HashMap, + arrays_from_load: HashMap, + inner_nested_arrays: HashMap, } impl<'f> Context<'f> { @@ -142,11 +142,11 @@ impl<'f> Context<'f> { } } } - Instruction::Load { address } => { - let result = self.dfg.instruction_results(*instruction_id)[0]; - if matches!(self.dfg.type_of_value(result), Array { .. } | Slice { .. }) { + Instruction::Load { address, result_type } => { + if matches!(result_type, Array { .. } | Slice { .. }) { + let result = Value::instruction_result(*instruction_id, 0); let is_reference_param = - self.dfg.block_parameters(block_id).contains(address); + self.dfg.block_parameters(block_id).any(|value| value == *address); self.arrays_from_load.insert(result, is_reference_param); } } diff --git a/compiler/noirc_evaluator/src/ssa/opt/as_slice_length.rs b/compiler/noirc_evaluator/src/ssa/opt/as_slice_length.rs index c6cdffd3bc3..e9c0b4ebf03 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/as_slice_length.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/as_slice_length.rs @@ -2,7 +2,7 @@ use crate::ssa::{ ir::{ function::Function, instruction::{Instruction, InstructionId, Intrinsic}, - types::{NumericType, Type}, + types::Type, value::Value, }, ssa_gen::Ssa, @@ -39,11 +39,11 @@ fn known_slice_lengths(func: &Function) -> HashMap { let block = &func.dfg[block_id]; for instruction_id in block.instructions() { let (target_func, arguments) = match &func.dfg[*instruction_id] { - Instruction::Call { func, arguments } => (func, arguments), + Instruction::Call { func, arguments, .. 
} => (func, arguments), _ => continue, }; - match &func.dfg[*target_func] { + match *target_func { Value::Intrinsic(Intrinsic::AsSlice) => { let array_typ = func.dfg.type_of_value(arguments[0]); if let Type::Array(_, length) = array_typ { @@ -64,15 +64,9 @@ fn replace_known_slice_lengths( known_slice_lengths: HashMap, ) { known_slice_lengths.into_iter().for_each(|(instruction_id, known_length)| { - let call_returns = func.dfg.instruction_results(instruction_id); - let original_slice_length = call_returns[0]; - - // We won't use the new id for the original unknown length. - // This isn't strictly necessary as a new result will be defined the next time for which the instruction - // is reinserted but this avoids leaving the program in an invalid state. - func.dfg.replace_result(instruction_id, original_slice_length); - let known_length = func.dfg.make_constant(known_length.into(), NumericType::length_type()); - func.dfg.set_value_from_id(original_slice_length, known_length); + let original_slice_length = Value::instruction_result(instruction_id, 0); + let known_length = func.dfg.length_constant(known_length.into()); + func.dfg.replace_value(original_slice_length, known_length); }); } diff --git a/compiler/noirc_evaluator/src/ssa/opt/assert_constant.rs b/compiler/noirc_evaluator/src/ssa/opt/assert_constant.rs index 6936c7ad542..695254b62cb 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/assert_constant.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/assert_constant.rs @@ -4,7 +4,7 @@ use crate::{ ir::{ function::Function, instruction::{Instruction, InstructionId, Intrinsic}, - value::ValueId, + value::Value, }, ssa_gen::Ssa, }, @@ -64,10 +64,10 @@ fn check_instruction( function: &mut Function, instruction: InstructionId, ) -> Result { - let assert_constant_id = function.dfg.import_intrinsic(Intrinsic::AssertConstant); - let static_assert_id = function.dfg.import_intrinsic(Intrinsic::StaticAssert); + let assert_constant_id = Value::Intrinsic(Intrinsic::AssertConstant); + let static_assert_id = Value::Intrinsic(Intrinsic::StaticAssert); match &function.dfg[instruction] { - Instruction::Call { func, arguments } => { + Instruction::Call { func, arguments, .. 
} => { if *func == assert_constant_id { evaluate_assert_constant(function, instruction, arguments) } else if *func == static_assert_id { @@ -87,7 +87,7 @@ fn check_instruction( fn evaluate_assert_constant( function: &Function, instruction: InstructionId, - arguments: &[ValueId], + arguments: &[Value], ) -> Result { if arguments.iter().all(|arg| function.dfg.is_constant(*arg)) { Ok(false) @@ -106,7 +106,7 @@ fn evaluate_assert_constant( fn evaluate_static_assert( function: &Function, instruction: InstructionId, - arguments: &[ValueId], + arguments: &[Value], ) -> Result { if arguments.len() != 2 { panic!("ICE: static_assert called with wrong number of arguments") diff --git a/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs b/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs index c81a557178b..2581efbec73 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs @@ -22,7 +22,6 @@ use std::collections::{BTreeMap, HashSet, VecDeque}; use acvm::{ - acir::AcirField, brillig_vm::{MemoryValue, VMStatus, VM}, FieldElement, }; @@ -39,12 +38,13 @@ use crate::{ ssa::{ ir::{ basic_block::BasicBlockId, - dfg::{DataFlowGraph, InsertInstructionResult}, + dfg::DataFlowGraph, dom::DominatorTree, function::{Function, FunctionId, RuntimeType}, + instruction::insert_result::InsertInstructionResult, instruction::{Instruction, InstructionId}, - types::{NumericType, Type}, - value::{Value, ValueId}, + types::Type, + value::Value, }, ssa_gen::Ssa, }, @@ -112,14 +112,12 @@ impl Ssa { for block_id in function.reachable_blocks() { for instruction_id in function.dfg[block_id].instructions() { let instruction = &function.dfg[*instruction_id]; - let Instruction::Call { func: func_id, arguments: _ } = instruction else { + let Instruction::Call { func, .. } = instruction else { continue; }; - let func_value = &function.dfg[*func_id]; - let Value::Function(func_id) = func_value else { continue }; - - brillig_functions.remove(func_id); + let Value::Function(func_id) = *func else { continue }; + brillig_functions.remove(&func_id); } } } @@ -202,18 +200,18 @@ struct SimplificationCache { /// /// It will always have at least one value because `add` is called /// after the default is constructed. - simplifications: HashMap, + simplifications: HashMap, } impl SimplificationCache { /// Called with a newly encountered simplification. - fn add(&mut self, dfg: &DataFlowGraph, simple: ValueId, block: BasicBlockId) { + fn add(&mut self, simple: Value, block: BasicBlockId) { self.simplifications .entry(block) .and_modify(|existing| { // `SimplificationCache` may already hold a simplification in this block // so we check whether `simple` is a better simplification than the current one. - if let Some((_, simpler)) = simplify(dfg, *existing, simple) { + if let Some((_, simpler)) = simplify(*existing, simple) { *existing = simpler; }; }) @@ -221,7 +219,7 @@ impl SimplificationCache { } /// Try to find a simplification in a visible block. - fn get(&self, block: BasicBlockId, dom: &DominatorTree) -> Option { + fn get(&self, block: BasicBlockId, dom: &DominatorTree) -> Option { // Deterministically walk up the dominator chain until we encounter a block that contains a simplification. 
dom.find_map_dominator(block, |b| self.simplifications.get(&b).cloned()) } @@ -234,7 +232,7 @@ impl SimplificationCache { /// Only blocks dominated by one in the cache should have access to this information, otherwise /// we create a sort of time paradox where we replace an instruction with a constant we believe /// it _should_ equal to, without ever actually producing and asserting the value. -type ConstraintSimplificationCache = HashMap>; +type ConstraintSimplificationCache = HashMap>; /// HashMap from `(Instruction, side_effects_enabled_var)` to the results of the instruction. /// Stored as a two-level map to avoid cloning Instructions during the `.get` call. @@ -244,14 +242,14 @@ type ConstraintSimplificationCache = HashMap, ResultCache>>; +type InstructionResultCache = HashMap, ResultCache>>; /// Records the results of all duplicate [`Instruction`]s along with the blocks in which they sit. /// /// For more information see [`InstructionResultCache`]. #[derive(Default)] struct ResultCache { - result: Option<(BasicBlockId, Vec)>, + result: Option<(BasicBlockId, Vec)>, } impl<'brillig> Context<'brillig> { @@ -275,8 +273,7 @@ impl<'brillig> Context<'brillig> { let instructions = function.dfg[block].take_instructions(); // Default side effect condition variable with an enabled state. - let mut side_effects_enabled_var = - function.dfg.make_constant(FieldElement::one(), NumericType::bool()); + let mut side_effects_enabled_var = function.dfg.bool_constant(true); for instruction_id in instructions { self.fold_constants_into_instruction( @@ -296,7 +293,7 @@ impl<'brillig> Context<'brillig> { dom: &mut DominatorTree, mut block: BasicBlockId, id: InstructionId, - side_effects_enabled_var: &mut ValueId, + side_effects_enabled_var: &mut Value, ) { let constraint_simplification_mapping = self.get_constraint_map(*side_effects_enabled_var); let dfg = &mut function.dfg; @@ -304,7 +301,8 @@ impl<'brillig> Context<'brillig> { let instruction = Self::resolve_instruction(id, block, dfg, dom, constraint_simplification_mapping); - let old_results = dfg.instruction_results(id).to_vec(); + let old_results = dfg.instruction_results(id); + let old_result_count = old_results.len(); // If a copy of this instruction exists earlier in the block, then reuse the previous results. if let Some(cache_result) = @@ -318,10 +316,10 @@ impl<'brillig> Context<'brillig> { let value = *cached.last().unwrap(); let inc_rc = Instruction::IncrementRc { value }; let call_stack = dfg.get_instruction_call_stack_id(id); - dfg.insert_instruction_and_results(inc_rc, block, None, call_stack); + dfg.insert_instruction_and_results(inc_rc, block, call_stack); } - Self::replace_result_ids(dfg, &old_results, cached); + Self::replace_results(dfg, old_results, cached); return; } CacheResult::NeedToHoistToCommonBlock(dominator) => { @@ -337,17 +335,18 @@ impl<'brillig> Context<'brillig> { // First try to inline a call to a brillig function with all constant arguments. let new_results = Self::try_inline_brillig_call_with_all_constants( &instruction, - &old_results, + old_results, block, dfg, self.brillig_info, ) // Otherwise, try inserting the instruction again to apply any optimizations using the newly resolved inputs. 
.unwrap_or_else(|| { - Self::push_instruction(id, instruction.clone(), &old_results, block, dfg) + Self::push_instruction(id, instruction.clone(), old_result_count, block, dfg) }); - Self::replace_result_ids(dfg, &old_results, &new_results); + let old_results = dfg.instruction_results(id); + Self::replace_results(dfg, old_results, &new_results); self.cache_instruction( instruction.clone(), @@ -370,7 +369,7 @@ impl<'brillig> Context<'brillig> { block: BasicBlockId, dfg: &DataFlowGraph, dom: &mut DominatorTree, - constraint_simplification_mapping: &HashMap, + constraint_simplification_mapping: &HashMap, ) -> Instruction { let mut instruction = dfg[instruction_id].clone(); @@ -383,9 +382,9 @@ impl<'brillig> Context<'brillig> { block: BasicBlockId, dfg: &DataFlowGraph, dom: &mut DominatorTree, - cache: &HashMap, - value_id: ValueId, - ) -> ValueId { + cache: &HashMap, + value_id: Value, + ) -> Value { let resolved_id = dfg.resolve(value_id); match cache.get(&resolved_id) { Some(simplification_cache) => { @@ -413,35 +412,31 @@ impl<'brillig> Context<'brillig> { fn push_instruction( id: InstructionId, instruction: Instruction, - old_results: &[ValueId], + old_result_count: usize, block: BasicBlockId, dfg: &mut DataFlowGraph, - ) -> Vec { - let ctrl_typevars = instruction - .requires_ctrl_typevars() - .then(|| vecmap(old_results, |result| dfg.type_of_value(*result))); - + ) -> Vec { let call_stack = dfg.get_instruction_call_stack_id(id); - let new_results = - match dfg.insert_instruction_and_results(instruction, block, ctrl_typevars, call_stack) - { - InsertInstructionResult::SimplifiedTo(new_result) => vec![new_result], - InsertInstructionResult::SimplifiedToMultiple(new_results) => new_results, - InsertInstructionResult::Results(_, new_results) => new_results.to_vec(), - InsertInstructionResult::InstructionRemoved => vec![], - }; - // Optimizations while inserting the instruction should not change the number of results. - assert_eq!(old_results.len(), new_results.len()); + let new_results = match dfg.insert_instruction_and_results(instruction, block, call_stack) { + InsertInstructionResult::SimplifiedTo(new_result) => vec![new_result], + InsertInstructionResult::SimplifiedToMultiple(new_results) => new_results, + InsertInstructionResult::InstructionRemoved => vec![], + InsertInstructionResult::Results { id, result_count } => (0..result_count) + .map(|position| Value::instruction_result(id, position as u16)) + .collect(), + }; + // Optimizations while inserting the instruction should not change the number of results. + assert_eq!(old_result_count, new_results.len()); new_results } fn cache_instruction( &mut self, instruction: Instruction, - instruction_results: Vec, + instruction_results: Vec, function: &Function, - side_effects_enabled_var: ValueId, + side_effects_enabled_var: Value, block: BasicBlockId, ) { if self.use_constraint_info { @@ -449,11 +444,11 @@ impl<'brillig> Context<'brillig> { // to map from the more complex to the simpler value. if let Instruction::Constrain(lhs, rhs, _) = instruction { // These `ValueId`s should be fully resolved now. 
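// --- Illustrative aside (not part of the patch) ---
// A sketch of the constraint-simplification cache's ordering, assuming `simplify` keeps
// its usual behaviour of treating the numeric constant as the "simple" side of a
// constraint. The instruction id is arbitrary and `bool_constant` is the dfg helper used
// elsewhere in this PR.
#[cfg(test)]
fn constraint_simplification_sketch(dfg: &mut DataFlowGraph) {
    let flag = Value::test_instruction_result(3, 0); // some boolean instruction result
    let truth = dfg.bool_constant(true);
    if let Some((complex, simple)) = simplify(flag, truth) {
        // Later sightings of `flag` under the same side-effect condition can be
        // rewritten to `truth` via the cache.
        assert_eq!(complex, flag);
        assert_eq!(simple, truth);
    }
}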
- if let Some((complex, simple)) = simplify(&function.dfg, lhs, rhs) { + if let Some((complex, simple)) = simplify(lhs, rhs) { self.get_constraint_map(side_effects_enabled_var) .entry(complex) .or_default() - .add(&function.dfg, simple, block); + .add(simple, block); } } } @@ -473,7 +468,11 @@ impl<'brillig> Context<'brillig> { self.use_constraint_info && instruction.requires_acir_gen_predicate(&function.dfg); let predicate = use_predicate.then_some(side_effects_enabled_var); - let array_get = Instruction::ArrayGet { array: instruction_results[0], index: *index }; + let array_get = Instruction::ArrayGet { + array: instruction_results[0], + index: *index, + result_type: function.dfg.type_of_value(*value), + }; self.cached_instruction_results .entry(array_get) @@ -511,19 +510,19 @@ impl<'brillig> Context<'brillig> { /// which all depend on the same side effect condition variable. fn get_constraint_map( &mut self, - side_effects_enabled_var: ValueId, - ) -> &mut HashMap { + side_effects_enabled_var: Value, + ) -> &mut HashMap { self.constraint_simplification_mappings.entry(side_effects_enabled_var).or_default() } - /// Replaces a set of [`ValueId`]s inside the [`DataFlowGraph`] with another. - fn replace_result_ids( + /// Replaces a set of [`Value`]s inside the [`DataFlowGraph`] with another. + fn replace_results( dfg: &mut DataFlowGraph, - old_results: &[ValueId], - new_results: &[ValueId], + old_results: impl ExactSizeIterator, + new_results: &[Value], ) { - for (old_result, new_result) in old_results.iter().zip(new_results) { - dfg.set_value_from_id(*old_result, *new_result); + for (old_result, new_result) in old_results.zip(new_results) { + dfg.replace_value(old_result, *new_result); } } @@ -533,7 +532,7 @@ impl<'brillig> Context<'brillig> { dfg: &DataFlowGraph, dom: &mut DominatorTree, instruction: &Instruction, - side_effects_enabled_var: ValueId, + side_effects_enabled_var: Value, block: BasicBlockId, ) -> Option { let results_for_instruction = self.cached_instruction_results.get(instruction)?; @@ -547,11 +546,11 @@ impl<'brillig> Context<'brillig> { /// If so, we can try to evaluate that function and replace the results with the evaluation results. fn try_inline_brillig_call_with_all_constants( instruction: &Instruction, - old_results: &[ValueId], + old_results: impl ExactSizeIterator, block: BasicBlockId, dfg: &mut DataFlowGraph, brillig_info: Option, - ) -> Option> { + ) -> Option> { let evaluation_result = Self::evaluate_const_brillig_call( instruction, brillig_info?.brillig, @@ -564,7 +563,7 @@ impl<'brillig> Context<'brillig> { EvaluationResult::Evaluated(memory_values) => { let mut memory_index = 0; let new_results = vecmap(old_results, |old_result| { - let typ = dfg.type_of_value(*old_result); + let typ = dfg.type_of_value(old_result); Self::new_value_for_type_and_memory_values( typ, block, @@ -587,28 +586,28 @@ impl<'brillig> Context<'brillig> { brillig_functions: &BTreeMap, dfg: &mut DataFlowGraph, ) -> EvaluationResult { - let Instruction::Call { func: func_id, arguments } = instruction else { + let Instruction::Call { func: func_id, arguments, .. 
} = instruction else { return EvaluationResult::NotABrilligCall; }; - let func_value = &dfg[*func_id]; + let func_value = *func_id; let Value::Function(func_id) = func_value else { return EvaluationResult::NotABrilligCall; }; - let Some(func) = brillig_functions.get(func_id) else { + let Some(func) = brillig_functions.get(&func_id) else { return EvaluationResult::NotABrilligCall; }; if !arguments.iter().all(|argument| dfg.is_constant(*argument)) { - return EvaluationResult::CannotEvaluate(*func_id); + return EvaluationResult::CannotEvaluate(func_id); } let mut brillig_arguments = Vec::new(); for argument in arguments { let typ = dfg.type_of_value(*argument); let Some(parameter) = type_to_brillig_parameter(&typ) else { - return EvaluationResult::CannotEvaluate(*func_id); + return EvaluationResult::CannotEvaluate(func_id); }; brillig_arguments.push(parameter); } @@ -617,12 +616,12 @@ impl<'brillig> Context<'brillig> { for return_id in func.returns().iter() { let typ = func.dfg.type_of_value(*return_id); if type_to_brillig_parameter(&typ).is_none() { - return EvaluationResult::CannotEvaluate(*func_id); + return EvaluationResult::CannotEvaluate(func_id); } } let Ok(generated_brillig) = gen_brillig_for(func, brillig_arguments, brillig) else { - return EvaluationResult::CannotEvaluate(*func_id); + return EvaluationResult::CannotEvaluate(func_id); }; let mut calldata = Vec::new(); @@ -638,7 +637,7 @@ impl<'brillig> Context<'brillig> { VM::new(calldata, bytecode, foreign_call_results, &black_box_solver, profiling_active); let vm_status: VMStatus<_> = vm.process_opcodes(); let VMStatus::Finished { return_data_offset, return_data_size } = vm_status else { - return EvaluationResult::CannotEvaluate(*func_id); + return EvaluationResult::CannotEvaluate(func_id); }; let memory = @@ -656,7 +655,7 @@ impl<'brillig> Context<'brillig> { memory_values: &[MemoryValue], memory_index: &mut usize, dfg: &mut DataFlowGraph, - ) -> ValueId { + ) -> Value { match typ { Type::Numeric(typ) => { let memory = memory_values[*memory_index]; @@ -666,7 +665,7 @@ impl<'brillig> Context<'brillig> { MemoryValue::Field(field_value) => field_value, MemoryValue::Integer(u128_value, _) => u128_value.into(), }; - dfg.make_constant(field_value, typ) + dfg.constant(field_value, typ) } Type::Array(types, length) => { let mut new_array_values = Vector::new(); @@ -687,9 +686,9 @@ impl<'brillig> Context<'brillig> { elements: new_array_values, typ: Type::Array(types, length), }; - let instruction_id = dfg.make_instruction(instruction, None); + let instruction_id = dfg.make_instruction(instruction); dfg[block_id].instructions_mut().push(instruction_id); - *dfg.instruction_results(instruction_id).first().unwrap() + Value::instruction_result(instruction_id, 0) } Type::Reference(_) => { panic!("Unexpected reference type in brillig function result") @@ -712,8 +711,8 @@ impl<'brillig> Context<'brillig> { // Should we consider calls to slice_push_back and similar to be mutating operations as well? if let Store { value: array, .. } | ArraySet { array, .. } = instruction { - let instruction = match &function.dfg[*array] { - Value::Instruction { instruction, .. } => &function.dfg[*instruction], + let instruction = match *array { + Value::Instruction { instruction, .. } => &function.dfg[instruction], _ => return, }; @@ -726,7 +725,7 @@ impl<'brillig> Context<'brillig> { impl ResultCache { /// Records that an `Instruction` in block `block` produced the result values `results`. 
- fn cache(&mut self, block: BasicBlockId, results: Vec) { + fn cache(&mut self, block: BasicBlockId, results: Vec) { if self.result.is_none() { self.result = Some((block, results)); } @@ -759,7 +758,7 @@ impl ResultCache { } enum CacheResult<'a> { - Cached(&'a [ValueId]), + Cached(&'a [Value]), NeedToHoistToCommonBlock(BasicBlockId), } @@ -791,7 +790,7 @@ pub(crate) fn type_to_brillig_parameter(typ: &Type) -> Option } } -fn value_id_to_calldata(value_id: ValueId, dfg: &DataFlowGraph, calldata: &mut Vec) { +fn value_id_to_calldata(value_id: Value, dfg: &DataFlowGraph, calldata: &mut Vec) { if let Some(value) = dfg.get_numeric_constant(value_id) { calldata.push(value); return; @@ -810,8 +809,8 @@ fn value_id_to_calldata(value_id: ValueId, dfg: &DataFlowGraph, calldata: &mut V /// Check if one expression is simpler than the other. /// Returns `Some((complex, simple))` if a simplification was found, otherwise `None`. /// Expects the `ValueId`s to be fully resolved. -fn simplify(dfg: &DataFlowGraph, lhs: ValueId, rhs: ValueId) -> Option<(ValueId, ValueId)> { - match (&dfg[lhs], &dfg[rhs]) { +fn simplify(lhs: Value, rhs: Value) -> Option<(Value, Value)> { + match (lhs, rhs) { // Ignore trivial constraints (Value::NumericConstant { .. }, Value::NumericConstant { .. }) => None, @@ -858,10 +857,10 @@ mod test { let instructions = main.dfg[main.entry_block()].instructions(); assert_eq!(instructions.len(), 2); // The final return is not counted - let v0 = main.parameters()[0]; - let two = main.dfg.make_constant(2_u128.into(), NumericType::NativeField); + let v0 = main.parameters().next().unwrap(); + let two = main.dfg.constant(2_u128.into(), NumericType::NativeField); - main.dfg.set_value_from_id(v0, two); + main.dfg.replace_value(v0, two); let expected = " acir(inline) fn main f0 { @@ -891,13 +890,13 @@ mod test { let instructions = main.dfg[main.entry_block()].instructions(); assert_eq!(instructions.len(), 2); // The final return is not counted - let v1 = main.parameters()[1]; + let v1 = main.parameters().nth(1).unwrap(); // Note that this constant guarantees that `v0/constant < 2^8`. We then do not need to truncate the result. let constant = 2_u128.pow(8); - let constant = main.dfg.make_constant(constant.into(), NumericType::unsigned(16)); + let constant = main.dfg.constant(constant.into(), NumericType::unsigned(16)); - main.dfg.set_value_from_id(v1, constant); + main.dfg.replace_value(v1, constant); let expected = " acir(inline) fn main f0 { @@ -929,13 +928,13 @@ mod test { let instructions = main.dfg[main.entry_block()].instructions(); assert_eq!(instructions.len(), 2); // The final return is not counted - let v1 = main.parameters()[1]; + let v1 = main.parameters().nth(1).unwrap(); // Note that this constant does not guarantee that `v0/constant < 2^8`. We must then truncate the result. 
let constant = 2_u128.pow(8) - 1; - let constant = main.dfg.make_constant(constant.into(), NumericType::unsigned(16)); + let constant = main.dfg.constant(constant.into(), NumericType::unsigned(16)); - main.dfg.set_value_from_id(v1, constant); + main.dfg.replace_value(v1, constant); let expected = " acir(inline) fn main f0 { @@ -1154,7 +1153,7 @@ mod test { // Compiling main let mut builder = FunctionBuilder::new("main".into(), main_id); let v0 = builder.add_parameter(Type::unsigned(64)); - let zero = builder.numeric_constant(0u128, NumericType::unsigned(64)); + let zero = builder.constant(0u128.into(), NumericType::unsigned(64)); let typ = Type::Array(Arc::new(vec![Type::unsigned(64)]), 25); let array_contents = im::vector![ @@ -1173,7 +1172,7 @@ mod test { builder.terminate_with_return(Vec::new()); let mut ssa = builder.finish(); - ssa.normalize_ids(); + ssa = ssa.normalize_ids(); println!("{ssa}"); diff --git a/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs b/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs index 7d7798fd30a..c48762bf2f2 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs @@ -4,7 +4,7 @@ //! with a non-literal target can be replaced with a call to an apply function. //! The apply function is a dispatch function that takes the function id as a parameter //! and dispatches to the correct target. -use std::collections::{BTreeMap, BTreeSet, HashSet}; +use std::collections::{BTreeMap, BTreeSet}; use acvm::FieldElement; use iter_extended::vecmap; @@ -14,9 +14,9 @@ use crate::ssa::{ ir::{ basic_block::BasicBlockId, function::{Function, FunctionId, Signature}, - instruction::{BinaryOp, Instruction}, - types::{NumericType, Type}, - value::{Value, ValueId}, + instruction::{BinaryOp, Instruction, InstructionId}, + types::Type, + value::Value, }, ssa_gen::Ssa, }; @@ -76,78 +76,145 @@ impl DefunctionalizationContext { /// Defunctionalize a single function fn defunctionalize(&mut self, func: &mut Function) { - let mut call_target_values = HashSet::new(); + let reachable_blocks = func.reachable_blocks(); - for block_id in func.reachable_blocks() { - let block = &func.dfg[block_id]; - let instructions = block.instructions().to_vec(); + for block_id in &reachable_blocks { + let block_id = *block_id; + let instructions = func.dfg[block_id].take_instructions(); - for instruction_id in instructions { - let instruction = func.dfg[instruction_id].clone(); + for instruction_id in &instructions { + let instruction_id = *instruction_id; let mut replacement_instruction = None; - // Operate on call instructions - let (target_func_id, arguments) = match &instruction { - Instruction::Call { func: target_func_id, arguments } => { - (*target_func_id, arguments) + + // Finally, check to see if this is a call to a non-function literal + // and if so replace the call with a call to the apply function + let (target_func, arguments) = match &func.dfg[instruction_id] { + Instruction::Call { func, arguments, .. } => (*func, arguments), + _ => { + Self::mutate_function_instruction_arguments(func, instruction_id); + continue; } - _ => continue, }; - match func.dfg[target_func_id] { + match target_func { // If the target is a function used as value Value::Param { .. } | Value::Instruction { .. 
} => { let mut arguments = arguments.clone(); let results = func.dfg.instruction_results(instruction_id); + let returns = vecmap(results, |result| func.dfg.type_of_value(result)); + let signature = Signature { params: vecmap(&arguments, |param| func.dfg.type_of_value(*param)), - returns: vecmap(results, |result| func.dfg.type_of_value(*result)), + returns: returns.clone(), }; // Find the correct apply function let apply_function = self.get_apply_function(&signature); // Replace the instruction with a call to apply - let apply_function_value_id = func.dfg.import_function(apply_function.id); + let apply_function_value_id = Value::Function(apply_function.id); if apply_function.dispatches_to_multiple_functions { - arguments.insert(0, target_func_id); + arguments.insert(0, target_func); } let func = apply_function_value_id; - call_target_values.insert(func); - - replacement_instruction = Some(Instruction::Call { func, arguments }); - } - Value::Function(..) => { - call_target_values.insert(target_func_id); + replacement_instruction = + Some(Instruction::Call { func, arguments, result_types: returns }); } _ => {} } + if let Some(new_instruction) = replacement_instruction { func.dfg[instruction_id] = new_instruction; } + + // Change any function literals in this instruction to fields. + // This must be done after using the apply function + Self::mutate_function_instruction_arguments(func, instruction_id); } + + *func.dfg[block_id].instructions_mut() = instructions; } - // Change the type of all the values that are not call targets to NativeField - let value_ids = vecmap(func.dfg.values_iter(), |(id, _)| id); - for value_id in value_ids { - if let Type::Function = func.dfg[value_id].get_type().as_ref() { - match &func.dfg[value_id] { - // If the value is a static function, transform it to the function id - Value::Function(id) => { - if !call_target_values.contains(&value_id) { - let field = NumericType::NativeField; - let new_value = - func.dfg.make_constant(function_id_to_field(*id), field); - func.dfg.set_value_from_id(value_id, new_value); - } + // After changing the values in the function, go back and update block parameter + // and terminator types. This must be done afterward since otherwise it changes the + // type the apply functions search for. Alternatively we could change the apply functions + // type to convert the Function types there to Field preemptively. + for block_id in reachable_blocks { + for parameter in func.dfg[block_id].parameter_types_mut() { + if *parameter == Type::Function { + *parameter = Type::field(); + } + } + + // Then replace the terminator values + let mut terminator = func.dfg[block_id].take_terminator(); + terminator.map_values_mut(|v| Self::function_to_field(func, v)); + func.dfg[block_id].set_terminator(terminator); + } + } + + /// Mutates any function literals used in the given instruction into field literals, + /// and mutates any function types returned into field types + fn mutate_function_instruction_arguments( + function: &mut Function, + instruction_id: InstructionId, + ) { + let mut contains_function = false; + + match &function.dfg[instruction_id] { + // Special case calls to avoid changing `func` to a field + Instruction::Call { func, arguments, result_types } => { + for argument in arguments { + if matches!(argument, Value::Function(_)) { + contains_function = true; + break; } - // If the value is a function used as value, just change the type of it - Value::Instruction { .. } | Value::Param { .. 
} => { - func.dfg.set_type_of_value(value_id, Type::field()); + } + contains_function = contains_function + || result_types.iter().any(|typ| matches!(typ, Type::Function)); + + if contains_function { + let func = *func; + let result_types = vecmap(result_types, Self::function_type_to_field); + let mut arguments = arguments.clone(); + for arg in arguments.iter_mut() { + *arg = Self::function_to_field(function, *arg); } - _ => {} + let new_instruction = Instruction::Call { func, arguments, result_types }; + function.dfg[instruction_id] = new_instruction; } } + other => { + other.for_each_value(|value| { + // All uses of functions outside of Call instructions are higher order + contains_function = contains_function || matches!(value, Value::Function(_)); + }); + other.for_each_type(|typ| { + contains_function = contains_function || matches!(typ, Type::Function); + }); + if contains_function { + let mut instruction = function.dfg[instruction_id].clone(); + instruction.map_values_mut(|v| Self::function_to_field(function, v)); + instruction.map_types_mut(|typ| *typ = Self::function_type_to_field(typ)); + function.dfg[instruction_id] = instruction; + } + } + }; + } + + fn function_type_to_field(typ: &Type) -> Type { + if matches!(typ, Type::Function) { + Type::field() + } else { + typ.clone() + } + } + + fn function_to_field(function: &mut Function, value: Value) -> Value { + if let Value::Function(id) = value { + function.dfg.field_constant((id.to_u32() as usize).into()) + } else { + value } } @@ -193,8 +260,8 @@ fn find_variants(ssa: &Ssa) -> BTreeMap> { fn find_functions_as_values(func: &Function) -> BTreeSet { let mut functions_as_values: BTreeSet = BTreeSet::new(); - let mut process_value = |value_id: ValueId| { - if let Value::Function(id) = func.dfg[value_id] { + let mut process_value = |value_id: Value| { + if let Value::Function(id) = value_id { functions_as_values.insert(id); } }; @@ -229,12 +296,11 @@ fn find_dynamic_dispatches(func: &Function) -> BTreeSet { for instruction_id in block.instructions() { let instruction = &func.dfg[*instruction_id]; match instruction { - Instruction::Call { func: target, arguments } => { - if let Value::Param { .. } | Value::Instruction { .. } = &func.dfg[*target] { - let results = func.dfg.instruction_results(*instruction_id); + Instruction::Call { func: target, arguments, result_types } => { + if let Value::Param { .. } | Value::Instruction { .. 
} = *target { dispatches.insert(Signature { params: vecmap(arguments, |param| func.dfg.type_of_value(*param)), - returns: vecmap(results, |result| func.dfg.type_of_value(*result)), + returns: result_types.clone(), }); } } @@ -288,8 +354,8 @@ fn create_apply_function( let is_last = index == function_ids.len() - 1; let mut next_function_block = None; - let function_id_constant = function_builder - .numeric_constant(function_id_to_field(*function_id), NumericType::NativeField); + let function_id_constant = + function_builder.field_constant(function_id_to_field(*function_id)); // If it's not the last function to dispatch, create an if statement if !is_last { @@ -320,10 +386,10 @@ fn create_apply_function( previous_target_block = Some(target_block); // Call the function - let target_function_value = function_builder.import_function(*function_id); + let target_function_value = Value::Function(*function_id); let call_results = function_builder .insert_call(target_function_value, params_ids.clone(), signature.returns.clone()) - .to_vec(); + .collect(); // Jump to the target block for returning function_builder.terminate_with_jmp(target_block, call_results); diff --git a/compiler/noirc_evaluator/src/ssa/opt/die.rs b/compiler/noirc_evaluator/src/ssa/opt/die.rs index 7b38b764eab..c9d457d8f5e 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/die.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/die.rs @@ -12,7 +12,7 @@ use crate::ssa::{ instruction::{BinaryOp, Instruction, InstructionId, Intrinsic}, post_order::PostOrder, types::{NumericType, Type}, - value::{Value, ValueId}, + value::Value, }, ssa_gen::Ssa, }; @@ -69,7 +69,7 @@ impl Function { /// Per function context for tracking unused values and which instructions to remove. #[derive(Default)] struct Context { - used_values: HashSet, + used_values: HashSet, instructions_to_remove: HashSet, /// IncrementRc & DecrementRc instructions must be revisited after the main DIE pass since @@ -173,12 +173,12 @@ impl Context { let instruction = &function.dfg[instruction_id]; if instruction.can_eliminate_if_unused(function) { - let results = function.dfg.instruction_results(instruction_id); - results.iter().all(|result| !self.used_values.contains(result)) - } else if let Instruction::Call { func, arguments } = instruction { + let mut results = function.dfg.instruction_results(instruction_id); + results.all(|result| !self.used_values.contains(&result)) + } else if let Instruction::Call { func, arguments, .. } = instruction { // TODO: make this more general for instructions which don't have results but have side effects "sometimes" like `Intrinsic::AsWitness` - let as_witness_id = function.dfg.get_intrinsic(Intrinsic::AsWitness); - as_witness_id == Some(func) && !self.used_values.contains(&arguments[0]) + let as_witness = Value::Intrinsic(Intrinsic::AsWitness); + as_witness == *func && !self.used_values.contains(&arguments[0]) } else { // If the instruction has side effects we should never remove it. false @@ -193,10 +193,10 @@ impl Context { } /// Inspects a value and marks all instruction results as used. - fn mark_used_instruction_results(&mut self, dfg: &DataFlowGraph, value_id: ValueId) { - let value_id = dfg.resolve(value_id); - if matches!(&dfg[value_id], Value::Instruction { .. } | Value::Param { .. }) { - self.used_values.insert(value_id); + fn mark_used_instruction_results(&mut self, dfg: &DataFlowGraph, value_id: Value) { + let value = dfg.resolve(value_id); + if matches!(value, Value::Instruction { .. } | Value::Param { .. 
}) { + self.used_values.insert(value); } } @@ -284,7 +284,7 @@ impl Context { // This is an instruction that might be out of bounds: let's add a constrain. let (array, index) = match instruction { - Instruction::ArrayGet { array, index } + Instruction::ArrayGet { array, index, .. } | Instruction::ArraySet { array, index, .. } => (array, index), _ => panic!("Expected an ArrayGet or ArraySet instruction here"), }; @@ -293,33 +293,29 @@ impl Context { let (lhs, rhs) = if function.dfg.get_numeric_constant(*index).is_some() { // If we are here it means the index is known but out of bounds. That's always an error! - let false_const = function.dfg.make_constant(false.into(), NumericType::bool()); - let true_const = function.dfg.make_constant(true.into(), NumericType::bool()); + let false_const = function.dfg.bool_constant(false); + let true_const = function.dfg.bool_constant(true); (false_const, true_const) } else { // `index` will be relative to the flattened array length, so we need to take that into account let array_length = function.dfg.type_of_value(*array).flattened_size(); // If we are here it means the index is dynamic, so let's add a check that it's less than length - let length_type = NumericType::length_type(); let index = function.dfg.insert_instruction_and_results( - Instruction::Cast(*index, length_type), + Instruction::Cast(*index, NumericType::length_type()), block_id, - None, call_stack, ); let index = index.first(); - let array_length = - function.dfg.make_constant((array_length as u128).into(), length_type); + let array_length = function.dfg.length_constant((array_length as u128).into()); let is_index_out_of_bounds = function.dfg.insert_instruction_and_results( Instruction::binary(BinaryOp::Lt, index, array_length), block_id, - None, call_stack, ); let is_index_out_of_bounds = is_index_out_of_bounds.first(); - let true_const = function.dfg.make_constant(true.into(), NumericType::bool()); + let true_const = function.dfg.bool_constant(true); (is_index_out_of_bounds, true_const) }; @@ -336,7 +332,6 @@ impl Context { function.dfg.insert_instruction_and_results( Instruction::Constrain(lhs, rhs, message), block_id, - None, call_stack, ); inserted_check = true; @@ -356,11 +351,11 @@ impl Context { ) -> bool { use Instruction::*; if let IncrementRc { value } | DecrementRc { value } = instruction { - if let Value::Instruction { instruction, .. } = &dfg[*value] { - return match &dfg[*instruction] { + if let Value::Instruction { instruction, .. } = *value { + return match &dfg[instruction] { MakeArray { .. } => true, Call { func, .. } => { - matches!(&dfg[*func], Value::Intrinsic(_) | Value::ForeignFunction(_)) + matches!(func, Value::Intrinsic(_) | Value::ForeignFunction(_)) } _ => false, }; @@ -376,7 +371,7 @@ fn instruction_might_result_in_out_of_bounds( ) -> bool { use Instruction::*; match instruction { - ArrayGet { array, index } | ArraySet { array, index, .. } => { + ArrayGet { array, index, .. } | ArraySet { array, index, .. 
} => { if function.dfg.try_get_array_length(*array).is_some() { if let Some(known_index) = function.dfg.get_numeric_constant(*index) { // `index` will be relative to the flattened array length, so we need to take that into account @@ -399,7 +394,7 @@ fn instruction_might_result_in_out_of_bounds( fn handle_array_get_group( function: &Function, - array: &ValueId, + array: &Value, index: usize, next_out_of_bounds_index: &mut Option, possible_index_out_of_bounds_indexes: &mut Vec, @@ -487,13 +482,13 @@ fn handle_array_get_group( // Given `lhs` and `rhs` values, if there's a side effects condition this will // return (`lhs * condition`, `rhs * condition`), otherwise just (`lhs`, `rhs`) fn apply_side_effects( - side_effects_condition: Option, - lhs: ValueId, - rhs: ValueId, + side_effects_condition: Option, + lhs: Value, + rhs: Value, function: &mut Function, block_id: BasicBlockId, call_stack: CallStackId, -) -> (ValueId, ValueId) { +) -> (Value, Value) { // See if there's an active "enable side effects" condition let Some(condition) = side_effects_condition else { return (lhs, rhs); @@ -504,13 +499,12 @@ fn apply_side_effects( // Condition needs to be cast to argument type in order to multiply them together. // In our case, lhs is always a boolean. let cast = Instruction::Cast(condition, NumericType::bool()); - let casted_condition = dfg.insert_instruction_and_results(cast, block_id, None, call_stack); + let casted_condition = dfg.insert_instruction_and_results(cast, block_id, call_stack); let casted_condition = casted_condition.first(); let lhs = dfg.insert_instruction_and_results( Instruction::binary(BinaryOp::Mul, lhs, casted_condition), block_id, - None, call_stack, ); let lhs = lhs.first(); @@ -518,7 +512,6 @@ fn apply_side_effects( let rhs = dfg.insert_instruction_and_results( Instruction::binary(BinaryOp::Mul, rhs, casted_condition), block_id, - None, call_stack, ); let rhs = rhs.first(); @@ -539,13 +532,13 @@ struct RcTracker { rc_pairs_to_remove: HashSet, // We also separately track all IncrementRc instructions and all array types which have been mutably borrowed. // If an array is the same type as one of those non-mutated array types, we can safely remove all IncrementRc instructions on that array. - inc_rcs: HashMap>, + inc_rcs: HashMap>, mutated_array_types: HashSet, // The SSA often creates patterns where after simplifications we end up with repeat // IncrementRc instructions on the same value. We track whether the previous instruction was an IncrementRc, // and if the current instruction is also an IncrementRc on the same value we remove the current instruction. 
// `None` if the previous instruction was anything other than an IncrementRc - previous_inc_rc: Option, + previous_inc_rc: Option, } impl RcTracker { @@ -776,7 +769,7 @@ mod test { // Compiling main let mut builder = FunctionBuilder::new("main".into(), main_id); - let zero = builder.numeric_constant(0u128, NumericType::unsigned(32)); + let zero = builder.constant(0u128.into(), NumericType::unsigned(32)); let array_type = Type::Array(Arc::new(vec![Type::unsigned(32)]), 2); let v1 = builder.insert_make_array(vector![zero, zero], array_type.clone()); let v2 = builder.insert_allocate(array_type.clone()); @@ -789,7 +782,7 @@ mod test { builder.switch_to_block(b1); let v3 = builder.insert_load(v2, array_type); - let one = builder.numeric_constant(1u128, NumericType::unsigned(32)); + let one = builder.constant(1u128.into(), NumericType::unsigned(32)); let v5 = builder.insert_array_set(v3, zero, one); builder.terminate_with_return(vec![v5]); diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs index afc14eed15e..529dea0a004 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs @@ -133,7 +133,7 @@ //! store v12 at v5 (new store) use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet}; -use acvm::{acir::AcirField, acir::BlackBoxFunc, FieldElement}; +use acvm::acir::BlackBoxFunc; use iter_extended::vecmap; use crate::ssa::{ @@ -141,12 +141,12 @@ use crate::ssa::{ basic_block::BasicBlockId, call_stack::CallStackId, cfg::ControlFlowGraph, - dfg::InsertInstructionResult, function::{Function, FunctionId, RuntimeType}, function_inserter::FunctionInserter, + instruction::insert_result::InsertInstructionResult, instruction::{BinaryOp, Instruction, InstructionId, Intrinsic, TerminatorInstruction}, - types::{NumericType, Type}, - value::{Value, ValueId}, + types::Type, + value::Value, }, ssa_gen::Ssa, }; @@ -196,12 +196,12 @@ struct Context<'f> { /// Maps SSA array values with a slice type to their size. /// This is maintained by appropriate calls to the `SliceCapacityTracker` and is used by the `ValueMerger`. - slice_sizes: HashMap, + slice_sizes: HashMap, /// Stack of block arguments /// When processing a block, we pop this stack to get its arguments /// and at the end we push the arguments for his successor - arguments_stack: Vec>, + arguments_stack: Vec>, /// Stores all allocations local to the current branch. /// @@ -210,7 +210,7 @@ struct Context<'f> { /// the other branch since there is no such value. /// /// The `ValueId` here is that which is returned by the allocate instruction. - local_allocations: HashSet, + local_allocations: HashSet, } #[derive(Clone)] @@ -218,16 +218,16 @@ struct ConditionalBranch { // Contains the last processed block during the processing of the branch. 
last_block: BasicBlockId, // The unresolved condition of the branch - old_condition: ValueId, + old_condition: Value, // The condition of the branch - condition: ValueId, + condition: Value, // The allocations accumulated when processing the branch - local_allocations: HashSet, + local_allocations: HashSet, } struct ConditionalContext { // Condition from the conditional statement - condition: ValueId, + condition: Value, // Block containing the conditional statement entry_block: BasicBlockId, // First block of the then branch @@ -281,20 +281,20 @@ impl<'f> Context<'f> { /// Returns the updated condition so that /// it is 'AND-ed' with the previous condition (if any) - fn link_condition(&mut self, condition: ValueId) -> ValueId { + fn link_condition(&mut self, condition: Value) -> Value { // Retrieve the previous condition if let Some(context) = self.condition_stack.last() { let previous_branch = context.else_branch.as_ref().unwrap_or(&context.then_branch); let and = Instruction::binary(BinaryOp::And, previous_branch.condition, condition); let call_stack = self.inserter.function.dfg.get_value_call_stack_id(condition); - self.insert_instruction(and, call_stack) + self.insert_instruction(and, call_stack).first() } else { condition } } /// Returns the current condition - fn get_last_condition(&self) -> Option { + fn get_last_condition(&self) -> Option { self.condition_stack.last().map(|context| match &context.else_branch { Some(else_branch) => else_branch.condition, None => context.then_branch.condition, @@ -308,10 +308,10 @@ impl<'f> Context<'f> { instruction: &InstructionId, ) -> bool { let mut result = false; - if let Instruction::Call { func, .. } = self.inserter.function.dfg[*instruction] { - if let Value::Function(fid) = self.inserter.function.dfg[func] { - result = *no_predicates.get(&fid).unwrap_or(&false); - } + if let Instruction::Call { func: Value::Function(fid), .. 
} = + &self.inserter.function.dfg[*instruction] + { + result = *no_predicates.get(fid).unwrap_or(&false); } result } @@ -333,11 +333,9 @@ impl<'f> Context<'f> { for instruction in instructions.iter() { if self.is_no_predicate(no_predicates, instruction) { // disable side effect for no_predicate functions - let bool_type = NumericType::bool(); - let one = self.inserter.function.dfg.make_constant(FieldElement::one(), bool_type); - self.insert_instruction_with_typevars( + let one = self.inserter.function.dfg.bool_constant(true); + self.insert_instruction( Instruction::EnableSideEffectsIf { condition: one }, - None, CallStackId::root(), ); self.push_instruction(*instruction); @@ -397,7 +395,7 @@ impl<'f> Context<'f> { /// Process a conditional statement fn if_start( &mut self, - condition: &ValueId, + condition: &Value, then_destination: &BasicBlockId, else_destination: &BasicBlockId, if_entry: &BasicBlockId, @@ -443,8 +441,9 @@ impl<'f> Context<'f> { let condition_call_stack = self.inserter.function.dfg.get_value_call_stack_id(cond_context.condition); - let else_condition = - self.insert_instruction(Instruction::Not(cond_context.condition), condition_call_stack); + let else_condition = self + .insert_instruction(Instruction::Not(cond_context.condition), condition_call_stack) + .first(); let else_condition = self.link_condition(else_condition); let old_allocations = std::mem::take(&mut self.local_allocations); @@ -515,10 +514,10 @@ impl<'f> Context<'f> { let mut else_args = Vec::new(); if cond_context.else_branch.is_some() { let last_else = cond_context.else_branch.clone().unwrap().last_block; - else_args = self.inserter.function.dfg[last_else].terminator_arguments().to_vec(); + else_args = self.inserter.function.dfg[last_else].jmp_arguments().to_vec(); } - let then_args = self.inserter.function.dfg[last_then].terminator_arguments().to_vec(); + let then_args = self.inserter.function.dfg[last_then].jmp_arguments().to_vec(); let params = self.inserter.function.dfg.block_parameters(destination); assert_eq!(params.len(), then_args.len()); @@ -530,7 +529,7 @@ impl<'f> Context<'f> { let else_condition = if let Some(branch) = cond_context.else_branch { branch.condition } else { - self.inserter.function.dfg.make_constant(FieldElement::zero(), NumericType::bool()) + self.inserter.function.dfg.bool_constant(false) }; let block = self.inserter.function.entry_block(); @@ -546,7 +545,7 @@ impl<'f> Context<'f> { self.inserter .function .dfg - .insert_instruction_and_results(instruction, block, None, call_stack) + .insert_instruction_and_results(instruction, block, call_stack) .first() }); @@ -559,32 +558,14 @@ impl<'f> Context<'f> { /// Insert a new instruction into the function's entry block. /// Unlike push_instruction, this function will not map any ValueIds. /// within the given instruction, nor will it modify self.values in any way. - fn insert_instruction(&mut self, instruction: Instruction, call_stack: CallStackId) -> ValueId { - let block = self.inserter.function.entry_block(); - self.inserter - .function - .dfg - .insert_instruction_and_results(instruction, block, None, call_stack) - .first() - } - - /// Inserts a new instruction into the function's entry block, using the given - /// control type variables to specify result types if needed. - /// Unlike push_instruction, this function will not map any ValueIds. - /// within the given instruction, nor will it modify self.values in any way. 
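// A minimal illustrative sketch (plain booleans, hypothetical helper names) of the
// predicate composition this flattening pass performs: a nested branch's effective
// predicate is its own condition AND-ed with the enclosing predicate, and the else
// branch starts from the negated condition before the same AND step.
fn nested_predicate(parent: Option<bool>, condition: bool) -> bool {
    parent.map_or(condition, |p| p && condition)
}

fn else_predicate(parent: Option<bool>, condition: bool) -> bool {
    nested_predicate(parent, !condition)
}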
- fn insert_instruction_with_typevars( + fn insert_instruction( &mut self, instruction: Instruction, - ctrl_typevars: Option>, call_stack: CallStackId, ) -> InsertInstructionResult { let block = self.inserter.function.entry_block(); - self.inserter.function.dfg.insert_instruction_and_results( - instruction, - block, - ctrl_typevars, - call_stack, - ) + let dfg = &mut self.inserter.function.dfg; + dfg.insert_instruction_and_results(instruction, block, call_stack) } /// Checks the branch condition on the top of the stack and uses it to build and insert an @@ -595,13 +576,11 @@ impl<'f> Context<'f> { fn insert_current_side_effects_enabled(&mut self) { let condition = match self.get_last_condition() { Some(cond) => cond, - None => { - self.inserter.function.dfg.make_constant(FieldElement::one(), NumericType::bool()) - } + None => self.inserter.function.dfg.bool_constant(true), }; let enable_side_effects = Instruction::EnableSideEffectsIf { condition }; let call_stack = self.inserter.function.dfg.get_value_call_stack_id(condition); - self.insert_instruction_with_typevars(enable_side_effects, None, call_stack); + self.insert_instruction(enable_side_effects, call_stack); } /// Push the given instruction to the end of the entry block of the current function. @@ -618,7 +597,7 @@ impl<'f> Context<'f> { let (instruction, call_stack) = self.inserter.map_instruction(id); let instruction = self.handle_instruction_side_effects(instruction, call_stack); - let instruction_is_allocate = matches!(&instruction, Instruction::Allocate); + let instruction_is_allocate = matches!(&instruction, Instruction::Allocate { .. }); let entry = self.inserter.function.entry_block(); let results = self.inserter.push_instruction_value(instruction, id, entry, call_stack); @@ -645,16 +624,20 @@ impl<'f> Context<'f> { let argument_type = self.inserter.function.dfg.type_of_value(lhs); let cast = Instruction::Cast(condition, argument_type.unwrap_numeric()); - let casted_condition = self.insert_instruction(cast, call_stack); + let casted_condition = self.insert_instruction(cast, call_stack).first(); - let lhs = self.insert_instruction( - Instruction::binary(BinaryOp::Mul, lhs, casted_condition), - call_stack, - ); - let rhs = self.insert_instruction( - Instruction::binary(BinaryOp::Mul, rhs, casted_condition), - call_stack, - ); + let lhs = self + .insert_instruction( + Instruction::binary(BinaryOp::Mul, lhs, casted_condition), + call_stack, + ) + .first(); + let rhs = self + .insert_instruction( + Instruction::binary(BinaryOp::Mul, rhs, casted_condition), + call_stack, + ) + .first(); Instruction::Constrain(lhs, rhs, message) } @@ -666,13 +649,12 @@ impl<'f> Context<'f> { } else { // Instead of storing `value`, store `if condition { value } else { previous_value }` let typ = self.inserter.function.dfg.type_of_value(value); - let load = Instruction::Load { address }; - let previous_value = self - .insert_instruction_with_typevars(load, Some(vec![typ]), call_stack) - .first(); + let load = Instruction::Load { address, result_type: typ }; + let previous_value = self.insert_instruction(load, call_stack).first(); - let else_condition = - self.insert_instruction(Instruction::Not(condition), call_stack); + let else_condition = self + .insert_instruction(Instruction::Not(condition), call_stack) + .first(); let instruction = Instruction::IfElse { then_condition: condition, @@ -681,7 +663,8 @@ impl<'f> Context<'f> { else_value: previous_value, }; - let updated_value = self.insert_instruction(instruction, call_stack); + let updated_value = + 
self.insert_instruction(instruction, call_stack).first(); Instruction::Store { address, value: updated_value } } } @@ -691,62 +674,68 @@ impl<'f> Context<'f> { // Condition needs to be cast to argument type in order to multiply them together. let argument_type = self.inserter.function.dfg.type_of_value(value); let cast = Instruction::Cast(condition, argument_type.unwrap_numeric()); - let casted_condition = self.insert_instruction(cast, call_stack); + let casted_condition = self.insert_instruction(cast, call_stack).first(); - let value = self.insert_instruction( - Instruction::binary(BinaryOp::Mul, value, casted_condition), - call_stack, - ); + let value = self + .insert_instruction( + Instruction::binary(BinaryOp::Mul, value, casted_condition), + call_stack, + ) + .first(); Instruction::RangeCheck { value, max_bit_size, assert_message } } - Instruction::Call { func, mut arguments } => match self.inserter.function.dfg[func] - { - Value::Intrinsic(Intrinsic::ToBits(_) | Intrinsic::ToRadix(_)) => { - let field = arguments[0]; - let argument_type = self.inserter.function.dfg.type_of_value(field); - - let cast = Instruction::Cast(condition, argument_type.unwrap_numeric()); - let casted_condition = self.insert_instruction(cast, call_stack); - let field = self.insert_instruction( - Instruction::binary(BinaryOp::Mul, field, casted_condition), - call_stack, - ); - - arguments[0] = field; - - Instruction::Call { func, arguments } - } - //Issue #5045: We set curve points to infinity if condition is false - Value::Intrinsic(Intrinsic::BlackBox(BlackBoxFunc::EmbeddedCurveAdd)) => { - arguments[2] = self.var_or_one(arguments[2], condition, call_stack); - arguments[5] = self.var_or_one(arguments[5], condition, call_stack); - - Instruction::Call { func, arguments } - } - Value::Intrinsic(Intrinsic::BlackBox(BlackBoxFunc::MultiScalarMul)) => { - let points_array_idx = if matches!( - self.inserter.function.dfg.type_of_value(arguments[0]), - Type::Array { .. 
} - ) { - 0 - } else { - // if the first argument is not an array, we assume it is a slice - // which means the array is the second argument - 1 - }; - let (elements, typ) = self.apply_predicate_to_msm_argument( - arguments[points_array_idx], - condition, - call_stack, - ); - - let instruction = Instruction::MakeArray { elements, typ }; - let array = self.insert_instruction(instruction, call_stack); - arguments[points_array_idx] = array; - Instruction::Call { func, arguments } + Instruction::Call { func, mut arguments, result_types } => { + match func { + Value::Intrinsic(Intrinsic::ToBits(_) | Intrinsic::ToRadix(_)) => { + let field = arguments[0]; + let argument_type = self.inserter.function.dfg.type_of_value(field); + + let cast = Instruction::Cast(condition, argument_type.unwrap_numeric()); + let casted_condition = + self.insert_instruction(cast, call_stack).first(); + let field = self + .insert_instruction( + Instruction::binary(BinaryOp::Mul, field, casted_condition), + call_stack, + ) + .first(); + + arguments[0] = field; + + Instruction::Call { func, arguments, result_types } + } + //Issue #5045: We set curve points to infinity if condition is false + Value::Intrinsic(Intrinsic::BlackBox(BlackBoxFunc::EmbeddedCurveAdd)) => { + arguments[2] = self.var_or_one(arguments[2], condition, call_stack); + arguments[5] = self.var_or_one(arguments[5], condition, call_stack); + + Instruction::Call { func, arguments, result_types } + } + Value::Intrinsic(Intrinsic::BlackBox(BlackBoxFunc::MultiScalarMul)) => { + let points_array_idx = if matches!( + self.inserter.function.dfg.type_of_value(arguments[0]), + Type::Array { .. } + ) { + 0 + } else { + // if the first argument is not an array, we assume it is a slice + // which means the array is the second argument + 1 + }; + let (elements, typ) = self.apply_predicate_to_msm_argument( + arguments[points_array_idx], + condition, + call_stack, + ); + + let instruction = Instruction::MakeArray { elements, typ }; + let array = self.insert_instruction(instruction, call_stack).first(); + arguments[points_array_idx] = array; + Instruction::Call { func, arguments, result_types } + } + _ => Instruction::Call { func, arguments, result_types }, } - _ => Instruction::Call { func, arguments }, - }, + } other => other, } } else { @@ -759,10 +748,10 @@ impl<'f> Context<'f> { /// that the points will be on the curve no matter what. 
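// A minimal sketch, assuming conditions are encoded as 0/1 values: multiplying by
// the predicate nullifies a value on the inactive branch, and the `var_or_one`
// helper keeps the value when the predicate is 1 but degrades to 1 otherwise
// (the behaviour the EmbeddedCurveAdd handling relies on). Names here are
// illustrative, not the compiler's API.
fn apply_predicate(value: u64, predicate: u64) -> u64 {
    debug_assert!(predicate <= 1);
    value * predicate
}

fn var_or_one(value: u64, predicate: u64) -> u64 {
    debug_assert!(predicate <= 1);
    // value * c + (1 - c): `value` if c == 1, otherwise 1
    value * predicate + (1 - predicate)
}

fn main() {
    assert_eq!(var_or_one(5, 1), 5);
    assert_eq!(var_or_one(5, 0), 1);
}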
fn apply_predicate_to_msm_argument( &mut self, - argument: ValueId, - predicate: ValueId, + argument: Value, + predicate: Value, call_stack: CallStackId, - ) -> (im::Vector, Type) { + ) -> (im::Vector, Type) { let array_typ; let mut array_with_predicate = im::Vector::new(); if let Some((array, typ)) = &self.inserter.function.dfg.get_array_constant(argument) { @@ -785,14 +774,16 @@ impl<'f> Context<'f> { } // Computes: if condition { var } else { 1 } - fn var_or_one(&mut self, var: ValueId, condition: ValueId, call_stack: CallStackId) -> ValueId { - let field = - self.insert_instruction(Instruction::binary(BinaryOp::Mul, var, condition), call_stack); - let not_condition = self.insert_instruction(Instruction::Not(condition), call_stack); + fn var_or_one(&mut self, var: Value, condition: Value, call_stack: CallStackId) -> Value { + let mul = Instruction::binary(BinaryOp::Mul, var, condition); + let field = self.insert_instruction(mul, call_stack).first(); + let not_condition = + self.insert_instruction(Instruction::Not(condition), call_stack).first(); self.insert_instruction( Instruction::binary(BinaryOp::Add, field, not_condition), call_stack, ) + .first() } } @@ -808,7 +799,7 @@ mod test { instruction::{BinaryOp, Instruction, TerminatorInstruction}, map::Id, types::Type, - value::{Value, ValueId}, + value::Value, }, opt::assert_normalized_ssa_equals, Ssa, @@ -1163,7 +1154,7 @@ mod test { instructions.iter().position(|id| predicate(&main.dfg[*id])).unwrap() }; - let allocate_index = find_instruction(|i| matches!(i, Instruction::Allocate)); + let allocate_index = find_instruction(|i| matches!(i, Instruction::Allocate { .. })); let store_index = find_instruction(|i| matches!(i, Instruction::Store { .. })); let load_index = find_instruction(|i| matches!(i, Instruction::Load { .. })); @@ -1185,9 +1176,9 @@ mod test { /// Calling this function on v3 will return [2, 6]. fn get_all_constants_reachable_from_instruction( dfg: &DataFlowGraph, - value: ValueId, + value: Value, ) -> Vec { - match dfg[value] { + match value { Value::Instruction { instruction, .. } => { let mut constants = vec![]; @@ -1199,7 +1190,7 @@ mod test { constants.dedup(); constants } - Value::NumericConstant { constant, .. } => vec![constant.to_u128()], + Value::NumericConstant { constant, .. 
} => vec![dfg[constant].to_u128()], _ => Vec::new(), } } @@ -1373,12 +1364,12 @@ mod test { let b4 = builder.insert_block(); let b5 = builder.insert_block(); - let zero = builder.field_constant(0u128); - let one = builder.field_constant(1u128); - let two = builder.field_constant(2u128); - let four = builder.field_constant(4u128); - let ten = builder.field_constant(10u128); - let one_hundred = builder.field_constant(100u128); + let zero = builder.field_constant(0u128.into()); + let one = builder.field_constant(1u128.into()); + let two = builder.field_constant(2u128.into()); + let four = builder.field_constant(4u128.into()); + let ten = builder.field_constant(10u128.into()); + let one_hundred = builder.field_constant(100u128.into()); let v0 = builder.insert_allocate(Type::field()); builder.insert_store(v0, zero); diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs index a01be691778..baee6f7636c 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs @@ -2,7 +2,7 @@ use crate::ssa::ir::{ dfg::DataFlowGraph, instruction::{Instruction, Intrinsic}, types::Type, - value::{Value, ValueId}, + value::Value, }; use acvm::{acir::AcirField, FieldElement}; @@ -21,8 +21,8 @@ impl<'a> SliceCapacityTracker<'a> { pub(crate) fn collect_slice_information( &self, instruction: &Instruction, - slice_sizes: &mut HashMap, - results: &[ValueId], + slice_sizes: &mut HashMap, + results: &[Value], ) { match instruction { Instruction::ArrayGet { array, .. } => { @@ -53,75 +53,70 @@ impl<'a> SliceCapacityTracker<'a> { slice_sizes.insert(results[0], *capacity); } } - Instruction::Call { func, arguments } => { - let func = &self.dfg[*func]; - if let Value::Intrinsic(intrinsic) = func { - let (argument_index, result_index) = match intrinsic { - Intrinsic::SlicePushBack - | Intrinsic::SlicePushFront - | Intrinsic::SlicePopBack - | Intrinsic::SliceInsert - | Intrinsic::SliceRemove => (1, 1), - // `pop_front` returns the popped element, and then the respective slice. - // This means in the case of a slice with structs, the result index of the popped slice - // will change depending on the number of elements in the struct. - // For example, a slice with four elements will look as such in SSA: - // v3, v4, v5, v6, v7, v8 = call slice_pop_front(v1, v2) - // where v7 is the slice length and v8 is the popped slice itself. - Intrinsic::SlicePopFront => (1, results.len() - 1), - Intrinsic::AsSlice => (0, 1), - _ => return, - }; - let result_slice = results[result_index]; - match intrinsic { - Intrinsic::SlicePushBack - | Intrinsic::SlicePushFront - | Intrinsic::SliceInsert => { - let slice_contents = arguments[argument_index]; + Instruction::Call { func: Value::Intrinsic(intrinsic), arguments, result_types: _ } => { + let (argument_index, result_index) = match intrinsic { + Intrinsic::SlicePushBack + | Intrinsic::SlicePushFront + | Intrinsic::SlicePopBack + | Intrinsic::SliceInsert + | Intrinsic::SliceRemove => (1, 1), + // `pop_front` returns the popped element, and then the respective slice. + // This means in the case of a slice with structs, the result index of the popped slice + // will change depending on the number of elements in the struct. 
+ // For example, a slice with four elements will look as such in SSA: + // v3, v4, v5, v6, v7, v8 = call slice_pop_front(v1, v2) + // where v7 is the slice length and v8 is the popped slice itself. + Intrinsic::SlicePopFront => (1, results.len() - 1), + Intrinsic::AsSlice => (0, 1), + _ => return, + }; + let result_slice = results[result_index]; + match intrinsic { + Intrinsic::SlicePushBack + | Intrinsic::SlicePushFront + | Intrinsic::SliceInsert => { + let slice_contents = arguments[argument_index]; - for arg in &arguments[(argument_index + 1)..] { - let element_typ = self.dfg.type_of_value(*arg); - if element_typ.contains_slice_element() { - self.compute_slice_capacity(*arg, slice_sizes); - } - } - - if let Some(contents_capacity) = slice_sizes.get(&slice_contents) { - let new_capacity = *contents_capacity + 1; - slice_sizes.insert(result_slice, new_capacity); + for arg in &arguments[(argument_index + 1)..] { + let element_typ = self.dfg.type_of_value(*arg); + if element_typ.contains_slice_element() { + self.compute_slice_capacity(*arg, slice_sizes); } } - Intrinsic::SlicePopBack - | Intrinsic::SliceRemove - | Intrinsic::SlicePopFront => { - let slice_contents = arguments[argument_index]; - if let Some(contents_capacity) = slice_sizes.get(&slice_contents) { - // We use a saturating sub here as calling `pop_front` or `pop_back` - // on a zero-length slice would otherwise underflow. - let new_capacity = contents_capacity.saturating_sub(1); - slice_sizes.insert(result_slice, new_capacity); - } - } - Intrinsic::ToBits(_) => { - // Compiler sanity check - assert!(matches!(self.dfg.type_of_value(result_slice), Type::Slice(_))); - slice_sizes.insert(result_slice, FieldElement::max_num_bits()); - } - Intrinsic::ToRadix(_) => { - // Compiler sanity check - assert!(matches!(self.dfg.type_of_value(result_slice), Type::Slice(_))); - slice_sizes.insert(result_slice, FieldElement::max_num_bytes()); + if let Some(contents_capacity) = slice_sizes.get(&slice_contents) { + let new_capacity = *contents_capacity + 1; + slice_sizes.insert(result_slice, new_capacity); } - Intrinsic::AsSlice => { - let array_size = self - .dfg - .try_get_array_length(arguments[argument_index]) - .expect("ICE: Should be have an array length for AsSlice input"); - slice_sizes.insert(result_slice, array_size); + } + Intrinsic::SlicePopBack | Intrinsic::SliceRemove | Intrinsic::SlicePopFront => { + let slice_contents = arguments[argument_index]; + + if let Some(contents_capacity) = slice_sizes.get(&slice_contents) { + // We use a saturating sub here as calling `pop_front` or `pop_back` + // on a zero-length slice would otherwise underflow. 
+ let new_capacity = contents_capacity.saturating_sub(1); + slice_sizes.insert(result_slice, new_capacity); } - _ => {} } + Intrinsic::ToBits(_) => { + // Compiler sanity check + assert!(matches!(self.dfg.type_of_value(result_slice), Type::Slice(_))); + slice_sizes.insert(result_slice, FieldElement::max_num_bits()); + } + Intrinsic::ToRadix(_) => { + // Compiler sanity check + assert!(matches!(self.dfg.type_of_value(result_slice), Type::Slice(_))); + slice_sizes.insert(result_slice, FieldElement::max_num_bytes()); + } + Intrinsic::AsSlice => { + let array_size = self + .dfg + .try_get_array_length(arguments[argument_index]) + .expect("ICE: Should be have an array length for AsSlice input"); + slice_sizes.insert(result_slice, array_size); + } + _ => {} } } Instruction::Store { address, value } => { @@ -136,9 +131,8 @@ impl<'a> SliceCapacityTracker<'a> { slice_sizes.insert(*address, *value_capacity); } } - Instruction::Load { address } => { - let load_typ = self.dfg.type_of_value(*address); - if load_typ.contains_slice_element() { + Instruction::Load { address, result_type } => { + if result_type.contains_slice_element() { let result = results[0]; let address_capacity = slice_sizes.get(address).unwrap_or_else(|| { @@ -155,8 +149,8 @@ impl<'a> SliceCapacityTracker<'a> { /// Computes the starting capacity of a slice which is still a `Value::Array` pub(crate) fn compute_slice_capacity( &self, - array_id: ValueId, - slice_sizes: &mut HashMap, + array_id: Value, + slice_sizes: &mut HashMap, ) { if let Some((array, typ)) = self.dfg.get_array_constant(array_id) { // Compiler sanity check diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs index df351d6c0cd..f0e0a07649f 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs @@ -4,23 +4,24 @@ use fxhash::{FxHashMap as HashMap, FxHashSet}; use crate::ssa::ir::{ basic_block::BasicBlockId, call_stack::CallStackId, - dfg::{DataFlowGraph, InsertInstructionResult}, + dfg::DataFlowGraph, + instruction::insert_result::InsertInstructionResult, instruction::{BinaryOp, Instruction}, - types::{NumericType, Type}, - value::{Value, ValueId}, + types::Type, + value::Value, }; pub(crate) struct ValueMerger<'a> { dfg: &'a mut DataFlowGraph, block: BasicBlockId, - current_condition: Option, + current_condition: Option, // Maps SSA array values with a slice type to their size. // This must be computed before merging values. - slice_sizes: &'a mut HashMap, + slice_sizes: &'a mut HashMap, - array_set_conditionals: &'a mut HashMap, + array_set_conditionals: &'a mut HashMap, call_stack: CallStackId, } @@ -29,9 +30,9 @@ impl<'a> ValueMerger<'a> { pub(crate) fn new( dfg: &'a mut DataFlowGraph, block: BasicBlockId, - slice_sizes: &'a mut HashMap, - array_set_conditionals: &'a mut HashMap, - current_condition: Option, + slice_sizes: &'a mut HashMap, + array_set_conditionals: &'a mut HashMap, + current_condition: Option, call_stack: CallStackId, ) -> Self { ValueMerger { @@ -54,11 +55,11 @@ impl<'a> ValueMerger<'a> { /// as it is less clear how to merge these. 
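// A minimal sketch of the arithmetic behind merging the two sides of
// `if c { a } else { b }` once the CFG is flattened, assuming 0/1 conditions
// (in the un-nested case, else_condition is simply 1 - then_condition):
// merge_numeric(1, 0, a, b) == a and merge_numeric(0, 1, a, b) == b.
fn merge_numeric(then_condition: u64, else_condition: u64, a: u64, b: u64) -> u64 {
    then_condition * a + else_condition * b
}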
pub(crate) fn merge_values( &mut self, - then_condition: ValueId, - else_condition: ValueId, - then_value: ValueId, - else_value: ValueId, - ) -> ValueId { + then_condition: Value, + else_condition: Value, + then_value: Value, + else_value: Value, + ) -> Value { let then_value = self.dfg.resolve(then_value); let else_value = self.dfg.resolve(else_value); @@ -91,11 +92,11 @@ impl<'a> ValueMerger<'a> { pub(crate) fn merge_numeric_values( dfg: &mut DataFlowGraph, block: BasicBlockId, - then_condition: ValueId, - else_condition: ValueId, - then_value: ValueId, - else_value: ValueId, - ) -> ValueId { + then_condition: Value, + else_condition: Value, + then_value: Value, + else_value: Value, + ) -> Value { let then_type = dfg.type_of_value(then_value).unwrap_numeric(); let else_type = dfg.type_of_value(else_value).unwrap_numeric(); assert_eq!( @@ -114,21 +115,19 @@ impl<'a> ValueMerger<'a> { // We must cast the bool conditions to the actual numeric type used by each value. let cast = Instruction::Cast(then_condition, then_type); - let then_condition = - dfg.insert_instruction_and_results(cast, block, None, call_stack).first(); + let then_condition = dfg.insert_instruction_and_results(cast, block, call_stack).first(); let cast = Instruction::Cast(else_condition, else_type); - let else_condition = - dfg.insert_instruction_and_results(cast, block, None, call_stack).first(); + let else_condition = dfg.insert_instruction_and_results(cast, block, call_stack).first(); let mul = Instruction::binary(BinaryOp::Mul, then_condition, then_value); - let then_value = dfg.insert_instruction_and_results(mul, block, None, call_stack).first(); + let then_value = dfg.insert_instruction_and_results(mul, block, call_stack).first(); let mul = Instruction::binary(BinaryOp::Mul, else_condition, else_value); - let else_value = dfg.insert_instruction_and_results(mul, block, None, call_stack).first(); + let else_value = dfg.insert_instruction_and_results(mul, block, call_stack).first(); let add = Instruction::binary(BinaryOp::Add, then_value, else_value); - dfg.insert_instruction_and_results(add, block, None, call_stack).first() + dfg.insert_instruction_and_results(add, block, call_stack).first() } /// Given an if expression that returns an array: `if c { array1 } else { array2 }`, @@ -137,11 +136,11 @@ impl<'a> ValueMerger<'a> { pub(crate) fn merge_array_values( &mut self, typ: Type, - then_condition: ValueId, - else_condition: ValueId, - then_value: ValueId, - else_value: ValueId, - ) -> ValueId { + then_condition: Value, + else_condition: Value, + then_value: Value, + else_value: Value, + ) -> Value { let mut merged = im::Vector::new(); let (element_types, len) = match &typ { @@ -165,19 +164,18 @@ impl<'a> ValueMerger<'a> { for (element_index, element_type) in element_types.iter().enumerate() { let index = ((i * element_types.len() as u32 + element_index as u32) as u128).into(); - let index = self.dfg.make_constant(index, NumericType::NativeField); + let index = self.dfg.field_constant(index); - let typevars = Some(vec![element_type.clone()]); - - let mut get_element = |array, typevars| { - let get = Instruction::ArrayGet { array, index }; + let mut get_element = |array| { + let result_type = element_type.clone(); + let get = Instruction::ArrayGet { array, index, result_type }; self.dfg - .insert_instruction_and_results(get, self.block, typevars, self.call_stack) + .insert_instruction_and_results(get, self.block, self.call_stack) .first() }; - let then_element = get_element(then_value, typevars.clone()); - let 
else_element = get_element(else_value, typevars); + let then_element = get_element(then_value); + let else_element = get_element(else_value); merged.push_back(self.merge_values( then_condition, @@ -189,19 +187,17 @@ impl<'a> ValueMerger<'a> { } let instruction = Instruction::MakeArray { elements: merged, typ }; - self.dfg - .insert_instruction_and_results(instruction, self.block, None, self.call_stack) - .first() + self.dfg.insert_instruction_and_results(instruction, self.block, self.call_stack).first() } fn merge_slice_values( &mut self, typ: Type, - then_condition: ValueId, - else_condition: ValueId, - then_value_id: ValueId, - else_value_id: ValueId, - ) -> ValueId { + then_condition: Value, + else_condition: Value, + then_value_id: Value, + else_value_id: Value, + ) -> Value { let mut merged = im::Vector::new(); let element_types = match &typ { @@ -229,35 +225,25 @@ impl<'a> ValueMerger<'a> { for (element_index, element_type) in element_types.iter().enumerate() { let index_u32 = i * element_types.len() as u32 + element_index as u32; let index_value = (index_u32 as u128).into(); - let index = self.dfg.make_constant(index_value, NumericType::NativeField); - - let typevars = Some(vec![element_type.clone()]); + let index = self.dfg.field_constant(index_value); - let mut get_element = |array, typevars, len| { + let mut get_element = |array, len| { // The smaller slice is filled with placeholder data. Codegen for slice accesses must // include checks against the dynamic slice length so that this placeholder data is not incorrectly accessed. if len <= index_u32 { self.make_slice_dummy_data(element_type) } else { - let get = Instruction::ArrayGet { array, index }; + let result_type = element_type.clone(); + let get = Instruction::ArrayGet { array, index, result_type }; self.dfg - .insert_instruction_and_results( - get, - self.block, - typevars, - self.call_stack, - ) + .insert_instruction_and_results(get, self.block, self.call_stack) .first() } }; - let then_element = get_element( - then_value_id, - typevars.clone(), - then_len * element_types.len() as u32, - ); - let else_element = - get_element(else_value_id, typevars, else_len * element_types.len() as u32); + let element_count = element_types.len() as u32; + let then_element = get_element(then_value_id, then_len * element_count); + let else_element = get_element(else_value_id, else_len * element_count); merged.push_back(self.merge_values( then_condition, @@ -269,20 +255,16 @@ impl<'a> ValueMerger<'a> { } let instruction = Instruction::MakeArray { elements: merged, typ }; - let call_stack = self.call_stack; - self.dfg.insert_instruction_and_results(instruction, self.block, None, call_stack).first() + self.dfg.insert_instruction_and_results(instruction, self.block, self.call_stack).first() } /// Construct a dummy value to be attached to the smaller of two slices being merged. /// We need to make sure we follow the internal element type structure of the slice type /// even for dummy data to ensure that we do not have errors later in the compiler, /// such as with dynamic indexing of non-homogenous slices. 
- fn make_slice_dummy_data(&mut self, typ: &Type) -> ValueId { + fn make_slice_dummy_data(&mut self, typ: &Type) -> Value { match typ { - Type::Numeric(numeric_type) => { - let zero = FieldElement::zero(); - self.dfg.make_constant(zero, *numeric_type) - } + Type::Numeric(numeric_type) => self.dfg.constant(FieldElement::zero(), *numeric_type), Type::Array(element_types, len) => { let mut array = im::Vector::new(); for _ in 0..*len { @@ -292,9 +274,7 @@ impl<'a> ValueMerger<'a> { } let instruction = Instruction::MakeArray { elements: array, typ: typ.clone() }; let call_stack = self.call_stack; - self.dfg - .insert_instruction_and_results(instruction, self.block, None, call_stack) - .first() + self.dfg.insert_instruction_and_results(instruction, self.block, call_stack).first() } Type::Slice(_) => { // TODO(#3188): Need to update flattening to use true user facing length of slices @@ -312,12 +292,12 @@ impl<'a> ValueMerger<'a> { fn try_merge_only_changed_indices( &mut self, - then_condition: ValueId, - else_condition: ValueId, - then_value: ValueId, - else_value: ValueId, + then_condition: Value, + else_condition: Value, + then_value: Value, + else_value: Value, array_length: u32, - ) -> Option { + ) -> Option { let mut found = false; let current_condition = self.current_condition?; @@ -377,20 +357,16 @@ impl<'a> ValueMerger<'a> { let mut array = then_value; for (index, element_type, condition) in changed_indices { - let typevars = Some(vec![element_type.clone()]); - let instruction = Instruction::EnableSideEffectsIf { condition }; self.insert_instruction(instruction); - let mut get_element = |array, typevars| { - let get = Instruction::ArrayGet { array, index }; - self.dfg - .insert_instruction_and_results(get, self.block, typevars, self.call_stack) - .first() + let mut get_element = |array, result_type| { + let get = Instruction::ArrayGet { array, index, result_type }; + self.dfg.insert_instruction_and_results(get, self.block, self.call_stack).first() }; - let then_element = get_element(then_value, typevars.clone()); - let else_element = get_element(else_value, typevars); + let then_element = get_element(then_value, element_type.clone()); + let else_element = get_element(else_value, element_type); let value = self.merge_values(then_condition, else_condition, then_element, else_element); @@ -404,31 +380,22 @@ impl<'a> ValueMerger<'a> { } fn insert_instruction(&mut self, instruction: Instruction) -> InsertInstructionResult { - self.dfg.insert_instruction_and_results(instruction, self.block, None, self.call_stack) + self.dfg.insert_instruction_and_results(instruction, self.block, self.call_stack) } fn insert_array_set( &mut self, - array: ValueId, - index: ValueId, - value: ValueId, - condition: Option, + array: Value, + index: Value, + value: Value, + condition: Option, ) -> InsertInstructionResult { let instruction = Instruction::ArraySet { array, index, value, mutable: false }; let result = - self.dfg.insert_instruction_and_results(instruction, self.block, None, self.call_stack); + self.dfg.insert_instruction_and_results(instruction, self.block, self.call_stack); if let Some(condition) = condition { - let result_index = if result.len() == 1 { - 0 - } else { - // Slices return (length, slice) - assert_eq!(result.len(), 2); - 1 - }; - - let result_value = result[result_index]; - self.array_set_conditionals.insert(result_value, condition); + self.array_set_conditionals.insert(result.first(), condition); } result @@ -436,11 +403,11 @@ impl<'a> ValueMerger<'a> { fn find_previous_array_set( &self, 
- result: ValueId, - changed_indices: &mut Vec<(ValueId, ValueId, Type, ValueId)>, - ) -> ValueId { - match &self.dfg[result] { - Value::Instruction { instruction, .. } => match &self.dfg[*instruction] { + result: Value, + changed_indices: &mut Vec<(Value, Value, Type, Value)>, + ) -> Value { + match result { + Value::Instruction { instruction, .. } => match &self.dfg[instruction] { Instruction::ArraySet { array, index, value, .. } => { let condition = *self.array_set_conditionals.get(&result).unwrap_or_else(|| { diff --git a/compiler/noirc_evaluator/src/ssa/opt/inlining.rs b/compiler/noirc_evaluator/src/ssa/opt/inlining.rs index 11201fc8f85..70911f34f6e 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/inlining.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/inlining.rs @@ -12,10 +12,10 @@ use crate::ssa::{ ir::{ basic_block::BasicBlockId, call_stack::CallStackId, - dfg::InsertInstructionResult, function::{Function, FunctionId, RuntimeType}, + instruction::insert_result::InsertInstructionResult, instruction::{Instruction, InstructionId, TerminatorInstruction}, - value::{Value, ValueId}, + value::Value, }, ssa_gen::Ssa, }; @@ -116,7 +116,7 @@ struct PerFunctionContext<'function> { /// Maps ValueIds in the function being inlined to the new ValueIds to use in the function /// being inlined into. This mapping also contains the mapping from parameter values to /// argument values. - values: HashMap, + values: HashMap, /// Maps blocks in the source function to blocks in the function being inlined into, where /// each mapping is from the start of a source block to an inlined block in which the @@ -136,11 +136,11 @@ fn called_functions_vec(func: &Function) -> Vec { let mut called_function_ids = Vec::new(); for block_id in func.reachable_blocks() { for instruction_id in func.dfg[block_id].instructions() { - let Instruction::Call { func: called_value_id, .. } = &func.dfg[*instruction_id] else { + let Instruction::Call { func: called_value, .. } = &func.dfg[*instruction_id] else { continue; }; - if let Value::Function(function_id) = func.dfg[*called_value_id] { + if let Value::Function(function_id) = func.dfg.resolve(*called_value) { called_function_ids.push(function_id); } } @@ -386,9 +386,9 @@ impl InlineContext { let original_parameters = context.source_function.parameters(); for parameter in original_parameters { - let typ = context.source_function.dfg.type_of_value(*parameter); + let typ = context.source_function.dfg.type_of_value(parameter); let new_parameter = context.context.builder.add_block_parameter(entry_block, typ); - context.values.insert(*parameter, new_parameter); + context.values.insert(parameter, new_parameter); } context.blocks.insert(context.source_function.entry_block(), entry_block); @@ -404,14 +404,9 @@ impl InlineContext { new_func } - /// Inlines a function into the current function and returns the translated return values - /// of the inlined function. - fn inline_function( - &mut self, - ssa: &Ssa, - id: FunctionId, - arguments: &[ValueId], - ) -> Vec { + /// Inlines a function into the current function. + /// Returns the returned values of the function. 
+ fn inline_function(&mut self, ssa: &Ssa, id: FunctionId, arguments: &[Value]) -> Vec { self.recursion_level += 1; let source_function = &ssa.functions[&id]; @@ -426,7 +421,7 @@ impl InlineContext { let parameters = source_function.parameters(); assert_eq!(parameters.len(), arguments.len()); - context.values = parameters.iter().copied().zip(arguments.iter().copied()).collect(); + context.values = parameters.zip(arguments.iter().copied()).collect(); let current_block = context.context.builder.current_block(); context.blocks.insert(source_function.entry_block(), current_block); @@ -457,29 +452,32 @@ impl<'function> PerFunctionContext<'function> { /// Value::Param values are already handled as a result of previous inlining of instructions /// and blocks respectively. If these assertions trigger it means a value is being used before /// the instruction or block that defines the value is inserted. - fn translate_value(&mut self, id: ValueId) -> ValueId { - if let Some(value) = self.values.get(&id) { + fn translate_value(&mut self, old_value: Value) -> Value { + let old_value = self.source_function.dfg.resolve(old_value); + if let Some(value) = self.values.get(&old_value) { return *value; } - let new_value = match &self.source_function.dfg[id] { + let new_value = match old_value { value @ Value::Instruction { .. } => { - unreachable!("All Value::Instructions should already be known during inlining after creating the original inlined instruction. Unknown value {id} = {value:?}") + unreachable!("All Value::Instructions should already be known during inlining after creating the original inlined instruction. Unknown value {value}") } value @ Value::Param { .. } => { - unreachable!("All Value::Params should already be known from previous calls to translate_block. Unknown value {id} = {value:?}") + unreachable!("All Value::Params should already be known from previous calls to translate_block. Unknown value {value}") } Value::NumericConstant { constant, typ } => { - self.context.builder.numeric_constant(*constant, *typ) + let field = &self.source_function.dfg[constant]; + self.context.builder.current_function.dfg.constant_by_ref(field, typ) } - Value::Function(function) => self.context.builder.import_function(*function), - Value::Intrinsic(intrinsic) => self.context.builder.import_intrinsic_id(*intrinsic), + Value::Function(function) => Value::Function(function), + Value::Intrinsic(intrinsic) => Value::Intrinsic(intrinsic), Value::ForeignFunction(function) => { + let function = &self.source_function.dfg[function]; self.context.builder.import_foreign_function(function) } }; - self.values.insert(id, new_value); + self.values.insert(old_value, new_value); new_value } @@ -507,9 +505,9 @@ impl<'function> PerFunctionContext<'function> { let original_parameters = self.source_function.dfg.block_parameters(source_block); for parameter in original_parameters { - let typ = self.source_function.dfg.type_of_value(*parameter); + let typ = self.source_function.dfg.type_of_value(parameter); let new_parameter = self.context.builder.add_block_parameter(new_block, typ); - self.values.insert(*parameter, new_parameter); + self.values.insert(parameter, new_parameter); } self.blocks.insert(source_block, new_block); @@ -520,9 +518,9 @@ impl<'function> PerFunctionContext<'function> { /// Expects that the given ValueId belongs to the source_function. /// /// Returns None if the id is not known to refer to a function. 
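// A minimal, self-contained sketch (hypothetical `Val` type, not the compiler's)
// of the value-translation step used while inlining: instruction results and
// parameters must already have been mapped, while constants and function
// references are context-free and can be reused directly.
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Val {
    Result(u32),
    Constant(u128),
    Function(u32),
}

fn translate_value(values: &HashMap<Val, Val>, old: Val) -> Val {
    if let Some(new) = values.get(&old) {
        return *new;
    }
    match old {
        Val::Result(_) => unreachable!("results must be mapped before they are used"),
        // Constants and function references map to themselves.
        Val::Constant(_) | Val::Function(_) => old,
    }
}

fn main() {
    let mut values = HashMap::new();
    // A callee parameter pre-mapped to the caller-side argument.
    values.insert(Val::Result(0), Val::Result(7));
    assert_eq!(translate_value(&values, Val::Result(0)), Val::Result(7));
    assert_eq!(translate_value(&values, Val::Constant(42)), Val::Constant(42));
}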
- fn get_function(&mut self, mut id: ValueId) -> Option { - id = self.translate_value(id); - match self.context.builder[id] { + fn get_function(&mut self, mut value: Value) -> Option { + value = self.translate_value(value); + match value { Value::Function(id) => Some(id), // We don't set failed_to_inline_a_call for intrinsics since those // don't correspond to actual functions in the SSA program that would @@ -533,7 +531,8 @@ impl<'function> PerFunctionContext<'function> { } /// Inline all reachable blocks within the source_function into the destination function. - fn inline_blocks(&mut self, ssa: &Ssa) -> Vec { + /// Returns the returned values of this function. + fn inline_blocks(&mut self, ssa: &Ssa) -> Vec { let mut seen_blocks = HashSet::new(); let mut block_queue = VecDeque::new(); block_queue.push_back(self.source_function.entry_block()); @@ -565,10 +564,12 @@ impl<'function> PerFunctionContext<'function> { /// Handle inlining a function's possibly multiple return instructions. /// If there is only 1 return we can just continue inserting into that block. /// If there are multiple, we'll need to create a join block to jump to with each value. + /// + /// Returns the returned values. fn handle_function_returns( &mut self, - mut returns: Vec<(BasicBlockId, Vec)>, - ) -> Vec { + mut returns: Vec<(BasicBlockId, Vec)>, + ) -> Vec { // Clippy complains if this were written as an if statement match returns.len() { 1 => { @@ -587,7 +588,7 @@ impl<'function> PerFunctionContext<'function> { } self.context.builder.switch_to_block(return_block); - self.context.builder.block_parameters(return_block).to_vec() + self.context.builder.block_parameters(return_block).collect() } _ => unreachable!("Inlined function had no return values"), } @@ -596,33 +597,35 @@ impl<'function> PerFunctionContext<'function> { /// Inline each instruction in the given block into the function being inlined into. /// This may recurse if it finds another function to inline if a call instruction is within this block. fn inline_block_instructions(&mut self, ssa: &Ssa, block_id: BasicBlockId) { - let mut side_effects_enabled: Option = None; + let mut side_effects_enabled: Option = None; let block = &self.source_function.dfg[block_id]; for id in block.instructions() { match &self.source_function.dfg[*id] { - Instruction::Call { func, arguments } => match self.get_function(*func) { - Some(func_id) => { - if self.should_inline_call(ssa, func_id) { - self.inline_function(ssa, *id, func_id, arguments); - - // This is only relevant during handling functions with `InlineType::NoPredicates` as these - // can pollute the function they're being inlined into with `Instruction::EnabledSideEffects`, - // resulting in predicates not being applied properly. - // - // Note that this doesn't cover the case in which there exists an `Instruction::EnabledSideEffects` - // within the function being inlined whilst the source function has not encountered one yet. - // In practice this isn't an issue as the last `Instruction::EnabledSideEffects` in the - // function being inlined will be to turn off predicates rather than to create one. 
- if let Some(condition) = side_effects_enabled { - self.context.builder.insert_enable_side_effects_if(condition); + Instruction::Call { func, arguments, result_types: _ } => { + match self.get_function(*func) { + Some(func_id) => { + if self.should_inline_call(ssa, func_id) { + self.inline_function(ssa, *id, func_id, arguments); + + // This is only relevant during handling functions with `InlineType::NoPredicates` as these + // can pollute the function they're being inlined into with `Instruction::EnabledSideEffects`, + // resulting in predicates not being applied properly. + // + // Note that this doesn't cover the case in which there exists an `Instruction::EnabledSideEffects` + // within the function being inlined whilst the source function has not encountered one yet. + // In practice this isn't an issue as the last `Instruction::EnabledSideEffects` in the + // function being inlined will be to turn off predicates rather than to create one. + if let Some(condition) = side_effects_enabled { + self.context.builder.insert_enable_side_effects_if(condition); + } + } else { + self.push_instruction(*id); } - } else { - self.push_instruction(*id); } + None => self.push_instruction(*id), } - None => self.push_instruction(*id), - }, + } Instruction::EnableSideEffectsIf { condition } => { side_effects_enabled = Some(self.translate_value(*condition)); self.push_instruction(*id); @@ -656,7 +659,7 @@ impl<'function> PerFunctionContext<'function> { ssa: &Ssa, call_id: InstructionId, function: FunctionId, - arguments: &[ValueId], + arguments: &[Value], ) { let old_results = self.source_function.dfg.instruction_results(call_id); let arguments = vecmap(arguments, |arg| self.translate_value(*arg)); @@ -672,7 +675,7 @@ impl<'function> PerFunctionContext<'function> { .extend_call_stack(self.context.call_stack, &call_stack); self.context.call_stack = new_call_stack; - let new_results = self.context.inline_function(ssa, function, &arguments); + let results = self.context.inline_function(ssa, function, &arguments); self.context.call_stack = self .context .builder @@ -681,7 +684,7 @@ impl<'function> PerFunctionContext<'function> { .call_stack_data .unwind_call_stack(self.context.call_stack, call_stack_len); - let new_results = InsertInstructionResult::Results(call_id, &new_results); + let new_results = InsertInstructionResult::SimplifiedToMultiple(results); Self::insert_new_instruction_results(&mut self.values, old_results, new_results); } @@ -692,47 +695,40 @@ impl<'function> PerFunctionContext<'function> { let mut call_stack = self.context.call_stack; let source_call_stack = self.source_function.dfg.get_instruction_call_stack(id); - call_stack = self - .context - .builder - .current_function - .dfg - .call_stack_data - .extend_call_stack(call_stack, &source_call_stack); - let results = self.source_function.dfg.instruction_results(id); - let results = vecmap(results, |id| self.source_function.dfg.resolve(*id)); + let call_stack_data = &mut self.context.builder.current_function.dfg.call_stack_data; + call_stack = call_stack_data.extend_call_stack(call_stack, &source_call_stack); - let ctrl_typevars = instruction - .requires_ctrl_typevars() - .then(|| vecmap(&results, |result| self.source_function.dfg.type_of_value(*result))); + let results = self.source_function.dfg.instruction_results(id); + let results = results.map(|value| self.source_function.dfg.resolve(value)); self.context.builder.set_call_stack(call_stack); - let new_results = self.context.builder.insert_instruction(instruction, ctrl_typevars); - 
Self::insert_new_instruction_results(&mut self.values, &results, new_results); + let new_results = self.context.builder.insert_instruction(instruction); + Self::insert_new_instruction_results(&mut self.values, results, new_results); } /// Modify the values HashMap to remember the mapping between an instruction result's previous /// ValueId (from the source_function) and its new ValueId in the destination function. fn insert_new_instruction_results( - values: &mut HashMap, - old_results: &[ValueId], + values: &mut HashMap, + mut old_results: impl ExactSizeIterator, new_results: InsertInstructionResult, ) { - assert_eq!(old_results.len(), new_results.len()); + assert_eq!(old_results.len() as u32, new_results.len()); match new_results { InsertInstructionResult::SimplifiedTo(new_result) => { - values.insert(old_results[0], new_result); + let old_result = old_results.next().unwrap(); + values.insert(old_result, new_result); } InsertInstructionResult::SimplifiedToMultiple(new_results) => { - for (old_result, new_result) in old_results.iter().zip(new_results) { - values.insert(*old_result, new_result); + for (old_result, new_result) in old_results.zip(new_results) { + values.insert(old_result, new_result); } } - InsertInstructionResult::Results(_, new_results) => { - for (old_result, new_result) in old_results.iter().zip(new_results) { - values.insert(*old_result, *new_result); + InsertInstructionResult::Results { id, result_count: _ } => { + for (i, old_result) in old_results.enumerate() { + values.insert(old_result, Value::instruction_result(id, i as u16)); } } InsertInstructionResult::InstructionRemoved => (), @@ -749,7 +745,7 @@ impl<'function> PerFunctionContext<'function> { &mut self, block_id: BasicBlockId, block_queue: &mut VecDeque, - ) -> Option<(BasicBlockId, Vec)> { + ) -> Option<(BasicBlockId, Vec)> { match self.source_function.dfg[block_id].unwrap_terminator() { TerminatorInstruction::Jmp { destination, arguments, call_stack } => { let destination = self.translate_block(*destination, block_queue); @@ -856,6 +852,7 @@ mod test { instruction::{BinaryOp, Intrinsic, TerminatorInstruction}, map::Id, types::{NumericType, Type}, + value::Value, }, }; @@ -874,13 +871,13 @@ mod test { let mut builder = FunctionBuilder::new("foo".into(), foo_id); let bar_id = Id::test_new(1); - let bar = builder.import_function(bar_id); - let results = builder.insert_call(bar, Vec::new(), vec![Type::field()]).to_vec(); + let bar = Value::Function(bar_id); + let results = builder.insert_call(bar, Vec::new(), vec![Type::field()]).collect(); builder.terminate_with_return(results); builder.new_function("bar".into(), bar_id, InlineType::default()); let expected_return = 72u128; - let seventy_two = builder.field_constant(expected_return); + let seventy_two = builder.field_constant(expected_return.into()); builder.terminate_with_return(vec![seventy_two]); let ssa = builder.finish(); @@ -923,13 +920,16 @@ mod test { let mut builder = FunctionBuilder::new("main".into(), main_id); let main_v0 = builder.add_parameter(Type::field()); - let main_f1 = builder.import_function(square_id); - let main_f2 = builder.import_function(id1_id); - let main_f3 = builder.import_function(id2_id); + let main_f1 = Value::Function(square_id); + let main_f2 = Value::Function(id1_id); + let main_f3 = Value::Function(id2_id); - let main_v7 = builder.insert_call(main_f2, vec![main_f1], vec![Type::Function])[0]; - let main_v13 = builder.insert_call(main_f3, vec![main_v7], vec![Type::Function])[0]; - let main_v16 = builder.insert_call(main_v13, 
vec![main_v0], vec![Type::field()])[0]; + let main_v7 = + builder.insert_call(main_f2, vec![main_f1], vec![Type::Function]).next().unwrap(); + let main_v13 = + builder.insert_call(main_f3, vec![main_v7], vec![Type::Function]).next().unwrap(); + let main_v16 = + builder.insert_call(main_v13, vec![main_v0], vec![Type::field()]).next().unwrap(); builder.terminate_with_return(vec![main_v16]); // Compiling square f1 @@ -979,17 +979,17 @@ mod test { let mut builder = FunctionBuilder::new("main".into(), main_id); let factorial_id = Id::test_new(1); - let factorial = builder.import_function(factorial_id); + let factorial = Value::Function(factorial_id); - let five = builder.field_constant(5u128); - let results = builder.insert_call(factorial, vec![five], vec![Type::field()]).to_vec(); + let five = builder.field_constant(5u128.into()); + let results = builder.insert_call(factorial, vec![five], vec![Type::field()]).collect(); builder.terminate_with_return(results); builder.new_function("factorial".into(), factorial_id, InlineType::default()); let b1 = builder.insert_block(); let b2 = builder.insert_block(); - let one = builder.field_constant(1u128); + let one = builder.field_constant(1u128.into()); let v0 = builder.add_parameter(Type::field()); let v1 = builder.insert_binary(v0, BinaryOp::Lt, one); @@ -999,9 +999,9 @@ mod test { builder.terminate_with_return(vec![one]); builder.switch_to_block(b2); - let factorial_id = builder.import_function(factorial_id); + let factorial_id = Value::Function(factorial_id); let v2 = builder.insert_binary(v0, BinaryOp::Sub, one); - let v3 = builder.insert_call(factorial_id, vec![v2], vec![Type::field()])[0]; + let v3 = builder.insert_call(factorial_id, vec![v2], vec![Type::field()]).next().unwrap(); let v4 = builder.insert_binary(v0, BinaryOp::Mul, v3); builder.terminate_with_return(vec![v4]); @@ -1080,17 +1080,19 @@ mod test { let main_cond = builder.add_parameter(Type::bool()); let inner1_id = Id::test_new(1); - let inner1 = builder.import_function(inner1_id); - let main_v2 = builder.insert_call(inner1, vec![main_cond], vec![Type::field()])[0]; - let assert_constant = builder.import_intrinsic_id(Intrinsic::AssertConstant); + let inner1 = Value::Function(inner1_id); + let mut main_v2 = builder.insert_call(inner1, vec![main_cond], vec![Type::field()]); + let main_v2 = main_v2.next().unwrap(); + let assert_constant = Value::Intrinsic(Intrinsic::AssertConstant); builder.insert_call(assert_constant, vec![main_v2], vec![]); builder.terminate_with_return(vec![]); builder.new_function("inner1".into(), inner1_id, InlineType::default()); let inner1_cond = builder.add_parameter(Type::bool()); let inner2_id = Id::test_new(2); - let inner2 = builder.import_function(inner2_id); - let inner1_v2 = builder.insert_call(inner2, vec![inner1_cond], vec![Type::field()])[0]; + let inner2 = Value::Function(inner2_id); + let mut inner1_v2 = builder.insert_call(inner2, vec![inner1_cond], vec![Type::field()]); + let inner1_v2 = inner1_v2.next().unwrap(); builder.terminate_with_return(vec![inner1_v2]); builder.new_function("inner2".into(), inner2_id, InlineType::default()); @@ -1139,8 +1141,8 @@ mod test { let main_id = Id::test_new(0); let mut builder = FunctionBuilder::new("main".into(), main_id); - let main = builder.import_function(main_id); - let results = builder.insert_call(main, Vec::new(), vec![]).to_vec(); + let main = Value::Function(main_id); + let results = builder.insert_call(main, Vec::new(), vec![]).collect(); builder.terminate_with_return(results); let ssa = 
builder.finish(); @@ -1166,13 +1168,13 @@ mod test { builder.set_runtime(RuntimeType::Brillig(InlineType::default())); let bar_id = Id::test_new(1); - let bar = builder.import_function(bar_id); - let results = builder.insert_call(bar, Vec::new(), vec![Type::field()]).to_vec(); + let bar = Value::Function(bar_id); + let results = builder.insert_call(bar, Vec::new(), vec![Type::field()]).collect(); builder.terminate_with_return(results); builder.new_brillig_function("bar".into(), bar_id, InlineType::default()); let expected_return = 72u128; - let seventy_two = builder.field_constant(expected_return); + let seventy_two = builder.field_constant(expected_return.into()); builder.terminate_with_return(vec![seventy_two]); let ssa = builder.finish(); @@ -1208,14 +1210,14 @@ mod test { builder.set_runtime(RuntimeType::Brillig(InlineType::default())); let bar_id = Id::test_new(1); - let bar = builder.import_function(bar_id); - let v0 = builder.insert_call(bar, Vec::new(), vec![Type::field()]).to_vec(); - let _v1 = builder.insert_call(bar, Vec::new(), vec![Type::field()]).to_vec(); - let _v2 = builder.insert_call(bar, Vec::new(), vec![Type::field()]).to_vec(); + let bar = Value::Function(bar_id); + let v0 = builder.insert_call(bar, Vec::new(), vec![Type::field()]).collect(); + let _v1 = builder.insert_call(bar, Vec::new(), vec![Type::field()]); + let _v2 = builder.insert_call(bar, Vec::new(), vec![Type::field()]); builder.terminate_with_return(v0); builder.new_brillig_function("bar".into(), bar_id, InlineType::default()); - let bar_v0 = builder.numeric_constant(1_usize, NumericType::bool()); + let bar_v0 = builder.constant(1_usize.into(), NumericType::bool()); let then_block = builder.insert_block(); let else_block = builder.insert_block(); let join_block = builder.insert_block(); @@ -1224,7 +1226,7 @@ mod test { let one = builder.field_constant(FieldElement::one()); builder.terminate_with_jmp(join_block, vec![one]); builder.switch_to_block(else_block); - let two = builder.field_constant(FieldElement::from(2_u128)); + let two = builder.field_constant(2_u128.into()); builder.terminate_with_jmp(join_block, vec![two]); let join_param = builder.add_block_parameter(join_block, Type::field()); builder.switch_to_block(join_block); diff --git a/compiler/noirc_evaluator/src/ssa/opt/loop_invariant.rs b/compiler/noirc_evaluator/src/ssa/opt/loop_invariant.rs index c188ed1f80f..1476e6c6953 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/loop_invariant.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/loop_invariant.rs @@ -17,7 +17,7 @@ use crate::ssa::{ function_inserter::FunctionInserter, instruction::{Instruction, InstructionId}, types::Type, - value::ValueId, + value::Value, }, Ssa, }; @@ -77,17 +77,17 @@ impl Loop { /// jmpif v5 then: b3, else: b2 /// ``` /// In the example above, `v1` is the induction variable - fn get_induction_variable(&self, function: &Function) -> ValueId { - function.dfg.block_parameters(self.header)[0] + fn get_induction_variable(&self) -> Value { + Value::block_param(self.header, 0) } } struct LoopInvariantContext<'f> { inserter: FunctionInserter<'f>, - defined_in_loop: HashSet, - loop_invariants: HashSet, + defined_in_loop: HashSet, + loop_invariants: HashSet, // Maps induction variable -> fixed upper loop bound - outer_induction_variables: HashMap, + outer_induction_variables: HashMap, } impl<'f> LoopInvariantContext<'f> { @@ -116,8 +116,7 @@ impl<'f> LoopInvariantContext<'f> { self.inserter.function.dfg[instruction_id], Instruction::MakeArray { .. 
} ) { - let result = - self.inserter.function.dfg.instruction_results(instruction_id)[0]; + let result = Value::instruction_result(instruction_id, 0); let inc_rc = Instruction::IncrementRc { value: result }; let call_stack = self .inserter @@ -127,7 +126,7 @@ impl<'f> LoopInvariantContext<'f> { self.inserter .function .dfg - .insert_instruction_and_results(inc_rc, *block, None, call_stack); + .insert_instruction_and_results(inc_rc, *block, call_stack); } } else { self.inserter.push_instruction(instruction_id, *block); @@ -142,7 +141,7 @@ impl<'f> LoopInvariantContext<'f> { // reliant upon the maximum induction variable. let upper_bound = loop_.get_const_upper_bound(self.inserter.function); if let Some(upper_bound) = upper_bound { - let induction_variable = loop_.get_induction_variable(self.inserter.function); + let induction_variable = loop_.get_induction_variable(); let induction_variable = self.inserter.resolve(induction_variable); self.outer_induction_variables.insert(induction_variable, upper_bound); } @@ -158,11 +157,20 @@ impl<'f> LoopInvariantContext<'f> { for block in loop_.blocks.iter() { let params = self.inserter.function.dfg.block_parameters(*block); + let params = params.map(|value| self.inserter.resolve(value)); + let params = params.filter(|value| matches!(value, Value::Param { .. })); self.defined_in_loop.extend(params); - for instruction_id in self.inserter.function.dfg[*block].instructions() { + + let instructions = self.inserter.function.dfg[*block].take_instructions(); + + for instruction_id in &instructions { let results = self.inserter.function.dfg.instruction_results(*instruction_id); + let results = results.map(|value| self.inserter.resolve(value)); + let results = results.filter(|value| matches!(value, Value::Instruction { .. })); self.defined_in_loop.extend(results); } + + *self.inserter.function.dfg[*block].instructions_mut() = instructions; } } @@ -173,18 +181,21 @@ impl<'f> LoopInvariantContext<'f> { instruction_id: InstructionId, hoist_invariant: bool, ) { - let results = self.inserter.function.dfg.instruction_results(instruction_id).to_vec(); - // We will have new IDs after pushing instructions. - // We should mark the resolved result IDs as also being defined within the loop. - let results = - results.into_iter().map(|value| self.inserter.resolve(value)).collect::>(); - self.defined_in_loop.extend(results.iter()); - - // We also want the update result IDs when we are marking loop invariants as we may not - // be going through the blocks of the loop in execution order - if hoist_invariant { - // Track already found loop invariants - self.loop_invariants.extend(results.iter()); + for result in self.inserter.function.dfg.instruction_results(instruction_id) { + // We will have new IDs after pushing instructions. + // We should mark the resolved result IDs as also being defined within the loop. + let result = self.inserter.resolve(result); + + if matches!(result, Value::Instruction { .. }) { + self.defined_in_loop.insert(result); + + // We also want the update result IDs when we are marking loop invariants as we may not + // be going through the blocks of the loop in execution order + if hoist_invariant { + // Track already found loop invariants + self.loop_invariants.insert(result); + } + } } } @@ -194,7 +205,10 @@ impl<'f> LoopInvariantContext<'f> { // We may have already re-inserted new instructions if two loops share blocks // so we need to map all the values in the instruction which we want to check. 
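Side note: the `defined_in_loop` and `loop_invariants` sets maintained above feed a simple operand test, namely that an instruction stays hoistable only while each of its operands is either defined outside the loop or already marked invariant. A self-contained sketch of that test follows; `ValueKey` and the function name are invented for the example.

use std::collections::HashSet;

type ValueKey = u32; // stand-in for an SSA value

/// Returns true if every operand is defined outside the loop
/// or has already been marked as a loop invariant.
fn operands_are_invariant(
    operands: &[ValueKey],
    defined_in_loop: &HashSet<ValueKey>,
    loop_invariants: &HashSet<ValueKey>,
) -> bool {
    operands
        .iter()
        .all(|v| !defined_in_loop.contains(v) || loop_invariants.contains(v))
}

fn main() {
    let defined_in_loop: HashSet<ValueKey> = [10, 11].into_iter().collect();
    let loop_invariants: HashSet<ValueKey> = [10].into_iter().collect();

    // One operand comes from outside the loop, the other was already hoisted: still invariant.
    assert!(operands_are_invariant(&[1, 10], &defined_in_loop, &loop_invariants));
    // This operand is produced inside the loop and is not invariant: cannot hoist.
    assert!(!operands_are_invariant(&[11], &defined_in_loop, &loop_invariants));
}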
let (instruction, _) = self.inserter.map_instruction(instruction_id); + instruction.for_each_value(|value| { + let value = self.inserter.resolve(value); + // If an instruction value is defined in the loop and not already a loop invariant // the instruction results are not loop invariants. // @@ -222,7 +236,7 @@ impl<'f> LoopInvariantContext<'f> { /// we can safely hoist the array access. fn can_be_deduplicated_from_upper_bound(&self, instruction: &Instruction) -> bool { match instruction { - Instruction::ArrayGet { array, index } => { + Instruction::ArrayGet { array, index, result_type: _ } => { let array_typ = self.inserter.function.dfg.type_of_value(*array); let upper_bound = self.outer_induction_variables.get(index); if let (Type::Array(_, len), Some(upper_bound)) = (array_typ, upper_bound) { @@ -245,6 +259,7 @@ impl<'f> LoopInvariantContext<'f> { /// Leaving out this mapping could lead to instructions with values that do not exist. fn map_dependent_instructions(&mut self) { let blocks = self.inserter.function.reachable_blocks(); + for block in blocks { for instruction_id in self.inserter.function.dfg[block].take_instructions() { self.inserter.push_instruction(instruction_id, block); diff --git a/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs b/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs index 4356a23335c..2dabc0ed513 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs @@ -89,7 +89,7 @@ use crate::ssa::{ instruction::{Instruction, InstructionId, TerminatorInstruction}, post_order::PostOrder, types::Type, - value::ValueId, + value::Value, }, ssa_gen::Ssa, }; @@ -134,16 +134,16 @@ struct PerFunctionContext<'f> { /// Track a value's last load across all blocks. /// If a value is not used in anymore loads we can remove the last store to that value. - last_loads: HashMap, + last_loads: HashMap, /// Track whether a reference was passed into another entry point /// This is needed to determine whether we can remove a store. - calls_reference_input: HashSet, + calls_reference_input: HashSet, /// Track whether a reference has been aliased, and store the respective /// instruction that aliased that reference. /// If that store has been set for removal, we can also remove this instruction. - aliased_references: HashMap>, + aliased_references: HashMap>, } impl<'f> PerFunctionContext<'f> { @@ -178,10 +178,10 @@ impl<'f> PerFunctionContext<'f> { } let mut all_terminator_values = HashSet::default(); - let mut per_func_block_params: HashSet = HashSet::default(); + let mut per_func_block_params: HashSet = HashSet::default(); for (block_id, _) in self.blocks.iter() { let block_params = self.inserter.function.dfg.block_parameters(*block_id); - per_func_block_params.extend(block_params.iter()); + per_func_block_params.extend(block_params); let terminator = self.inserter.function.dfg[*block_id].unwrap_terminator(); terminator.for_each_value(|value| all_terminator_values.insert(value)); } @@ -217,10 +217,10 @@ impl<'f> PerFunctionContext<'f> { // an allocation did not come from an entry point or was passed to an entry point. 
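As a rough illustration of when the "remove the last store" reasoning above applies: the stored-to address must not be read by any later load, must not escape through a call or the terminator, and must not be a caller-visible reference parameter. The sketch below uses invented names and omits the aliasing analysis the real pass performs; it only shows the shape of the predicate.

use std::collections::HashSet;

type Address = u32;

/// Illustrative conditions under which the last store to `address` can be dropped.
fn last_store_is_removable(
    address: Address,
    used_in_later_loads: &HashSet<Address>,
    escapes: &HashSet<Address>,
    reference_parameters: &HashSet<Address>,
) -> bool {
    !used_in_later_loads.contains(&address)
        && !escapes.contains(&address)
        && !reference_parameters.contains(&address)
}

fn main() {
    let loads: HashSet<Address> = [1].into_iter().collect();
    let escapes: HashSet<Address> = [2].into_iter().collect();
    let params: HashSet<Address> = [3].into_iter().collect();

    assert!(last_store_is_removable(0, &loads, &escapes, &params)); // purely local, dead store
    assert!(!last_store_is_removable(2, &loads, &escapes, &params)); // passed to a call: keep it
}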
fn is_store_alias_used( &self, - store_address: &ValueId, + store_address: &Value, block: &Block, - all_terminator_values: &HashSet, - per_func_block_params: &HashSet, + all_terminator_values: &HashSet, + per_func_block_params: &HashSet, ) -> bool { let reference_parameters = self.reference_parameters(); @@ -270,12 +270,9 @@ impl<'f> PerFunctionContext<'f> { /// All references are mutable, so these inputs are shared with the function caller /// and thus stores should not be eliminated, even if the blocks in this function /// don't use them anywhere. - fn reference_parameters(&self) -> BTreeSet { - let parameters = self.inserter.function.parameters().iter(); - parameters - .filter(|param| self.inserter.function.dfg.value_is_reference(**param)) - .copied() - .collect() + fn reference_parameters(&self) -> BTreeSet { + let parameters = self.inserter.function.parameters(); + parameters.filter(|param| self.inserter.function.dfg.value_is_reference(*param)).collect() } /// The value of each reference at the start of the given block is the unification @@ -343,7 +340,7 @@ impl<'f> PerFunctionContext<'f> { let mut aliases: HashMap = HashMap::default(); for param in params { - match dfg.type_of_value(*param) { + match dfg.type_of_value(param) { // If the type indirectly contains a reference we have to assume all references // are unknown since we don't have any ValueIds to use. Type::Reference(element) if element.contains_reference() => return, @@ -351,7 +348,7 @@ impl<'f> PerFunctionContext<'f> { let empty_aliases = AliasSet::known_empty(); let alias_set = aliases.entry(element.as_ref().clone()).or_insert(empty_aliases); - alias_set.insert(*param); + alias_set.insert(param); } typ if typ.contains_reference() => return, _ => continue, @@ -409,15 +406,14 @@ impl<'f> PerFunctionContext<'f> { } match &self.inserter.function.dfg[instruction] { - Instruction::Load { address } => { + Instruction::Load { address, result_type: _ } => { let address = self.inserter.function.dfg.resolve(*address); - let result = self.inserter.function.dfg.instruction_results(instruction)[0]; + let result = Value::instruction_result(instruction, 0); references.remember_dereference(self.inserter.function, address, result); // If the load is known, replace it with the known value and remove the load if let Some(value) = references.get_known_value(address) { - let result = self.inserter.function.dfg.instruction_results(instruction)[0]; self.inserter.map_value(result, value); self.instructions_to_remove.insert(instruction); } else { @@ -429,14 +425,13 @@ impl<'f> PerFunctionContext<'f> { // Check whether the block has a repeat load from the same address (w/ no calls or stores in between the loads). // If we do have a repeat load, we can remove the current load and map its result to the previous load's result. 
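A small standalone model of the repeat-load rule described in the comment above: remember the most recent load from each address and let a second load of the same address reuse its result, unless a store (or a call that may write memory) happened in between. All names below are invented for the example; the real pass keys `last_loads` by instruction id and also consults alias information.

use std::collections::HashMap;

type Address = u32;
type LoadResult = u32;

#[derive(Default)]
struct BlockState {
    // Result of the most recent load from each address in this block,
    // cleared whenever a store or call could have changed the address.
    last_loads: HashMap<Address, LoadResult>,
}

impl BlockState {
    /// Returns the value a new `load address` resolves to, reusing the previous
    /// load's result when possible, and records this load as the latest one.
    fn load(&mut self, address: Address, fresh_result: LoadResult) -> LoadResult {
        if let Some(previous) = self.last_loads.get(&address) {
            return *previous; // repeat load: map this result to the earlier one
        }
        self.last_loads.insert(address, fresh_result);
        fresh_result
    }

    /// A store (or a call that may write memory) invalidates the cached load.
    fn store(&mut self, address: Address) {
        self.last_loads.remove(&address);
    }
}

fn main() {
    let mut block = BlockState::default();
    assert_eq!(block.load(0, 100), 100); // first load keeps its own result
    assert_eq!(block.load(0, 101), 100); // repeat load maps to the first result
    block.store(0);
    assert_eq!(block.load(0, 102), 102); // the store in between forces a fresh load
}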
if let Some(last_load) = references.last_loads.get(&address) { - let Instruction::Load { address: previous_address } = + let Instruction::Load { address: previous_address, result_type: _ } = &self.inserter.function.dfg[*last_load] else { panic!("Expected a Load instruction here"); }; - let result = self.inserter.function.dfg.instruction_results(instruction)[0]; - let previous_result = - self.inserter.function.dfg.instruction_results(*last_load)[0]; + let result = Value::instruction_result(instruction, 0); + let previous_result = Value::instruction_result(*last_load, 0); if *previous_address == address { self.inserter.map_value(result, previous_result); self.instructions_to_remove.insert(instruction); @@ -476,14 +471,14 @@ impl<'f> PerFunctionContext<'f> { references.keep_last_load_for(address, self.inserter.function); references.last_stores.insert(address, instruction); } - Instruction::Allocate => { + Instruction::Allocate { element_type: _ } => { // Register the new reference - let result = self.inserter.function.dfg.instruction_results(instruction)[0]; + let result = Value::instruction_result(instruction, 0); references.expressions.insert(result, Expression::Other(result)); references.aliases.insert(Expression::Other(result), AliasSet::known(result)); } Instruction::ArrayGet { array, .. } => { - let result = self.inserter.function.dfg.instruction_results(instruction)[0]; + let result = Value::instruction_result(instruction, 0); references.mark_value_used(*array, self.inserter.function); if self.inserter.function.dfg.value_is_reference(result) { @@ -500,7 +495,7 @@ impl<'f> PerFunctionContext<'f> { let element_type = self.inserter.function.dfg.type_of_value(*value); if Self::contains_references(&element_type) { - let result = self.inserter.function.dfg.instruction_results(instruction)[0]; + let result = Value::instruction_result(instruction, 0); let array = self.inserter.function.dfg.resolve(*array); let expression = Expression::ArrayElement(Box::new(Expression::Other(array))); @@ -542,7 +537,7 @@ impl<'f> PerFunctionContext<'f> { // If `array` is an array constant that contains reference types, then insert each element // as a potential alias to the array itself. if Self::contains_references(typ) { - let array = self.inserter.function.dfg.instruction_results(instruction)[0]; + let array = Value::instruction_result(instruction, 0); let expr = Expression::ArrayElement(Box::new(Expression::Other(array))); references.expressions.insert(array, expr.clone()); @@ -568,14 +563,14 @@ impl<'f> PerFunctionContext<'f> { } } - fn set_aliases(&self, references: &mut Block, address: ValueId, new_aliases: AliasSet) { + fn set_aliases(&self, references: &mut Block, address: Value, new_aliases: AliasSet) { let expression = references.expressions.entry(address).or_insert(Expression::Other(address)); let aliases = references.aliases.entry(expression.clone()).or_default(); *aliases = new_aliases; } - fn mark_all_unknown(&self, values: &[ValueId], references: &mut Block) { + fn mark_all_unknown(&self, values: &[Value], references: &mut Block) { for value in values { if self.inserter.function.dfg.value_is_reference(*value) { let value = self.inserter.function.dfg.resolve(*value); @@ -612,30 +607,31 @@ impl<'f> PerFunctionContext<'f> { match self.inserter.function.dfg[block].unwrap_terminator() { TerminatorInstruction::JmpIf { .. } => (), // Nothing to do TerminatorInstruction::Jmp { destination, arguments, .. 
} => { - let destination_parameters = self.inserter.function.dfg[*destination].parameters(); + let destination_parameters = + self.inserter.function.dfg.block_parameters(*destination); assert_eq!(destination_parameters.len(), arguments.len()); // If we have multiple parameters that alias that same argument value, // then those parameters also alias each other. // We save parameters with repeat arguments to later mark those // parameters as aliasing one another. - let mut arg_set: HashMap> = HashMap::default(); + let mut arg_set: HashMap> = HashMap::default(); // Add an alias for each reference parameter - for (parameter, argument) in destination_parameters.iter().zip(arguments) { - if self.inserter.function.dfg.value_is_reference(*parameter) { + for (parameter, argument) in destination_parameters.zip(arguments) { + if self.inserter.function.dfg.value_is_reference(parameter) { let argument = self.inserter.function.dfg.resolve(*argument); if let Some(expression) = references.expressions.get(&argument) { if let Some(aliases) = references.aliases.get_mut(expression) { // The argument reference is possibly aliased by this block parameter - aliases.insert(*parameter); + aliases.insert(parameter); // Check if we have seen the same argument let seen_parameters = arg_set.entry(argument).or_default(); // Add the current parameter to the parameters we have seen for this argument. // The previous parameters and the current one alias one another. - seen_parameters.insert(*parameter); + seen_parameters.insert(parameter); } } } @@ -677,6 +673,7 @@ mod tests { instruction::{BinaryOp, Instruction, Intrinsic, TerminatorInstruction}, map::Id, types::Type, + value::Value, }, opt::assert_normalized_ssa_equals, Ssa, @@ -687,7 +684,7 @@ mod tests { // fn func() { // b0(): // v0 = allocate - // v1 = make_array [Field 1, Field 2] + // v1 = make_array [Field 1, Field 0] // store v1 in v0 // v2 = load v0 // v3 = array_get v2, index 1 @@ -697,12 +694,13 @@ mod tests { let func_id = Id::test_new(0); let mut builder = FunctionBuilder::new("func".into(), func_id); let v0 = builder.insert_allocate(Type::Array(Arc::new(vec![Type::field()]), 2)); + + let zero = builder.field_constant(FieldElement::zero()); let one = builder.field_constant(FieldElement::one()); - let two = builder.field_constant(FieldElement::one()); let element_type = Arc::new(vec![Type::field()]); let array_type = Type::Array(element_type, 2); - let v1 = builder.insert_make_array(vector![one, two], array_type.clone()); + let v1 = builder.insert_make_array(vector![one, zero], array_type.clone()); builder.insert_store(v0, v1); let v2 = builder.insert_load(v0, array_type); @@ -721,7 +719,7 @@ mod tests { TerminatorInstruction::Return { return_values, .. } => return_values.first().unwrap(), _ => unreachable!(), }; - assert_eq!(func.dfg[*ret_val_id], func.dfg[two]); + assert_eq!(*ret_val_id, zero); } #[test] @@ -741,7 +739,7 @@ mod tests { let one = builder.field_constant(FieldElement::one()); builder.insert_store(v0, one); let v1 = builder.insert_load(v0, Type::field()); - let f0 = builder.import_intrinsic_id(Intrinsic::AssertConstant); + let f0 = Value::Intrinsic(Intrinsic::AssertConstant); builder.insert_call(f0, vec![v0], vec![]); builder.terminate_with_return(vec![v1]); @@ -754,10 +752,10 @@ mod tests { assert_eq!(count_stores(block_id, &func.dfg), 1); let ret_val_id = match func.dfg[block_id].terminator().unwrap() { - TerminatorInstruction::Return { return_values, .. } => return_values.first().unwrap(), + TerminatorInstruction::Return { return_values, .. 
} => *return_values.first().unwrap(), _ => unreachable!(), }; - assert_eq!(func.dfg[*ret_val_id], func.dfg[one]); + assert_eq!(ret_val_id, one); } #[test] @@ -791,10 +789,7 @@ mod tests { _ => unreachable!(), }; - // Since the mem2reg pass simplifies as it goes, the id of the allocate instruction result - // is most likely no longer v0. We have to retrieve the new id here. - let allocate_id = func.dfg.instruction_results(instructions[0])[0]; - assert_eq!(ret_val_id, allocate_id); + assert_eq!(ret_val_id, v0); } fn count_stores(block: BasicBlockId, dfg: &DataFlowGraph) -> usize { @@ -833,7 +828,7 @@ mod tests { let v0 = builder.insert_allocate(Type::field()); - let five = builder.field_constant(5u128); + let five = builder.field_constant(5u128.into()); builder.insert_store(v0, five); let v1 = builder.insert_load(v0, Type::field()); @@ -844,7 +839,7 @@ mod tests { let v2 = builder.add_block_parameter(b1, Type::field()); let v3 = builder.insert_load(v0, Type::field()); - let six = builder.field_constant(6u128); + let six = builder.field_constant(6u128.into()); builder.insert_store(v0, six); let v4 = builder.insert_load(v0, Type::field()); @@ -975,7 +970,7 @@ mod tests { let mut builder = FunctionBuilder::new("main".into(), main_id); let v0 = builder.insert_allocate(Type::field()); - let zero = builder.field_constant(0u128); + let zero = builder.field_constant(0u128.into()); builder.insert_store(v0, zero); let v2 = builder.insert_allocate(Type::field()); @@ -999,9 +994,9 @@ mod tests { // Loop body builder.switch_to_block(b2); let v5 = builder.insert_load(v2, v2_type.clone()); - let two = builder.field_constant(2u128); + let two = builder.field_constant(2u128.into()); builder.insert_store(v5, two); - let one = builder.field_constant(1u128); + let one = builder.field_constant(1u128.into()); let v3_plus_one = builder.insert_binary(v3, BinaryOp::Add, one); builder.terminate_with_jmp(b1, vec![v3_plus_one]); @@ -1054,8 +1049,8 @@ mod tests { let v0 = builder.add_parameter(field_ref.clone()); let v1 = builder.add_parameter(field_ref.clone()); - let zero = builder.field_constant(0u128); - let one = builder.field_constant(0u128); + let zero = builder.field_constant(0u128.into()); + let one = builder.field_constant(1u128.into()); builder.insert_store(v0, zero); builder.insert_store(v1, one); diff --git a/compiler/noirc_evaluator/src/ssa/opt/mem2reg/alias_set.rs b/compiler/noirc_evaluator/src/ssa/opt/mem2reg/alias_set.rs index e32eaa70186..2ee3504cd44 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/mem2reg/alias_set.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/mem2reg/alias_set.rs @@ -1,6 +1,6 @@ use std::collections::BTreeSet; -use crate::ssa::ir::value::ValueId; +use crate::ssa::ir::value::Value; /// A set of possible aliases. Each ValueId in this set represents one possible value the reference /// holding this AliasSet may be aliased to. This struct wrapper is provided so that when we take @@ -10,7 +10,7 @@ use crate::ssa::ir::value::ValueId; /// "unknown which aliases this may refer to" - `None`. 
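For intuition about the wrapper documented above: the alias set is an `Option`-wrapped set where `None` stands for "may alias anything", so a union involving an unknown set stays unknown. The sketch below (`MiniAliasSet`, `Val`) is illustrative only and is not this crate's API.

use std::collections::BTreeSet;

type Val = u32;

#[derive(Debug, Clone)]
struct MiniAliasSet {
    // None means "unknown": the reference may alias anything.
    aliases: Option<BTreeSet<Val>>,
}

impl MiniAliasSet {
    fn known(value: Val) -> Self {
        Self { aliases: Some(BTreeSet::from([value])) }
    }

    fn unknown() -> Self {
        Self { aliases: None }
    }

    /// Union of two alias sets; anything unioned with "unknown" is unknown.
    fn unify(&mut self, other: &Self) {
        match &other.aliases {
            Some(rhs) => {
                if let Some(lhs) = self.aliases.as_mut() {
                    lhs.extend(rhs.iter().copied());
                }
                // if self is already unknown it stays unknown
            }
            None => self.aliases = None,
        }
    }
}

fn main() {
    let mut a = MiniAliasSet::known(1);
    a.unify(&MiniAliasSet::known(2));
    assert_eq!(a.aliases.as_ref().map(|s| s.len()), Some(2));

    a.unify(&MiniAliasSet::unknown());
    assert!(a.aliases.is_none()); // once unknown, always unknown
}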
#[derive(Debug, Default, Clone)] pub(super) struct AliasSet { - aliases: Option>, + aliases: Option>, } impl AliasSet { @@ -18,13 +18,13 @@ impl AliasSet { Self { aliases: None } } - pub(super) fn known(value: ValueId) -> AliasSet { + pub(super) fn known(value: Value) -> AliasSet { let mut aliases = BTreeSet::new(); aliases.insert(value); Self { aliases: Some(aliases) } } - pub(super) fn known_multiple(values: BTreeSet) -> AliasSet { + pub(super) fn known_multiple(values: BTreeSet) -> AliasSet { Self { aliases: Some(values) } } @@ -41,7 +41,7 @@ impl AliasSet { /// Return the single known alias if there is exactly one. /// Otherwise, return None. - pub(super) fn single_alias(&self) -> Option { + pub(super) fn single_alias(&self) -> Option { self.aliases .as_ref() .and_then(|aliases| (aliases.len() == 1).then(|| *aliases.first().unwrap())) @@ -58,7 +58,7 @@ impl AliasSet { } /// Inserts a new alias into this set if it is not unknown - pub(super) fn insert(&mut self, new_alias: ValueId) { + pub(super) fn insert(&mut self, new_alias: Value) { if let Some(aliases) = &mut self.aliases { aliases.insert(new_alias); } @@ -66,11 +66,11 @@ impl AliasSet { /// Returns `Some(true)` if `f` returns true for any known alias in this set. /// If this alias set is unknown, None is returned. - pub(super) fn any(&self, f: impl FnMut(ValueId) -> bool) -> Option { + pub(super) fn any(&self, f: impl FnMut(Value) -> bool) -> Option { self.aliases.as_ref().map(|aliases| aliases.iter().copied().any(f)) } - pub(super) fn for_each(&self, mut f: impl FnMut(ValueId)) { + pub(super) fn for_each(&self, mut f: impl FnMut(Value)) { if let Some(aliases) = &self.aliases { for alias in aliases { f(*alias); @@ -81,7 +81,7 @@ impl AliasSet { /// Return the first ValueId in the alias set as long as there is at least one. /// The ordering is arbitrary (by lowest ValueId) so this method should only be /// used when you need an arbitrary ValueId from the alias set. - pub(super) fn first(&self) -> Option { + pub(super) fn first(&self) -> Option { self.aliases.as_ref().and_then(|aliases| aliases.first().copied()) } } diff --git a/compiler/noirc_evaluator/src/ssa/opt/mem2reg/block.rs b/compiler/noirc_evaluator/src/ssa/opt/mem2reg/block.rs index f4265b2466d..dd1ff785454 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/mem2reg/block.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/mem2reg/block.rs @@ -3,7 +3,7 @@ use std::borrow::Cow; use crate::ssa::ir::{ function::Function, instruction::{Instruction, InstructionId}, - value::ValueId, + value::Value, }; use super::alias_set::AliasSet; @@ -19,7 +19,7 @@ pub(super) struct Block { /// Maps a ValueId to the Expression it represents. /// Multiple ValueIds can map to the same Expression, e.g. /// dereferences to the same allocation. - pub(super) expressions: im::OrdMap, + pub(super) expressions: im::OrdMap, /// Each expression is tracked as to how many aliases it /// may have. If there is only 1, we can attempt to optimize @@ -30,13 +30,13 @@ pub(super) struct Block { /// Each allocate instruction result (and some reference block parameters) /// will map to a Reference value which tracks whether the last value stored /// to the reference is known. 
- pub(super) references: im::OrdMap, + pub(super) references: im::OrdMap, /// The last instance of a `Store` instruction to each address in this block - pub(super) last_stores: im::OrdMap, + pub(super) last_stores: im::OrdMap, // The last instance of a `Load` instruction to each address in this block - pub(super) last_loads: im::OrdMap, + pub(super) last_loads: im::OrdMap, } /// An `Expression` here is used to represent a canonical key @@ -46,14 +46,14 @@ pub(super) struct Block { pub(super) enum Expression { Dereference(Box), ArrayElement(Box), - Other(ValueId), + Other(Value), } /// Every reference's value is either Known and can be optimized away, or Unknown. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub(super) enum ReferenceValue { Unknown, - Known(ValueId), + Known(Value), } impl ReferenceValue { @@ -68,7 +68,7 @@ impl ReferenceValue { impl Block { /// If the given reference id points to a known value, return the value - pub(super) fn get_known_value(&self, address: ValueId) -> Option { + pub(super) fn get_known_value(&self, address: Value) -> Option { if let Some(expression) = self.expressions.get(&address) { if let Some(aliases) = self.aliases.get(expression) { // We could allow multiple aliases if we check that the reference @@ -84,15 +84,15 @@ impl Block { } /// If the given address is known, set its value to `ReferenceValue::Known(value)`. - pub(super) fn set_known_value(&mut self, address: ValueId, value: ValueId) { + pub(super) fn set_known_value(&mut self, address: Value, value: Value) { self.set_value(address, ReferenceValue::Known(value)); } - pub(super) fn set_unknown(&mut self, address: ValueId) { + pub(super) fn set_unknown(&mut self, address: Value) { self.set_value(address, ReferenceValue::Unknown); } - fn set_value(&mut self, address: ValueId, value: ReferenceValue) { + fn set_value(&mut self, address: Value, value: ReferenceValue) { let expression = self.expressions.entry(address).or_insert(Expression::Other(address)); let aliases = self.aliases.entry(expression.clone()).or_default(); @@ -153,8 +153,8 @@ impl Block { pub(super) fn remember_dereference( &mut self, function: &Function, - address: ValueId, - result: ValueId, + address: Value, + result: Value, ) { if function.dfg.value_is_reference(result) { if let Some(known_address) = self.get_known_value(address) { @@ -170,11 +170,7 @@ impl Block { } /// Iterate through each known alias of the given address and apply the function `f` to each. 
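A simplified sketch of the Known/Unknown bookkeeping these block-level maps implement: a store records a known value for an address, a later load of that address can be replaced by it, and any write through an alias downgrades the entry to unknown. The single-alias requirement and the expression keys of the real pass are omitted, and all names here other than `ReferenceValue` are invented.

use std::collections::HashMap;

type Address = u32;
type Val = u32;

/// Every reference's value is either known (loads of it can be replaced)
/// or unknown (loads must stay).
#[derive(Clone, Copy, Debug)]
enum ReferenceValue {
    Unknown,
    Known(Val),
}

#[derive(Default)]
struct References {
    values: HashMap<Address, ReferenceValue>,
}

impl References {
    fn store(&mut self, address: Address, value: Val) {
        self.values.insert(address, ReferenceValue::Known(value));
    }

    /// Called when the address may have been written through an alias.
    fn set_unknown(&mut self, address: Address) {
        self.values.insert(address, ReferenceValue::Unknown);
    }

    /// Value a `load address` can be replaced with, if it is still known.
    fn get_known_value(&self, address: Address) -> Option<Val> {
        match self.values.get(&address) {
            Some(ReferenceValue::Known(value)) => Some(*value),
            _ => None,
        }
    }
}

fn main() {
    let mut refs = References::default();
    refs.store(0, 5);
    assert_eq!(refs.get_known_value(0), Some(5)); // load forwarded from the store
    refs.set_unknown(0);                          // e.g. a second alias was stored through
    assert_eq!(refs.get_known_value(0), None);    // load must remain
}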
- fn for_each_alias_of( - &mut self, - address: ValueId, - mut f: impl FnMut(&mut Self, ValueId) -> T, - ) { + fn for_each_alias_of(&mut self, address: Value, mut f: impl FnMut(&mut Self, Value) -> T) { if let Some(expr) = self.expressions.get(&address) { if let Some(aliases) = self.aliases.get(expr).cloned() { aliases.for_each(|alias| { @@ -184,13 +180,13 @@ impl Block { } } - fn keep_last_stores_for(&mut self, address: ValueId, function: &Function) { + fn keep_last_stores_for(&mut self, address: Value, function: &Function) { let address = function.dfg.resolve(address); self.keep_last_store(address, function); self.for_each_alias_of(address, |t, alias| t.keep_last_store(alias, function)); } - fn keep_last_store(&mut self, address: ValueId, function: &Function) { + fn keep_last_store(&mut self, address: Value, function: &Function) { let address = function.dfg.resolve(address); if let Some(instruction) = self.last_stores.remove(&address) { @@ -207,7 +203,7 @@ impl Block { } } - pub(super) fn mark_value_used(&mut self, value: ValueId, function: &Function) { + pub(super) fn mark_value_used(&mut self, value: Value, function: &Function) { self.keep_last_stores_for(value, function); // We must do a recursive check for arrays since they're the only Values which may contain @@ -220,10 +216,7 @@ impl Block { } /// Collect all aliases used by the given value list - pub(super) fn collect_all_aliases( - &self, - values: impl IntoIterator, - ) -> AliasSet { + pub(super) fn collect_all_aliases(&self, values: impl IntoIterator) -> AliasSet { let mut aliases = AliasSet::known_empty(); for value in values { aliases.unify(&self.get_aliases_for_value(value)); @@ -231,7 +224,7 @@ impl Block { aliases } - pub(super) fn get_aliases_for_value(&self, value: ValueId) -> Cow { + pub(super) fn get_aliases_for_value(&self, value: Value) -> Cow { if let Some(expression) = self.expressions.get(&value) { if let Some(aliases) = self.aliases.get(expression) { return Cow::Borrowed(aliases); @@ -241,11 +234,11 @@ impl Block { Cow::Owned(AliasSet::unknown()) } - pub(super) fn set_last_load(&mut self, address: ValueId, instruction: InstructionId) { + pub(super) fn set_last_load(&mut self, address: Value, instruction: InstructionId) { self.last_loads.insert(address, instruction); } - pub(super) fn keep_last_load_for(&mut self, address: ValueId, function: &Function) { + pub(super) fn keep_last_load_for(&mut self, address: Value, function: &Function) { let address = function.dfg.resolve(address); self.last_loads.remove(&address); self.for_each_alias_of(address, |block, alias| block.last_loads.remove(&alias)); diff --git a/compiler/noirc_evaluator/src/ssa/opt/mod.rs b/compiler/noirc_evaluator/src/ssa/opt/mod.rs index bd0c86570e2..bcdcde30918 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/mod.rs @@ -39,7 +39,7 @@ pub(crate) fn assert_normalized_ssa_equals(mut ssa: super::Ssa, expected: &str) use crate::{ssa::Ssa, trim_comments_from_lines, trim_leading_whitespace_from_lines}; - ssa.normalize_ids(); + ssa = ssa.normalize_ids(); let ssa = ssa.to_string(); let ssa = trim_leading_whitespace_from_lines(&ssa); diff --git a/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs b/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs index 63ca523bd57..fbf49ff6cbb 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs @@ -6,12 +6,11 @@ use crate::ssa::{ function::{Function, FunctionId}, 
map::SparseMap, post_order::PostOrder, - value::{Value, ValueId}, + value::Value, }, ssa_gen::Ssa, }; use fxhash::FxHashMap as HashMap; -use iter_extended::vecmap; impl Ssa { /// This is a debugging pass which re-inserts each instruction @@ -21,13 +20,14 @@ impl Ssa { /// During normal compilation this is often not the case since prior passes /// may increase the ID counter so that later passes start at different offsets, /// even if they contain the same SSA code. - pub(crate) fn normalize_ids(&mut self) { + pub(crate) fn normalize_ids(mut self) -> Self { let mut context = Context::default(); context.populate_functions(&self.functions); for function in self.functions.values_mut() { context.normalize_ids(function); } self.functions = context.functions.into_btree(); + self } } @@ -52,7 +52,7 @@ struct IdMaps { // Maps old value id -> new value id // Cleared in between each function. - values: HashMap, + values: HashMap, } impl Context { @@ -92,22 +92,16 @@ impl Context { new_function.dfg.call_stack_data.get_or_insert_locations(locations); let old_results = old_function.dfg.instruction_results(old_instruction_id); - let ctrl_typevars = instruction - .requires_ctrl_typevars() - .then(|| vecmap(old_results, |result| old_function.dfg.type_of_value(*result))); - let new_results = new_function.dfg.insert_instruction_and_results( instruction, new_block_id, - ctrl_typevars, new_call_stack, ); - assert_eq!(old_results.len(), new_results.len()); - for (old_result, new_result) in old_results.iter().zip(new_results.results().iter()) - { - let old_result = old_function.dfg.resolve(*old_result); - self.new_ids.values.insert(old_result, *new_result); + assert_eq!(old_results.len() as u32, new_results.len()); + for (old_result, new_result) in old_results.zip(new_results.results()) { + let old_result = old_function.dfg.resolve(old_result); + self.new_ids.values.insert(old_result, new_result); } } @@ -148,8 +142,8 @@ impl IdMaps { } let new_id = self.blocks[old_id]; - let old_block = &mut old_function.dfg[*old_id]; - for old_parameter in old_block.take_parameters() { + + for old_parameter in old_function.dfg.block_parameters(*old_id) { let old_parameter = old_function.dfg.resolve(old_parameter); let typ = old_function.dfg.type_of_value(old_parameter); let new_parameter = new_function.dfg.add_block_parameter(new_id, typ); @@ -162,33 +156,35 @@ impl IdMaps { &mut self, new_function: &mut Function, old_function: &Function, - old_value: ValueId, - ) -> ValueId { - let old_value = old_function.dfg.resolve(old_value); - match &old_function.dfg[old_value] { + old_value: Value, + ) -> Value { + match old_function.dfg.resolve(old_value) { value @ Value::Instruction { instruction, .. } => { - *self.values.get(&old_value).unwrap_or_else(|| { - let instruction = &old_function.dfg[*instruction]; - unreachable!("Unmapped value with id {old_value}: {value:?}\n from instruction: {instruction:?}, SSA: {old_function}") + *self.values.get(&value).unwrap_or_else(|| { + let instruction = &old_function.dfg[instruction]; + unreachable!("Unmapped value {value}\n from instruction: {instruction:?}, SSA: {old_function}") }) } value @ Value::Param { .. 
} => { - *self.values.get(&old_value).unwrap_or_else(|| { - unreachable!("Unmapped value with id {old_value}: {value:?}") + *self.values.get(&value).unwrap_or_else(|| { + unreachable!("Unmapped value: {value}") }) } Value::Function(id) => { - let new_id = self.function_ids[id]; - new_function.dfg.import_function(new_id) + let new_id = *self.function_ids.get(&id).unwrap_or_else(|| { + unreachable!("Unmapped function {id}\nOld function:\n{old_function}\n\nNew function:\n{new_function}") + }); + Value::Function(new_id) } - Value::NumericConstant { constant, typ } => { - new_function.dfg.make_constant(*constant, *typ) + value @ Value::NumericConstant { .. } => value, + Value::Intrinsic(intrinsic) => Value::Intrinsic(intrinsic), + Value::ForeignFunction(id) => { + let name = &old_function.dfg[id]; + new_function.dfg.import_foreign_function(name) } - Value::Intrinsic(intrinsic) => new_function.dfg.import_intrinsic(*intrinsic), - Value::ForeignFunction(name) => new_function.dfg.import_foreign_function(name), } } } diff --git a/compiler/noirc_evaluator/src/ssa/opt/rc.rs b/compiler/noirc_evaluator/src/ssa/opt/rc.rs index 64f6e2ddfea..a25614efb6e 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/rc.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/rc.rs @@ -5,7 +5,7 @@ use crate::ssa::{ function::Function, instruction::{Instruction, InstructionId}, types::Type, - value::ValueId, + value::Value, }, ssa_gen::Ssa, }; @@ -40,7 +40,7 @@ struct Context { pub(crate) struct RcInstruction { pub(crate) id: InstructionId, - pub(crate) array: ValueId, + pub(crate) array: Value, pub(crate) possibly_mutated: bool, } @@ -69,8 +69,8 @@ impl Function { } fn contains_array_parameter(function: &mut Function) -> bool { - let mut parameters = function.parameters().iter(); - parameters.any(|parameter| function.dfg.type_of_value(*parameter).contains_an_array()) + let mut parameters = function.parameters(); + parameters.any(|parameter| function.dfg.type_of_value(parameter).contains_an_array()) } impl Context { @@ -129,7 +129,7 @@ impl Context { /// Finds and pops the IncRc for the given array value if possible. 
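Loosely, `pop_rc_for` pairs a `dec_rc` with a pending `inc_rc` on the same array so both can be removed when the array was never possibly mutated in between. The sketch below shows only the shape of that bookkeeping, with invented names and a simplified map layout; it is not the pass's actual data structure.

use std::collections::HashMap;

type ArrayVal = u32;
type InstructionId = usize;

struct PendingIncRc {
    id: InstructionId,
    possibly_mutated: bool,
}

/// Pop the most recent pending `inc_rc` for `array`, if one exists and the
/// array has not possibly been mutated since; the caller can then delete the
/// matching inc_rc/dec_rc pair.
fn pop_rc_for(
    array: ArrayVal,
    pending: &mut HashMap<ArrayVal, Vec<PendingIncRc>>,
) -> Option<InstructionId> {
    let stack = pending.get_mut(&array)?;
    if stack.last()?.possibly_mutated {
        return None;
    }
    stack.pop().map(|inc| inc.id)
}

fn main() {
    let mut pending: HashMap<ArrayVal, Vec<PendingIncRc>> = HashMap::new();
    pending.insert(7, vec![PendingIncRc { id: 3, possibly_mutated: false }]);

    // dec_rc on the same, unmutated array: instruction 3 and this dec_rc can both go.
    assert_eq!(pop_rc_for(7, &mut pending), Some(3));
    // no pending inc_rc left, so a further dec_rc must be kept.
    assert_eq!(pop_rc_for(7, &mut pending), None);
}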
pub(crate) fn pop_rc_for( - value: ValueId, + value: Value, function: &Function, inc_rcs: &mut HashMap>, ) -> Option { @@ -255,8 +255,8 @@ mod test { builder.insert_inc_rc(v0); let v2 = builder.insert_load(v1, array_type); - let zero = builder.numeric_constant(0u128, NumericType::unsigned(64)); - let five = builder.field_constant(5u128); + let zero = builder.constant(0u128.into(), NumericType::unsigned(64)); + let five = builder.field_constant(5u128.into()); let v7 = builder.insert_array_set(v2, zero, five); builder.insert_store(v1, v7); @@ -306,8 +306,8 @@ mod test { builder.insert_store(v0, v1); let v2 = builder.insert_load(v1, array_type.clone()); - let zero = builder.numeric_constant(0u128, NumericType::unsigned(64)); - let five = builder.field_constant(5u128); + let zero = builder.constant(0u128.into(), NumericType::unsigned(64)); + let five = builder.field_constant(5u128.into()); let v7 = builder.insert_array_set(v2, zero, five); builder.insert_store(v0, v7); diff --git a/compiler/noirc_evaluator/src/ssa/opt/remove_bit_shifts.rs b/compiler/noirc_evaluator/src/ssa/opt/remove_bit_shifts.rs index 4c5189d8c91..b75f1c450d0 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/remove_bit_shifts.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/remove_bit_shifts.rs @@ -1,16 +1,18 @@ -use std::{borrow::Cow, sync::Arc}; - use acvm::{acir::AcirField, FieldElement}; +use std::sync::Arc; use crate::ssa::{ ir::{ basic_block::BasicBlockId, call_stack::CallStackId, - dfg::InsertInstructionResult, function::{Function, RuntimeType}, - instruction::{Binary, BinaryOp, Endian, Instruction, InstructionId, Intrinsic}, + instruction::insert_result::InsertInstructionResult, + instruction::{ + insert_result::InsertInstructionResultIter, Binary, BinaryOp, Endian, Instruction, + InstructionId, Intrinsic, + }, types::{NumericType, Type}, - value::ValueId, + value::Value, }, ssa_gen::Ssa, }; @@ -67,8 +69,7 @@ impl Context<'_> { { self.call_stack = self.function.dfg.get_instruction_call_stack_id(instruction_id); - let old_result = - *self.function.dfg.instruction_results(instruction_id).first().unwrap(); + let old_result = Value::instruction_result(instruction_id, 0); let bit_size = match self.function.dfg.type_of_value(lhs) { Type::Numeric(NumericType::Signed { bit_size }) @@ -81,7 +82,7 @@ impl Context<'_> { self.insert_shift_right(lhs, rhs, bit_size) }; - self.function.dfg.set_value_from_id(old_result, new_result); + self.function.dfg.replace_value(old_result, new_result); } _ => { self.new_instructions.push(instruction_id); @@ -97,11 +98,11 @@ impl Context<'_> { /// and truncate the result to bit_size pub(crate) fn insert_wrapping_shift_left( &mut self, - lhs: ValueId, - rhs: ValueId, - bit_size: u32, - ) -> ValueId { - let base = self.field_constant(FieldElement::from(2_u128)); + lhs: Value, + rhs: Value, + bit_size: u8, + ) -> Value { + let base = self.function.dfg.field_constant(FieldElement::from(2_u128)); let typ = self.function.dfg.type_of_value(lhs).unwrap_numeric(); let (max_bit, pow) = if let Some(rhs_constant) = self.function.dfg.get_numeric_constant(rhs) { @@ -113,26 +114,29 @@ impl Context<'_> { if overflows { assert!(bit_size < 128, "ICE - shift left with big integers are not supported"); if bit_size < 128 { - let zero = self.numeric_constant(FieldElement::zero(), typ); - return InsertInstructionResult::SimplifiedTo(zero).first(); + return self.function.dfg.constant(FieldElement::zero(), typ); } } - let pow = self.numeric_constant(FieldElement::from(rhs_bit_size_pow_2), typ); + let pow = 
self.function.dfg.constant(FieldElement::from(rhs_bit_size_pow_2), typ); let max_lhs_bits = self.function.dfg.get_value_max_num_bits(lhs); - (max_lhs_bits + bit_shift_size, pow) + (max_lhs_bits + bit_shift_size as u8, pow) } else { // we use a predicate to nullify the result in case of overflow let u8_type = NumericType::unsigned(8); - let bit_size_var = self.numeric_constant(FieldElement::from(bit_size as u128), u8_type); + let bit_size_var = + self.function.dfg.constant(FieldElement::from(bit_size as u128), u8_type); let overflow = self.insert_binary(rhs, BinaryOp::Lt, bit_size_var); let predicate = self.insert_cast(overflow, typ); // we can safely cast to unsigned because overflow_checks prevent bit-shift with a negative value let rhs_unsigned = self.insert_cast(rhs, NumericType::unsigned(bit_size)); let pow = self.pow(base, rhs_unsigned); let pow = self.insert_cast(pow, typ); - (FieldElement::max_num_bits(), self.insert_binary(predicate, BinaryOp::Mul, pow)) + ( + FieldElement::max_num_bits().try_into().unwrap(), + self.insert_binary(predicate, BinaryOp::Mul, pow), + ) }; if max_bit <= bit_size { @@ -149,21 +153,17 @@ impl Context<'_> { /// Insert ssa instructions which computes lhs >> rhs by doing lhs/2^rhs /// For negative signed integers, we do the division on the 1-complement representation of lhs, /// before converting back the result to the 2-complement representation. - pub(crate) fn insert_shift_right( - &mut self, - lhs: ValueId, - rhs: ValueId, - bit_size: u32, - ) -> ValueId { + pub(crate) fn insert_shift_right(&mut self, lhs: Value, rhs: Value, bit_size: u8) -> Value { let lhs_typ = self.function.dfg.type_of_value(lhs).unwrap_numeric(); - let base = self.field_constant(FieldElement::from(2_u128)); + let base = self.function.dfg.field_constant(FieldElement::from(2_u128)); let pow = self.pow(base, rhs); if lhs_typ.is_unsigned() { // unsigned right bit shift is just a normal division self.insert_binary(lhs, BinaryOp::Div, pow) } else { // Get the sign of the operand; positive signed operand will just do a division as well - let zero = self.numeric_constant(FieldElement::zero(), NumericType::signed(bit_size)); + let zero = + self.function.dfg.constant(FieldElement::zero(), NumericType::signed(bit_size)); let lhs_sign = self.insert_binary(lhs, BinaryOp::Lt, zero); let lhs_sign_as_field = self.insert_cast(lhs_sign, NumericType::NativeField); let lhs_as_field = self.insert_cast(lhs, NumericType::NativeField); @@ -189,20 +189,21 @@ impl Context<'_> { /// let b = rhs_bits[bit_size - i]; /// r = (r_squared * lhs * b) + (1 - b) * r_squared; /// } - fn pow(&mut self, lhs: ValueId, rhs: ValueId) -> ValueId { + fn pow(&mut self, lhs: Value, rhs: Value) -> Value { let typ = self.function.dfg.type_of_value(rhs); if let Type::Numeric(NumericType::Unsigned { bit_size }) = typ { - let to_bits = self.function.dfg.import_intrinsic(Intrinsic::ToBits(Endian::Little)); - let result_types = vec![Type::Array(Arc::new(vec![Type::bool()]), bit_size)]; - let rhs_bits = self.insert_call(to_bits, vec![rhs], result_types); + let to_bits = Value::Intrinsic(Intrinsic::ToBits(Endian::Little)); + let result_types = vec![Type::Array(Arc::new(vec![Type::bool()]), bit_size as u32)]; + + let rhs_bits = self.insert_call(to_bits, vec![rhs], result_types).next().unwrap(); - let rhs_bits = rhs_bits[0]; - let one = self.field_constant(FieldElement::one()); + let one = self.function.dfg.field_constant(FieldElement::one()); let mut r = one; for i in 1..bit_size + 1 { let r_squared = self.insert_binary(r, BinaryOp::Mul, 
r); let a = self.insert_binary(r_squared, BinaryOp::Mul, lhs); - let idx = self.field_constant(FieldElement::from((bit_size - i) as i128)); + let idx = + self.function.dfg.field_constant(FieldElement::from((bit_size - i) as i128)); let b = self.insert_array_get(rhs_bits, idx, Type::bool()); let not_b = self.insert_not(b); let b = self.insert_cast(b, NumericType::NativeField); @@ -217,91 +218,70 @@ impl Context<'_> { } } - pub(crate) fn field_constant(&mut self, constant: FieldElement) -> ValueId { - self.function.dfg.make_constant(constant, NumericType::NativeField) - } - - /// Insert a numeric constant into the current function - pub(crate) fn numeric_constant( - &mut self, - value: impl Into, - typ: NumericType, - ) -> ValueId { - self.function.dfg.make_constant(value.into(), typ) - } - /// Insert a binary instruction at the end of the current block. /// Returns the result of the binary instruction. - pub(crate) fn insert_binary( - &mut self, - lhs: ValueId, - operator: BinaryOp, - rhs: ValueId, - ) -> ValueId { + pub(crate) fn insert_binary(&mut self, lhs: Value, operator: BinaryOp, rhs: Value) -> Value { let instruction = Instruction::Binary(Binary { lhs, rhs, operator }); - self.insert_instruction(instruction, None).first() + self.insert_instruction(instruction).first() } /// Insert a not instruction at the end of the current block. /// Returns the result of the instruction. - pub(crate) fn insert_not(&mut self, rhs: ValueId) -> ValueId { - self.insert_instruction(Instruction::Not(rhs), None).first() + pub(crate) fn insert_not(&mut self, rhs: Value) -> Value { + self.insert_instruction(Instruction::Not(rhs)).first() } /// Insert a truncate instruction at the end of the current block. /// Returns the result of the truncate instruction. pub(crate) fn insert_truncate( &mut self, - value: ValueId, - bit_size: u32, - max_bit_size: u32, - ) -> ValueId { - self.insert_instruction(Instruction::Truncate { value, bit_size, max_bit_size }, None) - .first() + value: Value, + bit_size: u8, + max_bit_size: u8, + ) -> Value { + self.insert_instruction(Instruction::Truncate { value, bit_size, max_bit_size }).first() } /// Insert a cast instruction at the end of the current block. /// Returns the result of the cast instruction. - pub(crate) fn insert_cast(&mut self, value: ValueId, typ: NumericType) -> ValueId { - self.insert_instruction(Instruction::Cast(value, typ), None).first() + pub(crate) fn insert_cast(&mut self, value: Value, typ: NumericType) -> Value { + self.insert_instruction(Instruction::Cast(value, typ)).first() } /// Insert a call instruction at the end of the current block and return /// the results of the call. 
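The loop in `pow` above is square-and-multiply driven by the exponent's bits, with the multiply step selected arithmetically by the bit `b` (r = r_squared * lhs * b + (1 - b) * r_squared) so that no branching is needed in the circuit. A plain Rust rendering of the same recurrence over `u128`, useful only for checking the algebra and not how the SSA builder emits it, is:

/// Compute base^exp with the same bit-driven recurrence the pass emits:
/// for each exponent bit from most to least significant,
///   r = r*r * base   if the bit is 1
///   r = r*r          if the bit is 0
fn pow_by_bits(base: u128, exp: u32, bit_size: u32) -> u128 {
    let mut r: u128 = 1;
    for i in 1..=bit_size {
        let r_squared = r * r;
        let b = (exp >> (bit_size - i)) & 1; // rhs_bits[bit_size - i]
        r = if b == 1 { r_squared * base } else { r_squared };
    }
    r
}

fn main() {
    assert_eq!(pow_by_bits(2, 10, 8), 1024);
    assert_eq!(pow_by_bits(3, 5, 8), 243);
    assert_eq!(pow_by_bits(7, 0, 8), 1);
}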
pub(crate) fn insert_call( &mut self, - func: ValueId, - arguments: Vec, + func: Value, + arguments: Vec, result_types: Vec, - ) -> Cow<[ValueId]> { - self.insert_instruction(Instruction::Call { func, arguments }, Some(result_types)).results() + ) -> InsertInstructionResultIter { + let call = Instruction::Call { func, arguments, result_types }; + self.insert_instruction(call).results() } /// Insert an instruction to extract an element from an array pub(crate) fn insert_array_get( &mut self, - array: ValueId, - index: ValueId, - element_type: Type, - ) -> ValueId { - let element_type = Some(vec![element_type]); - self.insert_instruction(Instruction::ArrayGet { array, index }, element_type).first() + array: Value, + index: Value, + result_type: Type, + ) -> Value { + self.insert_instruction(Instruction::ArrayGet { array, index, result_type }).first() } pub(crate) fn insert_instruction( &mut self, instruction: Instruction, - ctrl_typevars: Option>, ) -> InsertInstructionResult { let result = self.function.dfg.insert_instruction_and_results( instruction, self.block, - ctrl_typevars, self.call_stack, ); - if let InsertInstructionResult::Results(instruction_id, _) = result { - self.new_instructions.push(instruction_id); + if let InsertInstructionResult::Results { id, .. } = result { + self.new_instructions.push(id); } result diff --git a/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs b/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs index e99f239e82e..272b11f98f7 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs @@ -18,7 +18,6 @@ use crate::ssa::{ dfg::DataFlowGraph, function::{Function, RuntimeType}, instruction::{BinaryOp, Hint, Instruction, Intrinsic}, - types::NumericType, value::Value, }, ssa_gen::Ssa, @@ -70,8 +69,7 @@ impl Context { ) { let instructions = function.dfg[block].take_instructions(); - let one = FieldElement::one(); - let mut active_condition = function.dfg.make_constant(one, NumericType::bool()); + let mut active_condition = function.dfg.bool_constant(true); let mut last_side_effects_enabled_instruction = None; let mut new_instructions = Vec::with_capacity(instructions.len()); @@ -152,12 +150,12 @@ impl Context { EnableSideEffectsIf { .. } | ArrayGet { .. } | ArraySet { .. } - | Allocate + | Allocate { .. } | Store { .. } | Load { .. } => true, // Some `Intrinsic`s have side effects so we must check what kind of `Call` this is. - Call { func, .. } => match dfg[*func] { + Call { func, .. 
} => match *func { Value::Intrinsic(intrinsic) => match intrinsic { Intrinsic::SlicePushBack | Intrinsic::SlicePushFront @@ -202,7 +200,7 @@ mod test { ir::{ instruction::{BinaryOp, Instruction}, map::Id, - types::{NumericType, Type}, + types::Type, }, }; @@ -233,19 +231,18 @@ mod test { let mut builder = FunctionBuilder::new("main".into(), main_id); let v0 = builder.add_parameter(Type::field()); - let two = builder.field_constant(2u128); + let two = builder.field_constant(2u128.into()); + let true_bool = builder.bool_constant(true); - let one = builder.numeric_constant(1u128, NumericType::bool()); - - builder.insert_enable_side_effects_if(one); + builder.insert_enable_side_effects_if(true_bool); builder.insert_binary(v0, BinaryOp::Mul, two); - builder.insert_enable_side_effects_if(one); + builder.insert_enable_side_effects_if(true_bool); builder.insert_binary(v0, BinaryOp::Mul, two); - builder.insert_enable_side_effects_if(one); + builder.insert_enable_side_effects_if(true_bool); builder.insert_binary(v0, BinaryOp::Mul, two); - builder.insert_enable_side_effects_if(one); + builder.insert_enable_side_effects_if(true_bool); builder.insert_binary(v0, BinaryOp::Mul, two); - builder.insert_enable_side_effects_if(one); + builder.insert_enable_side_effects_if(true_bool); let ssa = builder.finish(); diff --git a/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs b/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs index 9a931e36ea2..28fe95bce66 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs @@ -1,17 +1,12 @@ use std::collections::hash_map::Entry; -use acvm::{acir::AcirField, FieldElement}; use fxhash::FxHashMap as HashMap; -use crate::ssa::ir::function::RuntimeType; -use crate::ssa::ir::instruction::Hint; -use crate::ssa::ir::types::NumericType; -use crate::ssa::ir::value::ValueId; use crate::ssa::{ ir::{ dfg::DataFlowGraph, - function::Function, - instruction::{Instruction, Intrinsic}, + function::{Function, RuntimeType}, + instruction::{Hint, Instruction, Intrinsic}, types::Type, value::Value, }, @@ -50,22 +45,21 @@ impl Function { #[derive(Default)] struct Context { - slice_sizes: HashMap, + slice_sizes: HashMap, // Maps array_set result -> element that was overwritten by that instruction. // Used to undo array_sets while merging values - prev_array_set_elem_values: HashMap, + prev_array_set_elem_values: HashMap, // Maps array_set result -> enable_side_effects_if value which was active during it. - array_set_conditionals: HashMap, + array_set_conditionals: HashMap, } impl Context { fn remove_if_else(&mut self, function: &mut Function) { let block = function.entry_block(); let instructions = function.dfg[block].take_instructions(); - let one = FieldElement::one(); - let mut current_conditional = function.dfg.make_constant(one, NumericType::bool()); + let mut current_conditional = function.dfg.bool_constant(true); for instruction in instructions { match &function.dfg[instruction] { @@ -96,19 +90,13 @@ impl Context { ); let _typ = function.dfg.type_of_value(value); - let results = function.dfg.instruction_results(instruction); - let result = results[0]; - // let result = match typ { - // Type::Array(..) => results[0], - // Type::Slice(..) => results[1], - // other => unreachable!("IfElse instructions should only have arrays or slices at this point. 
Found {other:?}"), - // }; - - function.dfg.set_value_from_id(result, value); + let result = Value::instruction_result(instruction, 0); + + function.dfg.replace_value(result, value); self.array_set_conditionals.insert(result, current_conditional); } - Instruction::Call { func, arguments } => { - if let Value::Intrinsic(intrinsic) = function.dfg[*func] { + Instruction::Call { func, arguments, result_types: _ } => { + if let Value::Intrinsic(intrinsic) = *func { let results = function.dfg.instruction_results(instruction); match slice_capacity_change(&function.dfg, intrinsic, arguments, results) { @@ -131,9 +119,7 @@ impl Context { function.dfg[block].instructions_mut().push(instruction); } Instruction::ArraySet { array, .. } => { - let results = function.dfg.instruction_results(instruction); - let result = if results.len() == 2 { results[1] } else { results[0] }; - + let result = Value::instruction_result(instruction, 0); self.array_set_conditionals.insert(result, current_conditional); let old_capacity = self.get_or_find_capacity(&function.dfg, *array); @@ -151,7 +137,7 @@ impl Context { } } - fn get_or_find_capacity(&mut self, dfg: &DataFlowGraph, value: ValueId) -> u32 { + fn get_or_find_capacity(&mut self, dfg: &DataFlowGraph, value: Value) -> u32 { match self.slice_sizes.entry(value) { Entry::Occupied(entry) => return *entry.get(), Entry::Vacant(entry) => { @@ -166,34 +152,33 @@ impl Context { } } - let dbg_value = &dfg[value]; - unreachable!("No size for slice {value} = {dbg_value:?}") + unreachable!("No size for slice {value}") } } enum SizeChange { None, - SetTo(ValueId, u32), + SetTo(Value, u32), // These two variants store the old and new slice ids // not their lengths which should be old_len = new_len +/- 1 - Inc { old: ValueId, new: ValueId }, - Dec { old: ValueId, new: ValueId }, + Inc { old: Value, new: Value }, + Dec { old: Value, new: Value }, } /// Find the change to a slice's capacity an instruction would have fn slice_capacity_change( dfg: &DataFlowGraph, intrinsic: Intrinsic, - arguments: &[ValueId], - results: &[ValueId], + arguments: &[Value], + mut results: impl ExactSizeIterator, ) -> SizeChange { match intrinsic { Intrinsic::SlicePushBack | Intrinsic::SlicePushFront | Intrinsic::SliceInsert => { // Expecting: len, slice = ... 
assert_eq!(results.len(), 2); let old = arguments[1]; - let new = results[1]; + let new = results.nth(1).unwrap(); assert!(matches!(dfg.type_of_value(old), Type::Slice(_))); assert!(matches!(dfg.type_of_value(new), Type::Slice(_))); SizeChange::Inc { old, new } @@ -201,7 +186,7 @@ fn slice_capacity_change( Intrinsic::SlicePopBack | Intrinsic::SliceRemove => { let old = arguments[1]; - let new = results[1]; + let new = results.nth(1).unwrap(); assert!(matches!(dfg.type_of_value(old), Type::Slice(_))); assert!(matches!(dfg.type_of_value(new), Type::Slice(_))); SizeChange::Dec { old, new } @@ -209,7 +194,7 @@ fn slice_capacity_change( Intrinsic::SlicePopFront => { let old = arguments[1]; - let new = results[results.len() - 1]; + let new = results.last().unwrap(); assert!(matches!(dfg.type_of_value(old), Type::Slice(_))); assert!(matches!(dfg.type_of_value(new), Type::Slice(_))); SizeChange::Dec { old, new } @@ -222,8 +207,9 @@ fn slice_capacity_change( Type::Array(_, length) => length, other => unreachable!("slice_capacity_change expected array, found {other:?}"), }; - assert!(matches!(dfg.type_of_value(results[1]), Type::Slice(_))); - SizeChange::SetTo(results[1], length) + let slice = results.nth(1).unwrap(); + assert!(matches!(dfg.type_of_value(slice), Type::Slice(_))); + SizeChange::SetTo(slice, length) } // These cases don't affect slice capacities diff --git a/compiler/noirc_evaluator/src/ssa/opt/resolve_is_unconstrained.rs b/compiler/noirc_evaluator/src/ssa/opt/resolve_is_unconstrained.rs index eff6576b87f..27271dace93 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/resolve_is_unconstrained.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/resolve_is_unconstrained.rs @@ -25,6 +25,8 @@ impl Ssa { impl Function { pub(crate) fn replace_is_unconstrained_result(&mut self) { let mut is_unconstrained_calls = HashSet::default(); + let mut blocks_with_is_unconstrained_calls = HashSet::default(); + // Collect all calls to is_unconstrained for block_id in self.reachable_blocks() { for &instruction_id in self.dfg[block_id].instructions() { @@ -33,23 +35,26 @@ impl Function { _ => continue, }; - if let Value::Intrinsic(Intrinsic::IsUnconstrained) = &self.dfg[target_func] { + if let Value::Intrinsic(Intrinsic::IsUnconstrained) = self.dfg.resolve(target_func) + { is_unconstrained_calls.insert(instruction_id); + blocks_with_is_unconstrained_calls.insert(block_id); } } } let is_unconstrained = matches!(self.runtime(), RuntimeType::Brillig(_)).into(); - let is_within_unconstrained = self.dfg.make_constant(is_unconstrained, NumericType::bool()); - for instruction_id in is_unconstrained_calls { - let call_returns = self.dfg.instruction_results(instruction_id); - let original_return_id = call_returns[0]; - - // We replace the result with a fresh id. This will be unused, so the DIE pass will remove the leftover intrinsic call. 
- self.dfg.replace_result(instruction_id, original_return_id); + let is_within_unconstrained = self.dfg.constant(is_unconstrained, NumericType::bool()); + for instruction_id in &is_unconstrained_calls { // Replace all uses of the original return value with the constant - self.dfg.set_value_from_id(original_return_id, is_within_unconstrained); + let original_return_id = Value::instruction_result(*instruction_id, 0); + self.dfg.replace_value(original_return_id, is_within_unconstrained); + } + + // Manually remove each call instruction that we just mapped to a constant + for block in blocks_with_is_unconstrained_calls { + self.dfg[block].instructions_mut().retain(|id| !is_unconstrained_calls.contains(id)); } } } diff --git a/compiler/noirc_evaluator/src/ssa/opt/runtime_separation.rs b/compiler/noirc_evaluator/src/ssa/opt/runtime_separation.rs index b671d5011a1..089ea4d75ab 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/runtime_separation.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/runtime_separation.rs @@ -6,7 +6,7 @@ use crate::ssa::{ ir::{ function::{Function, FunctionId, RuntimeType}, instruction::Instruction, - value::{Value, ValueId}, + value::Value, }, ssa_gen::Ssa, }; @@ -111,13 +111,11 @@ impl RuntimeSeparatorContext { fn replace_calls_to_mapped_functions(&self, ssa: &mut Ssa) { for (_function_id, func) in ssa.functions.iter_mut() { if matches!(func.runtime(), RuntimeType::Brillig(_)) { - for called_func_value_id in called_functions_values(func).iter() { - let Value::Function(called_func_id) = &func.dfg[*called_func_value_id] else { - unreachable!("Value should be a function") - }; - if let Some(mapped_func_id) = self.mapped_functions.get(called_func_id) { - let mapped_value_id = func.dfg.import_function(*mapped_func_id); - func.dfg.set_value_from_id(*called_func_value_id, mapped_value_id); + for called_func_id in called_functions(func) { + if let Some(mapped_func_id) = self.mapped_functions.get(&called_func_id) { + let mapped_value_id = Value::Function(*mapped_func_id); + let called_func_value = Value::Function(called_func_id); + func.dfg.replace_value(called_func_value, mapped_value_id); } } } @@ -126,16 +124,16 @@ impl RuntimeSeparatorContext { } // We only consider direct calls to functions since functions as values should have been resolved -fn called_functions_values(func: &Function) -> BTreeSet { +fn called_functions(func: &Function) -> BTreeSet { let mut called_function_ids = BTreeSet::default(); for block_id in func.reachable_blocks() { for instruction_id in func.dfg[block_id].instructions() { - let Instruction::Call { func: called_value_id, .. } = &func.dfg[*instruction_id] else { + let Instruction::Call { func: called_value, .. 
} = &func.dfg[*instruction_id] else { continue; }; - if let Value::Function(_) = func.dfg[*called_value_id] { - called_function_ids.insert(*called_value_id); + if let Value::Function(function_id) = func.dfg.resolve(*called_value) { + called_function_ids.insert(function_id); } } } @@ -143,18 +141,6 @@ fn called_functions_values(func: &Function) -> BTreeSet { called_function_ids } -fn called_functions(func: &Function) -> BTreeSet { - called_functions_values(func) - .into_iter() - .map(|value_id| { - let Value::Function(func_id) = func.dfg[value_id] else { - unreachable!("Value should be a function") - }; - func_id - }) - .collect() -} - fn collect_reachable_functions( ssa: &Ssa, current_func_id: FunctionId, @@ -168,8 +154,8 @@ fn collect_reachable_functions( let func = &ssa.functions[¤t_func_id]; let called_functions = called_functions(func); - for called_func_id in called_functions.iter() { - collect_reachable_functions(ssa, *called_func_id, reachable_functions); + for called_func_id in called_functions { + collect_reachable_functions(ssa, called_func_id, reachable_functions); } } @@ -192,6 +178,7 @@ mod test { function::{Function, FunctionId, RuntimeType}, map::Id, types::Type, + value::Value, }, opt::runtime_separation::called_functions, ssa_gen::Ssa, @@ -213,13 +200,13 @@ mod test { builder.current_function.set_runtime(RuntimeType::Brillig(InlineType::default())); let bar_id = Id::test_new(1); - let bar = builder.import_function(bar_id); - let results = builder.insert_call(bar, Vec::new(), vec![Type::field()]).to_vec(); + let bar = Value::Function(bar_id); + let results = builder.insert_call(bar, Vec::new(), vec![Type::field()]).collect(); builder.terminate_with_return(results); builder.new_function("bar".into(), bar_id, InlineType::default()); let expected_return = 72u128; - let seventy_two = builder.field_constant(expected_return); + let seventy_two = builder.field_constant(expected_return.into()); builder.terminate_with_return(vec![seventy_two]); let ssa = builder.finish(); @@ -286,20 +273,20 @@ mod test { let bar_id = Id::test_new(1); let baz_id = Id::test_new(2); - let bar = builder.import_function(bar_id); - let baz = builder.import_function(baz_id); - let v0 = builder.insert_call(bar, Vec::new(), vec![Type::field()]).to_vec(); - let v1 = builder.insert_call(baz, Vec::new(), vec![Type::field()]).to_vec(); - builder.terminate_with_return(vec![v0[0], v1[0]]); + let bar = Value::Function(bar_id); + let baz = Value::Function(baz_id); + let v0 = builder.insert_call(bar, Vec::new(), vec![Type::field()]).next().unwrap(); + let v1 = builder.insert_call(baz, Vec::new(), vec![Type::field()]).next().unwrap(); + builder.terminate_with_return(vec![v0, v1]); builder.new_brillig_function("bar".into(), bar_id, InlineType::default()); - let baz = builder.import_function(baz_id); - let v0 = builder.insert_call(baz, Vec::new(), vec![Type::field()]).to_vec(); + let baz = Value::Function(baz_id); + let v0 = builder.insert_call(baz, Vec::new(), vec![Type::field()]).collect(); builder.terminate_with_return(v0); builder.new_function("baz".into(), baz_id, InlineType::default()); let expected_return = 72u128; - let seventy_two = builder.field_constant(expected_return); + let seventy_two = builder.field_constant(expected_return.into()); builder.terminate_with_return(vec![seventy_two]); let ssa = builder.finish(); diff --git a/compiler/noirc_evaluator/src/ssa/opt/simplify_cfg.rs b/compiler/noirc_evaluator/src/ssa/opt/simplify_cfg.rs index 22fdf0a7987..165f6c45cca 100644 --- 
a/compiler/noirc_evaluator/src/ssa/opt/simplify_cfg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/simplify_cfg.rs @@ -130,8 +130,7 @@ fn check_for_double_jmp(function: &mut Function, block: BasicBlockId, cfg: &mut return; } - if !function.dfg[block].instructions().is_empty() - || !function.dfg[block].parameters().is_empty() + if !function.dfg[block].instructions().is_empty() || function.dfg[block].parameter_count() != 0 { return; } @@ -215,24 +214,21 @@ fn check_for_negated_jmpif_condition( } if let Some(TerminatorInstruction::JmpIf { - condition, + condition: Value::Instruction { instruction, .. }, then_destination, else_destination, call_stack, }) = function.dfg[block].terminator() { - if let Value::Instruction { instruction, .. } = function.dfg[*condition] { - if let Instruction::Not(negated_condition) = function.dfg[instruction] { - let call_stack = *call_stack; - let jmpif = TerminatorInstruction::JmpIf { - condition: negated_condition, - then_destination: *else_destination, - else_destination: *then_destination, - call_stack, - }; - function.dfg[block].set_terminator(jmpif); - cfg.recompute_block(function, block); - } + if let Instruction::Not(negated_condition) = function.dfg[*instruction] { + let jmpif = TerminatorInstruction::JmpIf { + condition: negated_condition, + then_destination: *else_destination, + else_destination: *then_destination, + call_stack: *call_stack, + }; + function.dfg[block].set_terminator(jmpif); + cfg.recompute_block(function, block); } } } @@ -247,11 +243,9 @@ fn remove_block_parameters( block: BasicBlockId, predecessor: BasicBlockId, ) { - let block = &mut function.dfg[block]; - - if !block.parameters().is_empty() { - let block_params = block.take_parameters(); + let parameters = function.dfg.block_parameters(block); + if parameters.len() != 0 { let jump_args = match function.dfg[predecessor].unwrap_terminator_mut() { TerminatorInstruction::Jmp { arguments, .. } => std::mem::take(arguments), TerminatorInstruction::JmpIf { .. 
} => unreachable!("If jmpif instructions are modified to support block arguments in the future, this match will need to be updated"), @@ -260,9 +254,9 @@ fn remove_block_parameters( ), }; - assert_eq!(block_params.len(), jump_args.len()); - for (param, arg) in block_params.iter().zip(jump_args) { - function.dfg.set_value_from_id(*param, arg); + assert_eq!(parameters.len(), jump_args.len()); + for (param, arg) in parameters.zip(jump_args) { + function.dfg.replace_value(param, arg); } } } @@ -324,7 +318,7 @@ mod test { let v1 = builder.add_block_parameter(b2, Type::field()); let expected_return = 7u128; - let seven = builder.field_constant(expected_return); + let seven = builder.field_constant(expected_return.into()); builder.terminate_with_jmp(b1, vec![seven]); builder.switch_to_block(b1); @@ -377,8 +371,8 @@ mod test { let b1 = builder.insert_block(); let b2 = builder.insert_block(); - let one = builder.field_constant(1u128); - let two = builder.field_constant(2u128); + let one = builder.field_constant(1u128.into()); + let two = builder.field_constant(2u128.into()); let v1 = builder.insert_binary(v0, BinaryOp::Eq, v0); builder.terminate_with_jmpif(v1, b1, b2); diff --git a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs index ab4256197b9..4aee02be326 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs @@ -37,7 +37,7 @@ use crate::{ function_inserter::{ArrayCache, FunctionInserter}, instruction::{Binary, BinaryOp, Instruction, InstructionId, TerminatorInstruction}, post_order::PostOrder, - value::ValueId, + value::Value, }, ssa_gen::Ssa, }, @@ -453,7 +453,7 @@ impl Loop { &'a self, function: &'a mut Function, unroll_into: BasicBlockId, - induction_value: ValueId, + induction_value: Value, ) -> Result>, CallStack> { // We insert into a fresh block first and move instructions into the unroll_into block later // only once we verify the jmpif instruction has a constant condition. If it does not, we can @@ -462,10 +462,10 @@ impl Loop { let mut context = LoopIteration::new(function, self, fresh_block, self.header); let source_block = &context.dfg()[context.source_block]; - assert_eq!(source_block.parameters().len(), 1, "Expected only 1 argument in loop header"); + assert_eq!(source_block.parameter_count(), 1, "Expected only 1 argument in loop header"); // Insert the current value of the loop induction variable into our context. - let first_param = source_block.parameters()[0]; + let first_param = Value::block_param(context.source_block, 0); context.inserter.try_map_value(first_param, induction_value); // Copy over all instructions and a fresh terminator. context.inline_instructions_from_block(); @@ -551,7 +551,7 @@ impl Loop { &self, function: &Function, cfg: &ControlFlowGraph, - ) -> Option> { + ) -> Option> { // We need to traverse blocks from the pre-header up to the block entry point. let pre_header = self.get_pre_header(function, cfg).ok()?; let function_entry = function.entry_block(); @@ -564,14 +564,13 @@ impl Loop { let allocations = blocks.iter().flat_map(|block| { let instructions = function.dfg[*block].instructions().iter(); instructions - .filter(|i| matches!(&function.dfg[**i], Instruction::Allocate)) + .filter(|i| matches!(&function.dfg[**i], Instruction::Allocate { .. })) // Get the value into which the allocation was stored. 
- .map(|i| function.dfg.instruction_results(*i)[0]) + .map(|i| Value::instruction_result(*i, 0)) }); // Collect reference parameters of the function itself. - let params = - function.parameters().iter().filter(|p| function.dfg.value_is_reference(**p)).copied(); + let params = function.parameters().filter(|p| function.dfg.value_is_reference(*p)); Some(params.chain(allocations).collect()) } @@ -579,17 +578,13 @@ impl Loop { /// Count the number of load and store instructions of specific variables in the loop. /// /// Returns `(loads, stores)` in case we want to differentiate in the estimates. - fn count_loads_and_stores( - &self, - function: &Function, - refs: &HashSet, - ) -> (usize, usize) { + fn count_loads_and_stores(&self, function: &Function, refs: &HashSet) -> (usize, usize) { let mut loads = 0; let mut stores = 0; for block in &self.blocks { for instruction in function.dfg[*block].instructions() { match &function.dfg[*instruction] { - Instruction::Load { address } if refs.contains(address) => { + Instruction::Load { address, .. } if refs.contains(address) => { loads += 1; } Instruction::Store { address, .. } if refs.contains(address) => { @@ -616,8 +611,7 @@ impl Loop { /// The increment should be in the block where the back-edge was found. fn count_induction_increments(&self, function: &Function) -> usize { let back = &function.dfg[self.back_edge_start]; - let header = &function.dfg[self.header]; - let induction_var = header.parameters()[0]; + let induction_var = function.dfg.block_parameters(self.header).next().unwrap(); back.instructions().iter().filter(|instruction| { let instruction = &function.dfg[**instruction]; @@ -737,7 +731,7 @@ impl BoilerplateStats { /// ... /// ``` /// We're looking for the terminating jump of the `main` predecessor of `loop_entry`. -fn get_induction_variable(function: &Function, block: BasicBlockId) -> Result { +fn get_induction_variable(function: &Function, block: BasicBlockId) -> Result { match function.dfg[block].terminator() { Some(TerminatorInstruction::Jmp { arguments, call_stack: location, .. }) => { // This assumption will no longer be valid if e.g. mutable variables are represented as @@ -779,7 +773,7 @@ struct LoopIteration<'f> { /// the variable traditionally called `i` on each iteration of the loop. /// This is None until we visit the block which jumps back to the start of the /// loop, at which point we record its value and the block it was found in. - induction_value: Option<(BasicBlockId, ValueId)>, + induction_value: Option<(BasicBlockId, Value)>, } impl<'f> LoopIteration<'f> { @@ -807,7 +801,7 @@ impl<'f> LoopIteration<'f> { /// It is expected the terminator instructions are set up to branch into an empty block /// for further unrolling. When the loop is finished this will need to be mutated to /// jump to the end of the loop instead. - fn unroll_loop_iteration(mut self) -> (BasicBlockId, ValueId, Option) { + fn unroll_loop_iteration(mut self) -> (BasicBlockId, Value, Option) { let mut next_blocks = self.unroll_loop_block(); while let Some(block) = next_blocks.pop() { @@ -873,7 +867,7 @@ impl<'f> LoopIteration<'f> { /// destination indicated by the constant condition (ie. the `then` or the `else`). 
fn handle_jmpif( &mut self, - condition: ValueId, + condition: Value, then_destination: BasicBlockId, else_destination: BasicBlockId, call_stack: CallStackId, @@ -1005,7 +999,7 @@ mod tests { use test_case::test_case; use crate::errors::RuntimeError; - use crate::ssa::{ir::value::ValueId, opt::assert_normalized_ssa_equals, Ssa}; + use crate::ssa::{ir::value::Value, opt::assert_normalized_ssa_equals, Ssa}; use super::{is_new_size_ok, BoilerplateStats, Loops}; @@ -1144,7 +1138,7 @@ mod tests { let refs = loop0.find_pre_header_reference_values(function, &loops.cfg).unwrap(); assert_eq!(refs.len(), 1); - assert!(refs.contains(&ValueId::test_new(2))); + assert!(refs.contains(&Value::test_instruction_result(0, 0))); let (loads, stores) = loop0.count_loads_and_stores(function, &refs); assert_eq!(loads, 1); diff --git a/compiler/noirc_evaluator/src/ssa/parser/ast.rs b/compiler/noirc_evaluator/src/ssa/parser/ast.rs index 6c7608a2f16..6bd34f143b8 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/ast.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/ast.rs @@ -116,7 +116,7 @@ pub(crate) enum ParsedInstruction { }, RangeCheck { value: ParsedValue, - max_bit_size: u32, + max_bit_size: u8, }, Store { value: ParsedValue, @@ -125,8 +125,8 @@ pub(crate) enum ParsedInstruction { Truncate { target: Identifier, value: ParsedValue, - bit_size: u32, - max_bit_size: u32, + bit_size: u8, + max_bit_size: u8, }, } @@ -146,5 +146,9 @@ pub(crate) enum ParsedTerminator { #[derive(Debug, Clone)] pub(crate) enum ParsedValue { NumericConstant { constant: FieldElement, typ: Type }, - Variable(Identifier), + InstructionResult { id: u32, position: u32 }, + BlockParameter { id: u32, position: u32 }, + Function { id: u32 }, + ForeignFunction { id: u32 }, + Intrinsic(Identifier), } diff --git a/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs b/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs index 7c7e977c6ce..d45456cf086 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs @@ -5,8 +5,7 @@ use acvm::acir::circuit::ErrorSelector; use crate::ssa::{ function_builder::FunctionBuilder, ir::{ - basic_block::BasicBlockId, function::FunctionId, instruction::ConstrainError, - value::ValueId, + basic_block::BasicBlockId, function::FunctionId, instruction::ConstrainError, value::Value, }, }; @@ -35,7 +34,7 @@ struct Translator { /// This is necessary because the SSA we parse might have undergone some /// passes already which replaced some of the original IDs. The translator /// will recreate the SSA step by step, which can result in a new ID layout. 
- variables: HashMap>, + variables: HashMap>, error_selector_counter: u64, } @@ -156,59 +155,58 @@ impl Translator { fn translate_instruction(&mut self, instruction: ParsedInstruction) -> Result<(), SsaError> { match instruction { ParsedInstruction::Allocate { target, typ } => { - let value_id = self.builder.insert_allocate(typ); - self.define_variable(target, value_id)?; + let value = self.builder.insert_allocate(typ); + self.define_variable(target, value)?; } ParsedInstruction::ArrayGet { target, element_type, array, index } => { let array = self.translate_value(array)?; let index = self.translate_value(index)?; - let value_id = self.builder.insert_array_get(array, index, element_type); - self.define_variable(target, value_id)?; + let value = self.builder.insert_array_get(array, index, element_type); + self.define_variable(target, value)?; } ParsedInstruction::ArraySet { target, array, index, value, mutable } => { let array = self.translate_value(array)?; let index = self.translate_value(index)?; let value = self.translate_value(value)?; - let value_id = if mutable { + let value = if mutable { self.builder.insert_mutable_array_set(array, index, value) } else { self.builder.insert_array_set(array, index, value) }; - self.define_variable(target, value_id)?; + self.define_variable(target, value)?; } ParsedInstruction::BinaryOp { target, lhs, op, rhs } => { let lhs = self.translate_value(lhs)?; let rhs = self.translate_value(rhs)?; - let value_id = self.builder.insert_binary(lhs, op, rhs); - self.define_variable(target, value_id)?; + let value = self.builder.insert_binary(lhs, op, rhs); + self.define_variable(target, value)?; } ParsedInstruction::Call { targets, function, arguments, types } => { let function_id = if let Some(id) = self.builder.import_intrinsic(&function.name) { id } else { - let function_id = self.lookup_function(function)?; - self.builder.import_function(function_id) + Value::Function(self.lookup_function(function)?) 
}; let arguments = self.translate_values(arguments)?; - let value_ids = self.builder.insert_call(function_id, arguments, types).to_vec(); + let values = self.builder.insert_call(function_id, arguments, types); - if value_ids.len() != targets.len() { + if values.len() != targets.len() { return Err(SsaError::MismatchedReturnValues { returns: targets, - expected: value_ids.len(), + expected: values.len(), }); } - for (target, value_id) in targets.into_iter().zip(value_ids.into_iter()) { - self.define_variable(target, value_id)?; + for (target, value) in targets.into_iter().zip(values) { + self.define_variable(target, value)?; } } ParsedInstruction::Cast { target, lhs, typ } => { let lhs = self.translate_value(lhs)?; - let value_id = self.builder.insert_cast(lhs, typ.unwrap_numeric()); - self.define_variable(target, value_id)?; + let value = self.builder.insert_cast(lhs, typ.unwrap_numeric()); + self.define_variable(target, value)?; } ParsedInstruction::Constrain { lhs, rhs, assert_message } => { let lhs = self.translate_value(lhs)?; @@ -247,18 +245,18 @@ impl Translator { .into_iter() .map(|element| self.translate_value(element)) .collect::>()?; - let value_id = self.builder.insert_make_array(elements, typ); - self.define_variable(target, value_id)?; + let value = self.builder.insert_make_array(elements, typ); + self.define_variable(target, value)?; } ParsedInstruction::Load { target, value, typ } => { let value = self.translate_value(value)?; - let value_id = self.builder.insert_load(value, typ); - self.define_variable(target, value_id)?; + let value = self.builder.insert_load(value, typ); + self.define_variable(target, value)?; } ParsedInstruction::Not { target, value } => { let value = self.translate_value(value)?; - let value_id = self.builder.insert_not(value); - self.define_variable(target, value_id)?; + let value = self.builder.insert_not(value); + self.define_variable(target, value)?; } ParsedInstruction::RangeCheck { value, max_bit_size } => { let value = self.translate_value(value)?; @@ -271,15 +269,15 @@ impl Translator { } ParsedInstruction::Truncate { target, value, bit_size, max_bit_size } => { let value = self.translate_value(value)?; - let value_id = self.builder.insert_truncate(value, bit_size, max_bit_size); - self.define_variable(target, value_id)?; + let value = self.builder.insert_truncate(value, bit_size, max_bit_size); + self.define_variable(target, value)?; } } Ok(()) } - fn translate_values(&mut self, values: Vec) -> Result, SsaError> { + fn translate_values(&mut self, values: Vec) -> Result, SsaError> { let mut translated_values = Vec::with_capacity(values.len()); for value in values { translated_values.push(self.translate_value(value)?); @@ -287,20 +285,16 @@ impl Translator { Ok(translated_values) } - fn translate_value(&mut self, value: ParsedValue) -> Result { + fn translate_value(&mut self, value: ParsedValue) -> Result { match value { ParsedValue::NumericConstant { constant, typ } => { - Ok(self.builder.numeric_constant(constant, typ.unwrap_numeric())) + Ok(Value::constant(constant, typ.unwrap_numeric())) } ParsedValue::Variable(identifier) => self.lookup_variable(identifier), } } - fn define_variable( - &mut self, - identifier: Identifier, - value_id: ValueId, - ) -> Result<(), SsaError> { + fn define_variable(&mut self, identifier: Identifier, value: Value) -> Result<(), SsaError> { if let Some(vars) = self.variables.get(&self.current_function_id()) { if vars.contains_key(&identifier.name) { return Err(SsaError::VariableAlreadyDefined(identifier)); @@ -308,14 
+302,14 @@ impl Translator { } let entry = self.variables.entry(self.current_function_id()).or_default(); - entry.insert(identifier.name, value_id); + entry.insert(identifier.name, value); Ok(()) } - fn lookup_variable(&mut self, identifier: Identifier) -> Result { - if let Some(value_id) = self.variables[&self.current_function_id()].get(&identifier.name) { - Ok(*value_id) + fn lookup_variable(&mut self, identifier: Identifier) -> Result { + if let Some(value) = self.variables[&self.current_function_id()].get(&identifier.name) { + Ok(*value) } else { Err(SsaError::UnknownVariable(identifier)) } @@ -343,7 +337,7 @@ impl Translator { // after the step-by-step reconstruction done during translation. This assumes // that the SSA we parsed was printed by the `SsaBuilder`, which normalizes // before each print. - ssa.normalize_ids(); + ssa = ssa.normalize_ids(); ssa } diff --git a/compiler/noirc_evaluator/src/ssa/parser/mod.rs b/compiler/noirc_evaluator/src/ssa/parser/mod.rs index 24a5ff43071..83d5f0a1c99 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/mod.rs @@ -366,7 +366,7 @@ impl<'a> Parser<'a> { let value = self.parse_value_or_error()?; self.eat_or_error(Token::Keyword(Keyword::To))?; - let max_bit_size = self.eat_int_or_error()?.to_u128() as u32; + let max_bit_size = self.eat_int_or_error()?.to_u128() as u8; self.eat_or_error(Token::Keyword(Keyword::Bits))?; Ok(Some(ParsedInstruction::RangeCheck { value, max_bit_size })) } @@ -494,12 +494,12 @@ impl<'a> Parser<'a> { if self.eat_keyword(Keyword::Truncate)? { let value = self.parse_value_or_error()?; self.eat_or_error(Token::Keyword(Keyword::To))?; - let bit_size = self.eat_int_or_error()?.to_u128() as u32; + let bit_size = self.eat_int_or_error()?.to_u128() as u8; self.eat_or_error(Token::Keyword(Keyword::Bits))?; self.eat_or_error(Token::Comma)?; self.eat_or_error(Token::Keyword(Keyword::MaxBitSize))?; self.eat_or_error(Token::Colon)?; - let max_bit_size = self.eat_int_or_error()?.to_u128() as u32; + let max_bit_size = self.eat_int_or_error()?.to_u128() as u8; return Ok(ParsedInstruction::Truncate { target, value, bit_size, max_bit_size }); } diff --git a/compiler/noirc_evaluator/src/ssa/parser/tests.rs b/compiler/noirc_evaluator/src/ssa/parser/tests.rs index dab96dfa04f..9e5b30c333c 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/tests.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/tests.rs @@ -58,8 +58,8 @@ fn test_make_array() { let src = " acir(inline) fn main f0 { b0(): - v1 = make_array [Field 1] : [Field; 1] - return v1 + i0 = make_array [Field 1] : [Field; 1] + return i0 } "; assert_ssa_roundtrip(src); @@ -303,8 +303,8 @@ fn test_array_set() { fn test_mutable_array_set() { let src = " acir(inline) fn main f0 { - b0(v0: [Field; 3]): - v3 = array_set mut v0, index Field 0, value Field 1 + b0(b0.0: [Field; 3]): + i0 = array_set mut b0.0, index Field 0, value Field 1 return } "; @@ -315,9 +315,9 @@ fn test_mutable_array_set() { fn test_array_get_set_bug() { let src = " acir(inline) fn main f0 { - b0(v0: [u32; 3]): - v3 = array_set v0, index u32 1, value u32 2 - v5 = array_get v3, index u32 0 -> u32 + b0(b0.0: [u32; 3]): + i0 = array_set b0.0, index u32 1, value u32 2 + i1 = array_get i0, index u32 0 -> u32 return } "; @@ -330,8 +330,8 @@ fn test_binary() { let src = format!( " acir(inline) fn main f0 {{ - b0(v0: Field, v1: Field): - v2 = {op} v0, v1 + b0(b0.0: Field, v1: Field): + i0 = {op} b0.0, v1 return }} " @@ -344,8 +344,8 @@ fn test_binary() { fn test_truncate() { 
let src = " acir(inline) fn main f0 { - b0(v0: Field): - v1 = truncate v0 to 8 bits, max_bit_size: 16 + b0(b0.0: Field): + i0 = truncate b0.0 to 8 bits, max_bit_size: 16 return } "; @@ -356,8 +356,8 @@ fn test_truncate() { fn test_not() { let src = " acir(inline) fn main f0 { - b0(v0: Field): - v1 = not v0 + b0(b0.0: Field): + i0 = not b0.0 return } "; @@ -368,8 +368,8 @@ fn test_not() { fn test_range_check() { let src = " acir(inline) fn main f0 { - b0(v0: Field): - range_check v0 to 8 bits + b0(b0.0: Field): + range_check b0.0 to 8 bits return } "; @@ -381,7 +381,7 @@ fn test_allocate() { let src = " acir(inline) fn main f0 { b0(): - v0 = allocate -> &mut [Field; 3] + i0 = allocate -> &mut [Field; 3] return } "; @@ -392,8 +392,8 @@ fn test_allocate() { fn test_load() { let src = " acir(inline) fn main f0 { - b0(v0: Field): - v1 = load v0 -> Field + b0(b0.0: Field): + i0 = load b0.0 -> Field return } "; @@ -404,8 +404,8 @@ fn test_load() { fn test_store() { let src = " acir(inline) fn main f0 { - b0(v0: Field): - store Field 1 at v0 + b0(b0.0: Field): + store Field 1 at b0.0 return } "; @@ -416,8 +416,8 @@ fn test_store() { fn test_inc_rc() { let src = " acir(inline) fn main f0 { - b0(v0: [Field; 3]): - inc_rc v0 + b0(b0.0: [Field; 3]): + inc_rc b0.0 return } "; @@ -428,8 +428,8 @@ fn test_inc_rc() { fn test_dec_rc() { let src = " acir(inline) fn main f0 { - b0(v0: [Field; 3]): - dec_rc v0 + b0(b0.0: [Field; 3]): + dec_rc b0.0 return } "; @@ -440,7 +440,7 @@ fn test_dec_rc() { fn test_mutable_reference_type() { let src = " acir(inline) fn main f0 { - b0(v0: &mut Field): + b0(b0.0: &mut Field): return } "; @@ -452,14 +452,14 @@ fn test_parses_with_comments() { let src = " // This is a comment acir(inline) fn main f0 { - b0(v0: &mut Field): // This is a block + b0(b0.0: &mut Field): // This is a block return // Returns nothing } "; let expected = " acir(inline) fn main f0 { - b0(v0: &mut Field): + b0(b0.0: &mut Field): return } "; @@ -472,8 +472,8 @@ fn test_parses_with_comments() { fn test_slice() { let src = " acir(inline) fn main f0 { - b0(v0: [Field; 3]): - v2, v3 = call as_slice(v0) -> (u32, [Field]) + b0(b0.0: [Field; 3]): + i0, i0.1 = call as_slice(b0.0) -> (u32, [Field]) return } "; @@ -496,7 +496,7 @@ fn test_function_type() { let src = " acir(inline) fn main f0 { b0(): - v0 = allocate -> &mut function + i0 = allocate -> &mut function return } "; diff --git a/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs b/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs index 7807658dabb..ace0c00d2b6 100644 --- a/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs +++ b/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs @@ -16,9 +16,9 @@ use crate::ssa::ir::instruction::BinaryOp; use crate::ssa::ir::instruction::Instruction; use crate::ssa::ir::map::AtomicCounter; use crate::ssa::ir::types::{NumericType, Type}; -use crate::ssa::ir::value::ValueId; +use crate::ssa::ir::value::Value; -use super::value::{Tree, Value, Values}; +use super::value::{Tree, Value as SsaGenValue, Values}; use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet}; /// The FunctionContext is the main context object for translating a @@ -78,7 +78,7 @@ pub(super) struct SharedContext { #[derive(Copy, Clone)] pub(super) struct Loop { pub(super) loop_entry: BasicBlockId, - pub(super) loop_index: ValueId, + pub(super) loop_index: Value, pub(super) loop_end: BasicBlockId, } @@ -172,9 +172,9 @@ impl<'a> FunctionContext<'a> { /// Always returns a Value::Mutable wrapping the allocate instruction. 
pub(super) fn new_mutable_variable( &mut self, - value_to_store: ValueId, + value_to_store: Value, increment_array_rc: bool, - ) -> Value { + ) -> SsaGenValue { let element_type = self.builder.current_function.dfg.type_of_value(value_to_store); if increment_array_rc { @@ -184,7 +184,7 @@ impl<'a> FunctionContext<'a> { let alloc = self.builder.insert_allocate(element_type); self.builder.insert_store(alloc, value_to_store); let typ = self.builder.type_of_value(value_to_store); - Value::Mutable(alloc, typ) + SsaGenValue::Mutable(alloc, typ) } /// Maps the given type to a Tree of the result type. @@ -281,7 +281,7 @@ impl<'a> FunctionContext<'a> { value: impl Into, negative: bool, numeric_type: NumericType, - ) -> Result { + ) -> Result { let value = value.into(); if let Some(range) = numeric_type.value_is_outside_limits(value, negative) { @@ -306,17 +306,17 @@ impl<'a> FunctionContext<'a> { value }; - Ok(self.builder.numeric_constant(value, numeric_type)) + Ok(self.builder.constant(value, numeric_type)) } /// helper function which add instructions to the block computing the absolute value of the /// given signed integer input. When the input is negative, we return its two complement, and itself when it is positive. - fn absolute_value_helper(&mut self, input: ValueId, sign: ValueId, bit_size: u32) -> ValueId { + fn absolute_value_helper(&mut self, input: Value, sign: Value, bit_size: u8) -> Value { assert_eq!(self.builder.type_of_value(sign), Type::bool()); // We compute the absolute value of lhs - let bit_width = FieldElement::from(2_i128.pow(bit_size)); - let bit_width = self.builder.numeric_constant(bit_width, NumericType::NativeField); + let bit_width = FieldElement::from(2_i128.pow(bit_size as u32)); + let bit_width = self.builder.field_constant(bit_width); let sign_not = self.builder.insert_not(sign); // We use unsafe casts here, this is fine as we're casting to a `field` type. 
@@ -343,12 +343,12 @@ impl<'a> FunctionContext<'a> { /// overflow the bit size, however the operation is still valid (i.e it is not a signed overflow) fn check_overflow( &mut self, - result: ValueId, - lhs: ValueId, - rhs: ValueId, + result: Value, + lhs: Value, + rhs: Value, operator: BinaryOpKind, location: Location, - ) -> ValueId { + ) -> Value { let result_type = self.builder.current_function.dfg.type_of_value(result).unwrap_numeric(); match result_type { NumericType::Signed { bit_size } => { @@ -391,7 +391,7 @@ impl<'a> FunctionContext<'a> { } BinaryOpKind::ShiftLeft => { if let Some(rhs_const) = dfg.get_numeric_constant(rhs) { - let bit_shift_size = rhs_const.to_u128() as u32; + let bit_shift_size = rhs_const.to_u128() as u8; if max_lhs_bits + bit_shift_size <= bit_size { // `lhs` has been casted up from a smaller type such that shifting it by a constant @@ -418,16 +418,16 @@ impl<'a> FunctionContext<'a> { /// If not, we do not overflow and shift with 0 when bits are falling out of the bit size fn check_shift_overflow( &mut self, - result: ValueId, - rhs: ValueId, - bit_size: u32, + result: Value, + rhs: Value, + bit_size: u8, location: Location, - ) -> ValueId { - let one = self.builder.numeric_constant(FieldElement::one(), NumericType::bool()); + ) -> Value { + let one = self.builder.bool_constant(true); assert!(self.builder.current_function.dfg.type_of_value(rhs) == Type::unsigned(8)); let bit_size_field = FieldElement::from(bit_size as i128); - let max = self.builder.numeric_constant(bit_size_field, NumericType::unsigned(8)); + let max = self.builder.constant(bit_size_field, NumericType::unsigned(8)); let overflow = self.builder.insert_binary(rhs, BinaryOp::Lt, max); self.builder.set_location(location).insert_constrain( overflow, @@ -451,16 +451,16 @@ impl<'a> FunctionContext<'a> { /// then we check that the result has the proper sign, using the rule of signs fn check_signed_overflow( &mut self, - result: ValueId, - lhs: ValueId, - rhs: ValueId, + result: Value, + lhs: Value, + rhs: Value, operator: BinaryOpKind, - bit_size: u32, + bit_size: u8, location: Location, ) { let is_sub = operator == BinaryOpKind::Subtract; - let half_width = self.builder.numeric_constant( - FieldElement::from(2_i128.pow(bit_size - 1)), + let half_width = self.builder.constant( + FieldElement::from(2_i128.pow((bit_size - 1) as u32)), NumericType::unsigned(bit_size), ); // We compute the sign of the operands. 
The overflow checks for signed integers depends on these signs @@ -489,7 +489,7 @@ impl<'a> FunctionContext<'a> { same_sign, Some(message.into()), ); - self.builder.set_location(location).insert_instruction(overflow_check, None); + self.builder.set_location(location).insert_instruction(overflow_check); } BinaryOpKind::Multiply => { // Overflow check for the multiplication: @@ -515,7 +515,7 @@ impl<'a> FunctionContext<'a> { let product_overflow_check = self.builder.insert_binary(product, BinaryOp::Lt, positive_maximum_with_offset); - let one = self.builder.numeric_constant(FieldElement::one(), NumericType::bool()); + let one = self.builder.bool_constant(true); self.builder.set_location(location).insert_constrain( product_overflow_check, one, @@ -532,9 +532,9 @@ impl<'a> FunctionContext<'a> { /// For example, (a <= b) is represented as !(b < a) pub(super) fn insert_binary( &mut self, - mut lhs: ValueId, + mut lhs: Value, operator: BinaryOpKind, - mut rhs: ValueId, + mut rhs: Value, location: Location, ) -> Values { let op = convert_operator(operator); @@ -568,22 +568,18 @@ impl<'a> FunctionContext<'a> { /// back into a Values tree of the proper shape. pub(super) fn insert_call( &mut self, - function: ValueId, - arguments: Vec, + function: Value, + arguments: Vec, result_type: &ast::Type, location: Location, ) -> Values { let result_types = Self::convert_type(result_type).flatten(); - let results = + let mut results = self.builder.set_location(location).insert_call(function, arguments, result_types); - let mut i = 0; - let reshaped_return_values = Self::map_type(result_type, |_| { - let result = results[i].into(); - i += 1; - result - }); - assert_eq!(i, results.len()); + let reshaped_return_values = + Self::map_type(result_type, |_| results.next().unwrap().into()); + assert!(results.next().is_none()); reshaped_return_values } @@ -593,10 +589,10 @@ impl<'a> FunctionContext<'a> { /// Compared to `self.builder.insert_cast`, this version will automatically truncate `value` to be a valid `typ`. pub(super) fn insert_safe_cast( &mut self, - mut value: ValueId, + mut value: Value, typ: NumericType, location: Location, - ) -> ValueId { + ) -> Value { self.builder.set_location(location); // To ensure that `value` is a valid `typ`, we insert an `Instruction::Truncate` instruction beforehand if @@ -611,17 +607,17 @@ impl<'a> FunctionContext<'a> { } /// Create a const offset of an address for an array load or store - pub(super) fn make_offset(&mut self, mut address: ValueId, offset: u128) -> ValueId { + pub(super) fn make_offset(&mut self, mut address: Value, offset: u128) -> Value { if offset != 0 { let typ = self.builder.type_of_value(address).unwrap_numeric(); - let offset = self.builder.numeric_constant(offset, typ); + let offset = self.builder.constant(offset.into(), typ); address = self.builder.insert_binary(address, BinaryOp::Add, offset); } address } /// Array indexes are u32. This function casts values used as indexes to u32. - pub(super) fn make_array_index(&mut self, index: ValueId) -> ValueId { + pub(super) fn make_array_index(&mut self, index: Value) -> Value { self.builder.insert_cast(index, NumericType::length_type()) } @@ -678,7 +674,7 @@ impl<'a> FunctionContext<'a> { /// if it is not yet compiled. pub(super) fn get_or_queue_function(&mut self, id: FuncId) -> Values { let function = self.shared_context.get_or_queue_function(id); - self.builder.import_function(function).into() + Value::Function(function).into() } /// Extracts the current value out of an LValue. 
@@ -765,7 +761,7 @@ impl<'a> FunctionContext<'a> { array: &ast::LValue, index: &ast::Expression, location: &Location, - ) -> Result<(ValueId, ValueId, LValue, Option), RuntimeError> { + ) -> Result<(Value, Value, LValue, Option), RuntimeError> { let (old_array, array_lvalue) = self.extract_current_value_recursive(array)?; let index = self.codegen_non_tuple_expression(index)?; let array_lvalue = Box::new(array_lvalue); @@ -864,18 +860,17 @@ impl<'a> FunctionContext<'a> { fn assign_lvalue_index( &mut self, new_value: Values, - mut array: ValueId, - index: ValueId, + mut array: Value, + index: Value, location: Location, - ) -> ValueId { + ) -> Value { let index = self.make_array_index(index); - let element_size = - self.builder.numeric_constant(self.element_size(array), NumericType::length_type()); + let element_size = self.builder.length_constant(self.element_size(array)); // The actual base index is the user's index * the array element type's size let mut index = self.builder.set_location(location).insert_binary(index, BinaryOp::Mul, element_size); - let one = self.builder.numeric_constant(FieldElement::one(), NumericType::length_type()); + let one = self.builder.length_constant(FieldElement::one()); new_value.for_each(|value| { let value = value.eval(self); @@ -885,7 +880,7 @@ impl<'a> FunctionContext<'a> { array } - fn element_size(&self, array: ValueId) -> FieldElement { + fn element_size(&self, array: Value) -> FieldElement { let size = self.builder.type_of_value(array).element_size(); FieldElement::from(size as u128) } @@ -920,9 +915,9 @@ impl<'a> FunctionContext<'a> { /// /// This is done on parameters rather than call arguments so that we can optimize out /// paired inc/dec instructions within brillig functions more easily. - pub(crate) fn increment_parameter_rcs(&mut self) -> HashSet { + pub(crate) fn increment_parameter_rcs(&mut self) -> HashSet { let entry = self.builder.current_function.entry_block(); - let parameters = self.builder.current_function.dfg.block_parameters(entry).to_vec(); + let parameters = self.builder.current_function.dfg.block_parameters(entry); let mut incremented = HashSet::default(); let mut seen_array_types = HashSet::default(); @@ -953,8 +948,8 @@ impl<'a> FunctionContext<'a> { /// ignored. 
pub(crate) fn end_scope( &mut self, - mut incremented_params: HashSet, - terminator_args: &[ValueId], + mut incremented_params: HashSet, + terminator_args: &[Value], ) { incremented_params.retain(|parameter| !terminator_args.contains(parameter)); @@ -968,7 +963,7 @@ impl<'a> FunctionContext<'a> { pub(crate) fn enter_loop( &mut self, loop_entry: BasicBlockId, - loop_index: ValueId, + loop_index: Value, loop_end: BasicBlockId, ) { self.loops.push(Loop { loop_entry, loop_index, loop_end }); @@ -1068,8 +1063,8 @@ impl SharedContext { #[derive(Debug)] pub(super) enum LValue { Ident, - Index { old_array: ValueId, index: ValueId, array_lvalue: Box, location: Location }, - SliceIndex { old_slice: Values, index: ValueId, slice_lvalue: Box, location: Location }, + Index { old_array: Value, index: Value, array_lvalue: Box, location: Location }, + SliceIndex { old_slice: Values, index: Value, slice_lvalue: Box, location: Location }, MemberAccess { old_object: Values, index: usize, object_lvalue: Box }, Dereference { reference: Values }, } diff --git a/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs b/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs index d3821158b80..18ac0b32160 100644 --- a/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs @@ -30,11 +30,11 @@ use super::{ function::RuntimeType, instruction::{BinaryOp, ConstrainError, TerminatorInstruction}, types::Type, - value::ValueId, + value::Value, }, }; -pub(crate) const SSA_WORD_SIZE: u32 = 32; +pub(crate) const SSA_WORD_SIZE: u8 = 32; /// Generates SSA for the given monomorphized program. /// @@ -171,7 +171,7 @@ impl<'a> FunctionContext<'a> { /// Codegen any non-tuple expression so that we can unwrap the Values /// tree to return a single value for use with most SSA instructions. 
- fn codegen_non_tuple_expression(&mut self, expr: &Expression) -> Result { + fn codegen_non_tuple_expression(&mut self, expr: &Expression) -> Result { Ok(self.codegen_expression(expr)?.into_leaf().eval(self)) } @@ -220,7 +220,7 @@ impl<'a> FunctionContext<'a> { Ok(match array.typ { ast::Type::Slice(_) => { let slice_length = - self.builder.length_constant(array.contents.len() as u128); + self.builder.length_constant((array.contents.len() as u128).into()); let slice_contents = self.codegen_array_checked(elements, typ[1].clone())?; Tree::Branch(vec![slice_length.into(), slice_contents]) @@ -235,7 +235,7 @@ impl<'a> FunctionContext<'a> { } ast::Literal::Bool(value) => { // Don't need to call checked_numeric_constant here since `value` can only be true or false - Ok(self.builder.numeric_constant(*value as u128, NumericType::bool()).into()) + Ok(self.builder.bool_constant(*value).into()) } ast::Literal::Str(string) => Ok(self.codegen_string(string)), ast::Literal::FmtStr(fragments, number_of_fields, fields) => { @@ -258,7 +258,7 @@ impl<'a> FunctionContext<'a> { // A caller needs multiple pieces of information to make use of a format string // The message string, the number of fields to be formatted, and the fields themselves let string = self.codegen_string(&string); - let field_count = self.builder.length_constant(*number_of_fields as u128); + let field_count = self.builder.length_constant((*number_of_fields as u128).into()); let fields = self.codegen_expression(fields)?; Ok(Tree::Branch(vec![string, field_count.into(), fields])) @@ -279,7 +279,7 @@ impl<'a> FunctionContext<'a> { fn codegen_string(&mut self, string: &str) -> Values { let elements = vecmap(string.as_bytes(), |byte| { - let char = self.builder.numeric_constant(*byte as u128, NumericType::char()); + let char = self.builder.constant((*byte as u128).into(), NumericType::char()); (char.into(), false) }); let typ = Self::convert_non_tuple_type(&ast::Type::String(elements.len() as u32)); @@ -357,7 +357,7 @@ impl<'a> FunctionContext<'a> { let rhs = self.codegen_expression(&unary.rhs)?; let rhs = rhs.into_leaf().eval(self); let typ = self.builder.type_of_value(rhs).unwrap_numeric(); - let zero = self.builder.numeric_constant(0u128, typ); + let zero = self.builder.constant(0u128.into(), typ); Ok(self.insert_binary( zero, noirc_frontend::ast::BinaryOpKind::Subtract, @@ -440,17 +440,16 @@ impl<'a> FunctionContext<'a> { /// return a reference to each element, for use with the store instruction. fn codegen_array_index( &mut self, - array: ValueId, - index: ValueId, + array: Value, + index: Value, element_type: &ast::Type, location: Location, - length: Option, + length: Option, ) -> Result { // base_index = index * type_size let index = self.make_array_index(index); let type_size = Self::convert_type(element_type).size_of_type(); - let type_size = - self.builder.numeric_constant(type_size as u128, NumericType::length_type()); + let type_size = self.builder.length_constant((type_size as u128).into()); let base_index = self.builder.set_location(location).insert_binary(index, BinaryOp::Mul, type_size); @@ -482,14 +481,14 @@ impl<'a> FunctionContext<'a> { /// Prepare a slice access. /// Check that the index being used to access a slice element /// is less than the dynamic slice length. 
- fn codegen_slice_access_check(&mut self, index: ValueId, length: Option) { + fn codegen_slice_access_check(&mut self, index: Value, length: Option) { let index = self.make_array_index(index); // We convert the length as an array index type for comparison let array_len = self .make_array_index(length.expect("ICE: a length must be supplied for indexing slices")); let is_offset_out_of_bounds = self.builder.insert_binary(index, BinaryOp::Lt, array_len); - let true_const = self.builder.numeric_constant(true, NumericType::bool()); + let true_const = self.builder.bool_constant(true); self.builder.insert_constrain( is_offset_out_of_bounds, @@ -672,8 +671,8 @@ impl<'a> FunctionContext<'a> { fn codegen_intrinsic_call_checks( &mut self, - function: ValueId, - arguments: &[ValueId], + function: Value, + arguments: &[Value], location: Location, ) { if let Some(intrinsic) = @@ -681,7 +680,7 @@ impl<'a> FunctionContext<'a> { { match intrinsic { Intrinsic::SliceInsert => { - let one = self.builder.length_constant(1u128); + let one = self.builder.length_constant(1u128.into()); // We add one here in the case of a slice insert as a slice insert at the length of the slice // can be converted to a slice push back @@ -739,7 +738,7 @@ impl<'a> FunctionContext<'a> { assert_payload: &Option>, ) -> Result { let expr = self.codegen_non_tuple_expression(expr)?; - let true_literal = self.builder.numeric_constant(true, NumericType::bool()); + let true_literal = self.builder.bool_constant(true); // Set the location here for any errors that may occur when we codegen the assert message self.builder.set_location(location); diff --git a/compiler/noirc_evaluator/src/ssa/ssa_gen/program.rs b/compiler/noirc_evaluator/src/ssa/ssa_gen/program.rs index de01a4596ad..0ef20cb5fb1 100644 --- a/compiler/noirc_evaluator/src/ssa/ssa_gen/program.rs +++ b/compiler/noirc_evaluator/src/ssa/ssa_gen/program.rs @@ -140,8 +140,8 @@ mod test { let mut builder = FunctionBuilder::new("main".into(), main_id); let v0 = builder.add_parameter(Type::field()); - let one = builder.field_constant(1u128); - let three = builder.field_constant(3u128); + let one = builder.field_constant(1u128.into()); + let three = builder.field_constant(3u128.into()); let v1 = builder.insert_binary(v0, BinaryOp::Add, one); let v2 = builder.insert_binary(v1, BinaryOp::Mul, three); diff --git a/compiler/noirc_evaluator/src/ssa/ssa_gen/value.rs b/compiler/noirc_evaluator/src/ssa/ssa_gen/value.rs index d71d4e5604e..67ceaf89766 100644 --- a/compiler/noirc_evaluator/src/ssa/ssa_gen/value.rs +++ b/compiler/noirc_evaluator/src/ssa/ssa_gen/value.rs @@ -1,7 +1,7 @@ use iter_extended::vecmap; use crate::ssa::ir::types::Type; -use crate::ssa::ir::value::ValueId as IrValueId; +use crate::ssa::ir::value::Value as IrValue; use super::context::FunctionContext; @@ -28,17 +28,17 @@ pub(super) enum Tree { /// would be invalid. #[derive(Debug, Clone)] pub(super) enum Value { - Normal(IrValueId), + Normal(IrValue), /// A mutable variable that must be loaded as the given type before being used - Mutable(IrValueId, Type), + Mutable(IrValue, Type), } impl Value { /// Evaluate a value, returning an IrValue from it. 
     /// This has no effect on Value::Normal, but any variables will
     /// need to be loaded from memory
-    pub(super) fn eval(self, ctx: &mut FunctionContext) -> IrValueId {
+    pub(super) fn eval(self, ctx: &mut FunctionContext) -> IrValue {
         match self {
             Value::Normal(value) => value,
             Value::Mutable(address, typ) => ctx.builder.insert_load(address, typ),
@@ -47,7 +47,7 @@ impl Value {
 
     /// Evaluates the value, returning a reference to the mutable variable found within
     /// if possible. Compared to .eval, this method will not load from self if it is Value::Mutable.
-    pub(super) fn eval_reference(self) -> IrValueId {
+    pub(super) fn eval_reference(self) -> IrValue {
         match self {
             Value::Normal(value) => value,
             Value::Mutable(address, _) => address,
@@ -163,14 +163,14 @@ impl Tree {
     }
 }
 
-impl From<IrValueId> for Values {
-    fn from(id: IrValueId) -> Self {
+impl From<IrValue> for Values {
+    fn from(id: IrValue) -> Self {
         Self::Leaf(Value::Normal(id))
     }
 }
 
-impl From<IrValueId> for Value {
-    fn from(id: IrValueId) -> Self {
+impl From<IrValue> for Value {
+    fn from(id: IrValue) -> Self {
         Value::Normal(id)
     }
 }
@@ -187,7 +187,7 @@ impl Tree {
 impl Tree<Value> {
     /// Flattens and evaluates this Tree into a list of ir values
     /// for return statements, branching instructions, or function parameters.
-    pub(super) fn into_value_list(self, ctx: &mut FunctionContext) -> Vec<IrValueId> {
+    pub(super) fn into_value_list(self, ctx: &mut FunctionContext) -> Vec<IrValue> {
         vecmap(self.flatten(), |value| value.eval(ctx))
     }
 }
diff --git a/compiler/noirc_frontend/src/ast/mod.rs b/compiler/noirc_frontend/src/ast/mod.rs
index 35e57cd4528..032a3bc0615 100644
--- a/compiler/noirc_frontend/src/ast/mod.rs
+++ b/compiler/noirc_frontend/src/ast/mod.rs
@@ -68,21 +68,22 @@ impl IntegerBitSize {
     }
 }
 
-impl From<IntegerBitSize> for u32 {
-    fn from(size: IntegerBitSize) -> u32 {
-        use IntegerBitSize::*;
-        match size {
-            One => 1,
-            Eight => 8,
-            Sixteen => 16,
-            ThirtyTwo => 32,
-            SixtyFour => 64,
-        }
+impl From<IntegerBitSize> for u8 {
+    fn from(size: IntegerBitSize) -> u8 {
+        size.bit_size()
     }
 }
 
 pub struct InvalidIntegerBitSizeError(pub u32);
 
+impl TryFrom<u8> for IntegerBitSize {
+    type Error = InvalidIntegerBitSizeError;
+
+    fn try_from(value: u8) -> Result<Self, Self::Error> {
+        Self::try_from(value as u32)
+    }
+}
+
 impl TryFrom<u32> for IntegerBitSize {
     type Error = InvalidIntegerBitSizeError;
 
@@ -101,7 +102,7 @@ impl TryFrom<u32> for IntegerBitSize {
 
 impl core::fmt::Display for IntegerBitSize {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}", u32::from(*self))
+        write!(f, "{}", self.bit_size())
     }
 }
 
diff --git a/compiler/noirc_frontend/src/elaborator/lints.rs b/compiler/noirc_frontend/src/elaborator/lints.rs
index d3b776bea24..378569a3bf0 100644
--- a/compiler/noirc_frontend/src/elaborator/lints.rs
+++ b/compiler/noirc_frontend/src/elaborator/lints.rs
@@ -203,7 +203,7 @@ pub(crate) fn overflowing_int(
     match expr {
         HirExpression::Literal(HirLiteral::Integer(value, negative)) => match annotated_type {
             Type::Integer(Signedness::Unsigned, bit_count) => {
-                let bit_count: u32 = (*bit_count).into();
+                let bit_count = bit_count.bit_size() as u32;
                 let max = 2u128.pow(bit_count) - 1;
                 if value > max.into() || negative {
                     errors.push(TypeCheckError::OverflowingAssignment {
@@ -215,7 +215,7 @@ pub(crate) fn overflowing_int(
                 }
             }
             Type::Integer(Signedness::Signed, bit_count) => {
-                let bit_count: u32 = (*bit_count).into();
+                let bit_count = bit_count.bit_size() as u32;
                 let min = 2u128.pow(bit_count - 1);
                 let max = 2u128.pow(bit_count - 1) - 1;
                 if (negative && value > min.into()) || (!negative && value > max.into()) {
diff --git a/compiler/noirc_frontend/src/lexer/token.rs b/compiler/noirc_frontend/src/lexer/token.rs
index f35515045db..38600427efc 100644
--- a/compiler/noirc_frontend/src/lexer/token.rs
+++ b/compiler/noirc_frontend/src/lexer/token.rs
@@ -609,8 +609,8 @@ impl Token {
 
 #[derive(PartialEq, Eq, Hash, Debug, Clone, PartialOrd, Ord)]
 pub enum IntType {
-    Unsigned(u32), // u32 = Unsigned(32)
-    Signed(u32), // i64 = Signed(64)
+    Unsigned(u8), // u32 = Unsigned(32)
+    Signed(u8), // i64 = Signed(64)
 }
 
 impl fmt::Display for IntType {
@@ -638,16 +638,12 @@ impl IntType {
         };
 
         // Word start with 'u' or 'i'. Check if the latter is an integer
-
-        let str_as_u32 = match word[1..].parse::<u32>() {
-            Ok(str_as_u32) => str_as_u32,
-            Err(_) => return None,
-        };
+        let str_as_u8 = word[1..].parse::<u8>().ok()?;
 
         if is_signed {
-            Some(IntType::Signed(str_as_u32))
-        } else {
-            Some(IntType::Unsigned(str_as_u32))
+            Some(IntType::Signed(str_as_u8))
+        } else {
+            Some(IntType::Unsigned(str_as_u8))
         }
     }
 }
diff --git a/compiler/noirc_printable_type/src/lib.rs b/compiler/noirc_printable_type/src/lib.rs
index 6ae187da27f..6c603b7195d 100644
--- a/compiler/noirc_printable_type/src/lib.rs
+++ b/compiler/noirc_printable_type/src/lib.rs
@@ -23,10 +23,10 @@ pub enum PrintableType {
         types: Vec<PrintableType>,
     },
     SignedInteger {
-        width: u32,
+        width: u8,
     },
     UnsignedInteger {
-        width: u32,
+        width: u8,
     },
     Boolean,
     Struct {
diff --git a/tooling/debugger/src/repl.rs b/tooling/debugger/src/repl.rs
index eda3cbfd895..c30de453d5d 100644
--- a/tooling/debugger/src/repl.rs
+++ b/tooling/debugger/src/repl.rs
@@ -391,7 +391,8 @@ impl<'a, B: BlackBoxFunctionSolver> ReplDebugger<'a, B> {
             return;
         };
 
-        let Ok(bit_size) = BitSize::try_from_u32::<FieldElement>(bit_size) else {
+        let Ok(bit_size) = BitSize::try_from_u8::<FieldElement>(bit_size.try_into().unwrap())
+        else {
             println!("Invalid bit size: {bit_size}");
             return;
         };
diff --git a/tooling/fuzzer/src/dictionary/mod.rs b/tooling/fuzzer/src/dictionary/mod.rs
index 172edfa54c2..a47548afaee 100644
--- a/tooling/fuzzer/src/dictionary/mod.rs
+++ b/tooling/fuzzer/src/dictionary/mod.rs
@@ -112,18 +112,18 @@ fn build_dictionary_from_unconstrained_function(
     for opcode in &function.bytecode {
         match opcode {
             BrilligOpcode::Cast { bit_size, .. } => {
-                let bit_size = bit_size.to_u32::<F>();
+                let bit_size = bit_size.to_u8::<F>();
 
-                let field = 1u128.wrapping_shl(bit_size);
+                let field = 1u128.wrapping_shl(bit_size as u32);
                 constants.insert(F::from(field));
                 constants.insert(F::from(field - 1));
             }
             BrilligOpcode::Const { bit_size, value, .. } => {
-                let bit_size = bit_size.to_u32::<F>();
+                let bit_size = bit_size.to_u8::<F>();
 
                 constants.insert(*value);
 
-                let field = 1u128.wrapping_shl(bit_size);
+                let field = 1u128.wrapping_shl(bit_size as u32);
                 constants.insert(F::from(field));
                 constants.insert(F::from(field - 1));
             }
diff --git a/tooling/noirc_abi/src/lib.rs b/tooling/noirc_abi/src/lib.rs
index bd5674d64f1..7e4805911d4 100644
--- a/tooling/noirc_abi/src/lib.rs
+++ b/tooling/noirc_abi/src/lib.rs
@@ -138,10 +138,10 @@ impl From<&AbiType> for PrintableType {
                 }
             }
             AbiType::Integer { sign: Sign::Unsigned, width } => {
-                PrintableType::UnsignedInteger { width: *width }
+                PrintableType::UnsignedInteger { width: (*width).try_into().unwrap() }
             }
             AbiType::Integer { sign: Sign::Signed, width } => {
-                PrintableType::SignedInteger { width: *width }
+                PrintableType::SignedInteger { width: (*width).try_into().unwrap() }
             }
         }
     }