diff --git a/.github/workflows/publish-nargo.yml b/.github/workflows/publish-nargo.yml index 55ba754e6ae..fa0b6f2d9fb 100644 --- a/.github/workflows/publish-nargo.yml +++ b/.github/workflows/publish-nargo.yml @@ -23,7 +23,7 @@ permissions: jobs: build-apple-darwin: - runs-on: macos-12 + runs-on: macos-14 env: CROSS_CONFIG: ${{ github.workspace }}/.github/Cross.toml NIGHTLY_RELEASE: ${{ inputs.tag == '' }} @@ -41,7 +41,7 @@ jobs: - name: Setup for Apple Silicon if: matrix.target == 'aarch64-apple-darwin' run: | - sudo xcode-select -s /Applications/Xcode_13.2.1.app/Contents/Developer/ + sudo xcode-select -s /Applications/Xcode_15.4.0.app/Contents/Developer/ echo "SDKROOT=$(xcrun -sdk macosx$(sw_vers -productVersion) --show-sdk-path)" >> $GITHUB_ENV echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx$(sw_vers -productVersion) --show-sdk-platform-version)" >> $GITHUB_ENV diff --git a/acvm-repo/acvm/src/compiler/optimizers/merge_expressions.rs b/acvm-repo/acvm/src/compiler/optimizers/merge_expressions.rs index 56cfa000da2..33ad66c809b 100644 --- a/acvm-repo/acvm/src/compiler/optimizers/merge_expressions.rs +++ b/acvm-repo/acvm/src/compiler/optimizers/merge_expressions.rs @@ -152,14 +152,12 @@ impl MergeExpressionsOptimizer { // Returns the input witnesses used by the opcode fn witness_inputs(&self, opcode: &Opcode) -> BTreeSet { - let mut witnesses = BTreeSet::new(); match opcode { Opcode::AssertZero(expr) => CircuitSimulator::expr_wit(expr), Opcode::BlackBoxFuncCall(bb_func) => bb_func.get_input_witnesses(), Opcode::MemoryOp { block_id: _, op, predicate } => { //index et value, et predicate - let mut witnesses = BTreeSet::new(); - witnesses.extend(CircuitSimulator::expr_wit(&op.index)); + let mut witnesses = CircuitSimulator::expr_wit(&op.index); witnesses.extend(CircuitSimulator::expr_wit(&op.value)); if let Some(p) = predicate { witnesses.extend(CircuitSimulator::expr_wit(p)); @@ -171,6 +169,7 @@ impl MergeExpressionsOptimizer { init.iter().cloned().collect() } 
Opcode::BrilligCall { inputs, outputs, .. } => { + let mut witnesses = BTreeSet::new(); for i in inputs { witnesses.extend(self.brillig_input_wit(i)); } @@ -180,12 +179,9 @@ impl MergeExpressionsOptimizer { witnesses } Opcode::Call { id: _, inputs, outputs, predicate } => { - for i in inputs { - witnesses.insert(*i); - } - for i in outputs { - witnesses.insert(*i); - } + let mut witnesses: BTreeSet = BTreeSet::from_iter(inputs.iter().copied()); + witnesses.extend(outputs); + if let Some(p) = predicate { witnesses.extend(CircuitSimulator::expr_wit(p)); } @@ -233,7 +229,7 @@ mod tests { acir_field::AcirField, circuit::{ brillig::{BrilligFunctionId, BrilligOutputs}, - opcodes::FunctionInput, + opcodes::{BlackBoxFuncCall, FunctionInput}, Circuit, ExpressionWidth, Opcode, PublicInputs, }, native_types::{Expression, Witness}, @@ -241,7 +237,7 @@ mod tests { }; use std::collections::BTreeSet; - fn check_circuit(circuit: Circuit) { + fn check_circuit(circuit: Circuit) -> Circuit { assert!(CircuitSimulator::default().check_circuit(&circuit)); let mut merge_optimizer = MergeExpressionsOptimizer::new(); let acir_opcode_positions = vec![0; 20]; @@ -251,6 +247,7 @@ mod tests { optimized_circuit.opcodes = opcodes; // check that the circuit is still valid after optimization assert!(CircuitSimulator::default().check_circuit(&optimized_circuit)); + optimized_circuit } #[test] @@ -293,6 +290,7 @@ mod tests { public_parameters: PublicInputs::default(), return_values: PublicInputs::default(), assert_messages: Default::default(), + recursive: false, }; check_circuit(circuit); } @@ -345,7 +343,54 @@ mod tests { public_parameters: PublicInputs::default(), return_values: PublicInputs::default(), assert_messages: Default::default(), + recursive: false, }; check_circuit(circuit); } + + #[test] + fn takes_blackbox_opcode_outputs_into_account() { + // Regression test for https://github.com/noir-lang/noir/issues/6527 + // Previously we would not track the usage of witness 4 in the output 
of the blackbox function. + // We would then merge the final two opcodes losing the check that the brillig call must match + // with `_0 ^ _1`. + + let circuit: Circuit = Circuit { + current_witness_index: 7, + opcodes: vec![ + Opcode::BrilligCall { + id: BrilligFunctionId(0), + inputs: Vec::new(), + outputs: vec![BrilligOutputs::Simple(Witness(3))], + predicate: None, + }, + Opcode::BlackBoxFuncCall(BlackBoxFuncCall::AND { + lhs: FunctionInput::witness(Witness(0), 8), + rhs: FunctionInput::witness(Witness(1), 8), + output: Witness(4), + }), + Opcode::AssertZero(Expression { + linear_combinations: vec![ + (FieldElement::one(), Witness(3)), + (-FieldElement::one(), Witness(4)), + ], + ..Default::default() + }), + Opcode::AssertZero(Expression { + linear_combinations: vec![ + (-FieldElement::one(), Witness(2)), + (FieldElement::one(), Witness(4)), + ], + ..Default::default() + }), + ], + expression_width: ExpressionWidth::Bounded { width: 4 }, + private_parameters: BTreeSet::from([Witness(0), Witness(1)]), + return_values: PublicInputs(BTreeSet::from([Witness(2)])), + ..Default::default() + }; + + let new_circuit = check_circuit(circuit.clone()); + assert_eq!(circuit, new_circuit); + } } diff --git a/acvm-repo/acvm/src/compiler/simulator.rs b/acvm-repo/acvm/src/compiler/simulator.rs index 893195f342a..d89b53aa564 100644 --- a/acvm-repo/acvm/src/compiler/simulator.rs +++ b/acvm-repo/acvm/src/compiler/simulator.rs @@ -1,6 +1,7 @@ use acir::{ circuit::{ brillig::{BrilligInputs, BrilligOutputs}, + directives::Directive, opcodes::{BlockId, FunctionInput}, Circuit, Opcode, }, @@ -83,6 +84,17 @@ impl CircuitSimulator { } true } + Opcode::Directive(directive) => match directive { + Directive::ToLeRadix { a, b, .. 
} => { + if !self.can_solve_expression(a) { + return false; + } + for w in b { + self.mark_solvable(*w); + } + true + } + }, Opcode::MemoryOp { block_id, op, predicate } => { if !self.can_solve_expression(&op.index) { return false; @@ -234,6 +246,7 @@ mod tests { public_parameters, return_values: PublicInputs::default(), assert_messages: Default::default(), + recursive: false, } } diff --git a/acvm-repo/bn254_blackbox_solver/src/embedded_curve_ops.rs b/acvm-repo/bn254_blackbox_solver/src/embedded_curve_ops.rs index 35cc68051d7..a02711fda1e 100644 --- a/acvm-repo/bn254_blackbox_solver/src/embedded_curve_ops.rs +++ b/acvm-repo/bn254_blackbox_solver/src/embedded_curve_ops.rs @@ -16,7 +16,6 @@ pub fn multi_scalar_mul( scalars_hi: &[FieldElement], ) -> Result<(FieldElement, FieldElement, FieldElement), BlackBoxResolutionError> { if points.len() != 3 * scalars_lo.len() || scalars_lo.len() != scalars_hi.len() { - dbg!(&points.len(), &scalars_lo.len(), &scalars_hi.len()); return Err(BlackBoxResolutionError::Failed( BlackBoxFunc::MultiScalarMul, "Points and scalars must have the same length".to_string(), diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs b/compiler/noirc_evaluator/src/acir/acir_variable.rs similarity index 99% rename from compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs rename to compiler/noirc_evaluator/src/acir/acir_variable.rs index c6e4a261897..6ba072f01a4 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs +++ b/compiler/noirc_evaluator/src/acir/acir_variable.rs @@ -1,27 +1,18 @@ -use super::big_int::BigIntContext; -use super::generated_acir::{BrilligStdlibFunc, GeneratedAcir, PLACEHOLDER_BRILLIG_INDEX}; -use crate::brillig::brillig_gen::brillig_directive; -use crate::brillig::brillig_ir::artifact::GeneratedBrillig; -use crate::errors::{InternalBug, InternalError, RuntimeError, SsaReport}; -use crate::ssa::acir_gen::{AcirDynamicArray, AcirValue}; -use 
crate::ssa::ir::dfg::CallStack; -use crate::ssa::ir::types::Type as SsaType; -use crate::ssa::ir::{instruction::Endian, types::NumericType}; -use acvm::acir::circuit::brillig::{BrilligFunctionId, BrilligInputs, BrilligOutputs}; -use acvm::acir::circuit::opcodes::{ - AcirFunctionId, BlockId, BlockType, ConstantOrWitnessEnum, MemOp, -}; -use acvm::acir::circuit::{AssertionPayload, ExpressionOrMemory, ExpressionWidth, Opcode}; -use acvm::brillig_vm::{MemoryValue, VMStatus, VM}; -use acvm::BlackBoxFunctionSolver; use acvm::{ - acir::AcirField, acir::{ brillig::Opcode as BrilligOpcode, - circuit::opcodes::FunctionInput, + circuit::{ + brillig::{BrilligFunctionId, BrilligInputs, BrilligOutputs}, + opcodes::{ + AcirFunctionId, BlockId, BlockType, ConstantOrWitnessEnum, FunctionInput, MemOp, + }, + AssertionPayload, ExpressionOrMemory, ExpressionWidth, Opcode, + }, native_types::{Expression, Witness}, - BlackBoxFunc, + AcirField, BlackBoxFunc, }, + brillig_vm::{MemoryValue, VMStatus, VM}, + BlackBoxFunctionSolver, }; use fxhash::FxHashMap as HashMap; use iter_extended::{try_vecmap, vecmap}; @@ -29,6 +20,16 @@ use num_bigint::BigUint; use std::cmp::Ordering; use std::{borrow::Cow, hash::Hash}; +use crate::brillig::brillig_ir::artifact::GeneratedBrillig; +use crate::errors::{InternalBug, InternalError, RuntimeError, SsaReport}; +use crate::ssa::ir::{ + dfg::CallStack, instruction::Endian, types::NumericType, types::Type as SsaType, +}; + +use super::big_int::BigIntContext; +use super::generated_acir::{BrilligStdlibFunc, GeneratedAcir, PLACEHOLDER_BRILLIG_INDEX}; +use super::{brillig_directive, AcirDynamicArray, AcirValue}; + #[derive(Clone, Debug, PartialEq, Eq, Hash)] /// High level Type descriptor for Variables. 
/// diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/big_int.rs b/compiler/noirc_evaluator/src/acir/big_int.rs similarity index 100% rename from compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/big_int.rs rename to compiler/noirc_evaluator/src/acir/big_int.rs diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_directive.rs b/compiler/noirc_evaluator/src/acir/brillig_directive.rs similarity index 100% rename from compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_directive.rs rename to compiler/noirc_evaluator/src/acir/brillig_directive.rs diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs b/compiler/noirc_evaluator/src/acir/generated_acir.rs similarity index 99% rename from compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs rename to compiler/noirc_evaluator/src/acir/generated_acir.rs index 6b215839f34..af118518735 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs +++ b/compiler/noirc_evaluator/src/acir/generated_acir.rs @@ -1,12 +1,7 @@ //! `GeneratedAcir` is constructed as part of the `acir_gen` pass to accumulate all of the ACIR //! program as it is being converted from SSA form. 
-use std::{collections::BTreeMap, u32}; +use std::collections::BTreeMap; -use crate::{ - brillig::{brillig_gen::brillig_directive, brillig_ir::artifact::GeneratedBrillig}, - errors::{InternalError, RuntimeError, SsaReport}, - ssa::ir::{dfg::CallStack, instruction::ErrorType}, -}; use acvm::acir::{ circuit::{ brillig::{BrilligFunctionId, BrilligInputs, BrilligOutputs}, @@ -16,7 +11,17 @@ use acvm::acir::{ native_types::Witness, BlackBoxFunc, }; -use acvm::{acir::native_types::Expression, acir::AcirField}; +use acvm::{ + acir::AcirField, + acir::{circuit::directives::Directive, native_types::Expression}, +}; + +use super::brillig_directive; +use crate::{ + brillig::brillig_ir::artifact::GeneratedBrillig, + errors::{InternalError, RuntimeError, SsaReport}, + ssa::ir::dfg::CallStack, +}; use iter_extended::vecmap; use noirc_errors::debug_info::ProcedureDebugId; @@ -155,7 +160,7 @@ impl GeneratedAcir { /// This means you cannot multiply an infinite amount of `Expression`s together. /// Once the `Expression` goes over degree-2, then it needs to be reduced to a `Witness` /// which has degree-1 in order to be able to continue the multiplication chain. - pub(crate) fn create_witness_for_expression(&mut self, expression: &Expression) -> Witness { + fn create_witness_for_expression(&mut self, expression: &Expression) -> Witness { let fresh_witness = self.next_witness_index(); // Create a constraint that sets them to be equal to each other diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs b/compiler/noirc_evaluator/src/acir/mod.rs similarity index 98% rename from compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs rename to compiler/noirc_evaluator/src/acir/mod.rs index 33fdf2abc82..5c7af97c85d 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs +++ b/compiler/noirc_evaluator/src/acir/mod.rs @@ -1,47 +1,57 @@ //! This file holds the pass to convert from Noir's SSA IR to ACIR. 
-mod acir_ir; +use fxhash::FxHashMap as HashMap; +use im::Vector; use std::collections::{BTreeMap, HashSet}; use std::fmt::Debug; -use self::acir_ir::acir_variable::{AcirContext, AcirType, AcirVar}; -use self::acir_ir::generated_acir::BrilligStdlibFunc; -use super::function_builder::data_bus::DataBus; -use super::ir::dfg::CallStack; -use super::ir::function::FunctionId; -use super::ir::instruction::ConstrainError; -use super::ir::printer::try_to_extract_string_from_error_payload; -use super::{ +use acvm::acir::{ + circuit::{ + brillig::{BrilligBytecode, BrilligFunctionId}, + opcodes::{AcirFunctionId, BlockType}, + AssertionPayload, ErrorSelector, ExpressionWidth, OpcodeLocation, + }, + native_types::Witness, + BlackBoxFunc, +}; +use acvm::{acir::circuit::opcodes::BlockId, acir::AcirField, FieldElement}; +use bn254_blackbox_solver::Bn254BlackBoxSolver; +use iter_extended::{try_vecmap, vecmap}; +use noirc_frontend::monomorphization::ast::InlineType; + +mod acir_variable; +mod big_int; +mod brillig_directive; +mod generated_acir; + +use crate::brillig::{ + brillig_gen::brillig_fn::FunctionContext as BrilligFunctionContext, + brillig_ir::{ + artifact::{BrilligParameter, GeneratedBrillig}, + BrilligContext, + }, + Brillig, +}; +use crate::errors::{InternalError, InternalWarning, RuntimeError, SsaReport}; +use crate::ssa::{ + function_builder::data_bus::DataBus, ir::{ - dfg::DataFlowGraph, - function::{Function, RuntimeType}, + dfg::{CallStack, DataFlowGraph}, + function::{Function, FunctionId, RuntimeType}, instruction::{ - Binary, BinaryOp, Instruction, InstructionId, Intrinsic, TerminatorInstruction, + Binary, BinaryOp, ConstrainError, ErrorType, Instruction, InstructionId, Intrinsic, + TerminatorInstruction, }, map::Id, + printer::try_to_extract_string_from_error_payload, types::{NumericType, Type}, value::{Value, ValueId}, }, ssa_gen::Ssa, }; -use crate::brillig::brillig_ir::artifact::{BrilligParameter, GeneratedBrillig}; -use 
crate::brillig::brillig_ir::BrilligContext; -use crate::brillig::{brillig_gen::brillig_fn::FunctionContext as BrilligFunctionContext, Brillig}; -use crate::errors::{InternalError, InternalWarning, RuntimeError, SsaReport}; -pub(crate) use acir_ir::generated_acir::GeneratedAcir; -use acvm::acir::circuit::opcodes::{AcirFunctionId, BlockType}; -use bn254_blackbox_solver::Bn254BlackBoxSolver; -use noirc_frontend::monomorphization::ast::InlineType; - -use acvm::acir::circuit::brillig::{BrilligBytecode, BrilligFunctionId}; -use acvm::acir::circuit::{AssertionPayload, ErrorSelector, ExpressionWidth, OpcodeLocation}; -use acvm::acir::native_types::Witness; -use acvm::acir::BlackBoxFunc; -use acvm::{acir::circuit::opcodes::BlockId, acir::AcirField, FieldElement}; -use fxhash::FxHashMap as HashMap; -use im::Vector; -use iter_extended::{try_vecmap, vecmap}; -use noirc_frontend::Type as HirType; +use acir_variable::{AcirContext, AcirType, AcirVar}; +use generated_acir::BrilligStdlibFunc; +pub(crate) use generated_acir::GeneratedAcir; #[derive(Default)] struct SharedContext { @@ -772,6 +782,12 @@ impl<'a> Context<'a> { Instruction::IfElse { .. } => { unreachable!("IfElse instruction remaining in acir-gen") } + Instruction::MakeArray { elements, typ: _ } => { + let elements = elements.iter().map(|element| self.convert_value(*element, dfg)); + let value = AcirValue::Array(elements.collect()); + let result = dfg.instruction_results(instruction_id)[0]; + self.ssa_values.insert(result, value); + } } self.acir_context.set_call_stack(CallStack::new()); @@ -1562,7 +1578,7 @@ impl<'a> Context<'a> { if !already_initialized { let value = &dfg[array]; match value { - Value::Array { .. } | Value::Instruction { .. } => { + Value::Instruction { .. 
} => { let value = self.convert_value(array, dfg); let array_typ = dfg.type_of_value(array); let len = if !array_typ.contains_slice_element() { @@ -1605,13 +1621,6 @@ impl<'a> Context<'a> { match array_typ { Type::Array(_, _) | Type::Slice(_) => { match &dfg[array_id] { - Value::Array { array, .. } => { - for (i, value) in array.iter().enumerate() { - flat_elem_type_sizes.push( - self.flattened_slice_size(*value, dfg) + flat_elem_type_sizes[i], - ); - } - } Value::Instruction { .. } | Value::Param { .. } => { // An instruction representing the slice means it has been processed previously during ACIR gen. // Use the previously defined result of an array operation to fetch the internal type information. @@ -1744,13 +1753,6 @@ impl<'a> Context<'a> { fn flattened_slice_size(&mut self, array_id: ValueId, dfg: &DataFlowGraph) -> usize { let mut size = 0; match &dfg[array_id] { - Value::Array { array, .. } => { - // The array is going to be the flattened outer array - // Flattened slice size from SSA value does not need to be multiplied by the len - for value in array { - size += self.flattened_slice_size(*value, dfg); - } - } Value::NumericConstant { .. } => { size += 1; } @@ -1914,10 +1916,6 @@ impl<'a> Context<'a> { Value::NumericConstant { constant, typ } => { AcirValue::Var(self.acir_context.add_constant(*constant), typ.into()) } - Value::Array { array, .. } => { - let elements = array.iter().map(|element| self.convert_value(*element, dfg)); - AcirValue::Array(elements.collect()) - } Value::Intrinsic(..) => todo!(), Value::Function(function_id) => { // This conversion is for debugging support only, to allow the @@ -2840,22 +2838,6 @@ impl<'a> Context<'a> { Ok(()) } - /// Given an array value, return the numerical type of its element. - /// Panics if the given value is not an array or has a non-numeric element type. 
- fn array_element_type(dfg: &DataFlowGraph, value: ValueId) -> AcirType { - match dfg.type_of_value(value) { - Type::Array(elements, _) => { - assert_eq!(elements.len(), 1); - (&elements[0]).into() - } - Type::Slice(elements) => { - assert_eq!(elements.len(), 1); - (&elements[0]).into() - } - _ => unreachable!("Expected array type"), - } - } - /// Convert a Vec into a Vec using the given result ids. /// If the type of a result id is an array, several acir vars are collected into /// a single AcirValue::Array of the same length. @@ -2946,9 +2928,9 @@ mod test { use std::collections::BTreeMap; use crate::{ + acir::BrilligStdlibFunc, brillig::Brillig, ssa::{ - acir_gen::acir_ir::generated_acir::BrilligStdlibFunc, function_builder::FunctionBuilder, ir::{function::FunctionId, instruction::BinaryOp, map::Id, types::Type}, }, diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen.rs index 313fd65a197..786a03031d6 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen.rs @@ -1,7 +1,6 @@ pub(crate) mod brillig_black_box; pub(crate) mod brillig_block; pub(crate) mod brillig_block_variables; -pub(crate) mod brillig_directive; pub(crate) mod brillig_fn; pub(crate) mod brillig_slice_ops; mod constant_allocation; diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs index 40224e132ab..36e1ee90e11 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs @@ -160,13 +160,9 @@ impl<'block> BrilligBlock<'block> { ); } TerminatorInstruction::Return { return_values, .. 
} => { - let return_registers: Vec<_> = return_values - .iter() - .map(|value_id| { - let return_variable = self.convert_ssa_value(*value_id, dfg); - return_variable.extract_register() - }) - .collect(); + let return_registers = vecmap(return_values, |value_id| { + self.convert_ssa_value(*value_id, dfg).extract_register() + }); self.brillig_context.codegen_return(&return_registers); } } @@ -763,6 +759,43 @@ impl<'block> BrilligBlock<'block> { Instruction::IfElse { .. } => { unreachable!("IfElse instructions should not be possible in brillig") } + Instruction::MakeArray { elements: array, typ } => { + let value_id = dfg.instruction_results(instruction_id)[0]; + if !self.variables.is_allocated(&value_id) { + let new_variable = self.variables.define_variable( + self.function_context, + self.brillig_context, + value_id, + dfg, + ); + + // Initialize the variable + match new_variable { + BrilligVariable::BrilligArray(brillig_array) => { + self.brillig_context.codegen_initialize_array(brillig_array); + } + BrilligVariable::BrilligVector(vector) => { + let size = self + .brillig_context + .make_usize_constant_instruction(array.len().into()); + self.brillig_context.codegen_initialize_vector(vector, size, None); + self.brillig_context.deallocate_single_addr(size); + } + _ => unreachable!( + "ICE: Cannot initialize array value created as {new_variable:?}" + ), + }; + + // Write the items + let items_pointer = self + .brillig_context + .codegen_make_array_or_vector_items_pointer(new_variable); + + self.initialize_constant_array(array, typ, dfg, items_pointer); + + self.brillig_context.deallocate_register(items_pointer); + } + } }; let dead_variables = self @@ -1500,46 +1533,6 @@ impl<'block> BrilligBlock<'block> { new_variable } } - Value::Array { array, typ } => { - if self.variables.is_allocated(&value_id) { - self.variables.get_allocation(self.function_context, value_id, dfg) - } else { - let new_variable = self.variables.define_variable( - self.function_context, - 
self.brillig_context, - value_id, - dfg, - ); - - // Initialize the variable - match new_variable { - BrilligVariable::BrilligArray(brillig_array) => { - self.brillig_context.codegen_initialize_array(brillig_array); - } - BrilligVariable::BrilligVector(vector) => { - let size = self - .brillig_context - .make_usize_constant_instruction(array.len().into()); - self.brillig_context.codegen_initialize_vector(vector, size, None); - self.brillig_context.deallocate_single_addr(size); - } - _ => unreachable!( - "ICE: Cannot initialize array value created as {new_variable:?}" - ), - }; - - // Write the items - let items_pointer = self - .brillig_context - .codegen_make_array_or_vector_items_pointer(new_variable); - - self.initialize_constant_array(array, typ, dfg, items_pointer); - - self.brillig_context.deallocate_register(items_pointer); - - new_variable - } - } Value::Function(_) => { // For the debugger instrumentation we want to allow passing // around values representing function pointers, even though diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs index f9ded224b33..61ca20be2f5 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs @@ -89,8 +89,7 @@ impl ConstantAllocation { } if let Some(terminator_instruction) = block.terminator() { terminator_instruction.for_each_value(|value_id| { - let variables = collect_variables_of_value(value_id, &func.dfg); - for variable in variables { + if let Some(variable) = collect_variables_of_value(value_id, &func.dfg) { record_if_constant(block_id, variable, InstructionLocation::Terminator); } }); @@ -166,7 +165,7 @@ impl ConstantAllocation { } pub(crate) fn is_constant_value(id: ValueId, dfg: &DataFlowGraph) -> bool { - matches!(&dfg[dfg.resolve(id)], Value::NumericConstant { .. } | Value::Array { .. 
}) + matches!(&dfg[dfg.resolve(id)], Value::NumericConstant { .. }) } /// For a given function, finds all the blocks that are within loops diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs index a18461bc0cd..87165c36dff 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs @@ -45,32 +45,19 @@ fn find_back_edges( } /// Collects the underlying variables inside a value id. It might be more than one, for example in constant arrays that are constructed with multiple vars. -pub(crate) fn collect_variables_of_value(value_id: ValueId, dfg: &DataFlowGraph) -> Vec { +pub(crate) fn collect_variables_of_value( + value_id: ValueId, + dfg: &DataFlowGraph, +) -> Option { let value_id = dfg.resolve(value_id); let value = &dfg[value_id]; match value { - Value::Instruction { .. } | Value::Param { .. } => { - vec![value_id] - } - // Literal arrays are constants, but might use variable values to initialize. - Value::Array { array, .. } => { - let mut value_ids = vec![value_id]; - - array.iter().for_each(|item_id| { - let underlying_ids = collect_variables_of_value(*item_id, dfg); - value_ids.extend(underlying_ids); - }); - - value_ids - } - Value::NumericConstant { .. } => { - vec![value_id] + Value::Instruction { .. } | Value::Param { .. } | Value::NumericConstant { .. } => { + Some(value_id) } // Functions are not variables in a defunctionalized SSA. Only constant function values should appear. - Value::ForeignFunction(_) | Value::Function(_) | Value::Intrinsic(..) => { - vec![] - } + Value::ForeignFunction(_) | Value::Function(_) | Value::Intrinsic(..) 
=> None,
     }
 }
diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_stack.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_stack.rs
index a0e2a500e20..599c05fc0e8 100644
--- a/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_stack.rs
+++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_stack.rs
@@ -47,6 +47,7 @@ impl BrilligContext<
             let destinations_of_temp = movements_map.remove(first_source).unwrap();
             movements_map.insert(temp_register, destinations_of_temp);
         }
+
         // After removing loops we should have an DAG with each node having only one ancestor (but could have multiple successors)
         // Now we should be able to move the registers just by performing a DFS on the movements map
         let heads: Vec<_> = movements_map
@@ -54,6 +55,7 @@
             .filter(|source| !destinations_set.contains(source))
             .copied()
             .collect();
+
         for head in heads {
             self.perform_movements(&movements_map, head);
         }
diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/procedures/mod.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/procedures/mod.rs
index 0955142e414..0d6d30221d3 100644
--- a/compiler/noirc_evaluator/src/brillig/brillig_ir/procedures/mod.rs
+++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/procedures/mod.rs
@@ -17,6 +17,7 @@ use array_copy::compile_array_copy_procedure;
 use array_reverse::compile_array_reverse_procedure;
 use 
check_max_stack_depth::compile_check_max_stack_depth_procedure;
 use mem_copy::compile_mem_copy_procedure;
+use noirc_errors::debug_info::ProcedureDebugId;
 use prepare_vector_insert::compile_prepare_vector_insert_procedure;
 use prepare_vector_push::compile_prepare_vector_push_procedure;
 use revert_with_string::compile_revert_with_string_procedure;
@@ -108,6 +109,42 @@ impl std::fmt::Display for ProcedureId {
     }
 }
 
+impl ProcedureId {
+    pub(crate) fn to_debug_id(self) -> ProcedureDebugId {
+        ProcedureDebugId(match self {
+            ProcedureId::ArrayCopy => 0,
+            ProcedureId::ArrayReverse => 1,
+            ProcedureId::VectorCopy => 2,
+            ProcedureId::MemCopy => 3,
+            ProcedureId::PrepareVectorPush(true) => 4,
+            ProcedureId::PrepareVectorPush(false) => 5,
+            ProcedureId::VectorPopFront => 6,
+            ProcedureId::VectorPopBack => 7,
+            ProcedureId::PrepareVectorInsert => 8,
+            ProcedureId::VectorRemove => 9,
+            ProcedureId::CheckMaxStackDepth => 10,
+        })
+    }
+
+    pub fn from_debug_id(debug_id: ProcedureDebugId) -> Self {
+        let inner = debug_id.0;
+        match inner {
+            0 => ProcedureId::ArrayCopy,
+            1 => ProcedureId::ArrayReverse,
+            2 => ProcedureId::VectorCopy,
+            3 => ProcedureId::MemCopy,
+            4 => ProcedureId::PrepareVectorPush(true),
+            5 => ProcedureId::PrepareVectorPush(false),
+            6 => ProcedureId::VectorPopFront,
+            7 => ProcedureId::VectorPopBack,
+            8 => ProcedureId::PrepareVectorInsert,
+            9 => ProcedureId::VectorRemove,
+            10 => ProcedureId::CheckMaxStackDepth,
+            _ => panic!("Unsupported procedure debug ID of {inner} was supplied"),
+        }
+    }
+}
+
 pub(crate) fn compile_procedure(
     procedure_id: ProcedureId,
 ) -> BrilligArtifact {
diff --git a/compiler/noirc_evaluator/src/lib.rs b/compiler/noirc_evaluator/src/lib.rs
index 5f0c7a5bbb8..427f9e606f6 100644
--- a/compiler/noirc_evaluator/src/lib.rs
+++ b/compiler/noirc_evaluator/src/lib.rs
@@ -5,11 +5,9 @@
 pub mod errors;
 
-// SSA code to create the SSA based IR
-// for functions and execute different optimizations.
-pub mod ssa;
-
+mod acir;
 pub mod brillig;
+pub mod ssa;
 
 pub use ssa::create_program;
 pub use ssa::ir::instruction::ErrorType;
diff --git a/compiler/noirc_evaluator/src/ssa.rs b/compiler/noirc_evaluator/src/ssa.rs
index 5e2c0f0827d..1c1c97524f2 100644
--- a/compiler/noirc_evaluator/src/ssa.rs
+++ b/compiler/noirc_evaluator/src/ssa.rs
@@ -33,12 +33,8 @@ use noirc_frontend::ast::Visibility;
 use noirc_frontend::{hir_def::function::FunctionSignature, monomorphization::ast::Program};
 use tracing::{span, Level};
 
-use self::{
-    acir_gen::{Artifacts, GeneratedAcir},
-    ssa_gen::Ssa,
-};
+use crate::acir::{Artifacts, GeneratedAcir};
 
-mod acir_gen;
 mod checks;
 pub(super) mod function_builder;
 pub mod ir;
@@ -96,28 +92,28 @@ pub(crate) fn optimize_into_acir(
         .run_pass(Ssa::remove_paired_rc, "After Removing Paired rc_inc & rc_decs:")
         .run_pass(Ssa::separate_runtime, "After Runtime Separation:")
         .run_pass(Ssa::resolve_is_unconstrained, "After Resolving IsUnconstrained:")
-        .run_pass(|ssa| ssa.inline_functions(options.inliner_aggressiveness), "After Inlining:")
+        .run_pass(|ssa| ssa.inline_functions(options.inliner_aggressiveness), "After Inlining (1st):")
         // Run mem2reg with the CFG separated into blocks
-        .run_pass(Ssa::mem2reg, "After Mem2Reg:")
-        .run_pass(Ssa::simplify_cfg, 
"After Simplifying:") + .run_pass(Ssa::mem2reg, "After Mem2Reg (1st):") + .run_pass(Ssa::simplify_cfg, "After Simplifying (1st):") .run_pass(Ssa::as_slice_optimization, "After `as_slice` optimization") .try_run_pass( Ssa::evaluate_static_assert_and_assert_constant, "After `static_assert` and `assert_constant`:", )? .try_run_pass(Ssa::unroll_loops_iteratively, "After Unrolling:")? - .run_pass(Ssa::simplify_cfg, "After Simplifying:") + .run_pass(Ssa::simplify_cfg, "After Simplifying (2nd):") .run_pass(Ssa::flatten_cfg, "After Flattening:") .run_pass(Ssa::remove_bit_shifts, "After Removing Bit Shifts:") // Run mem2reg once more with the flattened CFG to catch any remaining loads/stores - .run_pass(Ssa::mem2reg, "After Mem2Reg:") + .run_pass(Ssa::mem2reg, "After Mem2Reg (2nd):") // Run the inlining pass again to handle functions with `InlineType::NoPredicates`. // Before flattening is run, we treat functions marked with the `InlineType::NoPredicates` as an entry point. // This pass must come immediately following `mem2reg` as the succeeding passes // may create an SSA which inlining fails to handle. 
.run_pass( |ssa| ssa.inline_functions_with_no_predicates(options.inliner_aggressiveness), - "After Inlining:", + "After Inlining (2nd):", ) .run_pass(Ssa::remove_if_else, "After Remove IfElse:") .run_pass(Ssa::fold_constants, "After Constant Folding:") diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir.rs b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir.rs deleted file mode 100644 index 090d5bb0a83..00000000000 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub(crate) mod acir_variable; -pub(crate) mod big_int; -pub(crate) mod generated_acir; diff --git a/compiler/noirc_evaluator/src/ssa/checks/check_for_underconstrained_values.rs b/compiler/noirc_evaluator/src/ssa/checks/check_for_underconstrained_values.rs index 90eb79ccb69..cf884c98be9 100644 --- a/compiler/noirc_evaluator/src/ssa/checks/check_for_underconstrained_values.rs +++ b/compiler/noirc_evaluator/src/ssa/checks/check_for_underconstrained_values.rs @@ -191,7 +191,8 @@ impl Context { | Instruction::Load { .. } | Instruction::Not(..) | Instruction::Store { .. } - | Instruction::Truncate { .. } => { + | Instruction::Truncate { .. } + | Instruction::MakeArray { .. } => { self.value_sets.push(instruction_arguments_and_results); } @@ -247,8 +248,7 @@ impl Context { Value::ForeignFunction(..) => { panic!("Should not be able to reach foreign function from non-brillig functions, {func_id} in function {}", function.name()); } - Value::Array { .. } - | Value::Instruction { .. } + Value::Instruction { .. } | Value::NumericConstant { .. } | Value::Param { .. 
} => { panic!("At the point we are running disconnect there shouldn't be any other values as arguments") diff --git a/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs b/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs index 5a62e9c8e9a..e4a2eeb8c22 100644 --- a/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs +++ b/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, sync::Arc}; -use crate::ssa::ir::{types::Type, value::ValueId}; +use crate::ssa::ir::{function::RuntimeType, types::Type, value::ValueId}; use acvm::FieldElement; use fxhash::FxHashMap as HashMap; use noirc_frontend::ast; @@ -100,7 +100,8 @@ impl DataBus { ) -> DataBus { let mut call_data_args = Vec::new(); for call_data_item in call_data { - let array_id = call_data_item.databus.expect("Call data should have an array id"); + // databus can be None if `main` is a brillig function + let Some(array_id) = call_data_item.databus else { continue }; let call_data_id = call_data_item.call_data_id.expect("Call data should have a user id"); call_data_args.push(CallData { array_id, call_data_id, index_map: call_data_item.map }); @@ -161,13 +162,11 @@ impl FunctionBuilder { } let len = databus.values.len(); - let array = if len > 0 { - let array = self - .array_constant(databus.values, Type::Array(Arc::new(vec![Type::field()]), len)); - Some(array) - } else { - None - }; + let array = (len > 0 && matches!(self.current_function.runtime(), RuntimeType::Acir(_))) + .then(|| { + let array_type = Type::Array(Arc::new(vec![Type::field()]), len); + self.insert_make_array(databus.values, array_type) + }); DataBusBuilder { index: 0, diff --git a/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs b/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs index 63a9453a430..0479f8da0b7 100644 --- a/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs @@ 
-137,11 +137,6 @@ impl FunctionBuilder { self.numeric_constant(value.into(), Type::length_type()) } - /// Insert an array constant into the current function with the given element values. - pub(crate) fn array_constant(&mut self, elements: im::Vector, typ: Type) -> ValueId { - self.current_function.dfg.make_array(elements, typ) - } - /// Returns the type of the given value. pub(crate) fn type_of_value(&self, value: ValueId) -> Type { self.current_function.dfg.type_of_value(value) @@ -356,6 +351,17 @@ impl FunctionBuilder { self.insert_instruction(Instruction::EnableSideEffectsIf { condition }, None); } + /// Insert a `make_array` instruction to create a new array or slice. + /// Returns the new array value. Expects `typ` to be an array or slice type. + pub(crate) fn insert_make_array( + &mut self, + elements: im::Vector, + typ: Type, + ) -> ValueId { + assert!(matches!(typ, Type::Array(..) | Type::Slice(_))); + self.insert_instruction(Instruction::MakeArray { elements, typ }, None).first() + } + /// Terminates the current block with the given terminator instruction /// if the current block does not already have a terminator instruction. fn terminate_block_with(&mut self, terminator: TerminatorInstruction) { @@ -511,7 +517,6 @@ mod tests { instruction::{Endian, Intrinsic}, map::Id, types::Type, - value::Value, }; use super::FunctionBuilder; @@ -533,10 +538,7 @@ mod tests { let call_results = builder.insert_call(to_bits_id, vec![input, length], result_types).into_owned(); - let slice = match &builder.current_function.dfg[call_results[0]] { - Value::Array { array, .. 
} => array, - _ => panic!(), - }; + let slice = builder.current_function.dfg.get_array_constant(call_results[0]).unwrap().0; assert_eq!(slice[0], one); assert_eq!(slice[1], one); assert_eq!(slice[2], one); diff --git a/compiler/noirc_evaluator/src/ssa/ir/dfg.rs b/compiler/noirc_evaluator/src/ssa/ir/dfg.rs index 2be9ffa9afa..e3f3f33682b 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/dfg.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/dfg.rs @@ -266,12 +266,6 @@ impl DataFlowGraph { id } - /// Create a new constant array value from the given elements - pub(crate) fn make_array(&mut self, array: im::Vector, typ: Type) -> ValueId { - assert!(matches!(typ, Type::Array(..) | Type::Slice(_))); - self.make_value(Value::Array { array, typ }) - } - /// Gets or creates a ValueId for the given FunctionId. pub(crate) fn import_function(&mut self, function: FunctionId) -> ValueId { if let Some(existing) = self.functions.get(&function) { @@ -458,8 +452,11 @@ impl DataFlowGraph { /// Otherwise, this returns None. pub(crate) fn get_array_constant(&self, value: ValueId) -> Option<(im::Vector, Type)> { match &self.values[self.resolve(value)] { + Value::Instruction { instruction, .. } => match &self.instructions[*instruction] { + Instruction::MakeArray { elements, typ } => Some((elements.clone(), typ.clone())), + _ => None, + }, // Arrays are shared, so cloning them is cheap - Value::Array { array, typ } => Some((array.clone(), typ.clone())), _ => None, } } @@ -522,8 +519,13 @@ impl DataFlowGraph { /// True if the given ValueId refers to a (recursively) constant value pub(crate) fn is_constant(&self, argument: ValueId) -> bool { match &self[self.resolve(argument)] { - Value::Instruction { .. } | Value::Param { .. } => false, - Value::Array { array, .. } => array.iter().all(|element| self.is_constant(*element)), + Value::Param { .. } => false, + Value::Instruction { instruction, .. } => match &self[*instruction] { + Instruction::MakeArray { elements, .. 
} => { + elements.iter().all(|element| self.is_constant(*element)) + } + _ => false, + }, _ => true, } } @@ -575,6 +577,7 @@ impl std::ops::IndexMut for DataFlowGraph { // The result of calling DataFlowGraph::insert_instruction can // be a list of results or a single ValueId if the instruction was simplified // to an existing value. +#[derive(Debug)] pub(crate) enum InsertInstructionResult<'dfg> { /// Results is the standard case containing the instruction id and the results of that instruction. Results(InstructionId, &'dfg [ValueId]), diff --git a/compiler/noirc_evaluator/src/ssa/ir/dom.rs b/compiler/noirc_evaluator/src/ssa/ir/dom.rs index 94f7a405c05..c1a7f14e0d1 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/dom.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/dom.rs @@ -86,7 +86,7 @@ impl DominatorTree { /// /// This function panics if either of the blocks are unreachable. /// - /// An instruction is considered to dominate itself. + /// A block is considered to dominate itself. pub(crate) fn dominates(&mut self, block_a_id: BasicBlockId, block_b_id: BasicBlockId) -> bool { if let Some(res) = self.cache.get(&(block_a_id, block_b_id)) { return *res; diff --git a/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs b/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs index 991ff22c902..708b02b9102 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs @@ -18,15 +18,26 @@ pub(crate) struct FunctionInserter<'f> { pub(crate) function: &'f mut Function, values: HashMap, + /// Map containing repeat array constants so that we do not initialize a new /// array unnecessarily. An extra tuple field is included as part of the key to /// distinguish between array/slice types. - const_arrays: HashMap<(im::Vector, Type), ValueId>, + /// + /// This is optional since caching arrays relies on the inserter inserting strictly + /// in control-flow order. 
Otherwise, if arrays later in the program are cached first, + /// they may be referred to by instructions earlier in the program. + array_cache: Option, + + /// If this pass is loop unrolling, store the block before the loop to optionally + /// hoist any make_array instructions up to after they are retrieved from the `array_cache`. + pre_loop: Option, } + +pub(crate) type ArrayCache = HashMap, HashMap>; + impl<'f> FunctionInserter<'f> { pub(crate) fn new(function: &'f mut Function) -> FunctionInserter<'f> { - Self { function, values: HashMap::default(), const_arrays: HashMap::default() } + Self { function, values: HashMap::default(), array_cache: None, pre_loop: None } } /// Resolves a ValueId to its new, updated value. @@ -36,27 +47,7 @@ impl<'f> FunctionInserter<'f> { value = self.function.dfg.resolve(value); match self.values.get(&value) { Some(value) => self.resolve(*value), - None => match &self.function.dfg[value] { - super::value::Value::Array { array, typ } => { - let array = array.clone(); - let typ = typ.clone(); - let new_array: im::Vector = - array.iter().map(|id| self.resolve(*id)).collect(); - - if let Some(fetched_value) = - self.const_arrays.get(&(new_array.clone(), typ.clone())) - { - return *fetched_value; - }; - - let new_array_clone = new_array.clone(); - let new_id = self.function.dfg.make_array(new_array, typ.clone()); - self.values.insert(value, new_id); - self.const_arrays.insert((new_array_clone, typ), new_id); - new_id - } - _ => value, - }, + None => value, } } @@ -122,7 +113,7 @@ impl<'f> FunctionInserter<'f> { &mut self, instruction: Instruction, id: InstructionId, - block: BasicBlockId, + mut block: BasicBlockId, call_stack: CallStack, ) -> InsertInstructionResult { let results = self.function.dfg.instruction_results(id); @@ -132,6 +123,30 @@ impl<'f> FunctionInserter<'f> { .requires_ctrl_typevars() .then(|| vecmap(&results, |result| self.function.dfg.type_of_value(*result))); + // Large arrays can lead to OOM panics if duplicated from 
being unrolled in loops. + // To prevent this, try to reuse the same ID for identical arrays instead of inserting + // another MakeArray instruction. Note that this assumes the function inserter is inserting + // in control-flow order. Otherwise we could refer to ValueIds defined later in the program. + let make_array = if let Instruction::MakeArray { elements, typ } = &instruction { + if self.array_is_constant(elements) { + if let Some(fetched_value) = self.get_cached_array(elements, typ) { + assert_eq!(results.len(), 1); + self.values.insert(results[0], fetched_value); + return InsertInstructionResult::SimplifiedTo(fetched_value); + } + + // Hoist constant arrays out of the loop and cache their value + if let Some(pre_loop) = self.pre_loop { + block = pre_loop; + } + Some((elements.clone(), typ.clone())) + } else { + None + } + } else { + None + }; + let new_results = self.function.dfg.insert_instruction_and_results( instruction, block, @@ -139,10 +154,54 @@ impl<'f> FunctionInserter<'f> { call_stack, ); + // Cache an array in the fresh_array_cache if array caching is enabled. + // The fresh cache isn't used for deduplication until an external pass confirms we + // pass a sequence point and all blocks that may be before the current insertion point + // are finished. 
+ if let Some((elements, typ)) = make_array { + Self::cache_array(&mut self.array_cache, elements, typ, new_results.first()); + } + Self::insert_new_instruction_results(&mut self.values, &results, &new_results); new_results } + fn get_cached_array(&self, elements: &im::Vector, typ: &Type) -> Option { + self.array_cache.as_ref()?.get(elements)?.get(typ).copied() + } + + fn cache_array( + arrays: &mut Option, + elements: im::Vector, + typ: Type, + result_id: ValueId, + ) { + if let Some(arrays) = arrays { + arrays.entry(elements).or_default().insert(typ, result_id); + } + } + + fn array_is_constant(&self, elements: &im::Vector) -> bool { + elements.iter().all(|element| self.function.dfg.is_constant(*element)) + } + + pub(crate) fn set_array_cache( + &mut self, + new_cache: Option, + pre_loop: BasicBlockId, + ) { + self.array_cache = new_cache; + self.pre_loop = Some(pre_loop); + } + + /// Finish this inserter, returning its array cache merged with the fresh array cache. + /// Since this consumes the inserter this assumes we're at a sequence point where all + /// predecessor blocks to the current block are finished. Since this is true, the fresh + /// array cache can be merged with the existing array cache. + pub(crate) fn into_array_cache(self) -> Option { + self.array_cache + } + /// Modify the values HashMap to remember the mapping between an instruction result's previous /// ValueId (from the source_function) and its new ValueId in the destination function. 
pub(crate) fn insert_new_instruction_results( diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs index 2b40bccca7b..3ac0b2e3f5e 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs @@ -11,11 +11,12 @@ use fxhash::FxHasher64; use iter_extended::vecmap; use noirc_frontend::hir_def::types::Type as HirType; -use crate::ssa::opt::flatten_cfg::value_merger::ValueMerger; +use crate::ssa::{ir::function::RuntimeType, opt::flatten_cfg::value_merger::ValueMerger}; use super::{ basic_block::BasicBlockId, dfg::{CallStack, DataFlowGraph}, + function::Function, map::Id, types::{NumericType, Type}, value::{Value, ValueId}, @@ -269,15 +270,13 @@ pub(crate) enum Instruction { /// else_value /// } /// ``` + IfElse { then_condition: ValueId, then_value: ValueId, else_value: ValueId }, + + /// Creates a new array or slice. /// - /// Where we save the result of !then_condition so that we have the same - /// ValueId for it each time. - IfElse { - then_condition: ValueId, - then_value: ValueId, - else_condition: ValueId, - else_value: ValueId, - }, + /// `typ` should be an array or slice type with an element type + /// matching each of the `elements` values' types. + MakeArray { elements: im::Vector, typ: Type }, } impl Instruction { @@ -290,7 +289,9 @@ impl Instruction { pub(crate) fn result_type(&self) -> InstructionResultType { match self { Instruction::Binary(binary) => binary.result_type(), - Instruction::Cast(_, typ) => InstructionResultType::Known(typ.clone()), + Instruction::Cast(_, typ) | Instruction::MakeArray { typ, .. } => { + InstructionResultType::Known(typ.clone()) + } Instruction::Not(value) | Instruction::Truncate { value, .. } | Instruction::ArraySet { array: value, .. } @@ -344,6 +345,9 @@ impl Instruction { // We can deduplicate these instructions if we know the predicate is also the same. Constrain(..) | RangeCheck { .. 
} => deduplicate_with_predicate, + // This should never be side-effectful + MakeArray { .. } => true, + // These can have different behavior depending on the EnableSideEffectsIf context. // Replacing them with a similar instruction potentially enables replacing an instruction // with one that was disabled. See @@ -360,12 +364,12 @@ impl Instruction { } } - pub(crate) fn can_eliminate_if_unused(&self, dfg: &DataFlowGraph) -> bool { + pub(crate) fn can_eliminate_if_unused(&self, function: &Function) -> bool { use Instruction::*; match self { Binary(binary) => { if matches!(binary.operator, BinaryOp::Div | BinaryOp::Mod) { - if let Some(rhs) = dfg.get_numeric_constant(binary.rhs) { + if let Some(rhs) = function.dfg.get_numeric_constant(binary.rhs) { rhs != FieldElement::zero() } else { false @@ -381,17 +385,29 @@ impl Instruction { | Load { .. } | ArrayGet { .. } | IfElse { .. } - | ArraySet { .. } => true, + | ArraySet { .. } + | MakeArray { .. } => true, + + // Store instructions must be removed by DIE in acir code, any load + // instructions should already be unused by that point. + // + // Note that this check assumes that it is being performed after the flattening + // pass and after the last mem2reg pass. This is currently the case for the DIE + // pass where this check is done, but does mean that we cannot perform mem2reg + // after the DIE pass. + Store { .. } => { + matches!(function.runtime(), RuntimeType::Acir(_)) + && function.reachable_blocks().len() == 1 + } Constrain(..) - | Store { .. } | EnableSideEffectsIf { .. } | IncrementRc { .. } | DecrementRc { .. } | RangeCheck { .. } => false, // Some `Intrinsic`s have side effects so we must check what kind of `Call` this is. - Call { func, .. } => match dfg[*func] { + Call { func, .. 
} => match function.dfg[*func] { // Explicitly allows removal of unused ec operations, even if they can fail Value::Intrinsic(Intrinsic::BlackBox(BlackBoxFunc::MultiScalarMul)) | Value::Intrinsic(Intrinsic::BlackBox(BlackBoxFunc::EmbeddedCurveAdd)) => true, @@ -444,7 +460,8 @@ impl Instruction { | Instruction::Store { .. } | Instruction::IfElse { .. } | Instruction::IncrementRc { .. } - | Instruction::DecrementRc { .. } => false, + | Instruction::DecrementRc { .. } + | Instruction::MakeArray { .. } => false, } } @@ -511,14 +528,15 @@ impl Instruction { assert_message: assert_message.clone(), } } - Instruction::IfElse { then_condition, then_value, else_condition, else_value } => { - Instruction::IfElse { - then_condition: f(*then_condition), - then_value: f(*then_value), - else_condition: f(*else_condition), - else_value: f(*else_value), - } - } + Instruction::IfElse { then_condition, then_value, else_value } => Instruction::IfElse { + then_condition: f(*then_condition), + then_value: f(*then_value), + else_value: f(*else_value), + }, + Instruction::MakeArray { elements, typ } => Instruction::MakeArray { + elements: elements.iter().copied().map(f).collect(), + typ: typ.clone(), + }, } } @@ -573,12 +591,16 @@ impl Instruction { | Instruction::RangeCheck { value, .. } => { f(*value); } - Instruction::IfElse { then_condition, then_value, else_condition, else_value } => { + Instruction::IfElse { then_condition, then_value, else_value } => { f(*then_condition); f(*then_value); - f(*else_condition); f(*else_value); } + Instruction::MakeArray { elements, typ: _ } => { + for element in elements { + f(*element); + } + } } } @@ -634,20 +656,28 @@ impl Instruction { None } } - Instruction::ArraySet { array, index, value, .. 
} => { - let array_const = dfg.get_array_constant(*array); - let index_const = dfg.get_numeric_constant(*index); - if let (Some((array, element_type)), Some(index)) = (array_const, index_const) { + Instruction::ArraySet { array: array_id, index: index_id, value, .. } => { + let array = dfg.get_array_constant(*array_id); + let index = dfg.get_numeric_constant(*index_id); + if let (Some((array, _element_type)), Some(index)) = (array, index) { let index = index.try_to_u32().expect("Expected array index to fit in u32") as usize; if index < array.len() { - let new_array = dfg.make_array(array.update(index, *value), element_type); - return SimplifiedTo(new_array); + let elements = array.update(index, *value); + let typ = dfg.type_of_value(*array_id); + let instruction = Instruction::MakeArray { elements, typ }; + let new_array = dfg.insert_instruction_and_results( + instruction, + block, + Option::None, + call_stack.clone(), + ); + return SimplifiedTo(new_array.first()); } } - try_optimize_array_set_from_previous_get(dfg, *array, *index, *value) + try_optimize_array_set_from_previous_get(dfg, *array_id, *index_id, *value) } Instruction::Truncate { value, bit_size, max_bit_size } => { if bit_size == max_bit_size { @@ -726,7 +756,7 @@ impl Instruction { None } } - Instruction::IfElse { then_condition, then_value, else_condition, else_value } => { + Instruction::IfElse { then_condition, then_value, else_value } => { let typ = dfg.type_of_value(*then_value); if let Some(constant) = dfg.get_numeric_constant(*then_condition) { @@ -745,13 +775,11 @@ impl Instruction { if matches!(&typ, Type::Numeric(_)) { let then_condition = *then_condition; - let else_condition = *else_condition; let result = ValueMerger::merge_numeric_values( dfg, block, then_condition, - else_condition, then_value, else_value, ); @@ -760,6 +788,7 @@ impl Instruction { None } } + Instruction::MakeArray { .. 
} => None, } } } @@ -803,13 +832,13 @@ fn try_optimize_array_get_from_previous_set( return SimplifyResult::None; } } + Instruction::MakeArray { elements: array, typ: _ } => { + elements = Some(array.clone()); + break; + } _ => return SimplifyResult::None, } } - Value::Array { array, typ: _ } => { - elements = Some(array.clone()); - break; - } _ => return SimplifyResult::None, } } diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs index 9dbd2c56993..95447f941b0 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs @@ -60,7 +60,7 @@ pub(super) fn simplify_call( } else { unreachable!("ICE: Intrinsic::ToRadix return type must be array") }; - constant_to_radix(endian, field, 2, limb_count, dfg) + constant_to_radix(endian, field, 2, limb_count, dfg, block, call_stack) } else { SimplifyResult::None } @@ -77,7 +77,7 @@ pub(super) fn simplify_call( } else { unreachable!("ICE: Intrinsic::ToRadix return type must be array") }; - constant_to_radix(endian, field, radix, limb_count, dfg) + constant_to_radix(endian, field, radix, limb_count, dfg, block, call_stack) } else { SimplifyResult::None } @@ -109,7 +109,8 @@ pub(super) fn simplify_call( let slice_length_value = array.len() / elements_size; let slice_length = dfg.make_constant(slice_length_value.into(), Type::length_type()); - let new_slice = dfg.make_array(array, Type::Slice(inner_element_types)); + let new_slice = + make_array(dfg, array, Type::Slice(inner_element_types), block, call_stack); SimplifyResult::SimplifiedToMultiple(vec![slice_length, new_slice]) } else { SimplifyResult::None @@ -129,7 +130,7 @@ pub(super) fn simplify_call( let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Add, block); - let new_slice = dfg.make_array(slice, element_type); + let new_slice = make_array(dfg, slice, element_type, block, call_stack); return 
SimplifyResult::SimplifiedToMultiple(vec![new_slice_length, new_slice]); } @@ -154,7 +155,7 @@ pub(super) fn simplify_call( let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Add, block); - let new_slice = dfg.make_array(slice, element_type); + let new_slice = make_array(dfg, slice, element_type, block, call_stack); SimplifyResult::SimplifiedToMultiple(vec![new_slice_length, new_slice]) } else { SimplifyResult::None @@ -196,7 +197,7 @@ pub(super) fn simplify_call( results.push(new_slice_length); - let new_slice = dfg.make_array(slice, typ); + let new_slice = make_array(dfg, slice, typ, block, call_stack); // The slice is the last item returned for pop_front results.push(new_slice); @@ -227,7 +228,7 @@ pub(super) fn simplify_call( let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Add, block); - let new_slice = dfg.make_array(slice, typ); + let new_slice = make_array(dfg, slice, typ, block, call_stack); SimplifyResult::SimplifiedToMultiple(vec![new_slice_length, new_slice]) } else { SimplifyResult::None @@ -260,7 +261,7 @@ pub(super) fn simplify_call( results.push(slice.remove(index)); } - let new_slice = dfg.make_array(slice, typ); + let new_slice = make_array(dfg, slice, typ, block, call_stack); results.insert(0, new_slice); let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Sub, block); @@ -317,7 +318,9 @@ pub(super) fn simplify_call( SimplifyResult::None } } - Intrinsic::BlackBox(bb_func) => simplify_black_box_func(bb_func, arguments, dfg), + Intrinsic::BlackBox(bb_func) => { + simplify_black_box_func(bb_func, arguments, dfg, block, call_stack) + } Intrinsic::AsField => { let instruction = Instruction::Cast( arguments[0], @@ -350,7 +353,7 @@ pub(super) fn simplify_call( Intrinsic::IsUnconstrained => SimplifyResult::None, Intrinsic::DerivePedersenGenerators => { if let Some(Type::Array(_, len)) = ctrl_typevars.unwrap().first() { - simplify_derive_generators(dfg, arguments, *len as u32) + 
simplify_derive_generators(dfg, arguments, *len as u32, block, call_stack) } else { unreachable!("Derive Pedersen Generators must return an array"); } @@ -419,7 +422,7 @@ fn simplify_slice_push_back( } let slice_size = slice.len(); let element_size = element_type.element_size(); - let new_slice = dfg.make_array(slice, element_type); + let new_slice = make_array(dfg, slice, element_type, block, &call_stack); let set_last_slice_value_instr = Instruction::ArraySet { array: new_slice, @@ -440,12 +443,8 @@ fn simplify_slice_push_back( let mut value_merger = ValueMerger::new(dfg, block, &mut slice_sizes, unknown, None, call_stack); - let new_slice = value_merger.merge_values( - len_not_equals_capacity, - len_equals_capacity, - set_last_slice_value, - new_slice, - ); + let new_slice = + value_merger.merge_values(len_not_equals_capacity, set_last_slice_value, new_slice); SimplifyResult::SimplifiedToMultiple(vec![new_slice_length, new_slice]) } @@ -505,6 +504,8 @@ fn simplify_black_box_func( bb_func: BlackBoxFunc, arguments: &[ValueId], dfg: &mut DataFlowGraph, + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { cfg_if::cfg_if! 
{ if #[cfg(feature = "bn254")] { @@ -514,8 +515,12 @@ fn simplify_black_box_func( } }; match bb_func { - BlackBoxFunc::Blake2s => simplify_hash(dfg, arguments, acvm::blackbox_solver::blake2s), - BlackBoxFunc::Blake3 => simplify_hash(dfg, arguments, acvm::blackbox_solver::blake3), + BlackBoxFunc::Blake2s => { + simplify_hash(dfg, arguments, acvm::blackbox_solver::blake2s, block, call_stack) + } + BlackBoxFunc::Blake3 => { + simplify_hash(dfg, arguments, acvm::blackbox_solver::blake3, block, call_stack) + } BlackBoxFunc::Keccakf1600 => { if let Some((array_input, _)) = dfg.get_array_constant(arguments[0]) { if array_is_constant(dfg, &array_input) { @@ -533,8 +538,14 @@ fn simplify_black_box_func( const_input.try_into().expect("Keccakf1600 input should have length of 25"), ) .expect("Rust solvable black box function should not fail"); - let state_values = vecmap(state, |x| FieldElement::from(x as u128)); - let result_array = make_constant_array(dfg, state_values, Type::unsigned(64)); + let state_values = state.iter().map(|x| FieldElement::from(*x as u128)); + let result_array = make_constant_array( + dfg, + state_values, + Type::unsigned(64), + block, + call_stack, + ); SimplifyResult::SimplifiedTo(result_array) } else { SimplifyResult::None @@ -544,7 +555,7 @@ fn simplify_black_box_func( } } BlackBoxFunc::Poseidon2Permutation => { - blackbox::simplify_poseidon2_permutation(dfg, solver, arguments) + blackbox::simplify_poseidon2_permutation(dfg, solver, arguments, block, call_stack) } BlackBoxFunc::EcdsaSecp256k1 => blackbox::simplify_signature( dfg, @@ -557,8 +568,12 @@ fn simplify_black_box_func( acvm::blackbox_solver::ecdsa_secp256r1_verify, ), - BlackBoxFunc::MultiScalarMul => SimplifyResult::None, - BlackBoxFunc::EmbeddedCurveAdd => blackbox::simplify_ec_add(dfg, solver, arguments), + BlackBoxFunc::MultiScalarMul => { + blackbox::simplify_msm(dfg, solver, arguments, block, call_stack) + } + BlackBoxFunc::EmbeddedCurveAdd => { + blackbox::simplify_ec_add(dfg, 
solver, arguments, block, call_stack) + } BlackBoxFunc::SchnorrVerify => blackbox::simplify_schnorr_verify(dfg, solver, arguments), BlackBoxFunc::BigIntAdd @@ -585,23 +600,47 @@ fn simplify_black_box_func( } } -fn make_constant_array(dfg: &mut DataFlowGraph, results: Vec, typ: Type) -> ValueId { - let result_constants = vecmap(results, |element| dfg.make_constant(element, typ.clone())); +fn make_constant_array( + dfg: &mut DataFlowGraph, + results: impl Iterator, + typ: Type, + block: BasicBlockId, + call_stack: &CallStack, +) -> ValueId { + let result_constants: im::Vector<_> = + results.map(|element| dfg.make_constant(element, typ.clone())).collect(); let typ = Type::Array(Arc::new(vec![typ]), result_constants.len()); - dfg.make_array(result_constants.into(), typ) + make_array(dfg, result_constants, typ, block, call_stack) +} + +fn make_array( + dfg: &mut DataFlowGraph, + elements: im::Vector, + typ: Type, + block: BasicBlockId, + call_stack: &CallStack, +) -> ValueId { + let instruction = Instruction::MakeArray { elements, typ }; + let call_stack = call_stack.clone(); + dfg.insert_instruction_and_results(instruction, block, None, call_stack).first() } fn make_constant_slice( dfg: &mut DataFlowGraph, results: Vec, typ: Type, + block: BasicBlockId, + call_stack: &CallStack, ) -> (ValueId, ValueId) { let result_constants = vecmap(results, |element| dfg.make_constant(element, typ.clone())); let typ = Type::Slice(Arc::new(vec![typ])); let length = FieldElement::from(result_constants.len() as u128); - (dfg.make_constant(length, Type::length_type()), dfg.make_array(result_constants.into(), typ)) + let length = dfg.make_constant(length, Type::length_type()); + + let slice = make_array(dfg, result_constants.into(), typ, block, call_stack); + (length, slice) } /// Returns a slice (represented by a tuple (len, slice)) of constants corresponding to the limbs of the radix decomposition. 
@@ -611,6 +650,8 @@ fn constant_to_radix( radix: u32, limb_count: u32, dfg: &mut DataFlowGraph, + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { let bit_size = u32::BITS - (radix - 1).leading_zeros(); let radix_big = BigUint::from(radix); @@ -631,7 +672,13 @@ fn constant_to_radix( if endian == Endian::Big { limbs.reverse(); } - let result_array = make_constant_array(dfg, limbs, Type::unsigned(bit_size)); + let result_array = make_constant_array( + dfg, + limbs.into_iter(), + Type::unsigned(bit_size), + block, + call_stack, + ); SimplifyResult::SimplifiedTo(result_array) } } @@ -656,6 +703,8 @@ fn simplify_hash( dfg: &mut DataFlowGraph, arguments: &[ValueId], hash_function: fn(&[u8]) -> Result<[u8; 32], BlackBoxResolutionError>, + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { match dfg.get_array_constant(arguments[0]) { Some((input, _)) if array_is_constant(dfg, &input) => { @@ -664,9 +713,10 @@ fn simplify_hash( let hash = hash_function(&input_bytes) .expect("Rust solvable black box function should not fail"); - let hash_values = vecmap(hash, |byte| FieldElement::from_be_bytes_reduce(&[byte])); + let hash_values = hash.iter().map(|byte| FieldElement::from_be_bytes_reduce(&[*byte])); - let result_array = make_constant_array(dfg, hash_values, Type::unsigned(8)); + let result_array = + make_constant_array(dfg, hash_values, Type::unsigned(8), block, call_stack); SimplifyResult::SimplifiedTo(result_array) } _ => SimplifyResult::None, @@ -725,6 +775,8 @@ fn simplify_derive_generators( dfg: &mut DataFlowGraph, arguments: &[ValueId], num_generators: u32, + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { if arguments.len() == 2 { let domain_separator_string = dfg.get_array_constant(arguments[0]); @@ -754,8 +806,8 @@ fn simplify_derive_generators( results.push(is_infinite); } let len = results.len(); - let result = - dfg.make_array(results.into(), Type::Array(vec![Type::field()].into(), len)); + let typ = 
Type::Array(vec![Type::field()].into(), len); + let result = make_array(dfg, results.into(), typ, block, call_stack); SimplifyResult::SimplifiedTo(result) } else { SimplifyResult::None diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs index 3881646d5e4..4f2a31e2fb0 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs @@ -1,8 +1,13 @@ +use std::sync::Arc; + use acvm::{acir::AcirField, BlackBoxFunctionSolver, BlackBoxResolutionError, FieldElement}; -use iter_extended::vecmap; use crate::ssa::ir::{ - dfg::DataFlowGraph, instruction::SimplifyResult, types::Type, value::ValueId, + basic_block::BasicBlockId, + dfg::{CallStack, DataFlowGraph}, + instruction::{Instruction, SimplifyResult}, + types::Type, + value::ValueId, }; use super::{array_is_constant, make_constant_array, to_u8_vec}; @@ -11,6 +16,8 @@ pub(super) fn simplify_ec_add( dfg: &mut DataFlowGraph, solver: impl BlackBoxFunctionSolver, arguments: &[ValueId], + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { match ( dfg.get_numeric_constant(arguments[0]), @@ -39,13 +46,76 @@ pub(super) fn simplify_ec_add( return SimplifyResult::None; }; - let result_array = make_constant_array( - dfg, - vec![result_x, result_y, result_is_infinity], - Type::field(), - ); + let result_x = dfg.make_constant(result_x, Type::field()); + let result_y = dfg.make_constant(result_y, Type::field()); + let result_is_infinity = dfg.make_constant(result_is_infinity, Type::bool()); - SimplifyResult::SimplifiedTo(result_array) + let typ = Type::Array(Arc::new(vec![Type::field()]), 3); + + let elements = im::vector![result_x, result_y, result_is_infinity]; + let instruction = Instruction::MakeArray { elements, typ }; + let result_array = + dfg.insert_instruction_and_results(instruction, block, None, call_stack.clone()); + + 
SimplifyResult::SimplifiedTo(result_array.first()) + } + _ => SimplifyResult::None, + } +} + +pub(super) fn simplify_msm( + dfg: &mut DataFlowGraph, + solver: impl BlackBoxFunctionSolver, + arguments: &[ValueId], + block: BasicBlockId, + call_stack: &CallStack, +) -> SimplifyResult { + // TODO: Handle MSMs where a subset of the terms are constant. + match (dfg.get_array_constant(arguments[0]), dfg.get_array_constant(arguments[1])) { + (Some((points, _)), Some((scalars, _))) => { + let Some(points) = points + .into_iter() + .map(|id| dfg.get_numeric_constant(id)) + .collect::>>() + else { + return SimplifyResult::None; + }; + + let Some(scalars) = scalars + .into_iter() + .map(|id| dfg.get_numeric_constant(id)) + .collect::>>() + else { + return SimplifyResult::None; + }; + + let mut scalars_lo = Vec::new(); + let mut scalars_hi = Vec::new(); + for (i, scalar) in scalars.into_iter().enumerate() { + if i % 2 == 0 { + scalars_lo.push(scalar); + } else { + scalars_hi.push(scalar); + } + } + + let Ok((result_x, result_y, result_is_infinity)) = + solver.multi_scalar_mul(&points, &scalars_lo, &scalars_hi) + else { + return SimplifyResult::None; + }; + + let result_x = dfg.make_constant(result_x, Type::field()); + let result_y = dfg.make_constant(result_y, Type::field()); + let result_is_infinity = dfg.make_constant(result_is_infinity, Type::bool()); + + let elements = im::vector![result_x, result_y, result_is_infinity]; + let typ = Type::Array(Arc::new(vec![Type::field()]), 3); + let instruction = Instruction::MakeArray { elements, typ }; + let result_array = + dfg.insert_instruction_and_results(instruction, block, None, call_stack.clone()); + + SimplifyResult::SimplifiedTo(result_array.first()) } _ => SimplifyResult::None, } @@ -55,6 +125,8 @@ pub(super) fn simplify_poseidon2_permutation( dfg: &mut DataFlowGraph, solver: impl BlackBoxFunctionSolver, arguments: &[ValueId], + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { match 
(dfg.get_array_constant(arguments[0]), dfg.get_numeric_constant(arguments[1])) { (Some((state, _)), Some(state_length)) if array_is_constant(dfg, &state) => { @@ -74,7 +146,9 @@ pub(super) fn simplify_poseidon2_permutation( return SimplifyResult::None; }; - let result_array = make_constant_array(dfg, new_state, Type::field()); + let new_state = new_state.into_iter(); + let typ = Type::field(); + let result_array = make_constant_array(dfg, new_state, typ, block, call_stack); SimplifyResult::SimplifiedTo(result_array) } @@ -119,6 +193,8 @@ pub(super) fn simplify_hash( dfg: &mut DataFlowGraph, arguments: &[ValueId], hash_function: fn(&[u8]) -> Result<[u8; 32], BlackBoxResolutionError>, + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { match dfg.get_array_constant(arguments[0]) { Some((input, _)) if array_is_constant(dfg, &input) => { @@ -127,9 +203,10 @@ pub(super) fn simplify_hash( let hash = hash_function(&input_bytes) .expect("Rust solvable black box function should not fail"); - let hash_values = vecmap(hash, |byte| FieldElement::from_be_bytes_reduce(&[byte])); + let hash_values = hash.iter().map(|byte| FieldElement::from_be_bytes_reduce(&[*byte])); - let result_array = make_constant_array(dfg, hash_values, Type::unsigned(8)); + let u8_type = Type::unsigned(8); + let result_array = make_constant_array(dfg, hash_values, u8_type, block, call_stack); SimplifyResult::SimplifiedTo(result_array) } _ => SimplifyResult::None, diff --git a/compiler/noirc_evaluator/src/ssa/ir.rs b/compiler/noirc_evaluator/src/ssa/ir/mod.rs similarity index 100% rename from compiler/noirc_evaluator/src/ssa/ir.rs rename to compiler/noirc_evaluator/src/ssa/ir/mod.rs diff --git a/compiler/noirc_evaluator/src/ssa/ir/printer.rs b/compiler/noirc_evaluator/src/ssa/ir/printer.rs index 3bbe14f866a..9b12c47b5fb 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/printer.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/printer.rs @@ -69,17 +69,6 @@ fn value(function: &Function, id: 
ValueId) -> String { } Value::Function(id) => id.to_string(), Value::Intrinsic(intrinsic) => intrinsic.to_string(), - Value::Array { array, typ } => { - let elements = vecmap(array, |element| value(function, *element)); - let element_types = &typ.clone().element_types(); - let element_types_str = - element_types.iter().map(|typ| typ.to_string()).collect::>().join(", "); - if element_types.len() == 1 { - format!("[{}] of {}", elements.join(", "), element_types_str) - } else { - format!("[{}] of ({})", elements.join(", "), element_types_str) - } - } Value::Param { .. } | Value::Instruction { .. } | Value::ForeignFunction(_) => { id.to_string() } @@ -220,15 +209,23 @@ fn display_instruction_inner( Instruction::RangeCheck { value, max_bit_size, .. } => { writeln!(f, "range_check {} to {} bits", show(*value), *max_bit_size,) } - Instruction::IfElse { then_condition, then_value, else_condition, else_value } => { + Instruction::IfElse { then_condition, then_value, else_value } => { let then_condition = show(*then_condition); let then_value = show(*then_value); - let else_condition = show(*else_condition); let else_value = show(*else_value); - writeln!( - f, - "if {then_condition} then {then_value} else if {else_condition} then {else_value}" - ) + writeln!(f, "if {then_condition} then {then_value} else {else_value}") + } + Instruction::MakeArray { elements, typ } => { + write!(f, "make_array [")?; + + for (i, element) in elements.iter().enumerate() { + if i != 0 { + write!(f, ", ")?; + } + write!(f, "{}", show(*element))?; + } + + writeln!(f, "] : {typ}") } } } @@ -244,6 +241,17 @@ fn result_types(function: &Function, results: &[ValueId]) -> String { } } +fn result_types(function: &Function, results: &[ValueId]) -> String { + let types = vecmap(results, |result| function.dfg.type_of_value(*result).to_string()); + if types.is_empty() { + String::new() + } else if types.len() == 1 { + format!(" -> {}", types[0]) + } else { + format!(" -> ({})", types.join(", ")) + } +} + /// 
Tries to extract a constant string from an error payload. pub(crate) fn try_to_extract_string_from_error_payload( is_string_type: bool, @@ -253,13 +261,9 @@ pub(crate) fn try_to_extract_string_from_error_payload( (is_string_type && (values.len() == 1)) .then_some(()) .and_then(|()| { - let Value::Array { array: values, .. } = &dfg[values[0]] else { - return None; - }; - let fields: Option> = - values.iter().map(|value_id| dfg.get_numeric_constant(*value_id)).collect(); - - fields + let (values, _) = &dfg.get_array_constant(values[0])?; + let values = values.iter().map(|value_id| dfg.get_numeric_constant(*value_id)); + values.collect::>>() }) .map(|fields| { fields diff --git a/compiler/noirc_evaluator/src/ssa/ir/value.rs b/compiler/noirc_evaluator/src/ssa/ir/value.rs index 795d45c75e9..ef494200308 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/value.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/value.rs @@ -36,9 +36,6 @@ pub(crate) enum Value { /// This Value originates from a numeric constant NumericConstant { constant: FieldElement, typ: Type }, - /// Represents a constant array value - Array { array: im::Vector, typ: Type }, - /// This Value refers to a function in the IR. /// Functions always have the type Type::Function. /// If the argument or return types are needed, users should retrieve @@ -63,7 +60,6 @@ impl Value { Value::Instruction { typ, .. } => typ, Value::Param { typ, .. } => typ, Value::NumericConstant { typ, .. } => typ, - Value::Array { typ, .. } => typ, Value::Function { .. } => &Type::Function, Value::Intrinsic { .. } => &Type::Function, Value::ForeignFunction { .. 
} => &Type::Function, diff --git a/compiler/noirc_evaluator/src/ssa/opt/array_set.rs b/compiler/noirc_evaluator/src/ssa/opt/array_set.rs index 865a1e31eb3..96de22600a4 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/array_set.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/array_set.rs @@ -199,29 +199,31 @@ mod tests { let src = " brillig(inline) fn main f0 { b0(): - v1 = allocate -> &mut [Field; 5] - store [[Field 0, Field 0, Field 0, Field 0, Field 0] of Field, [Field 0, Field 0, Field 0, Field 0, Field 0] of Field] of [Field; 5] at v1 - v6 = allocate -> &mut [Field; 5] - store [[Field 0, Field 0, Field 0, Field 0, Field 0] of Field, [Field 0, Field 0, Field 0, Field 0, Field 0] of Field] of [Field; 5] at v6 + v2 = make_array [Field 0, Field 0, Field 0, Field 0, Field 0] : [Field; 5] + v3 = make_array [v2, v2] : [[Field; 5]; 2] + v4 = allocate -> &mut [Field; 5] + store v3 at v4 + v5 = allocate -> &mut [Field; 5] + store v3 at v5 jmp b1(u32 0) b1(v0: u32): - v12 = lt v0, u32 5 - jmpif v12 then: b3, else: b2 + v8 = lt v0, u32 5 + jmpif v8 then: b3, else: b2 b3(): - v13 = eq v0, u32 5 - jmpif v13 then: b4, else: b5 + v9 = eq v0, u32 5 + jmpif v9 then: b4, else: b5 b4(): - v14 = load v1 -> [[Field; 5]; 2] - store v14 at v6 + v10 = load v4 -> [[Field; 5]; 2] + store v10 at v5 jmp b5() b5(): - v15 = load v1 -> [[Field; 5]; 2] - v16 = array_get v15, index Field 0 -> [Field; 5] - v18 = array_set v16, index v0, value Field 20 - v19 = array_set v15, index v0, value v18 - store v19 at v1 - v21 = add v0, u32 1 - jmp b1(v21) + v11 = load v4 -> [[Field; 5]; 2] + v12 = array_get v11, index Field 0 -> [Field; 5] + v14 = array_set v12, index v0, value Field 20 + v15 = array_set v11, index v0, value v14 + store v15 at v4 + v17 = add v0, u32 1 + jmp b1(v17) b2(): return } diff --git a/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs b/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs index b5ab688cb2c..32f66e5a0f0 100644 --- 
a/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs @@ -19,7 +19,7 @@ //! //! This is the only pass which removes duplicated pure [`Instruction`]s however and so is needed when //! different blocks are merged, i.e. after the [`flatten_cfg`][super::flatten_cfg] pass. -use std::collections::HashSet; +use std::collections::{HashSet, VecDeque}; use acvm::{acir::AcirField, FieldElement}; use iter_extended::vecmap; @@ -28,6 +28,7 @@ use crate::ssa::{ ir::{ basic_block::BasicBlockId, dfg::{DataFlowGraph, InsertInstructionResult}, + dom::DominatorTree, function::Function, instruction::{Instruction, InstructionId}, types::Type, @@ -67,10 +68,10 @@ impl Function { /// The structure of this pass is simple: /// Go through each block and re-insert all instructions. pub(crate) fn constant_fold(&mut self, use_constraint_info: bool) { - let mut context = Context { use_constraint_info, ..Default::default() }; - context.block_queue.push(self.entry_block()); + let mut context = Context::new(self, use_constraint_info); + context.block_queue.push_back(self.entry_block()); - while let Some(block) = context.block_queue.pop() { + while let Some(block) = context.block_queue.pop_front() { if context.visited_blocks.contains(&block) { continue; } @@ -81,34 +82,57 @@ impl Function { } } -#[derive(Default)] struct Context { use_constraint_info: bool, /// Maps pre-folded ValueIds to the new ValueIds obtained by re-inserting the instruction. visited_blocks: HashSet, - block_queue: Vec, + block_queue: VecDeque, + + /// Contains sets of values which are constrained to be equivalent to each other. + /// + /// The mapping's structure is `side_effects_enabled_var => (constrained_value => simplified_value)`. + /// + /// We partition the maps of constrained values according to the side-effects flag at the point + /// at which the values are constrained. 
This prevents constraints which are only sometimes enforced + /// being used to modify the rest of the program. + constraint_simplification_mappings: HashMap>, + + // Cache of instructions without any side-effects along with their outputs. + cached_instruction_results: InstructionResultCache, + + dom: DominatorTree, } /// HashMap from (Instruction, side_effects_enabled_var) to the results of the instruction. /// Stored as a two-level map to avoid cloning Instructions during the `.get` call. -type InstructionResultCache = HashMap, Vec>>; +/// +/// In addition to each result, the original BasicBlockId is stored as well. This allows us +/// to deduplicate instructions across blocks as long as the new block dominates the original. +type InstructionResultCache = HashMap, ResultCache>>; + +/// Records the results of all duplicate [`Instruction`]s along with the blocks in which they sit. +/// +/// For more information see [`InstructionResultCache`]. +#[derive(Default)] +struct ResultCache { + result: Option<(BasicBlockId, Vec)>, +} impl Context { + fn new(function: &Function, use_constraint_info: bool) -> Self { + Self { + use_constraint_info, + visited_blocks: Default::default(), + block_queue: Default::default(), + constraint_simplification_mappings: Default::default(), + cached_instruction_results: Default::default(), + dom: DominatorTree::with_function(function), + } + } + fn fold_constants_in_block(&mut self, function: &mut Function, block: BasicBlockId) { let instructions = function.dfg[block].take_instructions(); - // Cache of instructions without any side-effects along with their outputs. - let mut cached_instruction_results = HashMap::default(); - - // Contains sets of values which are constrained to be equivalent to each other. - // - // The mapping's structure is `side_effects_enabled_var => (constrained_value => simplified_value)`. 
- // - // We partition the maps of constrained values according to the side-effects flag at the point - // at which the values are constrained. This prevents constraints which are only sometimes enforced - // being used to modify the rest of the program. - let mut constraint_simplification_mappings: HashMap> = - HashMap::default(); let mut side_effects_enabled_var = function.dfg.make_constant(FieldElement::one(), Type::bool()); @@ -117,8 +141,6 @@ impl Context { &mut function.dfg, block, instruction_id, - &mut cached_instruction_results, - &mut constraint_simplification_mappings, &mut side_effects_enabled_var, ); } @@ -126,25 +148,33 @@ impl Context { } fn fold_constants_into_instruction( - &self, + &mut self, dfg: &mut DataFlowGraph, - block: BasicBlockId, + mut block: BasicBlockId, id: InstructionId, - instruction_result_cache: &mut InstructionResultCache, - constraint_simplification_mappings: &mut HashMap>, side_effects_enabled_var: &mut ValueId, ) { - let constraint_simplification_mapping = - constraint_simplification_mappings.entry(*side_effects_enabled_var).or_default(); + let constraint_simplification_mapping = self.get_constraint_map(*side_effects_enabled_var); let instruction = Self::resolve_instruction(id, dfg, constraint_simplification_mapping); let old_results = dfg.instruction_results(id).to_vec(); // If a copy of this instruction exists earlier in the block, then reuse the previous results. - if let Some(cached_results) = - Self::get_cached(dfg, instruction_result_cache, &instruction, *side_effects_enabled_var) + if let Some(cache_result) = + self.get_cached(dfg, &instruction, *side_effects_enabled_var, block) { - Self::replace_result_ids(dfg, &old_results, cached_results); - return; + match cache_result { + CacheResult::Cached(cached) => { + Self::replace_result_ids(dfg, &old_results, cached); + return; + } + CacheResult::NeedToHoistToCommonBlock(dominator, _cached) => { + // Just change the block to insert in the common dominator instead. 
+ // This will only move the current instance of the instruction right now. + // When constant folding is run a second time later on, it'll catch + // that the previous instance can be deduplicated to this instance. + block = dominator; + } + } } // Otherwise, try inserting the instruction again to apply any optimizations using the newly resolved inputs. @@ -156,9 +186,8 @@ impl Context { instruction.clone(), new_results, dfg, - instruction_result_cache, - constraint_simplification_mapping, *side_effects_enabled_var, + block, ); // If we just inserted an `Instruction::EnableSideEffectsIf`, we need to update `side_effects_enabled_var` @@ -229,13 +258,12 @@ impl Context { } fn cache_instruction( - &self, + &mut self, instruction: Instruction, instruction_results: Vec, dfg: &DataFlowGraph, - instruction_result_cache: &mut InstructionResultCache, - constraint_simplification_mapping: &mut HashMap, side_effects_enabled_var: ValueId, + block: BasicBlockId, ) { if self.use_constraint_info { // If the instruction was a constraint, then create a link between the two `ValueId`s @@ -248,18 +276,18 @@ impl Context { // Prefer replacing with constants where possible. (Value::NumericConstant { .. }, _) => { - constraint_simplification_mapping.insert(rhs, lhs); + self.get_constraint_map(side_effects_enabled_var).insert(rhs, lhs); } (_, Value::NumericConstant { .. }) => { - constraint_simplification_mapping.insert(lhs, rhs); + self.get_constraint_map(side_effects_enabled_var).insert(lhs, rhs); } // Otherwise prefer block parameters over instruction results. // This is as block parameters are more likely to be a single witness rather than a full expression. (Value::Param { .. }, Value::Instruction { .. }) => { - constraint_simplification_mapping.insert(rhs, lhs); + self.get_constraint_map(side_effects_enabled_var).insert(rhs, lhs); } (Value::Instruction { .. }, Value::Param { .. 
}) => { - constraint_simplification_mapping.insert(lhs, rhs); + self.get_constraint_map(side_effects_enabled_var).insert(lhs, rhs); } (_, _) => (), } @@ -273,13 +301,22 @@ impl Context { self.use_constraint_info && instruction.requires_acir_gen_predicate(dfg); let predicate = use_predicate.then_some(side_effects_enabled_var); - instruction_result_cache + self.cached_instruction_results .entry(instruction) .or_default() - .insert(predicate, instruction_results); + .entry(predicate) + .or_default() + .cache(block, instruction_results); } } + fn get_constraint_map( + &mut self, + side_effects_enabled_var: ValueId, + ) -> &mut HashMap { + self.constraint_simplification_mappings.entry(side_effects_enabled_var).or_default() + } + /// Replaces a set of [`ValueId`]s inside the [`DataFlowGraph`] with another. fn replace_result_ids( dfg: &mut DataFlowGraph, @@ -291,24 +328,52 @@ impl Context { } } - fn get_cached<'a>( + fn get_cached( + &mut self, dfg: &DataFlowGraph, - instruction_result_cache: &'a mut InstructionResultCache, instruction: &Instruction, side_effects_enabled_var: ValueId, - ) -> Option<&'a Vec> { - let results_for_instruction = instruction_result_cache.get(instruction); + block: BasicBlockId, + ) -> Option { + let results_for_instruction = self.cached_instruction_results.get(instruction)?; - // See if there's a cached version with no predicate first - if let Some(results) = results_for_instruction.and_then(|map| map.get(&None)) { - return Some(results); - } + let predicate = self.use_constraint_info && instruction.requires_acir_gen_predicate(dfg); + let predicate = predicate.then_some(side_effects_enabled_var); - let predicate = - instruction.requires_acir_gen_predicate(dfg).then_some(side_effects_enabled_var); + results_for_instruction.get(&predicate)?.get(block, &mut self.dom) + } +} - results_for_instruction.and_then(|map| map.get(&predicate)) +impl ResultCache { + /// Records that an `Instruction` in block `block` produced the result values `results`. 
+ fn cache(&mut self, block: BasicBlockId, results: Vec) { + if self.result.is_none() { + self.result = Some((block, results)); + } } + + /// Returns a set of [`ValueId`]s produced from a copy of this [`Instruction`] which sits + /// within a block which dominates `block`. + /// + /// We require that the cached instruction's block dominates `block` in order to avoid + /// cycles causing issues (e.g. two instructions being replaced with the results of each other + /// such that neither instruction exists anymore.) + fn get(&self, block: BasicBlockId, dom: &mut DominatorTree) -> Option { + self.result.as_ref().map(|(origin_block, results)| { + if dom.dominates(*origin_block, block) { + CacheResult::Cached(results) + } else { + // Insert a copy of this instruction in the common dominator + let dominator = dom.common_dominator(*origin_block, block); + CacheResult::NeedToHoistToCommonBlock(dominator, results) + } + }) + } +} + +enum CacheResult<'a> { + Cached(&'a [ValueId]), + NeedToHoistToCommonBlock(BasicBlockId, &'a [ValueId]), } #[cfg(test)] @@ -440,7 +505,8 @@ mod test { acir(inline) fn main f0 { b0(v0: Field): v2 = add v0, Field 1 - return [v2] of Field + v3 = make_array [v2] : [Field; 1] + return v3 } "; let ssa = Ssa::from_str(src).unwrap(); @@ -546,6 +612,16 @@ mod test { // Regression for #4600 #[test] fn array_get_regression() { + // fn main f0 { + // b0(v0: u1, v1: u64): + // enable_side_effects_if v0 + // v2 = make_array [Field 0, Field 1] + // v3 = array_get v2, index v1 + // v4 = not v0 + // enable_side_effects_if v4 + // v5 = array_get v2, index v1 + // } + // // We want to make sure after constant folding both array_gets remain since they are // under different enable_side_effects_if contexts and thus one may be disabled while // the other is not. If one is removed, it is possible e.g. 
v4 is replaced with v2 which @@ -554,10 +630,11 @@ mod test { acir(inline) fn main f0 { b0(v0: u1, v1: u64): enable_side_effects v0 - v5 = array_get [Field 0, Field 1] of Field, index v1 -> Field + v4 = make_array [Field 0, Field 1] : [Field; 2] + v5 = array_get v4, index v1 -> Field v6 = not v0 enable_side_effects v6 - v8 = array_get [Field 0, Field 1] of Field, index v1 -> Field + v7 = array_get v4, index v1 -> Field return } "; @@ -618,14 +695,14 @@ mod test { assert_normalized_ssa_equals(ssa, expected); } - // This test currently fails. It being fixed will address the issue https://github.com/noir-lang/noir/issues/5756 #[test] - #[should_panic] fn constant_array_deduplication() { // fn main f0 { // b0(v0: u64): - // v5 = call keccakf1600([v0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0]) - // v6 = call keccakf1600([v0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0]) + // v1 = make_array [v0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0] + // v2 = make_array [v0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0] + // v5 = call keccakf1600(v1) + // v6 = call keccakf1600(v2) // } // // Here we're checking a situation where two identical arrays are being initialized twice and being assigned separate `ValueId`s. 
@@ -638,19 +715,20 @@ mod test { let zero = builder.numeric_constant(0u128, Type::unsigned(64)); let typ = Type::Array(Arc::new(vec![Type::unsigned(64)]), 25); - let array_contents = vec![ + let array_contents = im::vector![ v0, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, ]; - let array1 = builder.array_constant(array_contents.clone().into(), typ.clone()); - let array2 = builder.array_constant(array_contents.into(), typ.clone()); + let array1 = builder.insert_make_array(array_contents.clone(), typ.clone()); + let array2 = builder.insert_make_array(array_contents, typ.clone()); - assert_eq!(array1, array2, "arrays were assigned different value ids"); + assert_ne!(array1, array2, "arrays were not assigned different value ids"); let keccakf1600 = builder.import_intrinsic("keccakf1600").expect("keccakf1600 intrinsic should exist"); let _v10 = builder.insert_call(keccakf1600, vec![array1], vec![typ.clone()]); let _v11 = builder.insert_call(keccakf1600, vec![array2], vec![typ.clone()]); + builder.terminate_with_return(Vec::new()); let mut ssa = builder.finish(); ssa.normalize_ids(); @@ -660,8 +738,13 @@ mod test { let main = ssa.main(); let instructions = main.dfg[main.entry_block()].instructions(); let starting_instruction_count = instructions.len(); - assert_eq!(starting_instruction_count, 2); + assert_eq!(starting_instruction_count, 4); + // fn main f0 { + // b0(v0: u64): + // v1 = make_array [v0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0] + // v5 = call keccakf1600(v1) + // } let ssa = ssa.fold_constants(); println!("{ssa}"); @@ -669,6 +752,106 @@ mod test { let main = ssa.main(); let instructions = main.dfg[main.entry_block()].instructions(); let ending_instruction_count = instructions.len(); - assert_eq!(ending_instruction_count, 1); + 
assert_eq!(ending_instruction_count, 2); + } + + #[test] + fn deduplicate_across_blocks() { + // fn main f0 { + // b0(v0: u1): + // v1 = not v0 + // jmp b1() + // b1(): + // v2 = not v0 + // return v2 + // } + let main_id = Id::test_new(0); + + // Compiling main + let mut builder = FunctionBuilder::new("main".into(), main_id); + let b1 = builder.insert_block(); + + let v0 = builder.add_parameter(Type::bool()); + let _v1 = builder.insert_not(v0); + builder.terminate_with_jmp(b1, Vec::new()); + + builder.switch_to_block(b1); + let v2 = builder.insert_not(v0); + builder.terminate_with_return(vec![v2]); + + let ssa = builder.finish(); + let main = ssa.main(); + assert_eq!(main.dfg[main.entry_block()].instructions().len(), 1); + assert_eq!(main.dfg[b1].instructions().len(), 1); + + // Expected output: + // + // fn main f0 { + // b0(v0: u1): + // v1 = not v0 + // jmp b1() + // b1(): + // return v1 + // } + let ssa = ssa.fold_constants_using_constraints(); + let main = ssa.main(); + assert_eq!(main.dfg[main.entry_block()].instructions().len(), 1); + assert_eq!(main.dfg[b1].instructions().len(), 0); + } + + #[test] + fn deduplicate_across_non_dominated_blocks() { + let src = " + brillig(inline) fn main f0 { + b0(v0: u32): + v2 = lt u32 1000, v0 + jmpif v2 then: b1, else: b2 + b1(): + v4 = add v0, u32 1 + v5 = lt v0, v4 + constrain v5 == u1 1 + jmp b2() + b2(): + v7 = lt u32 1000, v0 + jmpif v7 then: b3, else: b4 + b3(): + v8 = add v0, u32 1 + v9 = lt v0, v8 + constrain v9 == u1 1 + jmp b4() + b4(): + return + } + "; + let ssa = Ssa::from_str(src).unwrap(); + + // v4 has been hoisted, although: + // - v5 has not yet been removed since it was encountered earlier in the program + // - v8 hasn't been recognized as a duplicate of v6 yet since they still reference v4 and + // v5 respectively + let expected = " + brillig(inline) fn main f0 { + b0(v0: u32): + v2 = lt u32 1000, v0 + v4 = add v0, u32 1 + jmpif v2 then: b1, else: b2 + b1(): + v5 = add v0, u32 1 + v6 = lt v0, v5 + 
constrain v6 == u1 1 + jmp b2() + b2(): + jmpif v2 then: b3, else: b4 + b3(): + v8 = lt v0, v4 + constrain v8 == u1 1 + jmp b4() + b4(): + return + } + "; + + let ssa = ssa.fold_constants_using_constraints(); + assert_normalized_ssa_equals(ssa, expected); } } diff --git a/compiler/noirc_evaluator/src/ssa/opt/die.rs b/compiler/noirc_evaluator/src/ssa/opt/die.rs index f28b076a5f9..8d3fa9cc615 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/die.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/die.rs @@ -172,7 +172,7 @@ impl Context { fn is_unused(&self, instruction_id: InstructionId, function: &Function) -> bool { let instruction = &function.dfg[instruction_id]; - if instruction.can_eliminate_if_unused(&function.dfg) { + if instruction.can_eliminate_if_unused(function) { let results = function.dfg.instruction_results(instruction_id); results.iter().all(|result| !self.used_values.contains(result)) } else if let Instruction::Call { func, arguments } = instruction { @@ -192,29 +192,11 @@ impl Context { }); } - /// Inspects a value recursively (as it could be an array) and marks all comprised instruction - /// results as used. + /// Inspects a value and marks all instruction results as used. fn mark_used_instruction_results(&mut self, dfg: &DataFlowGraph, value_id: ValueId) { let value_id = dfg.resolve(value_id); - match &dfg[value_id] { - Value::Instruction { .. } => { - self.used_values.insert(value_id); - } - Value::Array { array, .. } => { - self.used_values.insert(value_id); - for elem in array { - self.mark_used_instruction_results(dfg, *elem); - } - } - Value::Param { .. } => { - self.used_values.insert(value_id); - } - Value::NumericConstant { .. } => { - self.used_values.insert(value_id); - } - _ => { - // Does not comprise of any instruction results - } + if matches!(&dfg[value_id], Value::Instruction { .. } | Value::Param { .. 
}) { + self.used_values.insert(value_id); } } @@ -740,10 +722,11 @@ mod test { fn keep_inc_rc_on_borrowed_array_store() { // acir(inline) fn main f0 { // b0(): + // v1 = make_array [u32 0, u32 0] // v2 = allocate - // inc_rc [u32 0, u32 0] - // store [u32 0, u32 0] at v2 - // inc_rc [u32 0, u32 0] + // inc_rc v1 + // store v1 at v2 + // inc_rc v1 // jmp b1() // b1(): // v3 = load v2 @@ -756,11 +739,11 @@ mod test { let mut builder = FunctionBuilder::new("main".into(), main_id); let zero = builder.numeric_constant(0u128, Type::unsigned(32)); let array_type = Type::Array(Arc::new(vec![Type::unsigned(32)]), 2); - let array = builder.array_constant(vector![zero, zero], array_type.clone()); + let v1 = builder.insert_make_array(vector![zero, zero], array_type.clone()); let v2 = builder.insert_allocate(array_type.clone()); - builder.increment_array_reference_count(array); - builder.insert_store(v2, array); - builder.increment_array_reference_count(array); + builder.increment_array_reference_count(v1); + builder.insert_store(v2, v1); + builder.increment_array_reference_count(v1); let b1 = builder.insert_block(); builder.terminate_with_jmp(b1, vec![]); @@ -775,14 +758,14 @@ mod test { let main = ssa.main(); // The instruction count never includes the terminator instruction - assert_eq!(main.dfg[main.entry_block()].instructions().len(), 4); + assert_eq!(main.dfg[main.entry_block()].instructions().len(), 5); assert_eq!(main.dfg[b1].instructions().len(), 2); // We expect the output to be unchanged let ssa = ssa.dead_instruction_elimination(); let main = ssa.main(); - assert_eq!(main.dfg[main.entry_block()].instructions().len(), 4); + assert_eq!(main.dfg[main.entry_block()].instructions().len(), 5); assert_eq!(main.dfg[b1].instructions().len(), 2); } diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs index db2d96aac81..61a93aee58d 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs +++ 
b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs @@ -132,7 +132,6 @@ //! v12 = add v10, v11 //! store v12 at v5 (new store) use fxhash::FxHashMap as HashMap; -use std::collections::{BTreeMap, HashSet}; use acvm::{acir::AcirField, acir::BlackBoxFunc, FieldElement}; use iter_extended::vecmap; @@ -186,18 +185,6 @@ struct Context<'f> { /// Maps start of branch -> end of branch branch_ends: HashMap, - /// Maps an address to the old and new value of the element at that address - /// These only hold stores for one block at a time and is cleared - /// between inlining of branches. - store_values: HashMap, - - /// Stores all allocations local to the current branch. - /// Since these branches are local to the current branch (ie. only defined within one branch of - /// an if expression), they should not be merged with their previous value or stored value in - /// the other branch since there is no such value. The ValueId here is that which is returned - /// by the allocate instruction. - local_allocations: HashSet, - /// A stack of each jmpif condition that was taken to reach a particular point in the program. /// When two branches are merged back into one, this constitutes a join point, and is analogous /// to the rest of the program after an if statement. When such a join point / end block is @@ -216,13 +203,6 @@ struct Context<'f> { arguments_stack: Vec>, } -#[derive(Clone)] -pub(crate) struct Store { - old_value: ValueId, - new_value: ValueId, - call_stack: CallStack, -} - #[derive(Clone)] struct ConditionalBranch { // Contains the last processed block during the processing of the branch. 
@@ -231,10 +211,6 @@ struct ConditionalBranch { old_condition: ValueId, // The condition of the branch condition: ValueId, - // The store values accumulated when processing the branch - store_values: HashMap, - // The allocations accumulated when processing the branch - local_allocations: HashSet, } struct ConditionalContext { @@ -263,8 +239,6 @@ fn flatten_function_cfg(function: &mut Function, no_predicates: &HashMap Context<'f> { // If this is not a separate variable, clippy gets confused and says the to_vec is // unnecessary, when removing it actually causes an aliasing/mutability error. let instructions = self.inserter.function.dfg[block].instructions().to_vec(); + let mut previous_allocate_result = None; + for instruction in instructions.iter() { if self.is_no_predicate(no_predicates, instruction) { // disable side effect for no_predicate functions @@ -356,10 +332,10 @@ impl<'f> Context<'f> { None, im::Vector::new(), ); - self.push_instruction(*instruction); + self.push_instruction(*instruction, &mut previous_allocate_result); self.insert_current_side_effects_enabled(); } else { - self.push_instruction(*instruction); + self.push_instruction(*instruction, &mut previous_allocate_result); } } } @@ -429,13 +405,9 @@ impl<'f> Context<'f> { let old_condition = *condition; let then_condition = self.inserter.resolve(old_condition); - let old_stores = std::mem::take(&mut self.store_values); - let old_allocations = std::mem::take(&mut self.local_allocations); let branch = ConditionalBranch { old_condition, condition: self.link_condition(then_condition), - store_values: old_stores, - local_allocations: old_allocations, last_block: *then_destination, }; let cond_context = ConditionalContext { @@ -463,21 +435,11 @@ impl<'f> Context<'f> { ); let else_condition = self.link_condition(else_condition); - // Make sure the else branch sees the previous values of each store - // rather than any values created in the 'then' branch. 
- let old_stores = std::mem::take(&mut cond_context.then_branch.store_values); - cond_context.then_branch.store_values = std::mem::take(&mut self.store_values); - self.undo_stores_in_then_branch(&cond_context.then_branch.store_values); - - let old_allocations = std::mem::take(&mut self.local_allocations); let else_branch = ConditionalBranch { old_condition: cond_context.then_branch.old_condition, condition: else_condition, - store_values: old_stores, - local_allocations: old_allocations, last_block: *block, }; - cond_context.then_branch.local_allocations.clear(); cond_context.else_branch = Some(else_branch); self.condition_stack.push(cond_context); @@ -499,10 +461,7 @@ impl<'f> Context<'f> { } let mut else_branch = cond_context.else_branch.unwrap(); - let stores_in_branch = std::mem::replace(&mut self.store_values, else_branch.store_values); - self.local_allocations = std::mem::take(&mut else_branch.local_allocations); else_branch.last_block = *block; - else_branch.store_values = stores_in_branch; cond_context.else_branch = Some(else_branch); // We must remember to reset whether side effects are enabled when both branches @@ -560,7 +519,6 @@ impl<'f> Context<'f> { let instruction = Instruction::IfElse { then_condition: cond_context.then_branch.condition, then_value: then_arg, - else_condition: cond_context.else_branch.as_ref().unwrap().condition, else_value: else_arg, }; let call_stack = cond_context.call_stack.clone(); @@ -571,8 +529,6 @@ impl<'f> Context<'f> { .first() }); - let call_stack = cond_context.call_stack; - self.merge_stores(cond_context.then_branch, cond_context.else_branch, call_stack); self.arguments_stack.pop(); self.arguments_stack.pop(); self.arguments_stack.push(args); @@ -627,130 +583,45 @@ impl<'f> Context<'f> { self.insert_instruction_with_typevars(enable_side_effects, None, call_stack); } - /// Merge any store instructions found in each branch. 
- /// - /// This function relies on the 'then' branch being merged before the 'else' branch of a jmpif - /// instruction. If this ordering is changed, the ordering that store values are merged within - /// this function also needs to be changed to reflect that. - fn merge_stores( - &mut self, - then_branch: ConditionalBranch, - else_branch: Option, - call_stack: CallStack, - ) { - // Address -> (then_value, else_value, value_before_the_if) - let mut new_map = BTreeMap::new(); - - for (address, store) in then_branch.store_values { - new_map.insert(address, (store.new_value, store.old_value, store.old_value)); - } - - if else_branch.is_some() { - for (address, store) in else_branch.clone().unwrap().store_values { - if let Some(entry) = new_map.get_mut(&address) { - entry.1 = store.new_value; - } else { - new_map.insert(address, (store.old_value, store.new_value, store.old_value)); - } - } - } - - let then_condition = then_branch.condition; - let else_condition = if let Some(branch) = else_branch { - branch.condition - } else { - self.inserter.function.dfg.make_constant(FieldElement::zero(), Type::bool()) - }; - let block = self.inserter.function.entry_block(); - - // Merging must occur in a separate loop as we cannot borrow `self` as mutable while `value_merger` does - let mut new_values = HashMap::default(); - for (address, (then_case, else_case, _)) in &new_map { - let instruction = Instruction::IfElse { - then_condition, - then_value: *then_case, - else_condition, - else_value: *else_case, - }; - let dfg = &mut self.inserter.function.dfg; - let value = dfg - .insert_instruction_and_results(instruction, block, None, call_stack.clone()) - .first(); - - new_values.insert(address, value); - } - - // Replace stores with new merged values - for (address, (_, _, old_value)) in &new_map { - let value = new_values[address]; - let address = *address; - self.insert_instruction_with_typevars( - Instruction::Store { address, value }, - None, - call_stack.clone(), - ); - - if 
let Some(store) = self.store_values.get_mut(&address) { - store.new_value = value; - } else { - self.store_values.insert( - address, - Store { - old_value: *old_value, - new_value: value, - call_stack: call_stack.clone(), - }, - ); - } - } - } - - fn remember_store(&mut self, address: ValueId, new_value: ValueId, call_stack: CallStack) { - if !self.local_allocations.contains(&address) { - if let Some(store_value) = self.store_values.get_mut(&address) { - store_value.new_value = new_value; - } else { - let load = Instruction::Load { address }; - - let load_type = Some(vec![self.inserter.function.dfg.type_of_value(new_value)]); - let old_value = self - .insert_instruction_with_typevars(load.clone(), load_type, call_stack.clone()) - .first(); - - self.store_values.insert(address, Store { old_value, new_value, call_stack }); - } - } - } - /// Push the given instruction to the end of the entry block of the current function. /// /// Note that each ValueId of the instruction will be mapped via self.inserter.resolve. /// As a result, the instruction that will be pushed will actually be a new instruction /// with a different InstructionId from the original. The results of the given instruction /// will also be mapped to the results of the new instruction. - fn push_instruction(&mut self, id: InstructionId) -> Vec { + /// + /// `previous_allocate_result` should only be set to the result of an allocate instruction + /// if that instruction was the instruction immediately previous to this one - if there are + /// any instructions in between it should be None. 
+ fn push_instruction( + &mut self, + id: InstructionId, + previous_allocate_result: &mut Option, + ) { let (instruction, call_stack) = self.inserter.map_instruction(id); - let instruction = self.handle_instruction_side_effects(instruction, call_stack.clone()); - let is_allocate = matches!(instruction, Instruction::Allocate); + let instruction = self.handle_instruction_side_effects( + instruction, + call_stack.clone(), + *previous_allocate_result, + ); + let instruction_is_allocate = matches!(&instruction, Instruction::Allocate); let entry = self.inserter.function.entry_block(); let results = self.inserter.push_instruction_value(instruction, id, entry, call_stack); - - // Remember an allocate was created local to this branch so that we do not try to merge store - // values across branches for it later. - if is_allocate { - self.local_allocations.insert(results.first()); - } - - results.results().into_owned() + *previous_allocate_result = instruction_is_allocate.then(|| results.first()); } /// If we are currently in a branch, we need to modify constrain instructions /// to multiply them by the branch's condition (see optimization #1 in the module comment). + /// + /// `previous_allocate_result` should only be set to the result of an allocate instruction + /// if that instruction was the instruction immediately previous to this one - if there are + /// any instructions in between it should be None. 
fn handle_instruction_side_effects( &mut self, instruction: Instruction, call_stack: CallStack, + previous_allocate_result: Option, ) -> Instruction { if let Some(condition) = self.get_last_condition() { match instruction { @@ -779,8 +650,32 @@ impl<'f> Context<'f> { Instruction::Constrain(lhs, rhs, message) } Instruction::Store { address, value } => { - self.remember_store(address, value, call_stack); - Instruction::Store { address, value } + // If this instruction immediately follows an allocate, and stores to that + // address there is no previous value to load and we don't need a merge anyway. + if Some(address) == previous_allocate_result { + Instruction::Store { address, value } + } else { + // Instead of storing `value`, store `if condition { value } else { previous_value }` + let typ = self.inserter.function.dfg.type_of_value(value); + let load = Instruction::Load { address }; + let previous_value = self + .insert_instruction_with_typevars( + load, + Some(vec![typ]), + call_stack.clone(), + ) + .first(); + + let instruction = Instruction::IfElse { + then_condition: condition, + then_value: value, + + else_value: previous_value, + }; + + let updated_value = self.insert_instruction(instruction, call_stack); + Instruction::Store { address, value: updated_value } + } } Instruction::RangeCheck { value, max_bit_size, assert_message } => { // Replace value with `value * predicate` to zero out value when predicate is inactive. @@ -826,8 +721,8 @@ impl<'f> Context<'f> { } Value::Intrinsic(Intrinsic::BlackBox(BlackBoxFunc::MultiScalarMul)) => { let points_array_idx = if matches!( - self.inserter.function.dfg[arguments[0]], - Value::Array { .. } + self.inserter.function.dfg.type_of_value(arguments[0]), + Type::Array { .. 
} ) { 0 } else { @@ -835,15 +730,15 @@ impl<'f> Context<'f> { // which means the array is the second argument 1 }; - let (array_with_predicate, array_typ) = self - .apply_predicate_to_msm_argument( - arguments[points_array_idx], - condition, - call_stack.clone(), - ); + let (elements, typ) = self.apply_predicate_to_msm_argument( + arguments[points_array_idx], + condition, + call_stack.clone(), + ); - arguments[points_array_idx] = - self.inserter.function.dfg.make_array(array_with_predicate, array_typ); + let instruction = Instruction::MakeArray { elements, typ }; + let array = self.insert_instruction(instruction, call_stack); + arguments[points_array_idx] = array; Instruction::Call { func, arguments } } _ => Instruction::Call { func, arguments }, @@ -866,7 +761,7 @@ impl<'f> Context<'f> { ) -> (im::Vector, Type) { let array_typ; let mut array_with_predicate = im::Vector::new(); - if let Value::Array { array, typ } = &self.inserter.function.dfg[argument] { + if let Some((array, typ)) = &self.inserter.function.dfg.get_array_constant(argument) { array_typ = typ.clone(); for (i, value) in array.clone().iter().enumerate() { if i % 3 == 2 { @@ -902,22 +797,10 @@ impl<'f> Context<'f> { call_stack, ) } - - fn undo_stores_in_then_branch(&mut self, store_values: &HashMap) { - for (address, store) in store_values { - let address = *address; - let value = store.old_value; - let instruction = Instruction::Store { address, value }; - // Considering the location of undoing a store to be the same as the original store. 
- self.insert_instruction_with_typevars(instruction, None, store.call_stack.clone()); - } - } } #[cfg(test)] mod test { - use std::sync::Arc; - use acvm::acir::AcirField; use crate::ssa::{ @@ -958,11 +841,9 @@ mod test { v1 = not v0 enable_side_effects u1 1 v3 = cast v0 as Field - v4 = cast v1 as Field - v6 = mul v3, Field 3 - v8 = mul v4, Field 4 - v9 = add v6, v8 - return v9 + v5 = mul v3, Field -1 + v7 = add Field 4, v5 + return v7 } "; @@ -1022,16 +903,13 @@ mod test { b0(v0: u1, v1: &mut Field): enable_side_effects v0 v2 = load v1 -> Field - store Field 5 at v1 - v4 = not v0 - store v2 at v1 + v3 = cast v0 as Field + v5 = sub Field 5, v2 + v6 = mul v3, v5 + v7 = add v2, v6 + store v7 at v1 + v8 = not v0 enable_side_effects u1 1 - v6 = cast v0 as Field - v7 = cast v4 as Field - v8 = mul v6, Field 5 - v9 = mul v7, v2 - v10 = add v8, v9 - store v10 at v1 return } "; @@ -1062,19 +940,20 @@ mod test { b0(v0: u1, v1: &mut Field): enable_side_effects v0 v2 = load v1 -> Field - store Field 5 at v1 - v4 = not v0 - store v2 at v1 - enable_side_effects v4 - v5 = load v1 -> Field - store Field 6 at v1 + v3 = cast v0 as Field + v5 = sub Field 5, v2 + v6 = mul v3, v5 + v7 = add v2, v6 + store v7 at v1 + v8 = not v0 + enable_side_effects v8 + v9 = load v1 -> Field + v10 = cast v8 as Field + v12 = sub Field 6, v9 + v13 = mul v10, v12 + v14 = add v9, v13 + store v14 at v1 enable_side_effects u1 1 - v8 = cast v0 as Field - v9 = cast v4 as Field - v10 = mul v8, Field 5 - v11 = mul v9, Field 6 - v12 = add v10, v11 - store v12 at v1 return } "; @@ -1242,7 +1121,7 @@ mod test { }; let merged_values = get_all_constants_reachable_from_instruction(&main.dfg, ret); - assert_eq!(merged_values, vec![3, 5, 6]); + assert_eq!(merged_values, vec![1, 3, 5, 6]); } #[test] @@ -1259,56 +1138,41 @@ mod test { // }; // } // - // // Translates to the following before the flattening pass: - // fn main f2 { - // b0(v0: u1): - // jmpif v0 then: b1, else: b2 - // b1(): - // v2 = allocate - // store 
Field 0 at v2 - // v4 = load v2 - // jmp b2() - // b2(): - // return - // } + // Translates to the following before the flattening pass: + let src = " + acir(inline) fn main f0 { + b0(v0: u1): + jmpif v0 then: b1, else: b2 + b1(): + v1 = allocate -> &mut Field + store Field 0 at v1 + v3 = load v1 -> Field + jmp b2() + b2(): + return + }"; // The bug is that the flattening pass previously inserted a load // before the first store to allocate, which loaded an uninitialized value. // In this test we assert the ordering is strictly Allocate then Store then Load. - let main_id = Id::test_new(0); - let mut builder = FunctionBuilder::new("main".into(), main_id); - - let b1 = builder.insert_block(); - let b2 = builder.insert_block(); - - let v0 = builder.add_parameter(Type::bool()); - builder.terminate_with_jmpif(v0, b1, b2); - - builder.switch_to_block(b1); - let v2 = builder.insert_allocate(Type::field()); - let zero = builder.field_constant(0u128); - builder.insert_store(v2, zero); - let _v4 = builder.insert_load(v2, Type::field()); - builder.terminate_with_jmp(b2, vec![]); - - builder.switch_to_block(b2); - builder.terminate_with_return(vec![]); - - let ssa = builder.finish().flatten_cfg(); - let main = ssa.main(); + let ssa = Ssa::from_str(src).unwrap(); + let flattened_ssa = ssa.flatten_cfg(); // Now assert that there is not a load between the allocate and its first store // The Expected IR is: - // - // fn main f2 { - // b0(v0: u1): - // enable_side_effects v0 - // v6 = allocate - // store Field 0 at v6 - // v7 = load v6 - // v8 = not v0 - // enable_side_effects u1 1 - // return - // } + let expected = " + acir(inline) fn main f0 { + b0(v0: u1): + enable_side_effects v0 + v1 = allocate -> &mut Field + store Field 0 at v1 + v3 = load v1 -> Field + v4 = not v0 + enable_side_effects u1 1 + return + } + "; + + let main = flattened_ssa.main(); let instructions = main.dfg[main.entry_block()].instructions(); let find_instruction = |predicate: fn(&Instruction) -> bool| { @@ 
-1321,6 +1185,8 @@ mod test { assert!(allocate_index < store_index); assert!(store_index < load_index); + + assert_normalized_ssa_equals(flattened_ssa, expected); } /// Work backwards from an instruction to find all the constant values @@ -1393,71 +1259,70 @@ mod test { fn should_not_merge_incorrectly_to_false() { // Regression test for #1792 // Tests that it does not simplify a true constraint an always-false constraint - // acir(inline) fn main f1 { - // b0(v0: [u8; 2]): - // v5 = array_get v0, index u8 0 - // v6 = cast v5 as u32 - // v8 = truncate v6 to 1 bits, max_bit_size: 32 - // v9 = cast v8 as u1 - // v10 = allocate - // store u8 0 at v10 - // jmpif v9 then: b2, else: b3 - // b2(): - // v12 = cast v5 as Field - // v13 = add v12, Field 1 - // store v13 at v10 - // jmp b4() - // b4(): - // constrain v9 == u1 1 - // return - // b3(): - // store u8 0 at v10 - // jmp b4() - // } - let main_id = Id::test_new(1); - let mut builder = FunctionBuilder::new("main".into(), main_id); - - builder.insert_block(); // b0 - let b1 = builder.insert_block(); - let b2 = builder.insert_block(); - let b3 = builder.insert_block(); - - let element_type = Arc::new(vec![Type::unsigned(8)]); - let array_type = Type::Array(element_type.clone(), 2); - let array = builder.add_parameter(array_type); - - let zero = builder.numeric_constant(0_u128, Type::unsigned(8)); - - let v5 = builder.insert_array_get(array, zero, Type::unsigned(8)); - let v6 = builder.insert_cast(v5, Type::unsigned(32)); - let i_two = builder.numeric_constant(2_u128, Type::unsigned(32)); - let v8 = builder.insert_binary(v6, BinaryOp::Mod, i_two); - let v9 = builder.insert_cast(v8, Type::bool()); - - let v10 = builder.insert_allocate(Type::field()); - builder.insert_store(v10, zero); - - builder.terminate_with_jmpif(v9, b1, b2); - - builder.switch_to_block(b1); - let one = builder.field_constant(1_u128); - let v5b = builder.insert_cast(v5, Type::field()); - let v13: Id = builder.insert_binary(v5b, BinaryOp::Add, one); - 
let v14 = builder.insert_cast(v13, Type::unsigned(8)); - builder.insert_store(v10, v14); - builder.terminate_with_jmp(b3, vec![]); + let src = " + acir(inline) fn main f0 { + b0(v0: [u8; 2]): + v2 = array_get v0, index u8 0 -> u8 + v3 = cast v2 as u32 + v4 = truncate v3 to 1 bits, max_bit_size: 32 + v5 = cast v4 as u1 + v6 = allocate -> &mut Field + store u8 0 at v6 + jmpif v5 then: b2, else: b1 + b2(): + v7 = cast v2 as Field + v9 = add v7, Field 1 + v10 = cast v9 as u8 + store v10 at v6 + jmp b3() + b3(): + constrain v5 == u1 1 + return + b1(): + store u8 0 at v6 + jmp b3() + } + "; - builder.switch_to_block(b2); - builder.insert_store(v10, zero); - builder.terminate_with_jmp(b3, vec![]); + let ssa = Ssa::from_str(src).unwrap(); - builder.switch_to_block(b3); - let v_true = builder.numeric_constant(true, Type::bool()); - let v12 = builder.insert_binary(v9, BinaryOp::Eq, v_true); - builder.insert_constrain(v12, v_true, None); - builder.terminate_with_return(vec![]); + let expected = " + acir(inline) fn main f0 { + b0(v0: [u8; 2]): + v2 = array_get v0, index u8 0 -> u8 + v3 = cast v2 as u32 + v4 = truncate v3 to 1 bits, max_bit_size: 32 + v5 = cast v4 as u1 + v6 = allocate -> &mut Field + store u8 0 at v6 + enable_side_effects v5 + v7 = cast v2 as Field + v9 = add v7, Field 1 + v10 = cast v9 as u8 + v11 = load v6 -> u8 + v12 = cast v4 as Field + v13 = cast v11 as Field + v14 = sub v9, v13 + v15 = mul v12, v14 + v16 = add v13, v15 + v17 = cast v16 as u8 + store v17 at v6 + v18 = not v5 + enable_side_effects v18 + v19 = load v6 -> u8 + v20 = cast v18 as Field + v21 = cast v19 as Field + v23 = sub Field 0, v21 + v24 = mul v20, v23 + v25 = add v21, v24 + v26 = cast v25 as u8 + store v26 at v6 + enable_side_effects u1 1 + constrain v5 == u1 1 + return + } + "; - let ssa = builder.finish(); let flattened_ssa = ssa.flatten_cfg(); let main = flattened_ssa.main(); @@ -1474,6 +1339,8 @@ mod test { } } assert_eq!(constrain_count, 1); + + 
assert_normalized_ssa_equals(flattened_ssa, expected); } #[test] diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs index ef208588718..ddc8b0bfe6b 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs @@ -26,25 +26,23 @@ impl<'a> SliceCapacityTracker<'a> { ) { match instruction { Instruction::ArrayGet { array, .. } => { - let array_typ = self.dfg.type_of_value(*array); - let array_value = &self.dfg[*array]; - if matches!(array_value, Value::Array { .. }) && array_typ.contains_slice_element() - { - // Initial insertion into the slice sizes map - // Any other insertions should only occur if the value is already - // a part of the map. - self.compute_slice_capacity(*array, slice_sizes); + if let Some((_, array_type)) = self.dfg.get_array_constant(*array) { + if array_type.contains_slice_element() { + // Initial insertion into the slice sizes map + // Any other insertions should only occur if the value is already + // a part of the map. + self.compute_slice_capacity(*array, slice_sizes); + } } } Instruction::ArraySet { array, value, .. } => { - let array_typ = self.dfg.type_of_value(*array); - let array_value = &self.dfg[*array]; - if matches!(array_value, Value::Array { .. }) && array_typ.contains_slice_element() - { - // Initial insertion into the slice sizes map - // Any other insertions should only occur if the value is already - // a part of the map. - self.compute_slice_capacity(*array, slice_sizes); + if let Some((_, array_type)) = self.dfg.get_array_constant(*array) { + if array_type.contains_slice_element() { + // Initial insertion into the slice sizes map + // Any other insertions should only occur if the value is already + // a part of the map. 
+ self.compute_slice_capacity(*array, slice_sizes); + } } let value_typ = self.dfg.type_of_value(*value); @@ -161,7 +159,7 @@ impl<'a> SliceCapacityTracker<'a> { array_id: ValueId, slice_sizes: &mut HashMap, ) { - if let Value::Array { array, typ } = &self.dfg[array_id] { + if let Some((array, typ)) = self.dfg.get_array_constant(array_id) { // Compiler sanity check assert!(!typ.is_nested_slice(), "ICE: Nested slices are not allowed and should not have reached the flattening pass of SSA"); if let Type::Slice(_) = typ { diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs index 75ee57dd4fa..8ea26d4e96d 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs @@ -45,7 +45,7 @@ impl<'a> ValueMerger<'a> { /// Merge two values a and b from separate basic blocks to a single value. /// If these two values are numeric, the result will be - /// `then_condition * then_value + else_condition * else_value`. + /// `then_condition * (then_value - else_value) + else_value`. /// Otherwise, if the values being merged are arrays, a new array will be made /// recursively from combining each element of both input arrays. 
/// @@ -54,7 +54,6 @@ impl<'a> ValueMerger<'a> { pub(crate) fn merge_values( &mut self, then_condition: ValueId, - else_condition: ValueId, then_value: ValueId, else_value: ValueId, ) -> ValueId { @@ -70,15 +69,14 @@ impl<'a> ValueMerger<'a> { self.dfg, self.block, then_condition, - else_condition, then_value, else_value, ), typ @ Type::Array(_, _) => { - self.merge_array_values(typ, then_condition, else_condition, then_value, else_value) + self.merge_array_values(typ, then_condition, then_value, else_value) } typ @ Type::Slice(_) => { - self.merge_slice_values(typ, then_condition, else_condition, then_value, else_value) + self.merge_slice_values(typ, then_condition, then_value, else_value) } Type::Reference(_) => panic!("Cannot return references from an if expression"), Type::Function => panic!("Cannot return functions from an if expression"), @@ -86,12 +84,11 @@ impl<'a> ValueMerger<'a> { } /// Merge two numeric values a and b from separate basic blocks to a single value. This - /// function would return the result of `if c { a } else { b }` as `c*a + (!c)*b`. + /// function would return the result of `if c { a } else { b }` as `c * (a-b) + b`. pub(crate) fn merge_numeric_values( dfg: &mut DataFlowGraph, block: BasicBlockId, then_condition: ValueId, - else_condition: ValueId, then_value: ValueId, else_value: ValueId, ) -> ValueId { @@ -114,31 +111,38 @@ impl<'a> ValueMerger<'a> { // We must cast the bool conditions to the actual numeric type used by each value. 
let then_condition = dfg .insert_instruction_and_results( - Instruction::Cast(then_condition, then_type), - block, - None, - call_stack.clone(), - ) - .first(); - let else_condition = dfg - .insert_instruction_and_results( - Instruction::Cast(else_condition, else_type), + Instruction::Cast(then_condition, Type::field()), block, None, call_stack.clone(), ) .first(); - let mul = Instruction::binary(BinaryOp::Mul, then_condition, then_value); - let then_value = - dfg.insert_instruction_and_results(mul, block, None, call_stack.clone()).first(); + let then_field = Instruction::Cast(then_value, Type::field()); + let then_field_value = + dfg.insert_instruction_and_results(then_field, block, None, call_stack.clone()).first(); + + let else_field = Instruction::Cast(else_value, Type::field()); + let else_field_value = + dfg.insert_instruction_and_results(else_field, block, None, call_stack.clone()).first(); - let mul = Instruction::binary(BinaryOp::Mul, else_condition, else_value); - let else_value = - dfg.insert_instruction_and_results(mul, block, None, call_stack.clone()).first(); + let diff = Instruction::binary(BinaryOp::Sub, then_field_value, else_field_value); + let diff_value = + dfg.insert_instruction_and_results(diff, block, None, call_stack.clone()).first(); - let add = Instruction::binary(BinaryOp::Add, then_value, else_value); - dfg.insert_instruction_and_results(add, block, None, call_stack).first() + let conditional_diff = Instruction::binary(BinaryOp::Mul, then_condition, diff_value); + let conditional_diff_value = dfg + .insert_instruction_and_results(conditional_diff, block, None, call_stack.clone()) + .first(); + + let merged_field = + Instruction::binary(BinaryOp::Add, else_field_value, conditional_diff_value); + let merged_field_value = dfg + .insert_instruction_and_results(merged_field, block, None, call_stack.clone()) + .first(); + + let merged = Instruction::Cast(merged_field_value, then_type); + dfg.insert_instruction_and_results(merged, block, None, 
call_stack).first() } /// Given an if expression that returns an array: `if c { array1 } else { array2 }`, @@ -148,7 +152,6 @@ impl<'a> ValueMerger<'a> { &mut self, typ: Type, then_condition: ValueId, - else_condition: ValueId, then_value: ValueId, else_value: ValueId, ) -> ValueId { @@ -163,7 +166,6 @@ impl<'a> ValueMerger<'a> { if let Some(result) = self.try_merge_only_changed_indices( then_condition, - else_condition, then_value, else_value, actual_length, @@ -193,23 +195,19 @@ impl<'a> ValueMerger<'a> { let then_element = get_element(then_value, typevars.clone()); let else_element = get_element(else_value, typevars); - merged.push_back(self.merge_values( - then_condition, - else_condition, - then_element, - else_element, - )); + merged.push_back(self.merge_values(then_condition, then_element, else_element)); } } - self.dfg.make_array(merged, typ) + let instruction = Instruction::MakeArray { elements: merged, typ }; + let call_stack = self.call_stack.clone(); + self.dfg.insert_instruction_and_results(instruction, self.block, None, call_stack).first() } fn merge_slice_values( &mut self, typ: Type, then_condition: ValueId, - else_condition: ValueId, then_value_id: ValueId, else_value_id: ValueId, ) -> ValueId { @@ -267,16 +265,13 @@ impl<'a> ValueMerger<'a> { let else_element = get_element(else_value_id, typevars, else_len * element_types.len()); - merged.push_back(self.merge_values( - then_condition, - else_condition, - then_element, - else_element, - )); + merged.push_back(self.merge_values(then_condition, then_element, else_element)); } } - self.dfg.make_array(merged, typ) + let instruction = Instruction::MakeArray { elements: merged, typ }; + let call_stack = self.call_stack.clone(); + self.dfg.insert_instruction_and_results(instruction, self.block, None, call_stack).first() } /// Construct a dummy value to be attached to the smaller of two slices being merged. 
@@ -296,7 +291,11 @@ impl<'a> ValueMerger<'a> { array.push_back(self.make_slice_dummy_data(typ)); } } - self.dfg.make_array(array, typ.clone()) + let instruction = Instruction::MakeArray { elements: array, typ: typ.clone() }; + let call_stack = self.call_stack.clone(); + self.dfg + .insert_instruction_and_results(instruction, self.block, None, call_stack) + .first() } Type::Slice(_) => { // TODO(#3188): Need to update flattening to use true user facing length of slices @@ -315,7 +314,6 @@ impl<'a> ValueMerger<'a> { fn try_merge_only_changed_indices( &mut self, then_condition: ValueId, - else_condition: ValueId, then_value: ValueId, else_value: ValueId, array_length: usize, @@ -399,8 +397,7 @@ impl<'a> ValueMerger<'a> { let then_element = get_element(then_value, typevars.clone()); let else_element = get_element(else_value, typevars); - let value = - self.merge_values(then_condition, else_condition, then_element, else_element); + let value = self.merge_values(then_condition, then_element, else_element); array = self.insert_array_set(array, index, value, Some(condition)).first(); } diff --git a/compiler/noirc_evaluator/src/ssa/opt/inlining.rs b/compiler/noirc_evaluator/src/ssa/opt/inlining.rs index 2eb0f2eda0f..f91487fd73e 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/inlining.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/inlining.rs @@ -476,10 +476,6 @@ impl<'function> PerFunctionContext<'function> { Value::ForeignFunction(function) => { self.context.builder.import_foreign_function(function) } - Value::Array { array, typ } => { - let elements = array.iter().map(|value| self.translate_value(*value)).collect(); - self.context.builder.array_constant(elements, typ.clone()) - } }; self.values.insert(id, new_value); diff --git a/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs b/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs index a052abc5e16..77133d7d88d 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs 
@@ -171,9 +171,7 @@ impl<'f> PerFunctionContext<'f> { let block_params = self.inserter.function.dfg.block_parameters(*block_id); per_func_block_params.extend(block_params.iter()); let terminator = self.inserter.function.dfg[*block_id].unwrap_terminator(); - terminator.for_each_value(|value| { - self.recursively_add_values(value, &mut all_terminator_values); - }); + terminator.for_each_value(|value| all_terminator_values.insert(value)); } // If we never load from an address within a function we can remove all stores to that address. @@ -268,15 +266,6 @@ impl<'f> PerFunctionContext<'f> { .collect() } - fn recursively_add_values(&self, value: ValueId, set: &mut HashSet) { - set.insert(value); - if let Some((elements, _)) = self.inserter.function.dfg.get_array_constant(value) { - for array_element in elements { - self.recursively_add_values(array_element, set); - } - } - } - /// The value of each reference at the start of the given block is the unification /// of the value of the same reference at the end of its predecessor blocks. fn find_starting_references(&mut self, block: BasicBlockId) -> Block { @@ -426,13 +415,13 @@ impl<'f> PerFunctionContext<'f> { let address = self.inserter.function.dfg.resolve(*address); let value = self.inserter.function.dfg.resolve(*value); - self.check_array_aliasing(references, value); - + // FIXME: This causes errors in the sha256 tests + // // If there was another store to this instruction without any (unremoved) loads or // function calls in-between, we can remove the previous store. 
- if let Some(last_store) = references.last_stores.get(&address) { - self.instructions_to_remove.insert(*last_store); - } + // if let Some(last_store) = references.last_stores.get(&address) { + // self.instructions_to_remove.insert(*last_store); + // } if self.inserter.function.dfg.value_is_reference(value) { if let Some(expression) = references.expressions.get(&value) { @@ -512,24 +501,22 @@ impl<'f> PerFunctionContext<'f> { } self.mark_all_unknown(arguments, references); } - _ => (), - } - } - - /// If `array` is an array constant that contains reference types, then insert each element - /// as a potential alias to the array itself. - fn check_array_aliasing(&self, references: &mut Block, array: ValueId) { - if let Some((elements, typ)) = self.inserter.function.dfg.get_array_constant(array) { - if Self::contains_references(&typ) { - // TODO: Check if type directly holds references or holds arrays that hold references - let expr = Expression::ArrayElement(Box::new(Expression::Other(array))); - references.expressions.insert(array, expr.clone()); - let aliases = references.aliases.entry(expr).or_default(); - - for element in elements { - aliases.insert(element); + Instruction::MakeArray { elements, typ } => { + // If `array` is an array constant that contains reference types, then insert each element + // as a potential alias to the array itself. 
+ if Self::contains_references(typ) { + let array = self.inserter.function.dfg.instruction_results(instruction)[0]; + + let expr = Expression::ArrayElement(Box::new(Expression::Other(array))); + references.expressions.insert(array, expr.clone()); + let aliases = references.aliases.entry(expr).or_default(); + + for element in elements { + aliases.insert(*element); + } } } + _ => (), } } @@ -634,10 +621,11 @@ mod tests { // fn func() { // b0(): // v0 = allocate - // store [Field 1, Field 2] in v0 - // v1 = load v0 - // v2 = array_get v1, index 1 - // return v2 + // v1 = make_array [Field 1, Field 2] + // store v1 in v0 + // v2 = load v0 + // v3 = array_get v2, index 1 + // return v3 // } let func_id = Id::test_new(0); @@ -648,12 +636,12 @@ mod tests { let element_type = Arc::new(vec![Type::field()]); let array_type = Type::Array(element_type, 2); - let array = builder.array_constant(vector![one, two], array_type.clone()); + let v1 = builder.insert_make_array(vector![one, two], array_type.clone()); - builder.insert_store(v0, array); - let v1 = builder.insert_load(v0, array_type); - let v2 = builder.insert_array_get(v1, one, Type::field()); - builder.terminate_with_return(vec![v2]); + builder.insert_store(v0, v1); + let v2 = builder.insert_load(v0, array_type); + let v3 = builder.insert_array_get(v2, one, Type::field()); + builder.terminate_with_return(vec![v3]); let ssa = builder.finish().mem2reg().fold_constants(); @@ -908,16 +896,19 @@ mod tests { // We would need to track whether the store where `v9` is the store value gets removed to know whether // to remove it. assert_eq!(count_stores(main.entry_block(), &main.dfg), 1); + // The first store in b1 is removed since there is another store to the same reference // in the same block, and the store is not needed before the later store. // The rest of the stores are also removed as no loads are done within any blocks // to the stored values. 
- assert_eq!(count_stores(b1, &main.dfg), 0); + // + // NOTE: This store is not removed due to the FIXME when handling Instruction::Store. + assert_eq!(count_stores(b1, &main.dfg), 1); let b1_instructions = main.dfg[b1].instructions(); - // We expect the last eq to be optimized out - assert_eq!(b1_instructions.len(), 0); + // We expect the last eq to be optimized out, only the store from above remains + assert_eq!(b1_instructions.len(), 1); } #[test] diff --git a/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs b/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs index 6914bf87c5d..a5b60fb5fcd 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs @@ -179,19 +179,6 @@ impl IdMaps { Value::NumericConstant { constant, typ } => { new_function.dfg.make_constant(*constant, typ.clone()) } - Value::Array { array, typ } => { - if let Some(value) = self.values.get(&old_value) { - return *value; - } - - let array = array - .iter() - .map(|value| self.map_value(new_function, old_function, *value)) - .collect(); - let new_value = new_function.dfg.make_array(array, typ.clone()); - self.values.insert(old_value, new_value); - new_value - } Value::Intrinsic(intrinsic) => new_function.dfg.import_intrinsic(*intrinsic), Value::ForeignFunction(name) => new_function.dfg.import_foreign_function(name), } diff --git a/compiler/noirc_evaluator/src/ssa/opt/rc.rs b/compiler/noirc_evaluator/src/ssa/opt/rc.rs index c3606ac4311..ffe4ada39b7 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/rc.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/rc.rs @@ -197,7 +197,8 @@ mod test { // inc_rc v0 // inc_rc v0 // dec_rc v0 - // return [v0] + // v1 = make_array [v0] + // return v1 // } let main_id = Id::test_new(0); let mut builder = FunctionBuilder::new("foo".into(), main_id); @@ -211,8 +212,8 @@ mod test { builder.insert_dec_rc(v0); let outer_array_type = Type::Array(Arc::new(vec![inner_array_type]), 
1); - let array = builder.array_constant(vec![v0].into(), outer_array_type); - builder.terminate_with_return(vec![array]); + let v1 = builder.insert_make_array(vec![v0].into(), outer_array_type); + builder.terminate_with_return(vec![v1]); let ssa = builder.finish().remove_paired_rc(); let main = ssa.main(); diff --git a/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs b/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs index 012f6e6b27d..0517f9ef89f 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs @@ -145,7 +145,8 @@ impl Context { | RangeCheck { .. } | IfElse { .. } | IncrementRc { .. } - | DecrementRc { .. } => false, + | DecrementRc { .. } + | MakeArray { .. } => false, EnableSideEffectsIf { .. } | ArrayGet { .. } diff --git a/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs b/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs index c387e0b6234..8076bc3cc99 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs @@ -66,10 +66,9 @@ impl Context { for instruction in instructions { match &function.dfg[instruction] { - Instruction::IfElse { then_condition, then_value, else_condition, else_value } => { + Instruction::IfElse { then_condition, then_value, else_value } => { let then_condition = *then_condition; let then_value = *then_value; - let else_condition = *else_condition; let else_value = *else_value; let typ = function.dfg.type_of_value(then_value); @@ -85,12 +84,7 @@ impl Context { call_stack, ); - let value = value_merger.merge_values( - then_condition, - else_condition, - then_value, - else_value, - ); + let value = value_merger.merge_values(then_condition, then_value, else_value); let _typ = function.dfg.type_of_value(value); let results = function.dfg.instruction_results(instruction); diff --git 
a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs index 267dc6a3c20..3ff0e630a69 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs @@ -27,7 +27,7 @@ use crate::{ dfg::{CallStack, DataFlowGraph}, dom::DominatorTree, function::{Function, RuntimeType}, - function_inserter::FunctionInserter, + function_inserter::{ArrayCache, FunctionInserter}, instruction::{Instruction, TerminatorInstruction}, post_order::PostOrder, value::ValueId, @@ -213,11 +213,21 @@ fn unroll_loop( ) -> Result<(), CallStack> { let mut unroll_into = get_pre_header(cfg, loop_); let mut jump_value = get_induction_variable(function, unroll_into)?; + let mut array_cache = Some(ArrayCache::default()); - while let Some(context) = unroll_loop_header(function, loop_, unroll_into, jump_value)? { - let (last_block, last_value) = context.unroll_loop_iteration(); - unroll_into = last_block; - jump_value = last_value; + while let Some(mut context) = unroll_loop_header(function, loop_, unroll_into, jump_value)? { + // The inserter's array cache must be explicitly enabled. This is to + // confirm that we're inserting in insertion order. This is true here since: + // 1. We have a fresh inserter for each loop + // 2. Each loop is unrolled in iteration order + // + // Within a loop we do not insert in insertion order. This is fine however since the + // array cache is buffered with a separate fresh_array_cache which collects arrays + // but does not deduplicate. When we later call `into_array_cache`, that will merge + // the fresh cache in with the old one so that each iteration of the loop can cache + // from previous iterations but not the current iteration. 
+ context.inserter.set_array_cache(array_cache, unroll_into); + (unroll_into, jump_value, array_cache) = context.unroll_loop_iteration(); } Ok(()) @@ -357,7 +367,7 @@ impl<'f> LoopIteration<'f> { /// It is expected the terminator instructions are set up to branch into an empty block /// for further unrolling. When the loop is finished this will need to be mutated to /// jump to the end of the loop instead. - fn unroll_loop_iteration(mut self) -> (BasicBlockId, ValueId) { + fn unroll_loop_iteration(mut self) -> (BasicBlockId, ValueId, Option) { let mut next_blocks = self.unroll_loop_block(); while let Some(block) = next_blocks.pop() { @@ -370,8 +380,11 @@ impl<'f> LoopIteration<'f> { } } - self.induction_value - .expect("Expected to find the induction variable by end of loop iteration") + let (end_block, induction_value) = self + .induction_value + .expect("Expected to find the induction variable by end of loop iteration"); + + (end_block, induction_value, self.inserter.into_array_cache()) } /// Unroll a single block in the current iteration of the loop diff --git a/compiler/noirc_evaluator/src/ssa/parser/ast.rs b/compiler/noirc_evaluator/src/ssa/parser/ast.rs index f8fe8c68a98..a34b7fd70d3 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/ast.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/ast.rs @@ -104,6 +104,11 @@ pub(crate) enum ParsedInstruction { value: ParsedValue, typ: Type, }, + MakeArray { + target: Identifier, + elements: Vec, + typ: Type, + }, Not { target: Identifier, value: ParsedValue, @@ -131,9 +136,8 @@ pub(crate) enum ParsedTerminator { Return(Vec), } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) enum ParsedValue { NumericConstant { constant: FieldElement, typ: Type }, - Array { values: Vec, typ: Type }, Variable(Identifier), } diff --git a/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs b/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs index 2a94a4fd1eb..9ca4f52cb14 100644 --- 
a/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs @@ -1,7 +1,5 @@ use std::collections::HashMap; -use im::Vector; - use crate::ssa::{ function_builder::FunctionBuilder, ir::{basic_block::BasicBlockId, function::FunctionId, value::ValueId}, @@ -213,6 +211,14 @@ impl Translator { let value = self.translate_value(value)?; self.builder.increment_array_reference_count(value); } + ParsedInstruction::MakeArray { target, elements, typ } => { + let elements = elements + .into_iter() + .map(|element| self.translate_value(element)) + .collect::>()?; + let value_id = self.builder.insert_make_array(elements, typ); + self.define_variable(target, value_id)?; + } ParsedInstruction::Load { target, value, typ } => { let value = self.translate_value(value)?; let value_id = self.builder.insert_load(value, typ); @@ -255,13 +261,6 @@ impl Translator { ParsedValue::NumericConstant { constant, typ } => { Ok(self.builder.numeric_constant(constant, typ)) } - ParsedValue::Array { values, typ } => { - let mut translated_values = Vector::new(); - for value in values { - translated_values.push_back(self.translate_value(value)?); - } - Ok(self.builder.array_constant(translated_values, typ)) - } ParsedValue::Variable(identifier) => self.lookup_variable(identifier), } } diff --git a/compiler/noirc_evaluator/src/ssa/parser/mod.rs b/compiler/noirc_evaluator/src/ssa/parser/mod.rs new file mode 100644 index 00000000000..2db2c636a8f --- /dev/null +++ b/compiler/noirc_evaluator/src/ssa/parser/mod.rs @@ -0,0 +1,897 @@ +use std::{ + fmt::{self, Debug, Formatter}, + sync::Arc, +}; + +use super::{ + ir::{instruction::BinaryOp, types::Type}, + Ssa, +}; + +use acvm::{AcirField, FieldElement}; +use ast::{ + Identifier, ParsedBlock, ParsedFunction, ParsedInstruction, ParsedParameter, ParsedSsa, + ParsedValue, +}; +use lexer::{Lexer, LexerError}; +use noirc_errors::Span; +use noirc_frontend::{monomorphization::ast::InlineType, token::IntType}; +use 
thiserror::Error; +use token::{Keyword, SpannedToken, Token}; + +use crate::ssa::{ir::function::RuntimeType, parser::ast::ParsedTerminator}; + +mod ast; +mod into_ssa; +mod lexer; +mod tests; +mod token; + +impl Ssa { + pub(crate) fn from_str(src: &str) -> Result { + let mut parser = + Parser::new(src).map_err(|err| SsaErrorWithSource::parse_error(err, src))?; + let parsed_ssa = + parser.parse_ssa().map_err(|err| SsaErrorWithSource::parse_error(err, src))?; + parsed_ssa.into_ssa().map_err(|error| SsaErrorWithSource { src: src.to_string(), error }) + } +} + +pub(crate) struct SsaErrorWithSource { + src: String, + error: SsaError, +} + +impl SsaErrorWithSource { + fn parse_error(error: ParserError, src: &str) -> Self { + Self { src: src.to_string(), error: SsaError::ParserError(error) } + } +} + +impl Debug for SsaErrorWithSource { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let span = self.error.span(); + + let mut byte: usize = 0; + for line in self.src.lines() { + let has_error = + byte <= span.start() as usize && span.end() as usize <= byte + line.len(); + if has_error { + writeln!(f)?; + } + + writeln!(f, "{}", line)?; + + if has_error { + let offset = span.start() as usize - byte; + write!(f, "{}", " ".repeat(offset))?; + writeln!(f, "{}", "^".repeat((span.end() - span.start()) as usize))?; + write!(f, "{}", " ".repeat(offset))?; + writeln!(f, "{}", self.error)?; + writeln!(f)?; + } + + byte += line.len() + 1; // "+ 1" for the newline + } + Ok(()) + } +} + +#[derive(Debug, Error)] +pub(crate) enum SsaError { + #[error("{0}")] + ParserError(ParserError), + #[error("Unknown variable '{0}'")] + UnknownVariable(Identifier), + #[error("Unknown block '{0}'")] + UnknownBlock(Identifier), + #[error("Unknown function '{0}'")] + UnknownFunction(Identifier), + #[error("Mismatched return values")] + MismatchedReturnValues { returns: Vec, expected: usize }, + #[error("Variable '{0}' already defined")] + VariableAlreadyDefined(Identifier), +} + +impl SsaError { 
+ fn span(&self) -> Span { + match self { + SsaError::ParserError(parser_error) => parser_error.span(), + SsaError::UnknownVariable(identifier) + | SsaError::UnknownBlock(identifier) + | SsaError::VariableAlreadyDefined(identifier) + | SsaError::UnknownFunction(identifier) => identifier.span, + SsaError::MismatchedReturnValues { returns, expected: _ } => returns[0].span, + } + } +} + +type ParseResult = Result; + +pub(crate) struct Parser<'a> { + lexer: Lexer<'a>, + token: SpannedToken, +} + +impl<'a> Parser<'a> { + pub(crate) fn new(source: &'a str) -> ParseResult { + let lexer = Lexer::new(source); + let mut parser = Self { lexer, token: eof_spanned_token() }; + parser.token = parser.read_token_internal()?; + Ok(parser) + } + + pub(crate) fn parse_ssa(&mut self) -> ParseResult { + let mut functions = Vec::new(); + while !self.at(Token::Eof) { + let function = self.parse_function()?; + functions.push(function); + } + Ok(ParsedSsa { functions }) + } + + fn parse_function(&mut self) -> ParseResult { + let runtime_type = self.parse_runtime_type()?; + self.eat_or_error(Token::Keyword(Keyword::Fn))?; + + let external_name = self.eat_ident_or_error()?; + let internal_name = self.eat_ident_or_error()?; + + self.eat_or_error(Token::LeftBrace)?; + + let blocks = self.parse_blocks()?; + + self.eat_or_error(Token::RightBrace)?; + + Ok(ParsedFunction { runtime_type, external_name, internal_name, blocks }) + } + + fn parse_runtime_type(&mut self) -> ParseResult { + let acir = if self.eat_keyword(Keyword::Acir)? { + true + } else if self.eat_keyword(Keyword::Brillig)? 
{ + false + } else { + return self.expected_one_of_tokens(&[ + Token::Keyword(Keyword::Acir), + Token::Keyword(Keyword::Brillig), + ]); + }; + + self.eat_or_error(Token::LeftParen)?; + let inline_type = self.parse_inline_type()?; + self.eat_or_error(Token::RightParen)?; + + if acir { + Ok(RuntimeType::Acir(inline_type)) + } else { + Ok(RuntimeType::Brillig(inline_type)) + } + } + + fn parse_inline_type(&mut self) -> ParseResult { + if self.eat_keyword(Keyword::Inline)? { + Ok(InlineType::Inline) + } else if self.eat_keyword(Keyword::InlineAlways)? { + Ok(InlineType::InlineAlways) + } else if self.eat_keyword(Keyword::Fold)? { + Ok(InlineType::Fold) + } else if self.eat_keyword(Keyword::NoPredicates)? { + Ok(InlineType::NoPredicates) + } else { + self.expected_one_of_tokens(&[ + Token::Keyword(Keyword::Inline), + Token::Keyword(Keyword::InlineAlways), + Token::Keyword(Keyword::Fold), + Token::Keyword(Keyword::NoPredicates), + ]) + } + } + + fn parse_blocks(&mut self) -> ParseResult> { + let mut blocks = Vec::new(); + while !self.at(Token::RightBrace) { + let block = self.parse_block()?; + blocks.push(block); + } + Ok(blocks) + } + + fn parse_block(&mut self) -> ParseResult { + let name = self.eat_ident_or_error()?; + self.eat_or_error(Token::LeftParen)?; + + let mut parameters = Vec::new(); + while !self.at(Token::RightParen) { + parameters.push(self.parse_parameter()?); + if !self.eat(Token::Comma)? 
{ + break; + } + } + + self.eat_or_error(Token::RightParen)?; + self.eat_or_error(Token::Colon)?; + + let instructions = self.parse_instructions()?; + let terminator = self.parse_terminator()?; + Ok(ParsedBlock { name, parameters, instructions, terminator }) + } + + fn parse_parameter(&mut self) -> ParseResult { + let identifier = self.eat_identifier_or_error()?; + self.eat_or_error(Token::Colon)?; + let typ = self.parse_type()?; + Ok(ParsedParameter { identifier, typ }) + } + + fn parse_instructions(&mut self) -> ParseResult> { + let mut instructions = Vec::new(); + while let Some(instruction) = self.parse_instruction()? { + instructions.push(instruction); + } + Ok(instructions) + } + + fn parse_instruction(&mut self) -> ParseResult> { + if let Some(instruction) = self.parse_call()? { + return Ok(Some(instruction)); + } + + if let Some(instruction) = self.parse_constrain()? { + return Ok(Some(instruction)); + } + + if let Some(instruction) = self.parse_decrement_rc()? { + return Ok(Some(instruction)); + } + + if let Some(instruction) = self.parse_enable_side_effects()? { + return Ok(Some(instruction)); + } + + if let Some(instruction) = self.parse_increment_rc()? { + return Ok(Some(instruction)); + } + + if let Some(instruction) = self.parse_range_check()? { + return Ok(Some(instruction)); + } + + if let Some(instruction) = self.parse_store()? { + return Ok(Some(instruction)); + } + + if let Some(target) = self.eat_identifier()? 
{ + return Ok(Some(self.parse_assignment(target)?)); + } + + Ok(None) + } + + fn eat_binary_op(&mut self) -> ParseResult> { + let op = match self.token.token() { + Token::Keyword(Keyword::Add) => BinaryOp::Add, + Token::Keyword(Keyword::Sub) => BinaryOp::Sub, + Token::Keyword(Keyword::Mul) => BinaryOp::Mul, + Token::Keyword(Keyword::Div) => BinaryOp::Div, + Token::Keyword(Keyword::Eq) => BinaryOp::Eq, + Token::Keyword(Keyword::Mod) => BinaryOp::Mod, + Token::Keyword(Keyword::Lt) => BinaryOp::Lt, + Token::Keyword(Keyword::And) => BinaryOp::And, + Token::Keyword(Keyword::Or) => BinaryOp::Or, + Token::Keyword(Keyword::Xor) => BinaryOp::Xor, + Token::Keyword(Keyword::Shl) => BinaryOp::Shl, + Token::Keyword(Keyword::Shr) => BinaryOp::Shr, + _ => return Ok(None), + }; + + self.bump()?; + + Ok(Some(op)) + } + + fn parse_call(&mut self) -> ParseResult> { + if !self.eat_keyword(Keyword::Call)? { + return Ok(None); + } + + let function = self.eat_identifier_or_error()?; + let arguments = self.parse_arguments()?; + Ok(Some(ParsedInstruction::Call { targets: vec![], function, arguments, types: vec![] })) + } + + fn parse_constrain(&mut self) -> ParseResult> { + if !self.eat_keyword(Keyword::Constrain)? { + return Ok(None); + } + + let lhs = self.parse_value_or_error()?; + self.eat_or_error(Token::Equal)?; + let rhs = self.parse_value_or_error()?; + Ok(Some(ParsedInstruction::Constrain { lhs, rhs })) + } + + fn parse_decrement_rc(&mut self) -> ParseResult> { + if !self.eat_keyword(Keyword::DecRc)? { + return Ok(None); + } + + let value = self.parse_value_or_error()?; + Ok(Some(ParsedInstruction::DecrementRc { value })) + } + + fn parse_enable_side_effects(&mut self) -> ParseResult> { + if !self.eat_keyword(Keyword::EnableSideEffects)? { + return Ok(None); + } + + let condition = self.parse_value_or_error()?; + Ok(Some(ParsedInstruction::EnableSideEffectsIf { condition })) + } + + fn parse_increment_rc(&mut self) -> ParseResult> { + if !self.eat_keyword(Keyword::IncRc)? 
{ + return Ok(None); + } + + let value = self.parse_value_or_error()?; + Ok(Some(ParsedInstruction::IncrementRc { value })) + } + + fn parse_range_check(&mut self) -> ParseResult> { + if !self.eat_keyword(Keyword::RangeCheck)? { + return Ok(None); + } + + let value = self.parse_value_or_error()?; + self.eat_or_error(Token::Keyword(Keyword::To))?; + let max_bit_size = self.eat_int_or_error()?.to_u128() as u32; + self.eat_or_error(Token::Keyword(Keyword::Bits))?; + Ok(Some(ParsedInstruction::RangeCheck { value, max_bit_size })) + } + + fn parse_store(&mut self) -> ParseResult> { + if !self.eat_keyword(Keyword::Store)? { + return Ok(None); + } + + let value = self.parse_value_or_error()?; + self.eat_or_error(Token::Keyword(Keyword::At))?; + let address = self.parse_value_or_error()?; + Ok(Some(ParsedInstruction::Store { address, value })) + } + + fn parse_assignment(&mut self, target: Identifier) -> ParseResult { + let mut targets = vec![target]; + + while self.eat(Token::Comma)? { + let target = self.eat_identifier_or_error()?; + targets.push(target); + } + + self.eat_or_error(Token::Assign)?; + + if self.eat_keyword(Keyword::Call)? { + let function = self.eat_identifier_or_error()?; + let arguments = self.parse_arguments()?; + self.eat_or_error(Token::Arrow)?; + let types = self.parse_types()?; + return Ok(ParsedInstruction::Call { targets, function, arguments, types }); + } + + if targets.len() > 1 { + return Err(ParserError::MultipleReturnValuesOnlyAllowedForCall { + second_target: targets[1].clone(), + }); + } + + let target = targets.remove(0); + + if self.eat_keyword(Keyword::Allocate)? { + self.eat_or_error(Token::Arrow)?; + let typ = self.parse_mutable_reference_type_or_error()?; + return Ok(ParsedInstruction::Allocate { target, typ }); + } + + if self.eat_keyword(Keyword::ArrayGet)? 
{ + let array = self.parse_value_or_error()?; + self.eat_or_error(Token::Comma)?; + self.eat_or_error(Token::Keyword(Keyword::Index))?; + let index = self.parse_value_or_error()?; + self.eat_or_error(Token::Arrow)?; + let element_type = self.parse_type()?; + return Ok(ParsedInstruction::ArrayGet { target, element_type, array, index }); + } + + if self.eat_keyword(Keyword::ArraySet)? { + let mutable = self.eat_keyword(Keyword::Mut)?; + let array = self.parse_value_or_error()?; + self.eat_or_error(Token::Comma)?; + self.eat_or_error(Token::Keyword(Keyword::Index))?; + let index = self.parse_value_or_error()?; + self.eat_or_error(Token::Comma)?; + self.eat_or_error(Token::Keyword(Keyword::Value))?; + let value = self.parse_value_or_error()?; + return Ok(ParsedInstruction::ArraySet { target, array, index, value, mutable }); + } + + if self.eat_keyword(Keyword::Cast)? { + let lhs = self.parse_value_or_error()?; + self.eat_or_error(Token::Keyword(Keyword::As))?; + let typ = self.parse_type()?; + return Ok(ParsedInstruction::Cast { target, lhs, typ }); + } + + if self.eat_keyword(Keyword::Load)? { + let value = self.parse_value_or_error()?; + self.eat_or_error(Token::Arrow)?; + let typ = self.parse_type()?; + return Ok(ParsedInstruction::Load { target, value, typ }); + } + + if self.eat_keyword(Keyword::MakeArray)? { + self.eat_or_error(Token::LeftBracket)?; + let elements = self.parse_comma_separated_values()?; + self.eat_or_error(Token::RightBracket)?; + self.eat_or_error(Token::Colon)?; + let typ = self.parse_type()?; + return Ok(ParsedInstruction::MakeArray { target, elements, typ }); + } + + if self.eat_keyword(Keyword::Not)? { + let value = self.parse_value_or_error()?; + return Ok(ParsedInstruction::Not { target, value }); + } + + if self.eat_keyword(Keyword::Truncate)? 
{ + let value = self.parse_value_or_error()?; + self.eat_or_error(Token::Keyword(Keyword::To))?; + let bit_size = self.eat_int_or_error()?.to_u128() as u32; + self.eat_or_error(Token::Keyword(Keyword::Bits))?; + self.eat_or_error(Token::Comma)?; + self.eat_or_error(Token::Keyword(Keyword::MaxBitSize))?; + self.eat_or_error(Token::Colon)?; + let max_bit_size = self.eat_int_or_error()?.to_u128() as u32; + return Ok(ParsedInstruction::Truncate { target, value, bit_size, max_bit_size }); + } + + if let Some(op) = self.eat_binary_op()? { + let lhs = self.parse_value_or_error()?; + self.eat_or_error(Token::Comma)?; + let rhs = self.parse_value_or_error()?; + return Ok(ParsedInstruction::BinaryOp { target, lhs, op, rhs }); + } + + self.expected_instruction_or_terminator() + } + + fn parse_terminator(&mut self) -> ParseResult { + if let Some(terminator) = self.parse_return()? { + return Ok(terminator); + } + + if let Some(terminator) = self.parse_jmp()? { + return Ok(terminator); + } + + if let Some(terminator) = self.parse_jmpif()? { + return Ok(terminator); + } + + self.expected_instruction_or_terminator() + } + + fn parse_return(&mut self) -> ParseResult> { + // Before advancing to the next token (after a potential return keyword), + // we check if a newline follows. This is because if we have this: + // + // return + // b1(): + // ... + // + // then unless we check for a newline we can't know if the return + // returns `b1` or not (we could check if a parentheses comes next, but + // that would require a look-ahead and, for the purpose of the SSA parser, + // it's just simpler to check if a newline follows) + let newline_follows = self.newline_follows(); + + if !self.eat_keyword(Keyword::Return)? { + return Ok(None); + } + + let values = + if newline_follows { Vec::new() } else { self.parse_comma_separated_values()? }; + Ok(Some(ParsedTerminator::Return(values))) + } + + fn parse_jmp(&mut self) -> ParseResult> { + if !self.eat_keyword(Keyword::Jmp)? 
{ + return Ok(None); + } + + let destination = self.eat_identifier_or_error()?; + let arguments = self.parse_arguments()?; + Ok(Some(ParsedTerminator::Jmp { destination, arguments })) + } + + fn parse_jmpif(&mut self) -> ParseResult> { + if !self.eat_keyword(Keyword::Jmpif)? { + return Ok(None); + } + + let condition = self.parse_value_or_error()?; + self.eat_or_error(Token::Keyword(Keyword::Then))?; + self.eat_or_error(Token::Colon)?; + let then_block = self.eat_identifier_or_error()?; + self.eat_or_error(Token::Comma)?; + self.eat_or_error(Token::Keyword(Keyword::Else))?; + self.eat_or_error(Token::Colon)?; + let else_block = self.eat_identifier_or_error()?; + + Ok(Some(ParsedTerminator::Jmpif { condition, then_block, else_block })) + } + + fn parse_arguments(&mut self) -> ParseResult> { + self.eat_or_error(Token::LeftParen)?; + let arguments = self.parse_comma_separated_values()?; + self.eat_or_error(Token::RightParen)?; + Ok(arguments) + } + + fn parse_comma_separated_values(&mut self) -> ParseResult> { + let mut values = Vec::new(); + while let Some(value) = self.parse_value()? { + values.push(value); + if !self.eat(Token::Comma)? { + break; + } + } + Ok(values) + } + + fn parse_value_or_error(&mut self) -> ParseResult { + if let Some(value) = self.parse_value()? { + Ok(value) + } else { + self.expected_value() + } + } + + fn parse_value(&mut self) -> ParseResult> { + if let Some(value) = self.parse_field_value()? { + return Ok(Some(value)); + } + + if let Some(value) = self.parse_int_value()? { + return Ok(Some(value)); + } + + if let Some(identifier) = self.eat_identifier()? { + return Ok(Some(ParsedValue::Variable(identifier))); + } + + Ok(None) + } + + fn parse_field_value(&mut self) -> ParseResult> { + if self.eat_keyword(Keyword::Field)? 
{ + let constant = self.eat_int_or_error()?; + Ok(Some(ParsedValue::NumericConstant { constant, typ: Type::field() })) + } else { + Ok(None) + } + } + + fn parse_int_value(&mut self) -> ParseResult> { + if let Some(int_type) = self.eat_int_type()? { + let constant = self.eat_int_or_error()?; + let typ = match int_type { + IntType::Unsigned(bit_size) => Type::unsigned(bit_size), + IntType::Signed(bit_size) => Type::signed(bit_size), + }; + Ok(Some(ParsedValue::NumericConstant { constant, typ })) + } else { + Ok(None) + } + } + + fn parse_types(&mut self) -> ParseResult> { + if self.eat(Token::LeftParen)? { + let types = self.parse_comma_separated_types()?; + self.eat_or_error(Token::RightParen)?; + Ok(types) + } else { + Ok(vec![self.parse_type()?]) + } + } + + fn parse_comma_separated_types(&mut self) -> ParseResult> { + let mut types = Vec::new(); + loop { + let typ = self.parse_type()?; + types.push(typ); + if !self.eat(Token::Comma)? { + break; + } + } + Ok(types) + } + + fn parse_type(&mut self) -> ParseResult { + if self.eat_keyword(Keyword::Bool)? { + return Ok(Type::bool()); + } + + if self.eat_keyword(Keyword::Field)? { + return Ok(Type::field()); + } + + if let Some(int_type) = self.eat_int_type()? { + return Ok(match int_type { + IntType::Unsigned(bit_size) => Type::unsigned(bit_size), + IntType::Signed(bit_size) => Type::signed(bit_size), + }); + } + + if self.eat(Token::LeftBracket)? { + let element_types = self.parse_types()?; + if self.eat(Token::Semicolon)? { + let length = self.eat_int_or_error()?; + self.eat_or_error(Token::RightBracket)?; + return Ok(Type::Array(Arc::new(element_types), length.to_u128() as usize)); + } else { + self.eat_or_error(Token::RightBracket)?; + return Ok(Type::Slice(Arc::new(element_types))); + } + } + + if let Some(typ) = self.parse_mutable_reference_type()? { + return Ok(Type::Reference(Arc::new(typ))); + } + + self.expected_type() + } + + /// Parses `&mut Type`, returns `Type` if `&mut` was found, errors otherwise. 
+ fn parse_mutable_reference_type_or_error(&mut self) -> ParseResult { + if let Some(typ) = self.parse_mutable_reference_type()? { + Ok(typ) + } else { + self.expected_token(Token::Ampersand) + } + } + + /// Parses `&mut Type`, returns `Some(Type)` if `&mut` was found, `None` otherwise. + fn parse_mutable_reference_type(&mut self) -> ParseResult> { + if !self.eat(Token::Ampersand)? { + return Ok(None); + } + + self.eat_or_error(Token::Keyword(Keyword::Mut))?; + let typ = self.parse_type()?; + Ok(Some(typ)) + } + + fn eat_identifier_or_error(&mut self) -> ParseResult { + if let Some(identifier) = self.eat_identifier()? { + Ok(identifier) + } else { + self.expected_identifier() + } + } + + fn eat_identifier(&mut self) -> ParseResult> { + let span = self.token.to_span(); + if let Some(name) = self.eat_ident()? { + Ok(Some(Identifier::new(name, span))) + } else { + Ok(None) + } + } + + fn eat_keyword(&mut self, keyword: Keyword) -> ParseResult { + if let Token::Keyword(kw) = self.token.token() { + if *kw == keyword { + self.bump()?; + Ok(true) + } else { + Ok(false) + } + } else { + Ok(false) + } + } + + fn eat_ident(&mut self) -> ParseResult> { + if !matches!(self.token.token(), Token::Ident(..)) { + return Ok(None); + } + + let token = self.bump()?; + match token.into_token() { + Token::Ident(ident) => Ok(Some(ident)), + _ => unreachable!(), + } + } + + fn eat_ident_or_error(&mut self) -> ParseResult { + if let Some(ident) = self.eat_ident()? { + Ok(ident) + } else { + self.expected_identifier() + } + } + + fn eat_int(&mut self) -> ParseResult> { + let negative = self.eat(Token::Dash)?; + + if matches!(self.token.token(), Token::Int(..)) { + let token = self.bump()?; + match token.into_token() { + Token::Int(mut int) => { + if negative { + int = -int; + } + Ok(Some(int)) + } + _ => unreachable!(), + } + } else { + Ok(None) + } + } + + fn eat_int_or_error(&mut self) -> ParseResult { + if let Some(int) = self.eat_int()? 
{ + Ok(int) + } else { + self.expected_int() + } + } + + fn eat_int_type(&mut self) -> ParseResult> { + let is_int_type = matches!(self.token.token(), Token::IntType(..)); + if is_int_type { + let token = self.bump()?; + match token.into_token() { + Token::IntType(int_type) => Ok(Some(int_type)), + _ => unreachable!(), + } + } else { + Ok(None) + } + } + + fn eat(&mut self, token: Token) -> ParseResult { + if self.token.token() == &token { + self.bump()?; + Ok(true) + } else { + Ok(false) + } + } + + fn eat_or_error(&mut self, token: Token) -> ParseResult<()> { + if self.eat(token.clone())? { + Ok(()) + } else { + self.expected_token(token) + } + } + + fn at(&self, token: Token) -> bool { + self.token.token() == &token + } + + fn at_keyword(&self, keyword: Keyword) -> bool { + self.at(Token::Keyword(keyword)) + } + + fn newline_follows(&self) -> bool { + self.lexer.newline_follows() + } + + fn bump(&mut self) -> ParseResult { + let token = self.read_token_internal()?; + Ok(std::mem::replace(&mut self.token, token)) + } + + fn read_token_internal(&mut self) -> ParseResult { + self.lexer.next_token().map_err(ParserError::LexerError) + } + + fn expected_instruction_or_terminator(&mut self) -> ParseResult { + Err(ParserError::ExpectedInstructionOrTerminator { + found: self.token.token().clone(), + span: self.token.to_span(), + }) + } + + fn expected_identifier(&mut self) -> ParseResult { + Err(ParserError::ExpectedIdentifier { + found: self.token.token().clone(), + span: self.token.to_span(), + }) + } + + fn expected_int(&mut self) -> ParseResult { + Err(ParserError::ExpectedInt { + found: self.token.token().clone(), + span: self.token.to_span(), + }) + } + + fn expected_type(&mut self) -> ParseResult { + Err(ParserError::ExpectedType { + found: self.token.token().clone(), + span: self.token.to_span(), + }) + } + + fn expected_value(&mut self) -> ParseResult { + Err(ParserError::ExpectedValue { + found: self.token.token().clone(), + span: self.token.to_span(), + }) + } 
+ + fn expected_token(&mut self, token: Token) -> ParseResult { + Err(ParserError::ExpectedToken { + token, + found: self.token.token().clone(), + span: self.token.to_span(), + }) + } + + fn expected_one_of_tokens(&mut self, tokens: &[Token]) -> ParseResult { + Err(ParserError::ExpectedOneOfTokens { + tokens: tokens.to_vec(), + found: self.token.token().clone(), + span: self.token.to_span(), + }) + } +} + +#[derive(Debug, Error)] +pub(crate) enum ParserError { + #[error("{0}")] + LexerError(LexerError), + #[error("Expected '{token}', found '{found}'")] + ExpectedToken { token: Token, found: Token, span: Span }, + #[error("Expected one of {tokens:?}, found {found}")] + ExpectedOneOfTokens { tokens: Vec, found: Token, span: Span }, + #[error("Expected an identifier, found '{found}'")] + ExpectedIdentifier { found: Token, span: Span }, + #[error("Expected an int, found '{found}'")] + ExpectedInt { found: Token, span: Span }, + #[error("Expected a type, found '{found}'")] + ExpectedType { found: Token, span: Span }, + #[error("Expected an instruction or terminator, found '{found}'")] + ExpectedInstructionOrTerminator { found: Token, span: Span }, + #[error("Expected a value, found '{found}'")] + ExpectedValue { found: Token, span: Span }, + #[error("Multiple return values only allowed for call")] + MultipleReturnValuesOnlyAllowedForCall { second_target: Identifier }, +} + +impl ParserError { + fn span(&self) -> Span { + match self { + ParserError::LexerError(err) => err.span(), + ParserError::ExpectedToken { span, .. } + | ParserError::ExpectedOneOfTokens { span, .. } + | ParserError::ExpectedIdentifier { span, .. } + | ParserError::ExpectedInt { span, .. } + | ParserError::ExpectedType { span, .. } + | ParserError::ExpectedInstructionOrTerminator { span, .. } + | ParserError::ExpectedValue { span, .. } => *span, + ParserError::MultipleReturnValuesOnlyAllowedForCall { second_target, .. 
} => { + second_target.span + } + } + } +} + +fn eof_spanned_token() -> SpannedToken { + SpannedToken::new(Token::Eof, Default::default()) +} diff --git a/compiler/noirc_evaluator/src/ssa/parser/tests.rs b/compiler/noirc_evaluator/src/ssa/parser/tests.rs index 9205353151e..f318e317473 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/tests.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/tests.rs @@ -54,33 +54,36 @@ fn test_return_integer() { } #[test] -fn test_return_array() { +fn test_make_array() { let src = " acir(inline) fn main f0 { b0(): - return [Field 1] of Field + v1 = make_array [Field 1] : [Field; 1] + return v1 } "; assert_ssa_roundtrip(src); } #[test] -fn test_return_empty_array() { +fn test_make_empty_array() { let src = " acir(inline) fn main f0 { b0(): - return [] of Field + v0 = make_array [] : [Field; 0] + return v0 } "; assert_ssa_roundtrip(src); } #[test] -fn test_return_composite_array() { +fn test_make_composite_array() { let src = " acir(inline) fn main f0 { b0(): - return [Field 1, Field 2] of (Field, Field) + v2 = make_array [Field 1, Field 2] : [(Field, Field); 1] + return v2 } "; assert_ssa_roundtrip(src); @@ -151,7 +154,9 @@ fn test_call_multiple_return_values() { } acir(inline) fn foo f1 { b0(): - return [Field 1, Field 2, Field 3] of Field, [Field 4] of Field + v3 = make_array [Field 1, Field 2, Field 3] : [Field; 3] + v5 = make_array [Field 4] : [Field; 1] + return v3, v5 } "; assert_ssa_roundtrip(src); diff --git a/compiler/noirc_evaluator/src/ssa/parser/token.rs b/compiler/noirc_evaluator/src/ssa/parser/token.rs index d648f58de41..f663879e899 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/token.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/token.rs @@ -136,6 +136,7 @@ pub(crate) enum Keyword { Jmpif, Load, Lt, + MakeArray, MaxBitSize, Mod, Mul, @@ -190,6 +191,7 @@ impl Keyword { "jmpif" => Keyword::Jmpif, "load" => Keyword::Load, "lt" => Keyword::Lt, + "make_array" => Keyword::MakeArray, "max_bit_size" => 
Keyword::MaxBitSize, "mod" => Keyword::Mod, "mul" => Keyword::Mul, @@ -248,6 +250,7 @@ impl Display for Keyword { Keyword::Jmpif => write!(f, "jmpif"), Keyword::Load => write!(f, "load"), Keyword::Lt => write!(f, "lt"), + Keyword::MakeArray => write!(f, "make_array"), Keyword::MaxBitSize => write!(f, "max_bit_size"), Keyword::Mod => write!(f, "mod"), Keyword::Mul => write!(f, "mul"), diff --git a/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs b/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs index 96e779482a4..df094ff3441 100644 --- a/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs @@ -291,7 +291,7 @@ impl<'a> FunctionContext<'a> { }); } - self.builder.array_constant(array, typ).into() + self.builder.insert_make_array(array, typ).into() } fn codegen_block(&mut self, block: &[Expression]) -> Result { diff --git a/compiler/noirc_frontend/src/elaborator/types.rs b/compiler/noirc_frontend/src/elaborator/types.rs index b296c4f1805..ae2bb942f48 100644 --- a/compiler/noirc_frontend/src/elaborator/types.rs +++ b/compiler/noirc_frontend/src/elaborator/types.rs @@ -1,4 +1,4 @@ -use std::{borrow::Cow, collections::BTreeMap, rc::Rc}; +use std::{borrow::Cow, rc::Rc}; use acvm::acir::AcirField; use iter_extended::vecmap; @@ -25,7 +25,7 @@ use crate::{ HirBinaryOp, HirCallExpression, HirExpression, HirLiteral, HirMemberAccess, HirMethodReference, HirPrefixExpression, TraitMethod, }, - function::{FuncMeta, Parameters}, + function::FuncMeta, stmt::HirStatement, traits::{NamedType, ResolvedTraitBound, Trait, TraitConstraint}, }, @@ -34,8 +34,7 @@ use crate::{ TraitImplKind, TraitMethodId, }, token::SecondaryAttribute, - Generics, Kind, ResolvedGeneric, Type, TypeBinding, TypeBindings, TypeVariable, - UnificationError, + Generics, Kind, ResolvedGeneric, Type, TypeBinding, TypeBindings, UnificationError, }; use super::{lints, path_resolution::PathResolutionItem, Elaborator}; @@ -1725,110 +1724,6 @@ impl<'context> 
Elaborator<'context> { } } - pub fn find_numeric_generics( - parameters: &Parameters, - return_type: &Type, - ) -> Vec<(String, TypeVariable)> { - let mut found = BTreeMap::new(); - for (_, parameter, _) in ¶meters.0 { - Self::find_numeric_generics_in_type(parameter, &mut found); - } - Self::find_numeric_generics_in_type(return_type, &mut found); - found.into_iter().collect() - } - - fn find_numeric_generics_in_type(typ: &Type, found: &mut BTreeMap) { - match typ { - Type::FieldElement - | Type::Integer(_, _) - | Type::Bool - | Type::Unit - | Type::Error - | Type::TypeVariable(_) - | Type::Constant(..) - | Type::NamedGeneric(_, _) - | Type::Quoted(_) - | Type::Forall(_, _) => (), - - Type::CheckedCast { from, to } => { - Self::find_numeric_generics_in_type(from, found); - Self::find_numeric_generics_in_type(to, found); - } - - Type::TraitAsType(_, _, args) => { - for arg in &args.ordered { - Self::find_numeric_generics_in_type(arg, found); - } - for arg in &args.named { - Self::find_numeric_generics_in_type(&arg.typ, found); - } - } - - Type::Array(length, element_type) => { - if let Type::NamedGeneric(type_variable, name) = length.as_ref() { - found.insert(name.to_string(), type_variable.clone()); - } - Self::find_numeric_generics_in_type(element_type, found); - } - - Type::Slice(element_type) => { - Self::find_numeric_generics_in_type(element_type, found); - } - - Type::Tuple(fields) => { - for field in fields { - Self::find_numeric_generics_in_type(field, found); - } - } - - Type::Function(parameters, return_type, _env, _unconstrained) => { - for parameter in parameters { - Self::find_numeric_generics_in_type(parameter, found); - } - Self::find_numeric_generics_in_type(return_type, found); - } - - Type::Struct(struct_type, generics) => { - for (i, generic) in generics.iter().enumerate() { - if let Type::NamedGeneric(type_variable, name) = generic { - if struct_type.borrow().generics[i].is_numeric() { - found.insert(name.to_string(), type_variable.clone()); - } - 
} else { - Self::find_numeric_generics_in_type(generic, found); - } - } - } - Type::Alias(alias, generics) => { - for (i, generic) in generics.iter().enumerate() { - if let Type::NamedGeneric(type_variable, name) = generic { - if alias.borrow().generics[i].is_numeric() { - found.insert(name.to_string(), type_variable.clone()); - } - } else { - Self::find_numeric_generics_in_type(generic, found); - } - } - } - Type::MutableReference(element) => Self::find_numeric_generics_in_type(element, found), - Type::String(length) => { - if let Type::NamedGeneric(type_variable, name) = length.as_ref() { - found.insert(name.to_string(), type_variable.clone()); - } - } - Type::FmtString(length, fields) => { - if let Type::NamedGeneric(type_variable, name) = length.as_ref() { - found.insert(name.to_string(), type_variable.clone()); - } - Self::find_numeric_generics_in_type(fields, found); - } - Type::InfixExpr(lhs, _op, rhs) => { - Self::find_numeric_generics_in_type(lhs, found); - Self::find_numeric_generics_in_type(rhs, found); - } - } - } - /// Push a type variable into the current FunctionContext to be defaulted if needed /// at the end of the earlier of either the current function or the current comptime scope. fn push_type_variable(&mut self, typ: Type) { diff --git a/compiler/noirc_frontend/src/hir_def/types.rs b/compiler/noirc_frontend/src/hir_def/types.rs index a0fea3aa774..ef8a697966c 100644 --- a/compiler/noirc_frontend/src/hir_def/types.rs +++ b/compiler/noirc_frontend/src/hir_def/types.rs @@ -184,10 +184,6 @@ impl Kind { } } - pub(crate) fn is_numeric(&self) -> bool { - matches!(self.follow_bindings(), Self::Numeric { .. 
}) - } - pub(crate) fn is_type_level_field_element(&self) -> bool { let type_level = false; self.is_field_element(type_level) @@ -375,10 +371,6 @@ impl ResolvedGeneric { pub fn kind(&self) -> Kind { self.type_var.kind() } - - pub(crate) fn is_numeric(&self) -> bool { - self.kind().is_numeric() - } } enum FunctionCoercionResult { @@ -524,13 +516,6 @@ impl StructType { self.fields.iter().map(|field| field.name.clone()).collect() } - /// Search the fields of a struct for any types with a `TypeKind::Numeric` - pub fn find_numeric_generics_in_fields(&self, found_names: &mut Vec) { - for field in self.fields.iter() { - field.typ.find_numeric_type_vars(found_names); - } - } - /// Instantiate this struct type, returning a Vec of the new generic args (in /// the same order as self.generics) pub fn instantiate(&self, interner: &mut NodeInterner) -> Vec { @@ -1102,99 +1087,6 @@ impl Type { } } - pub fn find_numeric_type_vars(&self, found_names: &mut Vec) { - // Return whether the named generic has a Kind::Numeric and save its name - let named_generic_is_numeric = |typ: &Type, found_names: &mut Vec| { - if let Type::NamedGeneric(var, name) = typ { - if var.kind().is_numeric() { - found_names.push(name.to_string()); - true - } else { - false - } - } else { - false - } - }; - - match self { - Type::FieldElement - | Type::Integer(_, _) - | Type::Bool - | Type::Unit - | Type::Error - | Type::Constant(_, _) - | Type::Forall(_, _) - | Type::Quoted(_) => {} - - Type::TypeVariable(type_var) => { - if let TypeBinding::Bound(typ) = &*type_var.borrow() { - named_generic_is_numeric(typ, found_names); - } - } - - Type::NamedGeneric(_, _) => { - named_generic_is_numeric(self, found_names); - } - Type::CheckedCast { from, to } => { - to.find_numeric_type_vars(found_names); - from.find_numeric_type_vars(found_names); - } - - Type::TraitAsType(_, _, args) => { - for arg in args.ordered.iter() { - arg.find_numeric_type_vars(found_names); - } - for arg in args.named.iter() { - 
arg.typ.find_numeric_type_vars(found_names); - } - } - Type::Array(length, elem) => { - elem.find_numeric_type_vars(found_names); - named_generic_is_numeric(length, found_names); - } - Type::Slice(elem) => elem.find_numeric_type_vars(found_names), - Type::Tuple(fields) => { - for field in fields.iter() { - field.find_numeric_type_vars(found_names); - } - } - Type::Function(parameters, return_type, env, _unconstrained) => { - for parameter in parameters.iter() { - parameter.find_numeric_type_vars(found_names); - } - return_type.find_numeric_type_vars(found_names); - env.find_numeric_type_vars(found_names); - } - Type::Struct(_, generics) => { - for generic in generics.iter() { - if !named_generic_is_numeric(generic, found_names) { - generic.find_numeric_type_vars(found_names); - } - } - } - Type::Alias(_, generics) => { - for generic in generics.iter() { - if !named_generic_is_numeric(generic, found_names) { - generic.find_numeric_type_vars(found_names); - } - } - } - Type::MutableReference(element) => element.find_numeric_type_vars(found_names), - Type::String(length) => { - named_generic_is_numeric(length, found_names); - } - Type::FmtString(length, elements) => { - elements.find_numeric_type_vars(found_names); - named_generic_is_numeric(length, found_names); - } - Type::InfixExpr(lhs, _op, rhs) => { - lhs.find_numeric_type_vars(found_names); - rhs.find_numeric_type_vars(found_names); - } - } - } - /// True if this type can be used as a parameter to `main` or a contract function. /// This is only false for unsized types like slices or slices that do not make sense /// as a program input such as named generics or mutable references. 
diff --git a/compiler/noirc_frontend/src/parser/parser/attributes.rs b/compiler/noirc_frontend/src/parser/parser/attributes.rs index 12cb37edb4b..55919708f8c 100644 --- a/compiler/noirc_frontend/src/parser/parser/attributes.rs +++ b/compiler/noirc_frontend/src/parser/parser/attributes.rs @@ -215,6 +215,10 @@ impl<'a> Parser<'a> { "oracle" => self.parse_single_name_attribute(ident, arguments, start_span, |name| { Attribute::Function(FunctionAttribute::Oracle(name)) }), + "recursive" => { + let attr = Attribute::Function(FunctionAttribute::Recursive); + self.parse_no_args_attribute(ident, arguments, attr) + } "use_callers_scope" => { let attr = Attribute::Secondary(SecondaryAttribute::UseCallersScope); self.parse_no_args_attribute(ident, arguments, attr) @@ -500,6 +504,13 @@ mod tests { parse_attribute_no_errors(src, expected); } + #[test] + fn parses_attribute_recursive() { + let src = "#[recursive]"; + let expected = Attribute::Function(FunctionAttribute::Recursive); + parse_attribute_no_errors(src, expected); + } + #[test] fn parses_attribute_fold() { let src = "#[fold]"; diff --git a/docs/versioned_docs/version-v0.37.0/getting_started/quick_start.md b/docs/versioned_docs/version-v0.37.0/getting_started/quick_start.md index 3c10086123f..4ce48409818 100644 --- a/docs/versioned_docs/version-v0.37.0/getting_started/quick_start.md +++ b/docs/versioned_docs/version-v0.37.0/getting_started/quick_start.md @@ -66,7 +66,6 @@ We can now use `nargo` to generate a _Prover.toml_ file, where our input values ```sh cd hello_world nargo check -``` Let's feed some valid values into this file: @@ -88,7 +87,7 @@ The command also automatically compiles your Noir program if it was not already With circuit compiled and witness generated, we're ready to prove. ## Proving backend - + Different proving backends may provide different tools and commands to work with Noir programs. 
Here Barretenberg's `bb` CLI tool is used as an example: ```sh diff --git a/docs/versioned_docs/version-v0.38.0/getting_started/quick_start.md b/docs/versioned_docs/version-v0.38.0/getting_started/quick_start.md index e389339e8c5..c693624eb82 100644 --- a/docs/versioned_docs/version-v0.38.0/getting_started/quick_start.md +++ b/docs/versioned_docs/version-v0.38.0/getting_started/quick_start.md @@ -68,7 +68,6 @@ We can now use `nargo` to generate a _Prover.toml_ file, where our input values ```sh cd hello_world nargo check -``` Let's feed some valid values into this file: @@ -90,7 +89,7 @@ The command also automatically compiles your Noir program if it was not already With circuit compiled and witness generated, we're ready to prove. ## Proving backend - + Different proving backends may provide different tools and commands to work with Noir programs. Here Barretenberg's `bb` CLI tool is used as an example: ```sh diff --git a/noir_stdlib/src/ec/tecurve.nr b/noir_stdlib/src/ec/tecurve.nr index 08f017c4f91..45a6b322ed1 100644 --- a/noir_stdlib/src/ec/tecurve.nr +++ b/noir_stdlib/src/ec/tecurve.nr @@ -27,7 +27,7 @@ pub mod affine { impl Point { // Point constructor - //#[deprecated = "It's recommmended to use the external noir-edwards library (https://github.com/noir-lang/noir-edwards)"] + // #[deprecated("It's recommmended to use the external noir-edwards library (https://github.com/noir-lang/noir-edwards)")] pub fn new(x: Field, y: Field) -> Self { Self { x, y } } diff --git a/noir_stdlib/src/hash/poseidon/mod.nr b/noir_stdlib/src/hash/poseidon/mod.nr index 6f0e461a610..e2e47959024 100644 --- a/noir_stdlib/src/hash/poseidon/mod.nr +++ b/noir_stdlib/src/hash/poseidon/mod.nr @@ -346,6 +346,7 @@ impl Hasher for PoseidonHasher { result } + #[inline_always] fn write(&mut self, input: Field) { self._state = self._state.push_back(input); } diff --git a/test_programs/compile_success_empty/comptime_apply_range_constraint/Nargo.toml 
b/test_programs/compile_success_empty/comptime_apply_range_constraint/Nargo.toml index bfd6fa75728..84d16060440 100644 --- a/test_programs/compile_success_empty/comptime_apply_range_constraint/Nargo.toml +++ b/test_programs/compile_success_empty/comptime_apply_range_constraint/Nargo.toml @@ -2,4 +2,5 @@ name = "comptime_apply_range_constraint" type = "bin" authors = [""] + [dependencies] diff --git a/test_programs/execution_success/brillig_assert/Nargo.toml b/test_programs/compile_success_empty/embedded_curve_msm_simplification/Nargo.toml similarity index 55% rename from test_programs/execution_success/brillig_assert/Nargo.toml rename to test_programs/compile_success_empty/embedded_curve_msm_simplification/Nargo.toml index b7d9231ab75..9c9bd8de04a 100644 --- a/test_programs/execution_success/brillig_assert/Nargo.toml +++ b/test_programs/compile_success_empty/embedded_curve_msm_simplification/Nargo.toml @@ -1,5 +1,5 @@ [package] -name = "brillig_assert" +name = "embedded_curve_msm_simplification" type = "bin" authors = [""] diff --git a/test_programs/compile_success_empty/embedded_curve_msm_simplification/src/main.nr b/test_programs/compile_success_empty/embedded_curve_msm_simplification/src/main.nr new file mode 100644 index 00000000000..e5aaa0f4d15 --- /dev/null +++ b/test_programs/compile_success_empty/embedded_curve_msm_simplification/src/main.nr @@ -0,0 +1,12 @@ +fn main() { + let pub_x = 0x0000000000000000000000000000000000000000000000000000000000000001; + let pub_y = 0x0000000000000002cf135e7506a45d632d270d45f1181294833fc48d823f272c; + + let g1_y = 17631683881184975370165255887551781615748388533673675138860; + let g1 = std::embedded_curve_ops::EmbeddedCurvePoint { x: 1, y: g1_y, is_infinite: false }; + let scalar = std::embedded_curve_ops::EmbeddedCurveScalar { lo: 1, hi: 0 }; + // Test that multi_scalar_mul correctly derives the public key + let res = std::embedded_curve_ops::multi_scalar_mul([g1], [scalar]); + assert(res.x == pub_x); + assert(res.y == 
pub_y); +} diff --git a/test_programs/execution_success/assert/src/main.nr b/test_programs/execution_success/assert/src/main.nr index 00e94414c0b..b95665d502b 100644 --- a/test_programs/execution_success/assert/src/main.nr +++ b/test_programs/execution_success/assert/src/main.nr @@ -1,3 +1,10 @@ fn main(x: Field) { assert(x == 1); + assert(1 == conditional(x as bool)); +} + +fn conditional(x: bool) -> Field { + assert(x, f"Expected x to be true but got {x}"); + assert_eq(x, true, f"Expected x to be true but got {x}"); + 1 } diff --git a/test_programs/execution_success/brillig_array_to_slice/Nargo.toml b/test_programs/execution_success/brillig_array_to_slice/Nargo.toml deleted file mode 100644 index 58157c38c26..00000000000 --- a/test_programs/execution_success/brillig_array_to_slice/Nargo.toml +++ /dev/null @@ -1,7 +0,0 @@ -[package] -name = "brillig_array_to_slice" -type = "bin" -authors = [""] -compiler_version = ">=0.25.0" - -[dependencies] \ No newline at end of file diff --git a/test_programs/execution_success/brillig_array_to_slice/Prover.toml b/test_programs/execution_success/brillig_array_to_slice/Prover.toml deleted file mode 100644 index 11497a473bc..00000000000 --- a/test_programs/execution_success/brillig_array_to_slice/Prover.toml +++ /dev/null @@ -1 +0,0 @@ -x = "0" diff --git a/test_programs/execution_success/brillig_array_to_slice/src/main.nr b/test_programs/execution_success/brillig_array_to_slice/src/main.nr deleted file mode 100644 index f54adb39963..00000000000 --- a/test_programs/execution_success/brillig_array_to_slice/src/main.nr +++ /dev/null @@ -1,20 +0,0 @@ -unconstrained fn brillig_as_slice(x: Field) -> (u32, Field, Field) { - let mut dynamic: [Field; 1] = [1]; - dynamic[x] = 2; - assert(dynamic[0] == 2); - - let brillig_slice = dynamic.as_slice(); - assert(brillig_slice.len() == 1); - - (brillig_slice.len(), dynamic[0], brillig_slice[0]) -} - -fn main(x: Field) { - unsafe { - let (slice_len, dynamic_0, slice_0) = brillig_as_slice(x); - 
assert(slice_len == 1); - assert(dynamic_0 == 2); - assert(slice_0 == 2); - } -} - diff --git a/test_programs/execution_success/brillig_assert/Prover.toml b/test_programs/execution_success/brillig_assert/Prover.toml deleted file mode 100644 index 4dd6b405159..00000000000 --- a/test_programs/execution_success/brillig_assert/Prover.toml +++ /dev/null @@ -1 +0,0 @@ -x = "1" diff --git a/test_programs/execution_success/brillig_assert/src/main.nr b/test_programs/execution_success/brillig_assert/src/main.nr deleted file mode 100644 index dc0138d3f05..00000000000 --- a/test_programs/execution_success/brillig_assert/src/main.nr +++ /dev/null @@ -1,14 +0,0 @@ -// Tests a very simple program. -// -// The features being tested is using assert on brillig -fn main(x: Field) { - unsafe { - assert(1 == conditional(x as bool)); - } -} - -unconstrained fn conditional(x: bool) -> Field { - assert(x, f"Expected x to be false but got {x}"); - assert_eq(x, true, f"Expected x to be false but got {x}"); - 1 -} diff --git a/test_programs/execution_success/brillig_blake3/Nargo.toml b/test_programs/execution_success/brillig_blake3/Nargo.toml deleted file mode 100644 index 879476dbdcf..00000000000 --- a/test_programs/execution_success/brillig_blake3/Nargo.toml +++ /dev/null @@ -1,7 +0,0 @@ -[package] -name = "brillig_blake3" -type = "bin" -authors = [""] -compiler_version = ">=0.22.0" - -[dependencies] diff --git a/test_programs/execution_success/brillig_blake3/Prover.toml b/test_programs/execution_success/brillig_blake3/Prover.toml deleted file mode 100644 index c807701479b..00000000000 --- a/test_programs/execution_success/brillig_blake3/Prover.toml +++ /dev/null @@ -1,37 +0,0 @@ -# hello as bytes -# https://connor4312.github.io/blake3/index.html -x = [104, 101, 108, 108, 111] -result = [ - 0xea, - 0x8f, - 0x16, - 0x3d, - 0xb3, - 0x86, - 0x82, - 0x92, - 0x5e, - 0x44, - 0x91, - 0xc5, - 0xe5, - 0x8d, - 0x4b, - 0xb3, - 0x50, - 0x6e, - 0xf8, - 0xc1, - 0x4e, - 0xb7, - 0x8a, - 0x86, - 0xe9, - 
0x08, - 0xc5, - 0x62, - 0x4a, - 0x67, - 0x20, - 0x0f, -] diff --git a/test_programs/execution_success/brillig_blake3/src/main.nr b/test_programs/execution_success/brillig_blake3/src/main.nr deleted file mode 100644 index 64852d775f4..00000000000 --- a/test_programs/execution_success/brillig_blake3/src/main.nr +++ /dev/null @@ -1,4 +0,0 @@ -unconstrained fn main(x: [u8; 5], result: [u8; 32]) { - let digest = std::hash::blake3(x); - assert(digest == result); -} diff --git a/test_programs/execution_success/brillig_ecdsa_secp256k1/Nargo.toml b/test_programs/execution_success/brillig_ecdsa_secp256k1/Nargo.toml deleted file mode 100644 index 495a49f2247..00000000000 --- a/test_programs/execution_success/brillig_ecdsa_secp256k1/Nargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "brillig_ecdsa_secp256k1" -type = "bin" -authors = [""] - -[dependencies] diff --git a/test_programs/execution_success/brillig_ecdsa_secp256k1/Prover.toml b/test_programs/execution_success/brillig_ecdsa_secp256k1/Prover.toml deleted file mode 100644 index e78fc19cb71..00000000000 --- a/test_programs/execution_success/brillig_ecdsa_secp256k1/Prover.toml +++ /dev/null @@ -1,169 +0,0 @@ - -hashed_message = [ - 0x3a, - 0x73, - 0xf4, - 0x12, - 0x3a, - 0x5c, - 0xd2, - 0x12, - 0x1f, - 0x21, - 0xcd, - 0x7e, - 0x8d, - 0x35, - 0x88, - 0x35, - 0x47, - 0x69, - 0x49, - 0xd0, - 0x35, - 0xd9, - 0xc2, - 0xda, - 0x68, - 0x06, - 0xb4, - 0x63, - 0x3a, - 0xc8, - 0xc1, - 0xe2, -] -pub_key_x = [ - 0xa0, - 0x43, - 0x4d, - 0x9e, - 0x47, - 0xf3, - 0xc8, - 0x62, - 0x35, - 0x47, - 0x7c, - 0x7b, - 0x1a, - 0xe6, - 0xae, - 0x5d, - 0x34, - 0x42, - 0xd4, - 0x9b, - 0x19, - 0x43, - 0xc2, - 0xb7, - 0x52, - 0xa6, - 0x8e, - 0x2a, - 0x47, - 0xe2, - 0x47, - 0xc7, -] -pub_key_y = [ - 0x89, - 0x3a, - 0xba, - 0x42, - 0x54, - 0x19, - 0xbc, - 0x27, - 0xa3, - 0xb6, - 0xc7, - 0xe6, - 0x93, - 0xa2, - 0x4c, - 0x69, - 0x6f, - 0x79, - 0x4c, - 0x2e, - 0xd8, - 0x77, - 0xa1, - 0x59, - 0x3c, - 0xbe, - 0xe5, - 0x3b, - 0x03, - 0x73, - 0x68, - 
0xd7, -] -signature = [ - 0xe5, - 0x08, - 0x1c, - 0x80, - 0xab, - 0x42, - 0x7d, - 0xc3, - 0x70, - 0x34, - 0x6f, - 0x4a, - 0x0e, - 0x31, - 0xaa, - 0x2b, - 0xad, - 0x8d, - 0x97, - 0x98, - 0xc3, - 0x80, - 0x61, - 0xdb, - 0x9a, - 0xe5, - 0x5a, - 0x4e, - 0x8d, - 0xf4, - 0x54, - 0xfd, - 0x28, - 0x11, - 0x98, - 0x94, - 0x34, - 0x4e, - 0x71, - 0xb7, - 0x87, - 0x70, - 0xcc, - 0x93, - 0x1d, - 0x61, - 0xf4, - 0x80, - 0xec, - 0xbb, - 0x0b, - 0x89, - 0xd6, - 0xeb, - 0x69, - 0x69, - 0x01, - 0x61, - 0xe4, - 0x9a, - 0x71, - 0x5f, - 0xcd, - 0x55, -] diff --git a/test_programs/execution_success/brillig_ecdsa_secp256k1/src/main.nr b/test_programs/execution_success/brillig_ecdsa_secp256k1/src/main.nr deleted file mode 100644 index 6bde8ac4ac7..00000000000 --- a/test_programs/execution_success/brillig_ecdsa_secp256k1/src/main.nr +++ /dev/null @@ -1,17 +0,0 @@ -// Tests a very simple program. -// -// The features being tested is ecdsa in brillig -fn main(hashed_message: [u8; 32], pub_key_x: [u8; 32], pub_key_y: [u8; 32], signature: [u8; 64]) { - unsafe { - assert(ecdsa(hashed_message, pub_key_x, pub_key_y, signature)); - } -} - -unconstrained fn ecdsa( - hashed_message: [u8; 32], - pub_key_x: [u8; 32], - pub_key_y: [u8; 32], - signature: [u8; 64], -) -> bool { - std::ecdsa_secp256k1::verify_signature(pub_key_x, pub_key_y, signature, hashed_message) -} diff --git a/test_programs/execution_success/brillig_ecdsa_secp256r1/Nargo.toml b/test_programs/execution_success/brillig_ecdsa_secp256r1/Nargo.toml deleted file mode 100644 index 0a71e782104..00000000000 --- a/test_programs/execution_success/brillig_ecdsa_secp256r1/Nargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "brillig_ecdsa_secp256r1" -type = "bin" -authors = [""] - -[dependencies] diff --git a/test_programs/execution_success/brillig_ecdsa_secp256r1/Prover.toml b/test_programs/execution_success/brillig_ecdsa_secp256r1/Prover.toml deleted file mode 100644 index a45f799877b..00000000000 --- 
a/test_programs/execution_success/brillig_ecdsa_secp256r1/Prover.toml +++ /dev/null @@ -1,20 +0,0 @@ -hashed_message = [ - 84, 112, 91, 163, 186, 175, 219, 223, 186, 140, 95, 154, 112, 247, 168, 155, 238, 152, - 217, 6, 181, 62, 49, 7, 77, 167, 186, 236, 220, 13, 169, 173, -] -pub_key_x = [ - 85, 15, 71, 16, 3, 243, 223, 151, 195, 223, 80, 106, 199, 151, 246, 114, 31, 177, 161, - 251, 123, 143, 111, 131, 210, 36, 73, 138, 101, 200, 142, 36, -] -pub_key_y = [ - 19, 96, 147, 215, 1, 46, 80, 154, 115, 113, 92, 189, 11, 0, 163, 204, 15, 244, 181, - 192, 27, 63, 250, 25, 106, 177, 251, 50, 112, 54, 184, 230, -] -signature = [ - 44, 112, 168, 208, 132, 182, 43, 252, 92, 224, 54, 65, 202, 249, 247, 42, - 212, 218, 140, 129, 191, 230, 236, 148, 135, 187, 94, 27, 239, 98, 161, 50, - 24, 173, 158, 226, 158, 175, 53, 31, 220, 80, 241, 82, 12, 66, 94, 155, - 144, 138, 7, 39, 139, 67, 176, 236, 123, 135, 39, 120, 193, 78, 7, 132 -] - - diff --git a/test_programs/execution_success/brillig_ecdsa_secp256r1/src/main.nr b/test_programs/execution_success/brillig_ecdsa_secp256r1/src/main.nr deleted file mode 100644 index 091905a3d01..00000000000 --- a/test_programs/execution_success/brillig_ecdsa_secp256r1/src/main.nr +++ /dev/null @@ -1,17 +0,0 @@ -// Tests a very simple program. 
-// -// The features being tested is ecdsa in brillig -fn main(hashed_message: [u8; 32], pub_key_x: [u8; 32], pub_key_y: [u8; 32], signature: [u8; 64]) { - unsafe { - assert(ecdsa(hashed_message, pub_key_x, pub_key_y, signature)); - } -} - -unconstrained fn ecdsa( - hashed_message: [u8; 32], - pub_key_x: [u8; 32], - pub_key_y: [u8; 32], - signature: [u8; 64], -) -> bool { - std::ecdsa_secp256r1::verify_signature(pub_key_x, pub_key_y, signature, hashed_message) -} diff --git a/test_programs/execution_success/brillig_hash_to_field/Nargo.toml b/test_programs/execution_success/brillig_hash_to_field/Nargo.toml deleted file mode 100644 index 7cfcc745f0d..00000000000 --- a/test_programs/execution_success/brillig_hash_to_field/Nargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "brillig_hash_to_field" -type = "bin" -authors = [""] - -[dependencies] diff --git a/test_programs/execution_success/brillig_hash_to_field/Prover.toml b/test_programs/execution_success/brillig_hash_to_field/Prover.toml deleted file mode 100644 index ecdcfd1fb00..00000000000 --- a/test_programs/execution_success/brillig_hash_to_field/Prover.toml +++ /dev/null @@ -1 +0,0 @@ -input = "27" \ No newline at end of file diff --git a/test_programs/execution_success/brillig_hash_to_field/src/main.nr b/test_programs/execution_success/brillig_hash_to_field/src/main.nr deleted file mode 100644 index 48c628020b6..00000000000 --- a/test_programs/execution_success/brillig_hash_to_field/src/main.nr +++ /dev/null @@ -1,12 +0,0 @@ -// Tests a very simple program. 
-// -// The features being tested is hash_to_field in brillig -fn main(input: Field) -> pub Field { - unsafe { - hash_to_field(input) - } -} - -unconstrained fn hash_to_field(input: Field) -> Field { - std::hash::hash_to_field(&[input]) -} diff --git a/test_programs/execution_success/brillig_keccak/Nargo.toml b/test_programs/execution_success/brillig_keccak/Nargo.toml deleted file mode 100644 index 8cacf2186b8..00000000000 --- a/test_programs/execution_success/brillig_keccak/Nargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "brillig_keccak" -type = "bin" -authors = [""] - -[dependencies] diff --git a/test_programs/execution_success/brillig_keccak/Prover.toml b/test_programs/execution_success/brillig_keccak/Prover.toml deleted file mode 100644 index d65c4011d3f..00000000000 --- a/test_programs/execution_success/brillig_keccak/Prover.toml +++ /dev/null @@ -1,35 +0,0 @@ -x = 0xbd -result = [ - 0x5a, - 0x50, - 0x2f, - 0x9f, - 0xca, - 0x46, - 0x7b, - 0x26, - 0x6d, - 0x5b, - 0x78, - 0x33, - 0x65, - 0x19, - 0x37, - 0xe8, - 0x05, - 0x27, - 0x0c, - 0xa3, - 0xf3, - 0xaf, - 0x1c, - 0x0d, - 0xd2, - 0x46, - 0x2d, - 0xca, - 0x4b, - 0x3b, - 0x1a, - 0xbf, -] diff --git a/test_programs/execution_success/brillig_keccak/src/main.nr b/test_programs/execution_success/brillig_keccak/src/main.nr deleted file mode 100644 index e5c8e5f493e..00000000000 --- a/test_programs/execution_success/brillig_keccak/src/main.nr +++ /dev/null @@ -1,26 +0,0 @@ -// Tests a very simple program. 
-// -// The features being tested is keccak256 in brillig -fn main(x: Field, result: [u8; 32]) { - unsafe { - // We use the `as` keyword here to denote the fact that we want to take just the first byte from the x Field - // The padding is taken care of by the program - let digest = keccak256([x as u8], 1); - assert(digest == result); - //#1399: variable message size - let message_size = 4; - let hash_a = keccak256([1, 2, 3, 4], message_size); - let hash_b = keccak256([1, 2, 3, 4, 0, 0, 0, 0], message_size); - - assert(hash_a == hash_b); - - let message_size_big = 8; - let hash_c = keccak256([1, 2, 3, 4, 0, 0, 0, 0], message_size_big); - - assert(hash_a != hash_c); - } -} - -unconstrained fn keccak256(data: [u8; N], msg_len: u32) -> [u8; 32] { - std::hash::keccak256(data, msg_len) -} diff --git a/test_programs/execution_success/brillig_loop/Nargo.toml b/test_programs/execution_success/brillig_loop/Nargo.toml deleted file mode 100644 index 1212397c4db..00000000000 --- a/test_programs/execution_success/brillig_loop/Nargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "brillig_loop" -type = "bin" -authors = [""] - -[dependencies] diff --git a/test_programs/execution_success/brillig_loop/Prover.toml b/test_programs/execution_success/brillig_loop/Prover.toml deleted file mode 100644 index 22cd5b7c12f..00000000000 --- a/test_programs/execution_success/brillig_loop/Prover.toml +++ /dev/null @@ -1 +0,0 @@ -sum = "6" diff --git a/test_programs/execution_success/brillig_loop/src/main.nr b/test_programs/execution_success/brillig_loop/src/main.nr deleted file mode 100644 index 2d073afb482..00000000000 --- a/test_programs/execution_success/brillig_loop/src/main.nr +++ /dev/null @@ -1,34 +0,0 @@ -// Tests a very simple program. 
-// -// The features being tested is basic looping on brillig -fn main(sum: u32) { - unsafe { - assert(loop(4) == sum); - assert(loop_incl(3) == sum); - assert(plain_loop() == sum); - } -} - -unconstrained fn loop(x: u32) -> u32 { - let mut sum = 0; - for i in 0..x { - sum = sum + i; - } - sum -} - -unconstrained fn loop_incl(x: u32) -> u32 { - let mut sum = 0; - for i in 0..=x { - sum = sum + i; - } - sum -} - -unconstrained fn plain_loop() -> u32 { - let mut sum = 0; - for i in 0..4 { - sum = sum + i; - } - sum -} diff --git a/test_programs/execution_success/brillig_references/Nargo.toml b/test_programs/execution_success/brillig_references/Nargo.toml deleted file mode 100644 index 0f64b862ba0..00000000000 --- a/test_programs/execution_success/brillig_references/Nargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "brillig_references" -type = "bin" -authors = [""] - -[dependencies] diff --git a/test_programs/execution_success/brillig_references/Prover.toml b/test_programs/execution_success/brillig_references/Prover.toml deleted file mode 100644 index 151faa5a9b1..00000000000 --- a/test_programs/execution_success/brillig_references/Prover.toml +++ /dev/null @@ -1 +0,0 @@ -x = "2" \ No newline at end of file diff --git a/test_programs/execution_success/brillig_references/src/main.nr b/test_programs/execution_success/brillig_references/src/main.nr deleted file mode 100644 index 47f263cf557..00000000000 --- a/test_programs/execution_success/brillig_references/src/main.nr +++ /dev/null @@ -1,53 +0,0 @@ -unconstrained fn main(mut x: Field) { - add1(&mut x); - assert(x == 3); - // https://github.com/noir-lang/noir/issues/1899 - // let mut s = S { y: x }; - // s.add2(); - // assert(s.y == 5); - // Test that normal mutable variables are still copied - let mut a = 0; - mutate_copy(a); - assert(a == 0); - // Test something 3 allocations deep - let mut nested_allocations = Nested { y: &mut &mut 0 }; - add1(*nested_allocations.y); - assert(**nested_allocations.y == 
1); - // Test nested struct allocations with a mutable reference to an array. - let mut c = C { foo: 0, bar: &mut C2 { array: &mut [1, 2] } }; - *c.bar.array = [3, 4]; - let arr: [Field; 2] = *c.bar.array; - assert(arr[0] == 3); - assert(arr[1] == 4); -} - -unconstrained fn add1(x: &mut Field) { - *x += 1; -} - -struct S { - y: Field, -} - -struct Nested { - y: &mut &mut Field, -} - -struct C { - foo: Field, - bar: &mut C2, -} - -struct C2 { - array: &mut [Field; 2], -} - -impl S { - unconstrained fn add2(&mut self) { - self.y += 2; - } -} - -unconstrained fn mutate_copy(mut a: Field) { - a = 7; -} diff --git a/test_programs/execution_success/brillig_sha256/Nargo.toml b/test_programs/execution_success/brillig_sha256/Nargo.toml deleted file mode 100644 index 7140fa0fd0b..00000000000 --- a/test_programs/execution_success/brillig_sha256/Nargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "brillig_sha256" -type = "bin" -authors = [""] - -[dependencies] diff --git a/test_programs/execution_success/brillig_sha256/Prover.toml b/test_programs/execution_success/brillig_sha256/Prover.toml deleted file mode 100644 index 374ae90ad78..00000000000 --- a/test_programs/execution_success/brillig_sha256/Prover.toml +++ /dev/null @@ -1,35 +0,0 @@ -x = 0xbd -result = [ - 0x68, - 0x32, - 0x57, - 0x20, - 0xaa, - 0xbd, - 0x7c, - 0x82, - 0xf3, - 0x0f, - 0x55, - 0x4b, - 0x31, - 0x3d, - 0x05, - 0x70, - 0xc9, - 0x5a, - 0xcc, - 0xbb, - 0x7d, - 0xc4, - 0xb5, - 0xaa, - 0xe1, - 0x12, - 0x04, - 0xc0, - 0x8f, - 0xfe, - 0x73, - 0x2b, -] diff --git a/test_programs/execution_success/brillig_sha256/src/main.nr b/test_programs/execution_success/brillig_sha256/src/main.nr deleted file mode 100644 index e574676965d..00000000000 --- a/test_programs/execution_success/brillig_sha256/src/main.nr +++ /dev/null @@ -1,15 +0,0 @@ -// Tests a very simple program. 
-// -// The features being tested is sha256 in brillig -fn main(x: Field, result: [u8; 32]) { - unsafe { - assert(result == sha256(x)); - } -} - -unconstrained fn sha256(x: Field) -> [u8; 32] { - // We use the `as` keyword here to denote the fact that we want to take just the first byte from the x Field - // The padding is taken care of by the program - std::hash::sha256([x as u8]) -} - diff --git a/test_programs/execution_success/brillig_slices/Nargo.toml b/test_programs/execution_success/brillig_slices/Nargo.toml deleted file mode 100644 index 5f6caad088a..00000000000 --- a/test_programs/execution_success/brillig_slices/Nargo.toml +++ /dev/null @@ -1,5 +0,0 @@ -[package] -name = "brillig_slices" -type = "bin" -authors = [""] -[dependencies] diff --git a/test_programs/execution_success/brillig_slices/Prover.toml b/test_programs/execution_success/brillig_slices/Prover.toml deleted file mode 100644 index f28f2f8cc48..00000000000 --- a/test_programs/execution_success/brillig_slices/Prover.toml +++ /dev/null @@ -1,2 +0,0 @@ -x = "5" -y = "10" diff --git a/test_programs/execution_success/brillig_slices/src/main.nr b/test_programs/execution_success/brillig_slices/src/main.nr deleted file mode 100644 index f3928cbc026..00000000000 --- a/test_programs/execution_success/brillig_slices/src/main.nr +++ /dev/null @@ -1,144 +0,0 @@ -use std::slice; -unconstrained fn main(x: Field, y: Field) { - let mut slice: [Field] = &[y, x]; - assert(slice.len() == 2); - - slice = slice.push_back(7); - assert(slice.len() == 3); - assert(slice[0] == y); - assert(slice[1] == x); - assert(slice[2] == 7); - // Array set on slice target - slice[0] = x; - slice[1] = y; - slice[2] = 1; - - assert(slice[0] == x); - assert(slice[1] == y); - assert(slice[2] == 1); - - slice = push_front_to_slice(slice, 2); - assert(slice.len() == 4); - assert(slice[0] == 2); - assert(slice[1] == x); - assert(slice[2] == y); - assert(slice[3] == 1); - - let (item, popped_front_slice) = slice.pop_front(); - slice = 
popped_front_slice; - assert(item == 2); - - assert(slice.len() == 3); - assert(slice[0] == x); - assert(slice[1] == y); - assert(slice[2] == 1); - - let (popped_back_slice, another_item) = slice.pop_back(); - slice = popped_back_slice; - assert(another_item == 1); - - assert(slice.len() == 2); - assert(slice[0] == x); - assert(slice[1] == y); - - slice = slice.insert(1, 2); - assert(slice.len() == 3); - assert(slice[0] == x); - assert(slice[1] == 2); - assert(slice[2] == y); - - let (removed_slice, should_be_2) = slice.remove(1); - slice = removed_slice; - assert(should_be_2 == 2); - - assert(slice.len() == 2); - assert(slice[0] == x); - assert(slice[1] == y); - - let (slice_with_only_x, should_be_y) = slice.remove(1); - slice = slice_with_only_x; - assert(should_be_y == y); - - assert(slice.len() == 1); - assert(slice[0] == x); - - let (empty_slice, should_be_x) = slice.remove(0); - assert(should_be_x == x); - assert(empty_slice.len() == 0); - - regression_merge_slices(x, y); -} -// Tests slice passing to/from functions -unconstrained fn push_front_to_slice(slice: [T], item: T) -> [T] { - slice.push_front(item) -} -// The parameters to this function must come from witness values (inputs to main) -unconstrained fn regression_merge_slices(x: Field, y: Field) { - merge_slices_if(x, y); - merge_slices_else(x); -} - -unconstrained fn merge_slices_if(x: Field, y: Field) { - let slice = merge_slices_return(x, y); - assert(slice[2] == 10); - assert(slice.len() == 3); - - let slice = merge_slices_mutate(x, y); - assert(slice[3] == 5); - assert(slice.len() == 4); - - let slice = merge_slices_mutate_in_loop(x, y); - assert(slice[6] == 4); - assert(slice.len() == 7); -} - -unconstrained fn merge_slices_else(x: Field) { - let slice = merge_slices_return(x, 5); - assert(slice[0] == 0); - assert(slice[1] == 0); - assert(slice.len() == 2); - - let slice = merge_slices_mutate(x, 5); - assert(slice[2] == 5); - assert(slice.len() == 3); - - let slice = 
merge_slices_mutate_in_loop(x, 5); - assert(slice[2] == 5); - assert(slice.len() == 3); -} -// Test returning a merged slice without a mutation -unconstrained fn merge_slices_return(x: Field, y: Field) -> [Field] { - let slice = &[0; 2]; - if x != y { - if x != 20 { - slice.push_back(y) - } else { - slice - } - } else { - slice - } -} -// Test mutating a slice inside of an if statement -unconstrained fn merge_slices_mutate(x: Field, y: Field) -> [Field] { - let mut slice = &[0; 2]; - if x != y { - slice = slice.push_back(y); - slice = slice.push_back(x); - } else { - slice = slice.push_back(x); - } - slice -} -// Test mutating a slice inside of a loop in an if statement -unconstrained fn merge_slices_mutate_in_loop(x: Field, y: Field) -> [Field] { - let mut slice = &[0; 2]; - if x != y { - for i in 0..5 { - slice = slice.push_back(i as Field); - } - } else { - slice = slice.push_back(x); - } - slice -} diff --git a/test_programs/execution_success/loop/src/main.nr b/test_programs/execution_success/loop/src/main.nr index 4482fdb3443..b3be4c4c3ff 100644 --- a/test_programs/execution_success/loop/src/main.nr +++ b/test_programs/execution_success/loop/src/main.nr @@ -4,6 +4,7 @@ fn main(six_as_u32: u32) { assert_eq(loop(4), six_as_u32); assert_eq(loop_incl(3), six_as_u32); + assert(plain_loop() == six_as_u32); } fn loop(x: u32) -> u32 { @@ -21,3 +22,11 @@ fn loop_incl(x: u32) -> u32 { } sum } + +fn plain_loop() -> u32 { + let mut sum = 0; + for i in 0..4 { + sum = sum + i; + } + sum +} diff --git a/test_programs/execution_success/sha256/src/main.nr b/test_programs/execution_success/sha256/src/main.nr index d26d916ccff..8e5e46b9837 100644 --- a/test_programs/execution_success/sha256/src/main.nr +++ b/test_programs/execution_success/sha256/src/main.nr @@ -17,6 +17,9 @@ fn main(x: Field, result: [u8; 32], input: [u8; 2], toggle: bool) { // docs:end:sha256_var assert(digest == result); + let digest = std::hash::sha256([x as u8]); + assert(digest == result); + // variable 
size let size: Field = 1 + toggle as Field; let var_sha = std::hash::sha256_var(input, size as u64); diff --git a/tooling/nargo_cli/build.rs b/tooling/nargo_cli/build.rs index ce46a717113..ad1f82f4e45 100644 --- a/tooling/nargo_cli/build.rs +++ b/tooling/nargo_cli/build.rs @@ -152,7 +152,7 @@ fn generate_test_cases( } cases } else { - vec![Inliner::Max] + vec![Inliner::Default] }; // We can't use a `#[test_matrix(brillig_cases, inliner_cases)` if we only want to limit the @@ -163,7 +163,10 @@ fn generate_test_cases( if *brillig && inliner.value() < matrix_config.min_inliner { continue; } - test_cases.push(format!("#[test_case::test_case({brillig}, {})]", inliner.label())); + test_cases.push(format!( + "#[test_case::test_case(ForceBrillig({brillig}), Inliner({}))]", + inliner.label() + )); } } let test_cases = test_cases.join("\n"); @@ -183,7 +186,7 @@ lazy_static::lazy_static! {{ }} {test_cases} -fn test_{test_name}(force_brillig: bool, inliner_aggressiveness: i64) {{ +fn test_{test_name}(force_brillig: ForceBrillig, inliner_aggressiveness: Inliner) {{ let test_program_dir = PathBuf::from("{test_dir}"); // Ignore poisoning errors if some of the matrix cases failed. 
@@ -198,8 +201,8 @@ fn test_{test_name}(force_brillig: bool, inliner_aggressiveness: i64) {{ let mut nargo = Command::cargo_bin("nargo").unwrap(); nargo.arg("--program-dir").arg(test_program_dir); nargo.arg("{test_command}").arg("--force"); - nargo.arg("--inliner-aggressiveness").arg(inliner_aggressiveness.to_string()); - if force_brillig {{ + nargo.arg("--inliner-aggressiveness").arg(inliner_aggressiveness.0.to_string()); + if force_brillig.0 {{ nargo.arg("--force-brillig"); }} @@ -237,7 +240,7 @@ fn generate_execution_success_tests(test_file: &mut File, test_data_dir: &Path) "#, &MatrixConfig { vary_brillig: !IGNORED_BRILLIG_TESTS.contains(&test_name.as_str()), - vary_inliner: false, + vary_inliner: true, min_inliner: INLINER_MIN_OVERRIDES .iter() .find(|(n, _)| *n == test_name.as_str()) diff --git a/tooling/nargo_cli/tests/execute.rs b/tooling/nargo_cli/tests/execute.rs index e2bef43b571..561520c57a9 100644 --- a/tooling/nargo_cli/tests/execute.rs +++ b/tooling/nargo_cli/tests/execute.rs @@ -14,6 +14,12 @@ mod tests { test_binary::build_test_binary_once!(mock_backend, "../backend_interface/test-binaries"); + // Utilities to keep the test matrix labels more intuitive. 
+ #[derive(Debug, Clone, Copy)] + struct ForceBrillig(pub bool); + #[derive(Debug, Clone, Copy)] + struct Inliner(pub i64); + // include tests generated by `build.rs` include!(concat!(env!("OUT_DIR"), "/execute.rs")); } diff --git a/tooling/nargo_fmt/src/formatter/attribute.rs b/tooling/nargo_fmt/src/formatter/attribute.rs index 19d5730a546..7eae129f590 100644 --- a/tooling/nargo_fmt/src/formatter/attribute.rs +++ b/tooling/nargo_fmt/src/formatter/attribute.rs @@ -50,7 +50,8 @@ impl<'a> Formatter<'a> { | FunctionAttribute::Builtin(_) | FunctionAttribute::Oracle(_) => self.format_one_arg_attribute(), FunctionAttribute::Test(test_scope) => self.format_test_attribute(test_scope), - FunctionAttribute::Fold + FunctionAttribute::Recursive + | FunctionAttribute::Fold | FunctionAttribute::NoPredicates | FunctionAttribute::InlineAlways => self.format_no_args_attribute(), } diff --git a/tooling/profiler/Cargo.toml b/tooling/profiler/Cargo.toml index 604208b5a54..798a21ea0d6 100644 --- a/tooling/profiler/Cargo.toml +++ b/tooling/profiler/Cargo.toml @@ -44,6 +44,3 @@ noirc_abi.workspace = true noirc_driver.workspace = true tempfile.workspace = true -[features] -default = ["bn254"] -bn254 = ["acir/bn254"] diff --git a/tooling/profiler/src/cli/execution_flamegraph_cmd.rs b/tooling/profiler/src/cli/execution_flamegraph_cmd.rs index a0b3d6a3128..981d08a3eb1 100644 --- a/tooling/profiler/src/cli/execution_flamegraph_cmd.rs +++ b/tooling/profiler/src/cli/execution_flamegraph_cmd.rs @@ -1,13 +1,12 @@ use std::path::{Path, PathBuf}; use acir::circuit::OpcodeLocation; -use acir::FieldElement; use clap::Args; use color_eyre::eyre::{self, Context}; -use crate::flamegraph::{FlamegraphGenerator, InfernoFlamegraphGenerator, Sample}; +use crate::flamegraph::{BrilligExecutionSample, FlamegraphGenerator, InfernoFlamegraphGenerator}; use crate::fs::{read_inputs_from_file, read_program_from_file}; -use crate::opcode_formatter::AcirOrBrilligOpcode; +use 
crate::opcode_formatter::format_brillig_opcode; use bn254_blackbox_solver::Bn254BlackBoxSolver; use nargo::ops::DefaultForeignCallExecutor; use noirc_abi::input_parser::Format; @@ -51,7 +50,7 @@ fn run_with_generator( let initial_witness = program.abi.encode(&inputs_map, None)?; println!("Executing"); - let (_, profiling_samples) = nargo::ops::execute_program_with_profiling( + let (_, mut profiling_samples) = nargo::ops::execute_program_with_profiling( &program.bytecode, initial_witness, &Bn254BlackBoxSolver, @@ -59,11 +58,13 @@ fn run_with_generator( )?; println!("Executed"); - let profiling_samples: Vec> = profiling_samples - .into_iter() + println!("Collecting {} samples", profiling_samples.len()); + + let profiling_samples: Vec = profiling_samples + .iter_mut() .map(|sample| { - let call_stack = sample.call_stack; - let brillig_function_id = sample.brillig_function_id; + let call_stack = std::mem::take(&mut sample.call_stack); + let brillig_function_id = std::mem::take(&mut sample.brillig_function_id); let last_entry = call_stack.last(); let opcode = brillig_function_id .and_then(|id| program.bytecode.unconstrained_functions.get(id.0 as usize)) @@ -74,8 +75,8 @@ fn run_with_generator( None } }) - .map(|opcode| AcirOrBrilligOpcode::Brillig(opcode.clone())); - Sample { opcode, call_stack, count: 1, brillig_function_id } + .map(format_brillig_opcode); + BrilligExecutionSample { opcode, call_stack, brillig_function_id } }) .collect(); diff --git a/tooling/profiler/src/cli/gates_flamegraph_cmd.rs b/tooling/profiler/src/cli/gates_flamegraph_cmd.rs index 20cc1b747c3..c3ae29de058 100644 --- a/tooling/profiler/src/cli/gates_flamegraph_cmd.rs +++ b/tooling/profiler/src/cli/gates_flamegraph_cmd.rs @@ -6,10 +6,10 @@ use color_eyre::eyre::{self, Context}; use noirc_artifacts::debug::DebugArtifact; -use crate::flamegraph::{FlamegraphGenerator, InfernoFlamegraphGenerator, Sample}; +use crate::flamegraph::{CompilationSample, FlamegraphGenerator, InfernoFlamegraphGenerator}; 
use crate::fs::read_program_from_file; use crate::gates_provider::{BackendGatesProvider, GatesProvider}; -use crate::opcode_formatter::AcirOrBrilligOpcode; +use crate::opcode_formatter::format_acir_opcode; #[derive(Debug, Clone, Args)] pub(crate) struct GatesFlamegraphCommand { @@ -83,8 +83,8 @@ fn run_with_provider( .into_iter() .zip(bytecode.opcodes) .enumerate() - .map(|(index, (gates, opcode))| Sample { - opcode: Some(AcirOrBrilligOpcode::Acir(opcode)), + .map(|(index, (gates, opcode))| CompilationSample { + opcode: Some(format_acir_opcode(&opcode)), call_stack: vec![OpcodeLocation::Acir(index)], count: gates, brillig_function_id: None, @@ -106,10 +106,7 @@ fn run_with_provider( #[cfg(test)] mod tests { - use acir::{ - circuit::{Circuit, Program}, - AcirField, - }; + use acir::circuit::{Circuit, Program}; use color_eyre::eyre::{self}; use fm::codespan_files::Files; use noirc_artifacts::program::ProgramArtifact; @@ -143,9 +140,9 @@ mod tests { struct TestFlamegraphGenerator {} impl super::FlamegraphGenerator for TestFlamegraphGenerator { - fn generate_flamegraph<'files, F: AcirField>( + fn generate_flamegraph<'files, S: Sample>( &self, - _samples: Vec>, + _samples: Vec, _debug_symbols: &DebugInfo, _files: &'files impl Files<'files, FileId = fm::FileId>, _artifact_name: &str, diff --git a/tooling/profiler/src/cli/opcodes_flamegraph_cmd.rs b/tooling/profiler/src/cli/opcodes_flamegraph_cmd.rs index bb3df86c339..4e271c9f838 100644 --- a/tooling/profiler/src/cli/opcodes_flamegraph_cmd.rs +++ b/tooling/profiler/src/cli/opcodes_flamegraph_cmd.rs @@ -7,9 +7,9 @@ use color_eyre::eyre::{self, Context}; use noirc_artifacts::debug::DebugArtifact; -use crate::flamegraph::{FlamegraphGenerator, InfernoFlamegraphGenerator, Sample}; +use crate::flamegraph::{CompilationSample, FlamegraphGenerator, InfernoFlamegraphGenerator}; use crate::fs::read_program_from_file; -use crate::opcode_formatter::AcirOrBrilligOpcode; +use crate::opcode_formatter::{format_acir_opcode, 
format_brillig_opcode}; #[derive(Debug, Clone, Args)] pub(crate) struct OpcodesFlamegraphCommand { @@ -59,8 +59,8 @@ fn run_with_generator( .opcodes .iter() .enumerate() - .map(|(index, opcode)| Sample { - opcode: Some(AcirOrBrilligOpcode::Acir(opcode.clone())), + .map(|(index, opcode)| CompilationSample { + opcode: Some(format_acir_opcode(opcode)), call_stack: vec![OpcodeLocation::Acir(index)], count: 1, brillig_function_id: None, @@ -96,8 +96,8 @@ fn run_with_generator( .bytecode .into_iter() .enumerate() - .map(|(brillig_index, opcode)| Sample { - opcode: Some(AcirOrBrilligOpcode::Brillig(opcode)), + .map(|(brillig_index, opcode)| CompilationSample { + opcode: Some(format_brillig_opcode(&opcode)), call_stack: vec![OpcodeLocation::Brillig { acir_index: acir_opcode_index, brillig_index, @@ -146,7 +146,7 @@ mod tests { brillig::{BrilligBytecode, BrilligFunctionId}, Circuit, Opcode, Program, }, - AcirField, FieldElement, + FieldElement, }; use color_eyre::eyre::{self}; use fm::codespan_files::Files; @@ -160,9 +160,9 @@ mod tests { struct TestFlamegraphGenerator {} impl super::FlamegraphGenerator for TestFlamegraphGenerator { - fn generate_flamegraph<'files, F: AcirField>( + fn generate_flamegraph<'files, S: Sample>( &self, - _samples: Vec>, + _samples: Vec, _debug_symbols: &DebugInfo, _files: &'files impl Files<'files, FileId = fm::FileId>, _artifact_name: &str, diff --git a/tooling/profiler/src/flamegraph.rs b/tooling/profiler/src/flamegraph.rs index 7882ac903ef..38966902314 100644 --- a/tooling/profiler/src/flamegraph.rs +++ b/tooling/profiler/src/flamegraph.rs @@ -3,7 +3,6 @@ use std::{collections::BTreeMap, io::BufWriter}; use acir::circuit::brillig::BrilligFunctionId; use acir::circuit::OpcodeLocation; -use acir::AcirField; use color_eyre::eyre::{self}; use fm::codespan_files::Files; use fxhash::FxHashMap as HashMap; @@ -13,18 +12,66 @@ use noirc_errors::reporter::line_and_column_from_span; use noirc_errors::Location; use noirc_evaluator::brillig::ProcedureId; 
-use crate::opcode_formatter::AcirOrBrilligOpcode; +pub(crate) trait Sample { + fn count(&self) -> usize; -use super::opcode_formatter::format_opcode; + fn brillig_function_id(&self) -> Option; + + fn call_stack(&self) -> &[OpcodeLocation]; + + fn opcode(self) -> Option; +} #[derive(Debug)] -pub(crate) struct Sample { - pub(crate) opcode: Option>, +pub(crate) struct CompilationSample { + pub(crate) opcode: Option, pub(crate) call_stack: Vec, pub(crate) count: usize, pub(crate) brillig_function_id: Option, } +impl Sample for CompilationSample { + fn count(&self) -> usize { + self.count + } + + fn brillig_function_id(&self) -> Option { + self.brillig_function_id + } + + fn call_stack(&self) -> &[OpcodeLocation] { + &self.call_stack + } + + fn opcode(self) -> Option { + self.opcode + } +} + +pub(crate) struct BrilligExecutionSample { + pub(crate) opcode: Option, + pub(crate) call_stack: Vec, + pub(crate) brillig_function_id: Option, +} + +impl Sample for BrilligExecutionSample { + fn count(&self) -> usize { + 1 + } + + fn brillig_function_id(&self) -> Option { + self.brillig_function_id + } + + fn call_stack(&self) -> &[OpcodeLocation] { + &self.call_stack + } + + fn opcode(self) -> Option { + self.opcode + } +} + #[derive(Debug, Default)] pub(crate) struct FoldedStackItem { pub(crate) total_samples: usize, @@ -33,9 +80,9 @@ pub(crate) struct FoldedStackItem { pub(crate) trait FlamegraphGenerator { #[allow(clippy::too_many_arguments)] - fn generate_flamegraph<'files, F: AcirField>( + fn generate_flamegraph<'files, S: Sample>( &self, - samples: Vec>, + samples: Vec, debug_symbols: &DebugInfo, files: &'files impl Files<'files, FileId = fm::FileId>, artifact_name: &str, @@ -49,9 +96,9 @@ pub(crate) struct InfernoFlamegraphGenerator { } impl FlamegraphGenerator for InfernoFlamegraphGenerator { - fn generate_flamegraph<'files, F: AcirField>( + fn generate_flamegraph<'files, S: Sample>( &self, - samples: Vec>, + samples: Vec, debug_symbols: &DebugInfo, files: &'files impl 
Files<'files, FileId = fm::FileId>, artifact_name: &str, @@ -82,8 +129,8 @@ impl FlamegraphGenerator for InfernoFlamegraphGenerator { } } -fn generate_folded_sorted_lines<'files, F: AcirField>( - samples: Vec>, +fn generate_folded_sorted_lines<'files, S: Sample>( + samples: Vec, debug_symbols: &DebugInfo, files: &'files impl Files<'files, FileId = fm::FileId>, ) -> Vec { @@ -92,15 +139,15 @@ fn generate_folded_sorted_lines<'files, F: AcirField>( let mut resolution_cache: HashMap> = HashMap::default(); for sample in samples { - let mut location_names = Vec::with_capacity(sample.call_stack.len()); - for opcode_location in sample.call_stack { + let mut location_names = Vec::with_capacity(sample.call_stack().len()); + for opcode_location in sample.call_stack() { let callsite_labels = resolution_cache - .entry(opcode_location) + .entry(*opcode_location) .or_insert_with(|| { find_callsite_labels( debug_symbols, - &opcode_location, - sample.brillig_function_id, + opcode_location, + sample.brillig_function_id(), files, ) }) @@ -109,11 +156,14 @@ fn generate_folded_sorted_lines<'files, F: AcirField>( location_names.extend(callsite_labels); } - if let Some(opcode) = &sample.opcode { - location_names.push(format_opcode(opcode)); + // We move `sample` by calling `sample.opcode()` so we want to fetch the sample count here. 
+ let count = sample.count(); + + if let Some(opcode) = sample.opcode() { + location_names.push(opcode); } - add_locations_to_folded_stack_items(&mut folded_stack_items, location_names, sample.count); + add_locations_to_folded_stack_items(&mut folded_stack_items, location_names, count); } to_folded_sorted_lines(&folded_stack_items, Default::default()) @@ -251,7 +301,7 @@ mod tests { use noirc_errors::{debug_info::DebugInfo, Location, Span}; use std::{collections::BTreeMap, path::Path}; - use crate::{flamegraph::Sample, opcode_formatter::AcirOrBrilligOpcode}; + use crate::{flamegraph::CompilationSample, opcode_formatter::format_acir_opcode}; use super::generate_folded_sorted_lines; @@ -338,25 +388,25 @@ mod tests { BTreeMap::default(), ); - let samples: Vec> = vec![ - Sample { - opcode: Some(AcirOrBrilligOpcode::Acir(AcirOpcode::AssertZero( + let samples: Vec = vec![ + CompilationSample { + opcode: Some(format_acir_opcode(&AcirOpcode::AssertZero::( Expression::default(), ))), call_stack: vec![OpcodeLocation::Acir(0)], count: 10, brillig_function_id: None, }, - Sample { - opcode: Some(AcirOrBrilligOpcode::Acir(AcirOpcode::AssertZero( + CompilationSample { + opcode: Some(format_acir_opcode(&AcirOpcode::AssertZero::( Expression::default(), ))), call_stack: vec![OpcodeLocation::Acir(1)], count: 20, brillig_function_id: None, }, - Sample { - opcode: Some(AcirOrBrilligOpcode::Acir(AcirOpcode::MemoryInit { + CompilationSample { + opcode: Some(format_acir_opcode(&AcirOpcode::MemoryInit:: { block_id: BlockId(0), init: vec![], block_type: acir::circuit::opcodes::BlockType::Memory, diff --git a/tooling/profiler/src/opcode_formatter.rs b/tooling/profiler/src/opcode_formatter.rs index 3c910fa5401..b4367de9e7e 100644 --- a/tooling/profiler/src/opcode_formatter.rs +++ b/tooling/profiler/src/opcode_formatter.rs @@ -2,12 +2,6 @@ use acir::brillig::{BinaryFieldOp, BinaryIntOp, BlackBoxOp, Opcode as BrilligOpc use acir::circuit::{opcodes::BlackBoxFuncCall, Opcode as AcirOpcode}; use 
acir::AcirField; -#[derive(Debug)] -pub(crate) enum AcirOrBrilligOpcode { - Acir(AcirOpcode), - Brillig(BrilligOpcode), -} - fn format_blackbox_function(call: &BlackBoxFuncCall) -> String { match call { BlackBoxFuncCall::AES128Encrypt { .. } => "aes128_encrypt".to_string(), @@ -127,11 +121,10 @@ fn format_brillig_opcode_kind(opcode: &BrilligOpcode) -> String { } } -pub(crate) fn format_opcode(opcode: &AcirOrBrilligOpcode) -> String { - match opcode { - AcirOrBrilligOpcode::Acir(opcode) => format!("acir::{}", format_acir_opcode_kind(opcode)), - AcirOrBrilligOpcode::Brillig(opcode) => { - format!("brillig::{}", format_brillig_opcode_kind(opcode)) - } - } +pub(crate) fn format_acir_opcode(opcode: &AcirOpcode) -> String { + format!("acir::{}", format_acir_opcode_kind(opcode)) +} + +pub(crate) fn format_brillig_opcode(opcode: &BrilligOpcode) -> String { + format!("brillig::{}", format_brillig_opcode_kind(opcode)) }