fix(ssa): Track all local allocations during flattening #6619

Merged · 4 commits · Nov 26, 2024
Changes from 2 commits
45 changes: 28 additions & 17 deletions compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs
@@ -131,7 +131,7 @@
//! v11 = mul v4, Field 12
//! v12 = add v10, v11
//! store v12 at v5 (new store)
use fxhash::FxHashMap as HashMap;
use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet};

use acvm::{acir::AcirField, acir::BlackBoxFunc, FieldElement};
use iter_extended::vecmap;
@@ -201,6 +201,13 @@ struct Context<'f> {
/// When processing a block, we pop this stack to get its arguments
/// and at the end we push the arguments for its successor
arguments_stack: Vec<Vec<ValueId>>,

/// Stores all allocations local to the current branch.
/// Since these allocations are local to the current branch (i.e. only defined within one branch of
/// an if expression), they should not be merged with their previous value or stored value in
/// the other branch since there is no such value. The ValueId here is that which is returned
/// by the allocate instruction.
local_allocations: HashSet<ValueId>,
}

#[derive(Clone)]
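
A quick illustration of what the new `local_allocations` field buys at store sites (toy value IDs and an invented `StoreLowering` enum, not the compiler's types): stores to an address allocated inside the current branch keep their value as-is, while stores to any other address get merged with the previous value under the branch condition, as the later hunk in this file does.

```rust
use std::collections::HashSet;

type ValueId = u32;

#[derive(Debug, PartialEq)]
enum StoreLowering {
    // The address was allocated inside the current branch: keep the store as-is.
    Direct { address: ValueId, value: ValueId },
    // The address outlives the branch: store `if condition { value } else { previous_value }`.
    Merged { address: ValueId, value: ValueId },
}

fn lower_store(local_allocations: &HashSet<ValueId>, address: ValueId, value: ValueId) -> StoreLowering {
    if local_allocations.contains(&address) {
        StoreLowering::Direct { address, value }
    } else {
        StoreLowering::Merged { address, value }
    }
}

fn main() {
    let locals: HashSet<ValueId> = [5].into_iter().collect();
    // v5 was allocated in this branch: no merge needed.
    assert_eq!(lower_store(&locals, 5, 12), StoreLowering::Direct { address: 5, value: 12 });
    // v3 comes from outside the branch: merge with its previous value.
    assert_eq!(lower_store(&locals, 3, 12), StoreLowering::Merged { address: 3, value: 12 });
}
```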
@@ -211,6 +218,8 @@ struct ConditionalBranch {
old_condition: ValueId,
// The condition of the branch
condition: ValueId,
// The allocations accumulated when processing the branch
local_allocations: HashSet<ValueId>,
}

struct ConditionalContext {
@@ -243,6 +252,7 @@ fn flatten_function_cfg(function: &mut Function, no_predicates: &HashMap<Functio
slice_sizes: HashMap::default(),
condition_stack: Vec::new(),
arguments_stack: Vec::new(),
local_allocations: HashSet::default(),
};
context.flatten(no_predicates);
}
@@ -317,7 +327,6 @@ impl<'f> Context<'f> {
// If this is not a separate variable, clippy gets confused and says the to_vec is
// unnecessary, when removing it actually causes an aliasing/mutability error.
let instructions = self.inserter.function.dfg[block].instructions().to_vec();
let mut previous_allocate_result = None;

for instruction in instructions.iter() {
if self.is_no_predicate(no_predicates, instruction) {
@@ -332,10 +341,10 @@
None,
im::Vector::new(),
);
self.push_instruction(*instruction, &mut previous_allocate_result);
self.push_instruction(*instruction);
self.insert_current_side_effects_enabled();
} else {
self.push_instruction(*instruction, &mut previous_allocate_result);
self.push_instruction(*instruction);
}
}
}
@@ -405,10 +414,12 @@ impl<'f> Context<'f> {
let old_condition = *condition;
let then_condition = self.inserter.resolve(old_condition);

let old_allocations = std::mem::take(&mut self.local_allocations);
let branch = ConditionalBranch {
old_condition,
condition: self.link_condition(then_condition),
last_block: *then_destination,
local_allocations: old_allocations,
};
let cond_context = ConditionalContext {
condition: then_condition,
@@ -435,11 +446,14 @@
);
let else_condition = self.link_condition(else_condition);

let old_allocations = std::mem::take(&mut self.local_allocations);
let else_branch = ConditionalBranch {
old_condition: cond_context.then_branch.old_condition,
condition: else_condition,
last_block: *block,
local_allocations: old_allocations,
};
cond_context.then_branch.local_allocations.clear();
cond_context.else_branch = Some(else_branch);
self.condition_stack.push(cond_context);

@@ -461,6 +475,7 @@
}

let mut else_branch = cond_context.else_branch.unwrap();
self.local_allocations = std::mem::take(&mut else_branch.local_allocations);
else_branch.last_block = *block;
cond_context.else_branch = Some(else_branch);
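
The branch bookkeeping above leans on `std::mem::take`, which replaces a value with its `Default` and hands back the old one. Below is a minimal sketch of that take-and-restore idiom with a toy context struct; the actual hand-off between the then- and else-branches in the hunks above is more involved and carries the saved set on the `ConditionalBranch` itself.

```rust
use std::collections::HashSet;

struct Ctx {
    local_allocations: HashSet<u32>,
}

fn main() {
    let mut ctx = Ctx { local_allocations: [1].into_iter().collect() };

    // Entering a branch: stash the enclosing scope's set and start empty.
    let saved = std::mem::take(&mut ctx.local_allocations);
    assert!(ctx.local_allocations.is_empty());

    // Work done inside the branch only populates the fresh set.
    ctx.local_allocations.insert(2);

    // Leaving the branch: drop the branch-local set and restore the saved one.
    ctx.local_allocations = saved;
    assert!(ctx.local_allocations.contains(&1));
    assert!(!ctx.local_allocations.contains(&2));
}
```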

Expand Down Expand Up @@ -593,22 +608,19 @@ impl<'f> Context<'f> {
/// `previous_allocate_result` should only be set to the result of an allocate instruction
/// if that instruction was the instruction immediately previous to this one - if there are
/// any instructions in between it should be None.
fn push_instruction(
&mut self,
id: InstructionId,
previous_allocate_result: &mut Option<ValueId>,
) {
fn push_instruction(&mut self, id: InstructionId) {
let (instruction, call_stack) = self.inserter.map_instruction(id);
let instruction = self.handle_instruction_side_effects(
instruction,
call_stack.clone(),
*previous_allocate_result,
);
let instruction = self.handle_instruction_side_effects(instruction, call_stack.clone());

let instruction_is_allocate = matches!(&instruction, Instruction::Allocate);
let entry = self.inserter.function.entry_block();
let results = self.inserter.push_instruction_value(instruction, id, entry, call_stack);
*previous_allocate_result = instruction_is_allocate.then(|| results.first());

// Remember an allocate was created local to this branch so that we do not try to merge store
// values across branches for it later.
if instruction_is_allocate {
self.local_allocations.insert(results.first());
}
}

/// If we are currently in a branch, we need to modify constrain instructions
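
For a sense of why a set replaces the removed `previous_allocate_result` argument: the old scheme only recognised a store whose address came from the instruction immediately before it, so any instruction in between forced a merge that was not needed. A toy side-by-side of the two schemes (invented instruction enum, not the real SSA types):

```rust
use std::collections::HashSet;

enum Inst {
    Allocate { result: u32 },
    Other,
    Store { address: u32 },
}

// Old scheme: only remember an allocate if it was the immediately preceding instruction.
fn store_needs_merge_old(block: &[Inst]) -> Vec<bool> {
    let mut previous_allocate_result = None;
    let mut merges = Vec::new();
    for inst in block {
        match inst {
            Inst::Allocate { result } => previous_allocate_result = Some(*result),
            Inst::Other => previous_allocate_result = None,
            Inst::Store { address } => {
                merges.push(Some(*address) != previous_allocate_result);
                previous_allocate_result = None;
            }
        }
    }
    merges
}

// New scheme: remember every allocation made in the current branch.
fn store_needs_merge_new(block: &[Inst]) -> Vec<bool> {
    let mut local_allocations = HashSet::new();
    let mut merges = Vec::new();
    for inst in block {
        match inst {
            Inst::Allocate { result } => {
                local_allocations.insert(*result);
            }
            Inst::Other => {}
            Inst::Store { address } => merges.push(!local_allocations.contains(address)),
        }
    }
    merges
}

fn main() {
    // allocate v5; <unrelated instruction>; store at v5
    let block = [Inst::Allocate { result: 5 }, Inst::Other, Inst::Store { address: 5 }];
    assert_eq!(store_needs_merge_old(&block), vec![true]); // spurious merge
    assert_eq!(store_needs_merge_new(&block), vec![false]); // no merge needed
}
```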
@@ -621,7 +633,6 @@
&mut self,
instruction: Instruction,
call_stack: CallStack,
previous_allocate_result: Option<ValueId>,
) -> Instruction {
if let Some(condition) = self.get_last_condition() {
match instruction {
@@ -652,7 +663,7 @@
Instruction::Store { address, value } => {
// If this instruction stores to an address that was allocated locally within the
// current branch, there is no previous value to load and we don't need a merge anyway.
if Some(address) == previous_allocate_result {
if self.local_allocations.contains(&address) {
Instruction::Store { address, value }
} else {
// Instead of storing `value`, store `if condition { value } else { previous_value }`
16 changes: 6 additions & 10 deletions compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs
@@ -56,7 +56,7 @@
//!
//! Repeating this algorithm for each block in the function in program order should result in
//! optimizing out most known loads. However, identifying all aliases correctly has been proven
//! undecidable in general (Landi, 1992). So this pass will not always optimize out all loads
//! that could theoretically be optimized out. This pass can be performed at any time in the
//! SSA optimization pipeline, although it will be more successful the simpler the program's CFG is.
//! This pass is currently performed several times to enable other passes - most notably being
@@ -117,7 +117,7 @@
/// Load and Store instructions that should be removed at the end of the pass.
///
/// We avoid removing individual instructions as we go since removing elements
/// from the middle of Vecs many times will be slower than a single call to `retain`.
instructions_to_remove: HashSet<InstructionId>,

/// Track a value's last load across all blocks.
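
The deferred-removal comment above describes a standard pattern: collect the IDs to drop while scanning, then prune once with `retain`. A tiny standalone illustration over plain integers (not the pass's instruction IDs):

```rust
use std::collections::HashSet;

fn main() {
    let mut instructions: Vec<u32> = (0..10).collect();

    // Decide what to delete while walking the list, but don't mutate it yet.
    let to_remove: HashSet<u32> = instructions.iter().copied().filter(|i| i % 3 == 0).collect();

    // One O(n) pass instead of repeated mid-vector removals.
    instructions.retain(|i| !to_remove.contains(i));
    assert_eq!(instructions, vec![1u32, 2, 4, 5, 7, 8]);
}
```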
@@ -415,13 +415,11 @@
let address = self.inserter.function.dfg.resolve(*address);
let value = self.inserter.function.dfg.resolve(*value);

// FIXME: This causes errors in the sha256 tests
//
// If there was another store to this instruction without any (unremoved) loads or
// function calls in-between, we can remove the previous store.
// if let Some(last_store) = references.last_stores.get(&address) {
// self.instructions_to_remove.insert(*last_store);
// }
if let Some(last_store) = references.last_stores.get(&address) {
self.instructions_to_remove.insert(*last_store);
}

if self.inserter.function.dfg.value_is_reference(value) {
if let Some(expression) = references.expressions.get(&value) {
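
The block re-enabled above drops a store that is overwritten by a later store to the same address before anything could observe it. A rough standalone sketch of that bookkeeping (simplified stand-in types; the real pass also has to reason about aliases), where a load of the address or any call invalidates the remembered store:

```rust
use std::collections::{HashMap, HashSet};

type InstructionId = usize;
type Address = u32;

enum Inst {
    Store { address: Address },
    Load { address: Address },
    Call,
}

fn find_dead_stores(block: &[Inst]) -> HashSet<InstructionId> {
    let mut last_stores: HashMap<Address, InstructionId> = HashMap::new();
    let mut to_remove = HashSet::new();

    for (id, inst) in block.iter().enumerate() {
        match inst {
            Inst::Store { address } => {
                // A second store with no intervening load or call makes the first one dead.
                if let Some(previous) = last_stores.insert(*address, id) {
                    to_remove.insert(previous);
                }
            }
            // A load observes the earlier store, so forget it.
            Inst::Load { address } => {
                last_stores.remove(address);
            }
            // A call could load from any reference, so forget everything.
            Inst::Call => last_stores.clear(),
        }
    }
    to_remove
}

fn main() {
    let block = [
        Inst::Store { address: 7 }, // dead: overwritten below with no load in between
        Inst::Store { address: 7 },
        Inst::Load { address: 7 },
        Inst::Store { address: 7 }, // live: the previous store was loaded first
    ];
    let expected: HashSet<InstructionId> = [0].into_iter().collect();
    assert_eq!(find_dead_stores(&block), expected);
}
```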
@@ -901,14 +899,12 @@
// in the same block, and the store is not needed before the later store.
// The rest of the stores are also removed as no loads are done within any blocks
// to the stored values.
//
// NOTE: This store is not removed due to the FIXME when handling Instruction::Store.
assert_eq!(count_stores(b1, &main.dfg), 1);
assert_eq!(count_stores(b1, &main.dfg), 0);

let b1_instructions = main.dfg[b1].instructions();

// We expect the last eq to be optimized out, only the store from above remains
assert_eq!(b1_instructions.len(), 1);
// We expect the last eq to be optimized out
assert_eq!(b1_instructions.len(), 0);
}

#[test]