Skip to content

Commit

Permalink
More self-review
Browse files Browse the repository at this point in the history
  • Loading branch information
michaelsproul committed Mar 25, 2024
1 parent cc891ce commit 1f90350
Show file tree
Hide file tree
Showing 11 changed files with 22 additions and 26 deletions.
6 changes: 3 additions & 3 deletions consensus/state_processing/src/epoch_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,10 @@ impl PreEpochCache {

// State root should already have been filled in by `process_slot`, except in the case
// of a `partial_state_advance`.
let decision_block_root = latest_block_header.canonical_root();
if decision_block_root.is_zero() {
return Err(EpochCacheError::ZeroDecisionBlock);
if latest_block_header.state_root.is_zero() {
return Err(EpochCacheError::ZeroStateRoot);
}
let decision_block_root = latest_block_header.canonical_root();

let epoch_key = EpochCacheKey {
epoch: state.next_epoch()?,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ pub fn process_effective_balance_updates<T: EthSpec>(
}
}

state.set_total_active_balance(next_epoch, new_total_active_balance);
state.set_total_active_balance(next_epoch, new_total_active_balance, spec);

Ok(())
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ pub fn process_registry_updates<T: EthSpec>(
// Dequeue validators for activation up to churn limit
let churn_limit = state.get_activation_churn_limit(spec)? as usize;

let epoch_cache = state.epoch_cache().clone();
let epoch_cache = state.epoch_cache();
let activation_queue = epoch_cache
.activation_queue()?
.get_validators_eligible_for_activation(state.finalized_checkpoint().epoch, churn_limit);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -278,7 +278,7 @@ pub fn process_epoch_single_pass<E: EthSpec>(
}

if conf.effective_balance_updates {
state.set_total_active_balance(next_epoch, next_epoch_total_active_balance);
state.set_total_active_balance(next_epoch, next_epoch_total_active_balance, spec);
*state.epoch_cache_mut() = next_epoch_cache.into_epoch_cache(
next_epoch_total_active_balance,
next_epoch_activation_queue,
Expand Down
6 changes: 6 additions & 0 deletions consensus/types/src/activation_queue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,15 @@ use std::collections::BTreeSet;
#[derive(Debug, PartialEq, Eq, Default, Clone, arbitrary::Arbitrary)]
pub struct ActivationQueue {
/// Validators represented by `(activation_eligibility_epoch, index)` in sorted order.
///
/// These validators are not *necessarily* going to be activated. Their activation depends
/// on how finalization is updated and on the `churn_limit`.
queue: BTreeSet<(Epoch, usize)>,
}

impl ActivationQueue {
/// Check if `validator` could be eligible for activation in the next epoch and add them to
/// the tentative activation queue if this is the case.
pub fn add_if_could_be_eligible_for_activation(
&mut self,
index: usize,
Expand All @@ -22,6 +27,7 @@ impl ActivationQueue {
}
}

/// Determine the final activation queue after accounting for finalization & the churn limit.
pub fn get_validators_eligible_for_activation(
&self,
finalized_epoch: Epoch,
Expand Down
11 changes: 5 additions & 6 deletions consensus/types/src/beacon_state.rs
Original file line number Diff line number Diff line change
Expand Up @@ -373,7 +373,6 @@ where
#[ssz(skip_serializing, skip_deserializing)]
#[tree_hash(skip_hashing)]
#[test_random(default)]
#[derivative(Clone(clone_with = "clone_default"))]
pub epoch_cache: EpochCache,
#[serde(skip_serializing, skip_deserializing)]
#[ssz(skip_serializing, skip_deserializing)]
Expand Down Expand Up @@ -1513,8 +1512,11 @@ impl<T: EthSpec> BeaconState<T> {
///
/// This should only be called when the total active balance has been computed as part of
/// single-pass epoch processing (or `process_rewards_and_penalties` for phase0).
pub fn set_total_active_balance(&mut self, epoch: Epoch, balance: u64) {
*self.total_active_balance_mut() = Some((epoch, balance));
///
/// This function ensures the balance is never set to 0, thus conforming to the spec.
pub fn set_total_active_balance(&mut self, epoch: Epoch, balance: u64, spec: &ChainSpec) {
let safe_balance = std::cmp::max(balance, spec.effective_balance_increment);
*self.total_active_balance_mut() = Some((epoch, safe_balance));
}

/// Build the total active balance cache for the current epoch if it is not already built.
Expand Down Expand Up @@ -1884,9 +1886,6 @@ impl<T: EthSpec> BeaconState<T> {
if config.slashings_cache {
*res.slashings_cache_mut() = self.slashings_cache().clone();
}
if config.epoch_cache {
*res.epoch_cache_mut() = self.epoch_cache().clone();
}
if config.tree_hash_cache {
*res.tree_hash_cache_mut() = self.tree_hash_cache().clone();
}
Expand Down
2 changes: 0 additions & 2 deletions consensus/types/src/beacon_state/clone_config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ pub struct CloneConfig {
pub pubkey_cache: bool,
pub exit_cache: bool,
pub slashings_cache: bool,
pub epoch_cache: bool,
pub tree_hash_cache: bool,
pub progressive_balances_cache: bool,
}
Expand All @@ -17,7 +16,6 @@ impl CloneConfig {
pubkey_cache: true,
exit_cache: true,
slashings_cache: true,
epoch_cache: true,
tree_hash_cache: true,
progressive_balances_cache: true,
}
Expand Down
2 changes: 1 addition & 1 deletion consensus/types/src/beacon_state/committee_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ pub struct CommitteeCache {
///
/// It can happen that states from different epochs computing the same cache have different
/// numbers of validators in `state.validators()` due to recent deposits. These new validators
/// cannot be active however and will always be ommitted from the shuffling. This function checks
/// cannot be active however and will always be omitted from the shuffling. This function checks
/// that two lists of shuffling positions are equivalent by ensuring that they are identical on all
/// common entries, and that new entries at the end are all `None`.
///
Expand Down
7 changes: 3 additions & 4 deletions consensus/types/src/beacon_state/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -223,15 +223,14 @@ async fn clone_config() {
.update_tree_hash_cache()
.expect("should update tree hash cache");

let num_caches = 7;
let num_caches = 6;
let all_configs = (0..2u8.pow(num_caches)).map(|i| CloneConfig {
committee_caches: (i & 1) != 0,
pubkey_cache: ((i >> 1) & 1) != 0,
exit_cache: ((i >> 2) & 1) != 0,
slashings_cache: ((i >> 3) & 1) != 0,
epoch_cache: ((i >> 4) & 1) != 0,
tree_hash_cache: ((i >> 5) & 1) != 0,
progressive_balances_cache: ((i >> 6) & 1) != 0,
tree_hash_cache: ((i >> 4) & 1) != 0,
progressive_balances_cache: ((i >> 5) & 1) != 0,
});

for config in all_configs {
Expand Down
6 changes: 0 additions & 6 deletions consensus/types/src/chain_spec.rs
Original file line number Diff line number Diff line change
Expand Up @@ -502,12 +502,6 @@ impl ChainSpec {
epoch.safe_add(1)?.safe_add(self.max_seed_lookahead)
}

#[allow(clippy::arithmetic_side_effects)]
pub const fn attestation_subnet_prefix_bits(&self) -> u32 {
let attestation_subnet_count_bits = self.attestation_subnet_count.ilog2();
self.attestation_subnet_extra_bits as u32 + attestation_subnet_count_bits
}

pub fn maximum_gossip_clock_disparity(&self) -> Duration {
Duration::from_millis(self.maximum_gossip_clock_disparity_millis)
}
Expand Down
2 changes: 1 addition & 1 deletion consensus/types/src/epoch_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ pub struct EpochCacheKey {
pub enum EpochCacheError {
IncorrectEpoch { cache: Epoch, state: Epoch },
IncorrectDecisionBlock { cache: Hash256, state: Hash256 },
ZeroDecisionBlock,
ZeroStateRoot,
ValidatorIndexOutOfBounds { validator_index: usize },
EffectiveBalanceOutOfBounds { effective_balance_eth: usize },
InvalidSlot { slot: Slot },
Expand Down

0 comments on commit 1f90350

Please sign in to comment.