From 8e4e499b5159c60fbde9ae0b8c03d8c910c402a0 Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Sat, 17 Sep 2022 09:49:12 +0200 Subject: [PATCH 001/263] start adding types --- consensus/types/src/beacon_block.rs | 7 +- consensus/types/src/beacon_block_body.rs | 8 +- consensus/types/src/eth_spec.rs | 14 ++- consensus/types/src/kzg_commitment.rs | 112 +++++++++++++++++++++++ consensus/types/src/kzg_proof.rs | 112 +++++++++++++++++++++++ consensus/types/src/lib.rs | 5 + 6 files changed, 252 insertions(+), 6 deletions(-) create mode 100644 consensus/types/src/kzg_commitment.rs create mode 100644 consensus/types/src/kzg_proof.rs diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index da8566dcb23..cbfbd250f9e 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -1,6 +1,6 @@ use crate::beacon_block_body::{ BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, BeaconBlockBodyRef, - BeaconBlockBodyRefMut, + BeaconBlockBodyRefMut, BeaconBlockBobyEip4844 }; use crate::test_utils::TestRandom; use crate::*; @@ -17,7 +17,7 @@ use tree_hash_derive::TreeHash; /// A block of the `BeaconChain`. #[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Eip4844), variant_attributes( derive( Debug, @@ -64,6 +64,8 @@ pub struct BeaconBlock = FullPayload> { pub body: BeaconBlockBodyAltair, #[superstruct(only(Merge), partial_getter(rename = "body_merge"))] pub body: BeaconBlockBodyMerge, + #[superstruct(only(Eip4844), partial_getter(rename = "body_eip4844"))] + pub body: BeaconBlockBodyEip4844, } impl> SignedRoot for BeaconBlock {} @@ -540,6 +542,7 @@ macro_rules! impl_from { impl_from!(BeaconBlockBase, >, >, |body: BeaconBlockBodyBase<_, _>| body.into()); impl_from!(BeaconBlockAltair, >, >, |body: BeaconBlockBodyAltair<_, _>| body.into()); impl_from!(BeaconBlockMerge, >, >, |body: BeaconBlockBodyMerge<_, _>| body.into()); +impl_from!(BeaconBlockEip4844, >, >, |body: BeaconBlockBodyEip4844<_, _>| body.into()); // We can clone blocks with payloads to blocks without payloads, without cloning the payload. macro_rules! impl_clone_as_blinded { diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 381a9bd43e3..a4fdbf6896c 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -13,7 +13,7 @@ use tree_hash_derive::TreeHash; /// /// This *superstruct* abstracts over the hard-fork. #[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Eip4844), variant_attributes( derive( Debug, @@ -47,14 +47,16 @@ pub struct BeaconBlockBody = FullPayload> pub attestations: VariableList, T::MaxAttestations>, pub deposits: VariableList, pub voluntary_exits: VariableList, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Eip4844))] pub sync_aggregate: SyncAggregate, // We flatten the execution payload so that serde can use the name of the inner type, // either `execution_payload` for full payloads, or `execution_payload_header` for blinded // payloads. 
- #[superstruct(only(Merge))] + #[superstruct(only(Merge, Eip4844))] #[serde(flatten)] pub execution_payload: Payload, + #[superstruct(only(Eip4844))] + pub blob_kzg_commitments: VariableList, #[superstruct(only(Base, Altair))] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index e6169760264..aafb3d236b6 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -95,6 +95,10 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + type GasLimitDenominator: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MinGasLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxExtraDataBytes: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * New in Eip4844 + */ + type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -222,6 +226,11 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + fn bytes_per_logs_bloom() -> usize { Self::BytesPerLogsBloom::to_usize() } + + /// Returns the `MAX_BLOBS_PER_BLOCK` constant for this specification. + fn max_blobs_per_block() -> usize { + Self::MaxBlobsPerBlock::to_usize() + } } /// Macro to inherit some type values from another EthSpec. @@ -265,6 +274,7 @@ impl EthSpec for MainnetEthSpec { type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch + type MaxBlobsPerBlock = U16; fn default_spec() -> ChainSpec { ChainSpec::mainnet() @@ -309,7 +319,8 @@ impl EthSpec for MinimalEthSpec { BytesPerLogsBloom, GasLimitDenominator, MinGasLimit, - MaxExtraDataBytes + MaxExtraDataBytes, + MaxBlobsPerBlock }); fn default_spec() -> ChainSpec { @@ -354,6 +365,7 @@ impl EthSpec for GnosisEthSpec { type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U2048; // 128 max attestations * 16 slots per epoch type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch + type MaxBlobsPerBlock = U16; fn default_spec() -> ChainSpec { ChainSpec::gnosis() diff --git a/consensus/types/src/kzg_commitment.rs b/consensus/types/src/kzg_commitment.rs new file mode 100644 index 00000000000..688f96e0507 --- /dev/null +++ b/consensus/types/src/kzg_commitment.rs @@ -0,0 +1,112 @@ +use std::fmt; +use serde::{Deserialize, Deserializer, Serializer}; +use ssz::{Decode, DecodeError, Encode}; +use tree_hash::TreeHash; + +const KZG_COMMITMENT_BYTES_LEN: usize = 48; + +#[derive(Default, Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] +#[serde(transparent)] +pub struct KzgCommitment(#[serde(with = "serde_kzg_commitment")] pub [u8; KZG_COMMITMENT_BYTES_LEN]); + +impl fmt::Display for KzgCommitment { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", eth2_serde_utils::hex::encode(&self.0)) + } +} + +impl From<[u8; KZG_COMMITMENT_BYTES_LEN]> for KzgCommitment { + fn from(bytes: [u8; KZG_COMMITMENT_BYTES_LEN]) -> Self { + Self(bytes) + } +} + +impl Into<[u8; KZG_COMMITMENT_BYTES_LEN]> for KzgCommitment { + fn into(self) -> [u8; KZG_COMMITMENT_BYTES_LEN] { + self.0 + } +} + +pub mod serde_kzg_commitment { + use serde::de::Error; + use super::*; + + pub fn serialize(bytes: &[u8; KZG_COMMITMENT_BYTES_LEN], serializer: S) -> Result + where + S: 
Serializer, + { + serializer.serialize_str(ð2_serde_utils::hex::encode(bytes)) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; KZG_COMMITMENT_BYTES_LEN], D::Error> + where + D: Deserializer<'de>, + { + let s: String = Deserialize::deserialize(deserializer)?; + + let bytes = eth2_serde_utils::hex::decode(&s).map_err(D::Error::custom)?; + + if bytes.len() != KZG_COMMITMENT_BYTES_LEN { + return Err(D::Error::custom(format!( + "incorrect byte length {}, expected {}", + bytes.len(), + KZG_COMMITMENT_BYTES_LEN + ))); + } + + let mut array = [0; KZG_COMMITMENT_BYTES_LEN]; + array[..].copy_from_slice(&bytes); + + Ok(array) + } +} + +impl Encode for KzgCommitment { + fn is_ssz_fixed_len() -> bool { + <[u8; KZG_COMMITMENT_BYTES_LEN] as Encode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <[u8; KZG_COMMITMENT_BYTES_LEN] as Encode>::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.0.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.0.ssz_append(buf) + } +} + +impl Decode for KzgCommitment { + fn is_ssz_fixed_len() -> bool { + <[u8; KZG_COMMITMENT_BYTES_LEN] as Decode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <[u8; KZG_COMMITMENT_BYTES_LEN] as Decode>::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + <[u8; KZG_COMMITMENT_BYTES_LEN]>::from_ssz_bytes(bytes).map(Self) + } +} + +impl TreeHash for KzgCommitment { + fn tree_hash_type() -> tree_hash::TreeHashType { + <[u8; KZG_COMMITMENT_BYTES_LEN]>::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> Vec { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + <[u8; KZG_COMMITMENT_BYTES_LEN]>::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} diff --git a/consensus/types/src/kzg_proof.rs b/consensus/types/src/kzg_proof.rs new file mode 100644 index 00000000000..50ee1266dab --- /dev/null +++ b/consensus/types/src/kzg_proof.rs @@ -0,0 +1,112 @@ +use std::fmt; +use serde::{Deserialize, Deserializer, Serializer}; +use ssz::{Decode, DecodeError, Encode}; +use tree_hash::TreeHash; + +const KZG_PROOF_BYTES_LEN: usize = 48; + +#[derive(Default, Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] +#[serde(transparent)] +pub struct KzgProof(#[serde(with = "serde_kzg_proof")] pub [u8; KZG_PROOF_BYTES_LEN]); + +impl fmt::Display for KzgProof { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", eth2_serde_utils::hex::encode(&self.0)) + } +} + +impl From<[u8; KZG_PROOF_BYTES_LEN]> for KzgProof { + fn from(bytes: [u8; KZG_PROOF_BYTES_LEN]) -> Self { + Self(bytes) + } +} + +impl Into<[u8; KZG_PROOF_BYTES_LEN]> for KzgProof { + fn into(self) -> [u8; KZG_PROOF_BYTES_LEN] { + self.0 + } +} + +pub mod serde_kzg_proof { + use serde::de::Error; + use super::*; + + pub fn serialize(bytes: &[u8; KZG_PROOF_BYTES_LEN], serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(ð2_serde_utils::hex::encode(bytes)) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; KZG_PROOF_BYTES_LEN], D::Error> + where + D: Deserializer<'de>, + { + let s: String = Deserialize::deserialize(deserializer)?; + + let bytes = eth2_serde_utils::hex::decode(&s).map_err(D::Error::custom)?; + + if bytes.len() != KZG_PROOF_BYTES_LEN { + return Err(D::Error::custom(format!( + "incorrect byte length {}, expected {}", + bytes.len(), + KZG_PROOF_BYTES_LEN + ))); + } + + let mut array = [0; KZG_PROOF_BYTES_LEN]; + 
array[..].copy_from_slice(&bytes);
+
+        Ok(array)
+    }
+}
+
+impl Encode for KzgProof {
+    fn is_ssz_fixed_len() -> bool {
+        <[u8; KZG_PROOF_BYTES_LEN] as Encode>::is_ssz_fixed_len()
+    }
+
+    fn ssz_fixed_len() -> usize {
+        <[u8; KZG_PROOF_BYTES_LEN] as Encode>::ssz_fixed_len()
+    }
+
+    fn ssz_bytes_len(&self) -> usize {
+        self.0.ssz_bytes_len()
+    }
+
+    fn ssz_append(&self, buf: &mut Vec) {
+        self.0.ssz_append(buf)
+    }
+}
+
+impl Decode for KzgProof {
+    fn is_ssz_fixed_len() -> bool {
+        <[u8; KZG_PROOF_BYTES_LEN] as Decode>::is_ssz_fixed_len()
+    }
+
+    fn ssz_fixed_len() -> usize {
+        <[u8; KZG_PROOF_BYTES_LEN] as Decode>::ssz_fixed_len()
+    }
+
+    fn from_ssz_bytes(bytes: &[u8]) -> Result {
+        <[u8; KZG_PROOF_BYTES_LEN]>::from_ssz_bytes(bytes).map(Self)
+    }
+}
+
+impl TreeHash for KzgProof {
+    fn tree_hash_type() -> tree_hash::TreeHashType {
+        <[u8; KZG_PROOF_BYTES_LEN]>::tree_hash_type()
+    }
+
+    fn tree_hash_packed_encoding(&self) -> Vec {
+        self.0.tree_hash_packed_encoding()
+    }
+
+    fn tree_hash_packing_factor() -> usize {
+        <[u8; KZG_PROOF_BYTES_LEN]>::tree_hash_packing_factor()
+    }
+
+    fn tree_hash_root(&self) -> tree_hash::Hash256 {
+        self.0.tree_hash_root()
+    }
+}
diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs
index f05012c0b74..c5a0e6ba078 100644
--- a/consensus/types/src/lib.rs
+++ b/consensus/types/src/lib.rs
@@ -14,6 +14,8 @@
 #[macro_use]
 extern crate lazy_static;
+extern crate core;
+
 #[macro_use]
 pub mod test_utils;
@@ -90,6 +92,9 @@ pub mod slot_data;
 #[cfg(feature = "sqlite")]
 pub mod sqlite;
+pub mod kzg_commitment;
+pub mod kzg_proof;
+
 use ethereum_types::{H160, H256};
 pub use crate::aggregate_and_proof::AggregateAndProof;

From 95203c51d41b89cdccc0228d2ba7050b892f727d Mon Sep 17 00:00:00 2001
From: Daniel Knopik
Date: Sat, 17 Sep 2022 11:26:18 +0200
Subject: [PATCH 002/263] fix some bugs, adjust structs

---
 beacon_node/store/src/partial_beacon_state.rs | 2 +-
 consensus/fork_choice/src/fork_choice.rs | 2 +-
 consensus/ssz/src/decode/impls.rs | 1 +
 consensus/ssz/src/encode/impls.rs | 1 +
 .../src/common/slash_validator.rs | 2 +-
 .../process_operations.rs | 2 +-
 .../src/per_epoch_processing.rs | 2 +-
 consensus/tree_hash/src/impls.rs | 1 +
 consensus/types/src/beacon_block.rs | 4 +-
 consensus/types/src/beacon_block_body.rs | 80 ++++++++++++++++++-
 consensus/types/src/beacon_state.rs | 11 ++-
 consensus/types/src/blob.rs | 9 +++
 consensus/types/src/bls_field_element.rs | 7 ++
 consensus/types/src/chain_spec.rs | 20 +++++
 consensus/types/src/eth_spec.rs | 6 +-
 consensus/types/src/fork_name.rs | 18 ++++-
 consensus/types/src/kzg_commitment.rs | 19 ++++-
 consensus/types/src/kzg_proof.rs | 19 ++++-
 consensus/types/src/lib.rs | 6 +-
 consensus/types/src/signed_beacon_block.rs | 64 ++++++++++++++-
 20 files changed, 256 insertions(+), 20 deletions(-)
 create mode 100644 consensus/types/src/blob.rs
 create mode 100644 consensus/types/src/bls_field_element.rs

diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs
index 010796afd5b..2d3d9cc7bf9 100644
--- a/beacon_node/store/src/partial_beacon_state.rs
+++ b/beacon_node/store/src/partial_beacon_state.rs
@@ -14,7 +14,7 @@ use types::*;
 ///
 /// Utilises lazy-loading from separate storage for its vector fields.
#[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Eip4844), variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) )] #[derive(Debug, PartialEq, Clone, Encode)] diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index f55a283ed1b..e2780e41a10 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -773,7 +773,7 @@ where (parent_justified, parent_finalized) } else { let justification_and_finalization_state = match block { - BeaconBlockRef::Merge(_) | BeaconBlockRef::Altair(_) => { + BeaconBlockRef::Eip4844(_) | BeaconBlockRef::Merge(_) | BeaconBlockRef::Altair(_) => { let participation_cache = per_epoch_processing::altair::ParticipationCache::new(state, spec) .map_err(Error::ParticipationCacheBuild)?; diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs index d91ddabe028..99f31858516 100644 --- a/consensus/ssz/src/decode/impls.rs +++ b/consensus/ssz/src/decode/impls.rs @@ -374,6 +374,7 @@ macro_rules! impl_decodable_for_u8_array { impl_decodable_for_u8_array!(4); impl_decodable_for_u8_array!(32); +impl_decodable_for_u8_array!(48); macro_rules! impl_for_vec { ($type: ty, $max_len: expr) => { diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs index cfd95ba40df..1faf9123f5b 100644 --- a/consensus/ssz/src/encode/impls.rs +++ b/consensus/ssz/src/encode/impls.rs @@ -483,6 +483,7 @@ macro_rules! impl_encodable_for_u8_array { impl_encodable_for_u8_array!(4); impl_encodable_for_u8_array!(32); +impl_encodable_for_u8_array!(48); #[cfg(test)] mod tests { diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index e9d94a10625..966a45860ff 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -45,7 +45,7 @@ pub fn slash_validator( validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?; let proposer_reward = match state { BeaconState::Base(_) => whistleblower_reward.safe_div(spec.proposer_reward_quotient)?, - BeaconState::Altair(_) | BeaconState::Merge(_) => whistleblower_reward + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Eip4844(_) => whistleblower_reward .safe_mul(PROPOSER_WEIGHT)? 
.safe_div(WEIGHT_DENOMINATOR)?, }; diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 31a4ac1fb42..6f2f7831db0 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -230,7 +230,7 @@ pub fn process_attestations<'a, T: EthSpec, Payload: ExecPayload>( BeaconBlockBodyRef::Base(_) => { base::process_attestations(state, block_body.attestations(), verify_signatures, spec)?; } - BeaconBlockBodyRef::Altair(_) | BeaconBlockBodyRef::Merge(_) => { + BeaconBlockBodyRef::Altair(_) | BeaconBlockBodyRef::Merge(_) | BeaconBlockBodyRef::Eip4844(_) => { altair::process_attestations( state, block_body.attestations(), diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index cb90c67b56d..fa710171671 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -37,7 +37,7 @@ pub fn process_epoch( match state { BeaconState::Base(_) => base::process_epoch(state, spec), - BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_epoch(state, spec), + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Eip4844(_) => altair::process_epoch(state, spec), } } diff --git a/consensus/tree_hash/src/impls.rs b/consensus/tree_hash/src/impls.rs index 00fed489c7e..263a93334ec 100644 --- a/consensus/tree_hash/src/impls.rs +++ b/consensus/tree_hash/src/impls.rs @@ -81,6 +81,7 @@ macro_rules! impl_for_lt_32byte_u8_array { impl_for_lt_32byte_u8_array!(4); impl_for_lt_32byte_u8_array!(32); +impl_for_lt_32byte_u8_array!(48); impl TreeHash for U128 { fn tree_hash_type() -> TreeHashType { diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index cbfbd250f9e..6984c25611d 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -1,6 +1,6 @@ use crate::beacon_block_body::{ BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, BeaconBlockBodyRef, - BeaconBlockBodyRefMut, BeaconBlockBobyEip4844 + BeaconBlockBodyRefMut, BeaconBlockBodyEip4844 }; use crate::test_utils::TestRandom; use crate::*; @@ -189,6 +189,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRef<'a, T, Payload> { BeaconBlockRef::Base { .. } => ForkName::Base, BeaconBlockRef::Altair { .. } => ForkName::Altair, BeaconBlockRef::Merge { .. } => ForkName::Merge, + BeaconBlockRef::Eip4844 { .. } => ForkName::Eip4844, }; if fork_at_slot == object_fork { @@ -573,6 +574,7 @@ macro_rules! impl_clone_as_blinded { impl_clone_as_blinded!(BeaconBlockBase, >, >); impl_clone_as_blinded!(BeaconBlockAltair, >, >); impl_clone_as_blinded!(BeaconBlockMerge, >, >); +impl_clone_as_blinded!(BeaconBlockEip4844, >, >); // A reference to a full beacon block can be cloned into a blinded beacon block, without cloning the // execution payload. diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index a4fdbf6896c..eaa99718cbf 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -8,6 +8,7 @@ use std::marker::PhantomData; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::kzg_commitment::KzgCommitment; /// The body of a `BeaconChain` block, containing operations. 
/// @@ -56,7 +57,7 @@ pub struct BeaconBlockBody = FullPayload> #[serde(flatten)] pub execution_payload: Payload, #[superstruct(only(Eip4844))] - pub blob_kzg_commitments: VariableList, + pub blob_kzg_commitments: VariableList, #[superstruct(only(Base, Altair))] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] @@ -71,6 +72,7 @@ impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { BeaconBlockBodyRef::Base { .. } => ForkName::Base, BeaconBlockBodyRef::Altair { .. } => ForkName::Altair, BeaconBlockBodyRef::Merge { .. } => ForkName::Merge, + BeaconBlockBodyRef::Eip4844 { .. } => ForkName::Eip4844, } } } @@ -253,6 +255,48 @@ impl From>> } } +impl From>> +for ( + BeaconBlockBodyEip4844>, + Option>, +) +{ + fn from(body: BeaconBlockBodyEip4844>) -> Self { + let BeaconBlockBodyEip4844 { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayload { execution_payload }, + blob_kzg_commitments, + } = body; + + ( + BeaconBlockBodyEip4844 { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayload { + execution_payload_header: From::from(&execution_payload), + }, + blob_kzg_commitments, + }, + None, + ) + } +} + // We can clone a full block into a blinded block, without cloning the payload. impl BeaconBlockBodyBase> { pub fn clone_as_blinded(&self) -> BeaconBlockBodyBase> { @@ -300,6 +344,40 @@ impl BeaconBlockBodyMerge> { } } +impl BeaconBlockBodyEip4844> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyEip4844> { + let BeaconBlockBodyEip4844 { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayload { execution_payload }, + blob_kzg_commitments, + } = self; + + BeaconBlockBodyEip4844 { + randao_reveal: randao_reveal.clone(), + eth1_data: eth1_data.clone(), + graffiti: *graffiti, + proposer_slashings: proposer_slashings.clone(), + attester_slashings: attester_slashings.clone(), + attestations: attestations.clone(), + deposits: deposits.clone(), + voluntary_exits: voluntary_exits.clone(), + sync_aggregate: sync_aggregate.clone(), + execution_payload: BlindedPayload { + execution_payload_header: From::from(execution_payload), + }, + blob_kzg_commitments: blob_kzg_commitments.clone(), + } + } +} + impl From>> for ( BeaconBlockBody>, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index fca200312f1..0d9920ec96b 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -172,7 +172,7 @@ impl From for Hash256 { /// The state of the `BeaconChain` at some slot. #[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Eip4844), variant_attributes( derive( Derivative, @@ -250,9 +250,9 @@ where pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Eip4844))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Eip4844))] pub current_epoch_participation: VariableList, // Finality @@ -389,6 +389,7 @@ impl BeaconState { BeaconState::Base { .. } => ForkName::Base, BeaconState::Altair { .. 
} => ForkName::Altair, BeaconState::Merge { .. } => ForkName::Merge, + BeaconState::Eip4844 { .. } => ForkName::Eip4844, }; if fork_at_slot == object_fork { @@ -1102,6 +1103,7 @@ impl BeaconState { BeaconState::Base(state) => (&mut state.validators, &mut state.balances), BeaconState::Altair(state) => (&mut state.validators, &mut state.balances), BeaconState::Merge(state) => (&mut state.validators, &mut state.balances), + BeaconState::Eip4844(state) => (&mut state.validators, &mut state.balances), } } @@ -1298,12 +1300,14 @@ impl BeaconState { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.current_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.current_epoch_participation), + BeaconState::Eip4844(state) => Ok(&mut state.current_epoch_participation), } } else if epoch == self.previous_epoch() { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.previous_epoch_participation), + BeaconState::Eip4844(state) => Ok(&mut state.previous_epoch_participation), } } else { Err(BeaconStateError::EpochOutOfBounds) @@ -1588,6 +1592,7 @@ impl BeaconState { BeaconState::Base(inner) => BeaconState::Base(inner.clone()), BeaconState::Altair(inner) => BeaconState::Altair(inner.clone()), BeaconState::Merge(inner) => BeaconState::Merge(inner.clone()), + BeaconState::Eip4844(inner) => BeaconState::Eip4844(inner.clone()), }; if config.committee_caches { *res.committee_caches_mut() = self.committee_caches().clone(); diff --git a/consensus/types/src/blob.rs b/consensus/types/src/blob.rs new file mode 100644 index 00000000000..8ea3468ff8d --- /dev/null +++ b/consensus/types/src/blob.rs @@ -0,0 +1,9 @@ +use ssz_types::VariableList; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use ssz::{Decode, DecodeError, Encode}; +use crate::bls_field_element::BlsFieldElement; +use crate::EthSpec; + +#[derive(Default, Debug, PartialEq, Hash, Clone, Serialize, Deserialize)] +#[serde(transparent)] +pub struct Blob(pub VariableList); \ No newline at end of file diff --git a/consensus/types/src/bls_field_element.rs b/consensus/types/src/bls_field_element.rs new file mode 100644 index 00000000000..6693b5765b8 --- /dev/null +++ b/consensus/types/src/bls_field_element.rs @@ -0,0 +1,7 @@ +use crate::Uint256; +use serde::{Deserialize, Serialize}; +use ssz::{Decode, DecodeError, Encode}; + +#[derive(Default, Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] +#[serde(transparent)] +pub struct BlsFieldElement(pub Uint256); \ No newline at end of file diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index b2ba24ac3ee..b894bef12e9 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -150,6 +150,12 @@ pub struct ChainSpec { pub terminal_block_hash_activation_epoch: Epoch, pub safe_slots_to_import_optimistically: u64, + /* + * Eip4844 hard fork params + */ + pub eip4844_fork_epoch: Option, + pub eip4844_fork_version: [u8; 4], + /* * Networking */ @@ -245,6 +251,7 @@ impl ChainSpec { ForkName::Base => self.genesis_fork_version, ForkName::Altair => self.altair_fork_version, ForkName::Merge => self.bellatrix_fork_version, + ForkName::Eip4844 => self.eip4844_fork_version, } } @@ -254,6 +261,7 @@ impl ChainSpec { ForkName::Base => Some(Epoch::new(0)), ForkName::Altair => self.altair_fork_epoch, ForkName::Merge => 
self.bellatrix_fork_epoch, + ForkName::Eip4844 => self.eip4844_fork_epoch, } } @@ -263,6 +271,7 @@ impl ChainSpec { BeaconState::Base(_) => self.inactivity_penalty_quotient, BeaconState::Altair(_) => self.inactivity_penalty_quotient_altair, BeaconState::Merge(_) => self.inactivity_penalty_quotient_bellatrix, + BeaconState::Eip4844(_) => self.inactivity_penalty_quotient_bellatrix, } } @@ -275,6 +284,7 @@ impl ChainSpec { BeaconState::Base(_) => self.proportional_slashing_multiplier, BeaconState::Altair(_) => self.proportional_slashing_multiplier_altair, BeaconState::Merge(_) => self.proportional_slashing_multiplier_bellatrix, + BeaconState::Eip4844(_) => self.proportional_slashing_multiplier_bellatrix, } } @@ -287,6 +297,7 @@ impl ChainSpec { BeaconState::Base(_) => self.min_slashing_penalty_quotient, BeaconState::Altair(_) => self.min_slashing_penalty_quotient_altair, BeaconState::Merge(_) => self.min_slashing_penalty_quotient_bellatrix, + BeaconState::Eip4844(_) => self.min_slashing_penalty_quotient_bellatrix, } } @@ -568,6 +579,12 @@ impl ChainSpec { terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, + /* + * Eip4844 hard fork params + */ + eip4844_fork_epoch: None, + eip4844_fork_version: [0x03, 0x00, 0x00, 0x00], + /* * Network specific */ @@ -778,6 +795,9 @@ impl ChainSpec { terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, + eip4844_fork_epoch: None, + eip4844_fork_version: [0x03, 0x00, 0x00, 0x64], + /* * Network specific */ diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index aafb3d236b6..9d8a765c204 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -99,6 +99,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + * New in Eip4844 */ type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -275,6 +276,7 @@ impl EthSpec for MainnetEthSpec { type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch type MaxBlobsPerBlock = U16; + type FieldElementsPerBlob = U4096; fn default_spec() -> ChainSpec { ChainSpec::mainnet() @@ -320,7 +322,8 @@ impl EthSpec for MinimalEthSpec { GasLimitDenominator, MinGasLimit, MaxExtraDataBytes, - MaxBlobsPerBlock + MaxBlobsPerBlock, + FieldElementsPerBlob }); fn default_spec() -> ChainSpec { @@ -366,6 +369,7 @@ impl EthSpec for GnosisEthSpec { type MaxPendingAttestations = U2048; // 128 max attestations * 16 slots per epoch type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch type MaxBlobsPerBlock = U16; + type FieldElementsPerBlob = U4096; fn default_spec() -> ChainSpec { ChainSpec::gnosis() diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index e97b08309b7..7afe0d75305 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -11,11 +11,12 @@ pub enum ForkName { Base, Altair, Merge, + Eip4844 } impl ForkName { pub fn list_all() -> Vec { - vec![ForkName::Base, ForkName::Altair, ForkName::Merge] + vec![ForkName::Base, ForkName::Altair, ForkName::Merge, ForkName::Eip4844] } /// Set the activation slots in the given `ChainSpec` so that the fork named by `self` @@ -38,6 +39,12 @@ impl ForkName { spec.bellatrix_fork_epoch = 
Some(Epoch::new(0)); spec } + ForkName::Eip4844 => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.eip4844_fork_epoch = Some(Epoch::new(0)); + spec + } } } @@ -49,6 +56,7 @@ impl ForkName { ForkName::Base => None, ForkName::Altair => Some(ForkName::Base), ForkName::Merge => Some(ForkName::Altair), + ForkName::Eip4844 => Some(ForkName::Merge), } } @@ -59,7 +67,8 @@ impl ForkName { match self { ForkName::Base => Some(ForkName::Altair), ForkName::Altair => Some(ForkName::Merge), - ForkName::Merge => None, + ForkName::Merge => Some(ForkName::Eip4844), + ForkName::Eip4844 => None, } } } @@ -101,6 +110,10 @@ macro_rules! map_fork_name_with { let (value, extra_data) = $body; ($t::Merge(value), extra_data) } + ForkName::Eip4844 => { + let (value, extra_data) = $body; + ($t::Eip4844(value), extra_data) + } } }; } @@ -124,6 +137,7 @@ impl Display for ForkName { ForkName::Base => "phase0".fmt(f), ForkName::Altair => "altair".fmt(f), ForkName::Merge => "bellatrix".fmt(f), + ForkName::Eip4844 => "eip4844".fmt(f), } } } diff --git a/consensus/types/src/kzg_commitment.rs b/consensus/types/src/kzg_commitment.rs index 688f96e0507..cfddc95b3d9 100644 --- a/consensus/types/src/kzg_commitment.rs +++ b/consensus/types/src/kzg_commitment.rs @@ -1,14 +1,21 @@ use std::fmt; -use serde::{Deserialize, Deserializer, Serializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use ssz::{Decode, DecodeError, Encode}; use tree_hash::TreeHash; +use crate::test_utils::{RngCore, TestRandom}; const KZG_COMMITMENT_BYTES_LEN: usize = 48; -#[derive(Default, Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] #[serde(transparent)] pub struct KzgCommitment(#[serde(with = "serde_kzg_commitment")] pub [u8; KZG_COMMITMENT_BYTES_LEN]); +impl Default for KzgCommitment { + fn default() -> Self { + KzgCommitment([0; 48]) + } +} + impl fmt::Display for KzgCommitment { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", eth2_serde_utils::hex::encode(&self.0)) @@ -110,3 +117,11 @@ impl TreeHash for KzgCommitment { self.0.tree_hash_root() } } + +impl TestRandom for KzgCommitment { + fn random_for_test(rng: &mut impl RngCore) -> Self { + let mut bytes = [0; KZG_COMMITMENT_BYTES_LEN]; + rng.fill_bytes(&mut bytes); + Self(bytes) + } +} diff --git a/consensus/types/src/kzg_proof.rs b/consensus/types/src/kzg_proof.rs index 50ee1266dab..5bec8e2f86e 100644 --- a/consensus/types/src/kzg_proof.rs +++ b/consensus/types/src/kzg_proof.rs @@ -1,11 +1,12 @@ use std::fmt; -use serde::{Deserialize, Deserializer, Serializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use ssz::{Decode, DecodeError, Encode}; use tree_hash::TreeHash; +use crate::test_utils::{RngCore, TestRandom}; const KZG_PROOF_BYTES_LEN: usize = 48; -#[derive(Default, Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] #[serde(transparent)] pub struct KzgProof(#[serde(with = "serde_kzg_proof")] pub [u8; KZG_PROOF_BYTES_LEN]); @@ -15,6 +16,12 @@ impl fmt::Display for KzgProof { } } +impl Default for KzgProof { + fn default() -> Self { + KzgProof([0; 48]) + } +} + impl From<[u8; KZG_PROOF_BYTES_LEN]> for KzgProof { fn from(bytes: [u8; KZG_PROOF_BYTES_LEN]) -> Self { Self(bytes) @@ -110,3 +117,11 @@ impl TreeHash for KzgProof { self.0.tree_hash_root() } } + +impl TestRandom for KzgProof { + fn 
random_for_test(rng: &mut impl RngCore) -> Self { + let mut bytes = [0; KZG_PROOF_BYTES_LEN]; + rng.fill_bytes(&mut bytes); + Self(bytes) + } +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index c5a0e6ba078..8085b1460f7 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -94,6 +94,8 @@ pub mod sqlite; pub mod kzg_commitment; pub mod kzg_proof; +pub mod bls_field_element; +pub mod blob; use ethereum_types::{H160, H256}; @@ -104,11 +106,11 @@ pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BeaconBlockRef, - BeaconBlockRefMut, + BeaconBlockRefMut, BeaconBlockEip4844 }; pub use crate::beacon_block_body::{ BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, - BeaconBlockBodyRef, BeaconBlockBodyRefMut, + BeaconBlockBodyRef, BeaconBlockBodyRefMut, BeaconBlockBodyEip4844 }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 5c40c4685c3..4ab74ac2119 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -38,7 +38,7 @@ impl From for Hash256 { /// A `BeaconBlock` and a signature from its proposer. #[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Eip4844), variant_attributes( derive( Debug, @@ -72,6 +72,8 @@ pub struct SignedBeaconBlock = FullPayload, #[superstruct(only(Merge), partial_getter(rename = "message_merge"))] pub message: BeaconBlockMerge, + #[superstruct(only(Eip4844), partial_getter(rename = "message_eip4844"))] + pub message: BeaconBlockEip4844, pub signature: Signature, } @@ -129,6 +131,9 @@ impl> SignedBeaconBlock { BeaconBlock::Merge(message) => { SignedBeaconBlock::Merge(SignedBeaconBlockMerge { message, signature }) } + BeaconBlock::Eip4844(message) => { + SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844 { message, signature }) + } } } @@ -307,6 +312,60 @@ impl SignedBeaconBlockMerge> { } } +impl SignedBeaconBlockEip4844> { + pub fn into_full_block( + self, + execution_payload: ExecutionPayload, + ) -> SignedBeaconBlockEip4844> { + let SignedBeaconBlockEip4844 { + message: + BeaconBlockEip4844 { + slot, + proposer_index, + parent_root, + state_root, + body: + BeaconBlockBodyEip4844 { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayload { .. 
}, + blob_kzg_commitments, + }, + }, + signature, + } = self; + SignedBeaconBlockEip4844 { + message: BeaconBlockEip4844 { + slot, + proposer_index, + parent_root, + state_root, + body: BeaconBlockBodyEip4844 { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayload { execution_payload }, + blob_kzg_commitments, + }, + }, + signature, + } + } +} + impl SignedBeaconBlock> { pub fn try_into_full_block( self, @@ -318,6 +377,9 @@ impl SignedBeaconBlock> { SignedBeaconBlock::Merge(block) => { SignedBeaconBlock::Merge(block.into_full_block(execution_payload?)) } + SignedBeaconBlock::Eip4844(block) => { + SignedBeaconBlock::Eip4844(block.into_full_block(execution_payload?)) + } }; Some(full_block) } From ca1e17b386d225ae1430db02377536801fc906af Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Sat, 17 Sep 2022 12:23:03 +0200 Subject: [PATCH 003/263] it compiles! --- beacon_node/beacon_chain/src/beacon_chain.rs | 23 ++++++++++- beacon_node/lighthouse_network/src/config.rs | 2 +- .../src/rpc/codec/ssz_snappy.rs | 16 +++++++- .../lighthouse_network/src/rpc/protocol.rs | 10 +++++ .../lighthouse_network/src/types/pubsub.rs | 6 ++- beacon_node/store/src/partial_beacon_state.rs | 39 ++++++++++++++++--- consensus/types/src/beacon_state.rs | 8 ++-- consensus/types/src/fork_context.rs | 10 +++++ consensus/types/src/lib.rs | 2 +- .../src/signing_method/web3signer.rs | 6 +++ 10 files changed, 107 insertions(+), 15 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6637b8fd531..39b2279c471 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3353,7 +3353,7 @@ impl BeaconChain { // allows it to run concurrently with things like attestation packing. let prepare_payload_handle = match &state { BeaconState::Base(_) | BeaconState::Altair(_) => None, - BeaconState::Merge(_) => { + BeaconState::Merge(_) | BeaconState::Eip4844(_) => { let prepare_payload_handle = get_execution_payload(self.clone(), &state, proposer_index, builder_params)?; Some(prepare_payload_handle) @@ -3609,6 +3609,27 @@ impl BeaconChain { .ok_or(BlockProductionError::MissingExecutionPayload)?, }, }), + BeaconState::Eip4844(_) => BeaconBlock::Eip4844(BeaconBlockEip4844 { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyEip4844 { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: execution_payload + .ok_or(BlockProductionError::MissingExecutionPayload)?, + blob_kzg_commitments: todo!(), // part of partial block or?? 
+                },
+            }),
         };
         let block = SignedBeaconBlock::from_block(
diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs
index 263ef0c7cb9..4e8b9bb85c3 100644
--- a/beacon_node/lighthouse_network/src/config.rs
+++ b/beacon_node/lighthouse_network/src/config.rs
@@ -296,7 +296,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos
     match fork_context.current_fork() {
         // according to: https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#the-gossip-domain-gossipsub
         // the derivation of the message-id remains the same in the merge
-        ForkName::Altair | ForkName::Merge => {
+        ForkName::Altair | ForkName::Merge | ForkName::Eip4844 => {
             let topic_len_bytes = topic_bytes.len().to_le_bytes();
             let mut vec = Vec::with_capacity(
                 prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(),
diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs
index a46a05a8ce3..e952eece300 100644
--- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs
+++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs
@@ -17,7 +17,7 @@ use std::sync::Arc;
 use tokio_util::codec::{Decoder, Encoder};
 use types::{
     EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair,
-    SignedBeaconBlockBase, SignedBeaconBlockMerge,
+    SignedBeaconBlockBase, SignedBeaconBlockMerge, SignedBeaconBlockEip4844
 };
 use unsigned_varint::codec::Uvi;
@@ -407,6 +407,10 @@ fn context_bytes(
         return match **ref_box_block {
             // NOTE: If you are adding another fork type here, be sure to modify the
             //       `fork_context.to_context_bytes()` function to support it as well!
+            SignedBeaconBlock::Eip4844 { .. } => {
+                // Eip4844 context being `None` implies that the eip4844 fork never happened.
+                fork_context.to_context_bytes(ForkName::Eip4844)
+            }
             SignedBeaconBlock::Merge { .. } => {
                 // Merge context being `None` implies that "merge never happened".
                 fork_context.to_context_bytes(ForkName::Merge)
             }
@@ -586,6 +590,11 @@ fn handle_v2_response(
                     decoded_buffer,
                 )?),
             )))),
+            ForkName::Eip4844 => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
+                SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844::from_ssz_bytes(
+                    decoded_buffer,
+                )?),
+            )))),
         },
         Protocol::BlocksByRoot => match fork_name {
             ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
@@ -601,6 +610,11 @@ fn handle_v2_response(
                     decoded_buffer,
                 )?),
             )))),
+            ForkName::Eip4844 => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
+                SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844::from_ssz_bytes(
+                    decoded_buffer,
+                )?),
+            )))),
         },
         _ => Err(RPCError::ErrorResponse(
             RPCResponseErrorCode::InvalidRequest,
diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs
index 81960214b16..352348f74f7 100644
--- a/beacon_node/lighthouse_network/src/rpc/protocol.rs
+++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs
@@ -71,6 +71,10 @@ lazy_static! {
         + types::ExecutionPayload::::max_execution_payload_size() // adding max size of execution payload (~16gb)
         + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field
+    pub static ref SIGNED_BEACON_BLOCK_EIP4844_MAX: usize =
+        // Size of a full eip4844 block
+        *SIGNED_BEACON_BLOCK_MERGE_MAX + 999999999; //TODO
+
     pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize =
         VariableList::::from(Vec::::new())
             .as_ssz_bytes()
@@ -102,6 +106,7 @@ lazy_static! {
 pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M
 /// The maximum bytes that can be sent across the RPC post-merge.
 pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M
+pub(crate) const MAX_RPC_SIZE_POST_EIP4844: usize = 20 * 1_048_576; // 20M
 /// The protocol prefix the RPC protocol id.
 const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req";
 /// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte).
@@ -113,6 +118,7 @@ const REQUEST_TIMEOUT: u64 = 15;
 /// Returns the maximum bytes that can be sent across the RPC.
 pub fn max_rpc_size(fork_context: &ForkContext) -> usize {
     match fork_context.current_fork() {
+        ForkName::Eip4844 => MAX_RPC_SIZE_POST_EIP4844,
         ForkName::Merge => MAX_RPC_SIZE_POST_MERGE,
         ForkName::Altair | ForkName::Base => MAX_RPC_SIZE,
     }
 }
@@ -135,6 +141,10 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits {
             *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks
             *SIGNED_BEACON_BLOCK_MERGE_MAX, // Merge block is larger than base and altair blocks
         ),
+        ForkName::Eip4844 => RpcLimits::new(
+            *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks
+            *SIGNED_BEACON_BLOCK_EIP4844_MAX, // Eip4844 block is larger than base, altair and merge blocks
+        ),
     }
 }
diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs
index a01072f8e4e..ef3b53abfb8 100644
--- a/beacon_node/lighthouse_network/src/types/pubsub.rs
+++ b/beacon_node/lighthouse_network/src/types/pubsub.rs
@@ -11,7 +11,7 @@ use std::sync::Arc;
 use types::{
     Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing,
     SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase,
-    SignedBeaconBlockMerge, SignedContributionAndProof, SignedVoluntaryExit, SubnetId,
+    SignedBeaconBlockMerge, SignedBeaconBlockEip4844, SignedContributionAndProof, SignedVoluntaryExit, SubnetId,
     SyncCommitteeMessage, SyncSubnetId,
 };
@@ -167,6 +167,10 @@ impl PubsubMessage {
                         SignedBeaconBlockMerge::from_ssz_bytes(data)
                             .map_err(|e| format!("{:?}", e))?,
                     ),
+                    Some(ForkName::Eip4844) => SignedBeaconBlock::::Eip4844(
+                        SignedBeaconBlockEip4844::from_ssz_bytes(data)
+                            .map_err(|e| format!("{:?}", e))?,
+                    ),
                     None => {
                         return Err(format!(
                             "Unknown gossipsub fork digest: {:?}",
diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs
index 2d3d9cc7bf9..46bc0274f40 100644
--- a/beacon_node/store/src/partial_beacon_state.rs
+++ b/beacon_node/store/src/partial_beacon_state.rs
@@ -66,9 +66,9 @@ where
     pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>,
     // Participation (Altair and later)
-    #[superstruct(only(Altair, Merge))]
+    #[superstruct(only(Altair, Merge, Eip4844))]
     pub previous_epoch_participation: VariableList,
-    #[superstruct(only(Altair, Merge))]
+    #[superstruct(only(Altair, Merge, Eip4844))]
     pub current_epoch_participation: VariableList,
     // Finality
@@ -78,17 +78,17 @@ where
     pub finalized_checkpoint: Checkpoint,
     // Inactivity
-    #[superstruct(only(Altair, Merge))]
+    #[superstruct(only(Altair, Merge, Eip4844))]
     pub inactivity_scores: VariableList,
     // Light-client sync committees
-    #[superstruct(only(Altair, Merge))]
+    #[superstruct(only(Altair, Merge, Eip4844))]
     pub current_sync_committee: Arc>,
-    #[superstruct(only(Altair, Merge))]
+    #[superstruct(only(Altair, Merge, Eip4844))]
     pub next_sync_committee: Arc>,
     // Execution
-
#[superstruct(only(Merge))] + #[superstruct(only(Merge, Eip4844))] pub latest_execution_payload_header: ExecutionPayloadHeader, } @@ -178,6 +178,20 @@ impl PartialBeaconState { latest_execution_payload_header ] ), + BeaconState::Eip4844(s) => impl_from_state_forgetful!( + s, + outer, + Eip4844, + PartialBeaconStateEip4844, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header + ] + ), } } @@ -365,6 +379,19 @@ impl TryInto> for PartialBeaconState { latest_execution_payload_header ] ), + PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!( + inner, + Eip4844, + BeaconStateEip4844, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header + ] + ), }; Ok(state) } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 0d9920ec96b..dd0ac3b71c2 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -267,17 +267,17 @@ where // Inactivity #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Eip4844))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Eip4844))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Eip4844))] pub next_sync_committee: Arc>, // Execution - #[superstruct(only(Merge))] + #[superstruct(only(Merge, Eip4844))] pub latest_execution_payload_header: ExecutionPayloadHeader, // Caching (not in the spec) diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 52b9294c8ca..c5316a65674 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -47,6 +47,16 @@ impl ForkContext { )); } + if spec.bellatrix_fork_epoch.is_some() { + fork_to_digest.push(( + ForkName::Merge, + ChainSpec::compute_fork_digest( + spec.bellatrix_fork_version, + genesis_validators_root, + ), + )); + } + let fork_to_digest: HashMap = fork_to_digest.into_iter().collect(); let digest_to_fork = fork_to_digest diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 8085b1460f7..93c9b39afbc 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -151,7 +151,7 @@ pub use crate::shuffling_id::AttestationShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{ SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockHash, - SignedBeaconBlockMerge, SignedBlindedBeaconBlock, + SignedBeaconBlockMerge, SignedBlindedBeaconBlock, SignedBeaconBlockEip4844, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index cf02ae0c323..0de260ecfcf 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -26,6 +26,7 @@ pub enum ForkName { Phase0, Altair, Bellatrix, + Eip4844, } #[derive(Debug, PartialEq, Serialize)] @@ -90,6 +91,11 @@ impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { block: None, 
block_header: Some(block.block_header()), }), + BeaconBlock::Eip4844(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Eip4844, + block: None, + block_header: Some(block.block_header()), + }), } } From fe6be28e6bf3a134d7b1d7789348b259e48e3734 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Sat, 17 Sep 2022 13:20:18 +0200 Subject: [PATCH 004/263] beacon: consensus: implement engine api getBlobs --- beacon_node/execution_layer/src/engine_api.rs | 3 +- .../execution_layer/src/engine_api/http.rs | 32 +++++++++++- .../src/engine_api/json_structures.rs | 50 ++++++++++++++++++- beacon_node/execution_layer/src/lib.rs | 8 +-- consensus/types/src/beacon_block_body.rs | 8 +-- consensus/types/src/blob.rs | 18 ++++++- consensus/types/src/execution_payload.rs | 20 +++++++- consensus/types/src/payload.rs | 32 ++++++++++-- consensus/types/src/signed_beacon_block.rs | 4 +- 9 files changed, 155 insertions(+), 20 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index ba0a37736b0..86829e5f687 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -7,9 +7,10 @@ use serde::{Deserialize, Serialize}; use strum::IntoStaticStr; pub use types::{ Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, - Hash256, Uint256, VariableList, + Hash256, Uint256, VariableList, kzg_proof::KzgProof, kzg_commitment::KzgCommitment, blob::Blob, }; + pub mod auth; pub mod http; pub mod json_structures; diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 0f848a7716f..e37aef72d70 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -3,13 +3,14 @@ use super::*; use crate::auth::Auth; use crate::json_structures::*; +use eth2::lighthouse::Eth1Block; use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; use std::time::Duration; -use types::EthSpec; +use types::{EthSpec, FullPayload, execution_payload::BlobsBundle}; pub use deposit_log::{DepositLog, Log}; pub use reqwest::Client; @@ -34,6 +35,9 @@ pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); +pub const ENGINE_GET_BLOBS_BUNDLE_V1: &str = "engine_getBlobsBundleV1"; +pub const ENGINE_GET_BLOBS_BUNDLE_TIMEOUT: Duration = Duration::from_secs(2); + pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); @@ -664,6 +668,32 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn get_blobs_bundle_v1( + &self, + payload_id: PayloadId, + ) -> Result, Error> { + let params = json!([JsonPayloadIdRequest::from(payload_id)]); + + let response: JsonBlobBundlesV1 = self + .rpc_request(ENGINE_GET_BLOBS_BUNDLE_V1, params, ENGINE_GET_BLOBS_BUNDLE_TIMEOUT) + .await?; + + Ok(response.into()) + } + + pub async fn get_full_payload( + &self, + payload_id: PayloadId, + ) -> Result, Error> { + let payload = self.get_payload_v1(payload_id).await; + let blobs = self.get_blobs_bundle_v1(payload_id).await; + + Ok(FullPayload{ + execution_payload: payload?, + blobs_bundle: blobs?.into(), + }) + } + pub async fn forkchoice_updated_v1( &self, forkchoice_state: 
ForkChoiceState, diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 31aa79f0559..4907acee3e8 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,6 +1,6 @@ use super::*; use serde::{Deserialize, Serialize}; -use types::{EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList}; +use types::{EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList, execution_payload::BlobsBundle}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -269,6 +269,54 @@ impl From for PayloadAttributes { } } +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(bound = "T: EthSpec", rename_all = "camelCase")] +pub struct JsonBlobBundlesV1 { + pub block_hash: Hash256, + pub kzgs: Vec, + pub blobs: Vec>, + pub aggregated_proof: KzgProof, +} + +impl From> for JsonBlobBundlesV1 { + fn from(p: BlobsBundle) -> Self { + // Use this verbose deconstruction pattern to ensure no field is left unused. + let BlobsBundle { + block_hash, + aggregated_proof, + blobs, + kzgs, + } = p; + + Self { + block_hash, + aggregated_proof, + blobs, + kzgs, + } + } +} + +impl From> for BlobsBundle { + fn from(j: JsonBlobBundlesV1) -> Self { + // Use this verbose deconstruction pattern to ensure no field is left unused. + let JsonBlobBundlesV1 { + block_hash, + aggregated_proof, + blobs, + kzgs, + } = j; + + Self { + block_hash, + aggregated_proof, + blobs, + kzgs, + } + } +} + + #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonForkChoiceStateV1 { diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 3bdca82ad03..5d1190f81c4 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -835,10 +835,10 @@ impl ExecutionLayer { engine .api - .get_payload_v1::(payload_id) + .get_full_payload::(payload_id) .await .map(|full_payload| { - if full_payload.fee_recipient != suggested_fee_recipient { + if full_payload.execution_payload.fee_recipient != suggested_fee_recipient { error!( self.log(), "Inconsistent fee recipient"; @@ -847,11 +847,11 @@ impl ExecutionLayer { indicate that fees are being diverted to another address. 
Please \ ensure that the value of suggested_fee_recipient is set correctly and \ that the Execution Engine is trusted.", - "fee_recipient" => ?full_payload.fee_recipient, + "fee_recipient" => ?full_payload.execution_payload.fee_recipient, "suggested_fee_recipient" => ?suggested_fee_recipient, ); } - if f(self, &full_payload).is_some() { + if f(self, &full_payload.execution_payload).is_some() { warn!( self.log(), "Duplicate payload cached, this might indicate redundant proposal \ diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index eaa99718cbf..d4f49d3773b 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -232,7 +232,7 @@ impl From>> deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayload { execution_payload, blobs_bundle }, } = body; ( @@ -272,7 +272,7 @@ for ( deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayload { execution_payload, blobs_bundle }, blob_kzg_commitments, } = body; @@ -324,7 +324,7 @@ impl BeaconBlockBodyMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayload { execution_payload, blobs_bundle }, } = self; BeaconBlockBodyMerge { @@ -356,7 +356,7 @@ impl BeaconBlockBodyEip4844> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayload { execution_payload, blobs_bundle }, blob_kzg_commitments, } = self; diff --git a/consensus/types/src/blob.rs b/consensus/types/src/blob.rs index 8ea3468ff8d..efe243859db 100644 --- a/consensus/types/src/blob.rs +++ b/consensus/types/src/blob.rs @@ -1,9 +1,23 @@ use ssz_types::VariableList; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use ssz::{Decode, DecodeError, Encode}; +use crate::test_utils::RngCore; use crate::bls_field_element::BlsFieldElement; -use crate::EthSpec; +use crate::{EthSpec, Uint256}; +use crate::test_utils::TestRandom; #[derive(Default, Debug, PartialEq, Hash, Clone, Serialize, Deserialize)] #[serde(transparent)] -pub struct Blob(pub VariableList); \ No newline at end of file +pub struct Blob(pub VariableList); + +impl TestRandom for Blob { + fn random_for_test(rng: &mut impl RngCore) -> Self { + let mut res = Blob(VariableList::empty()); + for i in 0..4096 { + let slice = ethereum_types::U256([rng.next_u64(), rng.next_u64(), rng.next_u64(), rng.next_u64()]); + let elem =BlsFieldElement(slice); + res.0.push(elem); + } + res + } +} \ No newline at end of file diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 412e5a8df3a..9d6c42c9162 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -1,4 +1,4 @@ -use crate::{test_utils::TestRandom, *}; +use crate::{test_utils::TestRandom, test_utils::RngCore, *, kzg_commitment::KzgCommitment, kzg_proof::KzgProof, blob::Blob}; use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; @@ -43,6 +43,24 @@ pub struct ExecutionPayload { pub transactions: Transactions, } +#[derive( + Default, Debug, Clone, Serialize, Deserialize, Derivative, +)] +#[serde(bound = "T: EthSpec")] +pub struct BlobsBundle { + pub block_hash: Hash256, + pub kzgs: Vec, + pub blobs: Vec>, + pub aggregated_proof: KzgProof, +} + + +impl TestRandom for 
BlobsBundle { + fn random_for_test(rng: &mut impl RngCore) -> Self { + todo!() + } +} + impl ExecutionPayload { pub fn empty() -> Self { Self::default() diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 114ca02ecff..134fe381521 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -1,4 +1,4 @@ -use crate::{test_utils::TestRandom, *}; +use crate::{test_utils::TestRandom, test_utils::RngCore, *}; use derivative::Derivative; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; @@ -8,6 +8,8 @@ use std::fmt::Debug; use std::hash::Hash; use test_random_derive::TestRandom; use tree_hash::TreeHash; +use execution_payload::BlobsBundle; +use core::hash::Hasher; #[derive(Debug)] pub enum BlockType { @@ -218,16 +220,37 @@ impl Encode for BlindedPayload { } } -#[derive(Default, Debug, Clone, Serialize, Deserialize, TestRandom, Derivative)] -#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +#[derive(Default, Debug, Clone, Serialize, Deserialize)] #[serde(bound = "T: EthSpec")] pub struct FullPayload { pub execution_payload: ExecutionPayload, + pub blobs_bundle: Option>, +} + +impl TestRandom for FullPayload { + fn random_for_test(rng: &mut impl RngCore) -> Self { + todo!() + } +} + +impl PartialEq for FullPayload { + fn eq(&self, other: &FullPayload) -> bool { + todo!() + } +} + +impl Hash for FullPayload { + fn hash(&self, into: &mut H) { + todo!() + } } impl From> for FullPayload { fn from(execution_payload: ExecutionPayload) -> Self { - Self { execution_payload } + Self { + execution_payload, + blobs_bundle: None, + } } } @@ -265,6 +288,7 @@ impl Decode for FullPayload { fn from_ssz_bytes(bytes: &[u8]) -> Result { Ok(FullPayload { execution_payload: Decode::from_ssz_bytes(bytes)?, + blobs_bundle: None, }) } } diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 4ab74ac2119..55e0e8afd28 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -304,7 +304,7 @@ impl SignedBeaconBlockMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayload { execution_payload: execution_payload, blobs_bundle: None }, }, }, signature, @@ -357,7 +357,7 @@ impl SignedBeaconBlockEip4844> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayload { execution_payload: execution_payload, blobs_bundle: None }, blob_kzg_commitments, }, }, From dcfae6c5cfc0ac8a1e4ce2ad04fc1d34da759bcd Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Sat, 17 Sep 2022 13:29:20 +0200 Subject: [PATCH 005/263] implement From for Payload --- beacon_node/beacon_chain/src/beacon_chain.rs | 3 ++- consensus/types/src/payload.rs | 7 +++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 39b2279c471..7418a66c17a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -99,6 +99,7 @@ use types::*; pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; pub use fork_choice::CountUnrealized; +use types::kzg_commitment::KzgCommitment; pub type ForkChoiceError = fork_choice::Error; @@ -3627,7 +3628,7 @@ impl BeaconChain { .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: execution_payload 
.ok_or(BlockProductionError::MissingExecutionPayload)?, - blob_kzg_commitments: todo!(), // part of partial block or?? + blob_kzg_commitments: blob_kzg_commitments.into(), }, }), }; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 134fe381521..7974c5ef364 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -32,6 +32,7 @@ pub trait ExecPayload: + Hash + TryFrom> + From> + + From> + Send + 'static { @@ -172,6 +173,12 @@ impl From> for BlindedPayload { } } +impl From> for BlindedPayload { + fn from(full_payload: FullPayload) -> Self { + full_payload.execution_payload.into() + } +} + impl TreeHash for BlindedPayload { fn tree_hash_type() -> tree_hash::TreeHashType { >::tree_hash_type() From 8473f08d10844c3dd1547ccd01f83d7e7d9caa47 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Sat, 17 Sep 2022 14:10:15 +0200 Subject: [PATCH 006/263] beacon: consensus: implement engine api getBlobs --- .../beacon_chain/src/execution_payload.rs | 10 +++- .../execution_layer/src/engine_api/http.rs | 13 ----- beacon_node/execution_layer/src/lib.rs | 58 +++++++++++++++++-- consensus/types/src/beacon_block_body.rs | 8 +-- consensus/types/src/payload.rs | 9 +-- consensus/types/src/signed_beacon_block.rs | 4 +- 6 files changed, 72 insertions(+), 30 deletions(-) diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 2221d1fc7cd..84c69022708 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -24,7 +24,7 @@ use state_processing::per_block_processing::{ use std::sync::Arc; use tokio::task::JoinHandle; use tree_hash::TreeHash; -use types::*; +use types::{*, execution_payload::BlobsBundle}; pub type PreparePayloadResult = Result; pub type PreparePayloadHandle = JoinHandle>>; @@ -483,5 +483,13 @@ where .await .map_err(BlockProductionError::GetPayloadFailed)?; + /* + TODO: fetch blob bundles from el engine for block building + let suggested_fee_recipient = execution_layer.get_suggested_fee_recipient(proposer_index).await; + let blobs = execution_layer.get_blob_bundles(parent_hash, timestamp, random, suggested_fee_recipient) + .await + .map_err(BlockProductionError::GetPayloadFailed)?; + */ + Ok(execution_payload) } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index e37aef72d70..bc4d790d8e2 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -681,19 +681,6 @@ impl HttpJsonRpc { Ok(response.into()) } - pub async fn get_full_payload( - &self, - payload_id: PayloadId, - ) -> Result, Error> { - let payload = self.get_payload_v1(payload_id).await; - let blobs = self.get_blobs_bundle_v1(payload_id).await; - - Ok(FullPayload{ - execution_payload: payload?, - blobs_bundle: blobs?.into(), - }) - } - pub async fn forkchoice_updated_v1( &self, forkchoice_state: ForkChoiceState, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 5d1190f81c4..7eabee8199f 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -20,6 +20,7 @@ use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; +use types::execution_payload::BlobsBundle; use std::collections::HashMap; use std::future::Future; use std::io::Write; @@ 
-759,6 +760,55 @@ impl ExecutionLayer { .await } + pub async fn get_blob_bundles( + &self, + parent_hash: ExecutionBlockHash, + timestamp: u64, + prev_randao: Hash256, + suggested_fee_recipient: Address, + ) -> Result, Error> { + debug!( + self.log(), + "Issuing engine_getPayload"; + "suggested_fee_recipient" => ?suggested_fee_recipient, + "prev_randao" => ?prev_randao, + "timestamp" => timestamp, + "parent_hash" => ?parent_hash, + ); + self.engine() + .request(|engine| async move { + let payload_id = if let Some(id) = engine + .get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient) + .await + { + // The payload id has been cached for this engine. + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, + &[metrics::HIT], + ); + id + } else { + error!( + self.log(), + "Exec engine unable to produce blobs, did you call get_payload before?", + ); + return Err(ApiError::PayloadIdUnavailable); + }; + + engine + .api + .get_blobs_bundle_v1::(payload_id) + .await + .map(|bundle| { + // TODO verify the blob bundle here? + bundle.into() + }) + }) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } + async fn get_full_payload_with>( &self, parent_hash: ExecutionBlockHash, @@ -835,10 +885,10 @@ impl ExecutionLayer { engine .api - .get_full_payload::(payload_id) + .get_payload_v1::(payload_id) .await .map(|full_payload| { - if full_payload.execution_payload.fee_recipient != suggested_fee_recipient { + if full_payload.fee_recipient != suggested_fee_recipient { error!( self.log(), "Inconsistent fee recipient"; @@ -847,11 +897,11 @@ impl ExecutionLayer { indicate that fees are being diverted to another address. Please \ ensure that the value of suggested_fee_recipient is set correctly and \ that the Execution Engine is trusted.", - "fee_recipient" => ?full_payload.execution_payload.fee_recipient, + "fee_recipient" => ?full_payload.fee_recipient, "suggested_fee_recipient" => ?suggested_fee_recipient, ); } - if f(self, &full_payload.execution_payload).is_some() { + if f(self, &full_payload).is_some() { warn!( self.log(), "Duplicate payload cached, this might indicate redundant proposal \ diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index d4f49d3773b..ec973b9f801 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -232,7 +232,7 @@ impl From>> deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload, blobs_bundle }, + execution_payload: FullPayload { execution_payload }, } = body; ( @@ -272,7 +272,7 @@ for ( deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload, blobs_bundle }, + execution_payload: FullPayload { execution_payload}, blob_kzg_commitments, } = body; @@ -324,7 +324,7 @@ impl BeaconBlockBodyMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload, blobs_bundle }, + execution_payload: FullPayload { execution_payload }, } = self; BeaconBlockBodyMerge { @@ -356,7 +356,7 @@ impl BeaconBlockBodyEip4844> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload, blobs_bundle }, + execution_payload: FullPayload { execution_payload }, blob_kzg_commitments, } = self; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 7974c5ef364..23db2d961f8 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -230,8 +230,7 @@ impl 
Encode for BlindedPayload { #[derive(Default, Debug, Clone, Serialize, Deserialize)] #[serde(bound = "T: EthSpec")] pub struct FullPayload { - pub execution_payload: ExecutionPayload, - pub blobs_bundle: Option>, + pub execution_payload: ExecutionPayload } impl TestRandom for FullPayload { @@ -255,8 +254,7 @@ impl Hash for FullPayload { impl From> for FullPayload { fn from(execution_payload: ExecutionPayload) -> Self { Self { - execution_payload, - blobs_bundle: None, + execution_payload } } } @@ -294,8 +292,7 @@ impl Decode for FullPayload { fn from_ssz_bytes(bytes: &[u8]) -> Result { Ok(FullPayload { - execution_payload: Decode::from_ssz_bytes(bytes)?, - blobs_bundle: None, + execution_payload: Decode::from_ssz_bytes(bytes)? }) } } diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 55e0e8afd28..4ab74ac2119 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -304,7 +304,7 @@ impl SignedBeaconBlockMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload: execution_payload, blobs_bundle: None }, + execution_payload: FullPayload { execution_payload }, }, }, signature, @@ -357,7 +357,7 @@ impl SignedBeaconBlockEip4844> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload: execution_payload, blobs_bundle: None }, + execution_payload: FullPayload { execution_payload }, blob_kzg_commitments, }, }, From bcc738cb9d1e7049c74d2d5110a104cf7433da65 Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Sat, 17 Sep 2022 14:31:57 +0200 Subject: [PATCH 007/263] progress on gossip stuff --- .../src/behaviour/gossip_cache.rs | 7 +++ .../lighthouse_network/src/types/pubsub.rs | 4 ++ .../lighthouse_network/src/types/topics.rs | 6 +- consensus/types/src/blob.rs | 53 +++++++++++++++++- consensus/types/src/blobs_sidecar.rs | 16 ++++++ consensus/types/src/bls_field_element.rs | 56 ++++++++++++++++++- consensus/types/src/lib.rs | 2 + consensus/types/src/signed_blobs_sidecar.rs | 13 +++++ 8 files changed, 153 insertions(+), 4 deletions(-) create mode 100644 consensus/types/src/blobs_sidecar.rs create mode 100644 consensus/types/src/signed_blobs_sidecar.rs diff --git a/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs b/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs index 4842605f7aa..1c6ffd022d0 100644 --- a/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs @@ -20,6 +20,8 @@ pub struct GossipCache { topic_msgs: HashMap, Key>>, /// Timeout for blocks. beacon_block: Option, + /// Timeout for blobs. + blobs_sidecar: Option, /// Timeout for aggregate attestations. aggregates: Option, /// Timeout for attestations. @@ -41,6 +43,8 @@ pub struct GossipCacheBuilder { default_timeout: Option, /// Timeout for blocks. beacon_block: Option, + /// Timeout for blob sidecars. + blobs_sidecar: Option, /// Timeout for aggregate attestations. aggregates: Option, /// Timeout for attestations. 
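The gossip cache changes above follow a builder pattern: every topic kind gets an optional expiry, and anything left unset falls back to the shared default. The stand-alone sketch below (illustrative Toy* names, std only, not the Lighthouse types) shows that fallback for the new blobs_sidecar entry.

use std::time::Duration;

#[derive(Debug, Clone, Copy)]
enum ToyGossipKind {
    BeaconBlock,
    BlobsSidecar,
}

#[derive(Default)]
struct ToyCacheBuilder {
    default_timeout: Option<Duration>,
    beacon_block: Option<Duration>,
    blobs_sidecar: Option<Duration>,
}

struct ToyCache {
    beacon_block: Option<Duration>,
    blobs_sidecar: Option<Duration>,
}

impl ToyCacheBuilder {
    fn default_timeout(mut self, t: Duration) -> Self {
        self.default_timeout = Some(t);
        self
    }
    fn blobs_sidecar_timeout(mut self, t: Duration) -> Self {
        self.blobs_sidecar = Some(t);
        self
    }
    fn build(self) -> ToyCache {
        // Unset per-topic timeouts inherit the default, mirroring
        // `blobs_sidecar: blobs_sidecar.or(default_timeout)` in the patch.
        ToyCache {
            beacon_block: self.beacon_block.or(self.default_timeout),
            blobs_sidecar: self.blobs_sidecar.or(self.default_timeout),
        }
    }
}

impl ToyCache {
    fn timeout_for(&self, kind: ToyGossipKind) -> Option<Duration> {
        match kind {
            ToyGossipKind::BeaconBlock => self.beacon_block,
            ToyGossipKind::BlobsSidecar => self.blobs_sidecar,
        }
    }
}

fn main() {
    let cache = ToyCacheBuilder::default()
        .default_timeout(Duration::from_secs(12))
        .blobs_sidecar_timeout(Duration::from_secs(6))
        .build();
    assert_eq!(cache.timeout_for(ToyGossipKind::BlobsSidecar), Some(Duration::from_secs(6)));
    assert_eq!(cache.timeout_for(ToyGossipKind::BeaconBlock), Some(Duration::from_secs(12)));
}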
@@ -117,6 +121,7 @@ impl GossipCacheBuilder { let GossipCacheBuilder { default_timeout, beacon_block, + blobs_sidecar, aggregates, attestation, voluntary_exit, @@ -129,6 +134,7 @@ impl GossipCacheBuilder { expirations: DelayQueue::default(), topic_msgs: HashMap::default(), beacon_block: beacon_block.or(default_timeout), + blobs_sidecar: blobs_sidecar.or(default_timeout), aggregates: aggregates.or(default_timeout), attestation: attestation.or(default_timeout), voluntary_exit: voluntary_exit.or(default_timeout), @@ -151,6 +157,7 @@ impl GossipCache { pub fn insert(&mut self, topic: GossipTopic, data: Vec) { let expire_timeout = match topic.kind() { GossipKind::BeaconBlock => self.beacon_block, + GossipKind::BlobsSidecar => self.blobs_sidecar, GossipKind::BeaconAggregateAndProof => self.aggregates, GossipKind::Attestation(_) => self.attestation, GossipKind::VoluntaryExit => self.voluntary_exit, diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index ef3b53abfb8..3519dafcfdd 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -14,11 +14,14 @@ use types::{ SignedBeaconBlockMerge, SignedBeaconBlockEip4844, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; +use types::signed_blobs_sidecar::SignedBlobsSidecar; #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. BeaconBlock(Arc>), + /// Gossipsub message providing notification of a new blobs sidecar. + BlobsSidecars(Arc>), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. AggregateAndProofAttestation(Box>), /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id. 
@@ -106,6 +109,7 @@ impl PubsubMessage { pub fn kind(&self) -> GossipKind { match self { PubsubMessage::BeaconBlock(_) => GossipKind::BeaconBlock, + PubsubMessage::BlobsSidecars(_) => GossipKind::BlobsSidecar, PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof, PubsubMessage::Attestation(attestation_data) => { GossipKind::Attestation(attestation_data.0) diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 825b1088b29..901a193e326 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -11,6 +11,7 @@ use crate::Subnet; pub const TOPIC_PREFIX: &str = "eth2"; pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; +pub const BLOBS_SIDECAR_TOPIC: &str = "blobs_sidecar"; pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof"; pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_"; pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; @@ -19,8 +20,9 @@ pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; pub const SIGNED_CONTRIBUTION_AND_PROOF_TOPIC: &str = "sync_committee_contribution_and_proof"; pub const SYNC_COMMITTEE_PREFIX_TOPIC: &str = "sync_committee_"; -pub const CORE_TOPICS: [GossipKind; 6] = [ +pub const CORE_TOPICS: [GossipKind; 7] = [ GossipKind::BeaconBlock, + GossipKind::BlobsSidecar, GossipKind::BeaconAggregateAndProof, GossipKind::VoluntaryExit, GossipKind::ProposerSlashing, @@ -47,6 +49,8 @@ pub struct GossipTopic { pub enum GossipKind { /// Topic for publishing beacon blocks. BeaconBlock, + /// Topic for publishing blob sidecars. + BlobsSidecar, /// Topic for publishing aggregate attestations and proofs. BeaconAggregateAndProof, /// Topic for publishing raw attestations on a particular subnet. 
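For context on the new BLOBS_SIDECAR_TOPIC constant: a gossip topic kind ultimately becomes a wire-level topic string of the form `/eth2/<fork_digest>/<name>/<encoding>` (per the consensus networking spec). The helper below is a minimal stand-in showing how `blobs_sidecar` slots into that format; it is not the actual `GossipTopic` implementation.

#[derive(Debug, Clone, Copy)]
enum ToyKind {
    BeaconBlock,
    BlobsSidecar,
}

fn topic_name(kind: ToyKind) -> &'static str {
    match kind {
        ToyKind::BeaconBlock => "beacon_block",
        ToyKind::BlobsSidecar => "blobs_sidecar",
    }
}

/// Build the full pubsub topic string for a 4-byte fork digest.
fn full_topic(fork_digest: [u8; 4], kind: ToyKind) -> String {
    let digest_hex: String = fork_digest.iter().map(|b| format!("{:02x}", b)).collect();
    format!("/eth2/{}/{}/ssz_snappy", digest_hex, topic_name(kind))
}

fn main() {
    let topic = full_topic([0xb5, 0x30, 0x3f, 0x2a], ToyKind::BlobsSidecar);
    assert_eq!(topic, "/eth2/b5303f2a/blobs_sidecar/ssz_snappy");
    println!("{topic}");
}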
diff --git a/consensus/types/src/blob.rs b/consensus/types/src/blob.rs index efe243859db..f44fbdd26d4 100644 --- a/consensus/types/src/blob.rs +++ b/consensus/types/src/blob.rs @@ -1,6 +1,7 @@ use ssz_types::VariableList; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use ssz::{Decode, DecodeError, Encode}; +use tree_hash::TreeHash; use crate::test_utils::RngCore; use crate::bls_field_element::BlsFieldElement; use crate::{EthSpec, Uint256}; @@ -20,4 +21,54 @@ impl TestRandom for Blob { } res } -} \ No newline at end of file +} + +impl Encode for Blob { + fn is_ssz_fixed_len() -> bool { + as Encode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + as Encode>::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.0.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.0.ssz_append(buf) + } +} + +impl Decode for Blob { + fn is_ssz_fixed_len() -> bool { + as Decode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + as Decode>::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + >::from_ssz_bytes(bytes).map(Self) + } +} + +impl TreeHash for Blob { + fn tree_hash_type() -> tree_hash::TreeHashType { + >::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> Vec { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + >::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} diff --git a/consensus/types/src/blobs_sidecar.rs b/consensus/types/src/blobs_sidecar.rs new file mode 100644 index 00000000000..4d39c1af896 --- /dev/null +++ b/consensus/types/src/blobs_sidecar.rs @@ -0,0 +1,16 @@ +use ssz_types::VariableList; +use crate::{EthSpec, Hash256, Slot}; +use crate::blob::Blob; +use crate::kzg_proof::KzgProof; +use serde::{Serialize, Deserialize}; +use ssz_derive::{Encode, Decode}; +use tree_hash_derive::TreeHash; +use derivative::Derivative; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, Derivative)] +pub struct BlobsSidecar { + beacon_block_root: Hash256, + beacon_block_slot: Slot, + blobs: VariableList, T::MaxBlobsPerBlock>, + kzg_aggregate_proof: KzgProof, +} \ No newline at end of file diff --git a/consensus/types/src/bls_field_element.rs b/consensus/types/src/bls_field_element.rs index 6693b5765b8..5dea91e07bb 100644 --- a/consensus/types/src/bls_field_element.rs +++ b/consensus/types/src/bls_field_element.rs @@ -1,7 +1,59 @@ -use crate::Uint256; +use crate::{EthSpec, Uint256}; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; +use tree_hash::TreeHash; #[derive(Default, Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] #[serde(transparent)] -pub struct BlsFieldElement(pub Uint256); \ No newline at end of file +pub struct BlsFieldElement(pub Uint256); + + +impl Encode for BlsFieldElement { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.0.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.0.ssz_append(buf) + } +} + +impl Decode for BlsFieldElement { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + ::from_ssz_bytes(bytes).map(Self) + } +} + +impl TreeHash for BlsFieldElement { + fn tree_hash_type() -> tree_hash::TreeHashType { + ::tree_hash_type() + } + + fn 
tree_hash_packed_encoding(&self) -> Vec { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + ::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 93c9b39afbc..87f1b6f76b7 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -96,6 +96,8 @@ pub mod kzg_commitment; pub mod kzg_proof; pub mod bls_field_element; pub mod blob; +pub mod signed_blobs_sidecar; +pub mod blobs_sidecar; use ethereum_types::{H160, H256}; diff --git a/consensus/types/src/signed_blobs_sidecar.rs b/consensus/types/src/signed_blobs_sidecar.rs new file mode 100644 index 00000000000..da9af06c768 --- /dev/null +++ b/consensus/types/src/signed_blobs_sidecar.rs @@ -0,0 +1,13 @@ +use bls::Signature; +use crate::blobs_sidecar::BlobsSidecar; +use crate::EthSpec; +use serde::{Serialize, Deserialize}; +use ssz_derive::{Encode, Decode}; +use tree_hash_derive::TreeHash; +use derivative::Derivative; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, Derivative)] +pub struct SignedBlobsSidecar { + pub message: BlobsSidecar, + pub signature: Signature, +} \ No newline at end of file From acace8ab311d23760a87d2bc6ac79d5b097906d9 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Sat, 17 Sep 2022 14:55:18 +0200 Subject: [PATCH 008/263] network: blobs by range message --- .../lighthouse_network/src/behaviour/mod.rs | 23 ++++++++++++++++++- .../lighthouse_network/src/rpc/methods.rs | 17 ++++++++++++++ .../lighthouse_network/src/rpc/outbound.rs | 1 + .../lighthouse_network/src/rpc/protocol.rs | 1 + .../lighthouse_network/src/types/topics.rs | 1 + consensus/types/src/eth_spec.rs | 6 ++++- 6 files changed, 47 insertions(+), 2 deletions(-) diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index 9c9e094db62..898fea4a726 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -10,6 +10,7 @@ use crate::peer_manager::{ ConnectionDirection, PeerManager, PeerManagerEvent, }; use crate::rpc::*; +use crate::rpc::methods::BlobsByRangeRequest; use crate::service::{Context as ServiceContext, METADATA_FILENAME}; use crate::types::{ subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, @@ -39,6 +40,7 @@ use libp2p::{ }; use slog::{crit, debug, o, trace, warn}; use ssz::Encode; +use types::blobs_sidecar::BlobsSidecar; use std::collections::HashSet; use std::fs::File; use std::io::Write; @@ -51,7 +53,7 @@ use std::{ }; use types::{ consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, - SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, + SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, VariableList }; use self::gossip_cache::GossipCache; @@ -797,6 +799,9 @@ impl Behaviour { Request::BlocksByRoot { .. } => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) } + Request::BlobsByRange { .. 
} => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"]) + } } self.add_event(BehaviourEvent::RequestReceived { peer_id, @@ -1095,6 +1100,9 @@ where InboundRequest::BlocksByRoot(req) => { self.propagate_request(peer_request_id, peer_id, Request::BlocksByRoot(req)) } + InboundRequest::BlobsByRange(req) => { + self.propagate_request(peer_request_id, peer_id, Request::BlobsByRange(req)) + } } } Ok(RPCReceived::Response(id, resp)) => { @@ -1117,12 +1125,16 @@ where RPCResponse::BlocksByRoot(resp) => { self.propagate_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } + RPCResponse::BlobsByRange(resp) => { + self.propagate_response(id, peer_id, Response::BlobsByRange(Some(resp))) + } } } Ok(RPCReceived::EndOfStream(id, termination)) => { let response = match termination { ResponseTermination::BlocksByRange => Response::BlocksByRange(None), ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), + ResponseTermination::BlobsByRange => Response::BlobsByRange(None), }; self.propagate_response(id, peer_id, response); } @@ -1329,6 +1341,8 @@ pub enum Request { BlocksByRange(BlocksByRangeRequest), /// A request blocks root request. BlocksByRoot(BlocksByRootRequest), + /// A blobs by range request. + BlobsByRange(BlobsByRangeRequest), } impl std::convert::From for OutboundRequest { @@ -1342,6 +1356,7 @@ impl std::convert::From for OutboundRequest { step: 1, }) } + Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r), Request::Status(s) => OutboundRequest::Status(s), } } @@ -1361,6 +1376,8 @@ pub enum Response { BlocksByRange(Option>>), /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Option>>), + /// A response to a get BLOBS_BY_RANGE request + BlobsByRange(Option, TSpec::MaxRequestBlobsSidecars>>>) } impl std::convert::From> for RPCCodedResponse { @@ -1374,6 +1391,10 @@ impl std::convert::From> for RPCCodedResponse RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)), None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), }, + Response::BlobsByRange(r) => match r { + Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRange(b)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange), + } Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), } } diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 26d755a6e06..a7bd51106e4 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -13,6 +13,7 @@ use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::blobs_sidecar::BlobsSidecar; /// Maximum number of blocks in a single request. pub type MaxRequestBlocks = U1024; @@ -204,6 +205,16 @@ pub struct BlocksByRangeRequest { pub count: u64, } +/// Request a number of beacon blobs from a peer. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct BlobsByRangeRequest { + /// The starting slot to request blobs. + pub start_slot: u64, + + /// The number of blobs from the start slot. + pub count: u64, +} + /// Request a number of beacon block roots from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] pub struct OldBlocksByRangeRequest { @@ -243,6 +254,9 @@ pub enum RPCResponse { /// A response to a get BLOCKS_BY_ROOT request. 
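Wiring a new request/response pair like BlobsByRange means teaching the RPC layer two small facts about each response: which protocol it belongs to, and whether that protocol streams multiple chunks before terminating. A compact sketch of that bookkeeping, with toy enums standing in for the lighthouse_network ones:

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ToyProtocol {
    Status,
    BlocksByRange,
    BlobsByRange,
    Ping,
}

#[derive(Debug)]
enum ToyResponse {
    Status,
    BlocksByRange { slot: u64 },
    BlobsByRange { slot: u64, blob_count: usize },
    Pong,
}

impl ToyResponse {
    fn protocol(&self) -> ToyProtocol {
        match self {
            ToyResponse::Status => ToyProtocol::Status,
            ToyResponse::BlocksByRange { .. } => ToyProtocol::BlocksByRange,
            ToyResponse::BlobsByRange { .. } => ToyProtocol::BlobsByRange,
            ToyResponse::Pong => ToyProtocol::Ping,
        }
    }

    /// Streamed protocols deliver many chunks followed by an end-of-stream
    /// marker; single-shot protocols deliver exactly one response.
    fn multiple_responses(&self) -> bool {
        matches!(
            self,
            ToyResponse::BlocksByRange { .. } | ToyResponse::BlobsByRange { .. }
        )
    }
}

fn main() {
    let resp = ToyResponse::BlobsByRange { slot: 3, blob_count: 1 };
    assert_eq!(resp.protocol(), ToyProtocol::BlobsByRange);
    assert!(resp.multiple_responses());
}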
BlocksByRoot(Arc>), + /// A response to a get BLOBS_BY_RANGE request + BlobsByRange(Arc, T::MaxRequestBlobsSidecars>>), + /// A PONG response to a PING request. Pong(Ping), @@ -258,6 +272,9 @@ pub enum ResponseTermination { /// Blocks by root stream termination. BlocksByRoot, + + // Blobs by range stream termination. + BlobsByRange } /// The structured response containing a result/code indicating success or failure diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 7d5acc43643..1c692016514 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -38,6 +38,7 @@ pub enum OutboundRequest { Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), + BlobsByRange(BlobsByRangeRequest), Ping(Ping), MetaData(PhantomData), } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 352348f74f7..7a280ed5d8e 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -427,6 +427,7 @@ pub enum InboundRequest { Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), + BlobsByRange(BlobsByRangeRequest), Ping(Ping), MetaData(PhantomData), } diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 901a193e326..f119a5855c0 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -210,6 +210,7 @@ impl std::fmt::Display for GossipTopic { let kind = match self.kind { GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), + GossipKind::BlobsSidecar => BLOBS_SIDECAR_TOPIC.into(), GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 9d8a765c204..9de4777f54b 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -100,6 +100,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + */ type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq; type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxRequestBlobsSidecars: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -277,6 +278,7 @@ impl EthSpec for MainnetEthSpec { type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch type MaxBlobsPerBlock = U16; type FieldElementsPerBlob = U4096; + type MaxRequestBlobsSidecars = U128; fn default_spec() -> ChainSpec { ChainSpec::mainnet() @@ -323,7 +325,8 @@ impl EthSpec for MinimalEthSpec { MinGasLimit, MaxExtraDataBytes, MaxBlobsPerBlock, - FieldElementsPerBlob + FieldElementsPerBlob, + MaxRequestBlobsSidecars }); fn default_spec() -> ChainSpec { @@ -370,6 +373,7 @@ impl EthSpec for GnosisEthSpec { type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch type MaxBlobsPerBlock = U16; type FieldElementsPerBlob = U4096; + type MaxRequestBlobsSidecars = U128; fn default_spec() -> ChainSpec { ChainSpec::gnosis() From 292a16a6ebcb215c8ac43915e7a7a30e9ffddcbc Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Sat, 17 Sep 2022 14:58:27 
+0200 Subject: [PATCH 009/263] gossip boilerplate --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- .../lighthouse_network/src/types/pubsub.rs | 13 +++++ .../lighthouse_network/src/types/topics.rs | 2 + .../network/src/beacon_processor/mod.rs | 56 +++++++++++++++++++ .../beacon_processor/worker/gossip_methods.rs | 14 +++++ beacon_node/network/src/router/mod.rs | 8 +++ beacon_node/network/src/router/processor.rs | 17 ++++++ consensus/types/src/blobs_sidecar.rs | 8 +-- 8 files changed, 115 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 7418a66c17a..3b3814f6c04 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3628,7 +3628,7 @@ impl BeaconChain { .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: execution_payload .ok_or(BlockProductionError::MissingExecutionPayload)?, - blob_kzg_commitments: blob_kzg_commitments.into(), + blob_kzg_commitments: todo!(), }, }), }; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 3519dafcfdd..db9016b8b0c 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -14,6 +14,7 @@ use types::{ SignedBeaconBlockMerge, SignedBeaconBlockEip4844, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; +use types::blobs_sidecar::BlobsSidecar; use types::signed_blobs_sidecar::SignedBlobsSidecar; #[derive(Debug, Clone, PartialEq)] @@ -184,6 +185,11 @@ impl PubsubMessage { }; Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) } + GossipKind::BlobsSidecar => { + let blobs_sidecar = SignedBlobsSidecar::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::BlobsSidecars(Arc::new(blobs_sidecar))) + } GossipKind::VoluntaryExit => { let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?; @@ -228,6 +234,7 @@ impl PubsubMessage { // messages for us. 
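The pubsub changes hinge on encode and decode being symmetric per topic: a message serializes itself, and the receiver picks the deserializer from the topic kind the bytes arrived on. The sketch below uses a toy byte format in place of SSZ to show that dispatch; the names and encoding are illustrative only.

#[derive(Debug, PartialEq)]
enum ToyMessage {
    BeaconBlock(u64),  // stand-in payload: just a slot number
    BlobsSidecar(u64), // stand-in payload: just a slot number
}

#[derive(Debug, Clone, Copy)]
enum ToyKind {
    BeaconBlock,
    BlobsSidecar,
}

impl ToyMessage {
    fn kind(&self) -> ToyKind {
        match self {
            ToyMessage::BeaconBlock(_) => ToyKind::BeaconBlock,
            ToyMessage::BlobsSidecar(_) => ToyKind::BlobsSidecar,
        }
    }

    fn encode(&self) -> Vec<u8> {
        match self {
            ToyMessage::BeaconBlock(slot) | ToyMessage::BlobsSidecar(slot) => {
                slot.to_le_bytes().to_vec()
            }
        }
    }

    /// Decoding needs the topic kind: the same bytes mean different things on
    /// different topics, just as in `PubsubMessage::decode`.
    fn decode(kind: ToyKind, data: &[u8]) -> Result<Self, String> {
        let bytes: [u8; 8] = data.try_into().map_err(|_| "bad length".to_string())?;
        let slot = u64::from_le_bytes(bytes);
        Ok(match kind {
            ToyKind::BeaconBlock => ToyMessage::BeaconBlock(slot),
            ToyKind::BlobsSidecar => ToyMessage::BlobsSidecar(slot),
        })
    }
}

fn main() {
    let msg = ToyMessage::BlobsSidecar(42);
    let bytes = msg.encode();
    let round_tripped = ToyMessage::decode(msg.kind(), &bytes).unwrap();
    assert_eq!(msg, round_tripped);
}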
match &self { PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(), + PubsubMessage::BlobsSidecars(data) => data.as_ssz_bytes(), PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(), PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(), PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(), @@ -248,6 +255,12 @@ impl std::fmt::Display for PubsubMessage { block.slot(), block.message().proposer_index() ), + PubsubMessage::BlobsSidecars(blobs) => write!( + f, + "Blobs Sidecar: slot: {}, blobs: {}", + blobs.message.beacon_block_slot, + blobs.message.blobs.len(), + ), PubsubMessage::AggregateAndProofAttestation(att) => write!( f, "Aggregate and Proof: slot: {}, index: {}, aggregator_index: {}", diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 901a193e326..3e772648099 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -182,6 +182,7 @@ impl From for String { let kind = match topic.kind { GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), + GossipKind::BlobsSidecar => BLOBS_SIDECAR_TOPIC.into(), GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), @@ -210,6 +211,7 @@ impl std::fmt::Display for GossipTopic { let kind = match self.kind { GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), + GossipKind::BlobsSidecar => BLOBS_SIDECAR_TOPIC.into(), GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index e9a115904d6..2a811b805d2 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -65,6 +65,7 @@ use types::{ SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; +use types::signed_blobs_sidecar::SignedBlobsSidecar; use work_reprocessing_queue::{ spawn_reprocess_scheduler, QueuedAggregate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, }; @@ -184,6 +185,7 @@ pub const GOSSIP_ATTESTATION_BATCH: &str = "gossip_attestation_batch"; pub const GOSSIP_AGGREGATE: &str = "gossip_aggregate"; pub const GOSSIP_AGGREGATE_BATCH: &str = "gossip_aggregate_batch"; pub const GOSSIP_BLOCK: &str = "gossip_block"; +pub const GOSSIP_BLOBS_SIDECAR: &str = "gossip_blobs_sidecar"; pub const DELAYED_IMPORT_BLOCK: &str = "delayed_import_block"; pub const GOSSIP_VOLUNTARY_EXIT: &str = "gossip_voluntary_exit"; pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing"; @@ -400,6 +402,26 @@ impl WorkEvent { } } + /// Create a new `Work` event for some blobs sidecar. + pub fn gossip_blobs_sidecar( + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + blobs: Arc>, + seen_timestamp: Duration, + ) -> Self { + Self { + drop_during_sync: false, + work: Work::GossipBlobsSidecar { + message_id, + peer_id, + peer_client, + blobs, + seen_timestamp, + }, + } + } + /// Create a new `Work` event for some sync committee signature. 
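The beacon-processor additions route each gossiped sidecar into its own bounded work queue alongside the existing block queue. A rough, synchronous stand-in for that queueing idea (the real processor is async, carries peer metadata, and its exact drop-when-full policy is assumed here rather than copied):

use std::collections::VecDeque;

#[derive(Debug)]
enum ToyWork {
    GossipBlock { slot: u64 },
    GossipBlobsSidecar { slot: u64 },
}

struct FifoQueue {
    items: VecDeque<ToyWork>,
    max_len: usize,
}

impl FifoQueue {
    fn new(max_len: usize) -> Self {
        Self { items: VecDeque::new(), max_len }
    }

    /// Push, dropping the new item if the queue is already full (one of
    /// several reasonable policies for a bounded gossip queue).
    fn push(&mut self, work: ToyWork) {
        if self.items.len() < self.max_len {
            self.items.push_back(work);
        }
    }

    fn pop(&mut self) -> Option<ToyWork> {
        self.items.pop_front()
    }
}

fn main() {
    let mut queue = FifoQueue::new(2);
    queue.push(ToyWork::GossipBlobsSidecar { slot: 1 });
    queue.push(ToyWork::GossipBlock { slot: 1 });
    queue.push(ToyWork::GossipBlobsSidecar { slot: 2 }); // dropped: queue full
    assert_eq!(queue.items.len(), 2);
    while let Some(work) = queue.pop() {
        println!("processing {work:?}");
    }
}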
pub fn gossip_sync_signature( message_id: MessageId, @@ -671,6 +693,13 @@ pub enum Work { block: Arc>, seen_timestamp: Duration, }, + GossipBlobsSidecar { + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + blobs: Arc>, + seen_timestamp: Duration, + }, DelayedImportBlock { peer_id: PeerId, block: Box>, @@ -739,6 +768,7 @@ impl Work { Work::GossipAggregate { .. } => GOSSIP_AGGREGATE, Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH, Work::GossipBlock { .. } => GOSSIP_BLOCK, + Work::GossipBlobsSidecar { .. } => GOSSIP_BLOBS_SIDECAR, Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK, Work::GossipVoluntaryExit { .. } => GOSSIP_VOLUNTARY_EXIT, Work::GossipProposerSlashing { .. } => GOSSIP_PROPOSER_SLASHING, @@ -888,6 +918,7 @@ impl BeaconProcessor { let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); + let mut gossip_blobs_sidecar_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN); let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); @@ -1199,6 +1230,9 @@ impl BeaconProcessor { Work::GossipBlock { .. } => { gossip_block_queue.push(work, work_id, &self.log) } + Work::GossipBlobsSidecar { .. } => { + gossip_blobs_sidecar_queue.push(work, work_id, &self.log) + } Work::DelayedImportBlock { .. } => { delayed_block_queue.push(work, work_id, &self.log) } @@ -1451,6 +1485,28 @@ impl BeaconProcessor { ) .await }), + /* + * Verification for blobs sidecars received on gossip. + */ + Work::GossipBlobsSidecar { + message_id, + peer_id, + peer_client, + blobs, + seen_timestamp, + } => task_spawner.spawn_async(async move { + worker + .process_gossip_blobs_sidecar( + message_id, + peer_id, + peer_client, + blobs, + work_reprocessing_tx, + duplicate_cache, + seen_timestamp, + ) + .await + }), /* * Import for blocks that we received earlier than their intended slot. 
*/ diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 93ed1b463bf..0e1ab697e69 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -22,6 +22,7 @@ use types::{ SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; +use types::signed_blobs_sidecar::SignedBlobsSidecar; use super::{ super::work_reprocessing_queue::{ @@ -987,6 +988,19 @@ impl Worker { }; } + pub async fn process_gossip_blobs_sidecar( + self, + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + blobs: Arc>, + reprocess_tx: mpsc::Sender>, + duplicate_cache: DuplicateCache, + seen_duration: Duration, + ) { + + } + pub fn process_gossip_voluntary_exit( self, message_id: MessageId, diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index 03b877506fb..0c9b4177954 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -229,6 +229,14 @@ impl Router { block, ); } + PubsubMessage::BlobsSidecars(blobs) => { + self.processor.on_blobs_gossip( + id, + peer_id, + self.network_globals.client(&peer_id), + blobs, + ); + } PubsubMessage::VoluntaryExit(exit) => { debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id); self.processor.on_voluntary_exit_gossip(id, peer_id, exit); diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index ce11cbdcef3..c716707f312 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -20,6 +20,7 @@ use types::{ Attestation, AttesterSlashing, EthSpec, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId, }; +use types::signed_blobs_sidecar::SignedBlobsSidecar; /// Processes validated messages from the network. It relays necessary data to the syncing thread /// and processes blocks from the pubsub network. 
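The router's job in `on_blobs_gossip` is just to wrap the sidecar into a work event and hand it to the beacon processor over a channel; verification happens on the other side. A simplified std-channel sketch of that hand-off (the real code uses a tokio sender and much richer event types):

use std::sync::mpsc;

#[derive(Debug)]
enum ToyWorkEvent {
    GossipBlock { slot: u64 },
    GossipBlobsSidecar { slot: u64, blob_count: usize },
}

struct ToyRouter {
    beacon_processor_tx: mpsc::Sender<ToyWorkEvent>,
}

impl ToyRouter {
    /// Mirrors the shape of `on_blobs_gossip`: forward a work event and let
    /// the processor do the heavy validation.
    fn on_blobs_gossip(&self, slot: u64, blob_count: usize) {
        let _ = self
            .beacon_processor_tx
            .send(ToyWorkEvent::GossipBlobsSidecar { slot, blob_count });
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let router = ToyRouter { beacon_processor_tx: tx };

    router.on_blobs_gossip(7, 3);
    let _ = ToyWorkEvent::GossipBlock { slot: 7 }; // other variants handled the same way

    // The "beacon processor" side simply drains the channel here.
    for event in rx.try_iter() {
        println!("processor received {event:?}");
    }
}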
@@ -255,6 +256,22 @@ impl Processor { )) } + pub fn on_blobs_gossip( + &mut self, + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + blobs: Arc>, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::gossip_blobs_sidecar( + message_id, + peer_id, + peer_client, + blobs, + timestamp_now(), + )) + } + pub fn on_unaggregated_attestation_gossip( &mut self, message_id: MessageId, diff --git a/consensus/types/src/blobs_sidecar.rs b/consensus/types/src/blobs_sidecar.rs index 4d39c1af896..445c731cf81 100644 --- a/consensus/types/src/blobs_sidecar.rs +++ b/consensus/types/src/blobs_sidecar.rs @@ -9,8 +9,8 @@ use derivative::Derivative; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, Derivative)] pub struct BlobsSidecar { - beacon_block_root: Hash256, - beacon_block_slot: Slot, - blobs: VariableList, T::MaxBlobsPerBlock>, - kzg_aggregate_proof: KzgProof, + pub beacon_block_root: Hash256, + pub beacon_block_slot: Slot, + pub blobs: VariableList, T::MaxBlobsPerBlock>, + pub kzg_aggregate_proof: KzgProof, } \ No newline at end of file From 36a0add0cd8d2bc2b22f8b3603d3427b0d02d78e Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Sat, 17 Sep 2022 15:23:28 +0200 Subject: [PATCH 010/263] network stuff --- .../src/peer_manager/mod.rs | 3 +++ .../src/rpc/codec/ssz_snappy.rs | 23 +++++++++++++++++++ beacon_node/lighthouse_network/src/rpc/mod.rs | 1 + .../lighthouse_network/src/rpc/protocol.rs | 6 +++++ .../src/rpc/rate_limiter.rs | 11 +++++++++ 5 files changed, 44 insertions(+) diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 55b3884454d..05acef663d3 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -497,6 +497,7 @@ impl PeerManager { Protocol::Ping => PeerAction::MidToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, + Protocol::BlobsByRange => PeerAction::MidToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, @@ -512,6 +513,7 @@ impl PeerManager { Protocol::Ping => PeerAction::Fatal, Protocol::BlocksByRange => return, Protocol::BlocksByRoot => return, + Protocol::BlobsByRange => return, Protocol::Goodbye => return, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, @@ -527,6 +529,7 @@ impl PeerManager { Protocol::Ping => PeerAction::LowToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, + Protocol::BlobsByRange => PeerAction::MidToleranceError, Protocol::Goodbye => return, Protocol::MetaData => return, Protocol::Status => return, diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index e952eece300..f19c7b5a4fd 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -228,6 +228,7 @@ impl Encoder> for SSZSnappyOutboundCodec< OutboundRequest::Goodbye(req) => req.as_ssz_bytes(), OutboundRequest::BlocksByRange(req) => req.as_ssz_bytes(), OutboundRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(), + OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(), OutboundRequest::Ping(req) => 
req.as_ssz_bytes(), OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode }; @@ -473,6 +474,9 @@ fn handle_v1_request( Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, }))), + Protocol::BlobsByRange => Ok(Some(InboundRequest::BlobsByRange( + BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))), Protocol::Ping => Ok(Some(InboundRequest::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -505,6 +509,9 @@ fn handle_v2_request( Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, }))), + Protocol::BlobsByRange => Ok(Some(InboundRequest::BlobsByRange( + BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))), // MetaData requests return early from InboundUpgrade and do not reach the decoder. // Handle this case just for completeness. Protocol::MetaData => { @@ -542,6 +549,9 @@ fn handle_v1_response( Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), + Protocol::BlobsByRange => Err(RPCError::InvalidData( + "blobs by range via v1".to_string(), + )), Protocol::Ping => Ok(Some(RPCResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -616,6 +626,15 @@ fn handle_v2_response( )?), )))), }, + Protocol::BlobsByRange => match fork_name { + ForkName::Eip4844 => Ok(Some(RPCResponse::BlobsByRange(Arc::new( + VariableList::from_ssz_bytes(decoded_buffer)?, + )))), + _ => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + "Invalid forkname for blobsbyrange".to_string(), + )), + } _ => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, "Invalid v2 request".to_string(), @@ -672,6 +691,7 @@ mod tests { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Eip4844 => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } @@ -875,6 +895,9 @@ mod tests { OutboundRequest::BlocksByRoot(bbroot) => { assert_eq!(decoded, InboundRequest::BlocksByRoot(bbroot)) } + OutboundRequest::BlobsByRange(blbrange) => { + assert_eq!(decoded, InboundRequest::BlobsByRange(blbrange)) + } OutboundRequest::Ping(ping) => { assert_eq!(decoded, InboundRequest::Ping(ping)) } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 0bedd423b20..6197d57f199 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -292,6 +292,7 @@ where match end { ResponseTermination::BlocksByRange => Protocol::BlocksByRange, ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot, + ResponseTermination::BlobsByRange => Protocol::BlobsByRange, }, ), }, diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 7a280ed5d8e..4de2948ff76 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -159,6 +159,8 @@ pub enum Protocol { BlocksByRange, /// The `BlocksByRoot` protocol name. BlocksByRoot, + /// The `BlobsByRange` protocol name. + BlobsByRange, /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. 
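Adding `Protocol::BlobsByRange` eventually has to surface as a libp2p protocol id, which in the consensus specs takes the shape `/eth2/beacon_chain/req/<message_name>/<version>/<encoding>`. The sketch below shows that mapping using the `blobs_sidecars_by_range` name this patch series uses in its `Display` impl; the enum and helper are illustrative stand-ins, not the lighthouse_network code.

#[derive(Debug, Clone, Copy)]
enum ToyProtocol {
    Status,
    BlocksByRange,
    BlobsByRange,
}

fn message_name(p: ToyProtocol) -> &'static str {
    match p {
        ToyProtocol::Status => "status",
        ToyProtocol::BlocksByRange => "beacon_blocks_by_range",
        ToyProtocol::BlobsByRange => "blobs_sidecars_by_range",
    }
}

fn protocol_id(p: ToyProtocol, version: u8) -> String {
    format!(
        "/eth2/beacon_chain/req/{}/{}/ssz_snappy",
        message_name(p),
        version
    )
}

fn main() {
    assert_eq!(
        protocol_id(ToyProtocol::BlobsByRange, 1),
        "/eth2/beacon_chain/req/blobs_sidecars_by_range/1/ssz_snappy"
    );
}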
@@ -488,6 +490,7 @@ impl InboundRequest { InboundRequest::Goodbye(_) => 0, InboundRequest::BlocksByRange(req) => req.count, InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, + InboundRequest::BlobsByRange(req) => req.count, InboundRequest::Ping(_) => 1, InboundRequest::MetaData(_) => 1, } @@ -500,6 +503,7 @@ impl InboundRequest { InboundRequest::Goodbye(_) => Protocol::Goodbye, InboundRequest::BlocksByRange(_) => Protocol::BlocksByRange, InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, + InboundRequest::BlobsByRange(_) => Protocol::BlobsByRange, InboundRequest::Ping(_) => Protocol::Ping, InboundRequest::MetaData(_) => Protocol::MetaData, } @@ -513,6 +517,7 @@ impl InboundRequest { // variants that have `multiple_responses()` can have values. InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, + InboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, InboundRequest::Status(_) => unreachable!(), InboundRequest::Goodbye(_) => unreachable!(), InboundRequest::Ping(_) => unreachable!(), @@ -618,6 +623,7 @@ impl std::fmt::Display for InboundRequest { InboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), InboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), + InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), InboundRequest::MetaData(_) => write!(f, "MetaData request"), } diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 70b14c33dec..8cd1e749e36 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -73,6 +73,8 @@ pub struct RPCRateLimiter { bbrange_rl: Limiter, /// BlocksByRoot rate limiter. bbroots_rl: Limiter, + /// BlobsByRange rate limiter. + blbrange_rl: Limiter, } /// Error type for non conformant requests @@ -98,6 +100,8 @@ pub struct RPCRateLimiterBuilder { bbrange_quota: Option, /// Quota for the BlocksByRoot protocol. bbroots_quota: Option, + /// Quota for the BlocksByRange protocol. 
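Each protocol added to the rate limiter gets its own quota, and a request is refused once that protocol's budget for the current period is spent. The stand-in below is a plain token bucket with a refill-everything-per-period policy; it is a simplification of the mechanism, not a copy of the real limiter.

use std::time::{Duration, Instant};

/// One budget: at most `max_tokens` requests per `replenish_period`.
struct Quota {
    max_tokens: u64,
    replenish_period: Duration,
}

struct Bucket {
    quota: Quota,
    tokens: u64,
    last_refill: Instant,
}

impl Bucket {
    fn new(quota: Quota) -> Self {
        let tokens = quota.max_tokens;
        Self { quota, tokens, last_refill: Instant::now() }
    }

    /// Refill the whole budget once per period, then try to spend one token.
    fn allows(&mut self, now: Instant) -> bool {
        if now.duration_since(self.last_refill) >= self.quota.replenish_period {
            self.tokens = self.quota.max_tokens;
            self.last_refill = now;
        }
        if self.tokens > 0 {
            self.tokens -= 1;
            true
        } else {
            false
        }
    }
}

fn main() {
    // The real limiter keeps one of these per protocol; a single
    // BlobsByRange-style bucket is enough to show the mechanism.
    let mut blobs_by_range = Bucket::new(Quota {
        max_tokens: 2,
        replenish_period: Duration::from_secs(10),
    });
    let now = Instant::now();
    assert!(blobs_by_range.allows(now));
    assert!(blobs_by_range.allows(now));
    assert!(!blobs_by_range.allows(now)); // quota exhausted until the next period
}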
+ blbrange_quota: Option, } impl RPCRateLimiterBuilder { @@ -116,6 +120,7 @@ impl RPCRateLimiterBuilder { Protocol::Goodbye => self.goodbye_quota = q, Protocol::BlocksByRange => self.bbrange_quota = q, Protocol::BlocksByRoot => self.bbroots_quota = q, + Protocol::BlobsByRange => self.blbrange_quota = q, } self } @@ -156,6 +161,8 @@ impl RPCRateLimiterBuilder { .bbrange_quota .ok_or("BlocksByRange quota not specified")?; + let blbrange_quota = self.blbrange_quota.ok_or("BlobsByRange quota not specified")?; + // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; let metadata_rl = Limiter::from_quota(metadata_quota)?; @@ -163,6 +170,7 @@ impl RPCRateLimiterBuilder { let goodbye_rl = Limiter::from_quota(goodbye_quota)?; let bbroots_rl = Limiter::from_quota(bbroots_quota)?; let bbrange_rl = Limiter::from_quota(bbrange_quota)?; + let blbrange_rl = Limiter::from_quota(blbrange_quota)?; // check for peers to prune every 30 seconds, starting in 30 seconds let prune_every = tokio::time::Duration::from_secs(30); @@ -176,6 +184,7 @@ impl RPCRateLimiterBuilder { goodbye_rl, bbroots_rl, bbrange_rl, + blbrange_rl, init_time: Instant::now(), }) } @@ -199,6 +208,7 @@ impl RPCRateLimiter { Protocol::Goodbye => &mut self.goodbye_rl, Protocol::BlocksByRange => &mut self.bbrange_rl, Protocol::BlocksByRoot => &mut self.bbroots_rl, + Protocol::BlobsByRange => &mut self.blbrange_rl, }; check(limiter) } @@ -211,6 +221,7 @@ impl RPCRateLimiter { self.goodbye_rl.prune(time_since_start); self.bbrange_rl.prune(time_since_start); self.bbroots_rl.prune(time_since_start); + self.blbrange_rl.prune(time_since_start); } } From d4d40be8706ca89ba2bde24ab2d19335865b3a5e Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Sat, 17 Sep 2022 15:58:52 +0200 Subject: [PATCH 011/263] storable blobs --- beacon_node/store/src/config.rs | 4 ++++ beacon_node/store/src/hot_cold_store.rs | 30 +++++++++++++++++++++++++ beacon_node/store/src/lib.rs | 2 ++ 3 files changed, 36 insertions(+) diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 4268ec2e915..debffdca5eb 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -7,6 +7,7 @@ use types::{EthSpec, MinimalEthSpec}; pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5; +pub const DEFAULT_BLOB_CACHE_SIZE: usize = 5; /// Database configuration parameters. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -17,6 +18,8 @@ pub struct StoreConfig { pub slots_per_restore_point_set_explicitly: bool, /// Maximum number of blocks to store in the in-memory block cache. pub block_cache_size: usize, + /// Maximum number of blobs to store in the in-memory block cache. + pub blob_cache_size: usize, /// Whether to compact the database on initialization. pub compact_on_init: bool, /// Whether to compact the database during database pruning. 
@@ -41,6 +44,7 @@ impl Default for StoreConfig { slots_per_restore_point: MinimalEthSpec::slots_per_historical_root() as u64, slots_per_restore_point_set_explicitly: false, block_cache_size: DEFAULT_BLOCK_CACHE_SIZE, + blob_cache_size: DEFAULT_BLOB_CACHE_SIZE, compact_on_init: false, compact_on_prune: true, } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index c4b4a64a057..8240007b668 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -38,6 +38,7 @@ use std::path::Path; use std::sync::Arc; use std::time::Duration; use types::*; +use types::signed_blobs_sidecar::SignedBlobsSidecar; /// On-disk database that stores finalized states efficiently. /// @@ -59,6 +60,8 @@ pub struct HotColdDB, Cold: ItemStore> { /// /// The hot database also contains all blocks. pub hot_db: Hot, + /// LRU cache of deserialized blobs. Updated whenever a blob is loaded. + blob_cache: Mutex>>, /// LRU cache of deserialized blocks. Updated whenever a block is loaded. block_cache: Mutex>>, /// Chain spec. @@ -128,6 +131,7 @@ impl HotColdDB, MemoryStore> { cold_db: MemoryStore::open(), hot_db: MemoryStore::open(), block_cache: Mutex::new(LruCache::new(config.block_cache_size)), + blob_cache: Mutex::new(LruCache::new(config.blob_cache_size)), config, spec, log, @@ -161,6 +165,7 @@ impl HotColdDB, LevelDB> { cold_db: LevelDB::open(cold_path)?, hot_db: LevelDB::open(hot_path)?, block_cache: Mutex::new(LruCache::new(config.block_cache_size)), + blob_cache: Mutex::new(LruCache::new(config.blob_cache_size)), config, spec, log, @@ -453,6 +458,31 @@ impl, Cold: ItemStore> HotColdDB .key_delete(DBColumn::ExecPayload.into(), block_root.as_bytes()) } + pub fn put_blobs(&self, + block_root: &Hash256, + blobs: SignedBlobsSidecar, + ) -> Result<(), Error> { + self.hot_db.put_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes(), &blobs.as_ssz_bytes())?; + self.blob_cache.lock().push(*block_root, blobs); + Ok(()) + } + + pub fn get_blobs(&self, + block_root: &Hash256, + ) -> Result>, Error> { + if let Some(blobs) = self.blob_cache.lock().get(block_root) { + Ok(Some(blobs.clone())) + } else { + if let Some(bytes) = self.hot_db.get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? { + let ret = SignedBlobsSidecar::from_ssz_bytes(&bytes)?; + self.blob_cache.lock().put(*block_root, ret.clone()); + Ok(Some(ret)) + } else { + Ok(None) + } + } + } + pub fn put_state_summary( &self, state_root: &Hash256, diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 75aeca058b5..f7af172f5a8 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -171,6 +171,8 @@ pub enum DBColumn { BeaconMeta, #[strum(serialize = "blk")] BeaconBlock, + #[strum(serialize = "blo")] + BeaconBlob, /// For full `BeaconState`s in the hot database (finalized or fork-boundary states). 
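The storage changes give blobs the same treatment as blocks: `put_blobs` writes the encoded sidecar to a dedicated column and fills an in-memory cache, while `get_blobs` checks the cache first and only then decodes the stored bytes, re-populating the cache on the way out. A self-contained sketch of that read/write path, using plain HashMaps and a toy byte format in place of the LRU cache, SSZ, and the new "blo" column:

use std::collections::HashMap;

#[derive(Debug, Clone, PartialEq)]
struct ToySidecar {
    beacon_block_slot: u64,
    blob_count: usize,
}

#[derive(Default)]
struct ToyBlobStore {
    /// Stand-in for the `BeaconBlob` database column (block_root -> bytes).
    hot_db: HashMap<[u8; 32], Vec<u8>>,
    /// Stand-in for the LRU blob cache (block_root -> decoded sidecar).
    blob_cache: HashMap<[u8; 32], ToySidecar>,
}

impl ToyBlobStore {
    fn put_blobs(&mut self, block_root: [u8; 32], sidecar: ToySidecar) {
        // Toy "serialization": slot (8 bytes) + blob count (8 bytes).
        let mut bytes = sidecar.beacon_block_slot.to_le_bytes().to_vec();
        bytes.extend((sidecar.blob_count as u64).to_le_bytes());
        self.hot_db.insert(block_root, bytes);
        self.blob_cache.insert(block_root, sidecar);
    }

    fn get_blobs(&mut self, block_root: &[u8; 32]) -> Option<ToySidecar> {
        if let Some(sidecar) = self.blob_cache.get(block_root) {
            return Some(sidecar.clone());
        }
        let bytes = self.hot_db.get(block_root)?;
        let sidecar = ToySidecar {
            beacon_block_slot: u64::from_le_bytes(bytes[0..8].try_into().ok()?),
            blob_count: u64::from_le_bytes(bytes[8..16].try_into().ok()?) as usize,
        };
        self.blob_cache.insert(*block_root, sidecar.clone());
        Some(sidecar)
    }
}

fn main() {
    let mut store = ToyBlobStore::default();
    let root = [0u8; 32];
    store.put_blobs(root, ToySidecar { beacon_block_slot: 5, blob_count: 2 });
    assert_eq!(store.get_blobs(&root).unwrap().blob_count, 2);
}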
#[strum(serialize = "ste")] BeaconState, From aeb52ff18639b1c8980ac1fc3be997d2b3dd7b7d Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Sat, 17 Sep 2022 16:10:09 +0200 Subject: [PATCH 012/263] network stuff --- .../src/rpc/codec/ssz_snappy.rs | 1 + .../lighthouse_network/src/rpc/methods.rs | 5 ++++ .../lighthouse_network/src/rpc/outbound.rs | 7 ++++++ .../lighthouse_network/src/rpc/protocol.rs | 8 +++++++ beacon_node/network/src/router/mod.rs | 7 ++++++ beacon_node/network/src/router/processor.rs | 24 ++++++++++++++++++- 6 files changed, 51 insertions(+), 1 deletion(-) diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index f19c7b5a4fd..9bcca2e6ba0 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -70,6 +70,7 @@ impl Encoder> for SSZSnappyInboundCodec< RPCResponse::Status(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), + RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(), RPCResponse::Pong(res) => res.data.as_ssz_bytes(), RPCResponse::MetaData(res) => // Encode the correct version of the MetaData response based on the negotiated version. diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index a7bd51106e4..ad67c29babf 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -336,6 +336,7 @@ impl RPCCodedResponse { RPCResponse::Status(_) => false, RPCResponse::BlocksByRange(_) => true, RPCResponse::BlocksByRoot(_) => true, + RPCResponse::BlobsByRange(_) => true, RPCResponse::Pong(_) => false, RPCResponse::MetaData(_) => false, }, @@ -370,6 +371,7 @@ impl RPCResponse { RPCResponse::Status(_) => Protocol::Status, RPCResponse::BlocksByRange(_) => Protocol::BlocksByRange, RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, + RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange, RPCResponse::Pong(_) => Protocol::Ping, RPCResponse::MetaData(_) => Protocol::MetaData, } @@ -404,6 +406,9 @@ impl std::fmt::Display for RPCResponse { } RPCResponse::BlocksByRoot(block) => { write!(f, "BlocksByRoot: Block slot: {}", block.slot()) + } + RPCResponse::BlobsByRange(blob) => { + write!(f, "BlobsByRange: Blob slot: {}", blob.len()) } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 1c692016514..4f5b2a57824 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -76,6 +76,9 @@ impl OutboundRequest { ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), ], + OutboundRequest::BlobsByRange(_) => vec![ + ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), + ], OutboundRequest::Ping(_) => vec![ProtocolId::new( Protocol::Ping, Version::V1, @@ -97,6 +100,7 @@ impl OutboundRequest { OutboundRequest::Goodbye(_) => 0, OutboundRequest::BlocksByRange(req) => req.count, OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, + OutboundRequest::BlobsByRange(req) => req.count, OutboundRequest::Ping(_) => 1, 
OutboundRequest::MetaData(_) => 1, } @@ -109,6 +113,7 @@ impl OutboundRequest { OutboundRequest::Goodbye(_) => Protocol::Goodbye, OutboundRequest::BlocksByRange(_) => Protocol::BlocksByRange, OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, + OutboundRequest::BlobsByRange(_) => Protocol::BlobsByRange, OutboundRequest::Ping(_) => Protocol::Ping, OutboundRequest::MetaData(_) => Protocol::MetaData, } @@ -122,6 +127,7 @@ impl OutboundRequest { // variants that have `multiple_responses()` can have values. OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, + OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, OutboundRequest::Status(_) => unreachable!(), OutboundRequest::Goodbye(_) => unreachable!(), OutboundRequest::Ping(_) => unreachable!(), @@ -177,6 +183,7 @@ impl std::fmt::Display for OutboundRequest { OutboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), + OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), OutboundRequest::MetaData(_) => write!(f, "MetaData request"), } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 4de2948ff76..203f5d0b5e0 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -189,6 +189,7 @@ impl std::fmt::Display for Protocol { Protocol::Goodbye => "goodbye", Protocol::BlocksByRange => "beacon_blocks_by_range", Protocol::BlocksByRoot => "beacon_blocks_by_root", + Protocol::BlobsByRange => "blobs_sidecars_by_range", Protocol::Ping => "ping", Protocol::MetaData => "metadata", }; @@ -297,6 +298,9 @@ impl ProtocolId { Protocol::BlocksByRoot => { RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX) } + Protocol::BlobsByRange => { + RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX) + } Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -315,6 +319,7 @@ impl ProtocolId { Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), + Protocol::BlobsByRange => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), @@ -469,6 +474,9 @@ impl InboundRequest { ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), ], + InboundRequest::BlobsByRange(_) => vec![ + ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), + ], InboundRequest::Ping(_) => vec![ProtocolId::new( Protocol::Ping, Version::V1, diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index 0c9b4177954..24a202c4973 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -168,6 +168,9 @@ impl Router { Request::BlocksByRoot(request) => self .processor .on_blocks_by_root_request(peer_id, id, request), + Request::BlobsByRange(request) => self + .processor + .on_blobs_by_range_request(peer_id, id, request), } } @@ 
-192,6 +195,10 @@ impl Router { self.processor .on_blocks_by_root_response(peer_id, request_id, beacon_block); } + Response::BlobsByRange(beacon_blob) => { + self.processor + .on_blobs_by_range_response(peer_id, request_id, beacon_blob); + } } } diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index c716707f312..2d12cf54011 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -7,6 +7,7 @@ use crate::sync::manager::RequestId as SyncId; use crate::sync::SyncMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::*; +use lighthouse_network::rpc::methods::BlobsByRangeRequest; use lighthouse_network::{ Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response, }; @@ -18,7 +19,7 @@ use store::SyncCommitteeMessage; use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, EthSpec, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId, + SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId, VariableList, blobs_sidecar::BlobsSidecar, }; use types::signed_blobs_sidecar::SignedBlobsSidecar; @@ -161,6 +162,18 @@ impl Processor { )) } + pub fn on_blobs_by_range_request( + &mut self, + peer_id: PeerId, + request_id: PeerRequestId, + request: BlobsByRangeRequest, + ) { + /* + self.send_beacon_processor_work(BeaconWorkEvent::blocks_by_roots_request( + peer_id, request_id, request, + )) + */ + } /// Handle a `BlocksByRange` request from the peer. pub fn on_blocks_by_range_request( &mut self, @@ -235,6 +248,15 @@ impl Processor { }); } + pub fn on_blobs_by_range_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + beacon_blob: Option, <::EthSpec as EthSpec>::MaxRequestBlobsSidecars>>>, + ) { + + } + /// Process a gossip message declaring a new block. /// /// Attempts to apply to block to the beacon chain. May queue the block for later processing. 
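The `BlobsByRange` method added in this patch mirrors `BlocksByRange`: the request names a window of slots (`start_slot` plus `count`), and the responder streams zero or more sidecar messages followed by a `None` stream terminator. Note that at this stage the `protocol_info` entries for the new variant still advertise the `BlocksByRoot` protocol id and the request/response limits reuse the block limits, so those are placeholders. A self-contained sketch of the request/stream shape, using only the `start_slot`/`count` field names taken from the patch; everything else is illustrative and is not the Lighthouse API:

// Illustrative request/stream shape for BlobsByRange; not Lighthouse's actual types.
struct BlobsByRangeRequest {
    start_slot: u64,
    count: u64,
}

// Drain a streamed response: one item per sidecar, `None` terminates the stream.
fn collect_stream<S>(mut next: impl FnMut() -> Option<S>) -> Vec<S> {
    let mut sidecars = Vec::new();
    while let Some(sidecar) = next() {
        sidecars.push(sidecar);
    }
    sidecars
}

fn main() {
    let req = BlobsByRangeRequest { start_slot: 96, count: 32 };
    // Slot window the responder walks (a later patch clamps `count` to MAX_REQUEST_BLOCKS).
    let window = req.start_slot..req.start_slot.saturating_add(req.count);
    assert_eq!(window.count() as u64, req.count);

    // Simulate two sidecars followed by the stream terminator.
    let mut msgs = vec![Some("sidecar-a"), Some("sidecar-b"), None].into_iter();
    let received = collect_stream(|| msgs.next().flatten());
    assert_eq!(received.len(), 2);
}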
From f9209e2d0854049cefc216b465aa2bfd785e4646 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Sat, 17 Sep 2022 16:39:40 +0200 Subject: [PATCH 013/263] more network stuff --- .../network/src/beacon_processor/mod.rs | 42 +++++++++++++++++++ .../beacon_processor/worker/rpc_methods.rs | 13 ++++++ beacon_node/network/src/router/processor.rs | 4 +- 3 files changed, 56 insertions(+), 3 deletions(-) diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 2a811b805d2..450efc886b3 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -45,6 +45,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock}; use derivative::Derivative; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; +use lighthouse_network::rpc::methods::BlobsByRangeRequest; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, @@ -197,6 +198,7 @@ pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const STATUS_PROCESSING: &str = "status_processing"; pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; +pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; @@ -577,6 +579,21 @@ impl WorkEvent { } } + pub fn blobs_by_range_request( + peer_id: PeerId, + request_id: PeerRequestId, + request: BlobsByRangeRequest, + ) -> Self { + Self { + drop_during_sync: false, + work: Work::BlobsByRangeRequest { + peer_id, + request_id, + request, + }, + } + } + /// Get a `str` representation of the type of work this `WorkEvent` contains. pub fn work_type(&self) -> &'static str { self.work.str_id() @@ -757,6 +774,11 @@ pub enum Work { request_id: PeerRequestId, request: BlocksByRootRequest, }, + BlobsByRangeRequest { + peer_id: PeerId, + request_id: PeerRequestId, + request: BlobsByRangeRequest, + } } impl Work { @@ -780,6 +802,7 @@ impl Work { Work::Status { .. } => STATUS_PROCESSING, Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST, + Work::BlobsByRangeRequest {..} => BLOBS_BY_RANGE_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, } @@ -924,6 +947,7 @@ impl BeaconProcessor { let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN); let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); + let mut blbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). @@ -1266,6 +1290,9 @@ impl BeaconProcessor { Work::BlocksByRootsRequest { .. } => { bbroots_queue.push(work, work_id, &self.log) } + Work::BlobsByRangeRequest { .. } => { + blbrange_queue.push(work, work_id, &self.log) + } Work::UnknownBlockAttestation { .. 
} => { unknown_block_attestation_queue.push(work) } @@ -1643,6 +1670,21 @@ impl BeaconProcessor { request, ) }), + + Work::BlobsByRangeRequest { + peer_id, + request_id, + request + } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { + worker.handle_blobs_by_range_request( + sub_executor, + send_idle_on_drop, + peer_id, + request_id, + request, + ) + }), + Work::UnknownBlockAttestation { message_id, peer_id, diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 8ca9c35e473..2b8cbbc22c4 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -6,6 +6,7 @@ use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, Whe use itertools::process_results; use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; +use lighthouse_network::rpc::methods::BlobsByRangeRequest; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error}; use slot_clock::SlotClock; @@ -372,4 +373,16 @@ impl Worker { "load_blocks_by_range_blocks", ); } + + /// Handle a `BlobsByRange` request from the peer. + pub fn handle_blobs_by_range_request( + self, + executor: TaskExecutor, + send_on_drop: SendOnDrop, + peer_id: PeerId, + request_id: PeerRequestId, + request: BlobsByRangeRequest, + ) { + // TODO impl + } } diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 2d12cf54011..e640bace5f4 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -168,11 +168,9 @@ impl Processor { request_id: PeerRequestId, request: BlobsByRangeRequest, ) { - /* - self.send_beacon_processor_work(BeaconWorkEvent::blocks_by_roots_request( + self.send_beacon_processor_work(BeaconWorkEvent::blobs_by_range_request( peer_id, request_id, request, )) - */ } /// Handle a `BlocksByRange` request from the peer. 
pub fn on_blocks_by_range_request( From f43532d3deba899e057d77550b44a4633c474cc1 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Sat, 17 Sep 2022 20:05:51 +0200 Subject: [PATCH 014/263] implement handle blobs by range req --- .../beacon_processor/worker/rpc_methods.rs | 140 +++++++++++++++++- 1 file changed, 137 insertions(+), 3 deletions(-) diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 2b8cbbc22c4..97cab17822e 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -12,7 +12,7 @@ use slog::{debug, error}; use slot_clock::SlotClock; use std::sync::Arc; use task_executor::TaskExecutor; -use types::{Epoch, EthSpec, Hash256, Slot}; +use types::{Epoch, EthSpec, Hash256, Slot, VariableList}; use super::Worker; @@ -381,8 +381,142 @@ impl Worker { send_on_drop: SendOnDrop, peer_id: PeerId, request_id: PeerRequestId, - request: BlobsByRangeRequest, + mut req: BlobsByRangeRequest, ) { - // TODO impl + debug!(self.log, "Received BlobsByRange Request"; + "peer_id" => %peer_id, + "count" => req.count, + "start_slot" => req.start_slot, + ); + + // Should not send more than max request blocks + if req.count > MAX_REQUEST_BLOCKS { + req.count = MAX_REQUEST_BLOCKS; + } + + let forwards_block_root_iter = match self + .chain + .forwards_iter_block_roots(Slot::from(req.start_slot)) + { + Ok(iter) => iter, + Err(BeaconChainError::HistoricalBlockError( + HistoricalBlockError::BlockOutOfRange { + slot, + oldest_block_slot, + }, + )) => { + debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot); + return self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Backfilling".into(), + request_id, + ); + } + Err(e) => return error!(self.log, "Unable to obtain root iter"; "error" => ?e), + }; + + // Pick out the required blocks, ignoring skip-slots. + let mut last_block_root = None; + let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { + iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) + // map skip slots to None + .map(|(root, _)| { + let result = if Some(root) == last_block_root { + None + } else { + Some(root) + }; + last_block_root = Some(root); + result + }) + .collect::>>() + }); + + let block_roots = match maybe_block_roots { + Ok(block_roots) => block_roots, + Err(e) => return error!(self.log, "Error during iteration over blocks"; "error" => ?e), + }; + + // remove all skip slots + let block_roots = block_roots.into_iter().flatten().collect::>(); + + // Fetching blocks is async because it may have to hit the execution layer for payloads. 
+ executor.spawn( + async move { + let mut blocks_sent = 0; + let mut send_response = true; + + for root in block_roots { + match self.chain.store.get_blobs(&root) { + Ok(Some(blob)) => { + blocks_sent += 1; + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::BlobsByRange(Some(Arc::new(VariableList::new(vec![blob.message]).unwrap()))), + id: request_id, + }); + } + Ok(None) => { + error!( + self.log, + "Block in the chain is not in the store"; + "request_root" => ?root + ); + break; + } + Err(e) => { + error!( + self.log, + "Error fetching block for peer"; + "block_root" => ?root, + "error" => ?e + ); + break; + } + } + } + + let current_slot = self + .chain + .slot() + .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); + + if blocks_sent < (req.count as usize) { + debug!( + self.log, + "BlocksByRange Response processed"; + "peer" => %peer_id, + "msg" => "Failed to return all requested blocks", + "start_slot" => req.start_slot, + "current_slot" => current_slot, + "requested" => req.count, + "returned" => blocks_sent + ); + } else { + debug!( + self.log, + "BlocksByRange Response processed"; + "peer" => %peer_id, + "start_slot" => req.start_slot, + "current_slot" => current_slot, + "requested" => req.count, + "returned" => blocks_sent + ); + } + + if send_response { + // send the stream terminator + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::BlocksByRange(None), + id: request_id, + }); + } + + drop(send_on_drop); + }, + "load_blocks_by_range_blocks", + ); } } From 76572db9d550e18a253cf9ed09d93cfe907a1ca5 Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Sat, 17 Sep 2022 20:55:21 +0200 Subject: [PATCH 015/263] add network config --- beacon_node/beacon_chain/src/beacon_chain.rs | 14 ++- .../beacon_chain/src/block_verification.rs | 2 + beacon_node/beacon_chain/src/builder.rs | 2 + beacon_node/beacon_chain/src/test_utils.rs | 4 +- beacon_node/http_api/src/publish_blocks.rs | 2 +- .../beacon_processor/worker/gossip_methods.rs | 21 ++++- .../beacon_processor/worker/sync_methods.rs | 2 +- beacon_node/store/src/hot_cold_store.rs | 26 ++++++ beacon_node/store/src/lib.rs | 2 + common/eth2_config/src/lib.rs | 5 ++ .../eip4844/boot_enr.yaml | 0 .../eip4844/config.yaml | 85 ++++++++++++++++++ .../eip4844/deploy_block.txt | 1 + .../eip4844/genesis.ssz.zip | Bin 0 -> 3518 bytes 14 files changed, 159 insertions(+), 7 deletions(-) create mode 100644 common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml create mode 100644 common/eth2_network_config/built_in_network_configs/eip4844/config.yaml create mode 100644 common/eth2_network_config/built_in_network_configs/eip4844/deploy_block.txt create mode 100644 common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3b3814f6c04..5d88c5ca418 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -100,6 +100,7 @@ use types::*; pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; pub use fork_choice::CountUnrealized; use types::kzg_commitment::KzgCommitment; +use types::signed_blobs_sidecar::SignedBlobsSidecar; pub type ForkChoiceError = fork_choice::Error; @@ -374,6 +375,8 @@ pub struct BeaconChain { /// Sender given to tasks, so that if they encounter a state in which execution cannot /// continue they can request that everything shuts down. 
pub shutdown_sender: Sender, + pub block_waiting_for_sidecar: Mutex>>, + pub sidecar_waiting_for_block: Mutex>>, /// Logging to CLI, etc. pub(crate) log: Logger, /// Arbitrary bytes included in the blocks. @@ -2342,7 +2345,7 @@ impl BeaconChain { // Import the blocks into the chain. for signature_verified_block in signature_verified_blocks { match self - .process_block(signature_verified_block, count_unrealized) + .process_block(signature_verified_block, None, count_unrealized) .await { Ok(_) => imported_blocks += 1, @@ -2428,6 +2431,7 @@ impl BeaconChain { pub async fn process_block>( self: &Arc, unverified_block: B, + sidecar: Option>, count_unrealized: CountUnrealized, ) -> Result> { // Start the Prometheus timer. @@ -2444,7 +2448,7 @@ impl BeaconChain { let import_block = async move { let execution_pending = unverified_block.into_execution_pending_block(&chain)?; chain - .import_execution_pending_block(execution_pending, count_unrealized) + .import_execution_pending_block(execution_pending, sidecar, count_unrealized) .await }; @@ -2502,6 +2506,7 @@ impl BeaconChain { async fn import_execution_pending_block( self: Arc, execution_pending_block: ExecutionPendingBlock, + sidecar: Option>, count_unrealized: CountUnrealized, ) -> Result> { let ExecutionPendingBlock { @@ -2557,6 +2562,7 @@ impl BeaconChain { move || { chain.import_block( block, + sidecar, block_root, state, confirmed_state_roots, @@ -2579,6 +2585,7 @@ impl BeaconChain { fn import_block( &self, signed_block: Arc>, + sidecar: Option>, block_root: Hash256, mut state: BeaconState, confirmed_state_roots: Vec, @@ -2917,6 +2924,9 @@ impl BeaconChain { .collect(); ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); + if let Some(sidecar) = sidecar { + ops.push(StoreOp::PutBlobs(block_root, sidecar)); + } let txn_lock = self.store.hot_db.begin_rw_transaction(); if let Err(e) = self.store.do_atomically(ops) { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index cdcbf3f68e0..8eae5f256ec 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -144,6 +144,7 @@ pub enum BlockError { present_slot: Slot, block_slot: Slot, }, + MissingSidecar, /// The block state_root does not match the generated state. /// /// ## Peer scoring @@ -277,6 +278,7 @@ pub enum BlockError { /// The peer sent us an invalid block, but I'm not really sure how to score this in an /// "optimistic" sync world. 
ParentExecutionPayloadInvalid { parent_root: Hash256 }, + } /// Returned when block validation failed due to some issue verifying diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 27046904422..8bed973e588 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -797,6 +797,8 @@ where validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), attester_cache: <_>::default(), early_attester_cache: <_>::default(), + block_waiting_for_sidecar: <_>::default(), + sidecar_waiting_for_block: <_>::default(), shutdown_sender: self .shutdown_sender .ok_or("Cannot build without a shutdown sender.")?, diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index a62608202ef..128a337605e 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1458,7 +1458,7 @@ where self.set_current_slot(slot); let block_hash: SignedBeaconBlockHash = self .chain - .process_block(Arc::new(block), CountUnrealized::True) + .process_block(Arc::new(block), todo!(), CountUnrealized::True) .await? .into(); self.chain.recompute_head_at_current_slot().await; @@ -1471,7 +1471,7 @@ where ) -> Result> { let block_hash: SignedBeaconBlockHash = self .chain - .process_block(Arc::new(block), CountUnrealized::True) + .process_block(Arc::new(block), todo!(),CountUnrealized::True) .await? .into(); self.chain.recompute_head_at_current_slot().await; diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 60ca8f23281..51120581469 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -32,7 +32,7 @@ pub async fn publish_block( metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); match chain - .process_block(block.clone(), CountUnrealized::True) + .process_block(block.clone(), None, CountUnrealized::True) .await { Ok(root) => { diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 0e1ab697e69..63b252099af 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -673,6 +673,7 @@ impl Worker { .await { let block_root = gossip_verified_block.block_root; + if let Some(handle) = duplicate_cache.check_and_insert(block_root) { self.process_gossip_verified_block( peer_id, @@ -759,6 +760,9 @@ impl Worker { verified_block } + Err(BlockError::MissingSidecar) => { + todo!(); //is relevant? 
+ } Err(BlockError::ParentUnknown(block)) => { debug!( self.log, @@ -920,9 +924,24 @@ impl Worker { ) { let block: Arc<_> = verified_block.block.clone(); + let sidecar = if verified_block.block.message() + .body().blob_kzg_commitments().map(|committments| committments.is_empty()).unwrap_or(true) { + None + } else if let Some(sidecar) = self.chain.sidecar_waiting_for_block.lock().as_ref() { + if sidecar.message.beacon_block_root == verified_block.block_root() { + Some(sidecar.clone()) + } else { + *self.chain.block_waiting_for_sidecar.lock() = Some(verified_block); + return + } + } else { + // we need the sidecar but dont have it yet + return + }; + match self .chain - .process_block(verified_block, CountUnrealized::True) + .process_block(verified_block, sidecar, CountUnrealized::True) .await { Ok(block_root) => { diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 760896e0e99..b2d46f411df 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -80,7 +80,7 @@ impl Worker { } }; let slot = block.slot(); - let result = self.chain.process_block(block, CountUnrealized::True).await; + let result = self.chain.process_block(block, None, CountUnrealized::True).await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 8240007b668..3c07b4073b0 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -483,6 +483,19 @@ impl, Cold: ItemStore> HotColdDB } } + pub fn blobs_as_kv_store_ops( + &self, + key: &Hash256, + blobs: &SignedBlobsSidecar, + ops: &mut Vec, + ) { + let db_key = get_key_for_col(DBColumn::BeaconBlob.into(), key.as_bytes()); + ops.push(KeyValueStoreOp::PutKeyValue( + db_key, + blobs.as_ssz_bytes(), + )); + } + pub fn put_state_summary( &self, state_root: &Hash256, @@ -710,6 +723,14 @@ impl, Cold: ItemStore> HotColdDB self.store_hot_state(&state_root, state, &mut key_value_batch)?; } + StoreOp::PutBlobs(block_root, blobs) => { + self.blobs_as_kv_store_ops( + &block_root, + &blobs, + &mut key_value_batch, + ); + } + StoreOp::PutStateSummary(state_root, summary) => { key_value_batch.push(summary.as_kv_store_op(state_root)); } @@ -754,6 +775,7 @@ impl, Cold: ItemStore> HotColdDB // Update the block cache whilst holding a lock, to ensure that the cache updates atomically // with the database. 
let mut guard = self.block_cache.lock(); + let mut guard_blob = self.blob_cache.lock(); for op in &batch { match op { @@ -761,6 +783,10 @@ impl, Cold: ItemStore> HotColdDB guard.put(*block_root, (**block).clone()); } + StoreOp::PutBlobs(block_root, blobs) => { + guard_blob.put(*block_root, blobs.clone()); + } + StoreOp::PutState(_, _) => (), StoreOp::PutStateSummary(_, _) => (), diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index f7af172f5a8..5e74827c9d1 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -42,6 +42,7 @@ use parking_lot::MutexGuard; use std::sync::Arc; use strum::{EnumString, IntoStaticStr}; pub use types::*; +use types::signed_blobs_sidecar::SignedBlobsSidecar; pub type ColumnIter<'a> = Box), Error>> + 'a>; pub type ColumnKeyIter<'a> = Box> + 'a>; @@ -155,6 +156,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati pub enum StoreOp<'a, E: EthSpec> { PutBlock(Hash256, Arc>), PutState(Hash256, &'a BeaconState), + PutBlobs(Hash256, SignedBlobsSidecar), PutStateSummary(Hash256, HotStateSummary), PutStateTemporaryFlag(Hash256), DeleteStateTemporaryFlag(Hash256), diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index 7e3c025a83b..d188088251e 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -307,5 +307,10 @@ define_hardcoded_nets!( // Set to `true` if the genesis state can be found in the `built_in_network_configs` // directory. GENESIS_STATE_IS_KNOWN + ), + ( + eip4844, + "eip4844", + GENESIS_STATE_IS_KNOWN ) ); diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/config.yaml b/common/eth2_network_config/built_in_network_configs/eip4844/config.yaml new file mode 100644 index 00000000000..8e779eac3df --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/eip4844/config.yaml @@ -0,0 +1,85 @@ +# Prater config + +# Extends the mainnet preset +CONFIG_NAME: 'eip4844' +PRESET_BASE: 'mainnet' + +# Transition +# --------------------------------------------------------------- +TERMINAL_TOTAL_DIFFICULTY: 40 +# By default, don't use these params +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Genesis +# --------------------------------------------------------------- +# `2**14` (= 16,384) +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 2 +# Mar-01-2021 08:53:32 AM +UTC +MIN_GENESIS_TIME: 1653318000 +# Prater area code (Vienna) +GENESIS_FORK_VERSION: 0x00000ffd +# Customized for Prater: 1919188 seconds (Mar-23-2021 02:00:00 PM +UTC) +GENESIS_DELAY: 0 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x01000ffd +ALTAIR_FORK_EPOCH: 1 +# Merge +BELLATRIX_FORK_VERSION: 0x02000ffd +BELLATRIX_FORK_EPOCH: 2 +# Sharding +EIP4844_FORK_VERSION: 0x03000ffd +EIP4844_FORK_EPOCH: 3 + +# TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. 
+TRANSITION_TOTAL_DIFFICULTY: 40 + + +# Time parameters +# --------------------------------------------------------------- +# 12 seconds +SECONDS_PER_SLOT: 12 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 15 + + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 + + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 + +# Deposit contract +# --------------------------------------------------------------- +# Ethereum Goerli testnet +DEPOSIT_CHAIN_ID: 1331 +DEPOSIT_NETWORK_ID: 69 +# Prater test deposit contract on Goerli Testnet +DEPOSIT_CONTRACT_ADDRESS: 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/eip4844/deploy_block.txt new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/eip4844/deploy_block.txt @@ -0,0 +1 @@ +0 diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip new file mode 100644 index 0000000000000000000000000000000000000000..9a1c014f63769577fd9c06fe2988f338dbb83f8f GIT binary patch literal 3518 zcmWIWW@Zs#U|`^2c(lkcbkSCsze2nW3}P&r4BQMd4C$$Psl}Padd0<6p&^_M%pC%1 z$?Ji*w1S&~k>v$50|S_Ndp0vej78$W$MEGhYxa6O-kh>$+HnWt>6>~o+s{o=N{u_4 z95hR#L4oGCZM3>iE!^klD{aib$RPaL36-E{LDOeP?=m}@tZO}?V?nK; zVx4_O#XcLG|0mq@Rao;ftMo1yZU6be@*!(_o`~ucx%}FMFUvmYUQRz#yPz)X$k&9z z2iuN?)}NbOSmOVCWAHAKm>7ZC8|>enx^#V?+_tX?Zx8Jhm9~God(*Dmn=7ravI6}) z3PxrK{Id_c`buQQiyNh%pXe`=*P1C}zVq6%%CA=To3yP>*3>n>$Q9M@{yuM3?DzGz zRcE`+_pK`YzTzAIvoJC99O19;!k?F2^?mp0=8scnmmc>zJ?+oG)$i{Ao7P&&^#zzC zM!{$ZjE2By2#kinXb6mkz%UPi9jBQn^kIQ_x7FN#Gj^xt{*2!F^zZDibIX>g>?^GM z^&+H?fB2f^#)BV3nXj>JkLmAte{)g! 
zyIr^3 +AU3xZ4`0uyx@_P*a|617b{M?lL^Z)<3we)JHvlr9Cwzt>6ybQjv&?3an zF74SB*UrCw=2|zzrKR>|xS!f1sGTiS_xaYtLhi*;@gBdQed)~8taMLH)7uwflKm^V zyZF{o^I7GIXBWKM@K)BlNd1V8{@;}aDUa2eUvsZoR3)?bOJKXz-tBWUzJ0tIy1V#Q z>XYxMJn9x-)mFAza?N6ot)gk+gHFTCpVyV_`}6d|?7d}aU$ca4B(AB|{;h3RGl>!l zoLRavJu;*_F!*ub-kMF>)597I3(v<@KKgU-$i$cS{Z)IPTHh3vH=BPc<=}xsb3>Wq zde3pLFRZwE^z^|qz3iWk<-MA3sJT-^ZGZHKtg5TF&c^Xkw~|lr^OlK!%M2H^fB0a^ zitz5ESJ%2^vk8cm_ y-`r;i@MdI^W5(67l7Od~C5<2^N;iuY(#=BaVg-1!vVoK`0%0JK4hQzq7#IMuda* Date: Sat, 17 Sep 2022 21:38:57 +0200 Subject: [PATCH 016/263] forgor something --- beacon_node/beacon_chain/src/beacon_chain.rs | 8 +- .../beacon_chain/src/snapshot_cache.rs | 2 +- .../beacon_processor/worker/gossip_methods.rs | 74 +++++++++++++++++++ beacon_node/store/src/hot_cold_store.rs | 2 +- beacon_node/store/src/lib.rs | 2 +- 5 files changed, 81 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5d88c5ca418..60fb9e4191a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -376,7 +376,7 @@ pub struct BeaconChain { /// continue they can request that everything shuts down. pub shutdown_sender: Sender, pub block_waiting_for_sidecar: Mutex>>, - pub sidecar_waiting_for_block: Mutex>>, + pub sidecar_waiting_for_block: Mutex>>>, /// Logging to CLI, etc. pub(crate) log: Logger, /// Arbitrary bytes included in the blocks. @@ -2431,7 +2431,7 @@ impl BeaconChain { pub async fn process_block>( self: &Arc, unverified_block: B, - sidecar: Option>, + sidecar: Option>>, count_unrealized: CountUnrealized, ) -> Result> { // Start the Prometheus timer. @@ -2506,7 +2506,7 @@ impl BeaconChain { async fn import_execution_pending_block( self: Arc, execution_pending_block: ExecutionPendingBlock, - sidecar: Option>, + sidecar: Option>>, count_unrealized: CountUnrealized, ) -> Result> { let ExecutionPendingBlock { @@ -2585,7 +2585,7 @@ impl BeaconChain { fn import_block( &self, signed_block: Arc>, - sidecar: Option>, + sidecar: Option>>, block_root: Hash256, mut state: BeaconState, confirmed_state_roots: Vec, diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index 40b73451cb0..c77ef9e38a4 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -16,7 +16,7 @@ pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 4; const MINIMUM_BLOCK_DELAY_FOR_CLONE: Duration = Duration::from_secs(6); /// This snapshot is to be used for verifying a child of `self.beacon_block`. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct PreProcessingSnapshot { /// This state is equivalent to the `self.beacon_block.state_root()` state that has been /// advanced forward one slot using `per_slot_processing`. 
This state is "primed and ready" for diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 63b252099af..0d07c620723 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -935,6 +935,7 @@ impl Worker { return } } else { + *self.chain.block_waiting_for_sidecar.lock() = Some(verified_block); // we need the sidecar but dont have it yet return }; @@ -1017,7 +1018,80 @@ impl Worker { duplicate_cache: DuplicateCache, seen_duration: Duration, ) { + let verified_block = self.chain.block_waiting_for_sidecar.lock().take(); + if let Some(verified_block) = verified_block { + let block = verified_block.block.clone(); + if verified_block.block_root() == blobs.message.beacon_block_root { + match self + .chain + .process_block(verified_block, Some(blobs), CountUnrealized::True) + .await + { + Ok(block_root) => { + metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); + + if reprocess_tx + .try_send(ReprocessQueueMessage::BlockImported(block_root)) + .is_err() + { + error!( + self.log, + "Failed to inform block import"; + "source" => "gossip", + "block_root" => ?block_root, + ) + }; + debug!( + self.log, + "Gossipsub block processed"; + "block" => ?block_root, + "peer_id" => %peer_id + ); + + self.chain.recompute_head_at_current_slot().await; + } + Err(BlockError::ParentUnknown { .. }) => { + // Inform the sync manager to find parents for this block + // This should not occur. It should be checked by `should_forward_block` + error!( + self.log, + "Block with unknown parent attempted to be processed"; + "peer_id" => %peer_id + ); + self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block)); + } + Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { + debug!( + self.log, + "Failed to verify execution payload"; + "error" => %e + ); + } + other => { + debug!( + self.log, + "Invalid gossip beacon block"; + "outcome" => ?other, + "block root" => ?block.canonical_root(), + "block slot" => block.slot() + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "bad_gossip_block_ssz", + ); + trace!( + self.log, + "Invalid gossip beacon block ssz"; + "ssz" => format_args!("0x{}", hex::encode(block.as_ssz_bytes())), + ); + } + }; + } + } else { + *self.chain.sidecar_waiting_for_block.lock() = Some(blobs); + } } pub fn process_gossip_voluntary_exit( diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 3c07b4073b0..de46843adf8 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -784,7 +784,7 @@ impl, Cold: ItemStore> HotColdDB } StoreOp::PutBlobs(block_root, blobs) => { - guard_blob.put(*block_root, blobs.clone()); + guard_blob.put(*block_root, (**blobs).clone()); } StoreOp::PutState(_, _) => (), diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 5e74827c9d1..aac9cda932a 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -156,7 +156,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati pub enum StoreOp<'a, E: EthSpec> { PutBlock(Hash256, Arc>), PutState(Hash256, &'a BeaconState), - PutBlobs(Hash256, SignedBlobsSidecar), + PutBlobs(Hash256, Arc>), PutStateSummary(Hash256, HotStateSummary), PutStateTemporaryFlag(Hash256), DeleteStateTemporaryFlag(Hash256), From 
8b71b978e094e524f4309ad2c41ce9adcd3671f9 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Sat, 17 Sep 2022 23:42:49 +0200 Subject: [PATCH 017/263] new round of hacks (config etc) --- beacon_node/lighthouse_network/src/rpc/mod.rs | 1 + .../src/beacon_processor/worker/rpc_methods.rs | 4 ++-- .../eip4844/boot_enr.yaml | 1 + .../eip4844/genesis.ssz.zip | Bin 3518 -> 0 bytes consensus/types/src/eth_spec.rs | 2 +- 5 files changed, 5 insertions(+), 3 deletions(-) delete mode 100644 common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 6197d57f199..daa565199c4 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -125,6 +125,7 @@ impl RPC { Duration::from_secs(10), ) .n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10)) + .n_every(Protocol::BlobsByRange, 128, Duration::from_secs(10)) .build() .expect("Configuration parameters are valid"); RPC { diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 97cab17822e..83949f6acec 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -460,7 +460,7 @@ impl Worker { Ok(None) => { error!( self.log, - "Block in the chain is not in the store"; + "Blob in the chain is not in the store"; "request_root" => ?root ); break; @@ -509,7 +509,7 @@ impl Worker { // send the stream terminator self.send_network_message(NetworkMessage::SendResponse { peer_id, - response: Response::BlocksByRange(None), + response: Response::BlobsByRange(None), id: request_id, }); } diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml index e69de29bb2d..d4cf63c3898 100644 --- a/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml @@ -0,0 +1 @@ +- enr:-MK4QI-wkVW1PxL4ksUM4H_hMgTTwxKMzvvDMfoiwPBuRxcsGkrGPLo4Kho3Ri1DEtJG4B6pjXddbzA9iF2gVctxv42GAX9v5WG5h2F0dG5ldHOIAAAAAAAAAACEZXRoMpBzql9ccAAAcDIAAAAAAAAAgmlkgnY0gmlwhKRcjMiJc2VjcDI1NmsxoQK1fc46pmVHKq8HNYLkSVaUv4uK2UBsGgjjGWU6AAhAY4hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip deleted file mode 100644 index 9a1c014f63769577fd9c06fe2988f338dbb83f8f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3518 zcmWIWW@Zs#U|`^2c(lkcbkSCsze2nW3}P&r4BQMd4C$$Psl}Padd0<6p&^_M%pC%1 z$?Ji*w1S&~k>v$50|S_Ndp0vej78$W$MEGhYxa6O-kh>$+HnWt>6>~o+s{o=N{u_4 z95hR#L4oGCZM3>iE!^klD{aib$RPaL36-E{LDOeP?=m}@tZO}?V?nK; zVx4_O#XcLG|0mq@Rao;ftMo1yZU6be@*!(_o`~ucx%}FMFUvmYUQRz#yPz)X$k&9z z2iuN?)}NbOSmOVCWAHAKm>7ZC8|>enx^#V?+_tX?Zx8Jhm9~God(*Dmn=7ravI6}) z3PxrK{Id_c`buQQiyNh%pXe`=*P1C}zVq6%%CA=To3yP>*3>n>$Q9M@{yuM3?DzGz zRcE`+_pK`YzTzAIvoJC99O19;!k?F2^?mp0=8scnmmc>zJ?+oG)$i{Ao7P&&^#zzC zM!{$ZjE2By2#kinXb6mkz%UPi9jBQn^kIQ_x7FN#Gj^xt{*2!F^zZDibIX>g>?^GM z^&+H?fB2f^#)BV3nXj>JkLmAte{)g! 
zyIr^3 +AU3xZ4`0uyx@_P*a|617b{M?lL^Z)<3we)JHvlr9Cwzt>6ybQjv&?3an zF74SB*UrCw=2|zzrKR>|xS!f1sGTiS_xaYtLhi*;@gBdQed)~8taMLH)7uwflKm^V zyZF{o^I7GIXBWKM@K)BlNd1V8{@;}aDUa2eUvsZoR3)?bOJKXz-tBWUzJ0tIy1V#Q z>XYxMJn9x-)mFAza?N6ot)gk+gHFTCpVyV_`}6d|?7d}aU$ca4B(AB|{;h3RGl>!l zoLRavJu;*_F!*ub-kMF>)597I3(v<@KKgU-$i$cS{Z)IPTHh3vH=BPc<=}xsb3>Wq zde3pLFRZwE^z^|qz3iWk<-MA3sJT-^ZGZHKtg5TF&c^Xkw~|lr^OlK!%M2H^fB0a^ zitz5ESJ%2^vk8cm_ y-`r;i@MdI^W5(67l7Od~C5<2^N;iuY(#=BaVg-1!vVoK`0%0JK4hQzq7#IMuda* Date: Sun, 18 Sep 2022 10:23:53 +0200 Subject: [PATCH 018/263] more enr --- .../built_in_network_configs/eip4844/boot_enr.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml index d4cf63c3898..4d52cc59752 100644 --- a/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml @@ -1 +1,3 @@ -- enr:-MK4QI-wkVW1PxL4ksUM4H_hMgTTwxKMzvvDMfoiwPBuRxcsGkrGPLo4Kho3Ri1DEtJG4B6pjXddbzA9iF2gVctxv42GAX9v5WG5h2F0dG5ldHOIAAAAAAAAAACEZXRoMpBzql9ccAAAcDIAAAAAAAAAgmlkgnY0gmlwhKRcjMiJc2VjcDI1NmsxoQK1fc46pmVHKq8HNYLkSVaUv4uK2UBsGgjjGWU6AAhAY4hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-MK4QLij8YaVQ6fIi09rDuD9fufxBlCZRXwfM1q6SbNJfy5ZZdAvtlnsfqhIeI0IqeOZdaPExVCfZfR4JJTIuKXFR76GAYJGrqHnh2F0dG5ldHOIAAAAAAAAAACEZXRoMpBCynldgwAP_QMAAAAAAAAAgmlkgnY0gmlwhCJ7uEyJc2VjcDI1NmsxoQJpeftU6RbmIhcFllICznlAMJXL3EwHEGhn73_Gk0wrCYhzeW5jbmV0cwCDdGNwgjLIg3VkcIIu4A +- enr:-JG4QK27MZvV3QbwdLt055Yhei27SjAsDXMFGCdl-Q7SDiCgR_qbiW3BmcOClehFVJgMa6IfjHeJBdbC0jvrr2NycOqGAYJLWb5kgmlkgnY0gmlwhCJE_eeJc2VjcDI1NmsxoQIecO7Y9C7J2Bs7RNxXaUkU6BfmPKIhEsDScKAoxENaRYN0Y3CCdl-DdWRwgnZf +- enr:-JG4QExcHW3vzBcE0f_r-93nSA4iBy4qNLthSyTw7p0tlPwjMl1JVTAgLSNHLLZJzOGtelJO4sw37LliuHyJ55zN5J6GAYJLWTvzgmlkgnY0gmlwhCKq1cmJc2VjcDI1NmsxoQJT2d4jtKQbHNw3tZPLhoMlR73o5LNdi-bk_bYq6siwuIN0Y3CCdl-DdWRwgnZf \ No newline at end of file From 14aa4957b9fcf2f6f9c1ee6b32e7b69ad3e97b6d Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Sun, 18 Sep 2022 10:46:01 +0200 Subject: [PATCH 019/263] correct fork version --- .../built_in_network_configs/eip4844/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/config.yaml b/common/eth2_network_config/built_in_network_configs/eip4844/config.yaml index 8e779eac3df..d6e6aef57a5 100644 --- a/common/eth2_network_config/built_in_network_configs/eip4844/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/eip4844/config.yaml @@ -36,7 +36,7 @@ ALTAIR_FORK_EPOCH: 1 BELLATRIX_FORK_VERSION: 0x02000ffd BELLATRIX_FORK_EPOCH: 2 # Sharding -EIP4844_FORK_VERSION: 0x03000ffd +EIP4844_FORK_VERSION: 0x83000ffd EIP4844_FORK_EPOCH: 3 # TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. 
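Patches 015 and 016 above also introduce the pairing between a gossiped block and its blobs sidecar: `BeaconChain` gains `block_waiting_for_sidecar` and `sidecar_waiting_for_block` slots, the gossip worker parks whichever half of the pair arrives first, and `process_block` now takes an optional `Arc<SignedBlobsSidecar>` so the pair is imported together and persisted via `StoreOp::PutBlobs`. A condensed sketch of that hand-off follows, with plain stand-in types rather than the real `GossipVerifiedBlock`/`SignedBlobsSidecar`; in the actual code the "needs a sidecar" test is whether `blob_kzg_commitments` is non-empty.

use std::sync::Mutex;

type Root = [u8; 32];

struct Block {
    root: Root,
    needs_sidecar: bool,
}

struct Sidecar {
    beacon_block_root: Root,
}

#[derive(Default)]
struct Pairing {
    block_waiting_for_sidecar: Mutex<Option<Block>>,
    sidecar_waiting_for_block: Mutex<Option<Sidecar>>,
}

impl Pairing {
    // A gossiped block arrived: import immediately if no sidecar is required or the
    // matching sidecar is already parked; otherwise park the block and wait.
    fn on_block(&self, block: Block) -> Option<(Block, Option<Sidecar>)> {
        if !block.needs_sidecar {
            return Some((block, None));
        }
        let mut parked = self.sidecar_waiting_for_block.lock().unwrap();
        match parked.take() {
            Some(sc) if sc.beacon_block_root == block.root => Some((block, Some(sc))),
            other => {
                *parked = other; // keep a non-matching sidecar parked
                drop(parked);
                *self.block_waiting_for_sidecar.lock().unwrap() = Some(block);
                None
            }
        }
    }

    // A gossiped sidecar arrived: pair it with a parked block or park it.
    fn on_sidecar(&self, sidecar: Sidecar) -> Option<(Block, Option<Sidecar>)> {
        let mut parked = self.block_waiting_for_sidecar.lock().unwrap();
        match parked.take() {
            Some(block) if block.root == sidecar.beacon_block_root => {
                Some((block, Some(sidecar)))
            }
            other => {
                *parked = other;
                drop(parked);
                *self.sidecar_waiting_for_block.lock().unwrap() = Some(sidecar);
                None
            }
        }
    }
}

fn main() {
    let chain = Pairing::default();
    let root = [7u8; 32];
    // Sidecar first, then the block: the pair is released when the block arrives.
    assert!(chain.on_sidecar(Sidecar { beacon_block_root: root }).is_none());
    let paired = chain.on_block(Block { root, needs_sidecar: true });
    assert!(matches!(paired, Some((_, Some(_)))));
}

As in the patch, only a single block and a single sidecar can be parked at a time; a later arrival for a different root simply replaces the parked entry.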
From 285dbf43edd6c7022a87b2f60ea7240c3920b7ad Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Sun, 18 Sep 2022 11:34:46 +0200 Subject: [PATCH 020/263] hacky hacks --- beacon_node/network/src/status.rs | 2 +- consensus/types/src/chain_spec.rs | 6 +++--- consensus/types/src/payload.rs | 15 ++------------- 3 files changed, 6 insertions(+), 17 deletions(-) diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index 865f8ee933f..27689a74163 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -18,7 +18,7 @@ impl ToStatusMessage for BeaconChain { /// Build a `StatusMessage` representing the state of the given `beacon_chain`. pub(crate) fn status_message(beacon_chain: &BeaconChain) -> StatusMessage { - let fork_digest = beacon_chain.enr_fork_id().fork_digest; + let fork_digest = [0x9c, 0x67, 0x11, 0x28]; let cached_head = beacon_chain.canonical_head.cached_head(); let mut finalized_checkpoint = cached_head.finalized_checkpoint(); diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index b894bef12e9..9e3d40c997b 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -560,7 +560,7 @@ impl ChainSpec { domain_sync_committee: 7, domain_sync_committee_selection_proof: 8, domain_contribution_and_proof: 9, - altair_fork_version: [0x01, 0x00, 0x00, 0x00], + altair_fork_version: [0x01, 0x00, 0x0f, 0xfd], altair_fork_epoch: Some(Epoch::new(74240)), /* @@ -571,7 +571,7 @@ impl ChainSpec { min_slashing_penalty_quotient_bellatrix: u64::checked_pow(2, 5) .expect("pow does not overflow"), proportional_slashing_multiplier_bellatrix: 3, - bellatrix_fork_version: [0x02, 0x00, 0x00, 0x00], + bellatrix_fork_version: [0x02, 0x00, 0x0f, 0xfd], bellatrix_fork_epoch: Some(Epoch::new(144896)), terminal_total_difficulty: Uint256::from_dec_str("58750000000000000000000") .expect("terminal_total_difficulty is a valid integer"), @@ -583,7 +583,7 @@ impl ChainSpec { * Eip4844 hard fork params */ eip4844_fork_epoch: None, - eip4844_fork_version: [0x03, 0x00, 0x00, 0x00], + eip4844_fork_version: [0x83, 0x00, 0x0f, 0xfd], /* * Network specific diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 23db2d961f8..6f4e3a39e64 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -227,7 +227,8 @@ impl Encode for BlindedPayload { } } -#[derive(Default, Debug, Clone, Serialize, Deserialize)] +#[derive(Default, Debug, Clone, Serialize, Deserialize, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] pub struct FullPayload { pub execution_payload: ExecutionPayload @@ -239,18 +240,6 @@ impl TestRandom for FullPayload { } } -impl PartialEq for FullPayload { - fn eq(&self, other: &FullPayload) -> bool { - todo!() - } -} - -impl Hash for FullPayload { - fn hash(&self, into: &mut H) { - todo!() - } -} - impl From> for FullPayload { fn from(execution_payload: ExecutionPayload) -> Self { Self { From 257087b01085afdadc3c2e8d2a4f5116f9a3a3f0 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Sun, 18 Sep 2022 11:43:53 +0200 Subject: [PATCH 021/263] correct fork version --- consensus/types/src/fork_context.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index c5316a65674..c9e9bed0bf3 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -47,11 +47,11 @@ impl 
ForkContext { )); } - if spec.bellatrix_fork_epoch.is_some() { + if spec.eip4844_fork_epoch.is_some() { fork_to_digest.push(( - ForkName::Merge, + ForkName::Eip4844, ChainSpec::compute_fork_digest( - spec.bellatrix_fork_version, + spec.eip4844_fork_version, genesis_validators_root, ), )); From 6f7d21c542defc103652e610f820d438e9414cd1 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Sun, 18 Sep 2022 12:13:03 +0200 Subject: [PATCH 022/263] enable 4844 at epoch 3 --- beacon_node/network/src/status.rs | 2 +- consensus/types/src/chain_spec.rs | 18 +++++++++++------- consensus/types/src/fork_name.rs | 3 ++- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index 27689a74163..865f8ee933f 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -18,7 +18,7 @@ impl ToStatusMessage for BeaconChain { /// Build a `StatusMessage` representing the state of the given `beacon_chain`. pub(crate) fn status_message(beacon_chain: &BeaconChain) -> StatusMessage { - let fork_digest = [0x9c, 0x67, 0x11, 0x28]; + let fork_digest = beacon_chain.enr_fork_id().fork_digest; let cached_head = beacon_chain.canonical_head.cached_head(); let mut finalized_checkpoint = cached_head.finalized_checkpoint(); diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 9e3d40c997b..e07d53bf8bd 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -236,13 +236,17 @@ impl ChainSpec { /// Returns the name of the fork which is active at `epoch`. pub fn fork_name_at_epoch(&self, epoch: Epoch) -> ForkName { - match self.bellatrix_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, - _ => match self.altair_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, - _ => ForkName::Base, - }, + match self.eip4844_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Eip4844, + _ => match self.bellatrix_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, + _ => match self.altair_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, + _ => ForkName::Base, + }, + } } + } /// Returns the fork version for a named fork. 
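With the devnet schedule used here (Altair at epoch 1, Bellatrix at epoch 2, EIP-4844 at epoch 3), the nested match above has to test the newest fork first: any epoch at or past 3 also satisfies the Bellatrix and Altair checks, so testing in ascending order would never report Eip4844. A small self-contained check of that ordering, with plain integers and an enum standing in for `Epoch` and `ForkName`:

// Stand-ins for ForkName and the devnet fork epochs from the eip4844 config.yaml
// (ALTAIR_FORK_EPOCH: 1, BELLATRIX_FORK_EPOCH: 2, EIP4844_FORK_EPOCH: 3).
#[derive(Debug, PartialEq)]
enum Fork { Base, Altair, Merge, Eip4844 }

fn fork_at_epoch(epoch: u64) -> Fork {
    // Newest fork first: an epoch >= 3 also passes the older checks, so the
    // Eip4844 arm must come before Merge and Altair.
    match epoch {
        e if e >= 3 => Fork::Eip4844,
        e if e >= 2 => Fork::Merge,
        e if e >= 1 => Fork::Altair,
        _ => Fork::Base,
    }
}

fn main() {
    assert_eq!(fork_at_epoch(0), Fork::Base);
    assert_eq!(fork_at_epoch(1), Fork::Altair);
    assert_eq!(fork_at_epoch(2), Fork::Merge);
    assert_eq!(fork_at_epoch(3), Fork::Eip4844);
    assert_eq!(fork_at_epoch(100), Fork::Eip4844);
}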
@@ -582,7 +586,7 @@ impl ChainSpec { /* * Eip4844 hard fork params */ - eip4844_fork_epoch: None, + eip4844_fork_epoch: Some(Epoch::new(3)), eip4844_fork_version: [0x83, 0x00, 0x0f, 0xfd], /* diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 7afe0d75305..5e6ee3e5e9c 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -42,7 +42,7 @@ impl ForkName { ForkName::Eip4844 => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(0)); - spec.eip4844_fork_epoch = Some(Epoch::new(0)); + spec.eip4844_fork_epoch = Some(Epoch::new(3)); spec } } @@ -126,6 +126,7 @@ impl FromStr for ForkName { "phase0" | "base" => ForkName::Base, "altair" => ForkName::Altair, "bellatrix" | "merge" => ForkName::Merge, + "eip4844" => ForkName::Eip4844, _ => return Err(format!("unknown fork name: {}", fork_name)), }) } From de44b300c089de05a834a920a5993de5c12c5a09 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 18 Feb 2022 18:29:05 -0700 Subject: [PATCH 023/263] add/update types --- Cargo.lock | 11 ++++ beacon_node/lighthouse_network/src/config.rs | 3 +- .../src/rpc/codec/ssz_snappy.rs | 9 ++- .../lighthouse_network/src/types/pubsub.rs | 8 ++- consensus/ssz/src/decode/impls.rs | 1 + consensus/ssz/src/encode/impls.rs | 1 + .../process_operations.rs | 4 +- consensus/tree_hash/src/impls.rs | 1 + consensus/types/Cargo.toml | 1 + consensus/types/src/beacon_block.rs | 9 ++- consensus/types/src/beacon_block_and_blobs.rs | 13 ++++ consensus/types/src/beacon_block_body.rs | 9 ++- consensus/types/src/chain_spec.rs | 8 +++ consensus/types/src/eth_spec.rs | 13 +++- consensus/types/src/fork_name.rs | 19 +++++- consensus/types/src/kzg_commitment.rs | 63 +++++++++++++++++++ consensus/types/src/lib.rs | 15 +++-- consensus/types/src/signed_beacon_block.rs | 7 ++- testing/ef_tests/src/cases/common.rs | 1 + .../ef_tests/src/cases/epoch_processing.rs | 1 + testing/ef_tests/src/cases/fork.rs | 1 + testing/ef_tests/src/cases/transition.rs | 5 ++ .../src/signing_method/web3signer.rs | 5 ++ 23 files changed, 189 insertions(+), 19 deletions(-) create mode 100644 consensus/types/src/beacon_block_and_blobs.rs create mode 100644 consensus/types/src/kzg_commitment.rs diff --git a/Cargo.lock b/Cargo.lock index cfefa6c1161..4dfd070f4dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5736,6 +5736,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-big-array" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18b20e7752957bbe9661cff4e0bb04d183d0948cdab2ea58cdb9df36a61dfe62" +dependencies = [ + "serde", + "serde_derive", +] + [[package]] name = "serde_array_query" version = "0.1.0" @@ -7107,6 +7117,7 @@ dependencies = [ "rusqlite", "safe_arith", "serde", + "serde-big-array", "serde_derive", "serde_json", "serde_with", diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 263ef0c7cb9..ca2178432a2 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -296,7 +296,8 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos match fork_context.current_fork() { // according to: https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#the-gossip-domain-gossipsub // the derivation of the message-id remains the same in the merge - ForkName::Altair | ForkName::Merge => { + //TODO(sean): figure this out + ForkName::Altair | 
ForkName::Merge | ForkName::Dank => { let topic_len_bytes = topic_bytes.len().to_le_bytes(); let mut vec = Vec::with_capacity( prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index a46a05a8ce3..699d877efed 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; use types::{ EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockMerge, + SignedBeaconBlockBase, SignedBeaconBlockDank, SignedBeaconBlockMerge, }; use unsigned_varint::codec::Uvi; @@ -407,6 +407,7 @@ fn context_bytes( return match **ref_box_block { // NOTE: If you are adding another fork type here, be sure to modify the // `fork_context.to_context_bytes()` function to support it as well! + SignedBeaconBlock::Dank { .. } => fork_context.to_context_bytes(ForkName::Dank), SignedBeaconBlock::Merge { .. } => { // Merge context being `None` implies that "merge never happened". fork_context.to_context_bytes(ForkName::Merge) @@ -586,6 +587,9 @@ fn handle_v2_response( decoded_buffer, )?), )))), + ForkName::Dank => Ok(Some(RPCResponse::BlocksByRange(Box::new( + SignedBeaconBlock::Dank(SignedBeaconBlockDank::from_ssz_bytes(decoded_buffer)?), + )))), }, Protocol::BlocksByRoot => match fork_name { ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( @@ -601,6 +605,9 @@ fn handle_v2_response( decoded_buffer, )?), )))), + ForkName::Dank => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + SignedBeaconBlock::Dank(SignedBeaconBlockDank::from_ssz_bytes(decoded_buffer)?), + )))), }, _ => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index a01072f8e4e..291cde46ce1 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -11,8 +11,8 @@ use std::sync::Arc; use types::{ Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, - SignedBeaconBlockMerge, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, - SyncCommitteeMessage, SyncSubnetId, + SignedBeaconBlockDank, SignedBeaconBlockMerge, SignedContributionAndProof, SignedVoluntaryExit, + SubnetId, SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -167,6 +167,10 @@ impl PubsubMessage { SignedBeaconBlockMerge::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), + Some(ForkName::Dank) => SignedBeaconBlock::::Dank( + SignedBeaconBlockDank::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), None => { return Err(format!( "Unknown gossipsub fork digest: {:?}", diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs index d91ddabe028..99f31858516 100644 --- a/consensus/ssz/src/decode/impls.rs +++ b/consensus/ssz/src/decode/impls.rs @@ -374,6 +374,7 @@ macro_rules! impl_decodable_for_u8_array { impl_decodable_for_u8_array!(4); impl_decodable_for_u8_array!(32); +impl_decodable_for_u8_array!(48); macro_rules! 
impl_for_vec { ($type: ty, $max_len: expr) => { diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs index cfd95ba40df..1faf9123f5b 100644 --- a/consensus/ssz/src/encode/impls.rs +++ b/consensus/ssz/src/encode/impls.rs @@ -483,6 +483,7 @@ macro_rules! impl_encodable_for_u8_array { impl_encodable_for_u8_array!(4); impl_encodable_for_u8_array!(32); +impl_encodable_for_u8_array!(48); #[cfg(test)] mod tests { diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 31a4ac1fb42..0d74ac4dc62 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -230,7 +230,9 @@ pub fn process_attestations<'a, T: EthSpec, Payload: ExecPayload>( BeaconBlockBodyRef::Base(_) => { base::process_attestations(state, block_body.attestations(), verify_signatures, spec)?; } - BeaconBlockBodyRef::Altair(_) | BeaconBlockBodyRef::Merge(_) => { + BeaconBlockBodyRef::Altair(_) + | BeaconBlockBodyRef::Merge(_) + | BeaconBlockBodyRef::Dank(_) => { altair::process_attestations( state, block_body.attestations(), diff --git a/consensus/tree_hash/src/impls.rs b/consensus/tree_hash/src/impls.rs index cf05d2a3d5a..f27c5291861 100644 --- a/consensus/tree_hash/src/impls.rs +++ b/consensus/tree_hash/src/impls.rs @@ -81,6 +81,7 @@ macro_rules! impl_for_lt_32byte_u8_array { impl_for_lt_32byte_u8_array!(4); impl_for_lt_32byte_u8_array!(32); +impl_for_lt_32byte_u8_array!(48); impl TreeHash for U128 { fn tree_hash_type() -> TreeHashType { diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 68fdbf7990d..397d916dce3 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -9,6 +9,7 @@ name = "benches" harness = false [dependencies] +serde-big-array = {version = "0.3.2", features = ["const-generics"]} bls = { path = "../../crypto/bls" } compare_fields = { path = "../../common/compare_fields" } compare_fields_derive = { path = "../../common/compare_fields_derive" } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 0ec1f9a3741..f9f2d8651be 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -1,6 +1,6 @@ use crate::beacon_block_body::{ - BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, BeaconBlockBodyRef, - BeaconBlockBodyRefMut, + BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyDank, BeaconBlockBodyMerge, + BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; use crate::test_utils::TestRandom; use crate::*; @@ -17,7 +17,7 @@ use tree_hash_derive::TreeHash; /// A block of the `BeaconChain`. #[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Dank), variant_attributes( derive( Debug, @@ -64,6 +64,8 @@ pub struct BeaconBlock = FullPayload> { pub body: BeaconBlockBodyAltair, #[superstruct(only(Merge), partial_getter(rename = "body_merge"))] pub body: BeaconBlockBodyMerge, + #[superstruct(only(Dank), partial_getter(rename = "body_dank"))] + pub body: BeaconBlockBodyDank, } pub type BlindedBeaconBlock = BeaconBlock>; @@ -189,6 +191,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRef<'a, T, Payload> { BeaconBlockRef::Base { .. } => ForkName::Base, BeaconBlockRef::Altair { .. } => ForkName::Altair, BeaconBlockRef::Merge { .. } => ForkName::Merge, + BeaconBlockRef::Dank { .. 
} => ForkName::Dank, }; if fork_at_slot == object_fork { diff --git a/consensus/types/src/beacon_block_and_blobs.rs b/consensus/types/src/beacon_block_and_blobs.rs new file mode 100644 index 00000000000..b39fff77146 --- /dev/null +++ b/consensus/types/src/beacon_block_and_blobs.rs @@ -0,0 +1,13 @@ +use crate::{BLSFieldElement, Blob, EthSpec, SignedBeaconBlock}; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use ssz_types::{FixedVector, VariableList}; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; + +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash)] +pub struct BeaconBlockAndBlobs { + pub block: SignedBeaconBlock, + pub blobs: VariableList, E::MaxObjectListSize>, +} diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 381a9bd43e3..e8c66d2d358 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -13,7 +13,7 @@ use tree_hash_derive::TreeHash; /// /// This *superstruct* abstracts over the hard-fork. #[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Dank), variant_attributes( derive( Debug, @@ -47,14 +47,16 @@ pub struct BeaconBlockBody = FullPayload> pub attestations: VariableList, T::MaxAttestations>, pub deposits: VariableList, pub voluntary_exits: VariableList, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Dank))] pub sync_aggregate: SyncAggregate, // We flatten the execution payload so that serde can use the name of the inner type, // either `execution_payload` for full payloads, or `execution_payload_header` for blinded // payloads. - #[superstruct(only(Merge))] + #[superstruct(only(Merge, Dank))] #[serde(flatten)] pub execution_payload: Payload, + #[superstruct(only(Dank))] + pub blob_kzgs: VariableList, #[superstruct(only(Base, Altair))] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] @@ -69,6 +71,7 @@ impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { BeaconBlockBodyRef::Base { .. } => ForkName::Base, BeaconBlockBodyRef::Altair { .. } => ForkName::Altair, BeaconBlockBodyRef::Merge { .. } => ForkName::Merge, + BeaconBlockBodyRef::Dank { .. 
} => ForkName::Dank, } } } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index b2ba24ac3ee..6bfae08a455 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -150,6 +150,10 @@ pub struct ChainSpec { pub terminal_block_hash_activation_epoch: Epoch, pub safe_slots_to_import_optimistically: u64, + /* + * Danksharding hard fork params + */ + /* * Networking */ @@ -245,6 +249,8 @@ impl ChainSpec { ForkName::Base => self.genesis_fork_version, ForkName::Altair => self.altair_fork_version, ForkName::Merge => self.bellatrix_fork_version, + //TODO: update this + ForkName::Dank => self.bellatrix_fork_version, } } @@ -254,6 +260,8 @@ impl ChainSpec { ForkName::Base => Some(Epoch::new(0)), ForkName::Altair => self.altair_fork_epoch, ForkName::Merge => self.bellatrix_fork_epoch, + //TODO: update this + ForkName::Dank => self.bellatrix_fork_epoch, } } diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index e6169760264..742a90ad33e 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -95,6 +95,11 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + type GasLimitDenominator: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MinGasLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxExtraDataBytes: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * New in Danksharding + */ + type MaxObjectListSize: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type ChunksPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -262,6 +267,8 @@ impl EthSpec for MainnetEthSpec { type GasLimitDenominator = U1024; type MinGasLimit = U5000; type MaxExtraDataBytes = U32; + type MaxObjectListSize = U16777216; // 2**24 + type ChunksPerBlob = U4096; type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch @@ -309,7 +316,9 @@ impl EthSpec for MinimalEthSpec { BytesPerLogsBloom, GasLimitDenominator, MinGasLimit, - MaxExtraDataBytes + MaxExtraDataBytes, + MaxObjectListSize, + ChunksPerBlob }); fn default_spec() -> ChainSpec { @@ -354,6 +363,8 @@ impl EthSpec for GnosisEthSpec { type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U2048; // 128 max attestations * 16 slots per epoch type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch + type MaxObjectListSize = U16777216; // 2**24 + type ChunksPerBlob = U4096; fn default_spec() -> ChainSpec { ChainSpec::gnosis() diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index e97b08309b7..dd83cf5a3c4 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -1,4 +1,4 @@ -use crate::{ChainSpec, Epoch}; +use crate::{ChainSpec, Epoch, Fork}; use serde_derive::{Deserialize, Serialize}; use std::convert::TryFrom; use std::fmt::{self, Display, Formatter}; @@ -11,6 +11,7 @@ pub enum ForkName { Base, Altair, Merge, + Dank, } impl ForkName { @@ -38,6 +39,12 @@ impl ForkName { spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec } + //TODO(sean): update + ForkName::Dank => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec + } } } @@ -49,6 +56,7 @@ impl ForkName { 
ForkName::Base => None, ForkName::Altair => Some(ForkName::Base), ForkName::Merge => Some(ForkName::Altair), + ForkName::Dank => Some(ForkName::Merge), } } @@ -59,7 +67,8 @@ impl ForkName { match self { ForkName::Base => Some(ForkName::Altair), ForkName::Altair => Some(ForkName::Merge), - ForkName::Merge => None, + ForkName::Merge => Some(ForkName::Dank), + ForkName::Dank => None, } } } @@ -101,6 +110,11 @@ macro_rules! map_fork_name_with { let (value, extra_data) = $body; ($t::Merge(value), extra_data) } + //TODO: don't have a beacon state variant for the new fork yet + ForkName::Dank => { + let (value, extra_data) = $body; + ($t::Merge(value), extra_data) + } } }; } @@ -124,6 +138,7 @@ impl Display for ForkName { ForkName::Base => "phase0".fmt(f), ForkName::Altair => "altair".fmt(f), ForkName::Merge => "bellatrix".fmt(f), + ForkName::Dank => "dank".fmt(f), } } } diff --git a/consensus/types/src/kzg_commitment.rs b/consensus/types/src/kzg_commitment.rs new file mode 100644 index 00000000000..154cb6b0037 --- /dev/null +++ b/consensus/types/src/kzg_commitment.rs @@ -0,0 +1,63 @@ +use crate::test_utils::TestRandom; +use crate::*; +use derivative::Derivative; +use serde_derive::{Deserialize, Serialize}; +use ssz::{Decode, DecodeError, Encode}; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use superstruct::superstruct; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; + +//TODO: is there a way around this newtype +#[derive(Derivative, Debug, Clone, Serialize, Deserialize)] +#[derivative(PartialEq, Eq, Hash)] +pub struct KZGCommitment(#[serde(with = "BigArray")] [u8; 48]); +impl TreeHash for KZGCommitment { + fn tree_hash_type() -> tree_hash::TreeHashType { + <[u8; 48] as TreeHash>::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> Vec { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + <[u8; 48] as TreeHash>::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} + +impl TestRandom for KZGCommitment { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { + KZGCommitment(<[u8; 48] as TestRandom>::random_for_test(rng)) + } +} + +impl Decode for KZGCommitment { + fn is_ssz_fixed_len() -> bool { + <[u8; 48] as Decode>::is_ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + <[u8; 48] as Decode>::from_ssz_bytes(bytes).map(KZGCommitment) + } +} + +impl Encode for KZGCommitment { + fn is_ssz_fixed_len() -> bool { + <[u8; 48] as Encode>::is_ssz_fixed_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.0.ssz_append(buf) + } + + fn ssz_bytes_len(&self) -> usize { + self.0.ssz_bytes_len() + } +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 32300173ebc..28234130dfd 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -86,11 +86,15 @@ pub mod sync_subnet_id; mod tree_hash_impls; pub mod validator_registration_data; +mod beacon_block_and_blobs; +mod kzg_commitment; pub mod slot_data; #[cfg(feature = "sqlite")] pub mod sqlite; +pub use kzg_commitment::KZGCommitment; use ethereum_types::{H160, H256}; +use serde::Serialize; pub use crate::aggregate_and_proof::AggregateAndProof; pub use crate::attestation::{Attestation, Error as AttestationError}; @@ -98,12 +102,12 @@ pub use crate::attestation_data::AttestationData; pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::{ - 
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BeaconBlockRef, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockDank, BeaconBlockMerge, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, }; pub use crate::beacon_block_body::{ - BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, - BeaconBlockBodyRef, BeaconBlockBodyRefMut, + BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyDank, + BeaconBlockBodyMerge, BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; @@ -144,7 +148,7 @@ pub use crate::shuffling_id::AttestationShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{ SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockHash, - SignedBeaconBlockMerge, SignedBlindedBeaconBlock, + SignedBeaconBlockMerge, SignedBlindedBeaconBlock,SignedBeaconBlockDank }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; @@ -165,12 +169,15 @@ pub use crate::validator::Validator; pub use crate::validator_registration_data::*; pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; +use serde_big_array::BigArray; pub type CommitteeIndex = u64; pub type Hash256 = H256; pub type Uint256 = ethereum_types::U256; pub type Address = H160; pub type ForkVersion = [u8; 4]; +pub type BLSFieldElement = Uint256; +pub type Blob = FixedVector; pub use bls::{ AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 5c40c4685c3..317cfddca9d 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -38,7 +38,7 @@ impl From for Hash256 { /// A `BeaconBlock` and a signature from its proposer. #[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Dank), variant_attributes( derive( Debug, @@ -72,6 +72,8 @@ pub struct SignedBeaconBlock = FullPayload, #[superstruct(only(Merge), partial_getter(rename = "message_merge"))] pub message: BeaconBlockMerge, + #[superstruct(only(Dank), partial_getter(rename = "message_dank"))] + pub message: BeaconBlockDank, pub signature: Signature, } @@ -129,6 +131,9 @@ impl> SignedBeaconBlock { BeaconBlock::Merge(message) => { SignedBeaconBlock::Merge(SignedBeaconBlockMerge { message, signature }) } + BeaconBlock::Dank(message) => { + SignedBeaconBlock::Dank(SignedBeaconBlockDank { message, signature }) + } } } diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index e77e5619393..5cb460b4f8f 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -78,5 +78,6 @@ pub fn previous_fork(fork_name: ForkName) -> ForkName { ForkName::Base => ForkName::Base, ForkName::Altair => ForkName::Base, ForkName::Merge => ForkName::Altair, // TODO: Check this when tests are released.. + ForkName::Dank => ForkName::Merge, // TODO: Check this when tests are released.. 
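// Illustrative, dependency-free sketch of the fork-ordering pattern that `previous_fork`
// above and the `ForkName`/`ChainSpec` hunks in this commit extend: a new fork slots in
// after `Merge`, `previous`/`next` stay mutual inverses, and the active fork at an epoch
// is the newest one whose activation epoch is scheduled and has passed. The names and
// epoch values below are assumptions for illustration, not the Lighthouse API.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ForkId {
    Base,
    Altair,
    Merge,
    Dank,
}

impl ForkId {
    fn previous(self) -> Option<ForkId> {
        match self {
            ForkId::Base => None,
            ForkId::Altair => Some(ForkId::Base),
            ForkId::Merge => Some(ForkId::Altair),
            ForkId::Dank => Some(ForkId::Merge),
        }
    }

    fn next(self) -> Option<ForkId> {
        match self {
            ForkId::Base => Some(ForkId::Altair),
            ForkId::Altair => Some(ForkId::Merge),
            ForkId::Merge => Some(ForkId::Dank),
            ForkId::Dank => None,
        }
    }
}

// Picks the newest fork whose activation epoch is both scheduled and <= `epoch`.
fn fork_at_epoch(epoch: u64, altair: Option<u64>, merge: Option<u64>, dank: Option<u64>) -> ForkId {
    let active = |activation: Option<u64>| matches!(activation, Some(a) if epoch >= a);
    if active(dank) {
        ForkId::Dank
    } else if active(merge) {
        ForkId::Merge
    } else if active(altair) {
        ForkId::Altair
    } else {
        ForkId::Base
    }
}

fn main() {
    // `previous` and `next` must remain mutual inverses as forks are appended.
    let mut fork = ForkId::Base;
    while let Some(next) = fork.next() {
        assert_eq!(next.previous(), Some(fork));
        fork = next;
    }
    // With Altair at epoch 0, Merge at 10 and Dank unscheduled, epoch 12 is still Merge.
    assert_eq!(fork_at_epoch(12, Some(0), Some(10), None), ForkId::Merge);
}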
} } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 0283d13da4a..1c3f42e18c8 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -278,6 +278,7 @@ impl> Case for EpochProcessing { } // No phase0 tests for Altair and later. ForkName::Altair | ForkName::Merge => T::name() != "participation_record_updates", + ForkName::Dank => false, // TODO: revisit when tests are out } } diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index ae12447abf3..bbe84409e01 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -61,6 +61,7 @@ impl Case for ForkTest { ForkName::Base => panic!("phase0 not supported"), ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| result_state), ForkName::Merge => upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state), + ForkName::Dank => panic!("danksharding not supported"), }; compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index d2b1bb2c624..f97949c398a 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -42,6 +42,11 @@ impl LoadCase for TransitionTest { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(metadata.fork_epoch); } + //TODO(sean): fix + ForkName::Dank => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(metadata.fork_epoch); + } } // Load blocks diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index cf02ae0c323..d16d7693cd7 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -90,6 +90,11 @@ impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { block: None, block_header: Some(block.block_header()), }), + BeaconBlock::Dank(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Dank, + block: None, + block_header: Some(block.block_header()), + }), } } From 7125f0e3c64ae34b6ff860ddc2f9e9923da8567f Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 18 Feb 2022 18:34:17 -0700 Subject: [PATCH 024/263] cargo fix --- consensus/types/src/beacon_block_and_blobs.rs | 6 +++--- consensus/types/src/fork_name.rs | 2 +- consensus/types/src/kzg_commitment.rs | 5 ----- consensus/types/src/lib.rs | 1 - 4 files changed, 4 insertions(+), 10 deletions(-) diff --git a/consensus/types/src/beacon_block_and_blobs.rs b/consensus/types/src/beacon_block_and_blobs.rs index b39fff77146..a24b751adc9 100644 --- a/consensus/types/src/beacon_block_and_blobs.rs +++ b/consensus/types/src/beacon_block_and_blobs.rs @@ -1,7 +1,7 @@ -use crate::{BLSFieldElement, Blob, EthSpec, SignedBeaconBlock}; +use crate::{Blob, EthSpec, SignedBeaconBlock}; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use ssz_types::{FixedVector, VariableList}; +use ssz_derive::{Encode}; +use ssz_types::{VariableList}; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index dd83cf5a3c4..f64726a5962 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -1,4 +1,4 @@ -use crate::{ChainSpec, Epoch, Fork}; +use crate::{ChainSpec, Epoch}; use 
serde_derive::{Deserialize, Serialize}; use std::convert::TryFrom; use std::fmt::{self, Display, Formatter}; diff --git a/consensus/types/src/kzg_commitment.rs b/consensus/types/src/kzg_commitment.rs index 154cb6b0037..4098451f2aa 100644 --- a/consensus/types/src/kzg_commitment.rs +++ b/consensus/types/src/kzg_commitment.rs @@ -3,12 +3,7 @@ use crate::*; use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; -use ssz_derive::{Decode, Encode}; -use ssz_types::VariableList; -use superstruct::superstruct; -use test_random_derive::TestRandom; use tree_hash::TreeHash; -use tree_hash_derive::TreeHash; //TODO: is there a way around this newtype #[derive(Derivative, Debug, Clone, Serialize, Deserialize)] diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 28234130dfd..87871aac8f6 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -94,7 +94,6 @@ pub mod sqlite; pub use kzg_commitment::KZGCommitment; use ethereum_types::{H160, H256}; -use serde::Serialize; pub use crate::aggregate_and_proof::AggregateAndProof; pub use crate::attestation::{Attestation, Error as AttestationError}; From 4cdf1b546d368599d3825c99d0c43f2f2da94604 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Sat, 19 Feb 2022 11:42:11 -0700 Subject: [PATCH 025/263] add shanghai fork version and epoch --- beacon_node/lighthouse_network/src/config.rs | 2 +- .../src/rpc/codec/ssz_snappy.rs | 18 ++++-- .../lighthouse_network/src/types/pubsub.rs | 8 +-- .../process_operations.rs | 2 +- consensus/types/src/beacon_block.rs | 12 ++-- consensus/types/src/beacon_block_body.rs | 10 +-- ...con_block_and_blobs.rs => blob_wrapper.rs} | 11 ++-- consensus/types/src/chain_spec.rs | 63 +++++++++++++++---- consensus/types/src/eth_spec.rs | 2 +- consensus/types/src/fork_context.rs | 7 +++ consensus/types/src/fork_name.rs | 17 +++-- consensus/types/src/lib.rs | 10 +-- consensus/types/src/signed_beacon_block.rs | 10 +-- testing/ef_tests/src/cases/common.rs | 2 +- .../ef_tests/src/cases/epoch_processing.rs | 2 +- testing/ef_tests/src/cases/fork.rs | 2 +- testing/ef_tests/src/cases/transition.rs | 7 +-- .../src/signing_method/web3signer.rs | 4 +- 18 files changed, 121 insertions(+), 68 deletions(-) rename consensus/types/src/{beacon_block_and_blobs.rs => blob_wrapper.rs} (58%) diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index ca2178432a2..05139e5580b 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -297,7 +297,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos // according to: https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#the-gossip-domain-gossipsub // the derivation of the message-id remains the same in the merge //TODO(sean): figure this out - ForkName::Altair | ForkName::Merge | ForkName::Dank => { + ForkName::Altair | ForkName::Merge | ForkName::Shanghai => { let topic_len_bytes = topic_bytes.len().to_le_bytes(); let mut vec = Vec::with_capacity( prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 699d877efed..9c84305e4f9 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use 
tokio_util::codec::{Decoder, Encoder}; use types::{ EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockDank, SignedBeaconBlockMerge, + SignedBeaconBlockBase, SignedBeaconBlockMerge, SignedBeaconBlockShanghai, }; use unsigned_varint::codec::Uvi; @@ -407,7 +407,9 @@ fn context_bytes( return match **ref_box_block { // NOTE: If you are adding another fork type here, be sure to modify the // `fork_context.to_context_bytes()` function to support it as well! - SignedBeaconBlock::Dank { .. } => fork_context.to_context_bytes(ForkName::Dank), + SignedBeaconBlock::Shanghai { .. } => { + fork_context.to_context_bytes(ForkName::Shanghai) + } SignedBeaconBlock::Merge { .. } => { // Merge context being `None` implies that "merge never happened". fork_context.to_context_bytes(ForkName::Merge) @@ -587,8 +589,10 @@ fn handle_v2_response( decoded_buffer, )?), )))), - ForkName::Dank => Ok(Some(RPCResponse::BlocksByRange(Box::new( - SignedBeaconBlock::Dank(SignedBeaconBlockDank::from_ssz_bytes(decoded_buffer)?), + ForkName::Shanghai => Ok(Some(RPCResponse::BlocksByRange(Box::new( + SignedBeaconBlock::Shanghai(SignedBeaconBlockShanghai::from_ssz_bytes( + decoded_buffer, + )?), )))), }, Protocol::BlocksByRoot => match fork_name { @@ -605,8 +609,10 @@ fn handle_v2_response( decoded_buffer, )?), )))), - ForkName::Dank => Ok(Some(RPCResponse::BlocksByRoot(Box::new( - SignedBeaconBlock::Dank(SignedBeaconBlockDank::from_ssz_bytes(decoded_buffer)?), + ForkName::Shanghai => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + SignedBeaconBlock::Shanghai(SignedBeaconBlockShanghai::from_ssz_bytes( + decoded_buffer, + )?), )))), }, _ => Err(RPCError::ErrorResponse( diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 291cde46ce1..b8a3c336168 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -11,8 +11,8 @@ use std::sync::Arc; use types::{ Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, - SignedBeaconBlockDank, SignedBeaconBlockMerge, SignedContributionAndProof, SignedVoluntaryExit, - SubnetId, SyncCommitteeMessage, SyncSubnetId, + SignedBeaconBlockMerge, SignedBeaconBlockShanghai, SignedContributionAndProof, + SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -167,8 +167,8 @@ impl PubsubMessage { SignedBeaconBlockMerge::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), - Some(ForkName::Dank) => SignedBeaconBlock::::Dank( - SignedBeaconBlockDank::from_ssz_bytes(data) + Some(ForkName::Shanghai) => SignedBeaconBlock::::Shanghai( + SignedBeaconBlockShanghai::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), None => { diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 0d74ac4dc62..10b0e5f9b0c 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -232,7 +232,7 @@ pub fn process_attestations<'a, T: EthSpec, Payload: ExecPayload>( } BeaconBlockBodyRef::Altair(_) | BeaconBlockBodyRef::Merge(_) - | BeaconBlockBodyRef::Dank(_) => { + | BeaconBlockBodyRef::Shanghai(_) => { altair::process_attestations( 
state, block_body.attestations(), diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index f9f2d8651be..dee3292a8b9 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -1,6 +1,6 @@ use crate::beacon_block_body::{ - BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyDank, BeaconBlockBodyMerge, - BeaconBlockBodyRef, BeaconBlockBodyRefMut, + BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, BeaconBlockBodyRef, + BeaconBlockBodyRefMut, BeaconBlockBodyShanghai, }; use crate::test_utils::TestRandom; use crate::*; @@ -17,7 +17,7 @@ use tree_hash_derive::TreeHash; /// A block of the `BeaconChain`. #[superstruct( - variants(Base, Altair, Merge, Dank), + variants(Base, Altair, Merge, Shanghai), variant_attributes( derive( Debug, @@ -64,8 +64,8 @@ pub struct BeaconBlock = FullPayload> { pub body: BeaconBlockBodyAltair, #[superstruct(only(Merge), partial_getter(rename = "body_merge"))] pub body: BeaconBlockBodyMerge, - #[superstruct(only(Dank), partial_getter(rename = "body_dank"))] - pub body: BeaconBlockBodyDank, + #[superstruct(only(Shanghai), partial_getter(rename = "body_shanghai"))] + pub body: BeaconBlockBodyShanghai, } pub type BlindedBeaconBlock = BeaconBlock>; @@ -191,7 +191,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRef<'a, T, Payload> { BeaconBlockRef::Base { .. } => ForkName::Base, BeaconBlockRef::Altair { .. } => ForkName::Altair, BeaconBlockRef::Merge { .. } => ForkName::Merge, - BeaconBlockRef::Dank { .. } => ForkName::Dank, + BeaconBlockRef::Shanghai { .. } => ForkName::Shanghai, }; if fork_at_slot == object_fork { diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index e8c66d2d358..438d9535c96 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -13,7 +13,7 @@ use tree_hash_derive::TreeHash; /// /// This *superstruct* abstracts over the hard-fork. #[superstruct( - variants(Base, Altair, Merge, Dank), + variants(Base, Altair, Merge, Shanghai), variant_attributes( derive( Debug, @@ -47,15 +47,15 @@ pub struct BeaconBlockBody = FullPayload> pub attestations: VariableList, T::MaxAttestations>, pub deposits: VariableList, pub voluntary_exits: VariableList, - #[superstruct(only(Altair, Merge, Dank))] + #[superstruct(only(Altair, Merge, Shanghai))] pub sync_aggregate: SyncAggregate, // We flatten the execution payload so that serde can use the name of the inner type, // either `execution_payload` for full payloads, or `execution_payload_header` for blinded // payloads. - #[superstruct(only(Merge, Dank))] + #[superstruct(only(Merge, Shanghai))] #[serde(flatten)] pub execution_payload: Payload, - #[superstruct(only(Dank))] + #[superstruct(only(Shanghai))] pub blob_kzgs: VariableList, #[superstruct(only(Base, Altair))] #[ssz(skip_serializing, skip_deserializing)] @@ -71,7 +71,7 @@ impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { BeaconBlockBodyRef::Base { .. } => ForkName::Base, BeaconBlockBodyRef::Altair { .. } => ForkName::Altair, BeaconBlockBodyRef::Merge { .. } => ForkName::Merge, - BeaconBlockBodyRef::Dank { .. } => ForkName::Dank, + BeaconBlockBodyRef::Shanghai { .. 
} => ForkName::Shanghai, } } } diff --git a/consensus/types/src/beacon_block_and_blobs.rs b/consensus/types/src/blob_wrapper.rs similarity index 58% rename from consensus/types/src/beacon_block_and_blobs.rs rename to consensus/types/src/blob_wrapper.rs index a24b751adc9..23f685e1e50 100644 --- a/consensus/types/src/beacon_block_and_blobs.rs +++ b/consensus/types/src/blob_wrapper.rs @@ -1,13 +1,14 @@ -use crate::{Blob, EthSpec, SignedBeaconBlock}; +use crate::{Blob, EthSpec, Hash256, SignedBeaconBlock, Slot}; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Encode}; -use ssz_types::{VariableList}; +use ssz_derive::Encode; +use ssz_types::VariableList; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash)] -pub struct BeaconBlockAndBlobs { - pub block: SignedBeaconBlock, +pub struct BlobWrapper { + pub beacon_block_root: Hash256, + pub beacon_block_slot: Slot, pub blobs: VariableList, E::MaxObjectListSize>, } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 6bfae08a455..8887061e495 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -151,8 +151,10 @@ pub struct ChainSpec { pub safe_slots_to_import_optimistically: u64, /* - * Danksharding hard fork params - */ + * Shanghai hard fork params + */ + pub shanghai_fork_version: [u8; 4], + pub shanghai_fork_epoch: Option, /* * Networking @@ -234,11 +236,14 @@ impl ChainSpec { /// Returns the name of the fork which is active at `epoch`. pub fn fork_name_at_epoch(&self, epoch: Epoch) -> ForkName { - match self.bellatrix_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, - _ => match self.altair_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, - _ => ForkName::Base, + match self.shanghai_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Shanghai, + _ => match self.bellatrix_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, + _ => match self.altair_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, + _ => ForkName::Base, + }, }, } } @@ -249,8 +254,7 @@ impl ChainSpec { ForkName::Base => self.genesis_fork_version, ForkName::Altair => self.altair_fork_version, ForkName::Merge => self.bellatrix_fork_version, - //TODO: update this - ForkName::Dank => self.bellatrix_fork_version, + ForkName::Shanghai => self.shanghai_fork_version, } } @@ -260,8 +264,7 @@ impl ChainSpec { ForkName::Base => Some(Epoch::new(0)), ForkName::Altair => self.altair_fork_epoch, ForkName::Merge => self.bellatrix_fork_epoch, - //TODO: update this - ForkName::Dank => self.bellatrix_fork_epoch, + ForkName::Shanghai => self.shanghai_fork_epoch, } } @@ -576,6 +579,13 @@ impl ChainSpec { terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, + /* + * Shanghai hardfork params + */ + //FIXME(sean) + shanghai_fork_version: [0x03, 0x00, 0x00, 0x00], + shanghai_fork_epoch: None, + /* * Network specific */ @@ -631,6 +641,10 @@ impl ChainSpec { // `Uint256::MAX` which is `2*256- 1`. 
.checked_add(Uint256::one()) .expect("addition does not overflow"), + // Shanghai + //FIXME(sean) + shanghai_fork_version: [0x03, 0x00, 0x00, 0x01], + shanghai_fork_epoch: None, // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -786,6 +800,10 @@ impl ChainSpec { terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, + //FIXME(sean) + shanghai_fork_version: [0x03, 0x00, 0x00, 0x64], + shanghai_fork_epoch: None, + /* * Network specific */ @@ -861,6 +879,16 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub bellatrix_fork_epoch: Option>, + // FIXME(sean): remove this default + #[serde(default = "default_shanghai_fork_version")] + #[serde(with = "eth2_serde_utils::bytes_4_hex")] + shanghai_fork_version: [u8; 4], + // FIXME(sean): remove this default + #[serde(default = "default_shanghai_fork_epoch")] + #[serde(serialize_with = "serialize_fork_epoch")] + #[serde(deserialize_with = "deserialize_fork_epoch")] + pub shanghai_fork_epoch: Option>, + #[serde(with = "eth2_serde_utils::quoted_u64")] seconds_per_slot: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] @@ -898,6 +926,11 @@ fn default_bellatrix_fork_version() -> [u8; 4] { [0xff, 0xff, 0xff, 0xff] } +fn default_shanghai_fork_version() -> [u8; 4] { + // This value shouldn't be used. + [0xff, 0xff, 0xff, 0xff] +} + /// Placeholder value: 2^256-2^10 (115792089237316195423570985008687907853269984665640564039457584007913129638912). /// /// Taken from https://github.com/ethereum/consensus-specs/blob/d5e4828aecafaf1c57ef67a5f23c4ae7b08c5137/configs/mainnet.yaml#L15-L16 @@ -994,6 +1027,10 @@ impl Config { bellatrix_fork_epoch: spec .bellatrix_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), + shanghai_fork_version: spec.shanghai_fork_version, + shanghai_fork_epoch: spec + .shanghai_fork_epoch + .map(|epoch| MaybeQuoted { value: epoch }), seconds_per_slot: spec.seconds_per_slot, seconds_per_eth1_block: spec.seconds_per_eth1_block, @@ -1039,6 +1076,8 @@ impl Config { altair_fork_epoch, bellatrix_fork_epoch, bellatrix_fork_version, + shanghai_fork_epoch, + shanghai_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, @@ -1069,6 +1108,8 @@ impl Config { altair_fork_epoch: altair_fork_epoch.map(|q| q.value), bellatrix_fork_epoch: bellatrix_fork_epoch.map(|q| q.value), bellatrix_fork_version, + shanghai_fork_epoch: shanghai_fork_epoch.map(|q| q.value), + shanghai_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 742a90ad33e..3089380b5c9 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -96,7 +96,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + type MinGasLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxExtraDataBytes: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* - * New in Danksharding + * New in Shanghaisharding */ type MaxObjectListSize: Unsigned + Clone + Sync + Send + Debug + PartialEq; type ChunksPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 52b9294c8ca..742136ca9e6 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -47,6 +47,13 @@ impl ForkContext { )); } + if spec.shanghai_fork_epoch.is_some() { + 
fork_to_digest.push(( + ForkName::Shanghai, + ChainSpec::compute_fork_digest(spec.shanghai_fork_version, genesis_validators_root), + )); + } + let fork_to_digest: HashMap = fork_to_digest.into_iter().collect(); let digest_to_fork = fork_to_digest diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index f64726a5962..88108ffa35a 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -11,7 +11,7 @@ pub enum ForkName { Base, Altair, Merge, - Dank, + Shanghai, } impl ForkName { @@ -39,10 +39,9 @@ impl ForkName { spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec } - //TODO(sean): update - ForkName::Dank => { - spec.altair_fork_epoch = Some(Epoch::new(0)); + ForkName::Shanghai => { spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.shanghai_fork_epoch = Some(Epoch::new(0)); spec } } @@ -56,7 +55,7 @@ impl ForkName { ForkName::Base => None, ForkName::Altair => Some(ForkName::Base), ForkName::Merge => Some(ForkName::Altair), - ForkName::Dank => Some(ForkName::Merge), + ForkName::Shanghai => Some(ForkName::Merge), } } @@ -67,8 +66,8 @@ impl ForkName { match self { ForkName::Base => Some(ForkName::Altair), ForkName::Altair => Some(ForkName::Merge), - ForkName::Merge => Some(ForkName::Dank), - ForkName::Dank => None, + ForkName::Merge => Some(ForkName::Shanghai), + ForkName::Shanghai => None, } } } @@ -111,7 +110,7 @@ macro_rules! map_fork_name_with { ($t::Merge(value), extra_data) } //TODO: don't have a beacon state variant for the new fork yet - ForkName::Dank => { + ForkName::Shanghai => { let (value, extra_data) = $body; ($t::Merge(value), extra_data) } @@ -138,7 +137,7 @@ impl Display for ForkName { ForkName::Base => "phase0".fmt(f), ForkName::Altair => "altair".fmt(f), ForkName::Merge => "bellatrix".fmt(f), - ForkName::Dank => "dank".fmt(f), + ForkName::Shanghai => "shanghai".fmt(f), } } } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 87871aac8f6..e11bab770a7 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -86,7 +86,7 @@ pub mod sync_subnet_id; mod tree_hash_impls; pub mod validator_registration_data; -mod beacon_block_and_blobs; +mod blob_wrapper; mod kzg_commitment; pub mod slot_data; #[cfg(feature = "sqlite")] @@ -101,12 +101,12 @@ pub use crate::attestation_data::AttestationData; pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockDank, BeaconBlockMerge, BeaconBlockRef, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockShanghai, BeaconBlockMerge, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, }; pub use crate::beacon_block_body::{ - BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyDank, - BeaconBlockBodyMerge, BeaconBlockBodyRef, BeaconBlockBodyRefMut, + BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, + BeaconBlockBodyRef, BeaconBlockBodyRefMut, BeaconBlockBodyShanghai, }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; @@ -147,7 +147,7 @@ pub use crate::shuffling_id::AttestationShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{ SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockHash, - SignedBeaconBlockMerge, SignedBlindedBeaconBlock,SignedBeaconBlockDank + 
SignedBeaconBlockMerge, SignedBlindedBeaconBlock,SignedBeaconBlockShanghai }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 317cfddca9d..49822da821a 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -38,7 +38,7 @@ impl From for Hash256 { /// A `BeaconBlock` and a signature from its proposer. #[superstruct( - variants(Base, Altair, Merge, Dank), + variants(Base, Altair, Merge, Shanghai), variant_attributes( derive( Debug, @@ -72,8 +72,8 @@ pub struct SignedBeaconBlock = FullPayload, #[superstruct(only(Merge), partial_getter(rename = "message_merge"))] pub message: BeaconBlockMerge, - #[superstruct(only(Dank), partial_getter(rename = "message_dank"))] - pub message: BeaconBlockDank, + #[superstruct(only(Shanghai), partial_getter(rename = "message_shanghai"))] + pub message: BeaconBlockShanghai, pub signature: Signature, } @@ -131,8 +131,8 @@ impl> SignedBeaconBlock { BeaconBlock::Merge(message) => { SignedBeaconBlock::Merge(SignedBeaconBlockMerge { message, signature }) } - BeaconBlock::Dank(message) => { - SignedBeaconBlock::Dank(SignedBeaconBlockDank { message, signature }) + BeaconBlock::Shanghai(message) => { + SignedBeaconBlock::Shanghai(SignedBeaconBlockShanghai { message, signature }) } } } diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index 5cb460b4f8f..b695015d797 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -78,6 +78,6 @@ pub fn previous_fork(fork_name: ForkName) -> ForkName { ForkName::Base => ForkName::Base, ForkName::Altair => ForkName::Base, ForkName::Merge => ForkName::Altair, // TODO: Check this when tests are released.. - ForkName::Dank => ForkName::Merge, // TODO: Check this when tests are released.. + ForkName::Shanghai => ForkName::Merge, // TODO: Check this when tests are released.. } } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 1c3f42e18c8..9163b551ae3 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -278,7 +278,7 @@ impl> Case for EpochProcessing { } // No phase0 tests for Altair and later. 
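// Hand-rolled sketch of why adding the `Shanghai` variant to the `superstruct`-generated
// block types in this commit fans out into so many hunks: every exhaustive `match` over
// the block enum (codecs, gossip decoding, signing, state processing) must gain one more
// arm. The types below are simplified stand-ins with assumed fields, not the real
// Lighthouse types.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ForkName {
    Base,
    Altair,
    Merge,
    Shanghai,
}

// One struct per fork, mirroring the `BeaconBlockBase`/`Altair`/`Merge`/`Shanghai` family.
#[derive(Debug)]
struct BlockBase { slot: u64 }
#[derive(Debug)]
struct BlockAltair { slot: u64 }
#[derive(Debug)]
struct BlockMerge { slot: u64 }
#[derive(Debug)]
struct BlockShanghai { slot: u64, blob_kzg_count: usize } // blob commitments exist only from Shanghai on

// The enum that callers match on; a new fork means a new arm in every such match.
#[derive(Debug)]
enum Block {
    Base(BlockBase),
    Altair(BlockAltair),
    Merge(BlockMerge),
    Shanghai(BlockShanghai),
}

impl Block {
    fn fork_name(&self) -> ForkName {
        match self {
            Block::Base(_) => ForkName::Base,
            Block::Altair(_) => ForkName::Altair,
            Block::Merge(_) => ForkName::Merge,
            Block::Shanghai(_) => ForkName::Shanghai,
        }
    }

    fn slot(&self) -> u64 {
        match self {
            Block::Base(b) => b.slot,
            Block::Altair(b) => b.slot,
            Block::Merge(b) => b.slot,
            Block::Shanghai(b) => b.slot,
        }
    }
}

fn main() {
    let block = Block::Shanghai(BlockShanghai { slot: 42, blob_kzg_count: 2 });
    assert_eq!(block.fork_name(), ForkName::Shanghai);
    assert_eq!(block.slot(), 42);
}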
ForkName::Altair | ForkName::Merge => T::name() != "participation_record_updates", - ForkName::Dank => false, // TODO: revisit when tests are out + ForkName::Shanghai => false, // TODO: revisit when tests are out } } diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index bbe84409e01..57c4f125442 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -61,7 +61,7 @@ impl Case for ForkTest { ForkName::Base => panic!("phase0 not supported"), ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| result_state), ForkName::Merge => upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state), - ForkName::Dank => panic!("danksharding not supported"), + ForkName::Shanghai => panic!("shanghai not supported"), }; compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index f97949c398a..bbc98994a77 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -42,10 +42,9 @@ impl LoadCase for TransitionTest { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(metadata.fork_epoch); } - //TODO(sean): fix - ForkName::Dank => { - spec.altair_fork_epoch = Some(Epoch::new(0)); - spec.bellatrix_fork_epoch = Some(metadata.fork_epoch); + ForkName::Shanghai => { + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.shanghai_fork_epoch = Some(metadata.fork_epoch); } } diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index d16d7693cd7..967d6b139e0 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -90,8 +90,8 @@ impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { block: None, block_header: Some(block.block_header()), }), - BeaconBlock::Dank(_) => Ok(Web3SignerObject::BeaconBlock { - version: ForkName::Dank, + BeaconBlock::Shanghai(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Shanghai, block: None, block_header: Some(block.block_header()), }), From 4008da6c609ed021064a816015dd92b0f4e66011 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Sat, 19 Feb 2022 15:00:45 -0700 Subject: [PATCH 026/263] sync tx blobs --- .../src/peer_manager/mod.rs | 3 ++ .../src/rpc/codec/ssz_snappy.rs | 23 ++++++++++++--- .../lighthouse_network/src/rpc/methods.rs | 26 ++++++++++++++++- beacon_node/lighthouse_network/src/rpc/mod.rs | 6 ++++ .../lighthouse_network/src/rpc/outbound.rs | 9 ++++++ .../lighthouse_network/src/rpc/protocol.rs | 29 ++++++++++++++++--- .../src/rpc/rate_limiter.rs | 9 ++++++ .../lighthouse_network/src/service/mod.rs | 9 ++++++ beacon_node/network/src/router/mod.rs | 7 +++++ beacon_node/network/src/router/processor.rs | 25 +++++++++++++--- consensus/types/src/blob_wrapper.rs | 17 +++++++++-- consensus/types/src/eth_spec.rs | 8 +++++ consensus/types/src/lib.rs | 1 + 13 files changed, 157 insertions(+), 15 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 0f291359565..905ce998f1a 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -501,6 +501,7 @@ impl PeerManager { Protocol::Ping => PeerAction::MidToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot 
=> PeerAction::MidToleranceError, + Protocol::TxBlobsByRange => PeerAction::MidToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, @@ -515,6 +516,7 @@ impl PeerManager { match protocol { Protocol::Ping => PeerAction::Fatal, Protocol::BlocksByRange => return, + Protocol::TxBlobsByRange => return, Protocol::BlocksByRoot => return, Protocol::Goodbye => return, Protocol::MetaData => PeerAction::LowToleranceError, @@ -530,6 +532,7 @@ impl PeerManager { ConnectionDirection::Outgoing => match protocol { Protocol::Ping => PeerAction::LowToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, + Protocol::TxBlobsByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, Protocol::Goodbye => return, Protocol::MetaData => return, diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 9c84305e4f9..7b83eb1b6ab 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -15,10 +15,7 @@ use std::io::{Read, Write}; use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; -use types::{ - EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockMerge, SignedBeaconBlockShanghai, -}; +use types::{BlobWrapper, EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockMerge, SignedBeaconBlockShanghai}; use unsigned_varint::codec::Uvi; const CONTEXT_BYTES_LEN: usize = 4; @@ -69,6 +66,7 @@ impl Encoder> for SSZSnappyInboundCodec< RPCCodedResponse::Success(resp) => match &resp { RPCResponse::Status(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), + RPCResponse::TxBlobsByRange(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), RPCResponse::Pong(res) => res.data.as_ssz_bytes(), RPCResponse::MetaData(res) => @@ -227,6 +225,7 @@ impl Encoder> for SSZSnappyOutboundCodec< OutboundRequest::Status(req) => req.as_ssz_bytes(), OutboundRequest::Goodbye(req) => req.as_ssz_bytes(), OutboundRequest::BlocksByRange(req) => req.as_ssz_bytes(), + OutboundRequest::TxBlobsByRange(req) => req.as_ssz_bytes(), OutboundRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(), OutboundRequest::Ping(req) => req.as_ssz_bytes(), OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode @@ -469,6 +468,9 @@ fn handle_v1_request( Protocol::BlocksByRange => Ok(Some(InboundRequest::BlocksByRange( OldBlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?, ))), + Protocol::TxBlobsByRange => Ok(Some(InboundRequest::TxBlobsByRange( + TxBlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))), Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, }))), @@ -501,6 +503,9 @@ fn handle_v2_request( Protocol::BlocksByRange => Ok(Some(InboundRequest::BlocksByRange( OldBlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?, ))), + Protocol::TxBlobsByRange => Ok(Some(InboundRequest::TxBlobsByRange( + TxBlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))), Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { block_roots: 
VariableList::from_ssz_bytes(decoded_buffer)?, }))), @@ -538,6 +543,9 @@ fn handle_v1_response( Protocol::BlocksByRange => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), + Protocol::TxBlobsByRange => Ok(Some(RPCResponse::TxBlobsByRange(Arc::new( + BlobWrapper::from_ssz_bytes(decoded_buffer)?), + ))), Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), @@ -595,6 +603,13 @@ fn handle_v2_response( )?), )))), }, + Protocol::TxBlobsByRange => { + Ok(Some(RPCResponse::TxBlobsByRange(Box::new( + BlobWrapper::from_ssz_bytes( + decoded_buffer, + )? + )))) + }, Protocol::BlocksByRoot => match fork_name { ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes( diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 26d755a6e06..8aa04866c03 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -12,7 +12,7 @@ use std::ops::Deref; use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; -use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{BlobWrapper, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// Maximum number of blocks in a single request. pub type MaxRequestBlocks = U1024; @@ -221,6 +221,12 @@ pub struct OldBlocksByRangeRequest { pub step: u64, } +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct TxBlobsByRangeRequest { + pub execution_block_number: u64, + pub count: u64, +} + /// Request a number of beacon block bodies from a peer. #[derive(Clone, Debug, PartialEq)] pub struct BlocksByRootRequest { @@ -240,6 +246,8 @@ pub enum RPCResponse { /// batch. BlocksByRange(Arc>), + TxBlobsByRange(Box>), + /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Arc>), @@ -256,6 +264,8 @@ pub enum ResponseTermination { /// Blocks by range stream termination. BlocksByRange, + TxBlobsByRange, + /// Blocks by root stream termination. 
BlocksByRoot, } @@ -318,6 +328,7 @@ impl RPCCodedResponse { RPCCodedResponse::Success(resp) => match resp { RPCResponse::Status(_) => false, RPCResponse::BlocksByRange(_) => true, + RPCResponse::TxBlobsByRange(_) => true, RPCResponse::BlocksByRoot(_) => true, RPCResponse::Pong(_) => false, RPCResponse::MetaData(_) => false, @@ -385,6 +396,9 @@ impl std::fmt::Display for RPCResponse { RPCResponse::BlocksByRange(block) => { write!(f, "BlocksByRange: Block slot: {}", block.slot()) } + RPCResponse::TxBlobsByRange(blob) => { + write!(f, "TxBlobsByRange: Block slot: {}", blob.beacon_block_slot) + } RPCResponse::BlocksByRoot(block) => { write!(f, "BlocksByRoot: Block slot: {}", block.slot()) } @@ -436,6 +450,16 @@ impl std::fmt::Display for OldBlocksByRangeRequest { } } +impl std::fmt::Display for TxBlobsByRangeRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Execution block number: {}, Count: {}", + self.execution_block_number, self.count + ) + } +} + impl slog::KV for StatusMessage { fn serialize( &self, diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 7b0092ef713..2d374c7709c 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -125,6 +125,12 @@ impl RPC { methods::MAX_REQUEST_BLOCKS, Duration::from_secs(10), ) + //FIXME(sean) + .n_every( + Protocol::TxBlobsByRange, + methods::MAX_REQUEST_BLOCKS, + Duration::from_secs(10), + ) .n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10)) .build() .expect("Configuration parameters are valid"); diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 7d5acc43643..7664a5752c5 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -37,6 +37,7 @@ pub enum OutboundRequest { Status(StatusMessage), Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), + TxBlobsByRange(TxBlobsByRangeRequest), BlocksByRoot(BlocksByRootRequest), Ping(Ping), MetaData(PhantomData), @@ -71,6 +72,10 @@ impl OutboundRequest { ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), ], + //FIXME(sean) what should the protocol version be? 
+ OutboundRequest::TxBlobsByRange(_) => vec![ + ProtocolId::new(Protocol::TxBlobsByRange, Version::V2, Encoding::SSZSnappy), + ], OutboundRequest::BlocksByRoot(_) => vec![ ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), @@ -95,6 +100,7 @@ impl OutboundRequest { OutboundRequest::Status(_) => 1, OutboundRequest::Goodbye(_) => 0, OutboundRequest::BlocksByRange(req) => req.count, + OutboundRequest::TxBlobsByRange(req) => req.count, OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, OutboundRequest::Ping(_) => 1, OutboundRequest::MetaData(_) => 1, @@ -107,6 +113,7 @@ impl OutboundRequest { OutboundRequest::Status(_) => Protocol::Status, OutboundRequest::Goodbye(_) => Protocol::Goodbye, OutboundRequest::BlocksByRange(_) => Protocol::BlocksByRange, + OutboundRequest::TxBlobsByRange(_) => Protocol::TxBlobsByRange, OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, OutboundRequest::Ping(_) => Protocol::Ping, OutboundRequest::MetaData(_) => Protocol::MetaData, @@ -120,6 +127,7 @@ impl OutboundRequest { // this only gets called after `multiple_responses()` returns true. Therefore, only // variants that have `multiple_responses()` can have values. OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, + OutboundRequest::TxBlobsByRange(_) => ResponseTermination::TxBlobsByRange, OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, OutboundRequest::Status(_) => unreachable!(), OutboundRequest::Goodbye(_) => unreachable!(), @@ -175,6 +183,7 @@ impl std::fmt::Display for OutboundRequest { OutboundRequest::Status(status) => write!(f, "Status Message: {}", status), OutboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), + OutboundRequest::TxBlobsByRange(req) => write!(f, "Blobs by range: {}", req), OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), OutboundRequest::MetaData(_) => write!(f, "MetaData request"), diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 81960214b16..579a31d95c4 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -20,10 +20,7 @@ use tokio_util::{ codec::Framed, compat::{Compat, FuturesAsyncReadCompatExt}, }; -use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EthSpec, ForkContext, - ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, -}; +use types::{BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BlobWrapper, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock}; lazy_static! { // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -71,6 +68,12 @@ lazy_static! 
{ + types::ExecutionPayload::::max_execution_payload_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field + pub static ref BLOB_MIN: usize = BlobWrapper::::empty() + .as_ssz_bytes() + .len(); + + pub static ref BLOB_MAX: usize = BlobWrapper::::max_size(); + pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize = VariableList::::from(Vec::::new()) .as_ssz_bytes() @@ -147,6 +150,7 @@ pub enum Protocol { Goodbye, /// The `BlocksByRange` protocol name. BlocksByRange, + TxBlobsByRange, /// The `BlocksByRoot` protocol name. BlocksByRoot, /// The `Ping` protocol name. @@ -176,6 +180,8 @@ impl std::fmt::Display for Protocol { Protocol::Status => "status", Protocol::Goodbye => "goodbye", Protocol::BlocksByRange => "beacon_blocks_by_range", + //FIXME(sean) verify + Protocol::TxBlobsByRange => "tx_blobs_by_range", Protocol::BlocksByRoot => "beacon_blocks_by_root", Protocol::Ping => "ping", Protocol::MetaData => "metadata", @@ -282,6 +288,12 @@ impl ProtocolId { ::ssz_fixed_len(), ::ssz_fixed_len(), ), + Protocol::TxBlobsByRange => { + RpcLimits::new( + ::ssz_fixed_len(), + ::ssz_fixed_len(), + ) + } Protocol::BlocksByRoot => { RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX) } @@ -451,6 +463,11 @@ impl InboundRequest { ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), ], + //FIXME(sean) do I need v1 + InboundRequest::TxBlobsByRange(_) => vec![ + // V2 has higher preference when negotiating a stream + ProtocolId::new(Protocol::TxBlobsByRange, Version::V2, Encoding::SSZSnappy), + ], InboundRequest::BlocksByRoot(_) => vec![ // V2 has higher preference when negotiating a stream ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), @@ -476,6 +493,7 @@ impl InboundRequest { InboundRequest::Status(_) => 1, InboundRequest::Goodbye(_) => 0, InboundRequest::BlocksByRange(req) => req.count, + InboundRequest::TxBlobsByRange(req) => req.count, InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, InboundRequest::Ping(_) => 1, InboundRequest::MetaData(_) => 1, @@ -488,6 +506,7 @@ impl InboundRequest { InboundRequest::Status(_) => Protocol::Status, InboundRequest::Goodbye(_) => Protocol::Goodbye, InboundRequest::BlocksByRange(_) => Protocol::BlocksByRange, + InboundRequest::TxBlobsByRange(_) => Protocol::TxBlobsByRange, InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, InboundRequest::Ping(_) => Protocol::Ping, InboundRequest::MetaData(_) => Protocol::MetaData, @@ -501,6 +520,7 @@ impl InboundRequest { // this only gets called after `multiple_responses()` returns true. Therefore, only // variants that have `multiple_responses()` can have values. 
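// Minimal sketch of the bookkeeping referred to in the comment above and extended by
// `TxBlobsByRange`: multi-chunk requests advertise how many response chunks to expect
// (used for rate limiting) and are the only ones that carry a stream-termination marker.
// The request enum below is made up for illustration; the real definitions live in the
// RPC crate.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Req {
    Status,
    BlocksByRange { count: u64 },
    TxBlobsByRange { count: u64 },
}

impl Req {
    // Single-shot requests expect exactly one response chunk.
    fn expected_responses(self) -> u64 {
        match self {
            Req::Status => 1,
            Req::BlocksByRange { count } => count,
            Req::TxBlobsByRange { count } => count,
        }
    }

    // Only multi-chunk requests need an explicit end-of-stream marker.
    fn is_multi_chunk(self) -> bool {
        matches!(self, Req::BlocksByRange { .. } | Req::TxBlobsByRange { .. })
    }
}

fn main() {
    let req = Req::TxBlobsByRange { count: 8 };
    assert_eq!(req.expected_responses(), 8);
    assert!(req.is_multi_chunk());
    assert!(!Req::Status.is_multi_chunk());
}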
InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, + InboundRequest::TxBlobsByRange(_) => ResponseTermination::TxBlobsByRange, InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, InboundRequest::Status(_) => unreachable!(), InboundRequest::Goodbye(_) => unreachable!(), @@ -606,6 +626,7 @@ impl std::fmt::Display for InboundRequest { InboundRequest::Status(status) => write!(f, "Status Message: {}", status), InboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), InboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), + InboundRequest::TxBlobsByRange(req) => write!(f, "Blobs by range: {}", req), InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), InboundRequest::MetaData(_) => write!(f, "MetaData request"), diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 70b14c33dec..6d6d3446272 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -71,6 +71,7 @@ pub struct RPCRateLimiter { status_rl: Limiter, /// BlocksByRange rate limiter. bbrange_rl: Limiter, + txbbrange_rl: Limiter, /// BlocksByRoot rate limiter. bbroots_rl: Limiter, } @@ -96,6 +97,7 @@ pub struct RPCRateLimiterBuilder { status_quota: Option, /// Quota for the BlocksByRange protocol. bbrange_quota: Option, + txbbrange_quota: Option, /// Quota for the BlocksByRoot protocol. bbroots_quota: Option, } @@ -115,6 +117,7 @@ impl RPCRateLimiterBuilder { Protocol::MetaData => self.metadata_quota = q, Protocol::Goodbye => self.goodbye_quota = q, Protocol::BlocksByRange => self.bbrange_quota = q, + Protocol::TxBlobsByRange => self.txbbrange_quota = q, Protocol::BlocksByRoot => self.bbroots_quota = q, } self @@ -155,6 +158,9 @@ impl RPCRateLimiterBuilder { let bbrange_quota = self .bbrange_quota .ok_or("BlocksByRange quota not specified")?; + let txbbrange_quota = self + .txbbrange_quota + .ok_or("TxBlobsByRange quota not specified")?; // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; @@ -163,6 +169,7 @@ impl RPCRateLimiterBuilder { let goodbye_rl = Limiter::from_quota(goodbye_quota)?; let bbroots_rl = Limiter::from_quota(bbroots_quota)?; let bbrange_rl = Limiter::from_quota(bbrange_quota)?; + let txbbrange_rl = Limiter::from_quota(txbbrange_quota)?; // check for peers to prune every 30 seconds, starting in 30 seconds let prune_every = tokio::time::Duration::from_secs(30); @@ -176,6 +183,7 @@ impl RPCRateLimiterBuilder { goodbye_rl, bbroots_rl, bbrange_rl, + txbbrange_rl, init_time: Instant::now(), }) } @@ -198,6 +206,7 @@ impl RPCRateLimiter { Protocol::MetaData => &mut self.metadata_rl, Protocol::Goodbye => &mut self.goodbye_rl, Protocol::BlocksByRange => &mut self.bbrange_rl, + Protocol::TxBlobsByRange => &mut self.txbbrange_rl, Protocol::BlocksByRoot => &mut self.bbroots_rl, }; check(limiter) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 53d29ccb21d..02b0f60e9b3 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -39,7 +39,9 @@ use std::sync::Arc; use std::task::{Context, Poll}; use types::{ consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, + BlobWrapper, SignedBeaconBlock, SyncSubnetId }; +use 
crate::rpc::methods::TxBlobsByRangeRequest; use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER}; use self::behaviour::Behaviour; @@ -981,6 +983,9 @@ impl Network { Request::BlocksByRange { .. } => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_range"]) } + Request::TxBlobsByRange { .. } => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["tx_blobs_by_range"]) + } Request::BlocksByRoot { .. } => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) } @@ -1271,6 +1276,9 @@ impl Network { RPCResponse::BlocksByRange(resp) => { self.build_response(id, peer_id, Response::BlocksByRange(Some(resp))) } + RPCResponse::TxBlobsByRange(resp) => { + self.propagate_response(id, peer_id, Response::TxBlobsByRange(Some(resp))) + } RPCResponse::BlocksByRoot(resp) => { self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } @@ -1279,6 +1287,7 @@ impl Network { Ok(RPCReceived::EndOfStream(id, termination)) => { let response = match termination { ResponseTermination::BlocksByRange => Response::BlocksByRange(None), + ResponseTermination::TxBlobsByRange => Response::TxBlobsByRange(None), ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), }; self.build_response(id, peer_id, response) diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index 03b877506fb..e17f108586e 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -165,6 +165,9 @@ impl Router { Request::BlocksByRange(request) => self .processor .on_blocks_by_range_request(peer_id, id, request), + Request::TxBlobsByRange(request) => self + .processor + .on_tx_blobs_by_range_request(peer_id, id, request), Request::BlocksByRoot(request) => self .processor .on_blocks_by_root_request(peer_id, id, request), @@ -188,6 +191,10 @@ impl Router { self.processor .on_blocks_by_range_response(peer_id, request_id, beacon_block); } + Response::TxBlobsByRange(blob_wrapper) => { + self.processor + .on_tx_blobs_by_range_response(peer_id, request_id, blob_wrapper); + } Response::BlocksByRoot(beacon_block) => { self.processor .on_blocks_by_root_response(peer_id, request_id, beacon_block); diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index ce11cbdcef3..6bc64220624 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -16,10 +16,8 @@ use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::SyncCommitteeMessage; use tokio::sync::mpsc; -use types::{ - Attestation, AttesterSlashing, EthSpec, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId, -}; +use lighthouse_network::rpc::methods::TxBlobsByRangeRequest; +use types::{Attestation, AttesterSlashing, BlobWrapper, EthSpec, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId}; /// Processes validated messages from the network. It relays necessary data to the syncing thread /// and processes blocks from the pubsub network. @@ -204,6 +202,25 @@ impl Processor { }); } + /// Handle a `BlocksByRange` request from the peer. 
+    pub fn on_tx_blobs_by_range_request(
+        &mut self,
+        peer_id: PeerId,
+        request_id: PeerRequestId,
+        req: TxBlobsByRangeRequest,
+    ) {
+        //FIXME(sean)
+    }
+
+    pub fn on_tx_blobs_by_range_response(
+        &mut self,
+        peer_id: PeerId,
+        request_id: RequestId,
+        blob_wrapper: Option<Box<BlobWrapper<T::EthSpec>>>,
+    ) {
+        //FIXME(sean)
+    }
+
     /// Handle a `BlocksByRoot` response from the peer.
     pub fn on_blocks_by_root_response(
         &mut self,
diff --git a/consensus/types/src/blob_wrapper.rs b/consensus/types/src/blob_wrapper.rs
index 23f685e1e50..4368be0bd60 100644
--- a/consensus/types/src/blob_wrapper.rs
+++ b/consensus/types/src/blob_wrapper.rs
@@ -1,14 +1,27 @@
 use crate::{Blob, EthSpec, Hash256, SignedBeaconBlock, Slot};
 use serde_derive::{Deserialize, Serialize};
-use ssz_derive::Encode;
+use ssz_derive::{Encode, Decode};
 use ssz_types::VariableList;
 use tree_hash::TreeHash;
 use tree_hash_derive::TreeHash;
+use ssz::{Decode, Encode};

 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
-#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash)]
+#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq, Default)]
 pub struct BlobWrapper<E: EthSpec> {
     pub beacon_block_root: Hash256,
     pub beacon_block_slot: Slot,
     pub blobs: VariableList<Blob<E>, E::MaxObjectListSize>,
 }
+
+impl<E: EthSpec> BlobWrapper<E> {
+    pub fn empty() -> Self {
+        Self::default()
+    }
+    pub fn max_size() -> usize {
+        // Fixed part
+        Self::empty().as_ssz_bytes().len()
+            // Max size of variable length `blobs` field
+            + (E::max_object_list_size() * <Blob<E> as Encode>::ssz_fixed_len())
+    }
+}
\ No newline at end of file
diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs
index 3089380b5c9..6bf462b0261 100644
--- a/consensus/types/src/eth_spec.rs
+++ b/consensus/types/src/eth_spec.rs
@@ -227,6 +227,14 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq +
     fn bytes_per_logs_bloom() -> usize {
         Self::BytesPerLogsBloom::to_usize()
     }
+
+    fn max_object_list_size() -> usize {
+        Self::MaxObjectListSize::to_usize()
+    }
+
+    fn chunks_per_blob() -> usize {
+        Self::ChunksPerBlob::to_usize()
+    }
 }

 /// Macro to inherit some type values from another EthSpec.
diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs
index e11bab770a7..c75766cb789 100644
--- a/consensus/types/src/lib.rs
+++ b/consensus/types/src/lib.rs
@@ -169,6 +169,7 @@ pub use crate::validator_registration_data::*;
 pub use crate::validator_subscription::ValidatorSubscription;
 pub use crate::voluntary_exit::VoluntaryExit;
 use serde_big_array::BigArray;
+pub use crate::blob_wrapper::BlobWrapper;

 pub type CommitteeIndex = u64;
 pub type Hash256 = H256;

From ebc0ccd02aae0a98eff96cc6834cc4afc39c4f3f Mon Sep 17 00:00:00 2001
From: realbigsean
Date: Sun, 20 Feb 2022 07:22:46 -0700
Subject: [PATCH 027/263] some more sync boilerplate

---
 beacon_node/client/src/builder.rs             |  2 +-
 .../network/src/beacon_processor/mod.rs       | 33 ++++++++++++++++++-
 .../beacon_processor/worker/rpc_methods.rs    | 10 ++++++
 beacon_node/network/src/router/processor.rs   | 23 +++++++++++--
 beacon_node/network/src/sync/manager.rs       | 14 +++++++-
 .../network/src/sync/range_sync/range.rs      |  2 +-
 6 files changed, 78 insertions(+), 6 deletions(-)

diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs
index 752ba3b7bcb..74059d42882 100644
--- a/beacon_node/client/src/builder.rs
+++ b/beacon_node/client/src/builder.rs
@@ -602,7 +602,7 @@ where
     ///
    /// If type inference errors are being raised, see the comment on the definition of `Self`.
#[allow(clippy::type_complexity)] - pub fn build( + pub fn build( mut self, ) -> Result>, String> { diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index f477878ac0d..63c414e2e5b 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -59,7 +59,8 @@ use std::task::Context; use std::time::Duration; use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; -use tokio::sync::mpsc; +use tokio::sync::{mpsc, oneshot}; +use lighthouse_network::rpc::methods::TxBlobsByRangeRequest; use types::{ Attestation, AttesterSlashing, Hash256, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, @@ -152,6 +153,8 @@ const MAX_STATUS_QUEUE_LEN: usize = 1_024; /// will be stored before we start dropping them. const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024; +const MAX_TX_BLOBS_BY_RANGE_QUEUE_LEN: usize = 1_024; + /// The maximum number of queued `BlocksByRootRequest` objects received from the network RPC that /// will be stored before we start dropping them. const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024; @@ -194,6 +197,7 @@ pub const RPC_BLOCK: &str = "rpc_block"; pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const STATUS_PROCESSING: &str = "status_processing"; pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; +pub const TX_BLOBS_BY_RANGE_REQUEST: &str = "tx_blobs_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; @@ -541,6 +545,21 @@ impl WorkEvent { } } + pub fn tx_blob_by_range_request( + peer_id: PeerId, + request_id: PeerRequestId, + request: TxBlobsByRangeRequest, + ) -> Self { + Self { + drop_during_sync: false, + work: Work::TxBlobsByRangeRequest { + peer_id, + request_id, + request, + }, + } + } + /// Create a new work event to process `BlocksByRootRequest`s from the RPC network. pub fn blocks_by_roots_request( peer_id: PeerId, @@ -728,6 +747,11 @@ pub enum Work { request_id: PeerRequestId, request: BlocksByRangeRequest, }, + TxBlobsByRangeRequest { + peer_id: PeerId, + request_id: PeerRequestId, + request: TxBlobsByRangeRequest, + }, BlocksByRootsRequest { peer_id: PeerId, request_id: PeerRequestId, @@ -754,6 +778,7 @@ impl Work { Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::Status { .. } => STATUS_PROCESSING, Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, + Work::TxBlobsByRangeRequest { .. } => TX_BLOBS_BY_RANGE_REQUEST, Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. 
} => UNKNOWN_BLOCK_AGGREGATE, @@ -897,6 +922,7 @@ impl BeaconProcessor { let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN); + let mut txbbrange_queue = FifoQueue::new(MAX_TX_BLOBS_BY_RANGE_QUEUE_LEN); let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to @@ -1119,6 +1145,8 @@ impl BeaconProcessor { self.spawn_worker(item, toolbox); } else if let Some(item) = bbrange_queue.pop() { self.spawn_worker(item, toolbox); + } else if let Some(item) = txbbrange_queue.pop() { + self.spawn_worker(item, toolbox); } else if let Some(item) = bbroots_queue.pop() { self.spawn_worker(item, toolbox); // Check slashings after all other consensus messages so we prioritize @@ -1234,6 +1262,9 @@ impl BeaconProcessor { Work::BlocksByRangeRequest { .. } => { bbrange_queue.push(work, work_id, &self.log) } + Work::TxBlobsByRangeRequest { .. } => { + txbbrange_queue.push(work, work_id, &self.log) + } Work::BlocksByRootsRequest { .. } => { bbroots_queue.push(work, work_id, &self.log) } diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 37aee01716b..d480004f5dc 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -8,6 +8,7 @@ use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error}; +use lighthouse_network::rpc::methods::TxBlobsByRangeRequest; use slot_clock::SlotClock; use std::sync::Arc; use task_executor::TaskExecutor; @@ -122,6 +123,15 @@ impl Worker { } } + pub fn handle_tx_blobs_by_range_request( + &self, + peer_id: PeerId, + request_id: PeerRequestId, + mut req: TxBlobsByRangeRequest, + ) { + //FIXME(sean) + } + /// Handle a `BlocksByRoot` request from the peer. pub fn handle_blocks_by_root_request( self, diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 6bc64220624..80b3a20d936 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -209,7 +209,9 @@ impl Processor { request_id: PeerRequestId, req: TxBlobsByRangeRequest, ) { - //FIXME(sean) + self.send_beacon_processor_work(BeaconWorkEvent::tx_blob_by_range_request( + peer_id, request_id, req, + )) } pub fn on_tx_blobs_by_range_response( @@ -218,7 +220,24 @@ impl Processor { request_id: RequestId, blob_wrapper: Option>>, ) { - //FIXME(sean) + trace!( + self.log, + "Received TxBlobsByRange Response"; + "peer" => %peer_id, + ); + + if let RequestId::Sync(id) = request_id { + self.send_to_sync(SyncMessage::TxBlobsByRangeResponse { + peer_id, + request_id: id, + blob_wrapper, + }); + } else { + debug!( + self.log, + "All tx blobs by range responses should belong to sync" + ); + } } /// Handle a `BlocksByRoot` response from the peer. 
diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index cdef904715c..6110bdf5280 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -53,7 +53,7 @@ use std::ops::Sub; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; -use types::{EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{BlobWrapper, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a @@ -88,6 +88,18 @@ pub enum SyncMessage { /// A block has been received from the RPC. RpcBlock { request_id: RequestId, + beacon_block: Option>>, + }, + + /// A [`TxBlobsByRangeResponse`] response has been received. + TxBlobsByRangeResponse { + peer_id: PeerId, + request_id: RequestId, + blob_wrapper: Option>>, + }, + + /// A [`BlocksByRoot`] response has been received. + BlocksByRootResponse { peer_id: PeerId, beacon_block: Option>>, seen_timestamp: Duration, diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 25314543877..d7a0d86cd8f 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -55,7 +55,7 @@ use lru_cache::LRUTimeCache; use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; -use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{BlobWrapper, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// For how long we store failed finalized chains to prevent retries. const FAILED_CHAINS_EXPIRY_SECONDS: u64 = 30; From 3f1e5cee78108aedad2d07684ab5aa48580329ae Mon Sep 17 00:00:00 2001 From: realbigsean Date: Sun, 20 Feb 2022 14:11:10 -0700 Subject: [PATCH 028/263] Some gossip work --- beacon_node/client/src/builder.rs | 2 +- .../src/rpc/codec/ssz_snappy.rs | 15 +++---- .../lighthouse_network/src/rpc/outbound.rs | 8 ++-- .../lighthouse_network/src/rpc/protocol.rs | 15 ++++--- .../src/service/gossip_cache.rs | 2 + .../lighthouse_network/src/service/mod.rs | 4 +- .../lighthouse_network/src/types/pubsub.rs | 15 ++++++- .../lighthouse_network/src/types/topics.rs | 5 +++ .../network/src/beacon_processor/mod.rs | 43 ++++++++++++++++++- .../beacon_processor/worker/gossip_methods.rs | 20 +++++++-- beacon_node/network/src/router/mod.rs | 8 ++++ beacon_node/network/src/router/processor.rs | 25 +++++++++-- consensus/types/src/blob_wrapper.rs | 8 ++-- consensus/types/src/lib.rs | 2 +- 14 files changed, 138 insertions(+), 34 deletions(-) diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 74059d42882..752ba3b7bcb 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -602,7 +602,7 @@ where /// /// If type inference errors are being raised, see the comment on the definition of `Self`. 
#[allow(clippy::type_complexity)] - pub fn build( + pub fn build( mut self, ) -> Result>, String> { diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 7b83eb1b6ab..7e5e114430f 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -15,7 +15,10 @@ use std::io::{Read, Write}; use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; -use types::{BlobWrapper, EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockMerge, SignedBeaconBlockShanghai}; +use types::{ + BlobWrapper, EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, + SignedBeaconBlockBase, SignedBeaconBlockMerge, SignedBeaconBlockShanghai, +}; use unsigned_varint::codec::Uvi; const CONTEXT_BYTES_LEN: usize = 4; @@ -603,13 +606,9 @@ fn handle_v2_response( )?), )))), }, - Protocol::TxBlobsByRange => { - Ok(Some(RPCResponse::TxBlobsByRange(Box::new( - BlobWrapper::from_ssz_bytes( - decoded_buffer, - )? - )))) - }, + Protocol::TxBlobsByRange => Ok(Some(RPCResponse::TxBlobsByRange(Box::new( + BlobWrapper::from_ssz_bytes(decoded_buffer)?, + )))), Protocol::BlocksByRoot => match fork_name { ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes( diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 7664a5752c5..8dc716520a9 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -73,9 +73,11 @@ impl OutboundRequest { ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), ], //FIXME(sean) what should the protocol version be? - OutboundRequest::TxBlobsByRange(_) => vec![ - ProtocolId::new(Protocol::TxBlobsByRange, Version::V2, Encoding::SSZSnappy), - ], + OutboundRequest::TxBlobsByRange(_) => vec![ProtocolId::new( + Protocol::TxBlobsByRange, + Version::V2, + Encoding::SSZSnappy, + )], OutboundRequest::BlocksByRoot(_) => vec![ ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 579a31d95c4..f3e4a7c4a5a 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -20,7 +20,10 @@ use tokio_util::{ codec::Framed, compat::{Compat, FuturesAsyncReadCompatExt}, }; -use types::{BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BlobWrapper, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock}; +use types::{ + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BlobWrapper, EthSpec, + ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, +}; lazy_static! 
{ // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -288,12 +291,10 @@ impl ProtocolId { ::ssz_fixed_len(), ::ssz_fixed_len(), ), - Protocol::TxBlobsByRange => { - RpcLimits::new( - ::ssz_fixed_len(), - ::ssz_fixed_len(), - ) - } + Protocol::TxBlobsByRange => RpcLimits::new( + ::ssz_fixed_len(), + ::ssz_fixed_len(), + ), Protocol::BlocksByRoot => { RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX) } diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 4842605f7aa..f7b6162933c 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -151,6 +151,8 @@ impl GossipCache { pub fn insert(&mut self, topic: GossipTopic, data: Vec) { let expire_timeout = match topic.kind() { GossipKind::BeaconBlock => self.beacon_block, + //FIXME(sean) use its own timeout + GossipKind::Blob => self.beacon_block, GossipKind::BeaconAggregateAndProof => self.aggregates, GossipKind::Attestation(_) => self.attestation, GossipKind::VoluntaryExit => self.voluntary_exit, diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 02b0f60e9b3..73756ba9f65 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -9,6 +9,8 @@ use crate::peer_manager::{ use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; use crate::service::behaviour::BehaviourEvent; pub use crate::service::behaviour::Gossipsub; +use crate::rpc::*; +use crate::service::{Context as ServiceContext, METADATA_FILENAME}; use crate::types::{ subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, @@ -42,7 +44,7 @@ use types::{ BlobWrapper, SignedBeaconBlock, SyncSubnetId }; use crate::rpc::methods::TxBlobsByRangeRequest; -use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER}; +use utils::{build_transport, strip_peer_id, MAX_CONNECTIONS_PER_PEER}; use self::behaviour::Behaviour; use self::gossip_cache::GossipCache; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index b8a3c336168..72ab12891b1 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -9,7 +9,7 @@ use std::boxed::Box; use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ - Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing, + Attestation, AttesterSlashing, BlobWrapper, EthSpec, ForkContext, ForkName, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockMerge, SignedBeaconBlockShanghai, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, @@ -106,6 +106,7 @@ impl PubsubMessage { pub fn kind(&self) -> GossipKind { match self { PubsubMessage::BeaconBlock(_) => GossipKind::BeaconBlock, + PubsubMessage::Blob(_) => GossipKind::Blob, PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof, PubsubMessage::Attestation(attestation_data) => { GossipKind::Attestation(attestation_data.0) @@ -180,6 +181,12 @@ impl PubsubMessage { }; Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) } + GossipKind::Blob => { + 
+                //FIXME(sean) verify against fork context
+                let blob =
+                    BlobWrapper::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?;
+                Ok(PubsubMessage::Blob(Box::new(blob)))
+            }
             GossipKind::VoluntaryExit => {
                 let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data)
                     .map_err(|e| format!("{:?}", e))?;
@@ -224,6 +231,7 @@ impl PubsubMessage {
         // messages for us.
         match &self {
             PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(),
+            PubsubMessage::Blob(data) => data.as_ssz_bytes(),
             PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(),
             PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(),
             PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(),
@@ -244,6 +252,11 @@ impl std::fmt::Display for PubsubMessage {
                 block.slot(),
                 block.message().proposer_index()
             ),
+            PubsubMessage::Blob(blob) => write!(
+                f,
+                "Tx Blob: slot: {}, beacon_block_root: {}",
+                blob.beacon_block_slot, blob.beacon_block_root
+            ),
             PubsubMessage::AggregateAndProofAttestation(att) => write!(
                 f,
                 "Aggregate and Proof: slot: {}, index: {}, aggregator_index: {}",
diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs
index 825b1088b29..0efa05388be 100644
--- a/beacon_node/lighthouse_network/src/types/topics.rs
+++ b/beacon_node/lighthouse_network/src/types/topics.rs
@@ -11,6 +11,8 @@ use crate::Subnet;
 pub const TOPIC_PREFIX: &str = "eth2";
 pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy";
 pub const BEACON_BLOCK_TOPIC: &str = "beacon_block";
+//FIXME(sean) check this name
+pub const BLOB_TOPIC: &str = "tx_blob";
 pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof";
 pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_";
 pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit";
@@ -47,6 +49,7 @@ pub struct GossipTopic {
 pub enum GossipKind {
     /// Topic for publishing beacon blocks.
     BeaconBlock,
+    Blob,
     /// Topic for publishing aggregate attestations and proofs.
     BeaconAggregateAndProof,
     /// Topic for publishing raw attestations on a particular subnet.
@@ -178,6 +181,7 @@ impl From for String { let kind = match topic.kind { GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), + GossipKind::Blob => BLOB_TOPIC.into(), GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), @@ -206,6 +210,7 @@ impl std::fmt::Display for GossipTopic { let kind = match self.kind { GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), + GossipKind::Blob => BLOB_TOPIC.into(), GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 63c414e2e5b..4e445207521 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -45,6 +45,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock}; use derivative::Derivative; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; +use lighthouse_network::rpc::methods::TxBlobsByRangeRequest; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, @@ -60,9 +61,8 @@ use std::time::Duration; use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; use tokio::sync::{mpsc, oneshot}; -use lighthouse_network::rpc::methods::TxBlobsByRangeRequest; use types::{ - Attestation, AttesterSlashing, Hash256, ProposerSlashing, SignedAggregateAndProof, + Attestation, AttesterSlashing, BlobWrapper, Hash256, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; @@ -113,6 +113,9 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024; /// before we start dropping them. const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; +//FIXME(sean) verify +const MAX_GOSSIP_BLOB_QUEUE_LEN: usize = 1_024; + /// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but /// within acceptable clock disparity) that will be queued before we start dropping them. const MAX_DELAYED_BLOCK_QUEUE_LEN: usize = 1_024; @@ -187,6 +190,7 @@ pub const GOSSIP_ATTESTATION_BATCH: &str = "gossip_attestation_batch"; pub const GOSSIP_AGGREGATE: &str = "gossip_aggregate"; pub const GOSSIP_AGGREGATE_BATCH: &str = "gossip_aggregate_batch"; pub const GOSSIP_BLOCK: &str = "gossip_block"; +pub const GOSSIP_BLOB: &str = "gossip_blob"; pub const DELAYED_IMPORT_BLOCK: &str = "delayed_import_block"; pub const GOSSIP_VOLUNTARY_EXIT: &str = "gossip_voluntary_exit"; pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing"; @@ -404,6 +408,25 @@ impl WorkEvent { } } + pub fn gossip_tx_blob_block( + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + blob: Box>, + seen_timestamp: Duration, + ) -> Self { + Self { + drop_during_sync: false, + work: Work::GossipBlob { + message_id, + peer_id, + peer_client, + blob, + seen_timestamp, + }, + } + } + /// Create a new `Work` event for some sync committee signature. 
pub fn gossip_sync_signature( message_id: MessageId, @@ -694,6 +717,13 @@ pub enum Work { block: Arc>, seen_timestamp: Duration, }, + GossipBlob { + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + blob: Box>, + seen_timestamp: Duration, + }, DelayedImportBlock { peer_id: PeerId, block: Box>, @@ -768,6 +798,7 @@ impl Work { Work::GossipAggregate { .. } => GOSSIP_AGGREGATE, Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH, Work::GossipBlock { .. } => GOSSIP_BLOCK, + Work::GossipBlob { .. } => GOSSIP_BLOB, Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK, Work::GossipVoluntaryExit { .. } => GOSSIP_VOLUNTARY_EXIT, Work::GossipProposerSlashing { .. } => GOSSIP_PROPOSER_SLASHING, @@ -918,6 +949,7 @@ impl BeaconProcessor { let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); + let mut gossip_blob_queue = FifoQueue::new(MAX_GOSSIP_BLOB_QUEUE_LEN); let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN); let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); @@ -1026,6 +1058,9 @@ impl BeaconProcessor { // required to verify some attestations. } else if let Some(item) = gossip_block_queue.pop() { self.spawn_worker(item, toolbox); + //FIXME(sean) + } else if let Some(item) = gossip_blob_queue.pop() { + self.spawn_worker(item, toolbox); // Check the aggregates, *then* the unaggregates since we assume that // aggregates are more valuable to local validators and effectively give us // more information with less signature verification time. @@ -1232,6 +1267,9 @@ impl BeaconProcessor { Work::GossipBlock { .. } => { gossip_block_queue.push(work, work_id, &self.log) } + Work::GossipBlob { .. } => { + gossip_blob_queue.push(work, work_id, &self.log) + } Work::DelayedImportBlock { .. 
} => { delayed_block_queue.push(work, work_id, &self.log) } @@ -1302,6 +1340,7 @@ impl BeaconProcessor { &metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL, gossip_block_queue.len() as i64, ); + //FIXME(sean) blob metrics metrics::set_gauge( &metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL, rpc_block_queue.len() as i64, diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index eaf5cd005cc..36981ac9fd8 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -18,9 +18,9 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, - Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, BlobWrapper, EthSpec, Hash256, IndexedAttestation, + ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, + SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; use super::{ @@ -692,6 +692,20 @@ impl Worker { } } + #[allow(clippy::too_many_arguments)] + pub fn process_gossip_blob( + self, + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + blob: BlobWrapper, + reprocess_tx: mpsc::Sender>, + duplicate_cache: DuplicateCache, + seen_duration: Duration, + ) { + //FIXME(sean) + } + /// Process the beacon block received from the gossip network and /// if it passes gossip propagation criteria, tell the network thread to forward it. 
    ///
diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs
index e17f108586e..dc5e7d8ec95 100644
--- a/beacon_node/network/src/router/mod.rs
+++ b/beacon_node/network/src/router/mod.rs
@@ -236,6 +236,14 @@ impl Router {
                     block,
                 );
             }
+            PubsubMessage::Blob(blob) => {
+                self.processor.on_tx_blob_gossip(
+                    id,
+                    peer_id,
+                    self.network_globals.client(&peer_id),
+                    blob,
+                );
+            }
             PubsubMessage::VoluntaryExit(exit) => {
                 debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id);
                 self.processor.on_voluntary_exit_gossip(id, peer_id, exit);
diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs
index 80b3a20d936..11ecdab2825 100644
--- a/beacon_node/network/src/router/processor.rs
+++ b/beacon_node/network/src/router/processor.rs
@@ -5,7 +5,8 @@ use crate::service::{NetworkMessage, RequestId};
 use crate::status::status_message;
 use crate::sync::manager::RequestId as SyncId;
 use crate::sync::SyncMessage;
-use beacon_chain::{BeaconChain, BeaconChainTypes};
+use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
+use lighthouse_network::rpc::methods::TxBlobsByRangeRequest;
 use lighthouse_network::rpc::*;
 use lighthouse_network::{
     Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response,
@@ -16,8 +17,10 @@ use std::sync::Arc;
 use std::time::{Duration, SystemTime, UNIX_EPOCH};
 use store::SyncCommitteeMessage;
 use tokio::sync::mpsc;
-use lighthouse_network::rpc::methods::TxBlobsByRangeRequest;
-use types::{Attestation, AttesterSlashing, BlobWrapper, EthSpec, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId};
+use types::{
+    Attestation, AttesterSlashing, BlobWrapper, EthSpec, ProposerSlashing, SignedAggregateAndProof,
+    SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId,
+};

 /// Processes validated messages from the network. It relays necessary data to the syncing thread
 /// and processes blocks from the pubsub network.
@@ -291,6 +294,22 @@ impl Processor { )) } + pub fn on_tx_blob_gossip( + &mut self, + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + blob: Box>, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::gossip_tx_blob_block( + message_id, + peer_id, + peer_client, + blob, + timestamp_now(), + )) + } + pub fn on_unaggregated_attestation_gossip( &mut self, message_id: MessageId, diff --git a/consensus/types/src/blob_wrapper.rs b/consensus/types/src/blob_wrapper.rs index 4368be0bd60..a2d64755665 100644 --- a/consensus/types/src/blob_wrapper.rs +++ b/consensus/types/src/blob_wrapper.rs @@ -1,10 +1,10 @@ use crate::{Blob, EthSpec, Hash256, SignedBeaconBlock, Slot}; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Encode, Decode}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -use ssz::{Decode, Encode}; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq, Default)] @@ -14,7 +14,7 @@ pub struct BlobWrapper { pub blobs: VariableList, E::MaxObjectListSize>, } -impl BlobWrapper { +impl BlobWrapper { pub fn empty() -> Self { Self::default() } @@ -24,4 +24,4 @@ impl BlobWrapper { // Max size of variable length `blobs` field + (E::max_object_list_size() * as Encode>::ssz_fixed_len()) } -} \ No newline at end of file +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index c75766cb789..804b82a24ee 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -111,6 +111,7 @@ pub use crate::beacon_block_body::{ pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *}; +pub use crate::blob_wrapper::BlobWrapper; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; pub use crate::config_and_preset::{ @@ -169,7 +170,6 @@ pub use crate::validator_registration_data::*; pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; use serde_big_array::BigArray; -pub use crate::blob_wrapper::BlobWrapper; pub type CommitteeIndex = u64; pub type Hash256 = H256; From 203418ffc9447b7d757bd5dff54a1e69cb8208ee Mon Sep 17 00:00:00 2001 From: realbigsean Date: Sun, 20 Feb 2022 14:54:35 -0700 Subject: [PATCH 029/263] add `engine_getBlobV1` --- beacon_node/execution_layer/src/engine_api.rs | 10 ++++++++++ .../execution_layer/src/engine_api/http.rs | 17 +++++++++++++++++ consensus/types/src/blob_wrapper.rs | 4 ++-- 3 files changed, 29 insertions(+), 2 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index ba0a37736b0..c82acea6350 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -11,6 +11,10 @@ pub use types::{ }; pub mod auth; +use crate::engines::ForkChoiceState; +pub use types::{Address, EthSpec, ExecutionPayload, Hash256, Uint256}; +use types::{Blob, KZGCommitment}; + pub mod http; pub mod json_structures; @@ -166,3 +170,9 @@ pub struct ProposeBlindedBlockResponse { pub latest_valid_hash: Option, pub validation_error: Option, } + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct BlobDetailsV1 { + kzg: KZGCommitment, + blob: Vec, +} diff --git 
a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 0f848a7716f..093dad5f15d 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -34,6 +34,9 @@ pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); +pub const ENGINE_GET_BLOB_V1: &str = "engine_getBlobV1"; +pub const ENGINE_GET_BLOB_TIMEOUT: Duration = Duration::from_secs(2); + pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); @@ -664,6 +667,20 @@ impl HttpJsonRpc { Ok(response.into()) } + async fn get_blob_v1( + &self, + payload_id: PayloadId, + versioned_hash: Hash256, + ) -> Result { + let params = json!([JsonPayloadIdRequest::from(payload_id), versioned_hash]); + + let response: BlobDetailsV1 = self + .rpc_request(ENGINE_GET_BLOB_V1, params, ENGINE_GET_BLOB_TIMEOUT) + .await?; + + Ok(response.into()) + } + pub async fn forkchoice_updated_v1( &self, forkchoice_state: ForkChoiceState, diff --git a/consensus/types/src/blob_wrapper.rs b/consensus/types/src/blob_wrapper.rs index a2d64755665..7960403a71b 100644 --- a/consensus/types/src/blob_wrapper.rs +++ b/consensus/types/src/blob_wrapper.rs @@ -1,6 +1,6 @@ -use crate::{Blob, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use crate::{Blob, EthSpec, Hash256, Slot}; use serde_derive::{Deserialize, Serialize}; -use ssz::{Decode, Encode}; +use ssz::Encode; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use tree_hash::TreeHash; From acaa340b418f84bb1b4a295281dfc97a325a135b Mon Sep 17 00:00:00 2001 From: realbigsean Date: Sun, 20 Feb 2022 15:27:22 -0700 Subject: [PATCH 030/263] add new beacon state variant for shanghai --- beacon_node/beacon_chain/src/beacon_chain.rs | 24 +++++++++++ .../beacon_chain/src/blob_verification.rs | 1 + beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/store/src/partial_beacon_state.rs | 41 +++++++++++++++---- .../src/common/slash_validator.rs | 8 ++-- .../src/per_epoch_processing.rs | 4 +- consensus/types/src/beacon_state.rs | 19 +++++---- consensus/types/src/chain_spec.rs | 3 ++ .../ef_tests/src/cases/epoch_processing.rs | 22 +++++----- testing/ef_tests/src/cases/operations.rs | 18 ++++---- 10 files changed, 105 insertions(+), 36 deletions(-) create mode 100644 beacon_node/beacon_chain/src/blob_verification.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3eecc9a0dc6..5373cee20d4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3619,6 +3619,30 @@ impl BeaconChain { .ok_or(BlockProductionError::MissingExecutionPayload)?, }, }), + BeaconState::Shanghai(_) => { + let sync_aggregate = get_sync_aggregate()?; + let execution_payload = get_execution_payload(self, &state, proposer_index)?; + //FIXME(sean) get blobs + BeaconBlock::Shanghai(BeaconBlockShanghai { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyShanghai { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations, + deposits, + voluntary_exits: voluntary_exits.into(), + sync_aggregate, + execution_payload, + blob_kzgs: VariableList::empty(), 
+ }, + }) + } }; let block = SignedBeaconBlock::from_block( diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -0,0 +1 @@ + diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index fbcd8f7fb76..84e15ead71c 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -5,6 +5,7 @@ mod beacon_chain; mod beacon_fork_choice_store; pub mod beacon_proposer_cache; mod beacon_snapshot; +mod blob_verification; pub mod block_reward; mod block_times_cache; mod block_verification; diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 010796afd5b..1e52e9856ab 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -14,7 +14,7 @@ use types::*; /// /// Utilises lazy-loading from separate storage for its vector fields. #[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Shanghai), variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) )] #[derive(Debug, PartialEq, Clone, Encode)] @@ -66,9 +66,9 @@ where pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Shanghai))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Shanghai))] pub current_epoch_participation: VariableList, // Finality @@ -78,17 +78,17 @@ where pub finalized_checkpoint: Checkpoint, // Inactivity - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Shanghai))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Shanghai))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Shanghai))] pub next_sync_committee: Arc>, // Execution - #[superstruct(only(Merge))] + #[superstruct(only(Merge, Shanghai))] pub latest_execution_payload_header: ExecutionPayloadHeader, } @@ -178,6 +178,20 @@ impl PartialBeaconState { latest_execution_payload_header ] ), + BeaconState::Shanghai(s) => impl_from_state_forgetful!( + s, + outer, + Shanghai, + PartialBeaconStateShanghai, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header + ] + ), } } @@ -365,6 +379,19 @@ impl TryInto> for PartialBeaconState { latest_execution_payload_header ] ), + PartialBeaconState::Shanghai(inner) => impl_try_into_beacon_state!( + inner, + Shanghai, + BeaconStateShanghai, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header + ] + ), }; Ok(state) } diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index e9d94a10625..23bd35bf625 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -45,9 +45,11 @@ pub fn slash_validator( validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?; let proposer_reward = match state 
{ BeaconState::Base(_) => whistleblower_reward.safe_div(spec.proposer_reward_quotient)?, - BeaconState::Altair(_) | BeaconState::Merge(_) => whistleblower_reward - .safe_mul(PROPOSER_WEIGHT)? - .safe_div(WEIGHT_DENOMINATOR)?, + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { + whistleblower_reward + .safe_mul(PROPOSER_WEIGHT)? + .safe_div(WEIGHT_DENOMINATOR)? + } }; // Ensure the whistleblower index is in the validator registry. diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index cb90c67b56d..8074a32ed03 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -37,7 +37,9 @@ pub fn process_epoch( match state { BeaconState::Base(_) => base::process_epoch(state, spec), - BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_epoch(state, spec), + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { + altair::process_epoch(state, spec) + } } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index a5d00cdf2dd..eb4be019f75 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -172,7 +172,7 @@ impl From for Hash256 { /// The state of the `BeaconChain` at some slot. #[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Shanghai), variant_attributes( derive( Derivative, @@ -250,9 +250,9 @@ where pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Shanghai))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Shanghai))] pub current_epoch_participation: VariableList, // Finality @@ -267,17 +267,17 @@ where // Inactivity #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Shanghai))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Shanghai))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Shanghai))] pub next_sync_committee: Arc>, // Execution - #[superstruct(only(Merge))] + #[superstruct(only(Merge, Shanghai))] pub latest_execution_payload_header: ExecutionPayloadHeader, // Caching (not in the spec) @@ -389,6 +389,7 @@ impl BeaconState { BeaconState::Base { .. } => ForkName::Base, BeaconState::Altair { .. } => ForkName::Altair, BeaconState::Merge { .. } => ForkName::Merge, + BeaconState::Shanghai { .. 
} => ForkName::Shanghai, }; if fork_at_slot == object_fork { @@ -1102,6 +1103,7 @@ impl BeaconState { BeaconState::Base(state) => (&mut state.validators, &mut state.balances), BeaconState::Altair(state) => (&mut state.validators, &mut state.balances), BeaconState::Merge(state) => (&mut state.validators, &mut state.balances), + BeaconState::Shanghai(state) => (&mut state.validators, &mut state.balances), } } @@ -1298,12 +1300,14 @@ impl BeaconState { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.current_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.current_epoch_participation), + BeaconState::Shanghai(state) => Ok(&mut state.current_epoch_participation), } } else if epoch == self.previous_epoch() { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.previous_epoch_participation), + BeaconState::Shanghai(state) => Ok(&mut state.previous_epoch_participation), } } else { Err(BeaconStateError::EpochOutOfBounds) @@ -1608,6 +1612,7 @@ impl BeaconState { BeaconState::Base(inner) => BeaconState::Base(inner.clone()), BeaconState::Altair(inner) => BeaconState::Altair(inner.clone()), BeaconState::Merge(inner) => BeaconState::Merge(inner.clone()), + BeaconState::Shanghai(inner) => BeaconState::Shanghai(inner.clone()), }; if config.committee_caches { *res.committee_caches_mut() = self.committee_caches().clone(); diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 8887061e495..4f62022fb21 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -274,6 +274,7 @@ impl ChainSpec { BeaconState::Base(_) => self.inactivity_penalty_quotient, BeaconState::Altair(_) => self.inactivity_penalty_quotient_altair, BeaconState::Merge(_) => self.inactivity_penalty_quotient_bellatrix, + BeaconState::Shanghai(_) => self.inactivity_penalty_quotient_bellatrix, } } @@ -286,6 +287,7 @@ impl ChainSpec { BeaconState::Base(_) => self.proportional_slashing_multiplier, BeaconState::Altair(_) => self.proportional_slashing_multiplier_altair, BeaconState::Merge(_) => self.proportional_slashing_multiplier_bellatrix, + BeaconState::Shanghai(_) => self.proportional_slashing_multiplier_bellatrix, } } @@ -298,6 +300,7 @@ impl ChainSpec { BeaconState::Base(_) => self.min_slashing_penalty_quotient, BeaconState::Altair(_) => self.min_slashing_penalty_quotient_altair, BeaconState::Merge(_) => self.min_slashing_penalty_quotient_bellatrix, + BeaconState::Shanghai(_) => self.min_slashing_penalty_quotient_bellatrix, } } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 9163b551ae3..04470c73c0d 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -97,7 +97,7 @@ impl EpochTransition for JustificationAndFinalization { justification_and_finalization_state.apply_changes_to_state(state); Ok(()) } - BeaconState::Altair(_) | BeaconState::Merge(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { let justification_and_finalization_state = altair::process_justification_and_finalization( state, @@ -118,7 +118,7 @@ impl EpochTransition for RewardsAndPenalties { validator_statuses.process_attestations(state)?; base::process_rewards_and_penalties(state, &mut validator_statuses, spec) } - 
BeaconState::Altair(_) | BeaconState::Merge(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { altair::process_rewards_and_penalties( state, &altair::ParticipationCache::new(state, spec).unwrap(), @@ -147,7 +147,7 @@ impl EpochTransition for Slashings { spec, )?; } - BeaconState::Altair(_) | BeaconState::Merge(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { process_slashings( state, altair::ParticipationCache::new(state, spec) @@ -205,7 +205,7 @@ impl EpochTransition for SyncCommitteeUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { altair::process_sync_committee_updates(state, spec) } } @@ -216,11 +216,13 @@ impl EpochTransition for InactivityUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_inactivity_updates( - state, - &altair::ParticipationCache::new(state, spec).unwrap(), - spec, - ), + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { + altair::process_inactivity_updates( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + spec, + ) + } } } } @@ -229,7 +231,7 @@ impl EpochTransition for ParticipationFlagUpdates { fn run(state: &mut BeaconState, _: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { altair::process_participation_flag_updates(state) } } diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 798dae083be..bfa63bfe69b 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -81,14 +81,16 @@ impl Operation for Attestation { BeaconState::Base(_) => { base::process_attestations(state, &[self.clone()], VerifySignatures::True, spec) } - BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_attestation( - state, - self, - 0, - proposer_index, - VerifySignatures::True, - spec, - ), + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { + altair::process_attestation( + state, + self, + 0, + proposer_index, + VerifySignatures::True, + spec, + ) + } } } } From 809b52715eeaf68fff66f250487f2dfc39b67847 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 22 Feb 2022 08:29:29 -0500 Subject: [PATCH 031/263] some block building updates --- beacon_node/beacon_chain/src/beacon_chain.rs | 6 +- .../beacon_chain/src/execution_payload.rs | 125 ++++++++++++++++++ beacon_node/execution_layer/src/lib.rs | 65 +++++++++ consensus/types/src/execution_payload.rs | 5 + 4 files changed, 199 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5373cee20d4..1dba7330eed 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -17,8 +17,9 @@ use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use 
crate::events::ServerSentEventHandler; -use crate::execution_payload::{get_execution_payload, PreparePayloadHandle}; +use crate::execution_payload::{ PreparePayloadHandle}; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; +use crate::execution_payload::{get_execution_payload, get_execution_payload_and_blobs}; use crate::head_tracker::HeadTracker; use crate::historical_blocks::HistoricalBlockError; use crate::migrate::BackgroundMigrator; @@ -3621,7 +3622,8 @@ impl BeaconChain { }), BeaconState::Shanghai(_) => { let sync_aggregate = get_sync_aggregate()?; - let execution_payload = get_execution_payload(self, &state, proposer_index)?; + let (execution_payload, blobs) = + get_execution_payload_and_blobs(self, &state, proposer_index)?; //FIXME(sean) get blobs BeaconBlock::Shanghai(BeaconBlockShanghai { slot, diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 2221d1fc7cd..b0ea743b199 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -387,6 +387,33 @@ pub fn get_execution_payload< Ok(join_handle) } +/// Wraps the async `prepare_execution_payload` function as a blocking task. +pub fn prepare_execution_payload_and_blobs_blocking( + chain: &BeaconChain, + state: &BeaconState, + proposer_index: u64, +) -> Result< + Option<( + ExecutionPayload, + VariableList< + KZGCommitment, + <::EthSpec as EthSpec>::MaxObjectListSize, + >, + )>, + BlockProductionError, +> { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BlockProductionError::ExecutionLayerMissing)?; + + execution_layer + .block_on_generic(|_| async { + prepare_execution_payload_and_blobs(chain, state, proposer_index).await + }) + .map_err(BlockProductionError::BlockingFailed)? +} + /// Prepares an execution payload for inclusion in a block. /// /// Will return `Ok(None)` if the merge fork has occurred, but a terminal block has not been found. 
@@ -485,3 +512,101 @@ where Ok(execution_payload) } + +pub async fn prepare_execution_payload_and_blobs( + chain: &BeaconChain, + state: &BeaconState, + proposer_index: u64, +) -> Result< + Option<( + ExecutionPayload, + VariableList< + KZGCommitment, + <::EthSpec as EthSpec>::MaxObjectListSize, + >, + )>, + BlockProductionError, +> { + let spec = &chain.spec; + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BlockProductionError::ExecutionLayerMissing)?; + + let parent_hash = if !is_merge_transition_complete(state) { + let is_terminal_block_hash_set = spec.terminal_block_hash != Hash256::zero(); + let is_activation_epoch_reached = + state.current_epoch() >= spec.terminal_block_hash_activation_epoch; + + if is_terminal_block_hash_set && !is_activation_epoch_reached { + return Ok(None); + } + + let terminal_pow_block_hash = execution_layer + .get_terminal_pow_block_hash(spec) + .await + .map_err(BlockProductionError::TerminalPoWBlockLookupFailed)?; + + if let Some(terminal_pow_block_hash) = terminal_pow_block_hash { + terminal_pow_block_hash + } else { + return Ok(None); + } + } else { + state.latest_execution_payload_header()?.block_hash + }; + + let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; + let random = *state.get_randao_mix(state.current_epoch())?; + let finalized_root = state.finalized_checkpoint().root; + + // The finalized block hash is not included in the specification, however we provide this + // parameter so that the execution layer can produce a payload id if one is not already known + // (e.g., due to a recent reorg). + let finalized_block_hash = + if let Some(block) = chain.fork_choice.read().get_block(&finalized_root) { + block.execution_status.block_hash() + } else { + chain + .store + .get_block(&finalized_root) + .map_err(BlockProductionError::FailedToReadFinalizedBlock)? + .ok_or(BlockProductionError::MissingFinalizedBlock(finalized_root))? + .message() + .body() + .execution_payload() + .ok() + .map(|ep| ep.block_hash) + }; + + // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. 
+ let execution_payload = execution_layer + .get_payload( + parent_hash, + timestamp, + random, + finalized_block_hash.unwrap_or_else(Hash256::zero), + proposer_index, + ) + .await + .map_err(BlockProductionError::GetPayloadFailed)?; + + //FIXME(sean) + for tx in execution_payload.blob_txns_iter() { + let versioned_hash = Hash256::zero(); + // get versioned hash + let blob = execution_layer + .get_blob::( + parent_hash, + timestamp, + random, + finalized_root, + proposer_index, + versioned_hash, + ) + .await + .map_err(BlockProductionError::GetPayloadFailed)?; + } + + Ok(Some((execution_payload, VariableList::empty()))) +} diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 68071ee9b1f..fd6886c69e9 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -888,6 +888,71 @@ impl ExecutionLayer { .map_err(Error::EngineError) } + pub async fn get_blob( + &self, + parent_hash: Hash256, + timestamp: u64, + random: Hash256, + finalized_block_hash: Hash256, + proposer_index: u64, + versioned_hash: Hash256, + ) -> Result { + let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; + + debug!( + self.log(), + "Issuing engine_getBlob"; + "suggested_fee_recipient" => ?suggested_fee_recipient, + "random" => ?random, + "timestamp" => timestamp, + "parent_hash" => ?parent_hash, + ); + self.engines() + .first_success(|engine| async move { + let payload_id = if let Some(id) = engine + .get_payload_id(parent_hash, timestamp, random, suggested_fee_recipient) + .await + { + // The payload id has been cached for this engine. + id + } else { + // The payload id has *not* been cached for this engine. Trigger an artificial + // fork choice update to retrieve a payload ID. + // + // TODO(merge): a better algorithm might try to favour a node that already had a + // cached payload id, since a payload that has had more time to produce is + // likely to be more profitable. + let fork_choice_state = ForkChoiceState { + head_block_hash: parent_hash, + safe_block_hash: parent_hash, + finalized_block_hash, + }; + let payload_attributes = PayloadAttributes { + timestamp, + random, + suggested_fee_recipient, + }; + + engine + .notify_forkchoice_updated( + fork_choice_state, + Some(payload_attributes), + self.log(), + ) + .await + .map(|response| response.payload_id)? + .ok_or(ApiError::PayloadIdUnavailable)? + }; + + engine + .api + .get_blob_v1::(payload_id, versioned_hash) + .await + }) + .await + .map_err(Error::EngineErrors) + } + /// Maps to the `engine_newPayload` JSON-RPC call. 
/// /// ## Fallback Behaviour diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 412e5a8df3a..78a53a3675e 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -3,6 +3,7 @@ use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; +use std::slice::Iter; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -58,4 +59,8 @@ impl ExecutionPayload { // Max size of variable length `transactions` field + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) } + + pub fn blob_txns_iter(&self) -> Iter<'_, Transaction> { + self.transactions.iter() + } } From fe6fc55449f9e942b34393e78f9f39458a1b11b8 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 5 Apr 2022 16:55:42 -0400 Subject: [PATCH 032/263] fix compilation errors, rename capella -> shanghai, cleanup some rebase issues --- beacon_node/beacon_chain/src/beacon_chain.rs | 6 +- .../beacon_chain/src/execution_payload.rs | 103 +++--------------- beacon_node/execution_layer/src/engine_api.rs | 5 +- .../execution_layer/src/engine_api/http.rs | 4 +- beacon_node/execution_layer/src/lib.rs | 55 +--------- beacon_node/lighthouse_network/src/config.rs | 5 +- .../src/rpc/codec/ssz_snappy.rs | 20 ++-- .../lighthouse_network/src/rpc/methods.rs | 4 +- .../lighthouse_network/src/rpc/protocol.rs | 9 +- .../lighthouse_network/src/service/mod.rs | 2 +- .../lighthouse_network/src/types/pubsub.rs | 10 +- .../network/src/beacon_processor/mod.rs | 10 +- .../beacon_processor/worker/gossip_methods.rs | 4 +- beacon_node/network/src/router/processor.rs | 9 +- beacon_node/network/src/sync/manager.rs | 6 +- .../network/src/sync/range_sync/range.rs | 2 +- beacon_node/store/src/partial_beacon_state.rs | 26 ++--- .../src/common/slash_validator.rs | 2 +- .../process_operations.rs | 2 +- .../src/per_epoch_processing.rs | 2 +- consensus/types/src/beacon_block.rs | 12 +- consensus/types/src/beacon_block_body.rs | 12 +- consensus/types/src/beacon_state.rs | 24 ++-- .../src/{blob_wrapper.rs => blobs_sidecar.rs} | 8 +- consensus/types/src/chain_spec.rs | 63 +++++------ consensus/types/src/consts.rs | 13 +++ consensus/types/src/eth_spec.rs | 22 ++-- consensus/types/src/fork_context.rs | 6 +- consensus/types/src/fork_name.rs | 16 +-- consensus/types/src/lib.rs | 14 ++- consensus/types/src/signed_beacon_block.rs | 10 +- consensus/types/src/signed_blobs_sidecar.rs | 15 +++ testing/ef_tests/src/cases/common.rs | 2 +- .../ef_tests/src/cases/epoch_processing.rs | 14 +-- testing/ef_tests/src/cases/fork.rs | 2 +- testing/ef_tests/src/cases/operations.rs | 2 +- testing/ef_tests/src/cases/transition.rs | 4 +- .../src/signing_method/web3signer.rs | 4 +- 38 files changed, 218 insertions(+), 311 deletions(-) rename consensus/types/src/{blob_wrapper.rs => blobs_sidecar.rs} (72%) create mode 100644 consensus/types/src/signed_blobs_sidecar.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 1dba7330eed..9f536254adf 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3620,17 +3620,17 @@ impl BeaconChain { .ok_or(BlockProductionError::MissingExecutionPayload)?, }, }), - BeaconState::Shanghai(_) => { + BeaconState::Capella(_) => { let sync_aggregate = get_sync_aggregate()?; let (execution_payload, blobs) = get_execution_payload_and_blobs(self, &state, 
proposer_index)?; //FIXME(sean) get blobs - BeaconBlock::Shanghai(BeaconBlockShanghai { + BeaconBlock::Capella(BeaconBlockCapella { slot, proposer_index, parent_root, state_root: Hash256::zero(), - body: BeaconBlockBodyShanghai { + body: BeaconBlockBodyCapella { randao_reveal, eth1_data, graffiti, diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index b0ea743b199..022cfc36132 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -388,16 +388,19 @@ pub fn get_execution_payload< } /// Wraps the async `prepare_execution_payload` function as a blocking task. -pub fn prepare_execution_payload_and_blobs_blocking( +pub fn prepare_execution_payload_and_blobs_blocking< + T: BeaconChainTypes, + Payload: ExecPayload, +>( chain: &BeaconChain, state: &BeaconState, proposer_index: u64, ) -> Result< Option<( - ExecutionPayload, + Payload, VariableList< KZGCommitment, - <::EthSpec as EthSpec>::MaxObjectListSize, + <::EthSpec as EthSpec>::MaxBlobsPerBlock, >, )>, BlockProductionError, @@ -409,7 +412,7 @@ pub fn prepare_execution_payload_and_blobs_blocking( execution_layer .block_on_generic(|_| async { - prepare_execution_payload_and_blobs(chain, state, proposer_index).await + prepare_execution_payload_and_blobs::(chain, state, proposer_index).await }) .map_err(BlockProductionError::BlockingFailed)? } @@ -513,100 +516,22 @@ where Ok(execution_payload) } -pub async fn prepare_execution_payload_and_blobs( +pub async fn prepare_execution_payload_and_blobs< + T: BeaconChainTypes, + Payload: ExecPayload, +>( chain: &BeaconChain, state: &BeaconState, proposer_index: u64, ) -> Result< Option<( - ExecutionPayload, + Payload, VariableList< KZGCommitment, - <::EthSpec as EthSpec>::MaxObjectListSize, + <::EthSpec as EthSpec>::MaxBlobsPerBlock, >, )>, BlockProductionError, > { - let spec = &chain.spec; - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BlockProductionError::ExecutionLayerMissing)?; - - let parent_hash = if !is_merge_transition_complete(state) { - let is_terminal_block_hash_set = spec.terminal_block_hash != Hash256::zero(); - let is_activation_epoch_reached = - state.current_epoch() >= spec.terminal_block_hash_activation_epoch; - - if is_terminal_block_hash_set && !is_activation_epoch_reached { - return Ok(None); - } - - let terminal_pow_block_hash = execution_layer - .get_terminal_pow_block_hash(spec) - .await - .map_err(BlockProductionError::TerminalPoWBlockLookupFailed)?; - - if let Some(terminal_pow_block_hash) = terminal_pow_block_hash { - terminal_pow_block_hash - } else { - return Ok(None); - } - } else { - state.latest_execution_payload_header()?.block_hash - }; - - let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; - let random = *state.get_randao_mix(state.current_epoch())?; - let finalized_root = state.finalized_checkpoint().root; - - // The finalized block hash is not included in the specification, however we provide this - // parameter so that the execution layer can produce a payload id if one is not already known - // (e.g., due to a recent reorg). - let finalized_block_hash = - if let Some(block) = chain.fork_choice.read().get_block(&finalized_root) { - block.execution_status.block_hash() - } else { - chain - .store - .get_block(&finalized_root) - .map_err(BlockProductionError::FailedToReadFinalizedBlock)? - .ok_or(BlockProductionError::MissingFinalizedBlock(finalized_root))? 
- .message() - .body() - .execution_payload() - .ok() - .map(|ep| ep.block_hash) - }; - - // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. - let execution_payload = execution_layer - .get_payload( - parent_hash, - timestamp, - random, - finalized_block_hash.unwrap_or_else(Hash256::zero), - proposer_index, - ) - .await - .map_err(BlockProductionError::GetPayloadFailed)?; - - //FIXME(sean) - for tx in execution_payload.blob_txns_iter() { - let versioned_hash = Hash256::zero(); - // get versioned hash - let blob = execution_layer - .get_blob::( - parent_hash, - timestamp, - random, - finalized_root, - proposer_index, - versioned_hash, - ) - .await - .map_err(BlockProductionError::GetPayloadFailed)?; - } - - Ok(Some((execution_payload, VariableList::empty()))) + todo!() } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index c82acea6350..361953bebc1 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -9,12 +9,9 @@ pub use types::{ Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, Hash256, Uint256, VariableList, }; - -pub mod auth; -use crate::engines::ForkChoiceState; -pub use types::{Address, EthSpec, ExecutionPayload, Hash256, Uint256}; use types::{Blob, KZGCommitment}; +pub mod auth; pub mod http; pub mod json_structures; diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 093dad5f15d..92c207f7999 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -667,10 +667,10 @@ impl HttpJsonRpc { Ok(response.into()) } - async fn get_blob_v1( + pub async fn get_blob_v1( &self, payload_id: PayloadId, - versioned_hash: Hash256, + versioned_hash: ExecutionBlockHash, ) -> Result { let params = json!([JsonPayloadIdRequest::from(payload_id), versioned_hash]); diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index fd6886c69e9..50ea35d76d1 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -897,60 +897,7 @@ impl ExecutionLayer { proposer_index: u64, versioned_hash: Hash256, ) -> Result { - let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; - - debug!( - self.log(), - "Issuing engine_getBlob"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "random" => ?random, - "timestamp" => timestamp, - "parent_hash" => ?parent_hash, - ); - self.engines() - .first_success(|engine| async move { - let payload_id = if let Some(id) = engine - .get_payload_id(parent_hash, timestamp, random, suggested_fee_recipient) - .await - { - // The payload id has been cached for this engine. - id - } else { - // The payload id has *not* been cached for this engine. Trigger an artificial - // fork choice update to retrieve a payload ID. - // - // TODO(merge): a better algorithm might try to favour a node that already had a - // cached payload id, since a payload that has had more time to produce is - // likely to be more profitable. 
- let fork_choice_state = ForkChoiceState { - head_block_hash: parent_hash, - safe_block_hash: parent_hash, - finalized_block_hash, - }; - let payload_attributes = PayloadAttributes { - timestamp, - random, - suggested_fee_recipient, - }; - - engine - .notify_forkchoice_updated( - fork_choice_state, - Some(payload_attributes), - self.log(), - ) - .await - .map(|response| response.payload_id)? - .ok_or(ApiError::PayloadIdUnavailable)? - }; - - engine - .api - .get_blob_v1::(payload_id, versioned_hash) - .await - }) - .await - .map_err(Error::EngineErrors) + todo!() } /// Maps to the `engine_newPayload` JSON-RPC call. diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 05139e5580b..cf3381a94f6 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -21,6 +21,9 @@ const GOSSIP_MAX_SIZE: usize = 1_048_576; // 1M /// The maximum transmit size of gossip messages in bytes post-merge. const GOSSIP_MAX_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M +const MAX_REQUEST_BLOBS_SIDECARS: usize = 128; +const MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS: usize = 128; + /// The cache time is set to accommodate the circulation time of an attestation. /// /// The p2p spec declares that we accept attestations within the following range: @@ -297,7 +300,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos // according to: https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#the-gossip-domain-gossipsub // the derivation of the message-id remains the same in the merge //TODO(sean): figure this out - ForkName::Altair | ForkName::Merge | ForkName::Shanghai => { + ForkName::Altair | ForkName::Merge | ForkName::Capella => { let topic_len_bytes = topic_bytes.len().to_le_bytes(); let mut vec = Vec::with_capacity( prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 7e5e114430f..58bd9786ef6 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -16,8 +16,8 @@ use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; use types::{ - BlobWrapper, EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockMerge, SignedBeaconBlockShanghai, + BlobsSidecar, EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, + SignedBeaconBlockBase, SignedBeaconBlockCapella, SignedBeaconBlockMerge, }; use unsigned_varint::codec::Uvi; @@ -409,8 +409,8 @@ fn context_bytes( return match **ref_box_block { // NOTE: If you are adding another fork type here, be sure to modify the // `fork_context.to_context_bytes()` function to support it as well! - SignedBeaconBlock::Shanghai { .. } => { - fork_context.to_context_bytes(ForkName::Shanghai) + SignedBeaconBlock::Capella { .. } => { + fork_context.to_context_bytes(ForkName::Capella) } SignedBeaconBlock::Merge { .. } => { // Merge context being `None` implies that "merge never happened". 
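The `context_bytes` hunk above maps the new `SignedBeaconBlock::Capella` variant to its fork digest so RPC responses carry the correct context prefix. A small check of the round trip is sketched below; it assumes a `ForkContext`, `ChainSpec`, and genesis validators root are in scope, and uses `compute_fork_digest` as shown in the fork_context.rs hunk later in this patch. This is an illustration, not patch code.

    use types::{ChainSpec, ForkContext, ForkName, Hash256};

    // Sketch: the digest the codec prepends for Capella blocks should be the one
    // ForkContext registered from `capella_fork_version` and the genesis root.
    fn check_capella_digest(
        fork_context: &ForkContext,
        spec: &ChainSpec,
        genesis_validators_root: Hash256,
    ) {
        let expected =
            ChainSpec::compute_fork_digest(spec.capella_fork_version, genesis_validators_root);
        assert_eq!(
            fork_context.to_context_bytes(ForkName::Capella),
            Some(expected)
        );
    }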
@@ -547,7 +547,7 @@ fn handle_v1_response( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), Protocol::TxBlobsByRange => Ok(Some(RPCResponse::TxBlobsByRange(Arc::new( - BlobWrapper::from_ssz_bytes(decoded_buffer)?), + BlobsSidecar::from_ssz_bytes(decoded_buffer)?), ))), Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), @@ -600,14 +600,14 @@ fn handle_v2_response( decoded_buffer, )?), )))), - ForkName::Shanghai => Ok(Some(RPCResponse::BlocksByRange(Box::new( - SignedBeaconBlock::Shanghai(SignedBeaconBlockShanghai::from_ssz_bytes( + ForkName::Capella => Ok(Some(RPCResponse::BlocksByRange(Box::new( + SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( decoded_buffer, )?), )))), }, Protocol::TxBlobsByRange => Ok(Some(RPCResponse::TxBlobsByRange(Box::new( - BlobWrapper::from_ssz_bytes(decoded_buffer)?, + BlobsSidecar::from_ssz_bytes(decoded_buffer)?, )))), Protocol::BlocksByRoot => match fork_name { ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( @@ -623,8 +623,8 @@ fn handle_v2_response( decoded_buffer, )?), )))), - ForkName::Shanghai => Ok(Some(RPCResponse::BlocksByRoot(Box::new( - SignedBeaconBlock::Shanghai(SignedBeaconBlockShanghai::from_ssz_bytes( + ForkName::Capella => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( decoded_buffer, )?), )))), diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 8aa04866c03..db0af513162 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -12,7 +12,7 @@ use std::ops::Deref; use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; -use types::{BlobWrapper, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{BlobsSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// Maximum number of blocks in a single request. pub type MaxRequestBlocks = U1024; @@ -246,7 +246,7 @@ pub enum RPCResponse { /// batch. BlocksByRange(Arc>), - TxBlobsByRange(Box>), + TxBlobsByRange(Box>), /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Arc>), diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index f3e4a7c4a5a..d023b362245 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -21,7 +21,7 @@ use tokio_util::{ compat::{Compat, FuturesAsyncReadCompatExt}, }; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BlobWrapper, EthSpec, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BlobsSidecar, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, }; @@ -71,11 +71,11 @@ lazy_static! 
{ + types::ExecutionPayload::::max_execution_payload_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field - pub static ref BLOB_MIN: usize = BlobWrapper::::empty() + pub static ref BLOB_MIN: usize = BlobsSidecar::::empty() .as_ssz_bytes() .len(); - pub static ref BLOB_MAX: usize = BlobWrapper::::max_size(); + pub static ref BLOB_MAX: usize = BlobsSidecar::::max_size(); pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize = VariableList::::from(Vec::::new()) @@ -120,7 +120,8 @@ const REQUEST_TIMEOUT: u64 = 15; pub fn max_rpc_size(fork_context: &ForkContext) -> usize { match fork_context.current_fork() { ForkName::Merge => MAX_RPC_SIZE_POST_MERGE, - ForkName::Altair | ForkName::Base => MAX_RPC_SIZE, + //FIXME(sean) check this + ForkName::Altair | ForkName::Base | ForkName::Capella => MAX_RPC_SIZE, } } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 73756ba9f65..6289712bb73 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -41,7 +41,7 @@ use std::sync::Arc; use std::task::{Context, Poll}; use types::{ consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, - BlobWrapper, SignedBeaconBlock, SyncSubnetId + BlobsSidecar, SignedBeaconBlock, SyncSubnetId }; use crate::rpc::methods::TxBlobsByRangeRequest; use utils::{build_transport, strip_peer_id, MAX_CONNECTIONS_PER_PEER}; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 72ab12891b1..780fa215f10 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -9,9 +9,9 @@ use std::boxed::Box; use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ - Attestation, AttesterSlashing, BlobWrapper, EthSpec, ForkContext, ForkName, ProposerSlashing, + Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ForkContext, ForkName, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, - SignedBeaconBlockMerge, SignedBeaconBlockShanghai, SignedContributionAndProof, + SignedBeaconBlockCapella, SignedBeaconBlockMerge, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; @@ -168,8 +168,8 @@ impl PubsubMessage { SignedBeaconBlockMerge::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), - Some(ForkName::Shanghai) => SignedBeaconBlock::::Shanghai( - SignedBeaconBlockShanghai::from_ssz_bytes(data) + Some(ForkName::Capella) => SignedBeaconBlock::::Capella( + SignedBeaconBlockCapella::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), None => { @@ -184,7 +184,7 @@ impl PubsubMessage { GossipKind::Blob => { //FIXME(sean) verify against fork context let blob = - BlobWrapper::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?; + BlobsSidecar::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?; Ok(PubsubMessage::Blob(Box::new(blob))) } GossipKind::VoluntaryExit => { diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 4e445207521..496142f9a10 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -62,9 +62,9 @@ use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; use tokio::sync::{mpsc, 
oneshot}; use types::{ - Attestation, AttesterSlashing, BlobWrapper, Hash256, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, - SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, BlobsSidecar, Hash256, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, + SubnetId, SyncCommitteeMessage, SyncSubnetId, }; use work_reprocessing_queue::{ spawn_reprocess_scheduler, QueuedAggregate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, @@ -412,7 +412,7 @@ impl WorkEvent { message_id: MessageId, peer_id: PeerId, peer_client: Client, - blob: Box>, + blob: Box>, seen_timestamp: Duration, ) -> Self { Self { @@ -721,7 +721,7 @@ pub enum Work { message_id: MessageId, peer_id: PeerId, peer_client: Client, - blob: Box>, + blob: Box>, seen_timestamp: Duration, }, DelayedImportBlock { diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 36981ac9fd8..e78da09cac4 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -18,7 +18,7 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, BlobWrapper, EthSpec, Hash256, IndexedAttestation, + Attestation, AttesterSlashing, BlobsSidecar, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; @@ -698,7 +698,7 @@ impl Worker { message_id: MessageId, peer_id: PeerId, peer_client: Client, - blob: BlobWrapper, + blob: BlobsSidecar, reprocess_tx: mpsc::Sender>, duplicate_cache: DuplicateCache, seen_duration: Duration, diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 11ecdab2825..3e9e004d0ea 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -18,8 +18,9 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::SyncCommitteeMessage; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, BlobWrapper, EthSpec, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId, + Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, + SubnetId, SyncSubnetId, }; /// Processes validated messages from the network. 
It relays necessary data to the syncing thread @@ -221,7 +222,7 @@ impl Processor { &mut self, peer_id: PeerId, request_id: RequestId, - blob_wrapper: Option>>, + blob_wrapper: Option>>, ) { trace!( self.log, @@ -299,7 +300,7 @@ impl Processor { message_id: MessageId, peer_id: PeerId, peer_client: Client, - blob: Box>, + blob: Box>, ) { self.send_beacon_processor_work(BeaconWorkEvent::gossip_tx_blob_block( message_id, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 6110bdf5280..1451ffb075e 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -53,7 +53,7 @@ use std::ops::Sub; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; -use types::{BlobWrapper, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{BlobsSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a @@ -88,14 +88,16 @@ pub enum SyncMessage { /// A block has been received from the RPC. RpcBlock { request_id: RequestId, + peer_id: PeerId, beacon_block: Option>>, + seen_timestamp: Duration, }, /// A [`TxBlobsByRangeResponse`] response has been received. TxBlobsByRangeResponse { peer_id: PeerId, request_id: RequestId, - blob_wrapper: Option>>, + blob_wrapper: Option>>, }, /// A [`BlocksByRoot`] response has been received. diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index d7a0d86cd8f..39696f2b643 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -55,7 +55,7 @@ use lru_cache::LRUTimeCache; use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; -use types::{BlobWrapper, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{BlobsSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// For how long we store failed finalized chains to prevent retries. const FAILED_CHAINS_EXPIRY_SECONDS: u64 = 30; diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 1e52e9856ab..66b517b77e5 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -14,7 +14,7 @@ use types::*; /// /// Utilises lazy-loading from separate storage for its vector fields. 
#[superstruct( - variants(Base, Altair, Merge, Shanghai), + variants(Base, Altair, Merge, Capella), variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) )] #[derive(Debug, PartialEq, Clone, Encode)] @@ -66,9 +66,9 @@ where pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge, Shanghai))] + #[superstruct(only(Altair, Merge, Capella))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair, Merge, Shanghai))] + #[superstruct(only(Altair, Merge, Capella))] pub current_epoch_participation: VariableList, // Finality @@ -78,17 +78,17 @@ where pub finalized_checkpoint: Checkpoint, // Inactivity - #[superstruct(only(Altair, Merge, Shanghai))] + #[superstruct(only(Altair, Merge, Capella))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair, Merge, Shanghai))] + #[superstruct(only(Altair, Merge, Capella))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge, Shanghai))] + #[superstruct(only(Altair, Merge, Capella))] pub next_sync_committee: Arc>, // Execution - #[superstruct(only(Merge, Shanghai))] + #[superstruct(only(Merge, Capella))] pub latest_execution_payload_header: ExecutionPayloadHeader, } @@ -178,11 +178,11 @@ impl PartialBeaconState { latest_execution_payload_header ] ), - BeaconState::Shanghai(s) => impl_from_state_forgetful!( + BeaconState::Capella(s) => impl_from_state_forgetful!( s, outer, - Shanghai, - PartialBeaconStateShanghai, + Capella, + PartialBeaconStateCapella, [ previous_epoch_participation, current_epoch_participation, @@ -379,10 +379,10 @@ impl TryInto> for PartialBeaconState { latest_execution_payload_header ] ), - PartialBeaconState::Shanghai(inner) => impl_try_into_beacon_state!( + PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( inner, - Shanghai, - BeaconStateShanghai, + Capella, + BeaconStateCapella, [ previous_epoch_participation, current_epoch_participation, diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index 23bd35bf625..6351fdcc357 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -45,7 +45,7 @@ pub fn slash_validator( validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?; let proposer_reward = match state { BeaconState::Base(_) => whistleblower_reward.safe_div(spec.proposer_reward_quotient)?, - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { whistleblower_reward .safe_mul(PROPOSER_WEIGHT)? .safe_div(WEIGHT_DENOMINATOR)? 
diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 10b0e5f9b0c..71b4ee53551 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -232,7 +232,7 @@ pub fn process_attestations<'a, T: EthSpec, Payload: ExecPayload>( } BeaconBlockBodyRef::Altair(_) | BeaconBlockBodyRef::Merge(_) - | BeaconBlockBodyRef::Shanghai(_) => { + | BeaconBlockBodyRef::Capella(_) => { altair::process_attestations( state, block_body.attestations(), diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index 8074a32ed03..fc93ab79b96 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -37,7 +37,7 @@ pub fn process_epoch( match state { BeaconState::Base(_) => base::process_epoch(state, spec), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { altair::process_epoch(state, spec) } } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index dee3292a8b9..199b7601d6a 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -1,6 +1,6 @@ use crate::beacon_block_body::{ - BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, BeaconBlockBodyRef, - BeaconBlockBodyRefMut, BeaconBlockBodyShanghai, + BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyCapella, BeaconBlockBodyMerge, + BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; use crate::test_utils::TestRandom; use crate::*; @@ -17,7 +17,7 @@ use tree_hash_derive::TreeHash; /// A block of the `BeaconChain`. #[superstruct( - variants(Base, Altair, Merge, Shanghai), + variants(Base, Altair, Merge, Capella), variant_attributes( derive( Debug, @@ -64,8 +64,8 @@ pub struct BeaconBlock = FullPayload> { pub body: BeaconBlockBodyAltair, #[superstruct(only(Merge), partial_getter(rename = "body_merge"))] pub body: BeaconBlockBodyMerge, - #[superstruct(only(Shanghai), partial_getter(rename = "body_shanghai"))] - pub body: BeaconBlockBodyShanghai, + #[superstruct(only(Capella), partial_getter(rename = "body_capella"))] + pub body: BeaconBlockBodyCapella, } pub type BlindedBeaconBlock = BeaconBlock>; @@ -191,7 +191,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRef<'a, T, Payload> { BeaconBlockRef::Base { .. } => ForkName::Base, BeaconBlockRef::Altair { .. } => ForkName::Altair, BeaconBlockRef::Merge { .. } => ForkName::Merge, - BeaconBlockRef::Shanghai { .. } => ForkName::Shanghai, + BeaconBlockRef::Capella { .. } => ForkName::Capella, }; if fork_at_slot == object_fork { diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 438d9535c96..d48515440d2 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -13,7 +13,7 @@ use tree_hash_derive::TreeHash; /// /// This *superstruct* abstracts over the hard-fork. 
#[superstruct( - variants(Base, Altair, Merge, Shanghai), + variants(Base, Altair, Merge, Capella), variant_attributes( derive( Debug, @@ -47,16 +47,16 @@ pub struct BeaconBlockBody = FullPayload> pub attestations: VariableList, T::MaxAttestations>, pub deposits: VariableList, pub voluntary_exits: VariableList, - #[superstruct(only(Altair, Merge, Shanghai))] + #[superstruct(only(Altair, Merge, Capella))] pub sync_aggregate: SyncAggregate, // We flatten the execution payload so that serde can use the name of the inner type, // either `execution_payload` for full payloads, or `execution_payload_header` for blinded // payloads. - #[superstruct(only(Merge, Shanghai))] + #[superstruct(only(Merge, Capella))] #[serde(flatten)] pub execution_payload: Payload, - #[superstruct(only(Shanghai))] - pub blob_kzgs: VariableList, + #[superstruct(only(Capella))] + pub blob_kzgs: VariableList, #[superstruct(only(Base, Altair))] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] @@ -71,7 +71,7 @@ impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { BeaconBlockBodyRef::Base { .. } => ForkName::Base, BeaconBlockBodyRef::Altair { .. } => ForkName::Altair, BeaconBlockBodyRef::Merge { .. } => ForkName::Merge, - BeaconBlockBodyRef::Shanghai { .. } => ForkName::Shanghai, + BeaconBlockBodyRef::Capella { .. } => ForkName::Capella, } } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index eb4be019f75..8a5cdda9b66 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -172,7 +172,7 @@ impl From for Hash256 { /// The state of the `BeaconChain` at some slot. #[superstruct( - variants(Base, Altair, Merge, Shanghai), + variants(Base, Altair, Merge, Capella), variant_attributes( derive( Derivative, @@ -250,9 +250,9 @@ where pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge, Shanghai))] + #[superstruct(only(Altair, Merge, Capella))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair, Merge, Shanghai))] + #[superstruct(only(Altair, Merge, Capella))] pub current_epoch_participation: VariableList, // Finality @@ -267,17 +267,17 @@ where // Inactivity #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] - #[superstruct(only(Altair, Merge, Shanghai))] + #[superstruct(only(Altair, Merge, Capella))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair, Merge, Shanghai))] + #[superstruct(only(Altair, Merge, Capella))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge, Shanghai))] + #[superstruct(only(Altair, Merge, Capella))] pub next_sync_committee: Arc>, // Execution - #[superstruct(only(Merge, Shanghai))] + #[superstruct(only(Merge, Capella))] pub latest_execution_payload_header: ExecutionPayloadHeader, // Caching (not in the spec) @@ -389,7 +389,7 @@ impl BeaconState { BeaconState::Base { .. } => ForkName::Base, BeaconState::Altair { .. } => ForkName::Altair, BeaconState::Merge { .. } => ForkName::Merge, - BeaconState::Shanghai { .. } => ForkName::Shanghai, + BeaconState::Capella { .. 
} => ForkName::Capella, }; if fork_at_slot == object_fork { @@ -1103,7 +1103,7 @@ impl BeaconState { BeaconState::Base(state) => (&mut state.validators, &mut state.balances), BeaconState::Altair(state) => (&mut state.validators, &mut state.balances), BeaconState::Merge(state) => (&mut state.validators, &mut state.balances), - BeaconState::Shanghai(state) => (&mut state.validators, &mut state.balances), + BeaconState::Capella(state) => (&mut state.validators, &mut state.balances), } } @@ -1300,14 +1300,14 @@ impl BeaconState { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.current_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.current_epoch_participation), - BeaconState::Shanghai(state) => Ok(&mut state.current_epoch_participation), + BeaconState::Capella(state) => Ok(&mut state.current_epoch_participation), } } else if epoch == self.previous_epoch() { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.previous_epoch_participation), - BeaconState::Shanghai(state) => Ok(&mut state.previous_epoch_participation), + BeaconState::Capella(state) => Ok(&mut state.previous_epoch_participation), } } else { Err(BeaconStateError::EpochOutOfBounds) @@ -1612,7 +1612,7 @@ impl BeaconState { BeaconState::Base(inner) => BeaconState::Base(inner.clone()), BeaconState::Altair(inner) => BeaconState::Altair(inner.clone()), BeaconState::Merge(inner) => BeaconState::Merge(inner.clone()), - BeaconState::Shanghai(inner) => BeaconState::Shanghai(inner.clone()), + BeaconState::Capella(inner) => BeaconState::Capella(inner.clone()), }; if config.committee_caches { *res.committee_caches_mut() = self.committee_caches().clone(); diff --git a/consensus/types/src/blob_wrapper.rs b/consensus/types/src/blobs_sidecar.rs similarity index 72% rename from consensus/types/src/blob_wrapper.rs rename to consensus/types/src/blobs_sidecar.rs index 7960403a71b..e8644af157c 100644 --- a/consensus/types/src/blob_wrapper.rs +++ b/consensus/types/src/blobs_sidecar.rs @@ -8,13 +8,13 @@ use tree_hash_derive::TreeHash; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq, Default)] -pub struct BlobWrapper { +pub struct BlobsSidecar { pub beacon_block_root: Hash256, pub beacon_block_slot: Slot, - pub blobs: VariableList, E::MaxObjectListSize>, + pub blobs: VariableList, E::MaxBlobsPerBlock>, } -impl BlobWrapper { +impl BlobsSidecar { pub fn empty() -> Self { Self::default() } @@ -22,6 +22,6 @@ impl BlobWrapper { // Fixed part Self::empty().as_ssz_bytes().len() // Max size of variable length `blobs` field - + (E::max_object_list_size() * as Encode>::ssz_fixed_len()) + + (E::max_object_list_size() * as Encode>::ssz_fixed_len()) } } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 4f62022fb21..9f25208b9e2 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -22,6 +22,8 @@ pub enum Domain { ContributionAndProof, SyncCommitteeSelectionProof, ApplicationMask(ApplicationDomain), + //FIXME(sean) add this domain + //BlobsSideCar, } /// Lighthouse's internal configuration struct. 
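In the rename above, `BlobWrapper` becomes `BlobsSidecar` and its blob list is bounded by `MaxBlobsPerBlock`. A small construction and SSZ round-trip sketch follows, relying only on the derives shown in the diff and on `VariableList::empty()` as used elsewhere in this patch; it is an illustration, not patch code.

    use ssz::{Decode, Encode};
    use ssz_types::VariableList;
    use types::{BlobsSidecar, Hash256, MainnetEthSpec, Slot};

    // Sketch: build an empty sidecar for a block and check that it SSZ round-trips.
    fn blobs_sidecar_roundtrip() {
        let sidecar = BlobsSidecar::<MainnetEthSpec> {
            beacon_block_root: Hash256::zero(),
            beacon_block_slot: Slot::new(1),
            blobs: VariableList::empty(),
        };
        let bytes = sidecar.as_ssz_bytes();
        let decoded =
            BlobsSidecar::<MainnetEthSpec>::from_ssz_bytes(&bytes).expect("valid ssz");
        assert_eq!(decoded, sidecar);
    }

Note that `max_size()` still calls `max_object_list_size()`, which the eth_spec hunk later in this patch re-points at `MaxBlobsPerBlock`, so the size bound stays consistent with the new field type.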
@@ -151,10 +153,10 @@ pub struct ChainSpec { pub safe_slots_to_import_optimistically: u64, /* - * Shanghai hard fork params + * Capella hard fork params */ - pub shanghai_fork_version: [u8; 4], - pub shanghai_fork_epoch: Option, + pub capella_fork_version: [u8; 4], + pub capella_fork_epoch: Option, /* * Networking @@ -236,8 +238,8 @@ impl ChainSpec { /// Returns the name of the fork which is active at `epoch`. pub fn fork_name_at_epoch(&self, epoch: Epoch) -> ForkName { - match self.shanghai_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Shanghai, + match self.capella_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Capella, _ => match self.bellatrix_fork_epoch { Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, _ => match self.altair_fork_epoch { @@ -254,7 +256,7 @@ impl ChainSpec { ForkName::Base => self.genesis_fork_version, ForkName::Altair => self.altair_fork_version, ForkName::Merge => self.bellatrix_fork_version, - ForkName::Shanghai => self.shanghai_fork_version, + ForkName::Capella => self.capella_fork_version, } } @@ -264,7 +266,7 @@ impl ChainSpec { ForkName::Base => Some(Epoch::new(0)), ForkName::Altair => self.altair_fork_epoch, ForkName::Merge => self.bellatrix_fork_epoch, - ForkName::Shanghai => self.shanghai_fork_epoch, + ForkName::Capella => self.capella_fork_epoch, } } @@ -274,7 +276,7 @@ impl ChainSpec { BeaconState::Base(_) => self.inactivity_penalty_quotient, BeaconState::Altair(_) => self.inactivity_penalty_quotient_altair, BeaconState::Merge(_) => self.inactivity_penalty_quotient_bellatrix, - BeaconState::Shanghai(_) => self.inactivity_penalty_quotient_bellatrix, + BeaconState::Capella(_) => self.inactivity_penalty_quotient_bellatrix, } } @@ -287,7 +289,7 @@ impl ChainSpec { BeaconState::Base(_) => self.proportional_slashing_multiplier, BeaconState::Altair(_) => self.proportional_slashing_multiplier_altair, BeaconState::Merge(_) => self.proportional_slashing_multiplier_bellatrix, - BeaconState::Shanghai(_) => self.proportional_slashing_multiplier_bellatrix, + BeaconState::Capella(_) => self.proportional_slashing_multiplier_bellatrix, } } @@ -300,7 +302,7 @@ impl ChainSpec { BeaconState::Base(_) => self.min_slashing_penalty_quotient, BeaconState::Altair(_) => self.min_slashing_penalty_quotient_altair, BeaconState::Merge(_) => self.min_slashing_penalty_quotient_bellatrix, - BeaconState::Shanghai(_) => self.min_slashing_penalty_quotient_bellatrix, + BeaconState::Capella(_) => self.min_slashing_penalty_quotient_bellatrix, } } @@ -583,12 +585,11 @@ impl ChainSpec { safe_slots_to_import_optimistically: 128u64, /* - * Shanghai hardfork params + * Capella hardfork params */ //FIXME(sean) - shanghai_fork_version: [0x03, 0x00, 0x00, 0x00], - shanghai_fork_epoch: None, - + capella_fork_version: [0x03, 0x00, 0x00, 0x00], + capella_fork_epoch: None, /* * Network specific */ @@ -644,10 +645,10 @@ impl ChainSpec { // `Uint256::MAX` which is `2*256- 1`. 
.checked_add(Uint256::one()) .expect("addition does not overflow"), - // Shanghai + // Capella //FIXME(sean) - shanghai_fork_version: [0x03, 0x00, 0x00, 0x01], - shanghai_fork_epoch: None, + capella_fork_version: [0x03, 0x00, 0x00, 0x01], + capella_fork_epoch: None, // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -804,8 +805,8 @@ impl ChainSpec { safe_slots_to_import_optimistically: 128u64, //FIXME(sean) - shanghai_fork_version: [0x03, 0x00, 0x00, 0x64], - shanghai_fork_epoch: None, + capella_fork_version: [0x03, 0x00, 0x00, 0x64], + capella_fork_epoch: None, /* * Network specific @@ -883,14 +884,14 @@ pub struct Config { pub bellatrix_fork_epoch: Option>, // FIXME(sean): remove this default - #[serde(default = "default_shanghai_fork_version")] + #[serde(default = "default_capella_fork_version")] #[serde(with = "eth2_serde_utils::bytes_4_hex")] - shanghai_fork_version: [u8; 4], + capella_fork_version: [u8; 4], // FIXME(sean): remove this default - #[serde(default = "default_shanghai_fork_epoch")] + #[serde(default = "default_capella_fork_epoch")] #[serde(serialize_with = "serialize_fork_epoch")] #[serde(deserialize_with = "deserialize_fork_epoch")] - pub shanghai_fork_epoch: Option>, + pub capella_fork_epoch: Option>, #[serde(with = "eth2_serde_utils::quoted_u64")] seconds_per_slot: u64, @@ -929,7 +930,7 @@ fn default_bellatrix_fork_version() -> [u8; 4] { [0xff, 0xff, 0xff, 0xff] } -fn default_shanghai_fork_version() -> [u8; 4] { +fn default_capella_fork_version() -> [u8; 4] { // This value shouldn't be used. [0xff, 0xff, 0xff, 0xff] } @@ -1030,9 +1031,9 @@ impl Config { bellatrix_fork_epoch: spec .bellatrix_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), - shanghai_fork_version: spec.shanghai_fork_version, - shanghai_fork_epoch: spec - .shanghai_fork_epoch + capella_fork_version: spec.capella_fork_version, + capella_fork_epoch: spec + .capella_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), seconds_per_slot: spec.seconds_per_slot, @@ -1079,8 +1080,8 @@ impl Config { altair_fork_epoch, bellatrix_fork_epoch, bellatrix_fork_version, - shanghai_fork_epoch, - shanghai_fork_version, + capella_fork_epoch, + capella_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, @@ -1111,8 +1112,8 @@ impl Config { altair_fork_epoch: altair_fork_epoch.map(|q| q.value), bellatrix_fork_epoch: bellatrix_fork_epoch.map(|q| q.value), bellatrix_fork_version, - shanghai_fork_epoch: shanghai_fork_epoch.map(|q| q.value), - shanghai_fork_version, + capella_fork_epoch: capella_fork_epoch.map(|q| q.value), + capella_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, diff --git a/consensus/types/src/consts.rs b/consensus/types/src/consts.rs index a9377bc3e00..8e12b05fb5d 100644 --- a/consensus/types/src/consts.rs +++ b/consensus/types/src/consts.rs @@ -22,3 +22,16 @@ pub mod altair { pub mod merge { pub const INTERVALS_PER_SLOT: u64 = 3; } +pub mod cappella { + use crate::Uint256; + + use lazy_static::lazy_static; + + lazy_static! 
{ + pub static ref BLS_MODULUS: Uint256 = Uint256::from_dec_str( + "52435875175126190479447740508185965837690552500527637822603658699938581184513" + ) + .expect("should initialize BLS_MODULUS"); + } + pub const BLOB_TX_TYPE: u8 = 5; +} diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 6bf462b0261..a5544666904 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -96,10 +96,10 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + type MinGasLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxExtraDataBytes: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* - * New in Shanghaisharding + * New in Capella */ - type MaxObjectListSize: Unsigned + Clone + Sync + Send + Debug + PartialEq; - type ChunksPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -229,11 +229,11 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + } fn max_object_list_size() -> usize { - Self::MaxObjectListSize::to_usize() + Self::MaxBlobsPerBlock::to_usize() } fn chunks_per_blob() -> usize { - Self::ChunksPerBlob::to_usize() + Self::FieldElementsPerBlob::to_usize() } } @@ -275,8 +275,8 @@ impl EthSpec for MainnetEthSpec { type GasLimitDenominator = U1024; type MinGasLimit = U5000; type MaxExtraDataBytes = U32; - type MaxObjectListSize = U16777216; // 2**24 - type ChunksPerBlob = U4096; + type MaxBlobsPerBlock = U16777216; // 2**24 + type FieldElementsPerBlob = U4096; type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch @@ -325,8 +325,8 @@ impl EthSpec for MinimalEthSpec { GasLimitDenominator, MinGasLimit, MaxExtraDataBytes, - MaxObjectListSize, - ChunksPerBlob + MaxBlobsPerBlock, + FieldElementsPerBlob }); fn default_spec() -> ChainSpec { @@ -371,8 +371,8 @@ impl EthSpec for GnosisEthSpec { type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U2048; // 128 max attestations * 16 slots per epoch type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch - type MaxObjectListSize = U16777216; // 2**24 - type ChunksPerBlob = U4096; + type MaxBlobsPerBlock = U16777216; // 2**24 + type FieldElementsPerBlob = U4096; fn default_spec() -> ChainSpec { ChainSpec::gnosis() diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 742136ca9e6..90d1fbc6864 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -47,10 +47,10 @@ impl ForkContext { )); } - if spec.shanghai_fork_epoch.is_some() { + if spec.capella_fork_epoch.is_some() { fork_to_digest.push(( - ForkName::Shanghai, - ChainSpec::compute_fork_digest(spec.shanghai_fork_version, genesis_validators_root), + ForkName::Capella, + ChainSpec::compute_fork_digest(spec.capella_fork_version, genesis_validators_root), )); } diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 88108ffa35a..a87e2b521b4 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -11,7 +11,7 @@ pub enum ForkName { Base, Altair, Merge, - Shanghai, + Capella, 
} impl ForkName { @@ -39,9 +39,9 @@ impl ForkName { spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec } - ForkName::Shanghai => { + ForkName::Capella => { spec.bellatrix_fork_epoch = Some(Epoch::new(0)); - spec.shanghai_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); spec } } @@ -55,7 +55,7 @@ impl ForkName { ForkName::Base => None, ForkName::Altair => Some(ForkName::Base), ForkName::Merge => Some(ForkName::Altair), - ForkName::Shanghai => Some(ForkName::Merge), + ForkName::Capella => Some(ForkName::Merge), } } @@ -66,8 +66,8 @@ impl ForkName { match self { ForkName::Base => Some(ForkName::Altair), ForkName::Altair => Some(ForkName::Merge), - ForkName::Merge => Some(ForkName::Shanghai), - ForkName::Shanghai => None, + ForkName::Merge => Some(ForkName::Capella), + ForkName::Capella => None, } } } @@ -110,7 +110,7 @@ macro_rules! map_fork_name_with { ($t::Merge(value), extra_data) } //TODO: don't have a beacon state variant for the new fork yet - ForkName::Shanghai => { + ForkName::Capella => { let (value, extra_data) = $body; ($t::Merge(value), extra_data) } @@ -137,7 +137,7 @@ impl Display for ForkName { ForkName::Base => "phase0".fmt(f), ForkName::Altair => "altair".fmt(f), ForkName::Merge => "bellatrix".fmt(f), - ForkName::Shanghai => "shanghai".fmt(f), + ForkName::Capella => "capella".fmt(f), } } } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 804b82a24ee..b1437650be6 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -86,11 +86,13 @@ pub mod sync_subnet_id; mod tree_hash_impls; pub mod validator_registration_data; -mod blob_wrapper; +mod blobs_sidecar; mod kzg_commitment; +mod signed_blobs_sidecar; pub mod slot_data; #[cfg(feature = "sqlite")] pub mod sqlite; + pub use kzg_commitment::KZGCommitment; use ethereum_types::{H160, H256}; @@ -101,17 +103,17 @@ pub use crate::attestation_data::AttestationData; pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockShanghai, BeaconBlockMerge, BeaconBlockRef, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, }; pub use crate::beacon_block_body::{ - BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, - BeaconBlockBodyRef, BeaconBlockBodyRefMut, BeaconBlockBodyShanghai, + BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyCapella, + BeaconBlockBodyMerge, BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *}; -pub use crate::blob_wrapper::BlobWrapper; +pub use crate::blobs_sidecar::BlobsSidecar; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; pub use crate::config_and_preset::{ @@ -148,7 +150,7 @@ pub use crate::shuffling_id::AttestationShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{ SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockHash, - SignedBeaconBlockMerge, SignedBlindedBeaconBlock,SignedBeaconBlockShanghai + SignedBeaconBlockMerge, SignedBlindedBeaconBlock,SignedBeaconBlockCapella }; pub use 
crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 49822da821a..de8b65f50d2 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -38,7 +38,7 @@ impl From for Hash256 { /// A `BeaconBlock` and a signature from its proposer. #[superstruct( - variants(Base, Altair, Merge, Shanghai), + variants(Base, Altair, Merge, Capella), variant_attributes( derive( Debug, @@ -72,8 +72,8 @@ pub struct SignedBeaconBlock = FullPayload, #[superstruct(only(Merge), partial_getter(rename = "message_merge"))] pub message: BeaconBlockMerge, - #[superstruct(only(Shanghai), partial_getter(rename = "message_shanghai"))] - pub message: BeaconBlockShanghai, + #[superstruct(only(Capella), partial_getter(rename = "message_capella"))] + pub message: BeaconBlockCapella, pub signature: Signature, } @@ -131,8 +131,8 @@ impl> SignedBeaconBlock { BeaconBlock::Merge(message) => { SignedBeaconBlock::Merge(SignedBeaconBlockMerge { message, signature }) } - BeaconBlock::Shanghai(message) => { - SignedBeaconBlock::Shanghai(SignedBeaconBlockShanghai { message, signature }) + BeaconBlock::Capella(message) => { + SignedBeaconBlock::Capella(SignedBeaconBlockCapella { message, signature }) } } } diff --git a/consensus/types/src/signed_blobs_sidecar.rs b/consensus/types/src/signed_blobs_sidecar.rs new file mode 100644 index 00000000000..f5f60e2bdcc --- /dev/null +++ b/consensus/types/src/signed_blobs_sidecar.rs @@ -0,0 +1,15 @@ +use crate::{Blob, BlobsSidecar, EthSpec, Hash256, Slot}; +use bls::Signature; +use serde_derive::{Deserialize, Serialize}; +use ssz::Encode; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; + +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq)] +pub struct SignedBlobsSidecar { + pub message: BlobsSidecar, + pub signature: Signature, +} diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index b695015d797..c172d880aa2 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -78,6 +78,6 @@ pub fn previous_fork(fork_name: ForkName) -> ForkName { ForkName::Base => ForkName::Base, ForkName::Altair => ForkName::Base, ForkName::Merge => ForkName::Altair, // TODO: Check this when tests are released.. - ForkName::Shanghai => ForkName::Merge, // TODO: Check this when tests are released.. + ForkName::Capella => ForkName::Merge, // TODO: Check this when tests are released.. 
} } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 04470c73c0d..2652c792e70 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -97,7 +97,7 @@ impl EpochTransition for JustificationAndFinalization { justification_and_finalization_state.apply_changes_to_state(state); Ok(()) } - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { let justification_and_finalization_state = altair::process_justification_and_finalization( state, @@ -118,7 +118,7 @@ impl EpochTransition for RewardsAndPenalties { validator_statuses.process_attestations(state)?; base::process_rewards_and_penalties(state, &mut validator_statuses, spec) } - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { altair::process_rewards_and_penalties( state, &altair::ParticipationCache::new(state, spec).unwrap(), @@ -147,7 +147,7 @@ impl EpochTransition for Slashings { spec, )?; } - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { process_slashings( state, altair::ParticipationCache::new(state, spec) @@ -205,7 +205,7 @@ impl EpochTransition for SyncCommitteeUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { altair::process_sync_committee_updates(state, spec) } } @@ -216,7 +216,7 @@ impl EpochTransition for InactivityUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { altair::process_inactivity_updates( state, &altair::ParticipationCache::new(state, spec).unwrap(), @@ -231,7 +231,7 @@ impl EpochTransition for ParticipationFlagUpdates { fn run(state: &mut BeaconState, _: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { altair::process_participation_flag_updates(state) } } @@ -280,7 +280,7 @@ impl> Case for EpochProcessing { } // No phase0 tests for Altair and later. 
ForkName::Altair | ForkName::Merge => T::name() != "participation_record_updates", - ForkName::Shanghai => false, // TODO: revisit when tests are out + ForkName::Capella => false, // TODO: revisit when tests are out } } diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index 57c4f125442..78d20735731 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -61,7 +61,7 @@ impl Case for ForkTest { ForkName::Base => panic!("phase0 not supported"), ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| result_state), ForkName::Merge => upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state), - ForkName::Shanghai => panic!("shanghai not supported"), + ForkName::Capella => panic!("capella not supported"), }; compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index bfa63bfe69b..b8d46dd3d60 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -81,7 +81,7 @@ impl Operation for Attestation { BeaconState::Base(_) => { base::process_attestations(state, &[self.clone()], VerifySignatures::True, spec) } - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Shanghai(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { altair::process_attestation( state, self, diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index bbc98994a77..02f2f8ff061 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -42,9 +42,9 @@ impl LoadCase for TransitionTest { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(metadata.fork_epoch); } - ForkName::Shanghai => { + ForkName::Capella => { spec.bellatrix_fork_epoch = Some(Epoch::new(0)); - spec.shanghai_fork_epoch = Some(metadata.fork_epoch); + spec.capella_fork_epoch = Some(metadata.fork_epoch); } } diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 967d6b139e0..4e39dc7357f 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -90,8 +90,8 @@ impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { block: None, block_header: Some(block.block_header()), }), - BeaconBlock::Shanghai(_) => Ok(Web3SignerObject::BeaconBlock { - version: ForkName::Shanghai, + BeaconBlock::Capella(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Capella, block: None, block_header: Some(block.block_header()), }), From 7520651515dd65da45464dfb3d6fbcb3fea11db7 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 5 Apr 2022 17:02:54 -0400 Subject: [PATCH 033/263] cargo fix and some test fixes --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- beacon_node/beacon_chain/src/execution_payload.rs | 6 +++--- beacon_node/execution_layer/src/engine_api.rs | 2 +- beacon_node/execution_layer/src/lib.rs | 12 ++++++------ .../lighthouse_network/src/rpc/codec/base.rs | 3 +++ .../lighthouse_network/src/rpc/codec/ssz_snappy.rs | 3 +++ beacon_node/lighthouse_network/tests/common/mod.rs | 3 +++ beacon_node/network/src/beacon_processor/mod.rs | 2 +- .../src/beacon_processor/worker/gossip_methods.rs | 14 +++++++------- .../src/beacon_processor/worker/rpc_methods.rs | 6 +++--- beacon_node/network/src/sync/manager.rs | 2 +- 
consensus/types/src/signed_blobs_sidecar.rs | 3 +-- 12 files changed, 33 insertions(+), 25 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9f536254adf..ad2cd9ca633 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3622,7 +3622,7 @@ impl BeaconChain { }), BeaconState::Capella(_) => { let sync_aggregate = get_sync_aggregate()?; - let (execution_payload, blobs) = + let (execution_payload, _blobs) = get_execution_payload_and_blobs(self, &state, proposer_index)?; //FIXME(sean) get blobs BeaconBlock::Capella(BeaconBlockCapella { diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 022cfc36132..7b84d4f8f77 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -520,9 +520,9 @@ pub async fn prepare_execution_payload_and_blobs< T: BeaconChainTypes, Payload: ExecPayload, >( - chain: &BeaconChain, - state: &BeaconState, - proposer_index: u64, + _chain: &BeaconChain, + _state: &BeaconState, + _proposer_index: u64, ) -> Result< Option<( Payload, diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 361953bebc1..9c94a666e69 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -9,7 +9,7 @@ pub use types::{ Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, Hash256, Uint256, VariableList, }; -use types::{Blob, KZGCommitment}; +use types::{KZGCommitment}; pub mod auth; pub mod http; diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 50ea35d76d1..b28d08fa69b 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -890,12 +890,12 @@ impl ExecutionLayer { pub async fn get_blob( &self, - parent_hash: Hash256, - timestamp: u64, - random: Hash256, - finalized_block_hash: Hash256, - proposer_index: u64, - versioned_hash: Hash256, + _parent_hash: Hash256, + _timestamp: u64, + _random: Hash256, + _finalized_block_hash: Hash256, + _proposer_index: u64, + _versioned_hash: Hash256, ) -> Result { todo!() } diff --git a/beacon_node/lighthouse_network/src/rpc/codec/base.rs b/beacon_node/lighthouse_network/src/rpc/codec/base.rs index 53f85d9a7b6..6c6ce2da32f 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/base.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/base.rs @@ -193,14 +193,17 @@ mod tests { let mut chain_spec = Spec::default_spec(); let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); + let capella_fork_epoch = Epoch::new(3); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + chain_spec.capella_fork_epoch = Some(capella_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 58bd9786ef6..0132733280e 100644 --- 
a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -677,14 +677,17 @@ mod tests { let mut chain_spec = Spec::default_spec(); let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); + let capella_fork_epoch = Epoch::new(3); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + chain_spec.capella_fork_epoch = Some(capella_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } diff --git a/beacon_node/lighthouse_network/tests/common/mod.rs b/beacon_node/lighthouse_network/tests/common/mod.rs index a3c32d0fb1b..c367f5f0216 100644 --- a/beacon_node/lighthouse_network/tests/common/mod.rs +++ b/beacon_node/lighthouse_network/tests/common/mod.rs @@ -32,14 +32,17 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext { let mut chain_spec = E::default_spec(); let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); + let capella_fork_epoch = Epoch::new(3); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + chain_spec.capella_fork_epoch = Some(capella_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(E::slots_per_epoch()), + ForkName::Capella => capella_fork_epoch.start_slot(E::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 496142f9a10..e4202d447f1 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -60,7 +60,7 @@ use std::task::Context; use std::time::Duration; use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::{mpsc}; use types::{ Attestation, AttesterSlashing, BlobsSidecar, Hash256, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index e78da09cac4..b257862a1e9 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -695,13 +695,13 @@ impl Worker { #[allow(clippy::too_many_arguments)] pub fn process_gossip_blob( self, - message_id: MessageId, - peer_id: PeerId, - peer_client: Client, - blob: BlobsSidecar, - reprocess_tx: mpsc::Sender>, - duplicate_cache: DuplicateCache, - seen_duration: Duration, + _message_id: MessageId, + _peer_id: PeerId, + _peer_client: Client, + _blob: BlobsSidecar, + _reprocess_tx: mpsc::Sender>, + _duplicate_cache: DuplicateCache, + _seen_duration: Duration, ) { //FIXME(sean) } diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index d480004f5dc..d7a21e49b47 100644 --- 
a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -125,9 +125,9 @@ impl Worker { pub fn handle_tx_blobs_by_range_request( &self, - peer_id: PeerId, - request_id: PeerRequestId, - mut req: TxBlobsByRangeRequest, + _peer_id: PeerId, + _request_id: PeerRequestId, + _req: TxBlobsByRangeRequest, ) { //FIXME(sean) } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 1451ffb075e..15b5d242154 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -53,7 +53,7 @@ use std::ops::Sub; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; -use types::{BlobsSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{BlobsSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a diff --git a/consensus/types/src/signed_blobs_sidecar.rs b/consensus/types/src/signed_blobs_sidecar.rs index f5f60e2bdcc..3e1ee6df803 100644 --- a/consensus/types/src/signed_blobs_sidecar.rs +++ b/consensus/types/src/signed_blobs_sidecar.rs @@ -1,9 +1,8 @@ -use crate::{Blob, BlobsSidecar, EthSpec, Hash256, Slot}; +use crate::{BlobsSidecar, EthSpec}; use bls::Signature; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::VariableList; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; From e81dbbfea4a985f92da6079f751298c8be9cc45a Mon Sep 17 00:00:00 2001 From: realbigsean Date: Mon, 3 Oct 2022 21:48:02 -0400 Subject: [PATCH 034/263] compile --- beacon_node/beacon_chain/src/beacon_chain.rs | 15 +- .../beacon_chain/src/block_verification.rs | 34 ++- beacon_node/beacon_chain/src/builder.rs | 2 - .../beacon_chain/src/execution_payload.rs | 58 +--- beacon_node/execution_layer/src/engine_api.rs | 6 +- .../execution_layer/src/engine_api/http.rs | 12 +- .../src/engine_api/json_structures.rs | 41 +-- beacon_node/execution_layer/src/lib.rs | 30 +- beacon_node/http_api/src/block_rewards.rs | 2 +- beacon_node/lighthouse_network/src/config.rs | 3 - .../src/peer_manager/mod.rs | 2 - .../src/rpc/codec/ssz_snappy.rs | 14 +- .../lighthouse_network/src/rpc/methods.rs | 35 +-- beacon_node/lighthouse_network/src/rpc/mod.rs | 9 +- .../lighthouse_network/src/rpc/outbound.rs | 19 +- .../lighthouse_network/src/rpc/protocol.rs | 27 +- .../src/rpc/rate_limiter.rs | 4 +- .../src/service/api_types.rs | 12 +- .../lighthouse_network/src/service/mod.rs | 41 ++- .../lighthouse_network/src/types/pubsub.rs | 7 +- .../network/src/beacon_processor/mod.rs | 35 +-- .../beacon_processor/worker/gossip_methods.rs | 26 +- .../beacon_processor/worker/rpc_methods.rs | 266 +++++++++--------- beacon_node/network/src/router/processor.rs | 35 +-- beacon_node/network/src/sync/manager.rs | 16 +- beacon_node/store/src/hot_cold_store.rs | 35 ++- beacon_node/store/src/lib.rs | 2 +- common/eth2_config/src/lib.rs | 6 +- .../sepolia/genesis.ssz.zip | Bin 136489 -> 273912 bytes consensus/fork_choice/src/fork_choice.rs | 4 +- consensus/types/src/beacon_block.rs | 4 +- consensus/types/src/beacon_block_body.rs | 12 +- consensus/types/src/blob.rs | 23 +- consensus/types/src/blobs_sidecar.rs | 4 +- consensus/types/src/bls_field_element.rs | 3 +- consensus/types/src/chain_spec.rs | 3 +- consensus/types/src/eth_spec.rs | 2 +- 
consensus/types/src/execution_payload.rs | 5 +- consensus/types/src/fork_context.rs | 5 +- consensus/types/src/fork_name.rs | 9 +- consensus/types/src/kzg_commitment.rs | 2 +- consensus/types/src/kzg_proof.rs | 14 +- consensus/types/src/lib.rs | 20 +- consensus/types/src/payload.rs | 2 +- consensus/types/src/signed_beacon_block.rs | 38 +-- .../ef_tests/src/cases/epoch_processing.rs | 2 +- 46 files changed, 374 insertions(+), 572 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4280065552e..8e9cd4bc72b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -17,9 +17,9 @@ use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::events::ServerSentEventHandler; -use crate::execution_payload::{ PreparePayloadHandle}; +use crate::execution_payload::get_execution_payload; +use crate::execution_payload::PreparePayloadHandle; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; -use crate::execution_payload::{get_execution_payload, get_execution_payload_and_blobs}; use crate::head_tracker::HeadTracker; use crate::historical_blocks::HistoricalBlockError; use crate::migrate::BackgroundMigrator; @@ -377,8 +377,6 @@ pub struct BeaconChain { /// Sender given to tasks, so that if they encounter a state in which execution cannot /// continue they can request that everything shuts down. pub shutdown_sender: Sender, - pub block_waiting_for_sidecar: Mutex>>, - pub sidecar_waiting_for_block: Mutex>>>, /// Logging to CLI, etc. pub(crate) log: Logger, /// Arbitrary bytes included in the blocks. @@ -2440,7 +2438,6 @@ impl BeaconChain { self: &Arc, block_root: Hash256, unverified_block: B, - sidecar: Option>>, count_unrealized: CountUnrealized, ) -> Result> { // Start the Prometheus timer. @@ -2458,7 +2455,7 @@ impl BeaconChain { let execution_pending = unverified_block.into_execution_pending_block(block_root, &chain)?; chain - .import_execution_pending_block(execution_pending, sidecar, count_unrealized) + .import_execution_pending_block(execution_pending, count_unrealized) .await }; @@ -2516,7 +2513,6 @@ impl BeaconChain { async fn import_execution_pending_block( self: Arc, execution_pending_block: ExecutionPendingBlock, - sidecar: Option>>, count_unrealized: CountUnrealized, ) -> Result> { let ExecutionPendingBlock { @@ -2572,7 +2568,6 @@ impl BeaconChain { move || { chain.import_block( block, - sidecar, block_root, state, confirmed_state_roots, @@ -2595,7 +2590,6 @@ impl BeaconChain { fn import_block( &self, signed_block: Arc>, - sidecar: Option>>, block_root: Hash256, mut state: BeaconState, confirmed_state_roots: Vec, @@ -2934,9 +2928,6 @@ impl BeaconChain { .collect(); ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); - if let Some(sidecar) = sidecar { - ops.push(StoreOp::PutBlobs(block_root, sidecar)); - } let txn_lock = self.store.hot_db.begin_rw_transaction(); if let Err(e) = self.store.do_atomically(ops) { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index ce41522f991..5fc295900bb 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -134,7 +134,10 @@ pub enum BlockError { /// its parent. 
ParentUnknown(Arc>), /// The block skips too many slots and is a DoS risk. - TooManySkippedSlots { parent_slot: Slot, block_slot: Slot }, + TooManySkippedSlots { + parent_slot: Slot, + block_slot: Slot, + }, /// The block slot is greater than the present slot. /// /// ## Peer scoring @@ -150,7 +153,10 @@ pub enum BlockError { /// ## Peer scoring /// /// The peer has incompatible state transition logic and is faulty. - StateRootMismatch { block: Hash256, local: Hash256 }, + StateRootMismatch { + block: Hash256, + local: Hash256, + }, /// The block was a genesis block, these blocks cannot be re-imported. GenesisBlock, /// The slot is finalized, no need to import. @@ -169,7 +175,9 @@ pub enum BlockError { /// /// It's unclear if this block is valid, but it conflicts with finality and shouldn't be /// imported. - NotFinalizedDescendant { block_parent_root: Hash256 }, + NotFinalizedDescendant { + block_parent_root: Hash256, + }, /// Block is already known, no need to re-import. /// /// ## Peer scoring @@ -182,7 +190,10 @@ pub enum BlockError { /// /// The `proposer` has already proposed a block at this slot. The existing block may or may not /// be equal to the given block. - RepeatProposal { proposer: u64, slot: Slot }, + RepeatProposal { + proposer: u64, + slot: Slot, + }, /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER. /// /// ## Peer scoring @@ -197,7 +208,10 @@ pub enum BlockError { /// ## Peer scoring /// /// The block is invalid and the peer is faulty. - IncorrectBlockProposer { block: u64, local_shuffling: u64 }, + IncorrectBlockProposer { + block: u64, + local_shuffling: u64, + }, /// The proposal signature in invalid. /// /// ## Peer scoring @@ -221,7 +235,10 @@ pub enum BlockError { /// ## Peer scoring /// /// The block is invalid and the peer is faulty. - BlockIsNotLaterThanParent { block_slot: Slot, parent_slot: Slot }, + BlockIsNotLaterThanParent { + block_slot: Slot, + parent_slot: Slot, + }, /// At least one block in the chain segment did not have it's parent root set to the root of /// the prior block. /// @@ -277,8 +294,9 @@ pub enum BlockError { /// /// The peer sent us an invalid block, but I'm not really sure how to score this in an /// "optimistic" sync world. 
- ParentExecutionPayloadInvalid { parent_root: Hash256 }, - + ParentExecutionPayloadInvalid { + parent_root: Hash256, + }, } /// Returned when block validation failed due to some issue verifying diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 223a9a6ee6f..916ebd23594 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -801,8 +801,6 @@ where validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), attester_cache: <_>::default(), early_attester_cache: <_>::default(), - block_waiting_for_sidecar: <_>::default(), - sidecar_waiting_for_block: <_>::default(), shutdown_sender: self .shutdown_sender .ok_or("Cannot build without a shutdown sender.")?, diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index bffaf8ee78f..a6138ff10e5 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -17,6 +17,7 @@ use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; use slot_clock::SlotClock; +use ssz_types::VariableList; use state_processing::per_block_processing::{ compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, partially_verify_execution_payload, @@ -24,7 +25,10 @@ use state_processing::per_block_processing::{ use std::sync::Arc; use tokio::task::JoinHandle; use tree_hash::TreeHash; -use types::{*, execution_payload::BlobsBundle}; +use types::{ + BeaconBlockRef, BeaconState, BeaconStateError, EthSpec, ExecPayload, ExecutionBlockHash, + Hash256, KzgCommitment, SignedBeaconBlock, Slot, +}; pub type PreparePayloadResult = Result; pub type PreparePayloadHandle = JoinHandle>>; @@ -387,36 +391,6 @@ pub fn get_execution_payload< Ok(join_handle) } -/// Wraps the async `prepare_execution_payload` function as a blocking task. -pub fn prepare_execution_payload_and_blobs_blocking< - T: BeaconChainTypes, - Payload: ExecPayload, ->( - chain: &BeaconChain, - state: &BeaconState, - proposer_index: u64, -) -> Result< - Option<( - Payload, - VariableList< - KzgCommitment, - <::EthSpec as EthSpec>::MaxBlobsPerBlock, - >, - )>, - BlockProductionError, -> { - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BlockProductionError::ExecutionLayerMissing)?; - - execution_layer - .block_on_generic(|_| async { - prepare_execution_payload_and_blobs::(chain, state, proposer_index).await - }) - .map_err(BlockProductionError::BlockingFailed)? -} - /// Prepares an execution payload for inclusion in a block. /// /// Will return `Ok(None)` if the merge fork has occurred, but a terminal block has not been found. 
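A rough sketch (not part of this patch) of the call order the TODO in the next hunk points at: the engine only has a payload id cached once `get_payload` has run, so the blobs-bundle lookup has to come second or it fails with `PayloadIdUnavailable`. The imports, the `ExecutionLayer` type parameters and both method signatures below are assumptions for illustration, not the crate's final API.

use execution_layer::{json_structures::JsonBlobBundlesV1, Error, ExecutionLayer};
use types::{Address, EthSpec, ExecPayload, ExecutionBlockHash, Hash256};

// Illustrative only: both calls are assumed to be keyed by the same
// (parent_hash, timestamp, prev_randao, fee_recipient) tuple.
async fn payload_with_blobs<E: EthSpec, Payload: ExecPayload<E>>(
    el: &ExecutionLayer<E>,
    parent_hash: ExecutionBlockHash,
    timestamp: u64,
    prev_randao: Hash256,
    fee_recipient: Address,
) -> Result<(Payload, JsonBlobBundlesV1<E>), Error> {
    // get_payload leaves a payload_id behind for this tuple...
    let payload = el
        .get_payload(parent_hash, timestamp, prev_randao, fee_recipient)
        .await?;
    // ...and get_blob_bundles looks that id up again, so the order matters.
    let blobs = el
        .get_blob_bundles(parent_hash, timestamp, prev_randao, fee_recipient)
        .await?;
    Ok((payload, blobs))
}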
@@ -513,7 +487,7 @@ where .await .map_err(BlockProductionError::GetPayloadFailed)?; - /* + /* TODO: fetch blob bundles from el engine for block building let suggested_fee_recipient = execution_layer.get_suggested_fee_recipient(proposer_index).await; let blobs = execution_layer.get_blob_bundles(parent_hash, timestamp, random, suggested_fee_recipient) @@ -523,23 +497,3 @@ where Ok(execution_payload) } - -pub async fn prepare_execution_payload_and_blobs< - T: BeaconChainTypes, - Payload: ExecPayload, ->( - _chain: &BeaconChain, - _state: &BeaconState, - _proposer_index: u64, -) -> Result< - Option<( - Payload, - VariableList< - KzgCommitment, - <::EthSpec as EthSpec>::MaxBlobsPerBlock, - >, - )>, - BlockProductionError, -> { - todo!() -} diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index c9e14b2714a..8dd7992751f 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -6,11 +6,9 @@ use reqwest::StatusCode; use serde::{Deserialize, Serialize}; use strum::IntoStaticStr; pub use types::{ - Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, - Hash256, Uint256, VariableList, kzg_proof::KzgProof, kzg_commitment::KzgCommitment, blob::Blob, + blob::Blob, Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, + FixedVector, Hash256, KzgCommitment, KzgProof, Uint256, VariableList, }; -use types::{KzgCommitment}; - pub mod auth; pub mod http; diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index bc4d790d8e2..1d536f1171f 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -10,7 +10,7 @@ use serde::de::DeserializeOwned; use serde_json::json; use std::time::Duration; -use types::{EthSpec, FullPayload, execution_payload::BlobsBundle}; +use types::{EthSpec, FullPayload}; pub use deposit_log::{DepositLog, Log}; pub use reqwest::Client; @@ -671,14 +671,18 @@ impl HttpJsonRpc { pub async fn get_blobs_bundle_v1( &self, payload_id: PayloadId, - ) -> Result, Error> { + ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); let response: JsonBlobBundlesV1 = self - .rpc_request(ENGINE_GET_BLOBS_BUNDLE_V1, params, ENGINE_GET_BLOBS_BUNDLE_TIMEOUT) + .rpc_request( + ENGINE_GET_BLOBS_BUNDLE_V1, + params, + ENGINE_GET_BLOBS_BUNDLE_TIMEOUT, + ) .await?; - Ok(response.into()) + Ok(response) } pub async fn forkchoice_updated_v1( diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 4907acee3e8..eeea53724ab 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,6 +1,6 @@ use super::*; use serde::{Deserialize, Serialize}; -use types::{EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList, execution_payload::BlobsBundle}; +use types::{EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -278,45 +278,6 @@ pub struct JsonBlobBundlesV1 { pub aggregated_proof: KzgProof, } -impl From> for JsonBlobBundlesV1 { - fn from(p: BlobsBundle) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. 
- let BlobsBundle { - block_hash, - aggregated_proof, - blobs, - kzgs, - } = p; - - Self { - block_hash, - aggregated_proof, - blobs, - kzgs, - } - } -} - -impl From> for BlobsBundle { - fn from(j: JsonBlobBundlesV1) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. - let JsonBlobBundlesV1 { - block_hash, - aggregated_proof, - blobs, - kzgs, - } = j; - - Self { - block_hash, - aggregated_proof, - blobs, - kzgs, - } - } -} - - #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonForkChoiceStateV1 { diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 6722e47a992..99f86b86ec0 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -4,6 +4,7 @@ //! This crate only provides useful functionality for "The Merge", it does not provide any of the //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. +use crate::json_structures::JsonBlobBundlesV1; use crate::payload_cache::PayloadCache; use auth::{strip_prefix, Auth, JwtKey}; use builder_client::BuilderHttpClient; @@ -787,10 +788,10 @@ impl ExecutionLayer { timestamp: u64, prev_randao: Hash256, suggested_fee_recipient: Address, - ) -> Result, Error> { + ) -> Result, Error> { debug!( self.log(), - "Issuing engine_getPayload"; + "Issuing engine_getBlobsBundle"; "suggested_fee_recipient" => ?suggested_fee_recipient, "prev_randao" => ?prev_randao, "timestamp" => timestamp, @@ -808,22 +809,15 @@ impl ExecutionLayer { &[metrics::HIT], ); id - } else { + } else { error!( self.log(), "Exec engine unable to produce blobs, did you call get_payload before?", ); - return Err(ApiError::PayloadIdUnavailable); + return Err(ApiError::PayloadIdUnavailable); }; - engine - .api - .get_blobs_bundle_v1::(payload_id) - .await - .map(|bundle| { - // TODO verify the blob bundle here? - bundle.into() - }) + engine.api.get_blobs_bundle_v1::(payload_id).await }) .await .map_err(Box::new) @@ -937,18 +931,6 @@ impl ExecutionLayer { .map_err(Error::EngineError) } - pub async fn get_blob( - &self, - _parent_hash: Hash256, - _timestamp: u64, - _random: Hash256, - _finalized_block_hash: Hash256, - _proposer_index: u64, - _versioned_hash: Hash256, - ) -> Result { - todo!() - } - /// Maps to the `engine_newPayload` JSON-RPC call. /// /// ## Fallback Behaviour diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 05886a4d023..828be8e5760 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -4,7 +4,7 @@ use lru::LruCache; use slog::{debug, warn, Logger}; use state_processing::BlockReplayer; use std::sync::Arc; -use types::BlindedBeaconBlock; +use types::beacon_block::BlindedBeaconBlock; use warp_utils::reject::{ beacon_chain_error, beacon_state_error, custom_bad_request, custom_server_error, }; diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index b84215a3c17..a6488be23a9 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -21,9 +21,6 @@ const GOSSIP_MAX_SIZE: usize = 1_048_576; // 1M /// The maximum transmit size of gossip messages in bytes post-merge. 
const GOSSIP_MAX_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M -const MAX_REQUEST_BLOBS_SIDECARS: usize = 128; -const MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS: usize = 128; - /// The cache time is set to accommodate the circulation time of an attestation. /// /// The p2p spec declares that we accept attestations within the following range: diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 2fb49319840..1029204ae6f 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -516,7 +516,6 @@ impl PeerManager { match protocol { Protocol::Ping => PeerAction::Fatal, Protocol::BlocksByRange => return, - Protocol::TxBlobsByRange => return, Protocol::BlocksByRoot => return, Protocol::BlobsByRange => return, Protocol::Goodbye => return, @@ -533,7 +532,6 @@ impl PeerManager { ConnectionDirection::Outgoing => match protocol { Protocol::Ping => PeerAction::LowToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, - Protocol::TxBlobsByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, Protocol::BlobsByRange => PeerAction::MidToleranceError, Protocol::Goodbye => return, diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 3c27e0f40e3..f88721a81d6 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -16,8 +16,8 @@ use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; use types::{ - EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockMerge, SignedBeaconBlockEip4844 + BlobsSidecar, EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, + SignedBeaconBlockBase, SignedBeaconBlockEip4844, SignedBeaconBlockMerge, }; use unsigned_varint::codec::Uvi; @@ -550,9 +550,7 @@ fn handle_v1_response( Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - Protocol::BlobsByRange => Err(RPCError::InvalidData( - "blobs by range via v1".to_string(), - )), + Protocol::BlobsByRange => Err(RPCError::InvalidData("blobs by range via v1".to_string())), Protocol::Ping => Ok(Some(RPCResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -627,15 +625,15 @@ fn handle_v2_response( )?), )))), }, - Protocol::BlobsByRange => match fork_name { + Protocol::BlobsByRange => match fork_name { ForkName::Eip4844 => Ok(Some(RPCResponse::BlobsByRange(Arc::new( - VariableList::from_ssz_bytes(decoded_buffer)?, + BlobsSidecar::from_ssz_bytes(decoded_buffer)?, )))), _ => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, "Invalid forkname for blobsbyrange".to_string(), )), - } + }, _ => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, "Invalid v2 request".to_string(), diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index c958e134d98..62059610d24 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -12,8 +12,8 @@ use std::ops::Deref; use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; -use types::{Epoch, EthSpec, Hash256, 
SignedBeaconBlock, Slot}; use types::blobs_sidecar::BlobsSidecar; +use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// Maximum number of blocks in a single request. pub type MaxRequestBlocks = U1024; @@ -23,6 +23,9 @@ pub const MAX_REQUEST_BLOCKS: u64 = 1024; pub type MaxErrorLen = U256; pub const MAX_ERROR_LEN: u64 = 256; +pub type MaxRequestBlobsSidecars = U1024; +pub const MAX_REQUEST_BLOBS_SIDECARS: u64 = 1024; + /// Wrapper over SSZ List to represent error message in rpc responses. #[derive(Debug, Clone)] pub struct ErrorType(pub VariableList); @@ -232,12 +235,6 @@ pub struct OldBlocksByRangeRequest { pub step: u64, } -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct TxBlobsByRangeRequest { - pub execution_block_number: u64, - pub count: u64, -} - /// Request a number of beacon block bodies from a peer. #[derive(Clone, Debug, PartialEq)] pub struct BlocksByRootRequest { @@ -257,13 +254,11 @@ pub enum RPCResponse { /// batch. BlocksByRange(Arc>), - TxBlobsByRange(Box>), - /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Arc>), /// A response to a get BLOBS_BY_RANGE request - BlobsByRange(Arc, T::MaxRequestBlobsSidecars>>), + BlobsByRange(Arc>), /// A PONG response to a PING request. Pong(Ping), @@ -278,13 +273,11 @@ pub enum ResponseTermination { /// Blocks by range stream termination. BlocksByRange, - TxBlobsByRange, - /// Blocks by root stream termination. BlocksByRoot, - // Blobs by range stream termination. - BlobsByRange + /// Blobs by range stream termination. + BlobsByRange, } /// The structured response containing a result/code indicating success or failure @@ -345,7 +338,6 @@ impl RPCCodedResponse { RPCCodedResponse::Success(resp) => match resp { RPCResponse::Status(_) => false, RPCResponse::BlocksByRange(_) => true, - RPCResponse::TxBlobsByRange(_) => true, RPCResponse::BlocksByRoot(_) => true, RPCResponse::BlobsByRange(_) => true, RPCResponse::Pong(_) => false, @@ -415,14 +407,11 @@ impl std::fmt::Display for RPCResponse { RPCResponse::BlocksByRange(block) => { write!(f, "BlocksByRange: Block slot: {}", block.slot()) } - RPCResponse::TxBlobsByRange(blob) => { - write!(f, "TxBlobsByRange: Block slot: {}", blob.beacon_block_slot) - } RPCResponse::BlocksByRoot(block) => { write!(f, "BlocksByRoot: Block slot: {}", block.slot()) } RPCResponse::BlobsByRange(blob) => { - write!(f, "BlobsByRange: Blob slot: {}", blob.len()) + write!(f, "BlobsByRange: Blob slot: {}", blob.beacon_block_slot) } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), @@ -472,13 +461,9 @@ impl std::fmt::Display for OldBlocksByRangeRequest { } } -impl std::fmt::Display for TxBlobsByRangeRequest { +impl std::fmt::Display for BlobsByRangeRequest { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "Execution block number: {}, Count: {}", - self.execution_block_number, self.count - ) + write!(f, "Start Slot: {}, Count: {}", self.start_slot, self.count) } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index ecc0deb3ab1..75e78b0b322 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -24,6 +24,7 @@ pub(crate) use handler::HandlerErr; pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse}; pub(crate) use protocol::{InboundRequest, RPCProtocol}; +use 
crate::rpc::methods::MAX_REQUEST_BLOBS_SIDECARS; pub use handler::SubstreamId; pub use methods::{ BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, MaxRequestBlocks, @@ -125,14 +126,12 @@ impl RPC { methods::MAX_REQUEST_BLOCKS, Duration::from_secs(10), ) - //FIXME(sean) + .n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10)) .n_every( - Protocol::TxBlobsByRange, - methods::MAX_REQUEST_BLOCKS, + Protocol::BlobsByRange, + MAX_REQUEST_BLOBS_SIDECARS, Duration::from_secs(10), ) - .n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10)) - .n_every(Protocol::BlobsByRange, 128, Duration::from_secs(10)) .build() .expect("Configuration parameters are valid"); RPC { diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 3e2a0200779..a2029fd24c0 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -37,7 +37,6 @@ pub enum OutboundRequest { Status(StatusMessage), Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), - TxBlobsByRange(TxBlobsByRangeRequest), BlocksByRoot(BlocksByRootRequest), BlobsByRange(BlobsByRangeRequest), Ping(Ping), @@ -73,19 +72,15 @@ impl OutboundRequest { ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), ], - //FIXME(sean) what should the protocol version be? - OutboundRequest::TxBlobsByRange(_) => vec![ProtocolId::new( - Protocol::TxBlobsByRange, - Version::V2, - Encoding::SSZSnappy, - )], OutboundRequest::BlocksByRoot(_) => vec![ ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), ], - OutboundRequest::BlobsByRange(_) => vec![ - ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), - ], + OutboundRequest::BlobsByRange(_) => vec![ProtocolId::new( + Protocol::BlobsByRange, + Version::V1, + Encoding::SSZSnappy, + )], OutboundRequest::Ping(_) => vec![ProtocolId::new( Protocol::Ping, Version::V1, @@ -106,7 +101,6 @@ impl OutboundRequest { OutboundRequest::Status(_) => 1, OutboundRequest::Goodbye(_) => 0, OutboundRequest::BlocksByRange(req) => req.count, - OutboundRequest::TxBlobsByRange(req) => req.count, OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, OutboundRequest::BlobsByRange(req) => req.count, OutboundRequest::Ping(_) => 1, @@ -120,7 +114,6 @@ impl OutboundRequest { OutboundRequest::Status(_) => Protocol::Status, OutboundRequest::Goodbye(_) => Protocol::Goodbye, OutboundRequest::BlocksByRange(_) => Protocol::BlocksByRange, - OutboundRequest::TxBlobsByRange(_) => Protocol::TxBlobsByRange, OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, OutboundRequest::BlobsByRange(_) => Protocol::BlobsByRange, OutboundRequest::Ping(_) => Protocol::Ping, @@ -135,7 +128,6 @@ impl OutboundRequest { // this only gets called after `multiple_responses()` returns true. Therefore, only // variants that have `multiple_responses()` can have values. 
OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, - OutboundRequest::TxBlobsByRange(_) => ResponseTermination::TxBlobsByRange, OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, OutboundRequest::Status(_) => unreachable!(), @@ -192,7 +184,6 @@ impl std::fmt::Display for OutboundRequest { OutboundRequest::Status(status) => write!(f, "Status Message: {}", status), OutboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), - OutboundRequest::TxBlobsByRange(req) => write!(f, "Blobs by range: {}", req), OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index fc20f4ab3db..ec308d1eef3 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -71,11 +71,7 @@ lazy_static! { + types::ExecutionPayload::::max_execution_payload_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field - pub static ref BLOB_MIN: usize = BlobsSidecar::::empty() - .as_ssz_bytes() - .len(); - - pub static ref BLOB_MAX: usize = BlobsSidecar::::max_size(); + pub static ref SIGNED_BEACON_BLOCK_EIP4844_MAX: usize = *SIGNED_BEACON_BLOCK_MERGE_MAX + (48 * ::max_blobs_per_block()); pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize = VariableList::::from(Vec::::new()) @@ -108,7 +104,7 @@ lazy_static! { pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M /// The maximum bytes that can be sent across the RPC post-merge. pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M -//TODO(sean) check + //TODO(sean) check pub(crate) const MAX_RPC_SIZE_POST_EIP4844: usize = 20 * 1_048_576; // 10M /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; @@ -160,7 +156,6 @@ pub enum Protocol { Goodbye, /// The `BlocksByRange` protocol name. BlocksByRange, - TxBlobsByRange, /// The `BlocksByRoot` protocol name. BlocksByRoot, /// The `BlobsByRange` protocol name. 
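Quick back-of-envelope check on the `SIGNED_BEACON_BLOCK_EIP4844_MAX` bound defined above: the only growth over the Merge maximum is the fixed-size commitments list, assuming 48-byte (compressed G1) KZG commitments and a 16-blob-per-block maximum.

// Standalone sanity check; the 16-blob maximum is an assumption taken from the
// draft EIP-4844 preset, not something read from this file.
const KZG_COMMITMENT_BYTES: usize = 48; // compressed BLS12-381 G1 point
const MAX_BLOBS_PER_BLOCK: usize = 16; // assumed preset value
const EIP4844_EXTRA_BYTES: usize = KZG_COMMITMENT_BYTES * MAX_BLOBS_PER_BLOCK;

fn main() {
    // Mirrors `SIGNED_BEACON_BLOCK_MERGE_MAX + 48 * max_blobs_per_block()` above.
    assert_eq!(EIP4844_EXTRA_BYTES, 768);
    println!("EIP-4844 adds at most {EIP4844_EXTRA_BYTES} bytes of commitments per block");
}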
@@ -302,9 +297,10 @@ impl ProtocolId { Protocol::BlocksByRoot => { RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX) } - Protocol::BlobsByRange => { - RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX) - } + Protocol::BlobsByRange => RpcLimits::new( + ::ssz_fixed_len(), + ::ssz_fixed_len(), + ), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -478,9 +474,11 @@ impl InboundRequest { ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), ], - InboundRequest::BlobsByRange(_) => vec![ - ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), - ], + InboundRequest::BlobsByRange(_) => vec![ProtocolId::new( + Protocol::BlocksByRoot, + Version::V1, + Encoding::SSZSnappy, + )], InboundRequest::Ping(_) => vec![ProtocolId::new( Protocol::Ping, Version::V1, @@ -501,7 +499,6 @@ impl InboundRequest { InboundRequest::Status(_) => 1, InboundRequest::Goodbye(_) => 0, InboundRequest::BlocksByRange(req) => req.count, - InboundRequest::TxBlobsByRange(req) => req.count, InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, InboundRequest::BlobsByRange(req) => req.count, InboundRequest::Ping(_) => 1, @@ -515,7 +512,6 @@ impl InboundRequest { InboundRequest::Status(_) => Protocol::Status, InboundRequest::Goodbye(_) => Protocol::Goodbye, InboundRequest::BlocksByRange(_) => Protocol::BlocksByRange, - InboundRequest::TxBlobsByRange(_) => Protocol::TxBlobsByRange, InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, InboundRequest::BlobsByRange(_) => Protocol::BlobsByRange, InboundRequest::Ping(_) => Protocol::Ping, @@ -530,7 +526,6 @@ impl InboundRequest { // this only gets called after `multiple_responses()` returns true. Therefore, only // variants that have `multiple_responses()` can have values. 
InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, - InboundRequest::TxBlobsByRange(_) => ResponseTermination::TxBlobsByRange, InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, InboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, InboundRequest::Status(_) => unreachable!(), diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 8cd1e749e36..62a81f3e233 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -161,7 +161,9 @@ impl RPCRateLimiterBuilder { .bbrange_quota .ok_or("BlocksByRange quota not specified")?; - let blbrange_quota = self.blbrange_quota.ok_or("BlobsByRange quota not specified")?; + let blbrange_quota = self + .blbrange_quota + .ok_or("BlobsByRange quota not specified")?; // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index e5d81737cfb..57f2074b4bb 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -1,8 +1,9 @@ use std::sync::Arc; use libp2p::core::connection::ConnectionId; -use types::{EthSpec, SignedBeaconBlock}; +use types::{BlobsSidecar, EthSpec, SignedBeaconBlock}; +use crate::rpc::methods::BlobsByRangeRequest; use crate::rpc::{ methods::{ BlocksByRangeRequest, BlocksByRootRequest, OldBlocksByRangeRequest, RPCCodedResponse, @@ -32,6 +33,8 @@ pub enum Request { Status(StatusMessage), /// A blocks by range request. BlocksByRange(BlocksByRangeRequest), + /// A bloibs by range request. + BlobsByRange(BlobsByRangeRequest), /// A request blocks root request. BlocksByRoot(BlocksByRootRequest), } @@ -47,6 +50,7 @@ impl std::convert::From for OutboundRequest { step: 1, }) } + Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r), Request::Status(s) => OutboundRequest::Status(s), } } @@ -64,6 +68,8 @@ pub enum Response { Status(StatusMessage), /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch. BlocksByRange(Option>>), + /// A response to a get BLOBS_BY_RANGE request. A None response signals the end of the batch. + BlobsByRange(Option>>), /// A response to a get BLOCKS_BY_ROOT request. 
BlocksByRoot(Option>>), } @@ -79,6 +85,10 @@ impl std::convert::From> for RPCCodedResponse RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)), None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), }, + Response::BlobsByRange(r) => match r { + Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRange(b)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange), + }, Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), } } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index f0f6d3faa2a..03ebb218ab7 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1,3 +1,5 @@ +use self::behaviour::Behaviour; +use self::gossip_cache::GossipCache; use crate::config::{gossipsub_config, NetworkLoad}; use crate::discovery::{ subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS, @@ -7,11 +9,10 @@ use crate::peer_manager::{ ConnectionDirection, PeerManager, PeerManagerEvent, }; use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; +use crate::rpc::methods::BlobsByRangeRequest; +use crate::rpc::*; use crate::service::behaviour::BehaviourEvent; pub use crate::service::behaviour::Gossipsub; -use crate::rpc::*; -use crate::rpc::methods::BlobsByRangeRequest; -use crate::service::{Context as ServiceContext, METADATA_FILENAME}; use crate::types::{ subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, @@ -29,13 +30,17 @@ use libp2p::gossipsub::subscription_filter::MaxCountSubscriptionFilter; use libp2p::gossipsub::{ GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, }; -use slog::{crit, debug, o, trace, warn}; +use libp2p::identify::{Identify, IdentifyConfig, IdentifyEvent}; +use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; +use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p::PeerId; +use slog::{crit, debug, info, o, trace, warn}; use ssz::Encode; -use types::blobs_sidecar::BlobsSidecar; use std::collections::HashSet; use std::fs::File; use std::io::Write; use std::path::{Path, PathBuf}; +use std::pin::Pin; use std::{ collections::VecDeque, marker::PhantomData, @@ -44,13 +49,9 @@ use std::{ }; use types::{ consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, - SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, VariableList + SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, VariableList, }; -use crate::rpc::methods::TxBlobsByRangeRequest; -use utils::{build_transport, strip_peer_id, MAX_CONNECTIONS_PER_PEER}; - -use self::behaviour::Behaviour; -use self::gossip_cache::GossipCache; +use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER}; pub mod api_types; mod behaviour; @@ -988,9 +989,6 @@ impl Network { Request::BlocksByRange { .. } => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_range"]) } - Request::TxBlobsByRange { .. } => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["tx_blobs_by_range"]) - } Request::BlocksByRoot { .. 
} => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) } @@ -1261,7 +1259,12 @@ impl Network { Some(event) } InboundRequest::BlobsByRange(req) => { - self.propagate_request(peer_request_id, peer_id, Request::BlobsByRange(req)) + let event = self.build_request( + peer_request_id, + peer_id, + Request::BlobsByRange(req), + ); + Some(event) } } } @@ -1287,21 +1290,17 @@ impl Network { RPCResponse::BlocksByRange(resp) => { self.build_response(id, peer_id, Response::BlocksByRange(Some(resp))) } - RPCResponse::TxBlobsByRange(resp) => { - self.propagate_response(id, peer_id, Response::TxBlobsByRange(Some(resp))) + RPCResponse::BlobsByRange(resp) => { + self.build_response(id, peer_id, Response::BlobsByRange(Some(resp))) } RPCResponse::BlocksByRoot(resp) => { self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } - RPCResponse::BlobsByRange(resp) => { - self.propagate_response(id, peer_id, Response::BlobsByRange(Some(resp))) - } } } Ok(RPCReceived::EndOfStream(id, termination)) => { let response = match termination { ResponseTermination::BlocksByRange => Response::BlocksByRange(None), - ResponseTermination::TxBlobsByRange => Response::TxBlobsByRange(None), ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), ResponseTermination::BlobsByRange => Response::BlobsByRange(None), }; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index d3b5059da04..b29e0c9ff70 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -8,14 +8,13 @@ use ssz::{Decode, Encode}; use std::boxed::Box; use std::io::{Error, ErrorKind}; use std::sync::Arc; +use types::signed_blobs_sidecar::SignedBlobsSidecar; use types::{ Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ForkContext, ForkName, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, - SignedBeaconBlockMerge, SignedBeaconBlockEip4844, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, - SyncCommitteeMessage, SyncSubnetId, + SignedBeaconBlockEip4844, SignedBeaconBlockMerge, SignedContributionAndProof, + SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; -use types::blobs_sidecar::BlobsSidecar; -use types::signed_blobs_sidecar::SignedBlobsSidecar; #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 6f8d86a4d9b..f574daf91a3 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -570,21 +570,6 @@ impl WorkEvent { } } - pub fn tx_blob_by_range_request( - peer_id: PeerId, - request_id: PeerRequestId, - request: TxBlobsByRangeRequest, - ) -> Self { - Self { - drop_during_sync: false, - work: Work::TxBlobsByRangeRequest { - peer_id, - request_id, - request, - }, - } - } - /// Create a new work event to process `BlocksByRootRequest`s from the RPC network. 
pub fn blocks_by_roots_request( peer_id: PeerId, @@ -741,13 +726,6 @@ pub enum Work { blobs: Arc>, seen_timestamp: Duration, }, - GossipBlob { - message_id: MessageId, - peer_id: PeerId, - peer_client: Client, - blob: Box>, - seen_timestamp: Duration, - }, DelayedImportBlock { peer_id: PeerId, block: Box>, @@ -801,11 +779,6 @@ pub enum Work { request_id: PeerRequestId, request: BlocksByRangeRequest, }, - TxBlobsByRangeRequest { - peer_id: PeerId, - request_id: PeerRequestId, - request: TxBlobsByRangeRequest, - }, BlocksByRootsRequest { peer_id: PeerId, request_id: PeerRequestId, @@ -838,7 +811,6 @@ impl Work { Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::Status { .. } => STATUS_PROCESSING, Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, - Work::TxBlobsByRangeRequest { .. } => TX_BLOBS_BY_RANGE_REQUEST, Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST, Work::BlobsByRangeRequest {..} => BLOBS_BY_RANGE_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, @@ -1090,7 +1062,7 @@ impl BeaconProcessor { } else if let Some(item) = gossip_block_queue.pop() { self.spawn_worker(item, toolbox); //FIXME(sean) - } else if let Some(item) = gossip_blob_queue.pop() { + } else if let Some(item) = gossip_blobs_sidecar_queue.pop() { self.spawn_worker(item, toolbox); // Check the aggregates, *then* the unaggregates since we assume that // aggregates are more valuable to local validators and effectively give us @@ -1331,9 +1303,6 @@ impl BeaconProcessor { Work::BlocksByRangeRequest { .. } => { bbrange_queue.push(work, work_id, &self.log) } - Work::TxBlobsByRangeRequest { .. } => { - txbbrange_queue.push(work, work_id, &self.log) - } Work::BlocksByRootsRequest { .. } => { bbroots_queue.push(work, work_id, &self.log) } @@ -1571,7 +1540,7 @@ impl BeaconProcessor { seen_timestamp, } => task_spawner.spawn_async(async move { worker - .process_gossip_blobs_sidecar( + .process_gossip_blob( message_id, peer_id, peer_client, diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index b1a119b1356..1f56ebc5c8f 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -695,12 +695,12 @@ impl Worker { } #[allow(clippy::too_many_arguments)] - pub fn process_gossip_blob( + pub async fn process_gossip_blob( self, _message_id: MessageId, _peer_id: PeerId, _peer_client: Client, - _blob: BlobsSidecar, + _blob: Arc>, _reprocess_tx: mpsc::Sender>, _duplicate_cache: DuplicateCache, _seen_duration: Duration, @@ -951,22 +951,6 @@ impl Worker { let block: Arc<_> = verified_block.block.clone(); let block_root = verified_block.block_root; - let sidecar = if verified_block.block.message() - .body().blob_kzg_commitments().map(|committments| committments.is_empty()).unwrap_or(true) { - None - } else if let Some(sidecar) = self.chain.sidecar_waiting_for_block.lock().as_ref() { - if sidecar.message.beacon_block_root == verified_block.block_root() { - Some(sidecar.clone()) - } else { - *self.chain.block_waiting_for_sidecar.lock() = Some(verified_block); - return - } - } else { - *self.chain.block_waiting_for_sidecar.lock() = Some(verified_block); - // we need the sidecar but dont have it yet - return - }; - match self .chain .process_block(block_root, verified_block, CountUnrealized::True) @@ -1012,7 +996,7 @@ impl Worker { "Failed to verify execution payload"; "error" => %e ); - } + }, other => { debug!( 
self.log, @@ -1034,10 +1018,6 @@ impl Worker { } }; } - } else { - *self.chain.sidecar_waiting_for_block.lock() = Some(blobs); - } - } pub fn process_gossip_voluntary_exit( self, diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 1557cefe4d9..0859155828e 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -6,10 +6,9 @@ use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, Whe use itertools::process_results; use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; -use lighthouse_network::rpc::methods::BlobsByRangeRequest; +use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MAX_REQUEST_BLOBS_SIDECARS}; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error}; -use lighthouse_network::rpc::methods::TxBlobsByRangeRequest; use slot_clock::SlotClock; use std::sync::Arc; use task_executor::TaskExecutor; @@ -124,15 +123,6 @@ impl Worker { } } - pub fn handle_tx_blobs_by_range_request( - &self, - _peer_id: PeerId, - _request_id: PeerRequestId, - _req: TxBlobsByRangeRequest, - ) { - //FIXME(sean) - } - /// Handle a `BlocksByRoot` request from the peer. pub fn handle_blocks_by_root_request( self, @@ -400,133 +390,135 @@ impl Worker { ); // Should not send more than max request blocks - if req.count > MAX_REQUEST_BLOCKS { - req.count = MAX_REQUEST_BLOCKS; + if req.count > MAX_REQUEST_BLOBS_SIDECARS { + req.count = MAX_REQUEST_BLOBS_SIDECARS; } - let forwards_block_root_iter = match self - .chain - .forwards_iter_block_roots(Slot::from(req.start_slot)) - { - Ok(iter) => iter, - Err(BeaconChainError::HistoricalBlockError( - HistoricalBlockError::BlockOutOfRange { - slot, - oldest_block_slot, - }, - )) => { - debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot); - return self.send_error_response( - peer_id, - RPCResponseErrorCode::ResourceUnavailable, - "Backfilling".into(), - request_id, - ); - } - Err(e) => return error!(self.log, "Unable to obtain root iter"; "error" => ?e), - }; - - // Pick out the required blocks, ignoring skip-slots. - let mut last_block_root = None; - let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { - iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) - // map skip slots to None - .map(|(root, _)| { - let result = if Some(root) == last_block_root { - None - } else { - Some(root) - }; - last_block_root = Some(root); - result - }) - .collect::>>() - }); - - let block_roots = match maybe_block_roots { - Ok(block_roots) => block_roots, - Err(e) => return error!(self.log, "Error during iteration over blocks"; "error" => ?e), - }; - - // remove all skip slots - let block_roots = block_roots.into_iter().flatten().collect::>(); - - // Fetching blocks is async because it may have to hit the execution layer for payloads. 
- executor.spawn( - async move { - let mut blocks_sent = 0; - let mut send_response = true; - - for root in block_roots { - match self.chain.store.get_blobs(&root) { - Ok(Some(blob)) => { - blocks_sent += 1; - self.send_network_message(NetworkMessage::SendResponse { - peer_id, - response: Response::BlobsByRange(Some(Arc::new(VariableList::new(vec![blob.message]).unwrap()))), - id: request_id, - }); - } - Ok(None) => { - error!( - self.log, - "Blob in the chain is not in the store"; - "request_root" => ?root - ); - break; - } - Err(e) => { - error!( - self.log, - "Error fetching block for peer"; - "block_root" => ?root, - "error" => ?e - ); - break; - } - } - } - - let current_slot = self - .chain - .slot() - .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); - - if blocks_sent < (req.count as usize) { - debug!( - self.log, - "BlocksByRange Response processed"; - "peer" => %peer_id, - "msg" => "Failed to return all requested blocks", - "start_slot" => req.start_slot, - "current_slot" => current_slot, - "requested" => req.count, - "returned" => blocks_sent - ); - } else { - debug!( - self.log, - "BlocksByRange Response processed"; - "peer" => %peer_id, - "start_slot" => req.start_slot, - "current_slot" => current_slot, - "requested" => req.count, - "returned" => blocks_sent - ); - } - - if send_response { - // send the stream terminator - self.send_network_message(NetworkMessage::SendResponse { - peer_id, - response: Response::BlobsByRange(None), - id: request_id, - }); - } - - drop(send_on_drop); - }, - "load_blocks_by_range_blocks", - ); + //FIXME(sean) create the blobs iter + + // let forwards_block_root_iter = match self + // .chain + // .forwards_iter_block_roots(Slot::from(req.start_slot)) + // { + // Ok(iter) => iter, + // Err(BeaconChainError::HistoricalBlockError( + // HistoricalBlockError::BlockOutOfRange { + // slot, + // oldest_block_slot, + // }, + // )) => { + // debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot); + // return self.send_error_response( + // peer_id, + // RPCResponseErrorCode::ResourceUnavailable, + // "Backfilling".into(), + // request_id, + // ); + // } + // Err(e) => return error!(self.log, "Unable to obtain root iter"; "error" => ?e), + // }; + // + // // Pick out the required blocks, ignoring skip-slots. + // let mut last_block_root = None; + // let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { + // iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) + // // map skip slots to None + // .map(|(root, _)| { + // let result = if Some(root) == last_block_root { + // None + // } else { + // Some(root) + // }; + // last_block_root = Some(root); + // result + // }) + // .collect::>>() + // }); + // + // let block_roots = match maybe_block_roots { + // Ok(block_roots) => block_roots, + // Err(e) => return error!(self.log, "Error during iteration over blocks"; "error" => ?e), + // }; + // + // // remove all skip slots + // let block_roots = block_roots.into_iter().flatten().collect::>(); + // + // // Fetching blocks is async because it may have to hit the execution layer for payloads. 
+ // executor.spawn( + // async move { + // let mut blocks_sent = 0; + // let mut send_response = true; + // + // for root in block_roots { + // match self.chain.store.get_blobs(&root) { + // Ok(Some(blob)) => { + // blocks_sent += 1; + // self.send_network_message(NetworkMessage::SendResponse { + // peer_id, + // response: Response::BlobsByRange(Some(Arc::new(VariableList::new(vec![blob.message]).unwrap()))), + // id: request_id, + // }); + // } + // Ok(None) => { + // error!( + // self.log, + // "Blob in the chain is not in the store"; + // "request_root" => ?root + // ); + // break; + // } + // Err(e) => { + // error!( + // self.log, + // "Error fetching block for peer"; + // "block_root" => ?root, + // "error" => ?e + // ); + // break; + // } + // } + // } + // + // let current_slot = self + // .chain + // .slot() + // .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); + // + // if blocks_sent < (req.count as usize) { + // debug!( + // self.log, + // "BlocksByRange Response processed"; + // "peer" => %peer_id, + // "msg" => "Failed to return all requested blocks", + // "start_slot" => req.start_slot, + // "current_slot" => current_slot, + // "requested" => req.count, + // "returned" => blocks_sent + // ); + // } else { + // debug!( + // self.log, + // "BlocksByRange Response processed"; + // "peer" => %peer_id, + // "start_slot" => req.start_slot, + // "current_slot" => current_slot, + // "requested" => req.count, + // "returned" => blocks_sent + // ); + // } + // + // if send_response { + // // send the stream terminator + // self.send_network_message(NetworkMessage::SendResponse { + // peer_id, + // response: Response::BlobsByRange(None), + // id: request_id, + // }); + // } + // + // drop(send_on_drop); + // }, + // "load_blocks_by_range_blocks", + // ); } } diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 7ac39782b5b..b4ce4f05997 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -6,7 +6,6 @@ use crate::status::status_message; use crate::sync::manager::RequestId as SyncId; use crate::sync::SyncMessage; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; -use lighthouse_network::rpc::methods::TxBlobsByRangeRequest; use lighthouse_network::rpc::*; use lighthouse_network::rpc::methods::BlobsByRangeRequest; use lighthouse_network::{ @@ -218,40 +217,29 @@ impl Processor { }); } - /// Handle a `BlocksByRange` request from the peer. 
- pub fn on_tx_blobs_by_range_request( - &mut self, - peer_id: PeerId, - request_id: PeerRequestId, - req: TxBlobsByRangeRequest, - ) { - self.send_beacon_processor_work(BeaconWorkEvent::tx_blob_by_range_request( - peer_id, request_id, req, - )) - } - - pub fn on_tx_blobs_by_range_response( + pub fn on_blobs_by_range_response( &mut self, peer_id: PeerId, request_id: RequestId, - blob_wrapper: Option>>, + blob_wrapper: Option>>, ) { trace!( self.log, - "Received TxBlobsByRange Response"; + "Received BlobsByRange Response"; "peer" => %peer_id, ); if let RequestId::Sync(id) = request_id { - self.send_to_sync(SyncMessage::TxBlobsByRangeResponse { + self.send_to_sync(SyncMessage::RpcBlob { peer_id, request_id: id, - blob_wrapper, + blob_sidecar: blob_wrapper, + seen_timestamp: timestamp_now(), }); } else { debug!( self.log, - "All tx blobs by range responses should belong to sync" + "All blobs by range responses should belong to sync" ); } } @@ -286,15 +274,6 @@ impl Processor { }); } - pub fn on_blobs_by_range_response( - &mut self, - peer_id: PeerId, - request_id: RequestId, - beacon_blob: Option, <::EthSpec as EthSpec>::MaxRequestBlobsSidecars>>>, - ) { - - } - /// Process a gossip message declaring a new block. /// /// Attempts to apply to block to the beacon chain. May queue the block for later processing. diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 15b5d242154..d5dfb60fbb8 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -89,21 +89,15 @@ pub enum SyncMessage { RpcBlock { request_id: RequestId, peer_id: PeerId, - beacon_block: Option>>, + beacon_block: Option>>, seen_timestamp: Duration, }, - /// A [`TxBlobsByRangeResponse`] response has been received. - TxBlobsByRangeResponse { + /// A blob has been received from RPC. + RpcBlob { peer_id: PeerId, request_id: RequestId, - blob_wrapper: Option>>, - }, - - /// A [`BlocksByRoot`] response has been received. - BlocksByRootResponse { - peer_id: PeerId, - beacon_block: Option>>, + blob_sidecar: Option>>, seen_timestamp: Duration, }, @@ -598,6 +592,8 @@ impl SyncManager { .block_lookups .parent_chain_processed(chain_hash, result, &mut self.network), }, + //FIXME(sean) + SyncMessage::RpcBlob { .. } => todo!() } } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index cc718818fbe..d44b57258a3 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -38,8 +38,8 @@ use std::marker::PhantomData; use std::path::Path; use std::sync::Arc; use std::time::Duration; -use types::*; use types::signed_blobs_sidecar::SignedBlobsSidecar; +use types::*; /// On-disk database that stores finalized states efficiently. 
/// @@ -480,22 +480,28 @@ impl, Cold: ItemStore> HotColdDB .key_delete(DBColumn::ExecPayload.into(), block_root.as_bytes()) } - pub fn put_blobs(&self, - block_root: &Hash256, - blobs: SignedBlobsSidecar, + pub fn put_blobs( + &self, + block_root: &Hash256, + blobs: SignedBlobsSidecar, ) -> Result<(), Error> { - self.hot_db.put_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes(), &blobs.as_ssz_bytes())?; + self.hot_db.put_bytes( + DBColumn::BeaconBlob.into(), + block_root.as_bytes(), + &blobs.as_ssz_bytes(), + )?; self.blob_cache.lock().push(*block_root, blobs); Ok(()) } - pub fn get_blobs(&self, - block_root: &Hash256, - ) -> Result>, Error> { + pub fn get_blobs(&self, block_root: &Hash256) -> Result>, Error> { if let Some(blobs) = self.blob_cache.lock().get(block_root) { Ok(Some(blobs.clone())) } else { - if let Some(bytes) = self.hot_db.get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? { + if let Some(bytes) = self + .hot_db + .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? + { let ret = SignedBlobsSidecar::from_ssz_bytes(&bytes)?; self.blob_cache.lock().put(*block_root, ret.clone()); Ok(Some(ret)) @@ -512,10 +518,7 @@ impl, Cold: ItemStore> HotColdDB ops: &mut Vec, ) { let db_key = get_key_for_col(DBColumn::BeaconBlob.into(), key.as_bytes()); - ops.push(KeyValueStoreOp::PutKeyValue( - db_key, - blobs.as_ssz_bytes(), - )); + ops.push(KeyValueStoreOp::PutKeyValue(db_key, blobs.as_ssz_bytes())); } pub fn put_state_summary( @@ -746,11 +749,7 @@ impl, Cold: ItemStore> HotColdDB } StoreOp::PutBlobs(block_root, blobs) => { - self.blobs_as_kv_store_ops( - &block_root, - &blobs, - &mut key_value_batch, - ); + self.blobs_as_kv_store_ops(&block_root, &blobs, &mut key_value_batch); } StoreOp::PutStateSummary(state_root, summary) => { diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index aac9cda932a..df6e3950137 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -41,8 +41,8 @@ pub use metrics::scrape_for_metrics; use parking_lot::MutexGuard; use std::sync::Arc; use strum::{EnumString, IntoStaticStr}; -pub use types::*; use types::signed_blobs_sidecar::SignedBlobsSidecar; +pub use types::*; pub type ColumnIter<'a> = Box), Error>> + 'a>; pub type ColumnKeyIter<'a> = Box> + 'a>; diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index d188088251e..45fc709cba6 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -308,9 +308,5 @@ define_hardcoded_nets!( // directory. 
         GENESIS_STATE_IS_KNOWN
     ),
-    (
-        eip4844,
-        "eip4844",
-        GENESIS_STATE_IS_KNOWN
-    )
+    (eip4844, "eip4844", GENESIS_STATE_IS_KNOWN)
 );
diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/sepolia/genesis.ssz.zip
index 1321634cea6faa40295d0ead46ee9a0479e6b529..64aa1a5be9f701031b05e53e9c8533720e00439f 100644
GIT binary patch
delta 132634
zl)&i!e}d^oK?Oi8$&vTqmfn#(q2;y}>@wLn<-Ax)9*Uxw!X#vo$1i3ab1QEU{M-DvuF;Rf-9) za=s(iJrSG=m>*nPtCB9h>FZm>GM3w!5D83xC4lbK47$4%e1$kfj3HI$jW&_)HWph+ zDCaC0UY{d5-6he?)_;MNus2+4oY(OFBU$%Gm;-#~OsQCTlJuI*|FTn*t`a*GSWEN_ z9I+<(rWo6s-2yrgz({6*y4rrjS$=7#F0=T{WxvE!j*O5IetGh`SSG?!{U|v&p|ZANuX!2 z1y40g7jPDpWMVZYr}5?fF5~4Y^8fZTSCRi?XzJgc<-!T<7ag}crN7;HSU!)b3IP4& zkz0{goBcE-1xj@%@!vR3QuBH96~1m%*`_gB(8-rH>VIVCp1~-lo<&^NndRph!B5Us z%FF}3_TAImxGU}ecX=p*kLYS920_xYb5(LpfI4}Bwo^Ae<_5vzBDOEE#5nAuf#cEE zOW+=zsBeWTS>(wi&1GJ^aW-7F@c!RKY$#e=s$#*v0{PoMdxF- zqy1OU^0a?a`@(QqZ0f|Spx;gbKb~AsA0cVa6n`n6GFzSza@WvBEK+}ZIT0k>)gDd1 z5-%v}YgRUb5g*?m73eRN0`^OcnQYs}wqmt~PeV1F@+I;Cr*jSZf)ip{%PFOxr_M#z zAs$*9a!`WJ>{*Y84-NA$ZmTcn_Y!k9sU2myWC%nATwpar&cv{D&=!goEhU5IGW<+%2xP zmlv}sC5Zn6NP&QAb{Pv6RN_G}89LLS_vVIJRRwzTVyWgAR|=u)ZP-96rn~X&KN{c*8O>aM zc?7-4qN?&^0xgNs8M&Fk|L;ox!tW7rYrI0I1!xy^Na!D`X`ilzj3HEXu;;@2%O#68 z`hcBG9s8@onJHVUZe9v~x>$f1?pPT`V3yD1sJmUS(fWr`?k*} z1M6NeO#2O3ZuNyQ(htD3ffLotolV>?+h;uw~JEdl#8}a<@!-820lo z8Nx7J=F)>59Yp|<9vW!{K5rFgH6MOl_EE-&`?Qh6L$cw4RQS+42!Hf;g zhOHtRcda;t%w=sg-BLC$x{BZ-fo`;_X6T?&TJ=f1KptJg7fPnc3KNMVK3EeyEKzKb zmh^bg6fvbf=4n#Q&VMfB{`WpV1XRv{3UJl!3762e6%*{3QucOi5nOP#lK3Ld7(y}p z0{vRd^}fRqWz6&)2udF1Fl@~HwHI+>!`JhdKs)I2@U#>jx3&@D}OX%739(wj(=x#fw(jtTnl; z8qK?RcS2cRx;1Kjyq8k)G(ZEt!iwk@#q>{~dNly|T3yHo^;p|Be99307WQ>0majzR z>^q_k6IgY@^M6pHu0H-Oz&Xo0L=?PKX0>V+%(Y1I+MYdKy@IJSs;?wClHp!%4)bc# za-2?OI`G>BIDa+1|GD4@ZVvfQg;JJSDRJJJ9hW<(@?Jj-E5p11`l>#WN;A#%1eF@f zF(rlWaxDj3Xw;d_1ea_GaW@yHcV;mx>Kv(ZS^*P#yRD+#ug3}avxw20N&n6=W1_jy z>yVPd(^j%K%sJ-W2eojM+{yNAq+z77ybD;4{zemE=rQ%72-xt$vdSGkj_jLM|V;8Z=a0-djJ zo0Aqw@U~aV^ZZ$rSrjEp#}F3M5#6gX#>DMKx873Y$=&K={?X4osRlY9 z*AE8<-Z#)Sd*$NcmBkm0y!PT}&NGz$hb0?P{eM*c=IPMXyC%^;(h_iPP<;gzMy6rN zEqdsshiQO3z7I9)fft4W{T+kWavPK|${-9;CEg(Ul8E??V$eP2F=B(fz6C=Mp{SwS zFSTkedLO#5q3&On_bh;@(nw26(Fy#pBBThqEEECe$#XY^%P^}tnJ@|b z`n7H4rCZ_%IJ1$n?vbSBx{P13^TP*O-G8nndd%;X>-?`@vlgsHMX13kAZURldHS`; z!u=LX4(ON^Jkk4CIE0#@FT%K8FMNaD(xBf?33;C!Q5qUr^IS}v!#kS$LpuM;(Hb4@ z#-Z;wf6KP=LY|4~#qj)1d_Lnv9D^M81y*tjc&Z6&>k*PtV??<1`HI^mlDZ|8)NlqWK?x@Q;$B84kwaI)=vlMZYyzgJR)jxl#7>V=E(S6 zYwUvnGdQ$;L!J?zkeaCy6qQ7)&^oJz#(8b_LY{kCCOXgy)T1srm%LRijnd)3%}<-O z40;|RaLV9C#0m!3DTV*`6rsbVZhxjM7g9h?hS}0ks51jXdITal7vI>s%!^Zu@T?jf zl+%1XZ)?c?jPBD_!9f3xBhAWb%C2I~Tk-GYo_D&(1fs(z_qRzGKheI?_+Unkbb31G z|9?7zn^1U(!Kf3J1n}?AzM&mDTRY;kv_}1=HeX_9e>v-^658LM{!PyY1%LWk9W}{< zD*4u0?<;m>r#$0-O0q5-FfBMpdth_XmdE&WXn>w-mdQ?Nd6~!MvBMk_aFN|IJ(kxS zW5{XZmEM~96ZvJ^SRLD{-P?-|*WM9ykx||>mb&n_5X)wL6@ekBOp@j~TXHGD{lc?B*sR4rFEPMI5m{-K&JTaSgP6DQ%z$Ernyhr7|Q22D^pH>Qfr=!UFtVT^BbND z$3Q`q#|{5jk#rHY=)biEpOt8xDe#LE^0}6#E3D5ay>XD|Nv5hAQ07 zcZ-d}PE&jH1)h)B_=;5ljvkeN3MFxNli7s(?VU7dYbt!N)em zJD|Wa0!PzOlB`zdE4WZ8?+Gk`UQrun;B?bzqOoFH6YNWOOz<$u33N`Ll0zg5#m>6dsBg#6k0R1vN@YPt%# zhTiu;p2Pi?zM^^=M)9i2$@tT^nJ9STjz`vhiR~gzB!>k<1hxNX++1-%GH;26=3*7VRgy2vqyL1PKxhua&!B^AdwWw zKP?=aq+IrM(4)eOH$A?VS>Ic!0qBJq&gASxW!M%Rrhm%m6g-FK*#$LMf8q9WIEE+r zO5SOnJwvAVfm@r9W{a`$wcc#k(BzxcV_pVN=ZuPD`KYpWZDKvpL!%lhKVm4(l~r4P za|K}Bkbf5-Tok$na`-idg!c;5&q4TM-RVg6sT?hR>9dz5Ocw`)V&-K&vVXs9V`J5C zU}j^aSAU7R`8b4Sl?=_uRBwacBHAXVThnBm3QI3K(N$3?i}1JqrWjr@+bqud0VT?v z-}rMRwLljk`qlX?(4IuG92{Vmq%EDEpo|uJRC(uJf+H_xpLn^k zw5b33@H}nZF4yi=^fwWBE~a%H^;MIwKH|(oF;AwtP|qe5X3T^jET|8}_Jk z>Gbn`rqhFH9+>qf)3|I8d@vk)>``%@^mzqDM;CH+Ih89d&xj_ymF8g_?HkQ8c%u9I zO@G$RuG8y4e{hpPiPnkU3SNlM7)388&JG{jwtCmAZ4hkhZ5v>!_Q%>B;CxdwA`_#a+3e19Ci1JfiRJG8|Yun_i5*U)=Pu!kF_PWs;7 z<)2=3sc%Y{7a~*W@x%kV`=`VOUqw0^o_w9}*ecb3D@KBrPGIr$>DM1(#I6S$+s$}h zRBG2WxTVcaQ%6KO;vqmKPN;o&XctMuBe+;tIYwT8^h;ME=7YhD>mFDGl?CYQAAgmN 
zL@Q2Z@Vm+LIKJgQ=o8gUNiyw=Q(u>E>rTW8zi7>3{XoiEsqc)7Dv9UDOaKpCHH`hZ zbS}}FjI-GT#JE{|3SI(tua9O6GPn0X(62>*ccsYm!r6q#fW|1O4<0duSEZqD&jzHt zN^I5H>>XoI@7*MlXIY5IwzxVnuYXQ$AZK-rP|qc~4=L(yV9Lm7Yl1DK!P51faNFdt zX;vI`qP)(3DJ7mzko*UWCfx}x1_;i!jXF&S13U8UB&j3HMh?t)DGT6j^u z9}K7@*NmjewzHO*cOG|A0DZ+5j1pl^R6k%+HGfemhbkAq zZi>F`5|I>(*^4bs|I#NF+oScpw^(8+gld9sd#)Kf1rQwnp?OW6_EL~WWbTq@_4xXd zKCruEuAWl-RV;xRbgOqHJfGNi(Vuy`JNvl-Pw^uOyAhdu6BwNu~%eT{8(X>jq0xw!hWTS^Py#$Nyd0YI0SD{x`sx$?-jePfJ46Y1M zWu9e#$}ftVi>g$fLgd`biG@%Y{^6QEZ5rl-@obf+<_G8h>7z@wOTvdhs^w zMM#;w2txb=?Q1T5m?LINZLh-|+mRDwk&797M?I;bzWGTrCXFe-S}s?i^6 zL9fIs=@$2n=D#Ue8Cby_ui>9^z~ru=DyCo@ zfN%6!Q}|ZhT0$;4<>?%ZH|nX`$%=)+8fjWXr6vnB+SYtM_L%CnCCo||@s!3Oo4>Xb zw$W@zCg@J*U_JnSJ|=MPoI`1C=X++Rk>u%5xoIE0yhI8Skbg~!O<`w$MyCiYwqJWx zRwm_m#dhUHECQ4$Z>Ak_8sR3OkMQtsb|Er~zJEY=iq)4`qm`eUhJX%loh0hPYc9Z8 zD*?w|sA+Q{zY{~YoEUlWN&yq3H7E_ZZ$}aq0%i=bQDR2_1y`_)PFPBOj%=vZn`Y!#Zo~|+HY@6BY}kr4MkMLd~M6pT10QYbsa`9#?-GE zC&%p2(zS)N9Rjw=-fwA5awEWI#GwN(6j*k5bOZ&zSIS&aipSGjAGMTT8Qg9cLO1Bp z$M;GFA{@b(e)(crc9F(hQ?P1F@#G0v^d(cYs4PREMt_S<2S=*O=x>8%g#rx9BT!qn zVLw^rkD4Sf3Kt;$TUyv>bDH|YlYok6`RDXoF6jH?{nVr@+3A2Am_H{&hRpKn7INFy zU-w^im}8ALQ|`Ud)@Y}7nV@=SelI3c{gVC!{3;Mu;7?>q#ow1$#klDmt|Tkw!_Owy zslFXw8A6dgwoL|D=rHsoIACh6xHeNB`XcSTG1kAsueOA z_MEx!*kuF#eN&EEYOE2xUW~Y#ba5)HBXhEmsgZiw=T!80MWEyAaugoc467_*@MkF$ z{KJ<$XG-B5*A??Qj@2$d`CKTUipRy9dQp|)_H^7nzxxrWCD>b@ z6fh31D9b{zkmfDctXl)z@pW_3WYt&eB4juaDDxe;<7BSXMrru_r|iH=M;d{CWX{z= z34fZSKr04B@LUZAg~SB2@>O#Dl(~eG57E|hXLnBj;Z4)zDHN)3!hIUJga*J?@_Gd) z+R@pGl52Ly(j><3P4*T26O>}MKE}M#Y!npSV zKY6F1@ap$~keS{APmFRRibD8?g`CO?;D37=22Gx_P5&ILbP89(Ff}&aQ=+xY^~f2$ z8yM{6$v~tjJ#kB53fqw^9tn#O>*XP5bzi6xKj){U%V~WYO3XypH zw*3jnVF^~mG=CZvsx+hK5h`P$H?NE@SpGqy^o&6Lw+ebVdSiKfG13w4OxQTM&wp(? z3BL1}-a{uWwBBe8$6;9{#otRzKNc0+jDZ+mvKo~UvQ1M+bmU5uRe z#y)t(gbQ3sy@Tr0fWc1V5L9hI<$s?NGxno)xh2~(OLiZ-6iEopss`pN0Jeb1*^3a&K(@Kxp zz%FGjgK{HycRRQLx@O@^V5|Hnk+TAQ;Z--}03O3~^BLUrS2NU+SKc?{qJKs}K8rmE z&m10R?p7bor-k+ltqzq43ujQlmS_?1b0?u4k3aU;p)DJc{ojQT_9Mv^+$%u5czCUv zak2vRHSPkI#IN{ZO;7atc29oMfm~v;(5D1!pKPwF(-VPeBI`fT=rOCKbx^y1G~nk_ zmw{T8DkjeqdG6n~d|&V}(tn#x4Q4NSh^HVocjPSiwLupdMN8QmD*lDgEszK_9erAf zc$5piZsA_GVwpY619YiFbAHCZm|);DsPPHJF_-9+0c}^@CSL~@HMrYzRqqq@MwFeo zs9>>Fpkd!6g;wiVK)3VLCK=IQOo@`r^?6myy;36JHxm4Pa?tf}w154Fy7oo`Xjj0klJh{z6%35~ z<|wHZ@nHc%aH+iiQh&7+d;XZFzp*K4dp4ae(Q3jZ|t7mwm zA%#g2HfhB6%23vN+q;K04N0PJ9{?Fl_^N9`GlLCXBbEYy@|QMM6%(AK_f2X)jE0L7 zbWd9(g@*c599~O_G458VzLkz9&1@nV*de~+0;&vY)@p!!>3?!xD#JmmY}@;Yx#{`{ zKo;)7_Gnw?2c6CT^^qM{P=zt?$$bw=^Yy`JHkuQ3OG>fjK$_x9eB!ev26;0yp9wer zZv6pE6H%mkSgTRHx=r3AJ~$LHE+NSfL47budT`%@mePTnr`1k^cOis zr%f(*!4i&29e;E)UCnb*EH4F1*P~W{%$aU<`}^R{ka3%r^EWXv?bo(ZY=!bAoCPH5 zViqA|azrZ49$--Qn?99KOAIMbuzD7~rm>6n4l=h-j`S0hzgOSbL%kjH(zyLfQLF8OiIfX#kk;1B)K2_(CtrGLx^wFopSpkS%nk_+BF*gHjx z9L$a6YNq0<(R7YD2h64YwEzP2$p3s)v@duKbzofEL`BiBKsrrCh||BoR=S2QOn*8w z6*L*k`&8s)G+K9qHV3H9fkhxSLDMehYwPuK;`I`EtaBkf?;J{ictbSXwJs=CnOSK$1GbJdkv6 zoUf>ktZCaVK{LM~5LpDbDFw)_kOu73-&V1M-eaEPpC$@;o~6TDo}-1GVdrW{Lf{h`?<9WLdw@PBBwd<;u4eB4)Nz28$t*%~W`A zqlp&^xBox%;btfY?^#)&R)x71WDSLbp*H!3QRXUzXoY!9`X3U>9NLT+^GcW_cfNEH!NU2^Q zbt78_xE^zw$KCqI;7MxGC9t_UsB@i$z$~pullW$TCeDvU8E%iC22`VViq^TSBQKef zIgImEuk#RY+k!0p7 z9)7Ud`Tg;uU-q|y%}|%FCAE@84V9e(oy!cm3NG8CEJMc#*SmzXDYV~ZnM*N9BY&`e zYxo$ZajnHA5Xl+&j?IKb`^12|t@};~9N3h?b5y5;@8F+Eg~jQILF0gTlV4$p76x&e z%7o*94)(Tw6CchB|Fxv^L583#5XlC1$Q)X$wNB)SFTGN2lJM7<7x#{jag1=*S8K2J z^MLw8!2i1;2O_U(n~{m`*A=Eq`;nf^471Z>5>{ z78L%7;cNC6@NG>soQ5j0Kk~8osO>TJw7nIUu~s|}08hpP=|)>jSP2oECDHwIp2fkH zRcj_^DDNr(uJAz6xy%5|+}odlRNXBEJ*c_qOefMhY&VfKe`M0rtV7GedVh0slGH2C 
z32(LOE7Hn#v|Oiw6U;2cKgOQ4gy{W?5%I?<7{@#%qLy;})%OaHq&1*#VHz{VSJ!09 zO_P68-@d!IJi+}fU}*{^JO7FvQ!NYycwPGC?0CmT%c+9#NDHqJp3))eB>7PTrPV`JHr{Ow^#g|&mN`5 ze*k+nJqPg@9jzOoVe=cy5MEmAhPuNYp6IyFWo>=!OU%ax`ud1?gMTo`g8b-pUw`BP zhx30AP3%g0uw%f=`cJV?wq z7qyvl>Mc)ZQR+eSM`ZM%rwP{ow`^q9OjkvkV+_C}YnsMDc~~`ex)h?W{5y094CmlX zNjGOVqR|h?D*q3;K7UjX(GAk61$F5zx?_S>Mjy9?@1qj2aAICDw8XR7ZMe>Qi1$}H(Go`3TJF|Y)Ow3Y_>kx9Y+ zRj-tyvMf5v&lTJ5>fp-WR!94w?-uo-56!MD#%jkW+UU$R-s^=LnceKG_J?b8>)QAN zztAay2ZMj~Dzy>?A8vjv>k0+UC6IlWwq9Bs+d%GSqvU#3x~x2~c;N z%JIdQ6#$JD*qV(cvWuVjN?^a&kAdI?2El-0YOuUF_o#i@RM6?BjLcr{SxF18%h?03 zRWQK`q<`G6LXMw5pz5MEga5qrN}^kuc=4>a#PHm@6!mG-0)(M$8(S<+k{#o3MS&l&wBfg>d@Jk>#!L1(FUBS}joy-)e+D^9 zf_eJHL07O9Ho+8j(hZW+c>ha|xDR^SuN)4ZerQSHRn)b=$4E=!=2vA|uaq^;4&T_p zjVqi0*nqOBPlWGZ$XgA?aJKE?#nW5dgMYj0ebdwISI!o;Au^$e=#L-c1#E_MDO>3p2cZjEIDnpV6FI1wq}T`#7N4b(y^%LHIct3Trw5@u?K5bv0d86%SATKB z&!=rLz?14(S8cA`&lXry4i;$D(zw28VJanmSuaVjR>@Qdz~_bg*$1Lv1PRKJqR()?0}0|$SNXF%*(-Na9*tU^>w*rpg>}aCIjN( z?0`V+W|P_n%?$_DTn>-7v7w+dbbnkD^!d0p6JAM8bGY%xn>T}r2x+YDx8lzUuf&zKw{Xx=W?(J>I}AmNwRc`ibhxhs zwW>wG3sL>=HJYvOFl_}I3Quit?V}=@)DaU~Km7h71Fup~wTk$@x|X!EUXY)&qCC#@ z;TB9}yMCUO{5@KtpexwA$A7EFa{S>q)!a)p6A!|cl(lD)Mb=<_oQWez$o&`+t?G;k zLVVi0miWHvSHzS7Ti|}7u`f^G6n;@Wi+lVeE{@H<0((cMZNR_2zZ3onIuJ;o+@5O| zyg7#xF$|6kp&(}dv<{x;X`tWt%Me4!t2Hs}rht;&Wo9H~H5i z&9AB&=zC#Ql0NnZ-T3S$Rx}h)Oyrwmouga!t9yY- zD0s`MCduIYUw1|G0xfiocykjahM#|$(Bi+Yks4$Iln1{07=P|=t~eZgl-k3KC9ct# z52t#K_!t?88+kCG-^Xd;X8i2|Kg9y8lw!|VRF{wNxM9_B7UQcLTmT13rxCJ7S!m3e zH$E)IJonZn82@_FR&B6==XLa?9ds`9I#8!E?2({nV;Q%zKW89@Juw$tw5=Z@b7nI; z9GQ$+{+~2=n;HGKy2xt76|6oWV7P>WSZG#m^X(f4dw*MX(YqKTB=tV0uf8K>dpjfO z-MA440r1?om4`-KlOM(9M$pM~@nuI%rc%R2vEaX+AKfS|EwEgv=qe@7*i_@HF@FKW zbIQ%Xw|j`Im#_%35T8B4N^SgdWEkN~;Rf~~J^q8ay`2$2Sa-Jai{eHYB%_I+^F2!M z?q;aJXn%gI<3?g-aVK4}?@W$; zeaFu-%_WoApl?(26!A4DseN`B(A#9R>BAlbG=!j>igLv2E%6a{?&dK>=9=Y7uvR+L zWsHW^V~>D2m|)+8pjgzus%`%uzlAbb-k$>XlYbvQH$p7EP{N>JiwRMEyZ*O1Lhb1a zO{$PEhf$SO`JDCIFAJYAh~Yh{xvwa{n!$Vs@^ymrLM=`Zs(~%s9ILLN@SvfmD@}A} z&15;Y8}y3Flc{{Krbp4sInbexKNZ@we$$zM%YK^u^{0V;i>Bkg4=UFF5e{K`WiT@O z_Q0q55LH@ZjGd@2um8~v6A_}Ua6w6}h%UGRE)cBnYyKzb1Y1A| z8z~Z|_6RVIg|QwkTI_|0M8K2Nw>LR$^qpA94D1lY;iM_!QBm%DIj)_{tsdaMw5lpU zfoaVq`KB(w&ybDk*O6&ue#vG`sEK<44S%|Qu*U6c>)l_w;b?*I!;SA9vfw_dQ)bkL zmcQ{Z>;Dm-K}ffs={)SW%Ll8`upzMptN_uL_L^L(m9PHT#~ZuhHD4O#%>t2Vwec5g zFu*W=flja`+^{Dt@_FE$wAgbVMVOXTLXiOqpQh@YBcB}4`ywQkrgTBy9 zLCb`K^LDC%^U5PDPbTXgxD;ncvVrCVcvBKmMO_N-2x&mmjl9*z!3yd*-rW7Lzud3+Mp`mEcwj4Sylu@*xS-7HRPMsiOTUUmX`HebU($>sF5AAq-$K z>go$K`FN>Nw87F^HUP#bsy}^Iqz`7}uzw57S(;+@saHD6+n`vCt&Y{)K!2+X>fp%J z`(t6DZ|)Pg)|?maChTwrA(~Rz8|{i5^&Y;}1g5piSZ8G+OBq=O>VKE}sfXAf zB-&+AM$ey$#MR5%We5gJpfc*bi=P|wK*wBT(g2O=dS$Gi9_g*)7!33?860HV!pk4a-e#2p3+w`w>74m!RPZ*X z0Q8e9%I4C*Mo_X&BnB9O-hWny37jM+VQl|%#3h4(Mp>bRmI)y|lZTXsu$V!bYISU` z1JDXj^_&~-+qeZ*(CfIcX8TNr-3|nQu>&SHmp5@RB4Pyn^Z1DuueC8cB$pEs27k@DlZ?Z$? 
zhhCqg6PjoTRLhTesK~O=Lp~-J(8JM-h0pS&2h6uf!>QR~y_UY5iSW-~KKQsnGHpA2 zAN0~S=(}yBk4XOW#V*`(zBSAiXmq(-^ixPJc}YxBTA`BZFwVat`+T-j==#NmBTu;*#h) z0c=<=Fl!kGfZZ%h)@;2HQI3c4!Ea1&X)+63+NK{A{2vuFI#y*@PU2@JAXnlTsHZBiBe&JBo-j_Uk_M(e(nZ z9hLZ4sed%u##ZMisc$dd{HcJPm8IMH*KONxPICGF+7*#v-82)$#_=(=`YhH351_}v zxBN{01q)6RPHYpFI3)(t(n>||$6z{1N(HvX!{I6uaGCS9yEDTo#Q^eRxVYy62Jp~F z-xS2ZpEEgKc*uFh&&c=ZCpFNi;7fTb_#^lP`hQzpSF1!XOHtz|NeWK2lVjku7A;mu zX-h}Nm0jX)q_DQdDA%=LN4$ITHg>dmA*oLV#1&;TLeh$%|Ksq(bg9p80wgVHr&>(~ za)WN3HZ(v7^aaup-PIgVI@|?qHu9#7bu`=DVU!c1d>uJ&To{#vnqn~AXRNE+^1PuC z$A2d=OaC9tJwHk3;^AMoXjxzLb%zgDt}1Ct6u+30PF5ckZGtXyE@Tg%M?~xRHN!uH zF;SE@Af2_%9_^k~%T0^@oJyVMDH-6h?@~(Rt!pddBD~jB0~~r<29$wes&;{QKhR{d zeT-ap*1F^k8uL31w=;)=UZ5Up3AaHd9e*IlG)1QRWkBv-R{me2?~~6*T+%bd4=C~? z?PoB`g_|7`QWCq34nsF#!05p_``?>Cr8WJ`L?X*^ljX1mEW@H%_=ibwpg4*yfaUC;dYztii6a=B=+C@(qTV~2^32p7jNOTPh@zkiVj zN3N!WCQzA;_6xJ$A?->6Hx+(`DsGd?sO8#%Zb_lcCsNm=F^Aa5Cf{`?}OgwvWEV&nXwrGSs#5PH6!8elz&mb2KZ!H zx}<-P`-;walB=gDynXh9E;91nUB9QX*)9ve>MPNJ_W$}Fz0=s&sEbweUc4MHPcUM( zJ5LeS>#uP)DQb+4zgZ2iX>?K3fwd|*;#4brIffh-58S7K1qX0J?VgihmV&+vX&WgB z6E*XI70HLQE6kR9I{pon!hc9Rd3JL0v9dWtOUSgWrsf#Z86Q8580@G1Itj>5$cAJP zF!>jSiYGB)1-j4XUg#~Z+$0hIs{s%720d&h&8or}$gnQOV%JQNaI@l^qbuP9qR6{D zvH9`%`Cd9>8-EMeXAJN0JAPG9>_2t^Dho2+@Qjz53Ni-yNz6Xi;6PvhlpX${Qq(~wuyqsegmTL_ z#xpf9>Y=}=N74EUAAcl^myNLNwnk@@A+Tut_HF1U4hIlBl_6eC^{m3}i=JUbV`urA zCad#DiE!g@(+}Ld4bTmfXCj4u7y%foZ=T&>4XSFUHZW zf#uclQPdYgRg}VCWTnD0^bu)gWNG+6-oS!pnF|wLB2Dj_3VXdLq2s>*{N>+p#|67T zz|Hl+3EDBLKhZ=U9^k-)U6kC?c|#sSesK7@OO!!TKmEZUYl8Fp#9Ab+s?~>`AoA%< zCBdXryl<^D<9`e<*GSqk?v{t@KLjQK4)Y{8vP?ohxNzULE8fV`s-?JWEhJF}T3zIl z?@n-_H)yc5j(kH^wDU*DLfRtyEwzZs?Gh4x!t$0OpDU<8Z(_)|CdzAG)%e8grq z$pEC9ka!EI>~N=pnR5f)Sy(Wcr*|oOK~1=r_C^eApnqFZ{02&U5kD0C?^nj9wth)d zE?zLXg*}f4^@UoKxkmKC6n5ID1cMDI$7$qQ&GcA!0!KGj-sEA&lX?U~l{08^wC9cU zMGRaS=3#|pm52)tpbu?zId=0NS+|}QhGwv1KKv!gUJLBVCJ1W*+iHC&OIva0@K~!QNXyZb}2k``n!Q5hBDIK)hQy3znSX6(}%=djB|ag05CL_*d$EsS9@yu zr$l+r7DMiLsBK;z;CRZZr7yk*y74)eSV&w_$bb82fm*`@9xwC3Mt9JUG?v;oHe=&K zmsk!~kQYMir+w_|?>XTVb@S66AOTNOIn)uL$wIDa{mHqjXU^k$TpyvNPhf3twqpi5 zd+XMOj+92aa~UphlH-Glot(2S$BD*Uh*3pqNW0#GXzld z8h=JNo}Z!%b{spw%TtxUtV{IBDo6W4djb<>p-B!9xq<~=H%H2BJ~nIX7)lx&eo>FP6ECc9h) z2KdM6ZuQho0C9_hy5QWkRA=^jysPqi+%_7Yuq8MIG3wq(YaYUXpdz4&^_DCkrYzhV zB2owFs2=pZ1U^SFxX4}i=N;!xmH=s`ky-m5&eltmA(A1E!PCxy{#So7b*+4%i&glg+UYe31J8I6ruf$x zp7_x4^r9t%u~4K4|L*SUp!?ArvZyzF^&D_z?$P{gs>F}yQ#z7tS}rgjYkx<%)oOSk zDa0DJfA|F5t}uj;hCBV#OZJN~=w#nITU(%Aeh-3hD4fxnQ%}sp+137WPY%V90K=U; zH58R665y@>uKsM86#Xl2od}{YiOJjW&0&c1bZQmB#vgAEbT3RCmD1d8J(h+!tq}OP zr+xFxV|MpUiWhY9<-k3)#6OpOjUCC(8jnyPGxu)AJbary zv%nSB%2ZfJgRk$ZfPd8bi1VM1M{hB61E5>U=iM~bN1LYJsfZcdJ!PsxLF9QvV+|KR zPDmyQdd*TjSgo1^&d_@3cBZA=U;QPru10RE8AIhRZMBqb7ou~FpkyM`llfUcf^5cY zLbWR3978)+kI&B>p&W1VyZ&F3OO0@RHs_=(v-`9L6AI8(5r0Mf6fIqecIN_i_c%XK zkX)oZbP}?)Owoz`sTNxsoe18|uy5ip85oLq7uul09HfAQN1noZf#&l>!AYAVY++u+ zUnYVqSe?@8it=lLiPNAvymZF+`CkYS;uN&s1kRdkCcgjKN1j*+$<$^jJWE#y_d?Np z(-kbQBMv?cL4UI@6a|oQ9sR@Rar81!hf*#q`xo6OVAZWAp!kWimnuARhd@8M%Db>* z&+pa9G`{!TOM1aw74948P+p2!#s&T5$zHpjvk-9-|a; z;4f*dQ5M!BGMceG06jp$zkVUro#B+I=C!||n_^S>nmaxMI!}LC?X9u{-f{`^lOo}K zBOUoFQ*zJTeRK>2b=%{t3f)<^T(k@R)JV<&M~V7n<<_p*JDj}I5r?sdQ(MfY)oaV) z8fQKp$mgJ=%+@bKU$rB(H#cqt*S$nm{ua>(?%*vIr20NoR)0jx#=uSB;gXY0Dw8}U zk%HX;9sugs8IFHGGUOJZO+L6(c~e54bYpR@0;}n{GGk*SA{q3zI+tMZ*J=!G4)xWM zcm7itCdb#7`9>$I2j;&b!P$chEAR3ne6L3oLxb=P4A5+6KtFq^GM&w8hRFS1w@pYp z?OxP-Ho87N09INTMu<-g`Un%5y@$m1Y)uwgw{1#W4Bmek2DG#3hoZDsU_m1iRvvV! 
zKv>XIc&8sL0TGVg@OePL;s|p5pO~9Dc_FQohq4*t=6%?jIMk(41MiSQTTalosez?0 z#yeAF)NH|6N-!NBxY46{(C>+Nw8WFaX*64$6seMBGRUMb;dQbtl5j7>fTgu`%u5fB zd$vr~BMpDno-XZRY`9^9_fGN1$Rq6)=sSrbep=aU1AbwqOO8rT0~5v;T63lA6VyF) zT)T}X=6}9okGj4s%iL*xm~GES9s6;>o##H5Ec2qnOoitIrxBdrK11OTr>pzokg<_NhiP|_M~8p+q(*65U8yLRy=t}^%%Qa{I+cE; zBL*B3SU*~ghlm^eMoG3yj`=yplHL2!&{y*sCc@|23=TT^GCwM0H@xXATtdmAA-W8G zV8e0yd7zX&D3CHN2cu0*f@zb>I9d|saby9PrUO3$^zE{wOpcM1CbC_u>E?Tyu*v#o z!Fqow0VPVmSW_WDuV;x;5f)6v8%7oJno*UfIK@Ea|I)dX3}0bNVi<8@F4dZTP(MbZ zI`oV0;r8Acty~Z2#eKnFqdwnhAJHavf3-sKTa^(lEcp_!62qdWj?)af2}h9sg785P z%>nX@7Ixm$nMgoq4;&2U>fh~q*<6yMA#sO zIqF2=Vs{dD@Sy2kmrUc6Yh*qx;B}p{iZZt*@Z^%%B&EPzKH@z>d#hoNcb3$7k9dE5 z40PY3SMOOKIJDoUkSY{ScA5-$r&GspvXlY|R|#QmP7e~LD;w|H19efJ;_RGjPstja~AJ~K~CQP?le4bfLrs*zj({fv2N|5@(q7SjR>>(>P{s$-%n93 z|HW4cCO=LNAh!~&ig!^@vc$jVjudXLZVMinwKy>=aLT46WRrZ573FYYw-}x zf(=`B$KnoE>@cQEGx2J_D}yfj4Jjf7P&Xk%fsFFeVbs?xu#lL{TP=k`80{314%TZF zExTBuwf$3Hc(m__ioU&)fC+zXo)rt})4#EjEj`~66vS%nVv&3V?_Ci#{6G7}|DlGs z)m{uEo+s_wQj{?l z>x{hyaW#9e&8Dajg0;aTRD?voA1aL3&_FmBJOVhijyg=7&RNM`(FW2VhEZ3pV;||7 z718llC(scHHer06F_WO6bHYz9>Z#K_bNnJw3Z&O=eD2A7Ub1do`xN9FAw=8Xn4@C^ z8eEouTp`>bj1eI*qzZo?EYfsEE`Js5xAl-Egy+y>9r;qwe{rd>wt`Eu702~lQgN?6 z-45SKUr4?C_}@t;ZD{5NKWeZl_4`BcA9iS_!ufYh*cL!mwCJt!8omr19X=pHI=3M_ zkx~lBl4r@#UT_QRssQ@N+LpJg|$7;PtFHQyd_z$=i6v9M&Z1QkX8Rw~4ag+VhfHMdpd_ee+P7 z0QAA!H>Hjn)|`P{i}=1)e5HTb>?0jYA=4b4EM8&6?!|wJ_gn3skSh82^OOW^zb}?v zQb4C;E}6&cF8fiO*elJ@PN&T2@rI~?+V>*6F8nA;(0k02my5G&sEm5P=yxu`)Uhva zt(Wc@j}h1j#PpCI38zQyb{tHSwH-A5Lu!)orA*7(;~gCbVO^%MsXzKy2&~}>;XM`rNy>&T+F^< zT+xzNZzb??lsUMx$Jka^Pu&YzI_tEL4bvz}DtCX#iUX}+1B1si69$O&{mai}Xp2=# zTVY~IO3O~o9~-gxJ|FRt*3HA_4>||ZDy|U7Wr{A;@)ka%8=T&)=Ian5(0jM?ruIL| z&Vjp*hKs@ztBq~jX=AfV8r!yQ+qTu%PLniNW81c!^!t9oeSg4Rvu5t#oPG9w4n;wS z>qURCgRruoA(m0g$?~_B12MdGKq?gS71213{neo>%{AYhqmmez1t)^0q~@q(W?CL} zf$bMlKK0O^tgP?p)8+wzMrb5ctH%2b`#3J)e}aI!?lI~VM0)nDA^*3O(UeDk6kO(LO^f%OiOc^l7WB0 zhWXN9c-7eqWCk8}i6E1trxc(=p}>1K;uThwBgLkVLr9PYvb}(gG)!%BuRqw&kuCa2 z8wj@CehOG0efei_2~J;5&lmGnmaV`me*BxWmEX!N(xbhe#0}^=OR&$p{8Gi3uRG3^ zp*m;x@BkB*-B9@+bfIVc^&9AQwVHn`dbG)&j)PA@JcF~N7tbZhWJe5}kDF5jYp5Px z2w?z1!Z$m6b^lfCDkE$TEMU?9xIS|YMZ<89C3=%!v!(8KbeSD2KW~xz9=Cx2^siIc zd;>x^!-Sm4{SK_3x)OteU}L6Y_|7kZp4JmjOVwoC+Ku=QIOZ>WL{~mO&($GC|_;zcA=ORVVK;Mc)bO!rvv@MwuM z-;r|1_nvrVT_j^x}<)zhS z%CunH+%ZGTixd~uv|E}#k|Xf2HZ-bXZOv={d~e^PnPm$|C4%dwc-((x&coBRCdo$3 zg$eF-&OGEuY+s^NH8O#YP>R&wHm&EcY;=V7w1Tyll1ATB^ltz7e z7ygBi^v_vF!zs-%Qa*njYk$g@h3%ijra4$*MZYPe^ibR*UcTP4(iua&_5tp1v<8hG z>`fi=v7F80o}|sj4{T9Zyk!F|%y?3lpjRL@Ut2?dP@-a1`&jzUAHwGDcnK7tNJ=x& ztK^bs&u#kZal7M6uKyE`j5O#gzR$z~gsBul`)F&?oQxogY1@Aoq9k4{YkIy&PH@pc zWYJ83E;l6;GV&Co7daH*3!KnbK8F8e=ley5I6T5V%q(C+i*q)TJ&-KyEoSqD_V}kG zlzTeA~K3 zjBu-QoyRCVQFVW05HeBB%s5q5gU{$ppXSKinh=Zxo)qBb)97Po@UwR86_0;m*Ri#_ zIo9bHyU5)$gQ_I$p&3g7<04g0n2R-l`5qq2tT%)qczIDYlTOohfzC!D`lcDP-=!oHVy`FO} z4V%l|0d@hk-x8q@zhmCMX%lLp?5B`h zwo_&^3|`h?s6AkI1Tep>gie~$shWcRuFjaQZQaqbca{}?ok)<`v^!wU^>Eq_Tab#` z$YEyIct(F1t=|j1P0BXG{ZaxKD6R%{EKs^7)oDF<;8Hgh-Rs7wyXjM%5R|qj|JyCB z$pd}uVg6{lXb~r?p6;8Ob~dU(UK4LD#qn3pMRY`#1sEu&`uvNmW(wE+TPBWa8V2@X zKY;$Na9el*)P?}@*)r2(hvV;-ck$1B)Fc*3)A4`q?4WxDN+m0m-YE_9J&Yr|gWq2@ zeme?0{pgDD-}=(jIJ29~DOR^Mev2~XvYa>XoZm`h{CuVqflWXg7eM{Wt5x>JVcqBf-KCi5is#F6J*?hX|IoyZBTZ4! 
zJ!FoIBYtLrf<|pLSKB@l7sMx0#ukIZZu)7$oj+D7Cb|9^7eGN^gr~+LD1|)RH*}eoky9Eb|xd-Vl;`6w?wm_rA2q| zU`ml{Jcc+#z6J94&@eNBx?cgLBbHYl^geZ3wGup(uF}{>O6s$$cy`a*(f*MbebCJb z^$re6DL5;L5zQevB+7I^ilh6!ZazUBK(h zn-TasSRuZ)X>%_t)|RE`*>=nF?|_P7J!I)*(AT9k6>-p6r*BCsD^0*WmeDs;L&w@L zDGz@dv)muq-HtnkPwBtHhIRI6eZyYqWYUCYGT=th5c`2Dy0k?k(OQeQCbez-LxIMObt#s}bS58F`>hd<9N*D2N7 zE?4F&{$S(;<|R3BXWE{c20AJ{z6g&x-@z)HH#YKhj+OYP=78dR3)+(v5QS_kY5AR+ zNqY7yPeikad#>n2+v~gxVB|$5U^dwFyq)6lzF717&O02f84G_u&$4DsRniv;dYc-+ z=Gz^=n9LAmkjPl(KQJ5H>8!JkTu?Ql<*bP@G~yz4Q_6$)EvTggT-yCRa-%k|pnRY~ zp|tzED4OZ#2)IofMH+Eja)1|?0MqcO-ahCxS6i4TP$-S`-q0m585vbXwS38#zO2IX z*Wj~{Xg+(!!Z?3nLIGSVo0E77y41VsS{ew-8Kga0Wp+Hr7%fGrY>3DFLBNZM${f+A z)gpF~4*J(AjP#DuGg;4ptamHQnuJu$Cg5|y*2tp6(F&XwtoP9=u9s2Xmhx-H{*U)W zq=kc8fbciw(rnhFgjF*2aA>ab9}-#lxVZteFP=nG4MKl)pqID#Xnf}-HE}4wR!mC< zal>>!ZXw=Wa8Ad4pYiGGc?3sgTsoEgBw>LsB1Hspi48bFrdxSGXH%&fIs_Mdx1n{a zlyi((?aeBPS^dIlp$K{fQa%}9c^>nJU>0`=#lQL+DP0}cKa^f^J=l?$hW(HB;C<}U zM$Gk{ml%Jf3x31C-V=ale!OurzVj}%JCpqM+xsKb<|?hyBD%$%1JU(-2Du&RHWCpDNdYdz~sP72)fD$7^jW#oH zr7kW-_JLpIR@l^uo!c+1-Vn@)Yq*vapx0b0HK~6j8$^6X6AU7{5%~RT*+0MPFBYq6 zSB3UtOcvs1E(fXP9kr#^*h1Y@q-U-G8Sk;{Usv{jv)15<741M%WIRP+C6E(Ou(6p5 z$7+M_Qfz|0xjxgDB?M-GcVeQOU)ND{~Y>a2Eczcu+-dF2%HpOGA(@H7~d|a8hr0g=W1r#r!0dG>MXLUmh)5dS0jK_%1OAR)N@4jVx54$(8Izs zzx5KVYV8OD@gBKqORxkkRkBW83xIYyHbX-!JHb)-UQW;02Gu6xPtu9~b~KGu^tV%L z9P|<2!`bx1*V`8=n!K%?<(Ibry6J!2gWzfE#wp!J=#R>u&#nL1zAudrnRC{YFYpNT z0LsbUvPLWD*Ur%79j+_3!HKlje5-|=jgD@M7Q8K>8&F!Qkl4To>N{p)VJtBjY?W3} zIbrqFoX*7eimNO}eo{EY#Ct7FYuPrRP_``(Ee8YYhDn_YM?N`h4i&>u3mSiZcV7RjR^Yw)_JrCG6lCtC>|<=iNn_{}2XdGPy?( z@#_hy3HaSyMF=#5A$2F#y)=;_EYdS?mtbCsqsKy~EfSZz=8()HQUC)VC6aaLKs!X2^dOeiyLAo|qB? zT`2znTR5Z5EDo*E2jpkTzXzQx%Y;?-s!m&9IHlK<6e;}gJaz+3Y?nFS^T z)lDU}6NV;ah2FQ4bS91meJeB3ZNt)wSN;aqn&kD&)a^M(sSEq4$Fcb zOj@*-=VPM;^!_+pzk^~y=}VishRZS-H$HhI(zUapQ|ID}W8qqzEA@Y9~Nd?g@ z+Z_Hm9wA4-F7%i*Y5M7Rdvt9s(Y-^vEXx;~_X_w8bQZ~Y=~aJs(3c$K!)B40fN{Qy zgd(UGJ}*U)liC?FXK2{}^wLt2A#vii3I*4vDo;sYU>)^4sfhw692)7^8*{oe?5}@mcfpCGPMvHQHrFcCG8km~ z^o!yi^}khW0zysZ-eG5n4(ON@i7p#kOIW@D8yz__O$fIw2C37Jrt^Vt0t3(l%uAZ} z0tw(0Q?Q^LLeou1q7WMb4kQkfpp;c-rU%piOn`B2tm$s(={;KaEs&es9eqJ!MHPpz z@m(H*o&kUUPLuikfY?7+Id;V+6z;O+6roc>W?;?z*4xTpD2o@<)mMLvL%)j*#`z*o8??Kj2ip{-~XrC#fFPadIdP3t_sv&lKZ$klyUIC*| zb%=jx@j40^ccctrp)`W=k3iO7BK+pVQ)W}p4WZrT-`WyGGEVWTPu=D*$@;g%1PW9ynCKNO|WQi^|a22oO3Y+N$499Q0v_O$<&>ij|TU$$~NH9R&WVDn=y$%f1H|R7*dcGtl@vl^@5S( zUvHE0VCA(!hbp8}_xT|{7zL2g7<76RA!#sPC;dC`6b%JV(~{Zv_Q+M+3Jw@bOCW!0 z!!;YMT5mD%3}IV5hEPFgq6VS(_8ma)CjOJq=(BI^EUY3W>Jh#4;g*}2C;x{eol0Os z8RwNZnRo3&Q~cL_%MfdAio<2XH2;6-*QwZ?k1f6>uw%*LK z7kyR4Xqb6uu_}BF*IYZ+b$LZD?Kpo4<<}b$TyWff?X)n*;X+0Qb3Lvx2DngcxM?=} zVtcffd<7Rbsj7EPEqbjFcoNqfsb2-=a7>?~ zw$qkL=kOXN6Dyg6$t2TmD|kOn?}7n}4j-ObX;-yqa#8!ggq57=(En=c1}J}q!BYEG zn>c}^X(uPi_)A5iNShy^N#JA-A zBmyu7WEAGJ$or`KV%AL>R}hl##4)G*G$a*QA&6hx2c4x@)E$K7h#fe5avH)k#bu5L z>p!0LN+iJGo0#D`D^+#w!OMTG>D8$eh4s84VBtY73^-0=b29`Y;jE5G5B@+6=U`#p zW;2+1kb0jn6^Cj8ecMs$G;S=913{K%jMoJLA9=m%PLg@nyRllMv-y;cT2}Fy0CxM$A~r}aLDh&2>2@wHU)w7P}qIXIk^mI zSOIx{byMW>k)b#@SjL2sziu7U-dLa~Lt=kBUA_cQPg29OK^xjY2H0~#SyoB~{>vP} zvio?u!{SANI$>XZP$GYiVih=$Qw0)kInVFGq@5Ukev*Kt5WrcfZienwc~0TZ9Vxfw zg1%0?BGsdZaEb_BaZo&b8^1y8My>0xE{kwpzpdA0QMW@R+jG9PcZnS1!H`Ly6FO!D z_=`2|U+Fp0jkzyP$cX0X)K61=-WXMR2Rw+3LS7t>3Qlnl$=uUSlc3Y}bPK6+g9#)1s z1M71b2fU$W(*LlhE%h97Z^=d4$C-w9fqnU(JiAyuiHn?lG}_4582+{>A2nu6+>U=c z1)P(?>2-$;d;ovf#5e&|`b>@{dgqjLW%`2$PNUdLIrc1gm7%1h_$KIM+&g+jK~pX7LrSbY$D*tI@Wj^gK~)#O7WNn5OzsQltjzBDFJDKwc(&%ipn&*=Bqm z?$cPI<}3{ITN!tGn*;rZDuFK03xWDX-2>i4>M8%-hbn(CEFd!BBql7*;t$01*D(Hj 
zJrRY!7U6CT5%c34O)%~=gkKkdwfvg#a%!<;z2^>7;={^2({?F?Ovdf0Rt_z3aGzSx zQv>lN2F)fLTCvf}*j|*^SVBd)Wt}L*8leVV?B37sPP>GwOh|TFg;ZbsOY$DlbpdsW zSQk;-uv35VhA%%}?nP>8tZzJvQfm>{dv@jb1VLZx5U0(P*3s6gDaN+<+mJ(61n=dQ zWw?Ko8e%o!oaIBB%W5L&s)REk%Dz8`gfP&}3!uUN^yhFI+%r^x7$Qw_pX6cVhBZBY zS`u4Q`dg1W^B;(0k-&Gf%5eKYE^xzg*3#p_U1fikKxwa^y-4F7PL$e+x(AII<5y_$ zHfflm37pV?x!d9d&cDeZhc;uyuisW7f7EU`KIFVbalycy6}0JtK0lUkQtOxg_*hw7 zO{Jd4k{qvrFiFAAmsb7Zb<@;ij^~g&NcIH*X<@?wHhIgk@EMRB+OJ+Mp#>9T@n$b% zeU*QUhUzB+hJ;|Pn>-bSOhIqjg4ms9V(*H6^XJYjw(-M`{d;$96^Y#3;C51Pq@ClTcAr3;nz0=U+5mtm0-Llzp^AG5jxdFUTQ1Kl=#Y4!H1{nsW(;bll8&~xP0>Q znpzTP0_wa4G{;>cu!`!a-b9=~>0S^>DJ6O#d#Et0*$jO_M};R6Ku(uP)8J8w#*hc2+(DZ$c&|Xfp$H7BQ29ODd;iAP9o) z;Q&dtqC{N91PrUY?O8*{CL-XMY0Nn^H&5{MQ;#Qi3g~mEZLRFu!RLQ8t$y}Qa-I7Z zqC2cFO~ydHcw4BnIHBV>$(zG=9dQ6~o-#Gn${lA95KUSHZdXoKdoM`iN;8>@M5L4* z|5mFHS@3f7hcR>p-BDfu#~AFEGnr(G$6`=E6v_23O5dj@26g1|^XUV6&#MWGN{zdj3kIza_UY0H9f0DAjhN@X!T05( zfvDiqn~O1}zu>pecZd|rUwHNnpzGCT9=d)%f9iniRs(MR6&!yu6)z!i$P_(u;4+R6 zi#=>AHI~CdQP(bh``M)MQ_p#RfDdcvW~s?f!wK+`wvNR&H9sCBHyv*o?Mpi`#?Ko( z(92twa|!;p*L<3aEsWroP!7Vs|8M>NSt`yv4%sOzap}S@dg?}9K?DERzD2kv@Y@0p zVo4^ynwhewT-<-MUA&$vs)uD8e^u*zMUVKMyI|-Bx)dP~!(?0>-DdSlDkDjEQckF8 zopS}*QW)P_kdvB@!?94E>g5|4JeB#E4Zxiz4bTfAhG3~O!9w?#gI6jN>ALOYK;-vWP8*^ z6g?P_ewIp+16>-V@u-oDPg%U~6HoH2>7EC%<+D<%x5J5IlX=FVQ*O$& zv)_!yypn4yQL49x+KvU}6WiHdqYTtaX0Nl!V2LE>v`6%G8m$3vZ0M2XJ7nCJgn}dw?RA1BoZjSGrB7bdxB5#p1-%U5g z;z+ng;c$?X8GZ{zeR@h}H3G^Nt_$fmC$$kX-lZ;Whsp0u2@QHW=YOPUi&NkhK!6_g zhMezM%)=sP_MIU!nmJEPp`F^!$z_*^_{ny6`^A5i)!}S=p&5aIXGU8*Pt2(<2N>gy zovv$l4|dkz?66se<&VZMwOZnr1TYOgX7Fu`KtC|9bLudvlhnDtQ!MlBA96MIMqx1+ z1IZBC2Dn#ai0-Jm`2&pZ@o+ppc4X&7{T^UuW@k>hu6mY>bYoSsZD}2y8IFtjMe??G zl>UE#xIqVWU#74|i7k(uvqma)2C6f7K-a!6sl6ULJ?#&H4|_s)B!zrDEFbdZ^_cd< z1hsn|QGn$@uLqxK!;Wl6fNIO{u41@VL$ftnnthV_dpx}g=+2k;$jM;YPTQjQg2px~ zd$&}k;^MM5(k#(g z#C=BQLAl6KuPP8CwyK;M`UfeE%mRNJaC)iJrOorQ4*?mJjYJ#Rj6DR&5)l4b|-8PaGRu-%9>P1t#(&3Py+KZjMNcg`wu?umCJO(^K z@&FO2c?s+V77MESqhmWVp21|vnk_Ltju5Zk9llXpfv)-lA=*n=>nAO@50QUtT#HA# z$0t)nj2($GLt+|bM)3@0s+I0wAOym+R0gfS54Py=0tAkb))kz}Oh{ z&R=?6v(Bw9u#PICIQtaOFsKl*s>7a7Z5*InPi-Qjcv!L^F)A!fdK zOPETXSe)uxok)O5LM@UKZ9jjSK(Yr_Ov-EF)k#z#U)ZF~X}mssVi)ugU%4y%YY8%V zH{6e^r<$(#b6NfEwK6eVQO(mN!AMB+7P^*I$>4UTIDG?E{2bN$0RU^0)?9I*gj;D< z^N&;aW^PYM!R!xlLA~uFl{YRT&`Cz^c*6R-;nFo}?w>S;z59x~;zobxfRM)K!@Lvm zCiBnK*&^RDXwjeAeLcOFf)|2FGSd#`+M#+pIfQciC?H6SjWRU>u z<9g7WA5P|DxmZa^H>!VYqqo)eUgZ{%m+X4!W6?r_{#j&mxR(XI?PCTyYfHPn2fV<- ziv^8#hZ_o;fXS^eL4#2pQg3SB4G0(NL1z29nF8FpkvgX3!tKt9bkS6w>=RCv;9{oI zM=SoAkWf_$px4#af+U{Us!M(c#FJKO`4J6888uccLR=FjGZ}w~?ELqWZA;w@0gA@9 zbLH1{t_<@xz&4wn4%>;Mdth?nBaE^_M3_+5UvlphAb`W+oH`!#MUS=XaJZzRp+|qJ zFmo-vWPun4NOUpSB94!wmixJoOM$P6UPRjXOPLtkANub5I`e?`42`FX*e4keduHhr z^W90htfOSpxm61IsTV<*?!1qMIo1D9u1Db0~ptg6eHlzsi_aVyCdteo|IOu<2y22*p7Q1WtO5wk%&k!zdrqp1NygskIbDo){bWNXJLGs>M_iw&V;e^tpH~&Li*RH}y{JZCx?TMx=)m398}TBwZCaxVk1>fq zNf}W%ygOK;;wb{CuP#I(>$J+F7eEhg60CfJVq<^DEP*34pfZ1^R%PRt#m!vtORY~Ve9ZU4y zrZegIoKa&g+A4YS#REK?o_-@HpUDh%Z}^G*D22n4UM@-Tmh$ZrQgNA@#W4=cPQlOoyNY;B>>lc0xDP_`IE$1XQN9QO+obH#)CV599TYwT%cm; z5IZigHZTa{c^&xi85^WkcN)Tnqsfn} z)8T*w)-1bB!fdKG&O*Zqb4ht;_gpb?pGlN-CX&*Q7|;`5(KEw5ekT}5m^t5x!mEF` zpOF^}R6mLLo9R6*y!n#Ea&4@ckk|2T2T-I>_qNLhGyw=yvP8$0SvnRsm-KzkmFwoI zeNLX-GWnc&xy@5_&=;sjKMT>#N0HRQUK4u}i6E|64Jpubpl-c&|25rD+G9`eQE2d_ zNgwRmh*<1*nr46ok~b=X%h$J^n=5}S5R;O@!82t9^m0$t(oKSlAe2reL4TZ1b-(hy zU#o#b)^QM6Y~i-ud<-0fhM&*21#_i}KFwh->i6-?FD`x+E#@wtGO+=Q3<5iKX=QL# z5KRgqMTxRZF6W&hnq4}3VUt_0KV<JVk%@+`cdizTB&1L22ulfNms%YKz`wiLZan;mpD~jCaN& z%XDmab()dL{`ogf3}ZNv0A0z=WmwiJGFt$Z4qXJuLw 
z#Yx-eLK#aoRxwbYD^F9VWlK4E32%RkuQpP_QvA8ts6F)2g+a_Z3A!({p`-MyIJHXl z5q|lKs@%*k1t#!oO2MGx9H}9`3^-+@f%u>P@-a-Y8M3ObWSzZxU@y7rYP8~K^h2|O znB1;6_d>s#MYIYfWLtkF7RUV~=tIpSk(JK{%!!;=$XR8Eq|L?aKSU4x_#bj%bq3-D z1N+v8>a31)5L9OCl=iqX^HiQdo_fA#kX*g*eWwDqzn53pFU-s6zV929k4kgNv#+2F zY!z^V`v>Z_9z`ooX(P3FGpIvP8nahbH0+k+TH2Q0pU(?Wn16qmbIMx7HYs&RHdlcS zqO_FQkRUUbKA6uE{6t2Cub#6Jo5>8{W>lnZG(qp3(gk`qZv4_zM<>UD#;k^F5sI*q zp_zUfOHQ6VrR>bl;Gq$0QBV791aqrG>w{B%pwmZqJiC&Yw7-BX`7?X;Qj;Ru)>gk$ra8x))ZLAK`b_kDWMUzx3&f+Znt^I>@$ z#-G#7cYJI^wqDoS&bq4&NpugN1n&>tVuW(iI2Bd!CS89b*-wC`0qZ9Q1DZM?`N|01 z2ZPZ7J&kT5xr0+s{is}|9q9h*a0iBbn!kKeIa&cZLGg`ygejYy5k@w#GU6R!ZkK8| zwmpJrzbfjhvz;F(zkQTG0{-YN*8`NlFzPNa(G}%to=%_({+5t)X_x3aaoYgsplcup zYc6;0UIBkO_YMwBVJ&ym=p1lJI<19$crciv;xx2EKh1rGHW~2qVM6PY$g+)rzqe8w znh{9R7kxHTB&F+aj0FY;7schT$dYzaTHK4E8&HZ}FkI*{_Sa5dE~#D{n|sy*ABIAX zpD;gs!i+H`MZZBZfAZCrnC04imK4~m-~i^%8l`^#hvf24{KO(+spmhYhznqb5M-P0 zUjtR#=t0jPXY87!T3N*fg!+Bemu`iHAU&b5HE1L9ExOIOnHZ`zEF(MOWFVa%r+79`%)xJ zV8MSUgpISF@dd?$Y@Y6B;;8Njhd(iy$R}Iz%h1bd-P0c%V51}DZ^FU}cH2DemuMB( zQRWxx2G{@GUV{jid5YottT{DqoVr<3jwbZAr|y4!U#fm2<8m9!8(>KY+;kVOF6zr=rx zN@}X(RgI1^oz7wSw;&>|FA%Yi_Nq((;C*D^9(jge<~A4nPaG!*W%|TDFO!?#8R;7| zv4<1r*0}=TaOd5@OuBWJaWgngDgu^;;t={N+Q2h!F;AYd5#NLEc5aihr!L4DaEU`c zt6)I(L#~9kE~?@%-7y&lS^}jC^QM1y;$CM~apwl>N(yxDZN4&;|KI0d`cC6-1~O-r zJC2Eu2hXdd@auR6s{CbQP;jN*?{K&Xl{Hu&&&O=*OMqX4WcV{%BC7zcyv#W?Mng$r z6*Du-U)dJu5XX=oZa)>z zgxjK(R;HcP&mwuI!-|72pDviQB(~vJtN81zvYRje{3?GC6uvL{;35N<8dju6nJcYI zz?2$uY$gs^98hn)HhZ=t{M&!TSQ0>2Zwq?*u@3hXcWfzo1p^Th9SkOQ=zlUIcNRf; z70+AA30He|5O0RAuDlzWpC(!;ngf9(7H$?wdSsgFQ<9^)9$#ojowByueXn>0$)VRx zLFeAa%Q!Ol4&q8!hLBUQ6pELO@f>^ZQWH{Po@6}Jw=XYgZo<)2`}co_R+s1nWx79j z14fU{C3a;+jbG6Wlbxp;Ol`~3I95o~*hhh$hWGYYr;OU3G;(EFJejUo(p{oefNqWmYNqE+<>tDm&Omh2@SZ75~ye zPt|`sftk>-;yr(1oQsc!vTa8zO-tvSjN51!&|AlH*;fhG^(Qj3lu%EEvD+$J6Uqmdmpj)jX@R9DFw zxGa}xX;Hty2nXGq0HbAkaEsE1o-NjZGDG9lNsw!t_LmKVomY}JQ=vvH2cG)R`I5?F zh?0{9Sd}R>;E$h1r*s^zYH*~x!QApCk9fmqQ?jQm*b?cW5XT4fNJ^>Y=Ekj<3>tpI z!tP$N@s)oLui&L6ZM&}h6{F;K;#cyQx(ns1EEg$p)2k0re5ZUnixDG_zjmFSPl1Ornjr28P?ap}Be-rNu zd3F6@E%(h9GyWAPR{@#s$Y-c`F}@^6;2jGU1Jef(2bzr&12z(-VGNAT?}8J%y+o1~ql_!7l zu4Z1?@u*z>*^>F#9ij&PvlvlHs7+yVqx?dj^<5bgmUYI}KmeC1jLvqEafPBtpss%j z!J$=@zoI}sqmtZ$HwFy=lPtr{n*7&Kgky+k&eu?HeC)9Rro?mKzf%#l)(ZNi4s z1|Ch3|G~Gqj`-JoaYOTds@#!zar^89vcA}vddL=&F@ANXHe=;D-9kCBWn+D3sY8FM zq*-_c{c%!H)Kr!j0psr%D97q-eie#*L7oy{7W_BTR{T$bKnA}&DJX%YC1-!<&`B}j zp!^W{)Bb64K&4-maIqbC2l_8y=B|n2Ctn-2+;?+_cmIn=b7qp*~CVmoj1P4 z=!~%nLXs?vDO~t3^NBTUFr7dr3RMy!UNI|!mZ{LS4B!QED6t?36kY#GC};@H?aJl$ zZSd-)6da4FVhvwyKD)sJ%?)T$8QPx;}m1dcbkmU z^|fHLL$UYsY|l*;IW(jxyA)!l)vjp~buT4{N573!-c}pFO1|OYauq6f6O$l`cQP3xh<3bRgAIqiuHVL}w6FhV2_>}-dK)k<$d@{O1&$Tj;{=3PgEL#7c z%w+e*%+r>Qwj>`Sfx-8u*NXW9L-bjdFSx)#>LayoMg!kzi zNl^~m3DByO6Q0M1NMn3uBDR$sjwdIXFSvYV4bu(5{;v+UAM{Qlrly+P8EHS}J-i8q zqYcfW^04)R_tt(fgS&S;L15Bcifo1Z(^y91=(W^HGI;|iH|c^SIdRN?;E-L^`3V78 z9vP3HiWoUjcu{ZSmT3gKOVNMtOXW^WQ@@z>h!I^@zIlG<2l1qqjp#`I?p}6xum{tM zwZfFxRJzKD#CqanG6%rgMy6S==A_cXAYiG}|5|=iF5~lS9IY>BHRw1Zj}~+g3A=j5 z#vmcF3%T-or^RY7o0=zom{G;^@IPg+D6S4`Mp%k0B$hP7+LsA&PP3N}Ccu5uNE4Hm z!jw|wJNvZ{m>EeBAvgDKFl1f`s!9RdZ_p2H)%p8KfK+M}-gZmqda(uq#c(fV2~t+W@ko(HE* z^9tBg1ZXAm!(}TZ@JM>T*ktGnU3UVE5x0b9@hmP*~ApW-C2AYArQQUAT+#x!5F2E7n>mTb$pPC z(Byl!EC~ZVfip59=uKN9ob8|VvnuVtZ#w+fYe<^twPcg0PE_$>!dBD-W?&h9J1n0j zrCUCR(sC~Z8J@?$`B$lJ9>&>Xk|l3D0k^7}S6K4XctnGL!jhv%!bxym(CrwHEp@W& zAzvAO_s5qT2%@c$CAr){)!jJ-aEcJYb4Qyii#-lvZ`U0$HImD$f6WClAOF2&i_+yJ zIbVm;9!={RT~V=$dDV-F1-VnVkb;iMO7NCjpx~VQNjbg|V5@v~3a8+PTH!o76v@cv zl5J_xJl+In-H9)5z^Fqx+vfeqxfxCICs^LNOkQ}hD3N@X?E)!Qi)^nZj}Ty-nC 
zj6uJ-+P1G?R4H^f{F9<(RW$iBXp|;IIB+nQN~yp0b_KH~u>~gN$d|`etYE*#2X(># zdGL1kngtguflc!HF%$wmXcm9?jFlO;r4GpMoHhA>K$jv!kq_TOY|7fx&Tb=jg|rWM zULK3p@~SbKw+!sF6QvTB(i#O0y(9ir8;BI;xL5*Hb%zASj%9Dm^yfsP<|a*jm|LG7 zl@ET(99A!k!KP!+WJ-_&vQxKBOT5GopzDwsCeG zW3|VB9MkJ5!+;LkH4|2tB@P84u@T-h8BwIu7@xLl=N0pp_FU!#MbOpT3v*0QGf9M{ zrTN^TZeK=pKU~r!C_4LBnjIGHH_3t@q`kZgIRLBgJSt1(%CO11WLe6OX8IX+6^`n+6G4H?`_F*lM2G3122u%=12K$Hqig$ zD*E*NN`V|mqF#)ZZYgWGE>fJ|(joZW+ba~FG6x>03Qk&Dan*2xI+$l)(_j(*0hAek zbA06x;54XM~&_ z8yHAlU11_F^-u6n?W^g6JC$q}@5}XYw@aP(fK~QS3S>Kc3*bfLwTPZ;l=-56kgt^N zZCZZzzN7FOB3a@B5d8X11^VL@4e@5S5sd;NJP=oo1|`k&23ppxwYO{4g2hZ1%k6!D>@^MWR1E0{ z5afh~2rNMHaIqM5s6;Iqx!^_E;tXilse!)8A}y&@uEf3SNDKD6JC!Hyv8#l*$VG??B_A8Jhki0`QA#H_jQ6`C^RHg^}Jy^@fIj!3>gcq)_S( zc46%r6ZG{gaRl)YavA>E{rAnuUV`;p@29i-@MTDwL&a>~`X>wjaL6`FubbZS`Jou1Ubu+?U7du~1!x7xY0Lq6v; zhriUkOdgXS;*u4jG69;oyK8CS@iPl3#YF&odQUm61D8TAg-e)!7yj!xr)-|j@k z&ZL0NfYJ;Xn_6OjcOGaZ}t+Tl4N5e}qX| zI+wI7R643SI~D{K?z#`21kT29$W`4V%-%nD8&dLB*8%BZOKEL0apA1}n6$00C|2{Z zW_gz)f&*EMtf*zq&s|Due$)4qZ)78<69%Lu0{wngN&-Zn>vA~nu zrS4=(3_Pm5K)dt&NZ4d;!G&7)U<2;UUZWExDdCcg>rUpk7x>T9PTrs(m&|G1cWhZ@ zvq2vXRGwbzXYh)9Z|gE1R($Z+bEupJG^Gy z2t!-J6pP-lM+03ENYBd`pOzMSPsx|5Y^{ucGCr1iEz^cC&c!!(Uup;tOcrD?tP$S& z!#&JsW`7AsSq3nRKIuS!%|Qgz)t?=7an3pMp`>>Q=1>#v#O2f}fWF8g^x{gRQhzeN zCdL!|Fe&Xz!MI&92dzf;xc7Krxo#YBF-}Bw^xyXW;~%#|?!5|RK=4LfRdaidS&dnL z{CTRk7rYYum%F)#&(~5Ve$Nr7M9>fXr(Qci?fxiH$ z)D!~Qkvv0CO)|@quqmKP+{*W(A0LQkYd^(0jrSr@$uV3 z8#+G9Ei}mo?x)0EbFE<}sb*Y%O6@6VD?Xwa@+7n{1YVgoiWEB9*ZDhWdwhlPQjlc|)dQQ*X9LTl{`}$l=O#)jdK?DyzHk=( zjh`Yqw_dvaOYvxe;VgYUG)zvrG1$A$)eJf`Dh}xfiWr!?ry;|e^v$NJq5s}NA~L?l zElmA{3;OTPNVF3@Isv$US{gj_cJwa<2^;`p_SuOt1^>3=nrkCPq3V;H3gz!UHtK+| z@T|pj_N@OvfwfvCw9l-!MUyB%c2_Koh-8xru1$;C%OLhZkO@44;rjdjZ57`^dBZl( z)!F6$9|2`Lfxrxw6B(|`OhG4V4r}3q@w#R2Lj3w8oB2G@Gv>8_p?D2M!Tm>L%}Vw` zIoMU?11{yqZ2Eo^z9z#?mpR7Wg%FpyaVv8iHxtwgV_)NeH*MMPE`eC=f#kpF#&8qh zMm9cdtG(O*OyxKB4LF0|J2fp-GnHoV!>-hSCv%lr7#bEAUn7+wTc~VRM{s}s=q0$>F zAcN*7_|C;kYdj#AhTk@TdvrCcl4!4CkB+~ggnfL5?V)i~6?LzzadSGo0_0FW68?;W zG|jW^l1t`0O^R}``d9qT`4DdMd$Gn)0qCQFYL9$Y_y}Wvj2*2&&xq74vH1C|LW>r| zu915qcI-zl@;u+~Me4!zEN%?2;9BpjKut>HpOuHoqy9zen77%g?@}{djU9DtM^OZ%Bxr2gKR&l3y>cwLINy`1cN3n#`ms zol<_+4{iWP->Sx~K_3nLx~S3|iu5ky7_iY?jjKd|GyBofU(}AN^Sh7boYM+}HCFa% zF%x5c>bubQZ;`V5fWUhYL9l8~@evpF$G4scq3|nxJLEE$x7#wjyBlH9=@=0)XUrD| z_Hzl8)&Xd=?qu;n6}eR9(6N+7o#_9J&HHbKzXe}4Ji|6+SY!UVgkuHJwwq3O-!zle z3$M$6$S|0$%ri`PO8zp)e_p0H3q}dteTCdIVB{zu9 zMS|{NgGBJ3>_DnkCyJMI<~Kxg$D+NaEU(so9cME10}H4`9-7?ctnGoG7=gcyA5Ffnraex348Bflg8DV&zOE1wG;A!^lZ|GFsbKyElzz_{SG& zW})B0UQCr(qgWU_anA*#I*bC}9Vx+=cbKV*CF%e;J;)&Nc5$ZeRA4z=Cm0G;baL>2 z4LJt$Z;zZAkFx`Pni`jRanS;`CM%i4%gF;5T+d1jGyG$-Ag1qTn6Ch`q^En*s)-<| za;r*1QrRER$`BYm64E0Ba>dc8k*z{i(kRFT_8SWBU*cO7)CCynqd_;I#JG~BDrD~s zz^UNzXkb9_ZXJ-6gf8^@N%qdZ{`#wb@v5-%=6nnM@H!1ESiEy~1Ae)Lbt%n$396b& zNe}kdU^}FHETkW1pQ%7}{gyp14!TF6PVDFhooBb%SI09Z&re~P=A!v}xG6lbU$zt+eJt|PttPv%`qQQBG;Ew9D`P9}h1`p5Xiv7|;f`EHL) ziK6!k5u*^u1L5(Kj-2YfF3`){U@vt;J7(M4rI5W(JQIwUm^?=a-&1JF6~+i$a2#}| z^B+oN-2*4wvOBD`2H;kI{=hvPgs+eJ(3Lz+uCrURkzRKU*9VorLCG!~x2a?F8t4WT z*MV$WFYj++zh63HtB?;v#yRvC19J9xdZDMg5(pR8MiundUZKMktu2N)w1MGysDc|OsPkp)ArG)rzLB{XdawCmlWEX2m#Ndf^@8R=1Vt6m2NLGf&01_jQ`7Z=T+g|{Eo zm{X(M8}!X|;Z0d6%r9G%f_`_8Rme)2mS!y*<4&iye!B&6Et!S2*Xp=hh*zoY8*_hM z@z(w*0E1G?R?rTAcwkHgTe-XGti_r1RlWc&*f}+E9!Ese4(J*47~M;Shehq&86yTr zy4L3agFE+Jt0n`MXl3B^(J|I5|AiiVr#!hrLIhnq6;m1rzSc_*i6J|0IsSIIi!-Xd z?!sPbwIeb$Su6L!RscG-GT)stQBM%pOPF-o;&)hbvxDq^?Yrq)VLcYo*l5&+beT?? 
z^~)t?)NzqyO~#IAj|(7ttNpS1l|>pJ>%eO;-DHJ{yxb3S%|Wc*=pOr21N3Za(C;}y zkIY{2P*`>I<5QQg;o;P5dyGH(ddqu7!JZ3j{!lLy4?(F7r$M|@FHtLFz+N6D$N1jn z`bToCB#k0}g8m{zsZP(OqU0-H#?1`GF6hy+e|v9Kqfb^{?&e<3Jc@^RY=|2pc%y)I z8PTHPjpPzLbFErv^1Tu)TOS4n>3T0<&3U7U5%bOWFy?@}*hYxLKtAr&&3CBY>69!q zT?BL|YAh`^VX_ppnLt~UX9abT%j5^Q)h?nK3(K;9lF|9VhZY5*UygUd!8h1JNfAgK zu|mK-GoJ&9$DCL8D#h|Nkvh;jdlsz+5m|2Nhu z%H6PM#4s52!DK5ar6}Geo(`{&UaQGw5yXJj{GWIP!0e(|v%CQe+-J0h-TFQRgwfB?O#N@Fi<8hIn(PU=MoGMs4?&NJhAUk?F%=It zU6lHY#-et{9_eY<-Dx@E`yONeyk;&SIjz@Sz5ub-snR(saE)!5U0SM}!X zuZ5IQe?coAKv@?Xbtx8N7h#`H0*|17vP>*&!5LsUEicn2x0%u4IT<&Xb~wI~&>>F6 z`a(%Eyd>RTQ2{zHvwnmwWTLGU+-^<;?=*WF_L`FL=pHLi)}kmQjGCh|$P#^bL+n8` zM%f=;>S*{D2+&7N#{~+pW+C(7K9px#X5MJ<=g?Qd2OzgS5Q;(Hk`g+3EpPLGy(;q6 z1x^*W9HBG9RKPDBKD4ePDA>4&oBh@ub|T9B)y^^3_F}&hDrglT@sIf`S%tQPo=!5IsE$5LV3SYjP{m1fuY9XN!nmmzf zx^bmHmlc@lujkLWQnTHDI%_lCCf;Evh($kvp4U7Wb+(^iy1V&5rA&VO%%)~AA~$E} zCc0A=>4(67opFqeass{jo=PpLe#Gb?7`sCU9NtgRGi@A{+x2}^UTJN- z{y2>2Ego)vNLtP&cDste4JxqHDDY=@-KPE;Bq{h2)~n8bh5|_%a?>vU7k9enjYP9a z-(T35i3Ov18bhG2U=|;!LH|0njE##$m*ijnU>`L8`fd12<+s0o+F@JNYMEbk=^ZaR z;i%EOy5p%N&h{qR{bS-}ssK&&2Xn9AOqt^e2W)TAiR+OXEA$L7vUV121EMbY>!8a` zRuYEZPc|uv!(;(5&1aWx-;~MOn~^>-8p~~CJ*|{q@>6w=f}wV7nG=$3B#>i(ed#B? zEEUDcojj(mShTZ$O50yJJs)eXSaC16Cy1k=L4S@Li%+wNHi7Y@9L;_I?|k}yZPxdU z2EYIAA5#t)S4xT%%FlOD6-I3XK<3J}9!RQt_R;1n=s2AY zrT@Ue(m%3)pU*?-VCZ7$0^N>bHr{143GHUGteML@22s%6#?jt? zPbKc&tT94)gGV#tlk<`2)Y*v<&z#nt#60}x7+nE$DYiC!5 z@mUJ!b7SOi)7wRi(Mtciz_hv&<#>bO01UF181OrRxlw+{37K)0LdR|oC$U^SBW8=4z%eCGMNiY#F z{A(EtY^}skRuQ|*Wi8jjpQ~}4EQHiBXB=jTXLZ)g`i+7P25SyV?L3;J`!x)|Hs_$a ztg)lio5Vo(#XC$2C9oq1o$L6k-O$ou$}3^+4iW~gk{KH#KeNvHMF0Q$r{&o@SY z>;_-@4US|8h~xG6m-tPw^VQ9wyOETMh-1CYpgS$swg%e|dWy0Z>iAL%u)cGQ=N6n} zyw7cl*i~aj%(w0|^2TFEdmVqhzElkQ5396PE%>vi3Cu!tGK1u}%8m@4RB~$C6M(^Lh83Pnt|GXCaADsnsdAM>N1g0B!oM^@@tXel1cGPww)e# z6wu|S2;_T$mK1u5tv^;)T7`;*D;a|r9CMmqLtD+Gnq~glp`5vB>P)g5plvlgHk*UD z0%?SoWuJ)_Xcn}V);EmKbvHx&j5u2@9@(zS3XYDTn|=d;rpP{bWw!#d(p0a1_tU7V zD-AA8|6&viYJa#$ST~{Xv@ zZL0&Llbuvj&baQ(PxT6YtafGqf_JASAyvQ>f74YUo*P9JS4xH(>Yw)UXmHVbs2J$e z)O3d6q8O}p8#Nj+YyBtN_sj~U@kg7Ch2$(ExUx{6OXN!XH#`=<;?iU+zU*=>OaN|< zz8m{XrYTGuOxqEk`({^vb058*A2)++7$FJq(l5|wi`Ly%jG15}T5Se)cjc`MJ}lAPP>E%I?kuRAg500TRXxP`tLXS&`p z+-JG(H`FqQWEl}R;JUdTS?9Y;;w4Pvc3CZ*AIIVCPc41}kAP6vL$&;awwlysvnNdx zoA1Ho(JFTc+kx4CkeYNF4Cu;U?OqEj|Be-Ma5CiW-`MjW+sW&}F(_<3Q$aYcAB`rc zD-pIX>YMq=e7=mAaV8)2fY>P-DljmEuUsVlwepPpAyB}83VE1*@J8jBS4yE^vl z63U+LMok)3!^^SEMyvV?`iIAB4qoOz%~oA)hh>7O0mxf_1Hq6G9ouC;byQAZVksEm zQLnNsz_e*YM)HsHft=RP;`l8UlcsK2Zz$+3%rE@r`X7J&?j&3`t7Bh%l$X#^0>4{5 zqQiWBEk!=N_LO-um#M}eX!(K^XwVNYCkS9GY*$~!e8aRw(bcvSyQ}0${%MQsuEPR+ z`BsWy3c8VhthI22%VA(2jY(kaZh+&5>7>R=3mC5$JV&oxA_nSv2m7|nElqxjDrFs<^`T0--hMdOOEIsP76 z5Y^NR6;h}MPvDOWDFDU=x;-*v2*C=qDpKpXbXU^Nl!c=I2URd_QG*k#awSgA^z3Ol zbMUA}9voM9Op8ktP84v5)amx~+qB#|2gRBc-?LHU2ifbz!w(`Vl24^{YS2rc5+9ud z)sSz0!S&1Gu(OKE@?=p1W*jyb!Bb031sxP|HX_X_fy6I{gI*BaomT`3>cCt^)+U~2 z^}Yw(CrxU0;-uYM-GBkd2>d=H=l)Hbv!tvV=`~0 zf1tlui#jDFnbH<}9jhq|6o6IOR}$+03}#e+7QcN+JM~po$Y9(cty7SzllWImsd2u2 zwZFaseVJ#7v)1y@#Eo~AEtF5RpHGOq-{1>|Cl;|IA(*4@$^`FkQfXeC5EzXqg(V~Y zK>%L?D44d{#L-DCQFSVhT4Ap}D)*`;eq}Y7$MvPZ2ivxw4@2g5^KCIFXZ4dCRSPVC zizLLgC=wBJGA(#5$XZ_!Yf&4I^`?j?VBsm|J=w; zcPd<{7RI3g##~{E@;X6(b9wirDaG^KzmEm6z4=w&FFm&P!C?MwV+qjuQyGx;qxYmf z;m4FA7ww9++3;_08X!{ZZsA=puwQw9M7VlUo)AaJe$UJaF;QkDvlGi?0QBt&HNVMQ zq)*?C^~aY*wJ)VYv|LqBq6#Nax+I2}xykw0={E`1;L3k^k*tM4dZ^KH0J=t32%dNX zD#5aY!=1Bo4E>bC)V{_8wsOXMJw2wNXH)Cu$*|G)5tS-sFM``)xP~&*ds*7iUn?J78>PGN&MR z03WEUUO1GKp$bD*q52m|IE(>*9GWHU^20R)ctCEgzr8w{Fx;}XKN|R8RQ)A# 
zgb%IupqjK?i^sqX=mJ~lSnSe(%iK4W*L`w2sQN}VTiYJZLDVrHf$SK6Q4ZVqY^R?T z6Nqfrk4+&yf?~wvz{E?6NtAZw-RA?QQ8-K0rd3#nKRWLqSSvOG$vA8n=>IV;^E}t) zORkwUoUfiVZJ}g^47O~=WI#ZsMRcPN+e4vX1X~8?CH!BdJVe7Jn;_tiDBAJmWCh7! z&o3G+F2B8`BfC`#WS;DQ?8Jxi$q>-FJ~3{GTE8fecA}Lh?Wa4$97^X7Bcm|mRVIjI zMZYy_3a9NTyJzFzC|<2LTh^Se zKws<7e#zsNvIa(av4UVsu-D`F$Ef%T=~B-T3cs6Tlp$PH1bzB{N2cyXy}zCudY(Ar zA@HwpW!3ChU^3R{SBU8|Rj^0P?#YtR9%`1I>Iy;)73iG3Gz1IrcRORSFMCHZN4V_j z=#*wK4FX}<@oVeWI=J&ba~a&vLf_VyTT%#)p{bJaWY!%kYHFZ`EjT z_~q%hMU<_i1r?xwlLhn15VnFQ#IqfLz(}7f>2?JAPi?CEcpJ^Zpy#GicDf2Tb&O|S z-7X(Lo^r+XR38EVs!uQ#sy^;gP*j?gqZskG#kNarXIJcLjK=5pC}Tm-N$h;0+{JJy zm)=K5ng2_%&2M=wI)wP3h)#w+r!&{P4-{HcRKv>6CobcEWm!+tGbmg6ZSy)ov=J^EmY&)QxbnNWB1#yK!DqP>G~Ucf05(u_)KFny4*47W+<+{@pz`MBNxltVgXt`w;B8D16NJQw}}3mO3vW zUvS;{M>~KV5{5Qw@7TI;+Vr<8w%R~z7_0pWRJ0N3g@E-YcB7n|J=OG(H0J2foVa8P zNXa>+6G_K;`s59n1kHk$VQzJT!gO1>JNDXCCxB6Z)AP$O^zV86dN%Bjvh@#~pGJqV zEryHAUm$X+<4Hib&Rx8fE}U}0;i4i6L@!>8mt`MRS=1eS6o=%azs4cyuQZ7N)& zEJ)dZe;-;8E`{{ZDb)`Fe46zw-1-N9>SCyLBM^sD(lWtS_os#dZfCS@MVI*)#BwL7 z+@zJa0DAUz2Hi9Q^c0C|Coa%ae1S4wd%$W#eX*i&%W;7XdsV5 zSrt6AJHw2P<)<||h2VC%3j~PUP1b3$FHtgoIO_lQts-N)C2zv{-iNX_QQoGPO?ZNS zbG?Sfv}(}nwJ0pCq!A75DQ~@4EY`_~JWLZct>XR`MjTr>uR9Lio}3xojN{Y(1m2g! zg#RFxsHyckc*N=CxR9$Dh(#&-WOSG9}Qgm4E@)Smh`~e6aMw$3$@8sgr{nGq-LE2yCIj_pC5Vk&MOtf}c8h`Erf6xYNfIHDsoRU%FZZ9qPOVpu4hYBtq4W9(PocQJJatcB!(lL4<&BiQlBC zwwya&XUd6UNaA9Y3G#_g)rXFMu6?^)vwc43FOa%?V~b&DOL(#Yjlxft4GCHL`V)W4 z&oHL5YvscmUV3Z}gYmZbq2DrLl?Q2?guuPJHgm(`{^p5RhucNt~iE z*~d(+qu$4nXBoD}VZroL;z7JdT=_cU9deM+1bs1UA*duVqgzvuk*n{rXdxMwv^Un~5Y~baE!uZLD;CH8gluID@f&wZp8eKtL$A>|cmW z1#-WA+ZxGfttsJ&j2oe~WJoyJg991R6@j4g2}8Wgv5}6mr!6_(9k2UV_$rpI0Nehn z=BX=&!Ja=U*@{$K+`jjHZ8JM9>SX}6Xc_`Eiz;$NjtjvNEN7L|fp_pgPKYLdFnKxW z7ktnQ`sQjeOTN5+peWbP-~srK_4z+4I`Gh+lgh`6>`E=MhX1drl64Rte%5duCK4@82o+J4897qCOINy*Lhbjy=d3-b88z@kfwdWA0HX;$!`M}C&*K+~gdkgN3Ui&HW z%zuz2FB8;%@WGG-u}KecsWD^m8z(1EdQpkbFFJr>>DSFMhAuz0Y6Kwt9m3^++sr=R zl$m@ZH+JD z2IzV<@@5$JLV2pE|2Lu=eo?96D0zA_YHCd@*0-R}dp7(4EO?3BnxO1q<5iy^c_=Pm z+(}Y~Fg95XdR5beW*J7k&q1DqIbl)@@{ocbSsCX?e7N1`f zgI#TZ45@-Y`?{PDwdyqNZFXhenEFUnqfGg?wc`O^vJZ+Rwoa!QWbfKfU3#x;SLe0u zN~pm0*gg@j7|;O*6`$xEJmyGn`pE8ql*>ZAzoT?Pe}xH;L6DovnQ zAjwpYt-}L8vKngRH;Xr7fPE7D-EQk?<<}*PBu7!5>+u9cYsB{luyCK5>dMtiGGL6B z6vHwWuY}a2_!YaEseamHxcr&j{`(_;&An)g!yD)Vo5HGt53N{b>g&P^i*K3P`AO< zOPRHK@hs#DPe8*Ko{GUY(81Y%MK9|Ef$`ydWGp~SGGtwy_MObbD?$bWXjy@n6E={L0& z%4x@D3c<$R?_N!^MUA7jk0<3HU87lXu;9yuQn4)A#SDKMY}L>Q$6D-vND4pjvHDUP zFQ#KDG$?x0M~`5mMe%3B@<(P2!}OZ{><#Gn+3Fr!Qf_jsV>_5%S|Lt@VWim^af&}% zJ@8_W+Ooi|%H<_;^^JpUe?D62$FyJD%>Yv+kjq8^K=RAjy5n`LD=#RFA{MpG@D{HHgnVVBgfr^7TaLuM)}r=@mz z$(0PHXdsT|3)Dq&Bw%#AQ^Fqn{hj&E39!^iqYAd?WjF0~6TUnQ@2> zUed!t!30IZn|mUzYmT{M&nQLmH^l+qIwm1%G{Py~{9f{Z&y)q51!J2z&P*TW4V8{^Q1wO`0^-{F`w_zL{eaGQ^$haDkPD>?kM!<&ot4R4uQsCkJY))2#u z`590F*=aPe8rmWg7|W#`OV1Igf*W@aiSVmh2qSqi1@tUTaL(E~_n_V_Ef16QAa>&C zVzMNB)5bS{5|b*6Ix0LYK()C!o^^vtsB=Qf0P>4!`e^1-VJN$H*n4=C<_H%$8^!3H z$C-Bmmkqr>RBkeG3Qn}xdwsQ+>+@k7Fnrwn2%c4c5;};Q@Y*%^y*vtZM|q-I&q=TA zI??}&TyTBU8f#IA;X?eI5Jezw!6mG#j94^oqr-|*==t7+| z=J9(%JwW1X0B_LWf`gmr4u-&W$WPG6KMVpclN03T8p#XSQj}4or>fDtB5f!z)MOOR z6kgze->By&#oT|&x8wtMy7s?s)qvf^{iU9hQj!6()89eR40g=tuad0J7VzS$>`VKp zpo=f{wKeZ~b^GiiZ%G{%pUu>ad1my6e;do+RS!5kLQa-v+VahlERkWZN*JHJRVK3m z=jL;%>tmKl%2x^g{s=4>drkpV>!iTEXp**nF9Y?UF9A%UIh9gim7PCdR(U-Y3AQBV zEBo7&d=T>U${z9el#DA%_7x@$qm{S9Gi_^`t9Jr(YiaLjZ0&CwYFhCR$XV6Zne_k8Lc2u^zdHx=Dl39GW= z(>a9yLgejeC>+qNs0FjO>X*X-x;+wqYkb(~eT%Mi*Y}r*iLBxytu2>G2gRc;GQ@}C z6ZKxy2Qqul0%GIdu{%f9IAacw?AG;dkMgf{^o7^#(8-;pu2AN*6Ee9-Ybm%M+85A^ 
zy*d#Ly9w}Co$ll>t^#GprNfU)KM9ml*0c{qYkBn3PFpZTQ=q<=Ay=e?XJ4RyKFI*( zA(~P)g#!+QAJfPw_b8M*(=(oP%s)34ZABwEQpiE~-lEfO%2)2Mj%;fHd`+0hA~xE; zzA}7L>YrEWrBc_lHP4IeG`kwY*txOuMifZo0B^ePnruo)fecW)Xz7KIzWr&*X@S+T92!`M@~xfn zQbZ6n$`U;QiYs+~DZ1OI@X(od6aTJV;_0+eGk&{dZ0X@JUf%^BcUR8v>JgUSk(>Au zZuQx+s8LU_j!NCX-?rf|{VzXa@dp-PUudwY#};!{Q*-nE2MR#onVEopjfxZ$M*1Os z)6Gp6%32-skH+&uHq!kQml!pFkD{)Aqn9aoybgCR9;Z^5Jt3)E6J0Fh?a73=cp+HyY;pa&nKcGy*nqGA? zwqCrZ!v$LhwjsZNpXHPxe)WOD74;;2KIk1h;m&r<)e6%j1e}?>$q2gL6ujSqCLags ziu_CarIO?+<=0_js1V0no8Vq9(DhJm&dM`o|4c(x2_!Y7f8T* zy&1V`o9&@d5zR5Ka*WtIgmb-%M(6IBM0Bzoa(bSmnms8RoxAHV8OZ9!jVhq2tFE&> zYNgfJG%0fqNtw@k*kNu{#hB7nDk~(!8+4CA(U38hql}Xe@h`#7%Vje>;wiHfGdvqn z#+g&yRWq`G8kMBfJ~`2ozz#Qs^`Yu3ZQ!IfWNJ;A9|@Ji*bJ^qwY-|&q?JW`sefp{ zX6`y2^ftBVBNtE0V0OEG(OUd+syB{9>`r@CBSZSdLA0yVoVE8U3)y`3*dwd8J1J$G$%hI~VOA02snueRevV}#o-D68`sG7~^E^KuL=!2VWla(0d*Ww!1S zM-J~wicb9ORVi^R0=h3V;dE(rN+e#ldA{DGmgYrFB6r#u)vB6Dldcofs8-KZ?tq)! zTsA3xqEa2_Kmbkj9dJ)rl??`qI7OH{IK69fAu?vNq~V>|p^$duiD+R4J<}H7Bindq zmc7U@uC7ifb#7;GzN>d{98X zIDyz-4?%qD+t+Ij;lOh_x_tZA6+Hrax7Z(lZC<<8(gBtT36agzl=)rUUj4z~k@7W+ zMaEul5u>y{)x;HwZl}=#pq?8X?gS`l{V=DHT=5zPYD2+M^MC2D;p5mSJsZECmMJ7E zgI>_~M6?qz2$#}Q)dthwV7Y+n2a(+fKC)@S6zTkRISp(gKchn$ z4GREdK%2j$grG2Ylu8xQ;|jg?FqgB4zkV_ce<%fHUhKW|J?l;(!(oY}mvy5H?Kzvc zrX^Y4v_BrIcsRSOc%K4+?e=~Md93DfED)5)l|M(i%?}Y*4BWRd=PRnEJT^d=o8)Yj zn$1<8<3eHT2M$LMq|Px0e@75{RRX&OycLc0i;7v-OAJ3x($L%fMx_bh0(4}Wr<~4s zf9;a~^}infj+6Dttp5{e^YIkUmB%Nk0=*DucC@@q%89)ED*p#VRWIXkdSJ^b9EL~i z+e5&EW3@^IuZrnued#Vk+viziJeHOZ;QJIBHGVF4=MM+Ye9HJ;0Om?R4w{|&H?9|R z?L^r#=($BO*3T~~{^yg7ykDi1@GJMDfBx9|;1ks5&fZ}hV?q7F#B7>c#gFHKktNF< zs{APdPziE-6x1+TU}|5S)sWiLitpXF|LTeR^=ai?tl15^dA}I(ZkC21EKnKFV3Jy1 z*({2Wm4Wx^9YYv~G@1qmw!ZS6qj<@cp_1>jB(MNgR0?g`qdzVl&f*mGGp^3{EU)dW$U*$2k+V?Wtr>Um9+6^ZkLP`+ zhY?C}XsnnkGEYQv!TOzA6#vG$iQ==pMql2mf1-50*tzT> zBt|gkGYJI5$I6Z0k}RKPy~zg%691@9BWVzHZ&0az)oUJH6yQs>4LoBmCuYTZq1CnP zGXvAoBZT`z+6l-}3e32Rp=og+s4+9yOc?JMH(I)GpeL@|Em0t&y;2tf29aIj5X|IjLX1Z zVW9DvH^2&Qi{a<|@SySDvIXB?b#yH+9=_IaX)ucVW0|Zr#)R(W-$j50QQ1$V zncVd6@6ic==WgnbjYIy)XU!De@BAp_<`2$uRGU+ax>Vqd7{i%hVM>D44G zQrJi)pfI;rm$(*pf5+!A{-LT-X##&|(Hwn0Yms(UZ^|Vx2r`7hMppL4zi)P)`~`O% z16^RtS9BUc8US!QX9Y}24O~3z?@*=P%3r-XeVoTwKh~g?IbWJo4;z^&u$ClAQ!gd~ zsn=q;LcaF145a=wvkh*QUmx%}qPZT|aa{ou0tKKmU&@3&fAel_8gMr);%xqOfa}E9 zVpWc`VI-i`KX0U156RxK~dk?fmtR8@mGe9(F{WTOMgT zx$EF!3$=Hve-F6Ld%+gS(#sZsnSwhgbZU`m(!@B*g@BANE9i;50KM3Y{r{C5s_eu| zV3gKQCO)pkRo6c=gH4{RqCh7me$UjP4ic z5XN|HFQ&hRE^Id?otO7se=JXe{>p6Abi$Wp&NRz*e_c~xlPmL4xm0Jv^qeEPOP1UQ zr)4Yvgs77bjy3G(@T~-`UeAD8<(^A_B?%ST&!?TOaT;q}vHSMrIZoqwa5cCp9MHF< z#68R&jz$rk+W%!gjh#>XKxJzeiuh5bKGg4V@%;_?nS%c&d=zfs$Ty(5ZMV1@3+N@U zdVw@jf8y1ewQ^V3@^rjCZumtW9{o#s%@dqY67->FW2{P~u3 zA8C#++Z$x-{l3u2dK;ZM4@#`lw!F&v=%jHAFi)ls7NHHD-nc|ZeHJx?nfr*jrU0?Z z;g_Yxl0j4rdYfv?;=j6=F^?*8`(5nda0WYAe=aL_tpvPCUSI~3ahdYv-2gVCU@>IQ zxuuUV6xs)va-Y{yF_jD;X@DzLj9ZDk4SBRq%YUh^S_99ESp(gP+R&RY_cI6Xs)@w7 z`}+jst&N8cg?e3taOl3u^Ko684-ERj$ee~@VDYz-s}l0KL0}CQr*17wX7MM4du)4k ze|xAnqxekMDBGUG0+@baWxO4z5e->6vT@dvCx^zMSDBSJ zS@dnv<6$$psKGK&tQ-YgOP1fq&?&xLf0AQd8r!7*NgJS>HP(ZPKWhx%P$kd+eCZ!4 zpMR|V;#*Miipi3qZ786pR}4Y@i-cZCaj$Uz`g3ebBDqVyM;0WGfA%9Qfr;0Mn1;`G z%Rn;0xVxgY(BxnHCO`b+sCF6oGT;!40S$~-gpJM@KXa0sMc5b8eP5)^op!Ndf2O*! 
zc?#%4)-wb>3#05T5p#Hb<6urBWoDt&=Aw%8tJ_MjdC#Y zP#EzpeIpiVn_4XX_R}$wJ}6Tre}>aAdobgo^eSJE_fA*~;Pv~>FY-G6P0Ux8tSf9~ z)^mpp5j)&JfLJcv9Yq`T)If83^geU#vZXA)uC zMOYPyH9JFH?v-~U*_l&A0c6|cnM)aI3`jx;6Rc$?Tf|7g(TGmfF-#ksrNG9oUfp%( zbGqRfH*K(y2?Bj#<>APSL|hMo<4Rj+NYKYW9Fglesu|YD=WDr(3h@8NJ3mq#XvvHS zw-*1cp<4ud7E7#I(LEAWe+$`m4z|ve0y`ywmnQXgDqoLmpR2xbNLvklN^qDxaW_v@ zk{y+TUI>&d9ZO8QC2`tfeJbh&R{Zw(s*Ke?wD-uu6sz~Z_!{+1o9OY&&#WgSJG~n( zPrv}MkBd)0z);)Bbfl{2Bbs}xEvNM3RYtLrv zocRDAJmNu4%jPbpW0!t2?X<3*d%k9U7H=q|Yr1wFzZ9_Ly=VBzJT5(A`>*10gI1y6 z3d%i6_@!-DsKkz}dJgnLfPg;8+0D+4r&{Ll;$2VR=2Ym~whhMJN1#KC2+0|@#z>y8 zvxMzverevaZ9hE{f8aY#f_#*=iOr;b$RmmQIr8939w9RUagiS+SlwL@dMA+zxk|Fj zn;``srn#^^>oJ^&{8YNBotp7FQ1g4qhN~6g@?2-(YP8xaXE1^tHfJ69ecRMvALfrF zh%t88%`y%Nhxp-Fsu&%8#o}zmVnzu%ttwOdg+(dokMY)Vf2=Bl8!1|!YTf0G_O7>3 zzpFQ7LeDyFSeqiRCFPB7D^;GT(+fCiCq7c>;Jo7(cRS~IA8 z;Lq3G7c_-Pf3+^RWsLcm?Qt0^ch-!J%y)bQDPJeY>#E=EZwiqA|&A z^3F>S9EXRDe^*3;e8V+;mA2hWyhLI0<#qq!ED8<*3%xnXr-BDlr01UC)+hPSp5T~W zMoANj_7#ytmfkwheVHX}b+jimkKW9fl7$PQ3hmKbf1gfxMz5?D$Y^~IM$*4wqrZZO zYM}Mv{l5%RQ>Fx&U?(Xj9{eOi0JAB9B=sMivsry;$UUSVEUyn)M2D5iwfa>P&F5)FJdZJ2g z&NI~gKDh~4|MpBH{GN7-`R)i?24;)Vz|JFIai(Q6%Y+)2w-@|UY$o^l6D~z9!N}=w ze=LEzxY5Y-!_TN~QWp3JGsL{zzg*Bsd0$tx(qF}&5B9!(S!x41F_k-LJx2T| zC3RT6pXc`m=0l9tXs}=Kc)^#q5oa;T6{6jmxxJ~%T1uvbv=*PzC=n>x%eH+gQ#Xsc zp5xbH%~S@L{XfVkvQl8h0&>rF@N>V&f0<@BPzMKdA}Ep-D8*_IXi;|6dV>=4z-J9) zG_zBSX4@(ML~TI|UDhvgn719!ZLgS9je#EbQJ}K2b=<7+q24J932kH8n2$05JaF(@ zY}KdU%?jUP^Y|F3>}rmrl(7jBRYnZ2n?6Q?|36mJwbev6@5;*uEJk_0vgys2f6UnT z&n^7;ng(7`?)QSJ6g4jLxYl=%_AO-)uKU(ebg_Zw}iLdRI?B_#*xwsXPu*~{aCrVK^EBKO#e;aDSR5-5; z=k(D#754SygR#0luNQhiHrzkO?1Q-aAQ!G0JRM|uysfUChP(B=R|e1eu>atW;ZHSc zIY*yvo1T2)+jZ)|>u;DU3X$?Hf3+su=~E}b7qnd2tj^y(8y>I|YN8{f476eNFC#Px zr@Cly!P={-WDzCXQwHBte-1Gl);E=jf#0exr8!B~`$Woic51e;ti>#ayNwCYYo?l# zr`ew_r!8FVWh?EVR^S#_!J|LesCX3!g!G`HG<<4OSx`7QnT+XNj|C{&n zpV&cTjRB#Y|En=1j$BQdgC|0bSUC85E^kE{8eIB}USCY-zFpEdUJBR3UM>>J0W+LX z!U1GN^8!0-NYl{VbI9Xd<7kOnj=SJ$(W_ zc<24vZEMzJe@N{nZ4>AB)h^S-S^plSw+1s+%Ty>Qd15Ur6zuaSkY!^nSdo z11DMH&VTN+3eGfE3mgX^8Y1JaRtt|%B+b>VKQ54gU+0gtP}OqDQJ8cI3!Z}h<;F8Y zv-$~bJpehffz_i&2L8n4}$Gf6!_0_zDPxcRWmp&CJ{^BnrVy zZ6paeYji;CWvhcP=(FwkLl3oOkiWKQX8ZAN{;4r{%$=M-CzNyvyf2+RvUB(nkt!9Q z(f9*fTy97T&K;<>nV=SE4kOzywlul9xpNqLMUVSldLid{zt=3$2>#kQvHF9_NzsvV zd|P$&e`$fBcvrvxg$sGK^#x{|O`t3`9BxDJGCq#=g6=erqwwz)Q$Vdl!da&5v*68B z_U%KAjW4zCXLA0%{z1`Teq)Dw@Hyty=!5#$__7NHe%Ex5M#GtF=a1st8}FlQ_V<7Bq}$tO+TfejXPbm=RH#erav%dt`{yhw=5ZVv(aM8{N?ebVNZIu86>9gtzZGbYo8X8Pkm)wsg7e6(Wv zf3Hr%gC#^QYdd4Q4%PJOw6sUEpL@(+9YR5BQLMcv}@o*s7 zL^}Q=y9i*FEQrGnDLn>}C&tWy-)TEXz!I&?sgc)3=o>A=uwEI>Cdk$v$bdPTe@D!p z3^+NH`(lHs;Of4A!7;))G|mf9OkhYs@>V<$&9W8UEh-h(9<#Q6Q$QLz(s$wiVI2g% zTa;;3LMlsAOY5Ith71u9`nHv#MgHSOLW!1tSk!@CQHoYI$m!C$*WOkG$2gc(b zyj;)Y?e#4)|K*+}IAv$o)Ipy;f1q9Fn7BYLfDd{MsDtg9>?z5otK?m+6`}1zp8Q)F z=Fgem#pnR%R1Gn-k8<}DeM~ecdU0qz%QQO_80|CarkGcJnMQ2pBM&W{j=563@hT%d zrS6EPpFYu3e~1JGs)ie--@Y3pYEBFOKZJ{kqBv#T?vZ^S&7P&Y$z(mtK_EKnrPf3X- zUR!8h{7jxlUT&TFxtZwGa51qj^YWg1)yB`(U9`Wn{)*b7HCnsZf8)I_3d_^|06&VT zPEBF1CXv*MUVj2}pNhC9FLJ~*%(d-&DAgYLw>nsTi0Z~uLnUSHa6bQ(vS{Se@NRQ? 
z@*353RzY@!t#P5!13CEqe}~KZiZ>)gz8VDb z$PfaD3;2v{f5o=(e@uf_oXZ_-o_!?De`A-UfQgm5U3(%l*JS9y5S9l-kS|b z;uKL&Rgbg;4v-;IsHDPX0mnkEKr<-VfUHvnOd{H<~idba06y zF&%!y?fnUn@;tCGywf)KQN0yXg=LEuXtZehmXQ9le^G&LSoq))g*bn!I`K);?F5P+ zM|b9D^nZwFxt1Vo8aos-jwavrzL!cIlMox~(I}^~Gx8-sgA{K;;v9Hif>48Nqmg+E z3wa6fiX;5gJ^T%E+Bm_VbRC5-I$V4~3ZX$%(Twpc`xJaqY>^`Rn&eVd)dYpiaGcFZ z0Aapyf8_2zp@kOL!d*N`X`TH3sYPD$?uG8msVls=r~n|FYGnWciY>%?t4dBQRp)v^ zcAZ9nQ+RV#dHT8mBN6;;5xr=vfZXVeHvDhFP*Pg~ilY>Uiq7-?Qr(U%>b_xobNA5) zX~Enx3k(OQx-a{I053w3-1d0#?KdVh?Ww4;e<<5Mzh-(vI&9Tl;b#?7@MGNgw0eYB z+U7sY9JSZNVF8=r(-pcnXcg}k$A816?7)sRrw!;>(-{A-O^9%wlAreisv zf8;0jJij!5f|Xvv$p>b(-@lOWIX(hXN|dKtglZ~eyj1MQ7 zK8@h3OgbW*-TA)PZ39+31?4J5e<{jOjlEAZ#WZS~RnkiM+iL{|@!^6M8O}CO6rVrK z12MMdjz}Ys2+OYTJ+-s#-ltsS9l}`Yf9GtCOBvUab%K{;q*v>D51kN;IR5o1{u2sk z+pj`#U1rE3ZbMV=J_{u=#xH*p$adL+^zNPFZw!bI0CWF`%P-)ur>~Htp0rVx*HwP9F>lYtD*A;u?XfKnynvGd zq%EnJdf=ZfSB!9lH0eZh`_C)wHvM_e`0(=-yS`jhSu+~CzDEN-x%7CVf7J%r#s(Q+ z?#e?@ZCLT_5y=y%D>{#yTZaF08pdYa3m|CgAu|LYS<_Vu@7W%pj$lNW5$<9YUw zA}3VugLh$kzy)8*xVg%CYlbN39r zqG}_l!{ruvVHo_So?2svFzQ=EQ#s$s9&i9IPkS$CNBb zhnEwPev+6zN^#DUbKg+3ZlKyCPnsE@>qEB5zg5oK;9u#Qy-*@iG1;UxoY1?f;5}c8 z?7w}d)#)a}?;TXcf0e#-`QV{6%Le?y{{dyfFK~TncA@fqw>iYBsqoj*C-;SEdZ6h_ z4(P6+UpqzBgJv9t-$;lf(Gr|?7!eO5%FmYtudl8lCHFZthHmgT6NmaUBgBd-b>4>B z53?u2l~!kf{PO@)VZxu?+QKUP2wTqqCy4++hZ+-&E6`BPf8=!Lr9BFS`cWXkX#cRP z;!xi{Mmq)GIibMVfLkzTm!f3UGNI&mM{rn)@17z)c@5%zI?9lk%MS)Kolil+ZvT9b z1CncsyJ3OnmeM}MF3z=Xrc~@79IVS;C<6y*D2gAk>Su;%FTo!mF_29Hw*o6gI<%JD zgP4?gqltA#f3&ypu5K`Kr&+3kc4Qf(r|1`~6t6CB%zpZ3E`S2bw}^cma)~P29gP4w z{a32UmRw2>9E;ny!lzfQYw%yLDJ%;6`s`@s7DE&&6#l*e#Pzk=xivXvGPjTYzYTTN z))xm(geWP_WiSqd^%sHw<%7-K+6g3cvlY+Viccn=e|9N|w^h@2*4v3_mVRe~zc$Vl zkQ>KW?pMnUI}O|5CM}U!pl+~6Q~QIAL~In#SI?!q8`Yp8k?Z|^S@EYKOV%%zOjTg_#-O|@H(+2QU7~lNFD;HV)rTQA?DT9h<$v_LSg25y> zG-?Co~}^- zf4>=}F<)XoRe0Az>n*ea^+Y)fWZr?|YQN(Qpl?4#vXcL;(N8?H@~|M5kAwt2)GRDS zb=($)De-6A9{52RBZJG{lk3gRMW?ybd15h!0lD-v{jfK?IxKpkx#gyAq6TbT4GbwA zPX>pLs4&N(sTp)j>|-yihoPho3Ocv2e}ebDO&YuyS{9}>wi>O0ZTg|?$N=LlS78uQ zGQG$U^rK%{llY>Sk14bZ^Nao3C8j7f126)UxF0`*qo0J3E2&oZ1LXtk&XesGIToD3 z=~Q@875M&`nr8gNA&D?FdFeBmfmF6^wMCmYT+7EFxH*~Y^RE(3t;%;qh(*|Ke>3hv zX-;;qz|x=#u}r@7D`{nyI=|;Zs7?{ZOJs9BFD8)(tllj6&$xga${b>yu5^|H3CNd2 zU|v8g$xt)O{I6Zwd{tT zAzO4q{1v(G)u+W$3VtazlX_s3e`Qs$rZ8kabbp?o#ARKn4{Nez7orXu`9*|D)cFzG zJLi6RL$H%3QR0a{1^5z}ghm|>FQIr!=6HsrD;0^r>?W#bYj%LmC*QK=0saydo%X1kiac!I zrWU-fPjLGw5_Q9S?~JT}e;BdS!x3!O*~Z)c;ibQWrNsVut5$c%a%E)2oCluM7_-|I z1tr+ z8l~5MI}50F&lfN@I#;}uz#2lWlza>(!WD0iyfARW0nY5~A4-4o; zS*gO7lMvR%*i$|@>-98P;MH=g+EVJsK4Gofr_whkJIMW;&baMAGOU13F(qm| zdM~h*F5T*r8EIVwjA!d~44oZX?|x;Hph4?D$u#}}bB6f2xwi3u@n>udc$u?eNV4;n z;F%WlZyG3pLctQ!e=m(bGUY;5(u-B^;VlEANL6-NALVV%uq8Np49LX*vsw1ILn&%y zf(l}k^82F4?vy9Z@s~e37Wjq|7f@c{YcBcmk)WZg*Onm&jJ!=edF~m9F}U!loxBUK zN%oGhN3onO%9i6+)c$h?Hcp#6Lcpg~&3Tl3dOfiF=ru_! zPYL+hA{+69-^`eGbf6BqdCxZYv$JqL(b1lEu}ucF(D+2*6BI|$<+nss6Q|{m1V!nF z0EZCi1BB_#Xe9wcVKwLQokP9^WSSL^Y=r8@2a@Y7U}jzp8R<#%j%Un|S3Rv=&? zpxaM)32?Vbf9^9apwID=WKf!_O&srgS7e0Yi}rE=R`qdUd1ueKwElc4O=et`3v(;? 
zWre4yCIAn0o(8;v%_r^#qeCAno!ysz5ViB!(c!k#@T9y2gLbl&n1HF~aY+Zw6aDes zE7o5+czt3KPvAyTq*UiG!L3NZL+blv&U(z6YLmILe@FXyHs@7bDRl5vrnObLdr5v* z6{lik9lZPES&ALIevKyEMLwF{Ri;~%J>K`JX^GC0pQHx`0x=z>0QI8CGL8MbYn*wi zmA@euk^Z~M54pFI@~`F2JC1B=;QM1_Rs2gW1zPQz=jG_a5c6-{zRwx(B;z9@OlwZj z!S4_(f0L>>9R}Itntr4BB2LxXzjjo2XeQAX8B0Ic8_xi&#&hjK61ow0*Muc ze;+_u32HPWV8p>U%}vGbF;;>wTSH~vG!o2++v6ipj63gL05J*Pa5n?m1FAnc?vuZe zUWB<@R$tL<$pTNhzavr40AE33ul!!+0k^qxYS?r%HDcn=Wp_Z%{x>zBhk$^Ph%Jx5 zq3+|lp}x|45*($kdE2U3PvDp7>f}?+e+Xkn$BF-@<+A>Tt60{f4w#uXL-yXQc{oRp zO8&lup{^;XaOy3ns7_iOx(aBX){t^`me#7M;>#MT7bp?)uB&CTk)C&>lPPl|gEy(D z@a-OX{QljciKC8~Ns)?lNQwwAaZ|Gb_0ek(bHkrGro+rcS-?qgDqmV(MR?f(f2h$T zy3@ww>&iKToivJOx=~C)9{z54hlEu+#V6qT1N;F(+TVc7dVRm_@Lmgfm`xTa*5xu` z%&A^r2?;dgKCxa+=bqcFN<-b};X@7j^AQEWs2IWzH)H?gC6BvVN64DVUMy7aL-<~a zkAu$m!;3cfyG-xHgx_~7RUgwaf7(C7ZxPltTb1GPrwKoKo4Fd)dQY}|`W+YfcYT`` zNx`63@3I|`$MvELt+D>CBiUJZ+a>tI+cX$yV#v5sxY_HCe7zQ3uDLEh6M+$M!@XkxAX&}e^FlMCryWo z>0b9CoYLL2#|WQt`=cY~Q8n<_#(o*e%g}D9JL2;_dj7z;x2_m=1dMh_|A0c5lB6hI zdt4P`G2A?7tc&VX^^t2npi>{dyPd0y^DkH`{C-9V|KPJ4cQ8{qx$4wOx;r8GKZ%e% zl!IfwZsofy&5sT(2-h^^f1*=bj`^tKg`0?3X>6~`1oDXMH!@5I#N@(T9+ zZ<2C!1aE&-_Of`?RlMBDe=CNj?hotbBzWLu&OzV5`5HzPx|jIQ!T$4}CudHC60L-v z@P5n*UckzLmz|o=fQcF(G%1?wac1|Q)&WBJ_Jzba5Q_@Kxn5Xje+S}*fv+PJ_MrWy$ynzCW z4+!XA9rg>=Ph#2_dwIc+e-gOrNhl_DH%65m=6rwa1vKkb^XWRcmhkns^ZZ70pA}liDxKXi93^%Ev+1Rr+tVH{V-#td!<4_ z45M_X$Z$N1e?=L+YF(y?UCfvQnd`O$bmD3g`xvMny5M8bZ@Wqt#TVQBbZ@`wuIy_q9;8o@ckyQ-MGZrYG5LtD3~ES{Tae?gJf52OzB``&`?~*C4tqKPXz=gj* z=~3z*_H)GhuBM*3?BbF-uo0IKE$UQ6a&)@g$|*Zh21s)Uj|$DEoWXV(@51y!kNoNm( z?-hm%qJ27$ftpE^l&QK328c+b&nk6J6pU59-Ki+`JexrQDO5Z>Fo2k#u!06XWv^rh zqFm$g@%-rH0H=PV-&ngM?f+VJ@DI*#I6(8Of6L5f#yZuXQIKWd^?`8?2sFq$CJr4D zjl_D_)`l`BwIS|O#EORO6N+35RQhO(S&0*sIr+ut{7rShv;2x~)z`Fc86KkNew7Yh z&lM%WXhpBA-y?AAB3tjbP_)RTVze&4#HbWEbU#S_Ty@QL^Ry+w$EP#fZ}OWc@e*)= ze?(HXIPtds!Wl*9HFk5qGET69_){%Lwz`p(;7Shs!0T_~Eutshk7ESYR>@@Dr4BFi zk=Yc3AGFD(Kb6H_4~UO-E2Xaf1~7QLT~P0T^#uSHLA!-q2K2N(O&AnTc7>-?JUCkF z9fP#=`)y7)@I%c8gh-$|Cl>8Tyq(n*qaqyUpUUv@L}w4 zx`Zkkel3&$@NpbEi4w^iax8rH&(A<^35Y>s>YH3?Dpy~{*wch?@F!O~bt!z&#V_v{ zV)PBPQpsKjEVvhDz=2DErG7*PL7uRyJIh= zq?}_*UW)h93EPDxKA-FIiGfSV<^}u{-@gWej2LRz-00q?t8)P^HQJ|IH7wk@;IEA{ znCCTDsT3z>8Na?To*bt0r^{c~f5G2Uv;y%Z!>8PzJX?jVdqSmx3C@+t1V65w08zgS zxS|I~6D4b1u6XbHuJK~B;)*A+e!O0UI4Vq$g4YPt9t|<{%l(l#@Qn_13dDsMC{~H> zL9Y~c-CngZ61DK4W{+PfYJ+9A+rA(R+cf|TxEt2R1AnEyT+t`uqe5$}fB)iv5e2S! z;(mz3P$z?rXQ?f#D{NrRmGQ>=C?QKpsAc2)sYmah3t92k%jK^DxY8d96uWWH?0+tD zp=X(~$o2z>S$yl}+%jTem6xS0zExL7qm@=lvhyvzsnj(vyx@l+#U=1pt4`+h3bMaK z2ZgGk>OPHAKrA8p>eppCf2Mw~(Csy)QO13J$=X^u|MAp9`v}}42mO?vDEjR2RVGW! 
z$EfY215EYJX-#oRb_z^G8o+6)6v(dA+r_GnORu3Llq8I+HE~5`S~k{zJ?iSf6f{TG&XHFf2p|ezNY; zejr1E4#TC>nw@#57mp-s8vLA1fDm@k5hn*e)jN6&#qih$Prj^P;djWP)ui)*ys7Q7 zJel9$UAt4b2!)*)f85qe$SIDA9P=S3cuyAq2qtj5qu2fyQmK-5<6D#nd%ejCDXFJw z>8!4+uUrQHcbv2st!H9FfRH{uL-?Xk`(jte8}CR3HKy^5hp3;ykXNG9nS1>$K%|1t z6OePj0gQM?>Jcv`);CF+6sTUXmKKY=@2x(5GnQahsV0LHe*phryZf%mhZ}*$RmHDe zdPaFhztRx*8%6H%P`jlF7G}c=QEsTiTM64w;$Br13C5}b>I^l7+>L+e+^o`J7rWo{ z#y(N(brP1YWqp~9t!W3p{Zr5OTCUVz?=;y>L6_C0ZNAYPRXQ?5TBVf~G0T8y()im` zb|u_!M5vm}e>?sv;`<`7hh8C<%F6;ze*mx3KbLi4q8F}E%o2e2?c?nfSrK@xPeU-D zi&(jcOWO1A<@WZ|FI-hJDd>JXI2{!6n)7EXno1|3t_tw4cu>YK!rCbq|EsBtaVOGmxiw;j!~K~!H@df?DCR1af-1eqbctF4ACUJ|V`Iqg0u{qnaZ za)NB}82DF=$BEyI^6~LPvvMYT#Bw!tNrK64dp$*H5^q>MBL9K*+t2jDZ83LWSnWm6 z=(=+Nf4AX!?jzHKO4syMfaEb>`YTU%2O6$#-p{DYLAY>1@KYGbhSwHZhtSyH+_!9}U$@~X0hVe!KP{`Z;AiyRT4@`0|Y?N3}9*Y;m5!$SUjV#1B@FV`cUqB8xNO~HNf{ybPuNs@z=*Mu$b*wEOmN{?*q|0+Uexv;7KGkHj0D-e$9Id zeGy2KcO`)D8+fh+PM+$Jq#t(c7s2q5un#C}k9v2lUFy)Ce82+4(c zA}B`MO-YfT^x-y{5@LlrM3wxuQ~pJK`8$>*p{G#{mPhW;`n@z z#MQ#5^)EnE==^Z!F7L;2-(`#093bm5;zB^Ckwi`!bcZEPst#=n?Nd5Ml;cdEM8!O-A6${j%qiN5zg4ulwhA!JP%BQ(7fIM{!%fY_2IT%q6Rs5i znmi0cu=o_O)900};y)qf=W7N(nve>ah0IZ~`%{2VS19#&`cq4*yYCP>$&`(cefz!^ z!cU~!Ow7=VITC#}T$iE|*T-phn#bqPEsetNK|xE}BWT0d%jQ-Gzsh*} z;bH#W#$cXTFVD{~1htR`hdhPdFc=4qouY|N7n(M$@w zDk8#16RMv-nFW%70@v*eab#4k3E~Oxz_nZ38DKb)C*~OG;Ap7-AlL)Z& zb752&HA|h|8*&Kc${Zi=dE@kiH=q3pOF@BPClmb11*|6ffB%cvgZLaT--I<==O(H4 zQ5WV_&mm>;eg$`daS<8+#X>8ZO!0-ZrN7|T7GN$-J+fc+K%R|+AciA?&WTbb3*DTE zlwrLvo1Edj2Y+%s^Et0f(fo1&c!*%QpkT?H43HJ&EU}85%^nIuTa23pO=HIq&QO-d zpLWm!aoGT3e_c}@@rS7@5?v*LQJkaIt#dpQt1t0#QKt@Rg%G?1HfDIP=Z?&Nt@TEk z0b9k&p5D3k3fL6azO=;j3aTY;38LertqQ5A=F42@^JJbD0(!=C`j<_fMGq3+t@H^< z`0^vj7oj~L%8{?YUE!E7fFE3w3+7}UuHX)vWu;1|HgIjdg|AS6!w`|Bdz|B_Gc3)O|P{3%Je&1vS$v0msJgN zSX}wJ)ju&PW#h*7lLWkDs#zziGq%pQXWN&%Wr~BJ zw1|)3qdV%KGH#nGt)O}+rs(hQ!tm;s3=qeuf8&EUrs<|^Tw9gM#r|Nl-0ZNpiTg5q zo0O^nk{oOksT_N_g+5hn|HB}W{r=4J$>i$bY;gj`$@eoi_%`)Um4^WsG}Yx7`5>Eg z+WxataC|gDi9U{~?)$tP+KE)UC7QZVQ%Fadrn~C=DH~u1xd6c9&eqMVo9zT-G;C(I ze;Mg2iidQlz$&a`e*_;xT$mg6JRWisC@Q3VQi`b56twpJT081>`A@IG2vTQ^XJtH; zOjBH{+0uP^Y}3TmA0WX)m?m_J=2en71d+*^YDLWmon`7Qz6Tn2xi9g5CmZO`*mS`{ zHPV5FMY_E+w1-N3#geP$O{`_~1-*V7qpx8Rw*^VAf429+ zB2Rtm0DolC?2Kp}Is?NJwB(w?5Bc86*7FjGx3Smss8E-S)3Ocu4g6=G+7{BigpJl= zo&&(TwMtaPM~ZbTy#AV|0jhuW$HY7K_AJ8mL9Sn8JHSt2>VmdyZ5m$Wli5hieTbE0 zYsrsTa%dIotx6>5u-CRF(3o^yK z#G_T}T^6?(M)1x7gS0fu-^liQ21RLzoZIPuWQO-M4#^Q|;f8AZ%c$T-e|%X#`_-g3 z=ayn%mD$_gnb~A>Yzv!2szW5hCKr)kW60%uK86|{$}>F zR?jp(F10Xc-cJNry(bKSf0$V+V*@7a1sfN?w+ZN!G40|P$ejgpK*X0r<-8n7^gPUw zsDS@+;b;39M$u5OeuU_Svzb}(EUo3EuR3R=ylW|`wM+|1C@OLF{+q^&MqVRB7g_QO zq`lO|8JRSwUot}pyf5vfm)Im;BXKc|9zr${>XfYcPoBbMleFb*A!W~)=mJ^dxq(TLA6AsO7Nvm(HD3P`)Juz>sJ59aV<_jbB~M z_QGb9^9=V)b27urL!oOa=_&^bcoRnzq4+KKcL_u-HbowCf8(ayb4icH{K5&Hg*Ma& zsUC7eQ>T|#s z0znWOM`Dqa%dfVl3n-C2QuYsb&=uKYJcD0OTh%=*+FJ=;cxPxsdh+tK@NAZP!ho5U z{p8rheZfR#$2Rk-1wEriI}iGJ@YJh z`gLb+Qj}BhapF~Y65Uzvy>lg~E_D184|tzSihsB$e=D|D22J2jqGNnjkGW!nKz%Oe z``UseR0+2XAfAkMOXtw=FjHZO<16Akq)Uh5I%8i}xHGO9QNMz>yDK6m@%2~XDci{L zMMYR2{xc@~uwL)a>$YwY7QHYzTO;O_81O-u(_@PSUhpcJSpZo}AdZa{ABhGfn3U{tGoOe`IjE z_35e`f7+jxJ?G(5PFIlGsi7I+u34kAYSoSz$@5=mF8!g?!CP;pGjX-jt;4#Dnl5je z=PiA)&0R8JpZvqLeznHh1t-2-P`k!=`JqrUa{F-*F^H-wAZ$pJ_#;p!(*&EZY{$l*T z@8z@XFX@M*a$f7z?M%=6E@!+li2a{_in@c8a>(;T5qEcUD9uXuzouK|RJ-YCm!+zW zk(u@@T=k>fzuC8tK9Tz?FEQtG>_qMF)*cm;mOMXk$-`cB06|${!Fu2}X zmvL)a;BLuE&ZmqFapxGS^peXIKDlaN zZY+3fxN=sn>94TQjf%4ByW396d}02u`6yHU&a`Q-?d}y{D{M=SVqEerf=RIdXeUR- zxm=OcohJ``%xL_(wVb=ue(yY$%A0#q>=@tWm}N7c>e%@y=J%?}&-^DWYV)qGQ~#=^ 
diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 806d1d04a55..7b3111ecdaa 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -777,7 +777,9 @@ where (parent_justified, parent_finalized) } else { let justification_and_finalization_state = match block { - BeaconBlockRef::Eip4844(_) | BeaconBlockRef::Merge(_) | BeaconBlockRef::Altair(_) => { + BeaconBlockRef::Eip4844(_) + | BeaconBlockRef::Merge(_) + | BeaconBlockRef::Altair(_) => { let participation_cache = per_epoch_processing::altair::ParticipationCache::new(state, spec) .map_err(Error::ParticipationCacheBuild)?; diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index c5f20710347..44f4fd22924 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -1,6 +1,6 @@ use crate::beacon_block_body::{ - BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, BeaconBlockBodyRef, - BeaconBlockBodyRefMut, BeaconBlockBodyEip4844 + BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyEip4844, BeaconBlockBodyMerge, + BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; use crate::test_utils::TestRandom; use crate::*; diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index ec973b9f801..61bf56f323e 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -1,3 +1,4 @@ +use crate::kzg_commitment::KzgCommitment; use crate::test_utils::TestRandom; use crate::*; use derivative::Derivative; @@ -8,7 +9,6 @@ use std::marker::PhantomData; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -use crate::kzg_commitment::KzgCommitment; /// The body of a `BeaconChain` block, containing operations. 
/// @@ -256,10 +256,10 @@ impl From>> } impl From>> -for ( - BeaconBlockBodyEip4844>, - Option>, -) + for ( + BeaconBlockBodyEip4844>, + Option>, + ) { fn from(body: BeaconBlockBodyEip4844>) -> Self { let BeaconBlockBodyEip4844 { @@ -272,7 +272,7 @@ for ( deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload}, + execution_payload: FullPayload { execution_payload }, blob_kzg_commitments, } = body; diff --git a/consensus/types/src/blob.rs b/consensus/types/src/blob.rs index 982d67306f9..9b35c2584af 100644 --- a/consensus/types/src/blob.rs +++ b/consensus/types/src/blob.rs @@ -1,22 +1,27 @@ -use ssz_types::VariableList; +use crate::bls_field_element::BlsFieldElement; +use crate::test_utils::RngCore; +use crate::test_utils::TestRandom; +use crate::{EthSpec, Uint256}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use ssz::{Decode, DecodeError, Encode}; +use ssz_types::VariableList; use tree_hash::{PackedEncoding, TreeHash}; -use crate::test_utils::RngCore; -use crate::bls_field_element::BlsFieldElement; -use crate::{EthSpec, Uint256}; -use crate::test_utils::TestRandom; #[derive(Default, Debug, PartialEq, Hash, Clone, Serialize, Deserialize)] #[serde(transparent)] pub struct Blob(pub VariableList); -impl TestRandom for Blob { +impl TestRandom for Blob { fn random_for_test(rng: &mut impl RngCore) -> Self { let mut res = Blob(VariableList::empty()); for i in 0..4096 { - let slice = ethereum_types::U256([rng.next_u64(), rng.next_u64(), rng.next_u64(), rng.next_u64()]); - let elem =BlsFieldElement(slice); + let slice = ethereum_types::U256([ + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + ]); + let elem = BlsFieldElement(slice); res.0.push(elem); } res @@ -60,7 +65,7 @@ impl TreeHash for Blob { >::tree_hash_type() } - fn tree_hash_packed_encoding(&self) -> PackedEncoding{ + fn tree_hash_packed_encoding(&self) -> PackedEncoding { self.0.tree_hash_packed_encoding() } diff --git a/consensus/types/src/blobs_sidecar.rs b/consensus/types/src/blobs_sidecar.rs index de46ee5e0f8..f00d457afdb 100644 --- a/consensus/types/src/blobs_sidecar.rs +++ b/consensus/types/src/blobs_sidecar.rs @@ -1,3 +1,4 @@ +use crate::kzg_proof::KzgProof; use crate::{Blob, EthSpec, Hash256, Slot}; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; @@ -5,7 +6,6 @@ use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -use crate::kzg_proof::KzgProof; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq, Default)] @@ -24,6 +24,6 @@ impl BlobsSidecar { // Fixed part Self::empty().as_ssz_bytes().len() // Max size of variable length `blobs` field - + (E::max_object_list_size() * as Encode>::ssz_fixed_len()) + + (E::max_blobs_per_block() * as Encode>::ssz_fixed_len()) } } diff --git a/consensus/types/src/bls_field_element.rs b/consensus/types/src/bls_field_element.rs index 2f66dc0b334..3c7aed5f60e 100644 --- a/consensus/types/src/bls_field_element.rs +++ b/consensus/types/src/bls_field_element.rs @@ -7,7 +7,6 @@ use tree_hash::{PackedEncoding, TreeHash}; #[serde(transparent)] pub struct BlsFieldElement(pub Uint256); - impl Encode for BlsFieldElement { fn is_ssz_fixed_len() -> bool { ::is_ssz_fixed_len() @@ -45,7 +44,7 @@ impl TreeHash for BlsFieldElement { ::tree_hash_type() } - fn tree_hash_packed_encoding(&self) -> PackedEncoding{ + fn tree_hash_packed_encoding(&self) -> 
PackedEncoding { self.0.tree_hash_packed_encoding() } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 95a29125d93..9641b2059c4 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -246,9 +246,8 @@ impl ChainSpec { Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, _ => ForkName::Base, }, - } + }, } - } /// Returns the fork version for a named fork. diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 67f7721bfdd..e67389a384e 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -228,7 +228,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + Self::BytesPerLogsBloom::to_usize() } - fn max_object_list_size() -> usize { + fn max_blobs_per_block() -> usize { Self::MaxBlobsPerBlock::to_usize() } diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index fd3a43bfd84..9ab8faa8a07 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -1,4 +1,7 @@ -use crate::{test_utils::TestRandom, test_utils::RngCore, *, kzg_commitment::KzgCommitment, kzg_proof::KzgProof, blob::Blob}; +use crate::{ + blob::Blob, kzg_commitment::KzgCommitment, kzg_proof::KzgProof, test_utils::RngCore, + test_utils::TestRandom, *, +}; use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index c9e9bed0bf3..bbd5f6beae2 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -50,10 +50,7 @@ impl ForkContext { if spec.eip4844_fork_epoch.is_some() { fork_to_digest.push(( ForkName::Eip4844, - ChainSpec::compute_fork_digest( - spec.eip4844_fork_version, - genesis_validators_root, - ), + ChainSpec::compute_fork_digest(spec.eip4844_fork_version, genesis_validators_root), )); } diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index f2f885fd3e3..dc45565d41b 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -11,12 +11,17 @@ pub enum ForkName { Base, Altair, Merge, - Eip4844 + Eip4844, } impl ForkName { pub fn list_all() -> Vec { - vec![ForkName::Base, ForkName::Altair, ForkName::Merge, ForkName::Eip4844] + vec![ + ForkName::Base, + ForkName::Altair, + ForkName::Merge, + ForkName::Eip4844, + ] } /// Set the activation slots in the given `ChainSpec` so that the fork named by `self` diff --git a/consensus/types/src/kzg_commitment.rs b/consensus/types/src/kzg_commitment.rs index 047e0f44c69..1fbdb6ca2ca 100644 --- a/consensus/types/src/kzg_commitment.rs +++ b/consensus/types/src/kzg_commitment.rs @@ -14,7 +14,7 @@ impl TreeHash for KzgCommitment { <[u8; 48] as TreeHash>::tree_hash_type() } - fn tree_hash_packed_encoding(&self) -> PackedEncoding{ + fn tree_hash_packed_encoding(&self) -> PackedEncoding { self.0.tree_hash_packed_encoding() } diff --git a/consensus/types/src/kzg_proof.rs b/consensus/types/src/kzg_proof.rs index c05777f1a1f..cff619ec887 100644 --- a/consensus/types/src/kzg_proof.rs +++ b/consensus/types/src/kzg_proof.rs @@ -1,8 +1,8 @@ -use std::fmt; +use crate::test_utils::{RngCore, TestRandom}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use ssz::{Decode, DecodeError, Encode}; +use std::fmt; use tree_hash::{PackedEncoding, TreeHash}; -use crate::test_utils::{RngCore, TestRandom}; const KZG_PROOF_BYTES_LEN: usize = 
48; @@ -35,19 +35,19 @@ impl Into<[u8; KZG_PROOF_BYTES_LEN]> for KzgProof { } pub mod serde_kzg_proof { - use serde::de::Error; use super::*; + use serde::de::Error; pub fn serialize(bytes: &[u8; KZG_PROOF_BYTES_LEN], serializer: S) -> Result - where - S: Serializer, + where + S: Serializer, { serializer.serialize_str(ð2_serde_utils::hex::encode(bytes)) } pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; KZG_PROOF_BYTES_LEN], D::Error> - where - D: Deserializer<'de>, + where + D: Deserializer<'de>, { let s: String = Deserialize::deserialize(deserializer)?; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 240ecee2cfb..be63c43c50c 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -90,12 +90,12 @@ pub mod slot_data; #[cfg(feature = "sqlite")] pub mod sqlite; +pub mod blob; +pub mod blobs_sidecar; +pub mod bls_field_element; pub mod kzg_commitment; pub mod kzg_proof; -pub mod bls_field_element; -pub mod blob; pub mod signed_blobs_sidecar; -pub mod blobs_sidecar; use ethereum_types::{H160, H256}; @@ -105,12 +105,12 @@ pub use crate::attestation_data::AttestationData; pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BeaconBlockRef, - BeaconBlockRefMut, BeaconBlockEip4844 + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockEip4844, BeaconBlockMerge, + BeaconBlockRef, BeaconBlockRefMut, }; pub use crate::beacon_block_body::{ - BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, - BeaconBlockBodyRef, BeaconBlockBodyRefMut, BeaconBlockBodyEip4844 + BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyEip4844, + BeaconBlockBodyMerge, BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; @@ -139,6 +139,8 @@ pub use crate::free_attestation::FreeAttestation; pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; +pub use crate::kzg_commitment::KzgCommitment; +pub use crate::kzg_proof::KzgProof; pub use crate::participation_flags::ParticipationFlags; pub use crate::participation_list::ParticipationList; pub use crate::payload::{BlindedPayload, BlockType, ExecPayload, FullPayload}; @@ -151,8 +153,8 @@ pub use crate::selection_proof::SelectionProof; pub use crate::shuffling_id::AttestationShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{ - SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockHash, - SignedBeaconBlockMerge, SignedBlindedBeaconBlock, SignedBeaconBlockEip4844, + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockEip4844, + SignedBeaconBlockHash, SignedBeaconBlockMerge, SignedBlindedBeaconBlock, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index c713085b60c..5004d65a9eb 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -1,4 +1,5 @@ use crate::{test_utils::TestRandom, *}; +use core::hash::Hasher; use derivative::Derivative; use serde::de::DeserializeOwned; use 
serde::{Deserialize, Serialize}; @@ -8,7 +9,6 @@ use std::fmt::Debug; use std::hash::Hash; use test_random_derive::TestRandom; use tree_hash::{PackedEncoding, TreeHash}; -use core::hash::Hasher; #[derive(Debug)] pub enum BlockType { diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 4ab74ac2119..4b937912ba2 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -319,26 +319,26 @@ impl SignedBeaconBlockEip4844> { ) -> SignedBeaconBlockEip4844> { let SignedBeaconBlockEip4844 { message: - BeaconBlockEip4844 { - slot, - proposer_index, - parent_root, - state_root, - body: - BeaconBlockBodyEip4844 { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings, - attester_slashings, - attestations, - deposits, - voluntary_exits, - sync_aggregate, - execution_payload: BlindedPayload { .. }, - blob_kzg_commitments, + BeaconBlockEip4844 { + slot, + proposer_index, + parent_root, + state_root, + body: + BeaconBlockBodyEip4844 { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayload { .. }, + blob_kzg_commitments, + }, }, - }, signature, } = self; SignedBeaconBlockEip4844 { diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index d717a56026b..7ad3406c7a8 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -280,7 +280,7 @@ impl> Case for EpochProcessing { } // No phase0 tests for Altair and later. ForkName::Altair | ForkName::Merge => T::name() != "participation_record_updates", - ForkName::Eip4844 => false, // TODO: revisit when tests are out + ForkName::Eip4844 => false, // TODO: revisit when tests are out } } From 8d45e487750829d72f57068d3a9c83f27109eb0c Mon Sep 17 00:00:00 2001 From: realbigsean Date: Mon, 3 Oct 2022 21:52:16 -0400 Subject: [PATCH 035/263] cargo fix --- beacon_node/beacon_chain/src/beacon_chain.rs | 3 --- beacon_node/beacon_chain/src/execution_payload.rs | 3 +-- beacon_node/execution_layer/src/engine_api/http.rs | 4 +--- beacon_node/lighthouse_network/src/rpc/protocol.rs | 2 +- beacon_node/lighthouse_network/src/service/mod.rs | 12 +++--------- beacon_node/lighthouse_network/src/types/pubsub.rs | 2 +- beacon_node/network/src/beacon_processor/mod.rs | 2 +- .../src/beacon_processor/worker/gossip_methods.rs | 2 +- .../src/beacon_processor/worker/rpc_methods.rs | 8 ++++---- beacon_node/network/src/router/processor.rs | 2 +- beacon_node/network/src/sync/range_sync/range.rs | 2 +- consensus/types/src/blob.rs | 6 +++--- consensus/types/src/bls_field_element.rs | 2 +- consensus/types/src/execution_payload.rs | 1 - 14 files changed, 19 insertions(+), 32 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 8e9cd4bc72b..5020406ae5f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -98,11 +98,8 @@ use task_executor::{ShutdownReason, TaskExecutor}; use tree_hash::TreeHash; use types::beacon_state::CloneConfig; use types::*; - pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; pub use fork_choice::CountUnrealized; -use types::kzg_commitment::KzgCommitment; -use types::signed_blobs_sidecar::SignedBlobsSidecar; pub type ForkChoiceError = fork_choice::Error; diff --git 
a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index a6138ff10e5..f056aeb9904 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -17,7 +17,6 @@ use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; use slot_clock::SlotClock; -use ssz_types::VariableList; use state_processing::per_block_processing::{ compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, partially_verify_execution_payload, @@ -27,7 +26,7 @@ use tokio::task::JoinHandle; use tree_hash::TreeHash; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, EthSpec, ExecPayload, ExecutionBlockHash, - Hash256, KzgCommitment, SignedBeaconBlock, Slot, + Hash256, SignedBeaconBlock, Slot, }; pub type PreparePayloadResult = Result; diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 1d536f1171f..031abf721f9 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -3,14 +3,12 @@ use super::*; use crate::auth::Auth; use crate::json_structures::*; -use eth2::lighthouse::Eth1Block; use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; - use std::time::Duration; -use types::{EthSpec, FullPayload}; +use types::{EthSpec}; pub use deposit_log::{DepositLog, Log}; pub use reqwest::Client; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index ec308d1eef3..6b6cef8e797 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -21,7 +21,7 @@ use tokio_util::{ compat::{Compat, FuturesAsyncReadCompatExt}, }; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BlobsSidecar, EthSpec, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, }; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 03ebb218ab7..03feb267a7c 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -9,7 +9,6 @@ use crate::peer_manager::{ ConnectionDirection, PeerManager, PeerManagerEvent, }; use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; -use crate::rpc::methods::BlobsByRangeRequest; use crate::rpc::*; use crate::service::behaviour::BehaviourEvent; pub use crate::service::behaviour::Gossipsub; @@ -19,7 +18,7 @@ use crate::types::{ }; use crate::Eth2Enr; use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; -use crate::{rpc::*, EnrExt}; +use crate::{EnrExt}; use api_types::{PeerRequestId, Request, RequestId, Response}; use futures::stream::StreamExt; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; @@ -35,21 +34,16 @@ use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent}; use libp2p::PeerId; use slog::{crit, debug, info, o, trace, warn}; -use ssz::Encode; -use std::collections::HashSet; -use std::fs::File; use std::io::Write; -use std::path::{Path, PathBuf}; +use 
std::path::{PathBuf}; use std::pin::Pin; use std::{ - collections::VecDeque, marker::PhantomData, sync::Arc, task::{Context, Poll}, }; use types::{ - consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, - SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, VariableList, + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, }; use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER}; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index b29e0c9ff70..12cb0bf69ea 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -10,7 +10,7 @@ use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::signed_blobs_sidecar::SignedBlobsSidecar; use types::{ - Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ForkContext, ForkName, ProposerSlashing, + Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockEip4844, SignedBeaconBlockMerge, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index f574daf91a3..fc96767b306 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -62,7 +62,7 @@ use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; use tokio::sync::{mpsc}; use types::{ - Attestation, AttesterSlashing, BlobsSidecar, Hash256, ProposerSlashing, + Attestation, AttesterSlashing, Hash256, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 1f56ebc5c8f..18c792cb6d8 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -18,7 +18,7 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, BlobsSidecar, EthSpec, Hash256, IndexedAttestation, + Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 0859155828e..2ef858eee68 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -12,7 +12,7 @@ use slog::{debug, error}; use slot_clock::SlotClock; use std::sync::Arc; use task_executor::TaskExecutor; -use types::{Epoch, EthSpec, Hash256, Slot, VariableList}; +use types::{Epoch, EthSpec, Hash256, Slot}; use super::Worker; @@ -377,10 +377,10 @@ impl Worker { /// Handle a `BlobsByRange` request from the peer. 
pub fn handle_blobs_by_range_request( self, - executor: TaskExecutor, - send_on_drop: SendOnDrop, + _executor: TaskExecutor, + _send_on_drop: SendOnDrop, peer_id: PeerId, - request_id: PeerRequestId, + _request_id: PeerRequestId, mut req: BlobsByRangeRequest, ) { debug!(self.log, "Received BlobsByRange Request"; diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index b4ce4f05997..be382efe7eb 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -5,7 +5,7 @@ use crate::service::{NetworkMessage, RequestId}; use crate::status::status_message; use crate::sync::manager::RequestId as SyncId; use crate::sync::SyncMessage; -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::*; use lighthouse_network::rpc::methods::BlobsByRangeRequest; use lighthouse_network::{ diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 39696f2b643..25314543877 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -55,7 +55,7 @@ use lru_cache::LRUTimeCache; use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; -use types::{BlobsSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// For how long we store failed finalized chains to prevent retries. const FAILED_CHAINS_EXPIRY_SECONDS: u64 = 30; diff --git a/consensus/types/src/blob.rs b/consensus/types/src/blob.rs index 9b35c2584af..a1f86dab653 100644 --- a/consensus/types/src/blob.rs +++ b/consensus/types/src/blob.rs @@ -1,8 +1,8 @@ use crate::bls_field_element::BlsFieldElement; use crate::test_utils::RngCore; use crate::test_utils::TestRandom; -use crate::{EthSpec, Uint256}; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use crate::{EthSpec}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use ssz_types::VariableList; use tree_hash::{PackedEncoding, TreeHash}; @@ -14,7 +14,7 @@ pub struct Blob(pub VariableList TestRandom for Blob { fn random_for_test(rng: &mut impl RngCore) -> Self { let mut res = Blob(VariableList::empty()); - for i in 0..4096 { + for _i in 0..4096 { let slice = ethereum_types::U256([ rng.next_u64(), rng.next_u64(), diff --git a/consensus/types/src/bls_field_element.rs b/consensus/types/src/bls_field_element.rs index 3c7aed5f60e..818c9df4751 100644 --- a/consensus/types/src/bls_field_element.rs +++ b/consensus/types/src/bls_field_element.rs @@ -1,4 +1,4 @@ -use crate::{EthSpec, Uint256}; +use crate::{Uint256}; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use tree_hash::{PackedEncoding, TreeHash}; diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 9ab8faa8a07..8b4ccfd3c15 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -1,5 +1,4 @@ use crate::{ - blob::Blob, kzg_commitment::KzgCommitment, kzg_proof::KzgProof, test_utils::RngCore, test_utils::TestRandom, *, }; use derivative::Derivative; From c0dc42ea072cd808c825ba0e2a19ea0f323faa57 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 4 Oct 2022 08:21:46 -0400 Subject: [PATCH 036/263] cargo fmt --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 +- 
.../execution_layer/src/engine_api/http.rs | 2 +- .../lighthouse_network/src/rpc/protocol.rs | 4 +- .../lighthouse_network/src/service/mod.rs | 4 +- .../network/src/beacon_processor/mod.rs | 16 +- .../beacon_processor/worker/gossip_methods.rs | 56 ++-- .../beacon_processor/worker/rpc_methods.rs | 266 +++++++++--------- beacon_node/network/src/router/processor.rs | 4 +- beacon_node/network/src/sync/manager.rs | 2 +- consensus/types/src/blob.rs | 2 +- consensus/types/src/bls_field_element.rs | 2 +- consensus/types/src/execution_payload.rs | 4 +- 12 files changed, 182 insertions(+), 184 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5020406ae5f..f43f0403cc5 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -12,6 +12,7 @@ use crate::block_verification::{ signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER, }; +pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; use crate::chain_config::ChainConfig; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; @@ -57,6 +58,7 @@ use eth2::types::{EventKind, SseBlock, SyncDuty}; use execution_layer::{ BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, }; +pub use fork_choice::CountUnrealized; use fork_choice::{ AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses, @@ -98,8 +100,6 @@ use task_executor::{ShutdownReason, TaskExecutor}; use tree_hash::TreeHash; use types::beacon_state::CloneConfig; use types::*; -pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; -pub use fork_choice::CountUnrealized; pub type ForkChoiceError = fork_choice::Error; diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 031abf721f9..9aa8289fc3a 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -8,7 +8,7 @@ use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; use std::time::Duration; -use types::{EthSpec}; +use types::EthSpec; pub use deposit_log::{DepositLog, Log}; pub use reqwest::Client; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 6b6cef8e797..23a142a7ef3 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -21,8 +21,8 @@ use tokio_util::{ compat::{Compat, FuturesAsyncReadCompatExt}, }; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EthSpec, - ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EthSpec, ForkContext, + ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, }; lazy_static! 
{ diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 03feb267a7c..fafff57fd45 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -16,9 +16,9 @@ use crate::types::{ subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, }; +use crate::EnrExt; use crate::Eth2Enr; use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; -use crate::{EnrExt}; use api_types::{PeerRequestId, Request, RequestId, Response}; use futures::stream::StreamExt; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; @@ -35,7 +35,7 @@ use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent}; use libp2p::PeerId; use slog::{crit, debug, info, o, trace, warn}; use std::io::Write; -use std::path::{PathBuf}; +use std::path::PathBuf; use std::pin::Pin; use std::{ marker::PhantomData, diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index fc96767b306..97e18acc396 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -60,13 +60,13 @@ use std::task::Context; use std::time::Duration; use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; -use tokio::sync::{mpsc}; +use tokio::sync::mpsc; +use types::signed_blobs_sidecar::SignedBlobsSidecar; use types::{ - Attestation, AttesterSlashing, Hash256, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, - SubnetId, SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, Hash256, ProposerSlashing, SignedAggregateAndProof, + SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; -use types::signed_blobs_sidecar::SignedBlobsSidecar; use work_reprocessing_queue::{ spawn_reprocess_scheduler, QueuedAggregate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, }; @@ -788,7 +788,7 @@ pub enum Work { peer_id: PeerId, request_id: PeerRequestId, request: BlobsByRangeRequest, - } + }, } impl Work { @@ -812,7 +812,7 @@ impl Work { Work::Status { .. } => STATUS_PROCESSING, Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST, - Work::BlobsByRangeRequest {..} => BLOBS_BY_RANGE_REQUEST, + Work::BlobsByRangeRequest { .. } => BLOBS_BY_RANGE_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. 
} => UNKNOWN_BLOCK_AGGREGATE, } @@ -1693,7 +1693,7 @@ impl BeaconProcessor { Work::BlobsByRangeRequest { peer_id, request_id, - request + request, } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { worker.handle_blobs_by_range_request( sub_executor, diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 18c792cb6d8..8454d83f2be 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -17,12 +17,12 @@ use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; +use types::signed_blobs_sidecar::SignedBlobsSidecar; use types::{ - Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, - ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, - SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, + Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; -use types::signed_blobs_sidecar::SignedBlobsSidecar; use super::{ super::work_reprocessing_queue::{ @@ -959,31 +959,31 @@ impl Worker { Ok(block_root) => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); - if reprocess_tx - .try_send(ReprocessQueueMessage::BlockImported(block_root)) - .is_err() - { - error!( + if reprocess_tx + .try_send(ReprocessQueueMessage::BlockImported(block_root)) + .is_err() + { + error!( self.log, "Failed to inform block import"; "source" => "gossip", "block_root" => ?block_root, ) - }; + }; - debug!( + debug!( self.log, "Gossipsub block processed"; "block" => ?block_root, "peer_id" => %peer_id ); - self.chain.recompute_head_at_current_slot().await; - } - Err(BlockError::ParentUnknown { .. }) => { - // Inform the sync manager to find parents for this block - // This should not occur. It should be checked by `should_forward_block` - error!( + self.chain.recompute_head_at_current_slot().await; + } + Err(BlockError::ParentUnknown { .. }) => { + // Inform the sync manager to find parents for this block + // This should not occur. 
It should be checked by `should_forward_block` + error!( self.log, "Block with unknown parent attempted to be processed"; "peer_id" => %peer_id @@ -996,28 +996,28 @@ impl Worker { "Failed to verify execution payload"; "error" => %e ); - }, - other => { - debug!( + } + other => { + debug!( self.log, "Invalid gossip beacon block"; "outcome" => ?other, "block root" => ?block_root, "block slot" => block.slot() ); - self.gossip_penalize_peer( - peer_id, - PeerAction::MidToleranceError, - "bad_gossip_block_ssz", - ); - trace!( + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "bad_gossip_block_ssz", + ); + trace!( self.log, "Invalid gossip beacon block ssz"; "ssz" => format_args!("0x{}", hex::encode(block.as_ssz_bytes())), ); - } - }; } + }; + } pub fn process_gossip_voluntary_exit( self, diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 2ef858eee68..beaea383366 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -4,9 +4,9 @@ use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; use itertools::process_results; +use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MAX_REQUEST_BLOBS_SIDECARS}; use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MAX_REQUEST_BLOBS_SIDECARS}; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error}; use slot_clock::SlotClock; @@ -384,141 +384,141 @@ impl Worker { mut req: BlobsByRangeRequest, ) { debug!(self.log, "Received BlobsByRange Request"; - "peer_id" => %peer_id, - "count" => req.count, - "start_slot" => req.start_slot, - ); + "peer_id" => %peer_id, + "count" => req.count, + "start_slot" => req.start_slot, + ); - // Should not send more than max request blocks - if req.count > MAX_REQUEST_BLOBS_SIDECARS { - req.count = MAX_REQUEST_BLOBS_SIDECARS; - } + // Should not send more than max request blocks + if req.count > MAX_REQUEST_BLOBS_SIDECARS { + req.count = MAX_REQUEST_BLOBS_SIDECARS; + } //FIXME(sean) create the blobs iter - // let forwards_block_root_iter = match self - // .chain - // .forwards_iter_block_roots(Slot::from(req.start_slot)) - // { - // Ok(iter) => iter, - // Err(BeaconChainError::HistoricalBlockError( - // HistoricalBlockError::BlockOutOfRange { - // slot, - // oldest_block_slot, - // }, - // )) => { - // debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot); - // return self.send_error_response( - // peer_id, - // RPCResponseErrorCode::ResourceUnavailable, - // "Backfilling".into(), - // request_id, - // ); - // } - // Err(e) => return error!(self.log, "Unable to obtain root iter"; "error" => ?e), - // }; - // - // // Pick out the required blocks, ignoring skip-slots. 
- // let mut last_block_root = None; - // let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { - // iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) - // // map skip slots to None - // .map(|(root, _)| { - // let result = if Some(root) == last_block_root { - // None - // } else { - // Some(root) - // }; - // last_block_root = Some(root); - // result - // }) - // .collect::>>() - // }); - // - // let block_roots = match maybe_block_roots { - // Ok(block_roots) => block_roots, - // Err(e) => return error!(self.log, "Error during iteration over blocks"; "error" => ?e), - // }; - // - // // remove all skip slots - // let block_roots = block_roots.into_iter().flatten().collect::>(); - // - // // Fetching blocks is async because it may have to hit the execution layer for payloads. - // executor.spawn( - // async move { - // let mut blocks_sent = 0; - // let mut send_response = true; - // - // for root in block_roots { - // match self.chain.store.get_blobs(&root) { - // Ok(Some(blob)) => { - // blocks_sent += 1; - // self.send_network_message(NetworkMessage::SendResponse { - // peer_id, - // response: Response::BlobsByRange(Some(Arc::new(VariableList::new(vec![blob.message]).unwrap()))), - // id: request_id, - // }); - // } - // Ok(None) => { - // error!( - // self.log, - // "Blob in the chain is not in the store"; - // "request_root" => ?root - // ); - // break; - // } - // Err(e) => { - // error!( - // self.log, - // "Error fetching block for peer"; - // "block_root" => ?root, - // "error" => ?e - // ); - // break; - // } - // } - // } - // - // let current_slot = self - // .chain - // .slot() - // .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); - // - // if blocks_sent < (req.count as usize) { - // debug!( - // self.log, - // "BlocksByRange Response processed"; - // "peer" => %peer_id, - // "msg" => "Failed to return all requested blocks", - // "start_slot" => req.start_slot, - // "current_slot" => current_slot, - // "requested" => req.count, - // "returned" => blocks_sent - // ); - // } else { - // debug!( - // self.log, - // "BlocksByRange Response processed"; - // "peer" => %peer_id, - // "start_slot" => req.start_slot, - // "current_slot" => current_slot, - // "requested" => req.count, - // "returned" => blocks_sent - // ); - // } - // - // if send_response { - // // send the stream terminator - // self.send_network_message(NetworkMessage::SendResponse { - // peer_id, - // response: Response::BlobsByRange(None), - // id: request_id, - // }); - // } - // - // drop(send_on_drop); - // }, - // "load_blocks_by_range_blocks", - // ); + // let forwards_block_root_iter = match self + // .chain + // .forwards_iter_block_roots(Slot::from(req.start_slot)) + // { + // Ok(iter) => iter, + // Err(BeaconChainError::HistoricalBlockError( + // HistoricalBlockError::BlockOutOfRange { + // slot, + // oldest_block_slot, + // }, + // )) => { + // debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot); + // return self.send_error_response( + // peer_id, + // RPCResponseErrorCode::ResourceUnavailable, + // "Backfilling".into(), + // request_id, + // ); + // } + // Err(e) => return error!(self.log, "Unable to obtain root iter"; "error" => ?e), + // }; + // + // // Pick out the required blocks, ignoring skip-slots. 
+ // let mut last_block_root = None; + // let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { + // iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) + // // map skip slots to None + // .map(|(root, _)| { + // let result = if Some(root) == last_block_root { + // None + // } else { + // Some(root) + // }; + // last_block_root = Some(root); + // result + // }) + // .collect::>>() + // }); + // + // let block_roots = match maybe_block_roots { + // Ok(block_roots) => block_roots, + // Err(e) => return error!(self.log, "Error during iteration over blocks"; "error" => ?e), + // }; + // + // // remove all skip slots + // let block_roots = block_roots.into_iter().flatten().collect::>(); + // + // // Fetching blocks is async because it may have to hit the execution layer for payloads. + // executor.spawn( + // async move { + // let mut blocks_sent = 0; + // let mut send_response = true; + // + // for root in block_roots { + // match self.chain.store.get_blobs(&root) { + // Ok(Some(blob)) => { + // blocks_sent += 1; + // self.send_network_message(NetworkMessage::SendResponse { + // peer_id, + // response: Response::BlobsByRange(Some(Arc::new(VariableList::new(vec![blob.message]).unwrap()))), + // id: request_id, + // }); + // } + // Ok(None) => { + // error!( + // self.log, + // "Blob in the chain is not in the store"; + // "request_root" => ?root + // ); + // break; + // } + // Err(e) => { + // error!( + // self.log, + // "Error fetching block for peer"; + // "block_root" => ?root, + // "error" => ?e + // ); + // break; + // } + // } + // } + // + // let current_slot = self + // .chain + // .slot() + // .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); + // + // if blocks_sent < (req.count as usize) { + // debug!( + // self.log, + // "BlocksByRange Response processed"; + // "peer" => %peer_id, + // "msg" => "Failed to return all requested blocks", + // "start_slot" => req.start_slot, + // "current_slot" => current_slot, + // "requested" => req.count, + // "returned" => blocks_sent + // ); + // } else { + // debug!( + // self.log, + // "BlocksByRange Response processed"; + // "peer" => %peer_id, + // "start_slot" => req.start_slot, + // "current_slot" => current_slot, + // "requested" => req.count, + // "returned" => blocks_sent + // ); + // } + // + // if send_response { + // // send the stream terminator + // self.send_network_message(NetworkMessage::SendResponse { + // peer_id, + // response: Response::BlobsByRange(None), + // id: request_id, + // }); + // } + // + // drop(send_on_drop); + // }, + // "load_blocks_by_range_blocks", + // ); } } diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index be382efe7eb..c2cf483d978 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -6,8 +6,8 @@ use crate::status::status_message; use crate::sync::manager::RequestId as SyncId; use crate::sync::SyncMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use lighthouse_network::rpc::*; use lighthouse_network::rpc::methods::BlobsByRangeRequest; +use lighthouse_network::rpc::*; use lighthouse_network::{ Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response, }; @@ -17,12 +17,12 @@ use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::SyncCommitteeMessage; use tokio::sync::mpsc; +use types::signed_blobs_sidecar::SignedBlobsSidecar; use types::{ Attestation, AttesterSlashing, 
BlobsSidecar, EthSpec, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId, }; -use types::signed_blobs_sidecar::SignedBlobsSidecar; /// Processes validated messages from the network. It relays necessary data to the syncing thread /// and processes blocks from the pubsub network. diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index d5dfb60fbb8..92866959021 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -593,7 +593,7 @@ impl SyncManager { .parent_chain_processed(chain_hash, result, &mut self.network), }, //FIXME(sean) - SyncMessage::RpcBlob { .. } => todo!() + SyncMessage::RpcBlob { .. } => todo!(), } } diff --git a/consensus/types/src/blob.rs b/consensus/types/src/blob.rs index a1f86dab653..89e5e8bbe77 100644 --- a/consensus/types/src/blob.rs +++ b/consensus/types/src/blob.rs @@ -1,7 +1,7 @@ use crate::bls_field_element::BlsFieldElement; use crate::test_utils::RngCore; use crate::test_utils::TestRandom; -use crate::{EthSpec}; +use crate::EthSpec; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use ssz_types::VariableList; diff --git a/consensus/types/src/bls_field_element.rs b/consensus/types/src/bls_field_element.rs index 818c9df4751..7654f65b364 100644 --- a/consensus/types/src/bls_field_element.rs +++ b/consensus/types/src/bls_field_element.rs @@ -1,4 +1,4 @@ -use crate::{Uint256}; +use crate::Uint256; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use tree_hash::{PackedEncoding, TreeHash}; diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 8b4ccfd3c15..78a53a3675e 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -1,6 +1,4 @@ -use crate::{ - test_utils::TestRandom, *, -}; +use crate::{test_utils::TestRandom, *}; use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; From ba16a037a326fdf2517d8bffaa7c77927388fa08 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 4 Oct 2022 09:34:05 -0400 Subject: [PATCH 037/263] cleanup --- .../beacon_chain/src/block_verification.rs | 1 - beacon_node/beacon_chain/src/snapshot_cache.rs | 2 +- beacon_node/execution_layer/src/engine_api.rs | 6 ------ .../src/rpc/codec/ssz_snappy.rs | 2 +- .../lighthouse_network/src/rpc/protocol.rs | 2 +- .../lighthouse_network/src/rpc/rate_limiter.rs | 2 +- .../lighthouse_network/src/service/api_types.rs | 2 +- .../lighthouse_network/src/types/pubsub.rs | 4 ---- beacon_node/network/src/beacon_processor/mod.rs | 10 ++++------ .../beacon_processor/worker/gossip_methods.rs | 3 --- beacon_node/network/src/metrics.rs | 9 +++++++++ beacon_node/store/src/config.rs | 2 +- beacon_node/store/src/lib.rs | 2 +- consensus/types/src/chain_spec.rs | 16 +++++++--------- consensus/types/src/consts.rs | 2 +- consensus/types/src/eth_spec.rs | 4 ++-- 16 files changed, 30 insertions(+), 39 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 5fc295900bb..3a578b63a5c 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -147,7 +147,6 @@ pub enum BlockError { present_slot: Slot, block_slot: Slot, }, - MissingSidecar, /// The block state_root does not match the generated state. 
/// /// ## Peer scoring diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index c77ef9e38a4..40b73451cb0 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -16,7 +16,7 @@ pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 4; const MINIMUM_BLOCK_DELAY_FOR_CLONE: Duration = Duration::from_secs(6); /// This snapshot is to be used for verifying a child of `self.beacon_block`. -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct PreProcessingSnapshot { /// This state is equivalent to the `self.beacon_block.state_root()` state that has been /// advanced forward one slot using `per_slot_processing`. This state is "primed and ready" for diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 8dd7992751f..e11ba52a850 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -166,9 +166,3 @@ pub struct ProposeBlindedBlockResponse { pub latest_valid_hash: Option, pub validation_error: Option, } - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct BlobDetailsV1 { - kzg: KzgCommitment, - blob: Vec, -} diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index f88721a81d6..c00ff64004e 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -410,7 +410,7 @@ fn context_bytes( // NOTE: If you are adding another fork type here, be sure to modify the // `fork_context.to_context_bytes()` function to support it as well! SignedBeaconBlock::Eip4844 { .. } => { - // Merge context being `None` implies that "merge never happened". + // Eip4844 context being `None` implies that "merge never happened". fork_context.to_context_bytes(ForkName::Eip4844) } SignedBeaconBlock::Merge { .. } => { diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 23a142a7ef3..c0c8e83eaaa 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -475,7 +475,7 @@ impl InboundRequest { ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), ], InboundRequest::BlobsByRange(_) => vec![ProtocolId::new( - Protocol::BlocksByRoot, + Protocol::BlobsByRange, Version::V1, Encoding::SSZSnappy, )], diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 62a81f3e233..6aa91aab6b7 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -100,7 +100,7 @@ pub struct RPCRateLimiterBuilder { bbrange_quota: Option, /// Quota for the BlocksByRoot protocol. bbroots_quota: Option, - /// Quota for the BlocksByRange protocol. + /// Quota for the BlobsByRange protocol. blbrange_quota: Option, } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 57f2074b4bb..46af7ddb226 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -33,7 +33,7 @@ pub enum Request { Status(StatusMessage), /// A blocks by range request. BlocksByRange(BlocksByRangeRequest), - /// A bloibs by range request. 
+ /// A blobs by range request. BlobsByRange(BlobsByRangeRequest), /// A request blocks root request. BlocksByRoot(BlocksByRootRequest), diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 12cb0bf69ea..781ac9a12cb 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -175,10 +175,6 @@ impl PubsubMessage { SignedBeaconBlockEip4844::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), - Some(ForkName::Eip4844) => SignedBeaconBlock::::Eip4844( - SignedBeaconBlockEip4844::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ), None => { return Err(format!( "Unknown gossipsub fork digest: {:?}", diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 97e18acc396..9d42db4495c 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -157,7 +157,7 @@ const MAX_STATUS_QUEUE_LEN: usize = 1_024; /// will be stored before we start dropping them. const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024; -const MAX_TX_BLOBS_BY_RANGE_QUEUE_LEN: usize = 1_024; +const MAX_BLOBS_BY_RANGE_QUEUE_LEN: usize = 1_024; /// The maximum number of queued `BlocksByRootRequest` objects received from the network RPC that /// will be stored before we start dropping them. @@ -951,14 +951,13 @@ impl BeaconProcessor { let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); - let mut gossip_blobs_sidecar_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); + let mut gossip_blobs_sidecar_queue = FifoQueue::new(MAX_GOSSIP_BLOB_QUEUE_LEN); let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN); let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN); - let mut txbbrange_queue = FifoQueue::new(MAX_TX_BLOBS_BY_RANGE_QUEUE_LEN); let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); - let mut blbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); + let mut blbrange_queue = FifoQueue::new(MAX_BLOBS_BY_RANGE_QUEUE_LEN); // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). @@ -1343,9 +1342,8 @@ impl BeaconProcessor { &metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL, gossip_block_queue.len() as i64, ); - //FIXME(sean) blob metrics metrics::set_gauge( - &metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL, + &metrics::BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL, rpc_block_queue.len() as i64, ); metrics::set_gauge( diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 8454d83f2be..e5b0d76dae4 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -786,9 +786,6 @@ impl Worker { verified_block } - Err(BlockError::MissingSidecar) => { - todo!(); //is relevant? 
- } Err(BlockError::ParentUnknown(block)) => { debug!( self.log, diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index b4e7a3bace3..94de2988c8d 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -152,6 +152,15 @@ lazy_static! { "beacon_processor_rpc_block_imported_total", "Total number of gossip blocks imported to fork choice, etc." ); + // Rpc blobs. + pub static ref BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_rpc_blob_queue_total", + "Count of blobs from the rpc waiting to be verified." + ); + pub static ref BEACON_PROCESSOR_RPC_BLOB_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_rpc_blob_imported_total", + "Total number of gossip blobs imported." + ); // Chain segments. pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result = try_create_int_gauge( "beacon_processor_chain_segment_queue_total", diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 1422216876a..53d99f75ebf 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -18,7 +18,7 @@ pub struct StoreConfig { pub slots_per_restore_point_set_explicitly: bool, /// Maximum number of blocks to store in the in-memory block cache. pub block_cache_size: usize, - /// Maximum number of blobs to store in the in-memory block cache. + /// Maximum number of blobs to store in the in-memory blob cache. pub blob_cache_size: usize, /// Whether to compact the database on initialization. pub compact_on_init: bool, diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index df6e3950137..00e37a18ec8 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -173,7 +173,7 @@ pub enum DBColumn { BeaconMeta, #[strum(serialize = "blk")] BeaconBlock, - #[strum(serialize = "blo")] + #[strum(serialize = "blb")] BeaconBlob, /// For full `BeaconState`s in the hot database (finalized or fork-boundary states). #[strum(serialize = "ste")] diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 9641b2059c4..279a449e53a 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -22,8 +22,7 @@ pub enum Domain { ContributionAndProof, SyncCommitteeSelectionProof, ApplicationMask(ApplicationDomain), - //FIXME(sean) add this domain - //BlobsSideCar, + BlobsSideCar, } /// Lighthouse's internal configuration struct. 
@@ -565,7 +564,7 @@ impl ChainSpec { domain_sync_committee: 7, domain_sync_committee_selection_proof: 8, domain_contribution_and_proof: 9, - altair_fork_version: [0x01, 0x00, 0x0f, 0xfd], + altair_fork_version: [0x01, 0x00, 0x00, 0x00], altair_fork_epoch: Some(Epoch::new(74240)), /* @@ -576,7 +575,7 @@ impl ChainSpec { min_slashing_penalty_quotient_bellatrix: u64::checked_pow(2, 5) .expect("pow does not overflow"), proportional_slashing_multiplier_bellatrix: 3, - bellatrix_fork_version: [0x02, 0x00, 0x0f, 0xfd], + bellatrix_fork_version: [0x02, 0x00, 0x00, 0x00], bellatrix_fork_epoch: Some(Epoch::new(144896)), terminal_total_difficulty: Uint256::from_dec_str("58750000000000000000000") .expect("terminal_total_difficulty is a valid integer"), @@ -587,8 +586,8 @@ impl ChainSpec { /* * Eip4844 hard fork params */ - eip4844_fork_version: [0x04, 0x00, 0x00, 0xfd], - eip4844_fork_epoch: Some(Epoch::new(u64::MAX)), + eip4844_fork_version: [0x04, 0x00, 0x00, 0x00], + eip4844_fork_epoch: None, /* * Network specific @@ -646,8 +645,7 @@ impl ChainSpec { .checked_add(Uint256::one()) .expect("addition does not overflow"), // Eip4844 - //FIXME(sean) - eip4844_fork_version: [0x03, 0x00, 0x00, 0x01], + eip4844_fork_version: [0x04, 0x00, 0x00, 0x01], eip4844_fork_epoch: None, // Other network_id: 2, // lighthouse testnet network id @@ -805,7 +803,7 @@ impl ChainSpec { safe_slots_to_import_optimistically: 128u64, eip4844_fork_version: [0x04, 0x00, 0x00, 0x64], - eip4844_fork_epoch: Some(Epoch::new(u64::MAX)), + eip4844_fork_epoch: None, /* * Network specific diff --git a/consensus/types/src/consts.rs b/consensus/types/src/consts.rs index 8e12b05fb5d..2469f5f9cf0 100644 --- a/consensus/types/src/consts.rs +++ b/consensus/types/src/consts.rs @@ -22,7 +22,7 @@ pub mod altair { pub mod merge { pub const INTERVALS_PER_SLOT: u64 = 3; } -pub mod cappella { +pub mod eip4844 { use crate::Uint256; use lazy_static::lazy_static; diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index e67389a384e..716754c7e54 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -275,7 +275,7 @@ impl EthSpec for MainnetEthSpec { type GasLimitDenominator = U1024; type MinGasLimit = U5000; type MaxExtraDataBytes = U32; - type MaxBlobsPerBlock = U16777216; // 2**24 + type MaxBlobsPerBlock = U16; // 2**4 = 16 type FieldElementsPerBlob = U4096; type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch @@ -371,7 +371,7 @@ impl EthSpec for GnosisEthSpec { type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U2048; // 128 max attestations * 16 slots per epoch type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch - type MaxBlobsPerBlock = U16777216; // 2**24 + type MaxBlobsPerBlock = U16; // 2**4 = 16 type FieldElementsPerBlob = U4096; fn default_spec() -> ChainSpec { From 7527c2b455423ae59248133738b179fe08922a62 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 4 Oct 2022 14:57:29 -0400 Subject: [PATCH 038/263] fix RPC limit add blob signing domain --- .../beacon_chain/src/block_verification.rs | 33 ++++--------------- .../lighthouse_network/src/rpc/protocol.rs | 12 ++++++- .../network/src/beacon_processor/mod.rs | 2 -- consensus/types/src/blobs_sidecar.rs | 4 +-- consensus/types/src/chain_spec.rs | 7 +++- consensus/types/src/config_and_preset.rs | 1 + consensus/types/src/lib.rs | 4 ++- 7 
files changed, 30 insertions(+), 33 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 3a578b63a5c..f83bc535d93 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -134,10 +134,7 @@ pub enum BlockError { /// its parent. ParentUnknown(Arc>), /// The block skips too many slots and is a DoS risk. - TooManySkippedSlots { - parent_slot: Slot, - block_slot: Slot, - }, + TooManySkippedSlots { parent_slot: Slot, block_slot: Slot }, /// The block slot is greater than the present slot. /// /// ## Peer scoring @@ -152,10 +149,7 @@ pub enum BlockError { /// ## Peer scoring /// /// The peer has incompatible state transition logic and is faulty. - StateRootMismatch { - block: Hash256, - local: Hash256, - }, + StateRootMismatch { block: Hash256, local: Hash256 }, /// The block was a genesis block, these blocks cannot be re-imported. GenesisBlock, /// The slot is finalized, no need to import. @@ -174,9 +168,7 @@ pub enum BlockError { /// /// It's unclear if this block is valid, but it conflicts with finality and shouldn't be /// imported. - NotFinalizedDescendant { - block_parent_root: Hash256, - }, + NotFinalizedDescendant { block_parent_root: Hash256 }, /// Block is already known, no need to re-import. /// /// ## Peer scoring @@ -189,10 +181,7 @@ pub enum BlockError { /// /// The `proposer` has already proposed a block at this slot. The existing block may or may not /// be equal to the given block. - RepeatProposal { - proposer: u64, - slot: Slot, - }, + RepeatProposal { proposer: u64, slot: Slot }, /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER. /// /// ## Peer scoring @@ -207,10 +196,7 @@ pub enum BlockError { /// ## Peer scoring /// /// The block is invalid and the peer is faulty. - IncorrectBlockProposer { - block: u64, - local_shuffling: u64, - }, + IncorrectBlockProposer { block: u64, local_shuffling: u64 }, /// The proposal signature in invalid. /// /// ## Peer scoring @@ -234,10 +220,7 @@ pub enum BlockError { /// ## Peer scoring /// /// The block is invalid and the peer is faulty. - BlockIsNotLaterThanParent { - block_slot: Slot, - parent_slot: Slot, - }, + BlockIsNotLaterThanParent { block_slot: Slot, parent_slot: Slot }, /// At least one block in the chain segment did not have it's parent root set to the root of /// the prior block. /// @@ -293,9 +276,7 @@ pub enum BlockError { /// /// The peer sent us an invalid block, but I'm not really sure how to score this in an /// "optimistic" sync world. - ParentExecutionPayloadInvalid { - parent_root: Hash256, - }, + ParentExecutionPayloadInvalid { parent_root: Hash256 }, } /// Returned when block validation failed due to some issue verifying diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index c0c8e83eaaa..3840aa3ba6f 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -20,8 +20,10 @@ use tokio_util::{ codec::Framed, compat::{Compat, FuturesAsyncReadCompatExt}, }; +use types::BlobsSidecar; +use types::SignedBlobsSidecar; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EthSpec, ForkContext, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Blob, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, }; @@ -98,6 +100,14 @@ lazy_static! 
{ .as_ssz_bytes() .len(); + pub static ref SIGNED_BLOBS_SIDECAR_MIN: usize = SignedBlobsSidecar { + message: BlobsSidecar::::empty(), + signature: Signature::empty(), + }.as_ssz_bytes() + .len(); + + pub static ref SIGNED_BLOBS_SIDECAR_MAX: usize = *SIGNED_BLOBS_SIDECAR_MIN // Max size of variable length `blobs` field + + (MainnetEthSpec::max_blobs_per_block() * as Encode>::ssz_fixed_len()); } /// The maximum bytes that can be sent across the RPC pre-merge. diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 9d42db4495c..7d7f6602e5c 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -1182,8 +1182,6 @@ impl BeaconProcessor { self.spawn_worker(item, toolbox); } else if let Some(item) = bbrange_queue.pop() { self.spawn_worker(item, toolbox); - } else if let Some(item) = txbbrange_queue.pop() { - self.spawn_worker(item, toolbox); } else if let Some(item) = bbroots_queue.pop() { self.spawn_worker(item, toolbox); // Check slashings after all other consensus messages so we prioritize diff --git a/consensus/types/src/blobs_sidecar.rs b/consensus/types/src/blobs_sidecar.rs index f00d457afdb..75100a04180 100644 --- a/consensus/types/src/blobs_sidecar.rs +++ b/consensus/types/src/blobs_sidecar.rs @@ -12,7 +12,7 @@ use tree_hash_derive::TreeHash; pub struct BlobsSidecar { pub beacon_block_root: Hash256, pub beacon_block_slot: Slot, - pub blobs: VariableList, E::MaxBlobsPerBlock>, + pub blobs: VariableList, E::MaxBlobsPerBlock>, pub kzg_aggregate_proof: KzgProof, } @@ -24,6 +24,6 @@ impl BlobsSidecar { // Fixed part Self::empty().as_ssz_bytes().len() // Max size of variable length `blobs` field - + (E::max_blobs_per_block() * as Encode>::ssz_fixed_len()) + + (E::max_blobs_per_block() * as Encode>::ssz_fixed_len()) } } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 279a449e53a..aa477b22e5d 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -13,6 +13,7 @@ use tree_hash::TreeHash; pub enum Domain { BeaconProposer, BeaconAttester, + BlobsSideCar, Randao, Deposit, VoluntaryExit, @@ -22,7 +23,6 @@ pub enum Domain { ContributionAndProof, SyncCommitteeSelectionProof, ApplicationMask(ApplicationDomain), - BlobsSideCar, } /// Lighthouse's internal configuration struct. 
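For scale, the `SIGNED_BLOBS_SIDECAR_MAX` limit above is dominated by the variable-length `blobs` list: with `MaxBlobsPerBlock = U16`, `FieldElementsPerBlob = U4096` and 32-byte field elements, the blob payload alone comes to 2 MiB per sidecar. A quick self-contained check of that arithmetic (the fixed-size sidecar overhead is ignored here):

    fn main() {
        let max_blobs_per_block = 16usize;       // MaxBlobsPerBlock = U16
        let field_elements_per_blob = 4096usize; // FieldElementsPerBlob = U4096
        let bytes_per_field_element = 32usize;   // each BLSFieldElement is a 256-bit integer

        let blob_ssz_fixed_len = field_elements_per_blob * bytes_per_field_element;
        let max_blobs_bytes = max_blobs_per_block * blob_ssz_fixed_len;

        assert_eq!(blob_ssz_fixed_len, 131_072);    // 128 KiB per blob
        assert_eq!(max_blobs_bytes, 2 * 1_048_576); // 2 MiB of blobs per sidecar, well under the RPC size cap
    }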
@@ -99,6 +99,7 @@ pub struct ChainSpec { */ pub(crate) domain_beacon_proposer: u32, pub(crate) domain_beacon_attester: u32, + pub(crate) domain_blobs_sidecar: u32, pub(crate) domain_randao: u32, pub(crate) domain_deposit: u32, pub(crate) domain_voluntary_exit: u32, @@ -340,6 +341,7 @@ impl ChainSpec { match domain { Domain::BeaconProposer => self.domain_beacon_proposer, Domain::BeaconAttester => self.domain_beacon_attester, + Domain::BlobsSideCar => self.domain_blobs_sidecar, Domain::Randao => self.domain_randao, Domain::Deposit => self.domain_deposit, Domain::VoluntaryExit => self.domain_voluntary_exit, @@ -529,6 +531,7 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, + domain_blobs_sidecar: 10, // 0x0a000000 /* * Fork choice @@ -743,6 +746,7 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, + domain_blobs_sidecar: 10, /* * Fork choice @@ -1181,6 +1185,7 @@ mod tests { test_domain(Domain::BeaconProposer, spec.domain_beacon_proposer, &spec); test_domain(Domain::BeaconAttester, spec.domain_beacon_attester, &spec); + test_domain(Domain::BlobsSideCar, spec.domain_blobs_sidecar, &spec); test_domain(Domain::Randao, spec.domain_randao, &spec); test_domain(Domain::Deposit, spec.domain_deposit, &spec); test_domain(Domain::VoluntaryExit, spec.domain_voluntary_exit, &spec); diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index e624afe2dbb..74b07717a28 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -72,6 +72,7 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { "bls_withdrawal_prefix".to_uppercase() => u8_hex(spec.bls_withdrawal_prefix_byte), "domain_beacon_proposer".to_uppercase() => u32_hex(spec.domain_beacon_proposer), "domain_beacon_attester".to_uppercase() => u32_hex(spec.domain_beacon_attester), + "domain_blobs_sidecar".to_uppercase() => u32_hex(spec.domain_blobs_sidecar), "domain_randao".to_uppercase()=> u32_hex(spec.domain_randao), "domain_deposit".to_uppercase()=> u32_hex(spec.domain_deposit), "domain_voluntary_exit".to_uppercase() => u32_hex(spec.domain_voluntary_exit), diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index be63c43c50c..4bb383dfba4 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -157,6 +157,7 @@ pub use crate::signed_beacon_block::{ SignedBeaconBlockHash, SignedBeaconBlockMerge, SignedBlindedBeaconBlock, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; +pub use crate::signed_blobs_sidecar::SignedBlobsSidecar; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; pub use crate::signing_data::{SignedRoot, SigningData}; @@ -183,7 +184,8 @@ pub type Uint256 = ethereum_types::U256; pub type Address = H160; pub type ForkVersion = [u8; 4]; pub type BLSFieldElement = Uint256; -pub type Blob = FixedVector; +pub type Blob = FixedVector::FieldElementsPerBlob>; +pub type Polynomial = VariableList::FieldElementsPerBlob>; pub use bls::{ AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, From cc59f936057da4b9d1d114661d64b225b4ff8d70 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 4 Oct 2022 15:42:05 -0400 Subject: [PATCH 039/263] compressed eip4844 genesis --- .../eip4844/genesis.ssz.zip | Bin 0 -> 3636 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 
common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip new file mode 100644 index 0000000000000000000000000000000000000000..1db6281d4ff169dbadeeff01648595b0cec9c9ba GIT binary patch literal 3636 zcmWIWW@Zs#U}E54;3&5XU9?r^uK+ItgBXh@gFAx^Lvns@ZhoGAYDtDsd|qlvd45rL zd~$wXT4uU_QfX#RNqlA=ns~8(YG#3ng^7uNdTL&3ab~ezadA~>2qy#cd293J)*74S z(h6<{MwS=M3=Cl6?b%EpF&2pfAHy&5#J|7f%f4&RiDqA8Pj|B|&W5qNvT+X^ulvmP zlXly7hl+J0t^>Ho&Im4ersRU2>5 z3j1&RzkhMHRp0z;Yd<_%bfe|(%kXLYzn$$^%l=yI?D{)DBL8oh_3i0$CWcWkVnd*O z!!Ez+4yn)I&0bz7`8VL@lpQNe&Ga)ae4Q9;xloW-+BJW*kM_;#th~Sb^QY%ZRhzv$ zdNIHv+1(h+VS6xumAP`N3`5ERf7>5q@x}m4S~@R z7!85Z5Eu=C5flOs^tdVX?E&4juS$OwznfJmn*ThMC+A}Qv6+{CEMt{S?thytZ=Lfk zKU>J$CRp@GLgCGif{EAX?c4sf+|%-``gyOJ5i$`!BQjndo-AAz!#g+nM`l(^&B>N0 zq2~Kn%)XHmqoG*-VxGgD6+3zVX@`8)kA8lX?Ql46l=z(@Tcv&FtM^srU$rf=)3mm) z_1pcTyza+`*4FF%t~TY%9{p>2eqDFHr+k5;d)oHkSxxS}N!7k5- z`)*t~p=uo@*5kFh^vBukc6FV;tZK!-w=b(#EqNk8bMDqJK_BPL^w&N6R^Wch!Pc$L zS%z$n~vU$^+=Zh}-&gWU9zoze1>6c5#=f3)Sw`|c-_4M%f=W-`RUQjpw*=x4G z=hc*`*B`#MTsd-3{n*(v`!9X{6n;4W+s&DGmq&g`h}hlp^*is(D=Drnq2Hd~dm6a* zm`}g;w@)YE-Mz)4cEq;&Y3Ju^VfXYuM$f)pd;ct??n+VAX4mHKvM~NCQ!Dd5*B*+B z>$7h*lRF{5KQrOklqZj7G|3%YEqV1j&-tp%%Jqr%X18LmtancRv!wa*qUGOvB+d85 zr6l;CKkVFeG_pOcx90zof6p|#t$F^eG%9j?wf?5%{dBu|dlp`Mc)Pv)#(w4iZ$>6L rW?Wq|33!@W(g7b!?&H}ty*+AAX0%0$Zo(Sw4GB5xDdm!G+ literal 0 HcmV?d00001 From 3d69484f76181b46a777a2e04541d96c7c61bdb8 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 4 Oct 2022 14:52:55 -0500 Subject: [PATCH 040/263] Fix genesis.ssz.zip --- .../eip4844/genesis.ssz.zip | Bin 3636 -> 3546 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip index 1db6281d4ff169dbadeeff01648595b0cec9c9ba..88b405071058d5163f5757e775c7cea3dd4b1183 100644 GIT binary patch delta 937 zcmdlYb4%JHz?+#xgnL4hGXH7~U|vskaVxGFS+mx2AfwRv)F zpA8UCED)*B5Mz-z@G*S(&6>U5jyI?5nReX4c>1QE%=U9rlv3l)CdbX_T4WgQmQ^w3 zcu=7EZ5yrbQw#U``AQq}FER*!c0wiSS)GnyYGCJ}#q42@BW1;ou<`$Ot z|K1q9OC%;nVD<+4x2G;$-zT^2Yr@+@J4L1K-|pVDEBEF~>#MArCo=zLjQM9DcJ-CW ziWfIZKR?l5B(F77#C+$qXO&;A>^Es!o2;p8evvDx-Ti&utl01CZ>!FBo9|my_lok6pRZpX;XNP1b}v4n zswU#b*5&VWf93|2UG6jvIwsCK&0Z|(&xa!|vf<9ppVd#2l8m>}t2}!4e73roq_Ecs z$vx@<=1FCH>$yY!AHHU}@!$th=4))*WBNPZ-&|DwZr3gM`g7{3PnVv}68`(`yZj!5 z|GyS?JU=(({`~)cZY{l<>FmX{uJ{)O=QX;@JhSHoTSfE>b_D zqyKkhLCRxw=GWY-7FEgA@BI?kZnby&+>CD@Z-(wJewF&<`zepQ#aFeJt(IJ~*kh|` zTKJ&T@bc$%CHww7y)b)kS=!euAsdNnYPElBo7GIB!~$oQ?o5vi=?)Bj+_$%8Q}*<* z#=^q$ag~q$+&ePyrG0Z+}?aeUOR~H;=jycnQd53)~m+V*1Us3qu+r#=d_Zb4b**O+%mH8{g%fJB2 rFah3-Od`yP%mk4$hDd^P6AUbA1hH`EEt|=Fd@@{-KsDJwqZt?gH_5vC literal 3636 zcmWIWW@Zs#U}E54;3&5XU9?r^uK+ItgBXh@gFAx^Lvns@ZhoGAYDtDsd|qlvd45rL zd~$wXT4uU_QfX#RNqlA=ns~8(YG#3ng^7uNdTL&3ab~ezadA~>2qy#cd293J)*74S z(h6<{MwS=M3=Cl6?b%EpF&2pfAHy&5#J|7f%f4&RiDqA8Pj|B|&W5qNvT+X^ulvmP zlXly7hl+J0t^>Ho&Im4ersRU2>5 z3j1&RzkhMHRp0z;Yd<_%bfe|(%kXLYzn$$^%l=yI?D{)DBL8oh_3i0$CWcWkVnd*O z!!Ez+4yn)I&0bz7`8VL@lpQNe&Ga)ae4Q9;xloW-+BJW*kM_;#th~Sb^QY%ZRhzv$ zdNIHv+1(h+VS6xumAP`N3`5ERf7>5q@x}m4S~@R z7!85Z5Eu=C5flOs^tdVX?E&4juS$OwznfJmn*ThMC+A}Qv6+{CEMt{S?thytZ=Lfk zKU>J$CRp@GLgCGif{EAX?c4sf+|%-``gyOJ5i$`!BQjndo-AAz!#g+nM`l(^&B>N0 zq2~Kn%)XHmqoG*-VxGgD6+3zVX@`8)kA8lX?Ql46l=z(@Tcv&FtM^srU$rf=)3mm) z_1pcTyza+`*4FF%t~TY%9{p>2eqDFHr+k5;d)oHkSxxS}N!7k5- 
z`)*t~p=uo@*5kFh^vBukc6FV;tZK!-w=b(#EqNk8bMDqJK_BPL^w&N6R^Wch!Pc$L zS%z$n~vU$^+=Zh}-&gWU9zoze1>6c5#=f3)Sw`|c-_4M%f=W-`RUQjpw*=x4G z=hc*`*B`#MTsd-3{n*(v`!9X{6n;4W+s&DGmq&g`h}hlp^*is(D=Drnq2Hd~dm6a* zm`}g;w@)YE-Mz)4cEq;&Y3Ju^VfXYuM$f)pd;ct??n+VAX4mHKvM~NCQ!Dd5*B*+B z>$7h*lRF{5KQrOklqZj7G|3%YEqV1j&-tp%%Jqr%X18LmtancRv!wa*qUGOvB+d85 zr6l;CKkVFeG_pOcx90zof6p|#t$F^eG%9j?wf?5%{dBu|dlp`Mc)Pv)#(w4iZ$>6L rW?Wq|33!@W(g7b!?&H}ty*+AAX0%0$Zo(Sw4GB5xDdm!G+ From 9d99c784ea20ff6eee6db23a95dbb04465e6b4e0 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 4 Oct 2022 17:50:30 -0500 Subject: [PATCH 041/263] Add gossip verification stub --- .../beacon_chain/src/blob_verification.rs | 161 ++++++++++++++++++ .../beacon_chain/src/block_verification.rs | 2 +- 2 files changed, 162 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 8b137891791..6d06cbcdada 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -1 +1,162 @@ +use derivative::Derivative; +use slot_clock::SlotClock; +use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; +use crate::{block_verification::get_validator_pubkey_cache, BeaconChainError}; +use bls::PublicKey; +use std::sync::Arc; +use types::{ + consts::eip4844::BLS_MODULUS, BeaconStateError, BlobsSidecar, EthSpec, Hash256, + SignedBlobsSidecar, Slot, +}; + +pub enum BlobError { + /// The blob sidecar is from a slot that is later than the current slot (with respect to the + /// gossip clock disparity). + /// + /// ## Peer scoring + /// + /// Assuming the local clock is correct, the peer has sent an invalid message. + FutureSlot { + message_slot: Slot, + latest_permissible_slot: Slot, + }, + /// The blob sidecar is from a slot that is prior to the earliest permissible slot (with + /// respect to the gossip clock disparity). + /// + /// ## Peer scoring + /// + /// Assuming the local clock is correct, the peer has sent an invalid message. + PastSlot { + message_slot: Slot, + earliest_permissible_slot: Slot, + }, + + /// The blob sidecar contains an incorrectly formatted `BLSFieldElement` > `BLS_MODULUS`. + /// + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + BlobOutOfRange { blob_index: usize }, + + /// The blob sidecar contains a KZGCommitment that is not a valid G1 point on + /// the bls curve. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + InvalidKZGCommitment, + /// The proposal signature in invalid. + /// + /// ## Peer scoring + /// + /// The signature on the blob sidecar invalid and the peer is faulty. + ProposalSignatureInvalid, + + /// A blob sidecar for this proposer and slot has already been observed. + /// + /// ## Peer scoring + /// + /// The `proposer` has already proposed a sidecar at this slot. The existing sidecar may or may not + /// be equal to the given sidecar. + RepeatSidecar { proposer: u64, slot: Slot }, + + /// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid. + /// + /// ## Peer scoring + /// + /// We were unable to process this sync committee message due to an internal error. It's unclear if the + /// sync committee message is valid. 
+ BeaconChainError(BeaconChainError), +} + +impl From for BlobError { + fn from(e: BeaconChainError) -> Self { + BlobError::BeaconChainError(e) + } +} + +impl From for BlobError { + fn from(e: BeaconStateError) -> Self { + BlobError::BeaconChainError(BeaconChainError::BeaconStateError(e)) + } +} + +/// A wrapper around a `SignedBlobsSidecar` that indicates it has been approved for re-gossiping on +/// the p2p network. +#[derive(Derivative)] +#[derivative(Debug(bound = "T: BeaconChainTypes"))] +pub struct GossipVerifiedBlobsSidecar { + pub blob_sidecar: Arc>, +} + +impl GossipVerifiedBlobsSidecar { + pub fn new( + blob_sidecar: Arc>, + chain: &BeaconChain, + ) -> Result { + let blob_slot = blob_sidecar.message.beacon_block_slot; + // Do not gossip or process blobs from future or past slots. + let latest_permissible_slot = chain + .slot_clock + .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .ok_or(BeaconChainError::UnableToReadSlot)?; + if blob_slot > latest_permissible_slot { + return Err(BlobError::FutureSlot { + message_slot: latest_permissible_slot, + latest_permissible_slot: blob_slot, + }); + } + + let earliest_permissible_slot = chain + .slot_clock + .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .ok_or(BeaconChainError::UnableToReadSlot)?; + if blob_slot > earliest_permissible_slot { + return Err(BlobError::PastSlot { + message_slot: earliest_permissible_slot, + earliest_permissible_slot: blob_slot, + }); + } + + // Verify that blobs are properly formatted + //TODO: add the check while constructing a Blob type from bytes + for (i, blob) in blob_sidecar.message.blobs.iter().enumerate() { + if blob.iter().any(|b| *b >= *BLS_MODULUS) { + return Err(BlobError::BlobOutOfRange { blob_index: i }); + } + } + + // Verify that the KZG proof is a valid G1 point + if PublicKey::deserialize(&blob_sidecar.message.kzg_aggregate_proof.0).is_err() { + return Err(BlobError::InvalidKZGCommitment); + } + + // TODO: Verify proposer signature + + // // let state = /* Get a valid state */ + // let proposer_index = state.get_beacon_proposer_index(blob_slot, &chain.spec)? as u64; + // let signature_is_valid = { + // let pubkey_cache = get_validator_pubkey_cache(chain)?; + // let pubkey = pubkey_cache + // .get(proposer_index as usize) + // .ok_or_else(|| BlobError::UnknownValidator(proposer_index)?; + // blob.verify_signature( + // Some(block_root), + // pubkey, + // &fork, + // chain.genesis_validators_root, + // &chain.spec, + // ) + // }; + + // if !signature_is_valid { + // return Err(BlobError::ProposalSignatureInvalid); + // } + + // TODO: Check that we have not already received a sidecar with a valid signature for this slot. + + Ok(Self { blob_sidecar }) + } +} diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index f83bc535d93..fbc9acdb2e7 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1753,7 +1753,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>( } /// Obtains a read-locked `ValidatorPubkeyCache` from the `chain`. 
-fn get_validator_pubkey_cache( +pub fn get_validator_pubkey_cache( chain: &BeaconChain, ) -> Result>, BlockError> { chain From 12fe5145508b67c04f02f89610cfafd29cda88fd Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 4 Oct 2022 19:17:51 -0500 Subject: [PATCH 042/263] Add more gossip verification functions for blobs --- beacon_node/beacon_chain/src/beacon_chain.rs | 18 +++++++++ .../beacon_chain/src/blob_verification.rs | 12 +++--- beacon_node/beacon_chain/src/lib.rs | 2 +- beacon_node/beacon_chain/src/metrics.rs | 16 ++++++++ .../network/src/beacon_processor/mod.rs | 1 - .../beacon_processor/worker/gossip_methods.rs | 40 +++++++++++++++---- 6 files changed, 73 insertions(+), 16 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f43f0403cc5..f8b9dde98cd 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6,6 +6,7 @@ use crate::attestation_verification::{ use crate::attester_cache::{AttesterCache, AttesterCacheKey}; use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::beacon_proposer_cache::BeaconProposerCache; +use crate::blob_verification::{BlobError, VerifiedBlobsSidecar}; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::{ check_block_is_finalized_descendant, check_block_relevancy, get_block_root, @@ -1773,6 +1774,23 @@ impl BeaconChain { }) } + /// Accepts some `SignedBlobsSidecar` from the network and attempts to verify it, + /// returning `Ok(_)` if it is valid to be (re)broadcast on the gossip network. + pub fn verify_blobs_sidecar_for_gossip<'a>( + &self, + blobs_sidecar: &'a SignedBlobsSidecar, + ) -> Result, BlobError> { + metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_REQUESTS); + let _timer = metrics::start_timer(&metrics::BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES); + VerifiedBlobsSidecar::verify(blobs_sidecar, self).map(|v| { + if let Some(_event_handler) = self.event_handler.as_ref() { + // TODO: Handle sse events + } + metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_SUCCESSES); + v + }) + } + /// Accepts some attestation-type object and attempts to verify it in the context of fork /// choice. If it is valid it is applied to `self.fork_choice`. /// diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 6d06cbcdada..4d1627567c0 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -87,13 +87,13 @@ impl From for BlobError { /// the p2p network. 
#[derive(Derivative)] #[derivative(Debug(bound = "T: BeaconChainTypes"))] -pub struct GossipVerifiedBlobsSidecar { - pub blob_sidecar: Arc>, +pub struct VerifiedBlobsSidecar<'a, T: BeaconChainTypes> { + pub blob_sidecar: &'a SignedBlobsSidecar, } -impl GossipVerifiedBlobsSidecar { - pub fn new( - blob_sidecar: Arc>, +impl<'a, T: BeaconChainTypes> VerifiedBlobsSidecar<'a, T> { + pub fn verify( + blob_sidecar: &'a SignedBlobsSidecar, chain: &BeaconChain, ) -> Result { let blob_slot = blob_sidecar.message.beacon_block_slot; @@ -121,7 +121,7 @@ impl GossipVerifiedBlobsSidecar { } // Verify that blobs are properly formatted - //TODO: add the check while constructing a Blob type from bytes + //TODO: add the check while constructing a Blob type from bytes instead of after for (i, blob) in blob_sidecar.message.blobs.iter().enumerate() { if blob.iter().any(|b| *b >= *BLS_MODULUS) { return Err(BlobError::BlobOutOfRange { blob_index: i }); diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 84e15ead71c..ba83047f57e 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -5,7 +5,7 @@ mod beacon_chain; mod beacon_fork_choice_store; pub mod beacon_proposer_cache; mod beacon_snapshot; -mod blob_verification; +pub mod blob_verification; pub mod block_reward; mod block_times_cache; mod block_verification; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index ead4a540254..f8accec14c9 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -941,6 +941,22 @@ lazy_static! { "beacon_pre_finalization_block_lookup_count", "Number of block roots subject to single block lookups" ); + + /* + * Blob sidecar Verification + */ + pub static ref BLOBS_SIDECAR_PROCESSING_REQUESTS: Result = try_create_int_counter( + "beacon_blobs_sidecar_processing_requests_total", + "Count of all blob sidecars submitted for processing" + ); + pub static ref BLOBS_SIDECAR_PROCESSING_SUCCESSES: Result = try_create_int_counter( + "beacon_blobs_sidecar_processing_successes_total", + "Number of blob sidecars verified for gossip" + ); + pub static ref BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES: Result = try_create_histogram( + "beacon_blobs_sidecar_gossip_verification_seconds", + "Full runtime of blob sidecars gossip verification" + ); } /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 7d7f6602e5c..947c215b3d7 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -1542,7 +1542,6 @@ impl BeaconProcessor { peer_client, blobs, work_reprocessing_tx, - duplicate_cache, seen_timestamp, ) .await diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index e5b0d76dae4..37c5f8c776e 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -3,6 +3,7 @@ use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::store::Error; use beacon_chain::{ attestation_verification::{self, Error as AttnError, VerifiedAttestation}, + blob_verification::BlobError, observed_operations::ObservationOutcome, sync_committee_verification::{self, Error as SyncCommitteeError}, 
validator_monitor::get_block_delay_ms, @@ -697,15 +698,27 @@ impl Worker { #[allow(clippy::too_many_arguments)] pub async fn process_gossip_blob( self, - _message_id: MessageId, - _peer_id: PeerId, - _peer_client: Client, - _blob: Arc>, - _reprocess_tx: mpsc::Sender>, - _duplicate_cache: DuplicateCache, - _seen_duration: Duration, + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + blob: Arc>, + reprocess_tx: mpsc::Sender>, + seen_timestamp: Duration, ) { - //FIXME(sean) + match self.chain.verify_blobs_sidecar_for_gossip(&blob) { + Ok(verified_sidecar) => { + // Register with validator monitor + // Propagate + // Apply to fork choice + } + Err(error) => self.handle_blobs_verification_failure( + peer_id, + message_id, + Some(reprocess_tx), + error, + seen_timestamp, + ), + }; } /// Process the beacon block received from the gossip network and @@ -2212,4 +2225,15 @@ impl Worker { self.propagate_if_timely(is_timely, message_id, peer_id) } + + /// Handle an error whilst verifying a `SignedBlobsSidecar` from the network. + fn handle_blobs_verification_failure( + &self, + peer_id: PeerId, + message_id: MessageId, + reprocess_tx: Option>>, + error: BlobError, + seen_timestamp: Duration, + ) { + } } From c55b28bf10a271588d0045781c22da63bcca9b68 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 4 Oct 2022 19:18:06 -0500 Subject: [PATCH 043/263] Minor fixes --- beacon_node/lighthouse_network/src/config.rs | 3 +-- beacon_node/lighthouse_network/src/rpc/protocol.rs | 10 +++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index a6488be23a9..3cd2c542312 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -295,8 +295,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos let topic_bytes = message.topic.as_str().as_bytes(); match fork_context.current_fork() { // according to: https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#the-gossip-domain-gossipsub - // the derivation of the message-id remains the same in the merge - //TODO(sean): figure this out + // the derivation of the message-id remains the same in the merge and for eip 4844. ForkName::Altair | ForkName::Merge | ForkName::Eip4844 => { let topic_len_bytes = topic_bytes.len().to_le_bytes(); let mut vec = Vec::with_capacity( diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 3840aa3ba6f..3812b6153b2 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -114,8 +114,7 @@ lazy_static! { pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M /// The maximum bytes that can be sent across the RPC post-merge. pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M - //TODO(sean) check -pub(crate) const MAX_RPC_SIZE_POST_EIP4844: usize = 20 * 1_048_576; // 10M +pub(crate) const MAX_RPC_SIZE_POST_EIP4844: usize = 10 * 1_048_576; // 10M /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; /// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte). 
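The `config.rs` comment above notes that the gossipsub message-id derivation is unchanged from Altair through the merge and EIP-4844. For reference, a sketch of that derivation, assuming the usual scheme of taking the first 20 bytes of SHA-256 over a message domain, the little-endian topic length, the topic and the snappy-decompressed payload; the `sha2` crate is used here purely for illustration:

    use sha2::{Digest, Sha256};

    /// 4-byte domain for messages whose snappy decompression succeeded.
    const MESSAGE_DOMAIN_VALID_SNAPPY: [u8; 4] = [0x01, 0x00, 0x00, 0x00];

    fn message_id(topic: &[u8], decompressed_data: &[u8]) -> [u8; 20] {
        let mut hasher = Sha256::new();
        hasher.update(MESSAGE_DOMAIN_VALID_SNAPPY);
        hasher.update((topic.len() as u64).to_le_bytes());
        hasher.update(topic);
        hasher.update(decompressed_data);
        let digest = hasher.finalize();
        let mut id = [0u8; 20];
        id.copy_from_slice(&digest[..20]);
        id
    }

    fn main() {
        let id = message_id(b"/eth2/00000000/beacon_block/ssz_snappy", b"payload");
        assert_eq!(id.len(), 20);
    }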
@@ -152,7 +151,7 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { ), ForkName::Eip4844 => RpcLimits::new( *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks - *SIGNED_BEACON_BLOCK_EIP4844_MAX, // Merge block is larger than base and altair blocks + *SIGNED_BEACON_BLOCK_EIP4844_MAX, // EIP 4844 block is larger than base, altair and merge blocks ), } } @@ -329,8 +328,9 @@ impl ProtocolId { Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), - Protocol::BlobsByRange => rpc_block_limits_by_fork(fork_context.current_fork()), - + Protocol::BlobsByRange => { + RpcLimits::new(*SIGNED_BLOBS_SIDECAR_MIN, *SIGNED_BLOBS_SIDECAR_MAX) + } Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), From 21bf3d37cdce46632cfa4e3f5abb194f172c6851 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 5 Oct 2022 02:52:23 -0500 Subject: [PATCH 044/263] Reprocess blob sidecar messages --- .../beacon_chain/src/blob_verification.rs | 8 + .../network/src/beacon_processor/mod.rs | 60 +++++-- .../work_reprocessing_queue.rs | 159 +++++++++++++++++- .../beacon_processor/worker/gossip_methods.rs | 80 ++++++++- 4 files changed, 288 insertions(+), 19 deletions(-) diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 4d1627567c0..18708aa5ff0 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -62,6 +62,14 @@ pub enum BlobError { /// be equal to the given sidecar. RepeatSidecar { proposer: u64, slot: Slot }, + /// The `blobs_sidecar.message.beacon_block_root` block is unknown. + /// + /// ## Peer scoring + /// + /// The attestation points to a block we have not yet imported. It's unclear if the attestation + /// is valid or not. + UnknownHeadBlock { beacon_block_root: Hash256 }, + /// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid. /// /// ## Peer scoring diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 947c215b3d7..87e092332f5 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -80,6 +80,8 @@ mod worker; use crate::beacon_processor::work_reprocessing_queue::QueuedGossipBlock; pub use worker::{ChainSegmentProcessId, GossipAggregatePackage, GossipAttestationPackage}; +use self::work_reprocessing_queue::QueuedBlobsSidecar; + /// The maximum size of the channel for work events to the `BeaconProcessor`. /// /// Setting this too low will cause consensus messages to be dropped. @@ -116,6 +118,8 @@ const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; //FIXME(sean) verify const MAX_GOSSIP_BLOB_QUEUE_LEN: usize = 1_024; +//FIXME(sean) verify +const MAX_BLOBS_SIDECAR_REPROCESS_QUEUE_LEN: usize = 1_024; /// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but /// within acceptable clock disparity) that will be queued before we start dropping them. 
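The `MAX_*_QUEUE_LEN` constants above bound simple FIFO/LIFO work queues: once a queue is full, further items are dropped rather than letting the backlog grow without limit. A minimal sketch of one reasonable policy (drop the newest item when full); this is not the repo's `FifoQueue`, which presumably also logs and counts the drop:

    use std::collections::VecDeque;

    /// A bounded first-in-first-out queue that refuses new items once full.
    struct BoundedFifo<T> {
        queue: VecDeque<T>,
        max_len: usize,
    }

    impl<T> BoundedFifo<T> {
        fn new(max_len: usize) -> Self {
            Self { queue: VecDeque::with_capacity(max_len), max_len }
        }

        fn push(&mut self, item: T) {
            if self.queue.len() < self.max_len {
                self.queue.push_back(item);
            }
            // else: queue full, drop `item` (a log line or metric would go here)
        }

        fn pop(&mut self) -> Option<T> {
            self.queue.pop_front()
        }
    }

    fn main() {
        let mut q = BoundedFifo::new(2);
        q.push(1);
        q.push(2);
        q.push(3); // dropped: the queue already holds 2 items
        assert_eq!(q.pop(), Some(1));
        assert_eq!(q.pop(), Some(2));
        assert_eq!(q.pop(), None);
    }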
@@ -206,6 +210,7 @@ pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; +pub const UNKNOWN_BLOBS_SIDECAR: &str = "unknown_blobs_sidecar"; /// A simple first-in-first-out queue with a maximum length. struct FifoQueue { @@ -413,7 +418,7 @@ impl WorkEvent { pub fn gossip_blobs_sidecar( message_id: MessageId, peer_id: PeerId, - peer_client: Client, + _peer_client: Client, blobs: Arc>, seen_timestamp: Duration, ) -> Self { @@ -422,7 +427,6 @@ impl WorkEvent { work: Work::GossipBlobsSidecar { message_id, peer_id, - peer_client, blobs, seen_timestamp, }, @@ -670,6 +674,20 @@ impl std::convert::From> for WorkEvent { seen_timestamp, }, }, + ReadyWork::BlobsSidecar(QueuedBlobsSidecar { + peer_id, + message_id, + blobs_sidecar, + seen_timestamp, + }) => Self { + drop_during_sync: true, + work: Work::UnknownBlobsSidecar { + message_id, + peer_id, + blobs: blobs_sidecar, + seen_timestamp, + }, + }, } } } @@ -722,7 +740,12 @@ pub enum Work { GossipBlobsSidecar { message_id: MessageId, peer_id: PeerId, - peer_client: Client, + blobs: Arc>, + seen_timestamp: Duration, + }, + UnknownBlobsSidecar { + message_id: MessageId, + peer_id: PeerId, blobs: Arc>, seen_timestamp: Duration, }, @@ -815,6 +838,7 @@ impl Work { Work::BlobsByRangeRequest { .. } => BLOBS_BY_RANGE_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, + Work::UnknownBlobsSidecar { .. } => UNKNOWN_BLOBS_SIDECAR, } } } @@ -931,6 +955,7 @@ impl BeaconProcessor { LifoQueue::new(MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN); let mut unknown_block_attestation_queue = LifoQueue::new(MAX_UNAGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN); + let mut unknown_blobs_sidecar_queue = LifoQueue::new(MAX_BLOBS_SIDECAR_REPROCESS_QUEUE_LEN); let mut sync_message_queue = LifoQueue::new(MAX_SYNC_MESSAGE_QUEUE_LEN); let mut sync_contribution_queue = LifoQueue::new(MAX_SYNC_CONTRIBUTION_QUEUE_LEN); @@ -1312,6 +1337,9 @@ impl BeaconProcessor { Work::UnknownBlockAggregate { .. } => { unknown_block_aggregate_queue.push(work) } + Work::UnknownBlobsSidecar { .. } => { + unknown_blobs_sidecar_queue.push(work) + } } } } @@ -1531,20 +1559,16 @@ impl BeaconProcessor { Work::GossipBlobsSidecar { message_id, peer_id, - peer_client, blobs, seen_timestamp, } => task_spawner.spawn_async(async move { - worker - .process_gossip_blob( - message_id, - peer_id, - peer_client, - blobs, - work_reprocessing_tx, - seen_timestamp, - ) - .await + worker.process_gossip_blob( + message_id, + peer_id, + blobs, + Some(work_reprocessing_tx), + seen_timestamp, + ) }), /* * Import for blocks that we received earlier than their intended slot. 
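Note that the gossip-path spawn above hands `Some(work_reprocessing_tx)` to `process_gossip_blob`, while the reprocessed `UnknownBlobsSidecar` path (added just below) passes `None`. That `Option` is what keeps a sidecar from bouncing between the worker and the reprocessing queue forever. A minimal sketch of the pattern, with illustrative names only:

    use std::sync::mpsc::Sender;

    /// On the first (gossip) attempt the caller supplies a reprocessing sender,
    /// so work referencing an unknown block can be parked until the block
    /// arrives. On the second (reprocessed) attempt the sender is `None`, so
    /// the work is dropped instead of being requeued again.
    fn handle_unknown_block_root<T>(reprocess_tx: Option<Sender<T>>, work: T) {
        match reprocess_tx {
            Some(tx) => {
                let _ = tx.send(work); // park for a later retry; ignore a closed channel
            }
            None => {
                // Still unknown after the delay: give up on this message.
            }
        }
    }

    fn main() {
        let (tx, rx) = std::sync::mpsc::channel();
        handle_unknown_block_root(Some(tx), "blob sidecar");
        assert_eq!(rx.recv().unwrap(), "blob sidecar");
        handle_unknown_block_root::<&str>(None, "blob sidecar"); // no sender: dropped
    }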
@@ -1731,6 +1755,14 @@ impl BeaconProcessor { seen_timestamp, ) }), + Work::UnknownBlobsSidecar { + message_id, + peer_id, + blobs, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_blob(message_id, peer_id, blobs, None, seen_timestamp) + }), }; } } diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index 2aeec11c325..b08542eeb54 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -30,7 +30,10 @@ use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::error::Error as TimeError; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; -use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, SubnetId}; +use types::{ + Attestation, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, SignedBlobsSidecar, + SubnetId, +}; const TASK_NAME: &str = "beacon_processor_reprocess_queue"; const GOSSIP_BLOCKS: &str = "gossip_blocks"; @@ -44,6 +47,10 @@ const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); /// For how long to queue aggregated and unaggregated attestations for re-processing. pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); +/// For how long to queue blob sidecars for re-processing. +/// TODO: rethink duration +pub const QUEUED_BLOBS_SIDECARS_DELAY: Duration = Duration::from_secs(6); + /// For how long to queue rpc blocks before sending them back for reprocessing. pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(3); @@ -55,6 +62,10 @@ const MAXIMUM_QUEUED_BLOCKS: usize = 16; /// How many attestations we keep before new ones get dropped. const MAXIMUM_QUEUED_ATTESTATIONS: usize = 16_384; +/// TODO: fix number +/// How many blobs we keep before new ones get dropped. +const MAXIMUM_QUEUED_BLOB_SIDECARS: usize = 16_384; + /// Messages that the scheduler can receive. pub enum ReprocessQueueMessage { /// A block that has been received early and we should queue for later processing. @@ -69,6 +80,8 @@ pub enum ReprocessQueueMessage { UnknownBlockUnaggregate(QueuedUnaggregate), /// An aggregated attestation that references an unknown block. UnknownBlockAggregate(QueuedAggregate), + /// A blob sidecar that references an unknown block. + UnknownBlobSidecar(QueuedBlobsSidecar), } /// Events sent by the scheduler once they are ready for re-processing. @@ -77,6 +90,7 @@ pub enum ReadyWork { RpcBlock(QueuedRpcBlock), Unaggregate(QueuedUnaggregate), Aggregate(QueuedAggregate), + BlobsSidecar(QueuedBlobsSidecar), } /// An Attestation for which the corresponding block was not seen while processing, queued for @@ -118,6 +132,15 @@ pub struct QueuedRpcBlock { pub should_process: bool, } +/// A blob sidecar for which the corresponding block was not seen while processing, queued for +/// later. +pub struct QueuedBlobsSidecar { + pub peer_id: PeerId, + pub message_id: MessageId, + pub blobs_sidecar: Arc>, + pub seen_timestamp: Duration, +} + /// Unifies the different messages processed by the block delay queue. enum InboundEvent { /// A gossip block that was queued for later processing and is ready for import. @@ -127,6 +150,8 @@ enum InboundEvent { ReadyRpcBlock(QueuedRpcBlock), /// An aggregated or unaggregated attestation is ready for re-processing. 
ReadyAttestation(QueuedAttestationId), + /// A blob sidecar is ready for re-processing. + ReadyBlobsSidecar(QueuedBlobsSidecarId), /// A `DelayQueue` returned an error. DelayQueueError(TimeError, &'static str), /// A message sent to the `ReprocessQueue` @@ -147,6 +172,7 @@ struct ReprocessQueue { rpc_block_delay_queue: DelayQueue>, /// Queue to manage scheduled attestations. attestations_delay_queue: DelayQueue, + blobs_sidecar_delay_queue: DelayQueue, /* Queued items */ /// Queued blocks. @@ -155,15 +181,19 @@ struct ReprocessQueue { queued_aggregates: FnvHashMap, DelayKey)>, /// Queued attestations. queued_unaggregates: FnvHashMap, DelayKey)>, + queued_blob_sidecars: FnvHashMap, DelayKey)>, /// Attestations (aggregated and unaggregated) per root. awaiting_attestations_per_root: HashMap>, + awaiting_blobs_sidecars_per_root: HashMap>, /* Aux */ /// Next attestation id, used for both aggregated and unaggregated attestations next_attestation: usize, + next_sidecar: usize, early_block_debounce: TimeLatch, rpc_block_debounce: TimeLatch, attestation_delay_debounce: TimeLatch, + blobs_sidecar_debounce: TimeLatch, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -172,6 +202,9 @@ enum QueuedAttestationId { Unaggregate(usize), } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +struct QueuedBlobsSidecarId(usize); + impl QueuedAggregate { pub fn beacon_block_root(&self) -> &Hash256 { &self.attestation.message.aggregate.data.beacon_block_root @@ -235,6 +268,21 @@ impl Stream for ReprocessQueue { Poll::Ready(None) | Poll::Pending => (), } + match self.blobs_sidecar_delay_queue.poll_expired(cx) { + Poll::Ready(Some(Ok(id))) => { + return Poll::Ready(Some(InboundEvent::ReadyBlobsSidecar(id.into_inner()))); + } + Poll::Ready(Some(Err(e))) => { + return Poll::Ready(Some(InboundEvent::DelayQueueError( + e, + "blobs_sidecar_queue", + ))); + } + // `Poll::Ready(None)` means that there are no more entries in the delay queue and we + // will continue to get this result until something else is added into the queue. + Poll::Ready(None) | Poll::Pending => (), + } + // Last empty the messages channel. match self.work_reprocessing_rx.poll_recv(cx) { Poll::Ready(Some(message)) => return Poll::Ready(Some(InboundEvent::Msg(message))), @@ -264,14 +312,19 @@ pub fn spawn_reprocess_scheduler( gossip_block_delay_queue: DelayQueue::new(), rpc_block_delay_queue: DelayQueue::new(), attestations_delay_queue: DelayQueue::new(), + blobs_sidecar_delay_queue: DelayQueue::new(), queued_gossip_block_roots: HashSet::new(), queued_aggregates: FnvHashMap::default(), queued_unaggregates: FnvHashMap::default(), + queued_blob_sidecars: FnvHashMap::default(), awaiting_attestations_per_root: HashMap::new(), + awaiting_blobs_sidecars_per_root: HashMap::new(), next_attestation: 0, + next_sidecar: 0, early_block_debounce: TimeLatch::default(), rpc_block_debounce: TimeLatch::default(), attestation_delay_debounce: TimeLatch::default(), + blobs_sidecar_debounce: TimeLatch::default(), }; executor.spawn( @@ -473,6 +526,39 @@ impl ReprocessQueue { self.next_attestation += 1; } + InboundEvent::Msg(UnknownBlobSidecar(queued_blob_sidecar)) => { + if self.blobs_sidecar_delay_queue.len() >= MAXIMUM_QUEUED_BLOB_SIDECARS { + if self.blobs_sidecar_debounce.elapsed() { + error!( + log, + "Blobs sidecar queue is full"; + "queue_size" => MAXIMUM_QUEUED_BLOB_SIDECARS, + "msg" => "check system clock" + ); + } + // Drop the attestation. + return; + } + + let id = QueuedBlobsSidecarId(self.next_sidecar); + + // Register the delay. 
+ let delay_key = self + .blobs_sidecar_delay_queue + .insert(id, QUEUED_BLOBS_SIDECARS_DELAY); + + // Register this sidecar for the corresponding root. + self.awaiting_blobs_sidecars_per_root + .entry(queued_blob_sidecar.blobs_sidecar.message.beacon_block_root) + .or_default() + .push(id); + + // Store the blob sidecar and its info. + self.queued_blob_sidecars + .insert(self.next_sidecar, (queued_blob_sidecar, delay_key)); + + self.next_sidecar += 1; + } InboundEvent::Msg(BlockImported(root)) => { // Unqueue the attestations we have for this root, if any. if let Some(queued_ids) = self.awaiting_attestations_per_root.remove(&root) { @@ -517,6 +603,43 @@ impl ReprocessQueue { } } } + // Unqueue the blob sidecars we have for this root, if any. + // TODO: merge the 2 data structures. + if let Some(queued_ids) = self.awaiting_blobs_sidecars_per_root.remove(&root) { + for id in queued_ids { + // metrics::inc_counter( + // &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS, + // ); + + if let Some((work, delay_key)) = self + .queued_blob_sidecars + .remove(&id.0) + .map(|(blobs_sidecar, delay_key)| { + (ReadyWork::BlobsSidecar(blobs_sidecar), delay_key) + }) + { + // Remove the delay. + self.blobs_sidecar_delay_queue.remove(&delay_key); + + // Send the work. + if self.ready_work_tx.try_send(work).is_err() { + error!( + log, + "Failed to send scheduled blob sidecar"; + ); + } + } else { + // There is a mismatch between the blob sidecar ids registered for this + // root and the queued blob sidecars. This should never happen. + error!( + log, + "Unknown queued blob sidecar for block root"; + "block_root" => ?root, + "id" => ?id, + ); + } + } + } } // A block that was queued for later processing is now ready to be processed. InboundEvent::ReadyGossipBlock(ready_block) => { @@ -591,6 +714,40 @@ impl ReprocessQueue { } } } + InboundEvent::ReadyBlobsSidecar(queued_blobs_sidecar_id) => { + // metrics::inc_counter( + // &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS, + // ); + + if let Some((root, work)) = self + .queued_blob_sidecars + .remove(&queued_blobs_sidecar_id.0) + .map(|(blobs_sidecar, _delay_key)| { + ( + blobs_sidecar.blobs_sidecar.message.beacon_block_root, + ReadyWork::BlobsSidecar(blobs_sidecar), + ) + }) + { + if self.ready_work_tx.try_send(work).is_err() { + error!( + log, + "Failed to send scheduled attestation"; + ); + } + + if let Some(queued_blob_sidecars) = + self.awaiting_blobs_sidecars_per_root.get_mut(&root) + { + if let Some(index) = queued_blob_sidecars + .iter() + .position(|&id| id == queued_blobs_sidecar_id) + { + queued_blob_sidecars.swap_remove(index); + } + } + } + } } metrics::set_gauge_vec( diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 37c5f8c776e..b59537a1d28 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -1,3 +1,4 @@ +use crate::beacon_processor::work_reprocessing_queue::QueuedBlobsSidecar; use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::store::Error; @@ -696,13 +697,12 @@ impl Worker { } #[allow(clippy::too_many_arguments)] - pub async fn process_gossip_blob( + pub fn process_gossip_blob( self, message_id: MessageId, peer_id: PeerId, - peer_client: Client, blob: Arc>, - reprocess_tx: mpsc::Sender>, + reprocess_tx: Option>>, seen_timestamp: Duration, ) { match 
self.chain.verify_blobs_sidecar_for_gossip(&blob) { @@ -714,8 +714,9 @@ impl Worker { Err(error) => self.handle_blobs_verification_failure( peer_id, message_id, - Some(reprocess_tx), + reprocess_tx, error, + blob, seen_timestamp, ), }; @@ -2233,7 +2234,78 @@ impl Worker { message_id: MessageId, reprocess_tx: Option>>, error: BlobError, + blobs_sidecar: Arc>, seen_timestamp: Duration, ) { + // TODO: metrics + match &error { + BlobError::FutureSlot { .. } => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } + BlobError::PastSlot { .. } => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } + BlobError::BeaconChainError(e) => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } + BlobError::BlobOutOfRange { blob_index } => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + } + BlobError::InvalidKZGCommitment => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + } + BlobError::ProposalSignatureInvalid => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + } + BlobError::RepeatSidecar { proposer, slot } => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } + BlobError::UnknownHeadBlock { beacon_block_root } => { + debug!( + self.log, + "Blob sidecar for unknown block"; + "peer_id" => %peer_id, + "block" => ?beacon_block_root + ); + if let Some(sender) = reprocess_tx { + // We don't know the block, get the sync manager to handle the block lookup, and + // send the attestation to be scheduled for re-processing. + self.sync_tx + .send(SyncMessage::UnknownBlockHash(peer_id, *beacon_block_root)) + .unwrap_or_else(|_| { + warn!( + self.log, + "Failed to send to sync service"; + "msg" => "UnknownBlockHash" + ) + }); + let msg = ReprocessQueueMessage::UnknownBlobSidecar(QueuedBlobsSidecar { + peer_id, + message_id, + blobs_sidecar, + seen_timestamp, + }); + + if sender.try_send(msg).is_err() { + error!( + self.log, + "Failed to send blob sidecar for re-processing"; + ) + } + } else { + // We shouldn't make any further attempts to process this attestation. + // + // Don't downscore the peer since it's not clear if we requested this head + // block from them or not. + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + + return; + } + } } } From 91efb9d4c780b55025c3793a67bd9dacc1b2c924 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 5 Oct 2022 02:56:55 -0500 Subject: [PATCH 045/263] Add todos --- beacon_node/beacon_chain/src/blob_verification.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 18708aa5ff0..90709d58195 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -105,6 +105,7 @@ impl<'a, T: BeaconChainTypes> VerifiedBlobsSidecar<'a, T> { chain: &BeaconChain, ) -> Result { let blob_slot = blob_sidecar.message.beacon_block_slot; + let blob_root = blob_sidecar.message.beacon_block_root; // Do not gossip or process blobs from future or past slots. let latest_permissible_slot = chain .slot_clock @@ -117,6 +118,9 @@ impl<'a, T: BeaconChainTypes> VerifiedBlobsSidecar<'a, T> { }); } + // TODO: return `UnknownHeadBlock` if blob_root doesn't exist in fork choice + // and wherever it could be found. 
+ let earliest_permissible_slot = chain .slot_clock .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) From b5b4ce950981a08543a2e4750a4310fdf4085728 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Wed, 5 Oct 2022 17:14:45 -0400 Subject: [PATCH 046/263] blob production --- beacon_node/beacon_chain/src/beacon_chain.rs | 33 +- beacon_node/beacon_chain/src/errors.rs | 5 + .../beacon_chain/src/execution_payload.rs | 45 +- .../src/engine_api/json_structures.rs | 5 +- beacon_node/execution_layer/src/lib.rs | 4 +- beacon_node/http_api/src/lib.rs | 24 +- beacon_node/http_api/src/metrics.rs | 12 + beacon_node/http_api/src/publish_blobs.rs | 129 ++++++ common/eth2/src/lib.rs | 47 +++ common/eth2/src/types.rs | 8 + consensus/types/src/blobs_sidecar.rs | 13 +- consensus/types/src/signed_blobs_sidecar.rs | 14 +- validator_client/src/block_service.rs | 396 ++++++++++++------ validator_client/src/http_metrics/metrics.rs | 6 + validator_client/src/signing_method.rs | 3 + .../src/signing_method/web3signer.rs | 4 + validator_client/src/validator_store.rs | 47 ++- 17 files changed, 625 insertions(+), 170 deletions(-) create mode 100644 beacon_node/http_api/src/publish_blobs.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f8b9dde98cd..943e3a20f36 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -255,7 +255,7 @@ struct PartialBeaconBlock { deposits: Vec, voluntary_exits: Vec, sync_aggregate: Option>, - prepare_payload_handle: Option>, + prepare_payload_handle: Option>, } pub type BeaconForkChoice = ForkChoice< @@ -3291,15 +3291,16 @@ impl BeaconChain { // // Wait for the execution layer to return an execution payload (if one is required). let prepare_payload_handle = partial_beacon_block.prepare_payload_handle.take(); - let execution_payload = if let Some(prepare_payload_handle) = prepare_payload_handle { - let execution_payload = prepare_payload_handle - .await - .map_err(BlockProductionError::TokioJoin)? - .ok_or(BlockProductionError::ShuttingDown)??; - Some(execution_payload) - } else { - None - }; + let (execution_payload, kzg_commitments, blobs) = + if let Some(prepare_payload_handle) = prepare_payload_handle { + let (execution_payload, commitments, blobs) = prepare_payload_handle + .await + .map_err(BlockProductionError::TokioJoin)? 
+ .ok_or(BlockProductionError::ShuttingDown)??; + (execution_payload, commitments, blobs) + } else { + return Err(BlockProductionError::MissingExecutionPayload); + }; // Part 3/3 (blocking) // @@ -3311,6 +3312,7 @@ impl BeaconChain { chain.complete_partial_beacon_block( partial_beacon_block, execution_payload, + kzg_commitments, verification, ) }, @@ -3557,7 +3559,8 @@ impl BeaconChain { fn complete_partial_beacon_block>( &self, partial_beacon_block: PartialBeaconBlock, - execution_payload: Option, + execution_payload: Payload, + kzg_commitments: Vec, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { let PartialBeaconBlock { @@ -3633,8 +3636,7 @@ impl BeaconChain { voluntary_exits: voluntary_exits.into(), sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, - execution_payload: execution_payload - .ok_or(BlockProductionError::MissingExecutionPayload)?, + execution_payload, }, }), BeaconState::Eip4844(_) => BeaconBlock::Eip4844(BeaconBlockEip4844 { @@ -3653,10 +3655,9 @@ impl BeaconChain { voluntary_exits: voluntary_exits.into(), sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, - execution_payload: execution_payload - .ok_or(BlockProductionError::MissingExecutionPayload)?, + execution_payload, //FIXME(sean) get blobs - blob_kzg_commitments: VariableList::empty(), + blob_kzg_commitments: VariableList::from(kzg_commitments), }, }), }; diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 704cba489d2..db521d4a3d1 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -249,6 +249,11 @@ pub enum BlockProductionError { BlockingFailed(execution_layer::Error), TerminalPoWBlockLookupFailed(execution_layer::Error), GetPayloadFailed(execution_layer::Error), + GetBlobsFailed(execution_layer::Error), + BlobPayloadMismatch { + blob_block_hash: ExecutionBlockHash, + payload_block_hash: ExecutionBlockHash, + }, FailedToReadFinalizedBlock(store::Error), MissingFinalizedBlock(Hash256), BlockTooLarge(usize), diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index f056aeb9904..642fae5285e 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -12,6 +12,7 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; +use execution_layer::json_structures::JsonBlobBundlesV1; use execution_layer::{BuilderParams, PayloadStatus}; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; @@ -25,12 +26,13 @@ use std::sync::Arc; use tokio::task::JoinHandle; use tree_hash::TreeHash; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, EthSpec, ExecPayload, ExecutionBlockHash, - Hash256, SignedBeaconBlock, Slot, + BeaconBlockRef, BeaconState, BeaconStateError, Blob, BlobsSidecar, EthSpec, ExecPayload, + ExecutionBlockHash, Hash256, KzgCommitment, SignedBeaconBlock, Slot, }; -pub type PreparePayloadResult = Result; -pub type PreparePayloadHandle = JoinHandle>>; +pub type PreparePayloadResult = + Result<(Payload, Vec, Vec>), BlockProductionError>; +pub type PreparePayloadHandle = JoinHandle>>; #[derive(PartialEq)] pub enum AllowOptimisticImport { @@ -354,7 +356,7 @@ pub fn get_execution_payload< state: &BeaconState, proposer_index: u64, builder_params: BuilderParams, -) -> 
Result, BlockProductionError> { +) -> Result, BlockProductionError> { // Compute all required values from the `state` now to avoid needing to pass it into a spawned // task. let spec = &chain.spec; @@ -413,7 +415,7 @@ pub async fn prepare_execution_payload( proposer_index: u64, latest_execution_payload_header_block_hash: ExecutionBlockHash, builder_params: BuilderParams, -) -> Result +) -> PreparePayloadResult where T: BeaconChainTypes, Payload: ExecPayload + Default, @@ -473,8 +475,8 @@ where // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. // // This future is not executed here, it's up to the caller to await it. - let execution_payload = execution_layer - .get_payload::( + let (execution_payload_result, blobs_result) = tokio::join!( + execution_layer.get_payload::( parent_hash, timestamp, random, @@ -482,17 +484,20 @@ where forkchoice_update_params, builder_params, &chain.spec, - ) - .await - .map_err(BlockProductionError::GetPayloadFailed)?; - - /* - TODO: fetch blob bundles from el engine for block building - let suggested_fee_recipient = execution_layer.get_suggested_fee_recipient(proposer_index).await; - let blobs = execution_layer.get_blob_bundles(parent_hash, timestamp, random, suggested_fee_recipient) - .await - .map_err(BlockProductionError::GetPayloadFailed)?; - */ + ), + execution_layer.get_blob_bundles(parent_hash, timestamp, random, proposer_index) + ); + + let execution_payload = + execution_payload_result.map_err(BlockProductionError::GetPayloadFailed)?; + let blobs = blobs_result.map_err(BlockProductionError::GetPayloadFailed)?; + + if execution_payload.block_hash() != blobs.block_hash { + return Err(BlockProductionError::BlobPayloadMismatch { + blob_block_hash: blobs.block_hash, + payload_block_hash: execution_payload.block_hash(), + }); + } - Ok(execution_payload) + Ok((execution_payload, blobs.kzgs, blobs.blobs)) } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index eeea53724ab..fde4f706a20 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,6 +1,6 @@ use super::*; use serde::{Deserialize, Serialize}; -use types::{EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList}; +use types::{Blob, EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -272,10 +272,9 @@ impl From for PayloadAttributes { #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(bound = "T: EthSpec", rename_all = "camelCase")] pub struct JsonBlobBundlesV1 { - pub block_hash: Hash256, + pub block_hash: ExecutionBlockHash, pub kzgs: Vec, pub blobs: Vec>, - pub aggregated_proof: KzgProof, } #[derive(Debug, PartialEq, Serialize, Deserialize)] diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 99f86b86ec0..1078876ef70 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -787,8 +787,10 @@ impl ExecutionLayer { parent_hash: ExecutionBlockHash, timestamp: u64, prev_randao: Hash256, - suggested_fee_recipient: Address, + proposer_index: u64, ) -> Result, Error> { + let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; + debug!( self.log(), "Issuing engine_getBlobsBundle"; diff --git 
a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5b4fa5816d8..203b462b164 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -13,6 +13,7 @@ mod block_rewards; mod database; mod metrics; mod proposer_duties; +mod publish_blobs; mod publish_blocks; mod state_id; mod sync_committees; @@ -48,7 +49,7 @@ use types::{ Attestation, AttestationData, AttesterSlashing, BeaconStateError, BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, - SignedBeaconBlock, SignedBlindedBeaconBlock, SignedContributionAndProof, + SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlobsSidecar, SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, }; @@ -1052,6 +1053,26 @@ pub fn serve( }, ); + // POST beacon/blobs + let post_beacon_blobs = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("blobs")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .and_then( + |blobs: Arc>, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| async move { + publish_blobs::publish_blobs(blobs, chain, &network_tx, log) + .await + .map(|()| warp::reply()) + }, + ); + /* * beacon/blocks */ @@ -3162,6 +3183,7 @@ pub fn serve( post_beacon_blocks .boxed() .or(post_beacon_blinded_blocks.boxed()) + .or(post_beacon_blobs.boxed()) .or(post_beacon_pool_attestations.boxed()) .or(post_beacon_pool_attester_slashings.boxed()) .or(post_beacon_pool_proposer_slashings.boxed()) diff --git a/beacon_node/http_api/src/metrics.rs b/beacon_node/http_api/src/metrics.rs index 1c3ab1f6804..6851913733d 100644 --- a/beacon_node/http_api/src/metrics.rs +++ b/beacon_node/http_api/src/metrics.rs @@ -41,4 +41,16 @@ lazy_static::lazy_static! 
{ "http_api_block_published_very_late_total", "The count of times a block was published beyond the attestation deadline" ); + pub static ref HTTP_API_BLOB_BROADCAST_DELAY_TIMES: Result = try_create_histogram( + "http_api_blob_broadcast_delay_times", + "Time between start of the slot and when the blob was broadcast" + ); + pub static ref HTTP_API_BLOB_PUBLISHED_LATE_TOTAL: Result = try_create_int_counter( + "http_api_blob_published_late_total", + "The count of times a blob was published beyond more than half way to the attestation deadline" + ); + pub static ref HTTP_API_BLOB_PUBLISHED_VERY_LATE_TOTAL: Result = try_create_int_counter( + "http_api_blob_published_very_late_total", + "The count of times a blob was published beyond the attestation deadline" + ); } diff --git a/beacon_node/http_api/src/publish_blobs.rs b/beacon_node/http_api/src/publish_blobs.rs new file mode 100644 index 00000000000..41d76c61cd8 --- /dev/null +++ b/beacon_node/http_api/src/publish_blobs.rs @@ -0,0 +1,129 @@ +use crate::metrics; +use beacon_chain::validator_monitor::{get_block_delay_ms, get_slot_delay_ms, timestamp_now}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized}; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use slog::{crit, error, info, warn, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedSender; +use tree_hash::TreeHash; +use types::{ + BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, + SignedBeaconBlock, SignedBlobsSidecar, +}; +use warp::Rejection; + +/// Handles a request from the HTTP API for full blocks. +pub async fn publish_blobs( + blobs_sidecar: Arc>, + chain: Arc>, + network_tx: &UnboundedSender>, + log: Logger, +) -> Result<(), Rejection> { + let seen_timestamp = timestamp_now(); + + // Send the blob, regardless of whether or not it is valid. The API + // specification is very clear that this is the desired behaviour. + crate::publish_pubsub_message( + network_tx, + PubsubMessage::BlobsSidecars(blobs_sidecar.clone()), + )?; + + // Determine the delay after the start of the slot, register it with metrics. + let delay = get_slot_delay_ms( + seen_timestamp, + blobs_sidecar.message.beacon_block_slot, + &chain.slot_clock, + ); + metrics::observe_duration(&metrics::HTTP_API_BLOB_BROADCAST_DELAY_TIMES, delay); + + //FIXME(sean) process blobs + // match chain + // .process_block(blobs_sidecar.clone(), CountUnrealized::True) + // .await + // { + // Ok(root) => { + // info!( + // log, + // "Valid block from HTTP API"; + // "block_delay" => ?delay, + // "root" => format!("{}", root), + // "proposer_index" => block.message().proposer_index(), + // "slot" => block.slot(), + // ); + // + // // Notify the validator monitor. + // chain.validator_monitor.read().register_api_block( + // seen_timestamp, + // blobs_sidecar.message(), + // root, + // &chain.slot_clock, + // ); + // + // // Update the head since it's likely this block will become the new + // // head. + // chain.recompute_head_at_current_slot().await; + // + // // Perform some logging to inform users if their blocks are being produced + // // late. 
+ // // + // // Check to see the thresholds are non-zero to avoid logging errors with small + // // slot times (e.g., during testing) + // let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); + // let error_threshold = crit_threshold / 2; + // if delay >= crit_threshold { + // crit!( + // log, + // "Block was broadcast too late"; + // "msg" => "system may be overloaded, block likely to be orphaned", + // "delay_ms" => delay.as_millis(), + // "slot" => block.slot(), + // "root" => ?root, + // ) + // } else if delay >= error_threshold { + // error!( + // log, + // "Block broadcast was delayed"; + // "msg" => "system may be overloaded, block may be orphaned", + // "delay_ms" => delay.as_millis(), + // "slot" => block.slot(), + // "root" => ?root, + // ) + // } + // + // Ok(()) + // } + // Err(BlockError::BlockIsAlreadyKnown) => { + // info!( + // log, + // "Block from HTTP API already known"; + // "block" => ?block.canonical_root(), + // "slot" => block.slot(), + // ); + // Ok(()) + // } + // Err(BlockError::RepeatProposal { proposer, slot }) => { + // warn!( + // log, + // "Block ignored due to repeat proposal"; + // "msg" => "this can happen when a VC uses fallback BNs. \ + // whilst this is not necessarily an error, it can indicate issues with a BN \ + // or between the VC and BN.", + // "slot" => slot, + // "proposer" => proposer, + // ); + // Ok(()) + // } + // Err(e) => { + // let msg = format!("{:?}", e); + // error!( + // log, + // "Invalid block provided to HTTP API"; + // "reason" => &msg + // ); + // Err(warp_utils::reject::broadcast_without_import(msg)) + // } + // } + Ok(()) +} diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 104ca9ccd40..52e8922ccbe 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -603,6 +603,27 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST beacon/blobs` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn post_beacon_blobs( + &self, + block: &SignedBlobsSidecar, + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blobs"); + + //FIXME(sean) should we re-use the proposal timeout? seems reasonable to.. + self.post_with_timeout(path, block, self.timeouts.proposal) + .await?; + + Ok(()) + } + /// `POST beacon/blinded_blocks` /// /// Returns `Ok(None)` on a 404 error. @@ -1269,6 +1290,32 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `GET v1/validator/blocks_and_blobs/{slot}` + pub async fn get_validator_blocks_and_blobs>( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("validator") + .push("blocks_and_blobs") + .push(&slot.to_string()); + + path.query_pairs_mut() + .append_pair("randao_reveal", &randao_reveal.to_string()); + + if let Some(graffiti) = graffiti { + path.query_pairs_mut() + .append_pair("graffiti", &graffiti.to_string()); + } + + self.get(path).await + } + /// `GET v2/validator/blinded_blocks/{slot}` pub async fn get_validator_blinded_blocks>( &self, diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index e6573580034..2ac4fcf49be 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1110,6 +1110,14 @@ pub struct LivenessResponseData { pub is_live: bool, } +#[derive(PartialEq, Debug, Serialize, Deserialize)] +#[serde(bound = "T: EthSpec, Payload: ExecPayload")] +pub struct BlocksAndBlobs> { + pub block: BeaconBlock, + pub blobs: Vec>, + pub kzg_aggregate_proof: KzgProof, +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/blobs_sidecar.rs b/consensus/types/src/blobs_sidecar.rs index 75100a04180..5003a97a62c 100644 --- a/consensus/types/src/blobs_sidecar.rs +++ b/consensus/types/src/blobs_sidecar.rs @@ -1,5 +1,5 @@ use crate::kzg_proof::KzgProof; -use crate::{Blob, EthSpec, Hash256, Slot}; +use crate::{BeaconBlock, Blob, EthSpec, Hash256, SignedRoot, Slot}; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -9,14 +9,17 @@ use tree_hash_derive::TreeHash; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq, Default)] -pub struct BlobsSidecar { +#[serde(bound = "T: EthSpec")] +pub struct BlobsSidecar { pub beacon_block_root: Hash256, pub beacon_block_slot: Slot, - pub blobs: VariableList, E::MaxBlobsPerBlock>, + pub blobs: VariableList, T::MaxBlobsPerBlock>, pub kzg_aggregate_proof: KzgProof, } -impl BlobsSidecar { +impl SignedRoot for BlobsSidecar {} + +impl BlobsSidecar { pub fn empty() -> Self { Self::default() } @@ -24,6 +27,6 @@ impl BlobsSidecar { // Fixed part Self::empty().as_ssz_bytes().len() // Max size of variable length `blobs` field - + (E::max_blobs_per_block() * as Encode>::ssz_fixed_len()) + + (T::max_blobs_per_block() * as Encode>::ssz_fixed_len()) } } diff --git a/consensus/types/src/signed_blobs_sidecar.rs b/consensus/types/src/signed_blobs_sidecar.rs index 3e1ee6df803..74a779219a1 100644 --- a/consensus/types/src/signed_blobs_sidecar.rs +++ b/consensus/types/src/signed_blobs_sidecar.rs @@ -8,7 +8,17 @@ use tree_hash_derive::TreeHash; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq)] -pub struct SignedBlobsSidecar { - pub message: BlobsSidecar, +#[serde(bound = "T: EthSpec")] +pub struct SignedBlobsSidecar { + pub message: BlobsSidecar, pub signature: Signature, } + +impl SignedBlobsSidecar { + pub fn from_blob(blob: BlobsSidecar, signature: Signature) -> Self { + Self { + message: blob, + signature, + } + } +} diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index b0b69a4f50d..3ef0c0e25b7 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -6,13 +6,16 @@ use crate::{ }; use crate::{http_metrics::metrics, validator_store::ValidatorStore}; use environment::RuntimeContext; -use eth2::types::Graffiti; +use eth2::types::{Graffiti, VariableList}; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use 
std::ops::Deref; use std::sync::Arc; use tokio::sync::mpsc; -use types::{BlindedPayload, BlockType, EthSpec, ExecPayload, FullPayload, PublicKeyBytes, Slot}; +use types::{ + BlindedPayload, BlobsSidecar, BlockType, EthSpec, ExecPayload, ForkName, FullPayload, + PublicKeyBytes, Slot, +}; #[derive(Debug)] pub enum BlockError { @@ -316,126 +319,285 @@ impl BlockService { let proposer_index = self.validator_store.validator_index(&validator_pubkey); let validator_pubkey_ref = &validator_pubkey; - // Request block from first responsive beacon node. - let block = self - .beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - let block = match Payload::block_type() { - BlockType::Full => { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_GET], - ); - beacon_node - .get_validator_blocks::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })? - .data - } - BlockType::Blinded => { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], - ); - beacon_node - .get_validator_blinded_blocks::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })? - .data - } - }; + match self.context.eth2_config.spec.fork_name_at_slot::(slot) { + ForkName::Base | ForkName::Altair | ForkName::Merge => { + // Request block from first responsive beacon node. + let block = self + .beacon_nodes + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let block = match Payload::block_type() { + BlockType::Full => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_GET], + ); + beacon_node + .get_validator_blocks::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })? + .data + } + BlockType::Blinded => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], + ); + beacon_node + .get_validator_blinded_blocks::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })? + .data + } + }; + + if proposer_index != Some(block.proposer_index()) { + return Err(BlockError::Recoverable( + "Proposer index does not match block proposer. Beacon chain re-orged" + .to_string(), + )); + } + + Ok::<_, BlockError>(block) + }, + ) + .await?; + + let signed_block = self_ref + .validator_store + .sign_block::(*validator_pubkey_ref, block, current_slot) + .await + .map_err(|e| { + BlockError::Recoverable(format!("Unable to sign block: {:?}", e)) + })?; + + // Publish block with first available beacon node. 
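                            // (Illustrative note, not part of the commit) `first_success` tries each
                            // configured beacon node in turn and returns the first `Ok`; the
                            // `RequireSynced::No` / `OfflineOnFailure::Yes` flags appear to mean that an
                            // out-of-sync node is still attempted and a node that errors is marked
                            // offline for later calls. The call shape used throughout this file is:
                            //
                            //     self.beacon_nodes
                            //         .first_success(RequireSynced::No, OfflineOnFailure::Yes, |node| async {
                            //             /* issue one HTTP request against `node` */
                            //         })
                            //         .await?;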
+ self.beacon_nodes + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async { + match Payload::block_type() { + BlockType::Full => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })? + } + BlockType::Blinded => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blinded_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })? + } + } + Ok::<_, BlockError>(()) + }, + ) + .await?; + + info!( + log, + "Successfully published block"; + "block_type" => ?Payload::block_type(), + "deposits" => signed_block.message().body().deposits().len(), + "attestations" => signed_block.message().body().attestations().len(), + "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), + "slot" => signed_block.slot().as_u64(), + ); + } + ForkName::Eip4844 => { + if matches!(Payload::block_type(), BlockType::Blinded) { + //FIXME(sean) + crit!( + log, + "`--builder-payloads` not yet supported for EIP-4844 fork" + ); + return Ok(()); + } - if proposer_index != Some(block.proposer_index()) { - return Err(BlockError::Recoverable( - "Proposer index does not match block proposer. Beacon chain re-orged" - .to_string(), - )); - } + // Request block from first responsive beacon node. + let block_and_blobs = self + .beacon_nodes + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_GET], + ); + let block_and_blobs = beacon_node + .get_validator_blocks_and_blobs::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })? + .data; + + if proposer_index != Some(block_and_blobs.block.proposer_index()) { + return Err(BlockError::Recoverable( + "Proposer index does not match block proposer. Beacon chain re-orged" + .to_string(), + )); + } + + Ok::<_, BlockError>(block_and_blobs) + }, + ) + .await?; + + let blobs_sidecar = BlobsSidecar { + beacon_block_root: block_and_blobs.block.canonical_root(), + beacon_block_slot: block_and_blobs.block.slot(), + blobs: VariableList::from(block_and_blobs.blobs), + kzg_aggregate_proof: block_and_blobs.kzg_aggregate_proof, + }; + + let block = block_and_blobs.block; + let block_publish_future = async { + let signed_block = self_ref + .validator_store + .sign_block::(*validator_pubkey_ref, block, current_slot) + .await + .map_err(|e| { + BlockError::Recoverable(format!("Unable to sign block: {:?}", e)) + })?; + + // Publish block with first available beacon node. 
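                    // (Illustrative sketch, not part of the commit) In this EIP-4844 path the signed
                    // block and the signed blobs sidecar are published by two independent futures;
                    // they are awaited together further below with `tokio::join!`, and each `Result`
                    // is then propagated on its own. A minimal version of that pattern, with
                    // placeholder future names rather than Lighthouse APIs:
                    //
                    //     let (block_res, blobs_res) = tokio::join!(publish_block_fut, publish_blobs_fut);
                    //     block_res?;
                    //     blobs_res?;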
+ self.beacon_nodes + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })?; + Ok::<_, BlockError>(()) + }, + ) + .await?; + + info!( + log, + "Successfully published block"; + "block_type" => ?Payload::block_type(), + "deposits" => signed_block.message().body().deposits().len(), + "attestations" => signed_block.message().body().attestations().len(), + "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), + "slot" => signed_block.slot().as_u64(), + ); - Ok::<_, BlockError>(block) - }, - ) - .await?; + Ok::<_, BlockError>(()) + }; + + let blob_publish_future = async { + let signed_blobs = self_ref + .validator_store + .sign_blobs(*validator_pubkey_ref, blobs_sidecar, current_slot) + .await + .map_err(|e| { + BlockError::Recoverable(format!("Unable to sign blob: {:?}", e)) + })?; + + // Publish block with first available beacon node. + self.beacon_nodes + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOB_HTTP_POST], + ); + beacon_node.post_beacon_blobs(&signed_blobs).await.map_err( + |e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing blob: {:?}", + e + )) + }, + )?; + Ok::<_, BlockError>(()) + }, + ) + .await?; + + info!( + log, + "Successfully published blobs"; + "block_type" => ?Payload::block_type(), + "slot" => signed_blobs.message.beacon_block_slot.as_u64(), + "block_root" => ?signed_blobs.message.beacon_block_root, + "blobs_len" => signed_blobs.message.blobs.len(), + ); - let signed_block = self_ref - .validator_store - .sign_block::(*validator_pubkey_ref, block, current_slot) - .await - .map_err(|e| BlockError::Recoverable(format!("Unable to sign block: {:?}", e)))?; - - // Publish block with first available beacon node. - self.beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async { - match Payload::block_type() { - BlockType::Full => { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_POST], - ); - beacon_node - .post_beacon_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })? - } - BlockType::Blinded => { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], - ); - beacon_node - .post_beacon_blinded_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })? 
- } - } Ok::<_, BlockError>(()) - }, - ) - .await?; + }; + + let (res_block, res_blob) = tokio::join!(block_publish_future, blob_publish_future); + + res_block?; + res_blob?; + } + } - info!( - log, - "Successfully published block"; - "block_type" => ?Payload::block_type(), - "deposits" => signed_block.message().body().deposits().len(), - "attestations" => signed_block.message().body().attestations().len(), - "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), - "slot" => signed_block.slot().as_u64(), - ); Ok(()) } } diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 146d008a575..cc71196f4c1 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -13,6 +13,7 @@ pub const BEACON_BLOCK: &str = "beacon_block"; pub const BEACON_BLOCK_HTTP_GET: &str = "beacon_block_http_get"; pub const BLINDED_BEACON_BLOCK_HTTP_GET: &str = "blinded_beacon_block_http_get"; pub const BEACON_BLOCK_HTTP_POST: &str = "beacon_block_http_post"; +pub const BEACON_BLOB_HTTP_POST: &str = "beacon_blob_http_post"; pub const BLINDED_BEACON_BLOCK_HTTP_POST: &str = "blinded_beacon_block_http_post"; pub const ATTESTATIONS: &str = "attestations"; pub const ATTESTATIONS_HTTP_GET: &str = "attestations_http_get"; @@ -57,6 +58,11 @@ lazy_static::lazy_static! { "Total count of attempted block signings", &["status"] ); + pub static ref SIGNED_BLOBS_TOTAL: Result = try_create_int_counter_vec( + "vc_signed_beacon_blobs_total", + "Total count of attempted blob signings", + &["status"] + ); pub static ref SIGNED_ATTESTATIONS_TOTAL: Result = try_create_int_counter_vec( "vc_signed_attestations_total", "Total count of attempted Attestation signings", diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index de69d990033..36467bd1782 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -37,6 +37,7 @@ pub enum Error { pub enum SignableMessage<'a, T: EthSpec, Payload: ExecPayload = FullPayload> { RandaoReveal(Epoch), BeaconBlock(&'a BeaconBlock), + BlobsSidecar(&'a BlobsSidecar), AttestationData(&'a AttestationData), SignedAggregateAndProof(&'a AggregateAndProof), SelectionProof(Slot), @@ -58,6 +59,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> SignableMessage<'a, T, Payload> { match self { SignableMessage::RandaoReveal(epoch) => epoch.signing_root(domain), SignableMessage::BeaconBlock(b) => b.signing_root(domain), + SignableMessage::BlobsSidecar(b) => b.signing_root(domain), SignableMessage::AttestationData(a) => a.signing_root(domain), SignableMessage::SignedAggregateAndProof(a) => a.signing_root(domain), SignableMessage::SelectionProof(slot) => slot.signing_root(domain), @@ -180,6 +182,7 @@ impl SigningMethod { Web3SignerObject::RandaoReveal { epoch } } SignableMessage::BeaconBlock(block) => Web3SignerObject::beacon_block(block)?, + SignableMessage::BlobsSidecar(blob) => Web3SignerObject::BlobsSidecar(blob), SignableMessage::AttestationData(a) => Web3SignerObject::Attestation(a), SignableMessage::SignedAggregateAndProof(a) => { Web3SignerObject::AggregateAndProof(a) diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 0de260ecfcf..6668badb923 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -11,6 +11,7 @@ pub enum MessageType { AggregateAndProof, Attestation, BlockV2, + BlobsSidecar, Deposit, 
RandaoReveal, VoluntaryExit, @@ -50,6 +51,8 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload> { #[serde(skip_serializing_if = "Option::is_none")] block_header: Option, }, + //FIXME(sean) just guessing here + BlobsSidecar(&'a BlobsSidecar), #[allow(dead_code)] Deposit { pubkey: PublicKeyBytes, @@ -105,6 +108,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { Web3SignerObject::AggregateAndProof(_) => MessageType::AggregateAndProof, Web3SignerObject::Attestation(_) => MessageType::Attestation, Web3SignerObject::BeaconBlock { .. } => MessageType::BlockV2, + Web3SignerObject::BlobsSidecar(_) => MessageType::BlobsSidecar, Web3SignerObject::Deposit { .. } => MessageType::Deposit, Web3SignerObject::RandaoReveal { .. } => MessageType::RandaoReveal, Web3SignerObject::VoluntaryExit(_) => MessageType::VoluntaryExit, diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 292b49ac3a5..389bbb8005e 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -19,11 +19,12 @@ use std::sync::Arc; use task_executor::TaskExecutor; use types::{ attestation::Error as AttestationError, graffiti::GraffitiString, Address, AggregateAndProof, - Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, - EthSpec, ExecPayload, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, - Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, - SignedValidatorRegistrationData, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, - SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, + Attestation, BeaconBlock, BlindedPayload, BlobsSidecar, ChainSpec, ContributionAndProof, + Domain, Epoch, EthSpec, ExecPayload, Fork, FullPayload, Graffiti, Hash256, Keypair, + PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, + SignedBlobsSidecar, SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, + Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, + SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, }; use validator_dir::ValidatorDir; @@ -531,6 +532,42 @@ impl ValidatorStore { } } + pub async fn sign_blobs( + &self, + validator_pubkey: PublicKeyBytes, + blobs_sidecar: BlobsSidecar, + current_slot: Slot, + ) -> Result, Error> { + let slot = blobs_sidecar.beacon_block_slot; + + // Make sure the blob slot is not higher than the current slot to avoid potential attacks. 
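        // (Illustrative note, not part of the commit) The signature produced below is over
        // `blobs_sidecar.signing_root(domain)`, with `domain` derived for
        // `Domain::BlobsSideCar` at `slot.epoch(E::slots_per_epoch())`. These are the same
        // inputs the beacon node later recomputes in `SignedBlobsSidecar::verify_signature`
        // during gossip verification, so both sides must agree on the fork and the
        // `genesis_validators_root`.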
+ if slot > current_slot { + warn!( + self.log, + "Not signing blob with slot greater than current slot"; + "blob_slot" => slot.as_u64(), + "current_slot" => current_slot.as_u64() + ); + return Err(Error::GreaterThanCurrentSlot { slot, current_slot }); + } + + let signing_epoch = slot.epoch(E::slots_per_epoch()); + let signing_context = self.signing_context(Domain::BlobsSideCar, signing_epoch); + + metrics::inc_counter_vec(&metrics::SIGNED_BLOBS_TOTAL, &[metrics::SUCCESS]); + + let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?; + let signature = signing_method + .get_signature::>( + SignableMessage::BlobsSidecar(&blobs_sidecar), + signing_context, + &self.spec, + &self.task_executor, + ) + .await?; + Ok(SignedBlobsSidecar::from_blob(blobs_sidecar, signature)) + } + pub async fn sign_attestation( &self, validator_pubkey: PublicKeyBytes, From 44515b8cbeb5f0733518d95a1fbe783df9212c13 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Wed, 5 Oct 2022 17:20:54 -0400 Subject: [PATCH 047/263] cargo fix --- beacon_node/beacon_chain/src/blob_verification.rs | 7 +++---- beacon_node/beacon_chain/src/execution_payload.rs | 3 +-- beacon_node/http_api/src/publish_blobs.rs | 11 ++++------- .../src/beacon_processor/worker/gossip_methods.rs | 6 +++--- consensus/types/src/blobs_sidecar.rs | 2 +- 5 files changed, 12 insertions(+), 17 deletions(-) diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 90709d58195..dd2d1badced 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -2,11 +2,10 @@ use derivative::Derivative; use slot_clock::SlotClock; use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; -use crate::{block_verification::get_validator_pubkey_cache, BeaconChainError}; +use crate::{BeaconChainError}; use bls::PublicKey; -use std::sync::Arc; use types::{ - consts::eip4844::BLS_MODULUS, BeaconStateError, BlobsSidecar, EthSpec, Hash256, + consts::eip4844::BLS_MODULUS, BeaconStateError, Hash256, SignedBlobsSidecar, Slot, }; @@ -105,7 +104,7 @@ impl<'a, T: BeaconChainTypes> VerifiedBlobsSidecar<'a, T> { chain: &BeaconChain, ) -> Result { let blob_slot = blob_sidecar.message.beacon_block_slot; - let blob_root = blob_sidecar.message.beacon_block_root; + let _blob_root = blob_sidecar.message.beacon_block_root; // Do not gossip or process blobs from future or past slots. 
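        // (Illustrative note, not part of the commit) "Future or past" is judged with
        // MAXIMUM_GOSSIP_CLOCK_DISPARITY as slack on both sides, mirroring block gossip:
        // the check is meant to accept a sidecar only if, roughly,
        //
        //     current_slot - tolerance <= sidecar.beacon_block_slot <= current_slot + tolerance
        //
        // where the tolerated bounds come from `now_with_past_tolerance` and
        // `now_with_future_tolerance` on the slot clock.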
let latest_permissible_slot = chain .slot_clock diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 642fae5285e..7684343300c 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -12,7 +12,6 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; -use execution_layer::json_structures::JsonBlobBundlesV1; use execution_layer::{BuilderParams, PayloadStatus}; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; @@ -26,7 +25,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; use tree_hash::TreeHash; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, Blob, BlobsSidecar, EthSpec, ExecPayload, + BeaconBlockRef, BeaconState, BeaconStateError, Blob, EthSpec, ExecPayload, ExecutionBlockHash, Hash256, KzgCommitment, SignedBeaconBlock, Slot, }; diff --git a/beacon_node/http_api/src/publish_blobs.rs b/beacon_node/http_api/src/publish_blobs.rs index 41d76c61cd8..5748ab6960f 100644 --- a/beacon_node/http_api/src/publish_blobs.rs +++ b/beacon_node/http_api/src/publish_blobs.rs @@ -1,16 +1,13 @@ use crate::metrics; -use beacon_chain::validator_monitor::{get_block_delay_ms, get_slot_delay_ms, timestamp_now}; -use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized}; +use beacon_chain::validator_monitor::{get_slot_delay_ms, timestamp_now}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::PubsubMessage; use network::NetworkMessage; -use slog::{crit, error, info, warn, Logger}; -use slot_clock::SlotClock; +use slog::{Logger}; use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; -use tree_hash::TreeHash; use types::{ - BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, - SignedBeaconBlock, SignedBlobsSidecar, + SignedBlobsSidecar, }; use warp::Rejection; diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index b59537a1d28..2cabd1ba37d 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -2245,10 +2245,10 @@ impl Worker { BlobError::PastSlot { .. 
} => { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } - BlobError::BeaconChainError(e) => { + BlobError::BeaconChainError(_e) => { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } - BlobError::BlobOutOfRange { blob_index } => { + BlobError::BlobOutOfRange { blob_index: _ } => { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); } BlobError::InvalidKZGCommitment => { @@ -2257,7 +2257,7 @@ impl Worker { BlobError::ProposalSignatureInvalid => { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); } - BlobError::RepeatSidecar { proposer, slot } => { + BlobError::RepeatSidecar { proposer: _, slot: _ } => { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } BlobError::UnknownHeadBlock { beacon_block_root } => { diff --git a/consensus/types/src/blobs_sidecar.rs b/consensus/types/src/blobs_sidecar.rs index 5003a97a62c..4e9174d94cc 100644 --- a/consensus/types/src/blobs_sidecar.rs +++ b/consensus/types/src/blobs_sidecar.rs @@ -1,5 +1,5 @@ use crate::kzg_proof::KzgProof; -use crate::{BeaconBlock, Blob, EthSpec, Hash256, SignedRoot, Slot}; +use crate::{Blob, EthSpec, Hash256, SignedRoot, Slot}; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; From 1430b561c37adb44d5705005de6bf633deb8c16d Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Thu, 6 Oct 2022 21:16:57 -0500 Subject: [PATCH 048/263] Add more gossip verification conditions --- .../beacon_chain/src/blob_verification.rs | 102 ++++++++++++------ consensus/types/src/signed_blobs_sidecar.rs | 38 ++++++- 2 files changed, 103 insertions(+), 37 deletions(-) diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index dd2d1badced..be9b0effc17 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -1,13 +1,13 @@ use derivative::Derivative; use slot_clock::SlotClock; -use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; -use crate::{BeaconChainError}; -use bls::PublicKey; -use types::{ - consts::eip4844::BLS_MODULUS, BeaconStateError, Hash256, - SignedBlobsSidecar, Slot, +use crate::beacon_chain::{ + BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, }; +use crate::BeaconChainError; +use bls::PublicKey; +use types::{consts::eip4844::BLS_MODULUS, BeaconStateError, Hash256, SignedBlobsSidecar, Slot}; pub enum BlobError { /// The blob sidecar is from a slot that is later than the current slot (with respect to the @@ -69,6 +69,13 @@ pub enum BlobError { /// is valid or not. UnknownHeadBlock { beacon_block_root: Hash256 }, + /// The proposal_index corresponding to blob.beacon_block_root is not known. + /// + /// ## Peer scoring + /// + /// The block is invalid and the peer is faulty. + UnknownValidator(u64), + /// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid. 
/// /// ## Peer scoring @@ -103,17 +110,17 @@ impl<'a, T: BeaconChainTypes> VerifiedBlobsSidecar<'a, T> { blob_sidecar: &'a SignedBlobsSidecar, chain: &BeaconChain, ) -> Result { - let blob_slot = blob_sidecar.message.beacon_block_slot; - let _blob_root = blob_sidecar.message.beacon_block_root; + let block_slot = blob_sidecar.message.beacon_block_slot; + let block_root = blob_sidecar.message.beacon_block_root; // Do not gossip or process blobs from future or past slots. let latest_permissible_slot = chain .slot_clock .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)?; - if blob_slot > latest_permissible_slot { + if block_slot > latest_permissible_slot { return Err(BlobError::FutureSlot { message_slot: latest_permissible_slot, - latest_permissible_slot: blob_slot, + latest_permissible_slot: block_slot, }); } @@ -124,10 +131,10 @@ impl<'a, T: BeaconChainTypes> VerifiedBlobsSidecar<'a, T> { .slot_clock .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)?; - if blob_slot > earliest_permissible_slot { + if block_slot > earliest_permissible_slot { return Err(BlobError::PastSlot { message_slot: earliest_permissible_slot, - earliest_permissible_slot: blob_slot, + earliest_permissible_slot: block_slot, }); } @@ -140,34 +147,59 @@ impl<'a, T: BeaconChainTypes> VerifiedBlobsSidecar<'a, T> { } // Verify that the KZG proof is a valid G1 point + // TODO(pawan): KZG commitment can also be point at infinity, use a different check + // (bls.KeyValidate) if PublicKey::deserialize(&blob_sidecar.message.kzg_aggregate_proof.0).is_err() { return Err(BlobError::InvalidKZGCommitment); } - // TODO: Verify proposer signature - - // // let state = /* Get a valid state */ - // let proposer_index = state.get_beacon_proposer_index(blob_slot, &chain.spec)? as u64; - // let signature_is_valid = { - // let pubkey_cache = get_validator_pubkey_cache(chain)?; - // let pubkey = pubkey_cache - // .get(proposer_index as usize) - // .ok_or_else(|| BlobError::UnknownValidator(proposer_index)?; - // blob.verify_signature( - // Some(block_root), - // pubkey, - // &fork, - // chain.genesis_validators_root, - // &chain.spec, - // ) - // }; - - // if !signature_is_valid { - // return Err(BlobError::ProposalSignatureInvalid); - // } - - // TODO: Check that we have not already received a sidecar with a valid signature for this slot. 
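        // (Reading aid, not part of the commit) The replacement below resolves the expected
        // proposer via the proposer shuffling cache, keyed by the shuffling decision root of
        // the cached head state, and falls back to computing the proposer from the head state
        // on a cache miss. It then looks the proposer's pubkey up in the validator pubkey
        // cache and checks the sidecar signature under `Domain::BlobsSideCar` via
        // `SignedBlobsSidecar::verify_signature`.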
+ let proposer_shuffling_root = chain + .canonical_head + .cached_head() + .snapshot + .beacon_state + .proposer_shuffling_decision_root(block_root)?; + + let (proposer_index, fork) = match chain + .beacon_proposer_cache + .lock() + .get_slot::(proposer_shuffling_root, block_slot) + { + Some(proposer) => (proposer.index, proposer.fork), + None => { + let state = &chain.canonical_head.cached_head().snapshot.beacon_state; + ( + state.get_beacon_proposer_index(block_slot, &chain.spec)?, + state.fork(), + ) + } + }; + let signature_is_valid = { + let pubkey_cache = chain + .validator_pubkey_cache + .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) + .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout) + .map_err(BlobError::BeaconChainError)?; + + let pubkey = pubkey_cache + .get(proposer_index as usize) + .ok_or_else(|| BlobError::UnknownValidator(proposer_index as u64))?; + + blob_sidecar.verify_signature( + None, + pubkey, + &fork, + chain.genesis_validators_root, + &chain.spec, + ) + }; + + if !signature_is_valid { + return Err(BlobError::ProposalSignatureInvalid); + } + // TODO(pawan): Check that we have not already received a sidecar with a valid signature for this slot. + // TODO(pawan): check if block hash is already known Ok(Self { blob_sidecar }) } } diff --git a/consensus/types/src/signed_blobs_sidecar.rs b/consensus/types/src/signed_blobs_sidecar.rs index 74a779219a1..677b95bd389 100644 --- a/consensus/types/src/signed_blobs_sidecar.rs +++ b/consensus/types/src/signed_blobs_sidecar.rs @@ -1,7 +1,9 @@ -use crate::{BlobsSidecar, EthSpec}; +use crate::{ + signing_data::SignedRoot, BlobsSidecar, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, + SigningData, +}; use bls::Signature; use serde_derive::{Deserialize, Serialize}; -use ssz::Encode; use ssz_derive::{Decode, Encode}; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; @@ -21,4 +23,36 @@ impl SignedBlobsSidecar { signature, } } + + /// Verify `self.signature`. + /// + /// If the root of `blob_sidecar.message` is already known it can be passed in via `object_root_opt`. + /// Otherwise, it will be computed locally. 
+ pub fn verify_signature( + &self, + object_root_opt: Option, + pubkey: &PublicKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> bool { + let domain = spec.get_domain( + self.message.beacon_block_slot.epoch(T::slots_per_epoch()), + Domain::BlobsSideCar, + fork, + genesis_validators_root, + ); + + let message = if let Some(object_root) = object_root_opt { + SigningData { + object_root, + domain, + } + .tree_hash_root() + } else { + self.message.signing_root(domain) + }; + + self.signature.verify(pubkey, message) + } } From 255fdf072467fdc0afa2a7566b85afb4b6346d7f Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Thu, 13 Oct 2022 09:37:20 -0500 Subject: [PATCH 049/263] Added Capella Data Structures to consensus/types (#3637) * Ran Cargo fmt * Added Capella Data Structures to consensus/types --- Cargo.lock | 20 +- .../beacon_chain/src/execution_payload.rs | 4 +- beacon_node/http_api/src/publish_blobs.rs | 6 +- .../beacon_processor/worker/gossip_methods.rs | 5 +- consensus/types/Cargo.toml | 2 +- consensus/types/src/beacon_block.rs | 35 +- consensus/types/src/beacon_block_body.rs | 138 ++- consensus/types/src/beacon_state.rs | 59 +- consensus/types/src/builder_bid.rs | 15 +- consensus/types/src/chain_spec.rs | 76 ++ consensus/types/src/eth_spec.rs | 41 + consensus/types/src/execution_payload.rs | 68 +- .../types/src/execution_payload_header.rs | 123 ++- consensus/types/src/fork_name.rs | 25 +- consensus/types/src/lib.rs | 28 +- consensus/types/src/payload.rs | 993 ++++++++++++++++-- consensus/types/src/signed_beacon_block.rs | 97 +- consensus/types/src/withdrawal.rs | 27 + 18 files changed, 1592 insertions(+), 170 deletions(-) create mode 100644 consensus/types/src/withdrawal.rs diff --git a/Cargo.lock b/Cargo.lock index 4dfd070f4dd..653e0fc3d2f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -428,7 +428,7 @@ dependencies = [ "state_processing", "store", "strum", - "superstruct", + "superstruct 0.5.0", "task_executor", "tempfile", "tokio", @@ -3692,7 +3692,7 @@ dependencies = [ "smallvec", "snap", "strum", - "superstruct", + "superstruct 0.5.0", "task_executor", "tempfile", "tiny-keccak", @@ -6395,6 +6395,20 @@ dependencies = [ "syn", ] +[[package]] +name = "superstruct" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b9e5728aa1a87141cefd4e7509903fc01fa0dcb108022b1e841a67c5159fc5" +dependencies = [ + "darling", + "itertools", + "proc-macro2", + "quote", + "smallvec", + "syn", +] + [[package]] name = "swap_or_not_shuffle" version = "0.2.0" @@ -7125,7 +7139,7 @@ dependencies = [ "slog", "smallvec", "state_processing", - "superstruct", + "superstruct 0.6.0", "swap_or_not_shuffle", "tempfile", "test_random_derive", diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 7684343300c..16c758b377f 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -25,8 +25,8 @@ use std::sync::Arc; use tokio::task::JoinHandle; use tree_hash::TreeHash; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, Blob, EthSpec, ExecPayload, - ExecutionBlockHash, Hash256, KzgCommitment, SignedBeaconBlock, Slot, + BeaconBlockRef, BeaconState, BeaconStateError, Blob, EthSpec, ExecPayload, ExecutionBlockHash, + Hash256, KzgCommitment, SignedBeaconBlock, Slot, }; pub type PreparePayloadResult = diff --git a/beacon_node/http_api/src/publish_blobs.rs 
b/beacon_node/http_api/src/publish_blobs.rs index 5748ab6960f..cf08ac2d2b4 100644 --- a/beacon_node/http_api/src/publish_blobs.rs +++ b/beacon_node/http_api/src/publish_blobs.rs @@ -3,12 +3,10 @@ use beacon_chain::validator_monitor::{get_slot_delay_ms, timestamp_now}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::PubsubMessage; use network::NetworkMessage; -use slog::{Logger}; +use slog::Logger; use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; -use types::{ - SignedBlobsSidecar, -}; +use types::SignedBlobsSidecar; use warp::Rejection; /// Handles a request from the HTTP API for full blocks. diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 2cabd1ba37d..debfdff3d71 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -2257,7 +2257,10 @@ impl Worker { BlobError::ProposalSignatureInvalid => { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); } - BlobError::RepeatSidecar { proposer: _, slot: _ } => { + BlobError::RepeatSidecar { + proposer: _, + slot: _, + } => { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } BlobError::UnknownHeadBlock { beacon_block_root } => { diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 397d916dce3..b4164021d95 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -44,7 +44,7 @@ regex = "1.5.5" lazy_static = "1.4.0" parking_lot = "0.12.0" itertools = "0.10.0" -superstruct = "0.5.0" +superstruct = "0.6.0" serde_json = "1.0.74" smallvec = "1.8.0" serde_with = "1.13.0" diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 44f4fd22924..d58e890c60b 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -17,7 +17,7 @@ use tree_hash_derive::TreeHash; /// A block of the `BeaconChain`. #[superstruct( - variants(Base, Altair, Merge, Eip4844), + variants(Base, Altair, Merge, Capella, Eip4844), variant_attributes( derive( Debug, @@ -48,7 +48,7 @@ use tree_hash_derive::TreeHash; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] -pub struct BeaconBlock = FullPayload> { +pub struct BeaconBlock = FullPayload> { #[superstruct(getter(copy))] pub slot: Slot, #[superstruct(getter(copy))] @@ -64,16 +64,22 @@ pub struct BeaconBlock = FullPayload> { pub body: BeaconBlockBodyAltair, #[superstruct(only(Merge), partial_getter(rename = "body_merge"))] pub body: BeaconBlockBodyMerge, + #[superstruct(only(Capella), partial_getter(rename = "body_capella"))] + pub body: BeaconBlockBodyCapella, #[superstruct(only(Eip4844), partial_getter(rename = "body_eip4844"))] pub body: BeaconBlockBodyEip4844, } pub type BlindedBeaconBlock = BeaconBlock>; -impl> SignedRoot for BeaconBlock {} -impl<'a, T: EthSpec, Payload: ExecPayload> SignedRoot for BeaconBlockRef<'a, T, Payload> {} +impl> SignedRoot for BeaconBlock {} +impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignedRoot + for BeaconBlockRef<'a, T, Payload> +{ +} -impl> BeaconBlock { +impl> BeaconBlock { + // FIXME: deal with capella / eip4844 forks here as well /// Returns an empty block to be used during genesis. 
pub fn empty(spec: &ChainSpec) -> Self { if spec.bellatrix_fork_epoch == Some(T::genesis_epoch()) { @@ -180,7 +186,7 @@ impl> BeaconBlock { } } -impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRef<'a, T, Payload> { +impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockRef<'a, T, Payload> { /// Returns the name of the fork pertaining to `self`. /// /// Will return an `Err` if `self` has been instantiated to a variant conflicting with the fork @@ -191,6 +197,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRef<'a, T, Payload> { BeaconBlockRef::Base { .. } => ForkName::Base, BeaconBlockRef::Altair { .. } => ForkName::Altair, BeaconBlockRef::Merge { .. } => ForkName::Merge, + BeaconBlockRef::Capella { .. } => ForkName::Capella, BeaconBlockRef::Eip4844 { .. } => ForkName::Eip4844, }; @@ -245,12 +252,12 @@ impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRef<'a, T, Payload> { /// Extracts a reference to an execution payload from a block, returning an error if the block /// is pre-merge. - pub fn execution_payload(&self) -> Result<&Payload, Error> { + pub fn execution_payload(&self) -> Result, Error> { self.body().execution_payload() } } -impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRefMut<'a, T, Payload> { +impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockRefMut<'a, T, Payload> { /// Convert a mutable reference to a beacon block to a mutable ref to its body. pub fn body_mut(self) -> BeaconBlockBodyRefMut<'a, T, Payload> { map_beacon_block_ref_mut_into_beacon_block_body_ref_mut!(&'a _, self, |block, cons| cons( @@ -259,7 +266,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRefMut<'a, T, Payload> } } -impl> BeaconBlockBase { +impl> BeaconBlockBase { /// Returns an empty block to be used during genesis. pub fn empty(spec: &ChainSpec) -> Self { BeaconBlockBase { @@ -380,7 +387,7 @@ impl> BeaconBlockBase { } } -impl> BeaconBlockAltair { +impl> BeaconBlockAltair { /// Returns an empty Altair block to be used during genesis. pub fn empty(spec: &ChainSpec) -> Self { BeaconBlockAltair { @@ -439,7 +446,7 @@ impl> BeaconBlockAltair { } } -impl> BeaconBlockMerge { +impl> BeaconBlockMerge { /// Returns an empty Merge block to be used during genesis. pub fn empty(spec: &ChainSpec) -> Self { BeaconBlockMerge { @@ -461,7 +468,7 @@ impl> BeaconBlockMerge { deposits: VariableList::empty(), voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), - execution_payload: Payload::default(), + execution_payload: Payload::Merge::default(), }, } } @@ -536,7 +543,7 @@ macro_rules! impl_from { parent_root, state_root, body, - }, payload) + }, payload.map(Into::into)) } } } @@ -545,6 +552,7 @@ macro_rules! impl_from { impl_from!(BeaconBlockBase, >, >, |body: BeaconBlockBodyBase<_, _>| body.into()); impl_from!(BeaconBlockAltair, >, >, |body: BeaconBlockBodyAltair<_, _>| body.into()); impl_from!(BeaconBlockMerge, >, >, |body: BeaconBlockBodyMerge<_, _>| body.into()); +impl_from!(BeaconBlockCapella, >, >, |body: BeaconBlockBodyCapella<_, _>| body.into()); impl_from!(BeaconBlockEip4844, >, >, |body: BeaconBlockBodyEip4844<_, _>| body.into()); // We can clone blocks with payloads to blocks without payloads, without cloning the payload. @@ -576,6 +584,7 @@ macro_rules! 
impl_clone_as_blinded { impl_clone_as_blinded!(BeaconBlockBase, >, >); impl_clone_as_blinded!(BeaconBlockAltair, >, >); impl_clone_as_blinded!(BeaconBlockMerge, >, >); +impl_clone_as_blinded!(BeaconBlockCapella, >, >); impl_clone_as_blinded!(BeaconBlockEip4844, >, >); // A reference to a full beacon block can be cloned into a blinded beacon block, without cloning the diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 61bf56f323e..36e0ce77004 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -14,7 +14,7 @@ use tree_hash_derive::TreeHash; /// /// This *superstruct* abstracts over the hard-fork. #[superstruct( - variants(Base, Altair, Merge, Eip4844), + variants(Base, Altair, Merge, Capella, Eip4844), variant_attributes( derive( Debug, @@ -39,7 +39,7 @@ use tree_hash_derive::TreeHash; #[serde(untagged)] #[serde(bound = "T: EthSpec, Payload: ExecPayload")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -pub struct BeaconBlockBody = FullPayload> { +pub struct BeaconBlockBody = FullPayload> { pub randao_reveal: Signature, pub eth1_data: Eth1Data, pub graffiti: Graffiti, @@ -48,14 +48,20 @@ pub struct BeaconBlockBody = FullPayload> pub attestations: VariableList, T::MaxAttestations>, pub deposits: VariableList, pub voluntary_exits: VariableList, - #[superstruct(only(Altair, Merge, Eip4844))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub sync_aggregate: SyncAggregate, // We flatten the execution payload so that serde can use the name of the inner type, // either `execution_payload` for full payloads, or `execution_payload_header` for blinded // payloads. - #[superstruct(only(Merge, Eip4844))] + #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] #[serde(flatten)] - pub execution_payload: Payload, + pub execution_payload: Payload::Merge, + #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] + #[serde(flatten)] + pub execution_payload: Payload::Capella, + #[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))] + #[serde(flatten)] + pub execution_payload: Payload::Eip4844, #[superstruct(only(Eip4844))] pub blob_kzg_commitments: VariableList, #[superstruct(only(Base, Altair))] @@ -65,6 +71,23 @@ pub struct BeaconBlockBody = FullPayload> pub _phantom: PhantomData, } +impl> BeaconBlockBody { + pub fn execution_payload<'a>(&'a self) -> Result, Error> { + self.to_ref().execution_payload() + } +} + +impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, Payload> { + pub fn execution_payload(&self) -> Result, Error> { + match self { + Self::Base(_) | Self::Altair(_) => Err(Error::IncorrectStateVariant), + Self::Merge(body) => Ok(Payload::Ref::from(&body.execution_payload)), + Self::Capella(body) => Ok(Payload::Ref::from(&body.execution_payload)), + Self::Eip4844(body) => Ok(Payload::Ref::from(&body.execution_payload)), + } + } +} + impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { /// Get the fork_name of this object pub fn fork_name(self) -> ForkName { @@ -72,6 +95,7 @@ impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { BeaconBlockBodyRef::Base { .. } => ForkName::Base, BeaconBlockBodyRef::Altair { .. } => ForkName::Altair, BeaconBlockBodyRef::Merge { .. } => ForkName::Merge, + BeaconBlockBodyRef::Capella { .. } => ForkName::Capella, BeaconBlockBodyRef::Eip4844 { .. 
} => ForkName::Eip4844, } } @@ -218,7 +242,7 @@ impl From>> impl From>> for ( BeaconBlockBodyMerge>, - Option>, + Option>, ) { fn from(body: BeaconBlockBodyMerge>) -> Self { @@ -232,7 +256,7 @@ impl From>> deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayloadMerge { execution_payload }, } = body; ( @@ -246,8 +270,48 @@ impl From>> deposits, voluntary_exits, sync_aggregate, - execution_payload: BlindedPayload { - execution_payload_header: From::from(&execution_payload), + execution_payload: BlindedPayloadMerge { + execution_payload_header: From::from(execution_payload.clone()), + }, + }, + Some(execution_payload), + ) + } +} + +impl From>> + for ( + BeaconBlockBodyCapella>, + Option>, + ) +{ + fn from(body: BeaconBlockBodyCapella>) -> Self { + let BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadCapella { execution_payload }, + } = body; + + ( + BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadCapella { + execution_payload_header: From::from(execution_payload.clone()), }, }, Some(execution_payload), @@ -258,7 +322,7 @@ impl From>> impl From>> for ( BeaconBlockBodyEip4844>, - Option>, + Option>, ) { fn from(body: BeaconBlockBodyEip4844>) -> Self { @@ -272,7 +336,7 @@ impl From>> deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayloadEip4844 { execution_payload }, blob_kzg_commitments, } = body; @@ -287,12 +351,12 @@ impl From>> deposits, voluntary_exits, sync_aggregate, - execution_payload: BlindedPayload { - execution_payload_header: From::from(&execution_payload), + execution_payload: BlindedPayloadEip4844 { + execution_payload_header: From::from(execution_payload.clone()), }, blob_kzg_commitments, }, - None, + Some(execution_payload), ) } } @@ -324,7 +388,7 @@ impl BeaconBlockBodyMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayloadMerge { execution_payload }, } = self; BeaconBlockBodyMerge { @@ -337,8 +401,40 @@ impl BeaconBlockBodyMerge> { deposits: deposits.clone(), voluntary_exits: voluntary_exits.clone(), sync_aggregate: sync_aggregate.clone(), - execution_payload: BlindedPayload { - execution_payload_header: From::from(execution_payload), + execution_payload: BlindedPayloadMerge { + execution_payload_header: From::from(execution_payload.clone()), + }, + } + } +} + +impl BeaconBlockBodyCapella> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyCapella> { + let BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadCapella { execution_payload }, + } = self; + + BeaconBlockBodyCapella { + randao_reveal: randao_reveal.clone(), + eth1_data: eth1_data.clone(), + graffiti: *graffiti, + proposer_slashings: proposer_slashings.clone(), + attester_slashings: attester_slashings.clone(), + attestations: attestations.clone(), + deposits: deposits.clone(), + voluntary_exits: voluntary_exits.clone(), + sync_aggregate: sync_aggregate.clone(), + execution_payload: BlindedPayloadCapella { + 
execution_payload_header: From::from(execution_payload.clone()), }, } } @@ -356,7 +452,7 @@ impl BeaconBlockBodyEip4844> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayloadEip4844 { execution_payload }, blob_kzg_commitments, } = self; @@ -370,8 +466,8 @@ impl BeaconBlockBodyEip4844> { deposits: deposits.clone(), voluntary_exits: voluntary_exits.clone(), sync_aggregate: sync_aggregate.clone(), - execution_payload: BlindedPayload { - execution_payload_header: From::from(execution_payload), + execution_payload: BlindedPayloadEip4844 { + execution_payload_header: From::from(execution_payload.clone()), }, blob_kzg_commitments: blob_kzg_commitments.clone(), } @@ -387,7 +483,7 @@ impl From>> fn from(body: BeaconBlockBody>) -> Self { map_beacon_block_body!(body, |inner, cons| { let (block, payload) = inner.into(); - (cons(block), payload) + (cons(block), payload.map(Into::into)) }) } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 3a006e54618..6438a0a7e1b 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -172,7 +172,7 @@ impl From for Hash256 { /// The state of the `BeaconChain` at some slot. #[superstruct( - variants(Base, Altair, Merge, Eip4844), + variants(Base, Altair, Merge, Capella, Eip4844), variant_attributes( derive( Derivative, @@ -250,9 +250,9 @@ where pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge, Eip4844))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair, Merge, Eip4844))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub current_epoch_participation: VariableList, // Finality @@ -267,18 +267,39 @@ where // Inactivity #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] - #[superstruct(only(Altair, Merge, Eip4844))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair, Merge, Eip4844))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge, Eip4844))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub next_sync_committee: Arc>, // Execution - #[superstruct(only(Merge, Eip4844))] - pub latest_execution_payload_header: ExecutionPayloadHeader, + #[superstruct( + only(Merge), + partial_getter(rename = "latest_execution_payload_header_merge") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderMerge, + #[superstruct( + only(Capella), + partial_getter(rename = "latest_execution_payload_header_capella") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderCapella, + #[superstruct( + only(Eip4844), + partial_getter(rename = "latest_execution_payload_header_eip4844") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, + + // Withdrawals + #[superstruct(only(Capella, Eip4844))] + pub withdrawal_queue: VariableList, + #[superstruct(only(Capella, Eip4844))] + pub next_withdrawal_index: u64, + #[superstruct(only(Capella, Eip4844))] + pub next_partial_withdrawal_validator_index: u64, // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] @@ -389,6 +410,7 @@ impl BeaconState { BeaconState::Base { .. } => ForkName::Base, BeaconState::Altair { .. 
} => ForkName::Altair, BeaconState::Merge { .. } => ForkName::Merge, + BeaconState::Capella { .. } => ForkName::Capella, BeaconState::Eip4844 { .. } => ForkName::Eip4844, }; @@ -679,6 +701,23 @@ impl BeaconState { .ok_or(Error::ShuffleIndexOutOfBounds(index)) } + // TODO: check this implementation + /// Convenience accessor for the `execution_payload_header` as an `ExecutionPayloadHeaderRef`. + pub fn latest_execution_payload_header(&self) -> Result, Error> { + match self { + BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), + BeaconState::Merge(state) => Ok(ExecutionPayloadHeaderRef::Merge( + &state.latest_execution_payload_header, + )), + BeaconState::Capella(state) => Ok(ExecutionPayloadHeaderRef::Capella( + &state.latest_execution_payload_header, + )), + BeaconState::Eip4844(state) => Ok(ExecutionPayloadHeaderRef::Eip4844( + &state.latest_execution_payload_header, + )), + } + } + /// Return `true` if the validator who produced `slot_signature` is eligible to aggregate. /// /// Spec v0.12.1 @@ -1103,6 +1142,7 @@ impl BeaconState { BeaconState::Base(state) => (&mut state.validators, &mut state.balances), BeaconState::Altair(state) => (&mut state.validators, &mut state.balances), BeaconState::Merge(state) => (&mut state.validators, &mut state.balances), + BeaconState::Capella(state) => (&mut state.validators, &mut state.balances), BeaconState::Eip4844(state) => (&mut state.validators, &mut state.balances), } } @@ -1300,6 +1340,7 @@ impl BeaconState { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.current_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.current_epoch_participation), + BeaconState::Capella(state) => Ok(&mut state.current_epoch_participation), BeaconState::Eip4844(state) => Ok(&mut state.current_epoch_participation), } } else if epoch == self.previous_epoch() { @@ -1307,6 +1348,7 @@ impl BeaconState { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.previous_epoch_participation), + BeaconState::Capella(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Eip4844(state) => Ok(&mut state.previous_epoch_participation), } } else { @@ -1612,6 +1654,7 @@ impl BeaconState { BeaconState::Base(inner) => BeaconState::Base(inner.clone()), BeaconState::Altair(inner) => BeaconState::Altair(inner.clone()), BeaconState::Merge(inner) => BeaconState::Merge(inner.clone()), + BeaconState::Capella(inner) => BeaconState::Capella(inner.clone()), BeaconState::Eip4844(inner) => BeaconState::Eip4844(inner.clone()), }; if config.committee_caches { diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 047bceae7e2..818ec52b813 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,4 +1,7 @@ -use crate::{ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, SignedRoot, Uint256}; +use crate::{ + AbstractExecPayload, ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, SignedRoot, + Uint256, +}; use bls::PublicKeyBytes; use bls::Signature; use serde::{Deserialize as De, Deserializer, Serialize as Ser, Serializer}; @@ -10,7 +13,7 @@ use tree_hash_derive::TreeHash; #[serde_as] #[derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone)] #[serde(bound = "E: EthSpec, Payload: ExecPayload")] -pub struct BuilderBid> { +pub struct BuilderBid> { 
#[serde_as(as = "BlindedPayloadAsHeader")] pub header: Payload, #[serde(with = "eth2_serde_utils::quoted_u256")] @@ -21,12 +24,12 @@ pub struct BuilderBid> { _phantom_data: PhantomData, } -impl> SignedRoot for BuilderBid {} +impl> SignedRoot for BuilderBid {} /// Validator registration, for use in interacting with servers implementing the builder API. #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] #[serde(bound = "E: EthSpec, Payload: ExecPayload")] -pub struct SignedBuilderBid> { +pub struct SignedBuilderBid> { pub message: BuilderBid, pub signature: Signature, } @@ -42,7 +45,7 @@ impl> SerializeAs for BlindedPayloa } } -impl<'de, E: EthSpec, Payload: ExecPayload> DeserializeAs<'de, Payload> +impl<'de, E: EthSpec, Payload: AbstractExecPayload> DeserializeAs<'de, Payload> for BlindedPayloadAsHeader { fn deserialize_as(deserializer: D) -> Result @@ -55,7 +58,7 @@ impl<'de, E: EthSpec, Payload: ExecPayload> DeserializeAs<'de, Payload> } } -impl> SignedBuilderBid { +impl> SignedBuilderBid { pub fn verify_signature(&self, spec: &ChainSpec) -> bool { self.message .pubkey diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index aa477b22e5d..af22823d2ef 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -11,6 +11,7 @@ use tree_hash::TreeHash; /// Each of the BLS signature domains. #[derive(Debug, PartialEq, Clone, Copy)] pub enum Domain { + BlsToExecutionChange, BeaconProposer, BeaconAttester, BlobsSideCar, @@ -152,6 +153,13 @@ pub struct ChainSpec { pub terminal_block_hash_activation_epoch: Epoch, pub safe_slots_to_import_optimistically: u64, + /* + * Capella hard fork params + */ + pub capella_fork_version: [u8; 4], + /// The Capella fork epoch is optional, with `None` representing "Merge never happens". 
+ pub capella_fork_epoch: Option, + /* * Eip4844 hard fork params */ @@ -174,6 +182,11 @@ pub struct ChainSpec { * Application params */ pub(crate) domain_application_mask: u32, + + /* + * Capella params + */ + pub(crate) domain_bls_to_execution_change: u32, } impl ChainSpec { @@ -256,6 +269,7 @@ impl ChainSpec { ForkName::Base => self.genesis_fork_version, ForkName::Altair => self.altair_fork_version, ForkName::Merge => self.bellatrix_fork_version, + ForkName::Capella => self.capella_fork_version, ForkName::Eip4844 => self.eip4844_fork_version, } } @@ -266,6 +280,7 @@ impl ChainSpec { ForkName::Base => Some(Epoch::new(0)), ForkName::Altair => self.altair_fork_epoch, ForkName::Merge => self.bellatrix_fork_epoch, + ForkName::Capella => self.capella_fork_epoch, ForkName::Eip4844 => self.eip4844_fork_epoch, } } @@ -276,6 +291,7 @@ impl ChainSpec { BeaconState::Base(_) => self.inactivity_penalty_quotient, BeaconState::Altair(_) => self.inactivity_penalty_quotient_altair, BeaconState::Merge(_) => self.inactivity_penalty_quotient_bellatrix, + BeaconState::Capella(_) => self.inactivity_penalty_quotient_bellatrix, BeaconState::Eip4844(_) => self.inactivity_penalty_quotient_bellatrix, } } @@ -289,6 +305,7 @@ impl ChainSpec { BeaconState::Base(_) => self.proportional_slashing_multiplier, BeaconState::Altair(_) => self.proportional_slashing_multiplier_altair, BeaconState::Merge(_) => self.proportional_slashing_multiplier_bellatrix, + BeaconState::Capella(_) => self.proportional_slashing_multiplier_bellatrix, BeaconState::Eip4844(_) => self.proportional_slashing_multiplier_bellatrix, } } @@ -302,6 +319,7 @@ impl ChainSpec { BeaconState::Base(_) => self.min_slashing_penalty_quotient, BeaconState::Altair(_) => self.min_slashing_penalty_quotient_altair, BeaconState::Merge(_) => self.min_slashing_penalty_quotient_bellatrix, + BeaconState::Capella(_) => self.min_slashing_penalty_quotient_bellatrix, BeaconState::Eip4844(_) => self.min_slashing_penalty_quotient_bellatrix, } } @@ -351,6 +369,7 @@ impl ChainSpec { Domain::ContributionAndProof => self.domain_contribution_and_proof, Domain::SyncCommitteeSelectionProof => self.domain_sync_committee_selection_proof, Domain::ApplicationMask(application_domain) => application_domain.get_domain_constant(), + Domain::BlsToExecutionChange => self.domain_bls_to_execution_change, } } @@ -586,6 +605,12 @@ impl ChainSpec { terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, + /* + * Capella hard fork params + */ + capella_fork_version: [0x03, 0x00, 0x00, 0x00], + capella_fork_epoch: Some(Epoch::new(u64::MAX)), + /* * Eip4844 hard fork params */ @@ -608,6 +633,11 @@ impl ChainSpec { * Application specific */ domain_application_mask: APPLICATION_DOMAIN_BUILDER, + + /* + * Capella params + */ + domain_bls_to_execution_change: 10, } } @@ -647,6 +677,9 @@ impl ChainSpec { // `Uint256::MAX` which is `2^256 - 1`.
.checked_add(Uint256::one()) .expect("addition does not overflow"), + // Capella + capella_fork_version: [0x03, 0x00, 0x00, 0x01], + capella_fork_epoch: None, // Eip4844 eip4844_fork_version: [0x04, 0x00, 0x00, 0x01], eip4844_fork_epoch: None, @@ -806,6 +839,15 @@ impl ChainSpec { terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, + /* + * Capella hard fork params + */ + capella_fork_version: [0x03, 0x00, 0x00, 0x64], + capella_fork_epoch: None, + + /* + * Eip4844 hard fork params + */ eip4844_fork_version: [0x04, 0x00, 0x00, 0x64], eip4844_fork_epoch: None, @@ -825,6 +867,11 @@ impl ChainSpec { * Application specific */ domain_application_mask: APPLICATION_DOMAIN_BUILDER, + + /* + * Capella params + */ + domain_bls_to_execution_change: 10, } } } @@ -884,6 +931,14 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub bellatrix_fork_epoch: Option>, + #[serde(default = "default_capella_fork_version")] + #[serde(with = "eth2_serde_utils::bytes_4_hex")] + capella_fork_version: [u8; 4], + #[serde(default)] + #[serde(serialize_with = "serialize_fork_epoch")] + #[serde(deserialize_with = "deserialize_fork_epoch")] + pub capella_fork_epoch: Option>, + #[serde(default = "default_eip4844_fork_version")] #[serde(with = "eth2_serde_utils::bytes_4_hex")] eip4844_fork_version: [u8; 4], @@ -928,6 +983,11 @@ fn default_bellatrix_fork_version() -> [u8; 4] { [0xff, 0xff, 0xff, 0xff] } +fn default_capella_fork_version() -> [u8; 4] { + // TODO: determine if the bellatrix example should be copied like this + [0xff, 0xff, 0xff, 0xff] +} + fn default_eip4844_fork_version() -> [u8; 4] { // This value shouldn't be used. [0xff, 0xff, 0xff, 0xff] @@ -1029,6 +1089,10 @@ impl Config { bellatrix_fork_epoch: spec .bellatrix_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), + capella_fork_version: spec.capella_fork_version, + capella_fork_epoch: spec + .capella_fork_epoch + .map(|epoch| MaybeQuoted { value: epoch }), eip4844_fork_version: spec.eip4844_fork_version, eip4844_fork_epoch: spec .eip4844_fork_epoch @@ -1078,6 +1142,8 @@ impl Config { altair_fork_epoch, bellatrix_fork_epoch, bellatrix_fork_version, + capella_fork_epoch, + capella_fork_version, eip4844_fork_epoch, eip4844_fork_version, seconds_per_slot, @@ -1110,6 +1176,8 @@ impl Config { altair_fork_epoch: altair_fork_epoch.map(|q| q.value), bellatrix_fork_epoch: bellatrix_fork_epoch.map(|q| q.value), bellatrix_fork_version, + capella_fork_epoch: capella_fork_epoch.map(|q| q.value), + capella_fork_version, eip4844_fork_epoch: eip4844_fork_epoch.map(|q| q.value), eip4844_fork_version, seconds_per_slot, @@ -1204,6 +1272,14 @@ mod tests { apply_bit_mask(builder_domain_pre_mask, &spec), &spec, ); + + test_domain( + Domain::BlsToExecutionChange, + spec.domain_bls_to_execution_change, + &spec, + ); + + test_domain(Domain::BlobsSideCar, spec.domain_blobs_sidecar, &spec); } fn apply_bit_mask(domain_bytes: [u8; 4], spec: &ChainSpec) -> u32 { diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 716754c7e54..4cf102bd772 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -95,6 +95,13 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + type GasLimitDenominator: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MinGasLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxExtraDataBytes: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * New in Capella + */ + type 
MaxPartialWithdrawalsPerEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type WithdrawalQueueLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxBlsToExecutionChanges: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxWithdrawalsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * New in Eip4844 */ @@ -228,10 +235,32 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + Self::BytesPerLogsBloom::to_usize() } + /// Returns the `MAX_PARTIAL_WITHDRAWALS_PER_EPOCH` constant for this specification. + fn max_partial_withdrawals_per_epoch() -> usize { + Self::MaxPartialWithdrawalsPerEpoch::to_usize() + } + + /// Returns the `WITHDRAWAL_QUEUE_LIMIT` constant for this specification. + fn withdrawal_queue_limit() -> usize { + Self::WithdrawalQueueLimit::to_usize() + } + + /// Returns the `MAX_BLS_TO_EXECUTION_CHANGES` constant for this specification. + fn max_bls_to_execution_changes() -> usize { + Self::MaxBlsToExecutionChanges::to_usize() + } + + /// Returns the `MAX_WITHDRAWALS_PER_PAYLOAD` constant for this specification. + fn max_withdrawals_per_payload() -> usize { + Self::MaxWithdrawalsPerPayload::to_usize() + } + + /// Returns the `MAX_BLOBS_PER_BLOCK` constant for this specification. fn max_blobs_per_block() -> usize { Self::MaxBlobsPerBlock::to_usize() } + /// FIXME: why is this called chunks_per_blob?? fn chunks_per_blob() -> usize { Self::FieldElementsPerBlob::to_usize() } @@ -280,6 +309,10 @@ impl EthSpec for MainnetEthSpec { type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch + type MaxPartialWithdrawalsPerEpoch = U256; + type WithdrawalQueueLimit = U1099511627776; + type MaxBlsToExecutionChanges = U16; + type MaxWithdrawalsPerPayload = U16; fn default_spec() -> ChainSpec { ChainSpec::mainnet() @@ -325,6 +358,10 @@ impl EthSpec for MinimalEthSpec { GasLimitDenominator, MinGasLimit, MaxExtraDataBytes, + MaxPartialWithdrawalsPerEpoch, + WithdrawalQueueLimit, + MaxBlsToExecutionChanges, + MaxWithdrawalsPerPayload, MaxBlobsPerBlock, FieldElementsPerBlob }); @@ -371,6 +408,10 @@ impl EthSpec for GnosisEthSpec { type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U2048; // 128 max attestations * 16 slots per epoch type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch + type MaxPartialWithdrawalsPerEpoch = U256; + type WithdrawalQueueLimit = U1099511627776; + type MaxBlsToExecutionChanges = U16; + type MaxWithdrawalsPerPayload = U16; type MaxBlobsPerBlock = U16; // 2**4 = 16 type FieldElementsPerBlob = U4096; diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 78a53a3675e..f68e563e416 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -13,12 +13,35 @@ pub type Transactions = VariableList< ::MaxTransactionsPerPayload, >; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive( - Default, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, +#[superstruct( + variants(Merge, Capella, Eip4844), + variant_attributes( + derive( + Default, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, + ), + derivative(PartialEq, Hash(bound 
= "T: EthSpec")), + serde(bound = "T: EthSpec", deny_unknown_fields), + cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) + ), + cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") )] +#[derive(Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +#[serde(untagged)] #[serde(bound = "T: EthSpec")] +#[ssz(enum_behaviour = "transparent")] +#[tree_hash(enum_behaviour = "transparent")] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct ExecutionPayload { pub parent_hash: ExecutionBlockHash, pub fee_recipient: Address, @@ -39,28 +62,57 @@ pub struct ExecutionPayload { pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::quoted_u256")] pub base_fee_per_gas: Uint256, + #[superstruct(only(Eip4844))] + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub excess_blobs: u64, pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, + #[superstruct(only(Capella, Eip4844))] + pub withdrawals: VariableList, } impl ExecutionPayload { - pub fn empty() -> Self { - Self::default() + #[allow(clippy::integer_arithmetic)] + /// Returns the maximum size of an execution payload. + pub fn max_execution_payload_merge_size() -> usize { + // Fixed part + ExecutionPayloadMerge::::default().as_ssz_bytes().len() + // Max size of variable length `extra_data` field + + (T::max_extra_data_bytes() * ::ssz_fixed_len()) + // Max size of variable length `transactions` field + + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) + } + + #[allow(clippy::integer_arithmetic)] + /// Returns the maximum size of an execution payload. + pub fn max_execution_payload_capella_size() -> usize { + // Fixed part + ExecutionPayloadCapella::::default().as_ssz_bytes().len() + // Max size of variable length `extra_data` field + + (T::max_extra_data_bytes() * ::ssz_fixed_len()) + // Max size of variable length `transactions` field + + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) + // Max size of variable length `withdrawals` field + // TODO: check this + + (T::max_withdrawals_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + ::ssz_fixed_len())) } #[allow(clippy::integer_arithmetic)] /// Returns the maximum size of an execution payload. 
- pub fn max_execution_payload_size() -> usize { + pub fn max_execution_payload_eip4844_size() -> usize { // Fixed part - Self::empty().as_ssz_bytes().len() + ExecutionPayloadEip4844::::default().as_ssz_bytes().len() // Max size of variable length `extra_data` field + (T::max_extra_data_bytes() * ::ssz_fixed_len()) // Max size of variable length `transactions` field + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) + // Max size of variable length `withdrawals` field + // TODO: check this + + (T::max_withdrawals_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + ::ssz_fixed_len())) } pub fn blob_txns_iter(&self) -> Iter<'_, Transaction> { - self.transactions.iter() + self.transactions().iter() } } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 01780fa1c32..f92ab956e1d 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -5,12 +5,37 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use BeaconStateError; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive( - Default, Debug, Clone, Serialize, Deserialize, Derivative, Encode, Decode, TreeHash, TestRandom, +#[superstruct( + variants(Merge, Capella, Eip4844), + variant_attributes( + derive( + Default, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, + ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + serde(bound = "T: EthSpec", deny_unknown_fields), + cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) + ), + ref_attributes(derive(PartialEq, TreeHash), tree_hash(enum_behaviour = "transparent")), + cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") )] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +#[serde(bound = "T: EthSpec")] +#[tree_hash(enum_behaviour = "transparent")] +#[ssz(enum_behaviour = "transparent")] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct ExecutionPayloadHeader { pub parent_hash: ExecutionBlockHash, pub fee_recipient: Address, @@ -31,33 +56,107 @@ pub struct ExecutionPayloadHeader { pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::quoted_u256")] pub base_fee_per_gas: Uint256, + #[superstruct(only(Eip4844))] + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub excess_blobs: u64, pub block_hash: ExecutionBlockHash, pub transactions_root: Hash256, + #[superstruct(only(Capella, Eip4844))] + pub withdrawals_root: Hash256, } -impl ExecutionPayloadHeader { - pub fn empty() -> Self { - Self::default() +impl From> for ExecutionPayloadHeaderMerge { + fn from(payload: ExecutionPayloadMerge) -> Self { + Self { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions_root: 
payload.transactions.tree_hash_root(), + } } } - -impl<'a, T: EthSpec> From<&'a ExecutionPayload> for ExecutionPayloadHeader { - fn from(payload: &'a ExecutionPayload) -> Self { - ExecutionPayloadHeader { +impl From> for ExecutionPayloadHeaderCapella { + fn from(payload: ExecutionPayloadCapella) -> Self { + Self { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions_root: payload.transactions.tree_hash_root(), + withdrawals_root: payload.withdrawals.tree_hash_root(), + } + } +} +impl From> for ExecutionPayloadHeaderEip4844 { + fn from(payload: ExecutionPayloadEip4844) -> Self { + Self { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom.clone(), + logs_bloom: payload.logs_bloom, prev_randao: payload.prev_randao, block_number: payload.block_number, gas_limit: payload.gas_limit, gas_used: payload.gas_used, timestamp: payload.timestamp, - extra_data: payload.extra_data.clone(), + extra_data: payload.extra_data, base_fee_per_gas: payload.base_fee_per_gas, + excess_blobs: payload.excess_blobs, block_hash: payload.block_hash, transactions_root: payload.transactions.tree_hash_root(), + withdrawals_root: payload.withdrawals.tree_hash_root(), + } + } +} + +impl TryFrom> for ExecutionPayloadHeaderMerge { + type Error = BeaconStateError; + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::Merge(execution_payload_header) => Ok(execution_payload_header), + _ => Err(BeaconStateError::IncorrectStateVariant), + } + } +} +impl TryFrom> for ExecutionPayloadHeaderCapella { + type Error = BeaconStateError; + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::Capella(execution_payload_header) => { + Ok(execution_payload_header) + } + _ => Err(BeaconStateError::IncorrectStateVariant), + } + } +} +impl TryFrom> for ExecutionPayloadHeaderEip4844 { + type Error = BeaconStateError; + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::Eip4844(execution_payload_header) => { + Ok(execution_payload_header) + } + _ => Err(BeaconStateError::IncorrectStateVariant), } } } diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index dc45565d41b..42b8bdded7e 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -11,6 +11,7 @@ pub enum ForkName { Base, Altair, Merge, + Capella, Eip4844, } @@ -20,6 +21,7 @@ impl ForkName { ForkName::Base, ForkName::Altair, ForkName::Merge, + ForkName::Capella, ForkName::Eip4844, ] } @@ -32,24 +34,35 @@ impl ForkName { ForkName::Base => { spec.altair_fork_epoch = None; spec.bellatrix_fork_epoch = None; + spec.capella_fork_epoch = None; spec.eip4844_fork_epoch = None; spec } ForkName::Altair => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = None; + spec.capella_fork_epoch = None; spec.eip4844_fork_epoch = None; spec } ForkName::Merge => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = 
Some(Epoch::new(0)); + spec.capella_fork_epoch = None; + spec.eip4844_fork_epoch = None; + spec + } + ForkName::Capella => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); spec.eip4844_fork_epoch = None; spec } ForkName::Eip4844 => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); spec.eip4844_fork_epoch = Some(Epoch::new(0)); spec } @@ -64,7 +77,8 @@ impl ForkName { ForkName::Base => None, ForkName::Altair => Some(ForkName::Base), ForkName::Merge => Some(ForkName::Altair), - ForkName::Eip4844 => Some(ForkName::Merge), + ForkName::Capella => Some(ForkName::Merge), + ForkName::Eip4844 => Some(ForkName::Capella), } } @@ -75,7 +89,8 @@ impl ForkName { match self { ForkName::Base => Some(ForkName::Altair), ForkName::Altair => Some(ForkName::Merge), - ForkName::Merge => Some(ForkName::Eip4844), + ForkName::Merge => Some(ForkName::Capella), + ForkName::Capella => Some(ForkName::Eip4844), ForkName::Eip4844 => None, } } @@ -118,6 +133,10 @@ macro_rules! map_fork_name_with { let (value, extra_data) = $body; ($t::Merge(value), extra_data) } + ForkName::Capella => { + let (value, extra_data) = $body; + ($t::Capella(value), extra_data) + } ForkName::Eip4844 => { let (value, extra_data) = $body; ($t::Eip4844(value), extra_data) @@ -134,6 +153,7 @@ impl FromStr for ForkName { "phase0" | "base" => ForkName::Base, "altair" => ForkName::Altair, "bellatrix" | "merge" => ForkName::Merge, + "capella" => ForkName::Capella, "eip4844" => ForkName::Eip4844, _ => return Err(format!("unknown fork name: {}", fork_name)), }) @@ -146,6 +166,7 @@ impl Display for ForkName { ForkName::Base => "phase0".fmt(f), ForkName::Altair => "altair".fmt(f), ForkName::Merge => "bellatrix".fmt(f), + ForkName::Capella => "capella".fmt(f), ForkName::Eip4844 => "eip4844".fmt(f), } } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 4bb383dfba4..527b54f478e 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -1,5 +1,5 @@ //! 
Ethereum 2.0 types - +#![feature(generic_associated_types)] // Required for big type-level numbers #![recursion_limit = "128"] // Clippy lint set up @@ -85,6 +85,7 @@ pub mod sync_selection_proof; pub mod sync_subnet_id; mod tree_hash_impls; pub mod validator_registration_data; +pub mod withdrawal; pub mod slot_data; #[cfg(feature = "sqlite")] @@ -105,12 +106,12 @@ pub use crate::attestation_data::AttestationData; pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockEip4844, BeaconBlockMerge, - BeaconBlockRef, BeaconBlockRefMut, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockEip4844, + BeaconBlockMerge, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, }; pub use crate::beacon_block_body::{ - BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyEip4844, - BeaconBlockBodyMerge, BeaconBlockBodyRef, BeaconBlockBodyRefMut, + BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyCapella, + BeaconBlockBodyEip4844, BeaconBlockBodyMerge, BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; @@ -129,8 +130,14 @@ pub use crate::enr_fork_id::EnrForkId; pub use crate::eth1_data::Eth1Data; pub use crate::eth_spec::EthSpecId; pub use crate::execution_block_hash::ExecutionBlockHash; -pub use crate::execution_payload::{ExecutionPayload, Transaction, Transactions}; -pub use crate::execution_payload_header::ExecutionPayloadHeader; +pub use crate::execution_payload::{ + ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, + ExecutionPayloadRef, Transaction, Transactions, +}; +pub use crate::execution_payload_header::{ + ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844, + ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, +}; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; @@ -143,7 +150,11 @@ pub use crate::kzg_commitment::KzgCommitment; pub use crate::kzg_proof::KzgProof; pub use crate::participation_flags::ParticipationFlags; pub use crate::participation_list::ParticipationList; -pub use crate::payload::{BlindedPayload, BlockType, ExecPayload, FullPayload}; +pub use crate::payload::{ + AbstractExecPayload, BlindedPayload, BlindedPayloadCapella, BlindedPayloadEip4844, + BlindedPayloadMerge, BlindedPayloadRef, BlockType, ExecPayload, FullPayload, + FullPayloadCapella, FullPayloadEip4844, FullPayloadMerge, FullPayloadRef, +}; pub use crate::pending_attestation::PendingAttestation; pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset}; pub use crate::proposer_preparation_data::ProposerPreparationData; @@ -176,6 +187,7 @@ pub use crate::validator::Validator; pub use crate::validator_registration_data::*; pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; +pub use crate::withdrawal::Withdrawal; use serde_big_array::BigArray; pub type CommitteeIndex = u64; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 5004d65a9eb..d3a8fd698a9 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -1,14 +1,15 @@ use crate::{test_utils::TestRandom, *}; -use core::hash::Hasher; use 
derivative::Derivative; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; -use ssz::{Decode, DecodeError, Encode}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; use std::convert::TryFrom; use std::fmt::Debug; use std::hash::Hash; use test_random_derive::TestRandom; -use tree_hash::{PackedEncoding, TreeHash}; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; #[derive(Debug)] pub enum BlockType { @@ -16,24 +17,8 @@ pub enum BlockType { Full, } -pub trait ExecPayload: - Debug - + Clone - + Encode - + Debug - + Decode - + TestRandom - + TreeHash - + Default - + PartialEq - + Serialize - + DeserializeOwned - + Hash - + TryFrom> - + From> - + Send - + 'static -{ +// + TryFrom> +pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + Send { fn block_type() -> BlockType; /// Convert the payload into a payload header. @@ -49,15 +34,109 @@ pub trait ExecPayload: fn block_hash(&self) -> ExecutionBlockHash; fn fee_recipient(&self) -> Address; fn gas_limit(&self) -> u64; + + // Is this a default payload? (pre-merge) + fn is_default(&self) -> bool; } -impl ExecPayload for FullPayload { +pub trait OwnedExecPayload: + ExecPayload + Default + Serialize + DeserializeOwned + Encode + Decode + TestRandom + 'static +{ +} + +impl OwnedExecPayload for P where + P: ExecPayload + + Default + + Serialize + + DeserializeOwned + + Encode + + Decode + + TestRandom + + 'static +{ +} + +pub trait AbstractExecPayload: + ExecPayload + Sized + From> + TryFrom> +{ + type Ref<'a>: ExecPayload + + Copy + + From<&'a Self::Merge> + + From<&'a Self::Capella> + + From<&'a Self::Eip4844>; + + type Merge: OwnedExecPayload + + Into + + From> + + TryFrom>; + type Capella: OwnedExecPayload + + Into + + From> + + TryFrom>; + type Eip4844: OwnedExecPayload + + Into + + From> + + TryFrom>; +} + +#[superstruct( + variants(Merge, Capella, Eip4844), + variant_attributes( + derive( + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + Derivative, + ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + serde(bound = "T: EthSpec", deny_unknown_fields), + cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) + ), + ref_attributes( + derive(Debug, Derivative, TreeHash), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + tree_hash(enum_behaviour = "transparent"), + ), + cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") +)] +#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +#[serde(bound = "T: EthSpec")] +#[tree_hash(enum_behaviour = "transparent")] +pub struct FullPayload { + #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] + pub execution_payload: ExecutionPayloadMerge, + #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] + pub execution_payload: ExecutionPayloadCapella, + #[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))] + pub execution_payload: ExecutionPayloadEip4844, +} + +impl From> for ExecutionPayload { + fn from(full_payload: FullPayload) -> Self { + match full_payload { + FullPayload::Merge(payload) => ExecutionPayload::Merge(payload.execution_payload), + FullPayload::Capella(payload) => ExecutionPayload::Capella(payload.execution_payload), + FullPayload::Eip4844(payload) => ExecutionPayload::Eip4844(payload.execution_payload), + } + } +} 
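// Illustrative sketch (not part of this patch): the superstruct-generated `FullPayload` enum
// above, together with the `ExecPayload` / `AbstractExecPayload` traits, lets downstream code
// stay generic over full and blinded payloads across the Merge, Capella and Eip4844 forks.
// The helper name `describe_payload` is hypothetical; the sketch assumes the trait is generic
// over `T: EthSpec` and exposes the accessors declared in this file
// (`is_default`, `block_hash`, `block_number`, `fee_recipient`).
fn describe_payload<T: EthSpec, Payload: ExecPayload<T>>(payload: &Payload) -> String {
    if payload.is_default() {
        // Pre-merge blocks carry the default (empty) payload.
        "default pre-merge payload".to_string()
    } else {
        format!(
            "payload {:?} at block number {}, fees to {:?}",
            payload.block_hash(),
            payload.block_number(),
            payload.fee_recipient()
        )
    }
}
// Any type implementing `ExecPayload` here could be passed in: `FullPayload<T>`,
// `BlindedPayload<T>`, or one of the per-fork variants such as `FullPayloadMerge<T>`.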
+ +impl ExecPayload for FullPayloadMerge { fn block_type() -> BlockType { BlockType::Full } fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - ExecutionPayloadHeader::from(&self.execution_payload) + ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge::from( + self.execution_payload.clone(), + )) } fn parent_hash(&self) -> ExecutionBlockHash { @@ -87,6 +166,381 @@ impl ExecPayload for FullPayload { fn gas_limit(&self) -> u64 { self.execution_payload.gas_limit } + + // TODO: can this function be optimized? + fn is_default(&self) -> bool { + self.execution_payload == ExecutionPayloadMerge::default() + } +} +impl ExecPayload for FullPayloadCapella { + fn block_type() -> BlockType { + BlockType::Full + } + + fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { + ExecutionPayloadHeader::Capella(ExecutionPayloadHeaderCapella::from( + self.execution_payload.clone(), + )) + } + + fn parent_hash(&self) -> ExecutionBlockHash { + self.execution_payload.parent_hash + } + + fn prev_randao(&self) -> Hash256 { + self.execution_payload.prev_randao + } + + fn block_number(&self) -> u64 { + self.execution_payload.block_number + } + + fn timestamp(&self) -> u64 { + self.execution_payload.timestamp + } + + fn block_hash(&self) -> ExecutionBlockHash { + self.execution_payload.block_hash + } + + fn fee_recipient(&self) -> Address { + self.execution_payload.fee_recipient + } + + fn gas_limit(&self) -> u64 { + self.execution_payload.gas_limit + } + + // TODO: can this function be optimized? + fn is_default(&self) -> bool { + self.execution_payload == ExecutionPayloadCapella::default() + } +} +impl ExecPayload for FullPayloadEip4844 { + fn block_type() -> BlockType { + BlockType::Full + } + + fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { + ExecutionPayloadHeader::Eip4844(ExecutionPayloadHeaderEip4844::from( + self.execution_payload.clone(), + )) + } + + fn parent_hash(&self) -> ExecutionBlockHash { + self.execution_payload.parent_hash + } + + fn prev_randao(&self) -> Hash256 { + self.execution_payload.prev_randao + } + + fn block_number(&self) -> u64 { + self.execution_payload.block_number + } + + fn timestamp(&self) -> u64 { + self.execution_payload.timestamp + } + + fn block_hash(&self) -> ExecutionBlockHash { + self.execution_payload.block_hash + } + + fn fee_recipient(&self) -> Address { + self.execution_payload.fee_recipient + } + + fn gas_limit(&self) -> u64 { + self.execution_payload.gas_limit + } + + // TODO: can this function be optimized? 
+ fn is_default(&self) -> bool { + self.execution_payload == ExecutionPayloadEip4844::default() + } +} + +impl ExecPayload for FullPayload { + fn block_type() -> BlockType { + BlockType::Full + } + + fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { + match self { + Self::Merge(payload) => payload.to_execution_payload_header(), + Self::Capella(payload) => payload.to_execution_payload_header(), + Self::Eip4844(payload) => payload.to_execution_payload_header(), + } + } + + fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.parent_hash + }) + } + + fn prev_randao<'a>(&'a self) -> Hash256 { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.prev_randao + }) + } + + fn block_number<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.block_number + }) + } + + fn timestamp<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.timestamp + }) + } + + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.block_hash + }) + } + + fn fee_recipient<'a>(&'a self) -> Address { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.fee_recipient + }) + } + + fn gas_limit<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.gas_limit + }) + } + + fn is_default(&self) -> bool { + match self { + Self::Merge(payload) => payload.is_default(), + Self::Capella(payload) => payload.is_default(), + Self::Eip4844(payload) => payload.is_default(), + } + } +} + +impl FullPayload { + pub fn execution_payload(&self) -> ExecutionPayload { + match self { + Self::Merge(full) => ExecutionPayload::Merge(full.execution_payload.clone()), + Self::Capella(full) => ExecutionPayload::Capella(full.execution_payload.clone()), + Self::Eip4844(full) => ExecutionPayload::Eip4844(full.execution_payload.clone()), + } + } +} + +impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { + fn block_type() -> BlockType { + BlockType::Full + } + + fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { + match self { + Self::Merge(payload) => payload.to_execution_payload_header(), + Self::Capella(payload) => payload.to_execution_payload_header(), + Self::Eip4844(payload) => payload.to_execution_payload_header(), + } + } + + fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.parent_hash + }) + } + + fn prev_randao<'a>(&'a self) -> Hash256 { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.prev_randao + }) + } + + fn block_number<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.block_number + }) + } + + fn timestamp<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.timestamp + }) + } + + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); 
+ payload.execution_payload.block_hash + }) + } + + fn fee_recipient<'a>(&'a self) -> Address { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.fee_recipient + }) + } + + fn gas_limit<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.gas_limit + }) + } + + // TODO: can this function be optimized? + fn is_default<'a>(&'a self) -> bool { + match self { + Self::Merge(payload_ref) => { + payload_ref.execution_payload == ExecutionPayloadMerge::default() + } + Self::Capella(payload_ref) => { + payload_ref.execution_payload == ExecutionPayloadCapella::default() + } + Self::Eip4844(payload_ref) => { + payload_ref.execution_payload == ExecutionPayloadEip4844::default() + } + } + } +} + +impl AbstractExecPayload for FullPayload { + type Ref<'a> = FullPayloadRef<'a, T>; + type Merge = FullPayloadMerge; + type Capella = FullPayloadCapella; + type Eip4844 = FullPayloadEip4844; +} + +impl From> for FullPayload { + fn from(execution_payload: ExecutionPayload) -> Self { + match execution_payload { + ExecutionPayload::Merge(execution_payload) => { + Self::Merge(FullPayloadMerge { execution_payload }) + } + ExecutionPayload::Capella(execution_payload) => { + Self::Capella(FullPayloadCapella { execution_payload }) + } + ExecutionPayload::Eip4844(execution_payload) => { + Self::Eip4844(FullPayloadEip4844 { execution_payload }) + } + } + } +} + +impl TryFrom> for FullPayload { + type Error = (); + fn try_from(_: ExecutionPayloadHeader) -> Result { + Err(()) + } +} + +impl From> for FullPayloadMerge { + fn from(execution_payload: ExecutionPayloadMerge) -> Self { + Self { execution_payload } + } +} +impl From> for FullPayloadCapella { + fn from(execution_payload: ExecutionPayloadCapella) -> Self { + Self { execution_payload } + } +} +impl From> for FullPayloadEip4844 { + fn from(execution_payload: ExecutionPayloadEip4844) -> Self { + Self { execution_payload } + } +} + +impl TryFrom> for FullPayloadMerge { + type Error = (); + fn try_from(_: ExecutionPayloadHeader) -> Result { + Err(()) + } +} +impl TryFrom> for FullPayloadCapella { + type Error = (); + fn try_from(_: ExecutionPayloadHeader) -> Result { + Err(()) + } +} +impl TryFrom> for FullPayloadEip4844 { + type Error = (); + fn try_from(_: ExecutionPayloadHeader) -> Result { + Err(()) + } +} + +impl TryFrom> for FullPayloadMerge { + type Error = (); + fn try_from(_: ExecutionPayloadHeaderMerge) -> Result { + Err(()) + } +} +impl TryFrom> for FullPayloadCapella { + type Error = (); + fn try_from(_: ExecutionPayloadHeaderCapella) -> Result { + Err(()) + } +} +impl TryFrom> for FullPayloadEip4844 { + type Error = (); + fn try_from(_: ExecutionPayloadHeaderEip4844) -> Result { + Err(()) + } +} + +#[superstruct( + variants(Merge, Capella, Eip4844), + variant_attributes( + derive( + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + Derivative, + ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + serde(bound = "T: EthSpec", deny_unknown_fields), + cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) + ), + ref_attributes( + derive(Debug, Derivative, TreeHash), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + tree_hash(enum_behaviour = "transparent"), + ), + cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") +)] +#[derive(Debug, Clone, Serialize, 
Deserialize, TreeHash, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +#[serde(bound = "T: EthSpec")] +#[tree_hash(enum_behaviour = "transparent")] +pub struct BlindedPayload { + #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] + pub execution_payload_header: ExecutionPayloadHeaderMerge, + #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] + pub execution_payload_header: ExecutionPayloadHeaderCapella, + #[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))] + pub execution_payload_header: ExecutionPayloadHeaderEip4844, } impl ExecPayload for BlindedPayload { @@ -95,7 +549,197 @@ impl ExecPayload for BlindedPayload { } fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - self.execution_payload_header.clone() + match self { + Self::Merge(payload) => { + ExecutionPayloadHeader::Merge(payload.execution_payload_header.clone()) + } + Self::Capella(payload) => { + ExecutionPayloadHeader::Capella(payload.execution_payload_header.clone()) + } + Self::Eip4844(payload) => { + ExecutionPayloadHeader::Eip4844(payload.execution_payload_header.clone()) + } + } + } + + fn parent_hash(&self) -> ExecutionBlockHash { + match self { + Self::Merge(payload) => payload.execution_payload_header.parent_hash, + Self::Capella(payload) => payload.execution_payload_header.parent_hash, + Self::Eip4844(payload) => payload.execution_payload_header.parent_hash, + } + } + + fn prev_randao(&self) -> Hash256 { + match self { + Self::Merge(payload) => payload.execution_payload_header.prev_randao, + Self::Capella(payload) => payload.execution_payload_header.prev_randao, + Self::Eip4844(payload) => payload.execution_payload_header.prev_randao, + } + } + + fn block_number(&self) -> u64 { + match self { + Self::Merge(payload) => payload.execution_payload_header.block_number, + Self::Capella(payload) => payload.execution_payload_header.block_number, + Self::Eip4844(payload) => payload.execution_payload_header.block_number, + } + } + + fn timestamp(&self) -> u64 { + match self { + Self::Merge(payload) => payload.execution_payload_header.timestamp, + Self::Capella(payload) => payload.execution_payload_header.timestamp, + Self::Eip4844(payload) => payload.execution_payload_header.timestamp, + } + } + + fn block_hash(&self) -> ExecutionBlockHash { + match self { + Self::Merge(payload) => payload.execution_payload_header.block_hash, + Self::Capella(payload) => payload.execution_payload_header.block_hash, + Self::Eip4844(payload) => payload.execution_payload_header.block_hash, + } + } + + fn fee_recipient(&self) -> Address { + match self { + Self::Merge(payload) => payload.execution_payload_header.fee_recipient, + Self::Capella(payload) => payload.execution_payload_header.fee_recipient, + Self::Eip4844(payload) => payload.execution_payload_header.fee_recipient, + } + } + + fn gas_limit(&self) -> u64 { + match self { + Self::Merge(payload) => payload.execution_payload_header.gas_limit, + Self::Capella(payload) => payload.execution_payload_header.gas_limit, + Self::Eip4844(payload) => payload.execution_payload_header.gas_limit, + } + } + + // TODO: can this function be optimized? 
+ fn is_default(&self) -> bool { + match self { + /* + Self::Merge(payload) => { + payload.execution_payload_header == ExecutionPayloadHeaderMerge::default() + } + Self::Capella(payload) => { + payload.execution_payload_header == ExecutionPayloadHeaderCapella::default() + } + Self::Eip4844(payload) => { + payload.execution_payload_header == ExecutionPayloadHeaderEip4844::default() + } + */ + Self::Merge(payload) => payload.is_default(), + Self::Capella(payload) => payload.is_default(), + Self::Eip4844(payload) => payload.is_default(), + } + } +} + +// FIXME(sproul): deduplicate this +impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { + fn block_type() -> BlockType { + BlockType::Blinded + } + + fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { + match self { + Self::Merge(payload) => { + ExecutionPayloadHeader::Merge(payload.execution_payload_header.clone()) + } + Self::Capella(payload) => { + ExecutionPayloadHeader::Capella(payload.execution_payload_header.clone()) + } + Self::Eip4844(payload) => { + ExecutionPayloadHeader::Eip4844(payload.execution_payload_header.clone()) + } + } + } + + fn parent_hash(&self) -> ExecutionBlockHash { + match self { + Self::Merge(payload) => payload.execution_payload_header.parent_hash, + Self::Capella(payload) => payload.execution_payload_header.parent_hash, + Self::Eip4844(payload) => payload.execution_payload_header.parent_hash, + } + } + + fn prev_randao(&self) -> Hash256 { + match self { + Self::Merge(payload) => payload.execution_payload_header.prev_randao, + Self::Capella(payload) => payload.execution_payload_header.prev_randao, + Self::Eip4844(payload) => payload.execution_payload_header.prev_randao, + } + } + + fn block_number(&self) -> u64 { + match self { + Self::Merge(payload) => payload.execution_payload_header.block_number, + Self::Capella(payload) => payload.execution_payload_header.block_number, + Self::Eip4844(payload) => payload.execution_payload_header.block_number, + } + } + + fn timestamp(&self) -> u64 { + match self { + Self::Merge(payload) => payload.execution_payload_header.timestamp, + Self::Capella(payload) => payload.execution_payload_header.timestamp, + Self::Eip4844(payload) => payload.execution_payload_header.timestamp, + } + } + + fn block_hash(&self) -> ExecutionBlockHash { + match self { + Self::Merge(payload) => payload.execution_payload_header.block_hash, + Self::Capella(payload) => payload.execution_payload_header.block_hash, + Self::Eip4844(payload) => payload.execution_payload_header.block_hash, + } + } + + fn fee_recipient(&self) -> Address { + match self { + Self::Merge(payload) => payload.execution_payload_header.fee_recipient, + Self::Capella(payload) => payload.execution_payload_header.fee_recipient, + Self::Eip4844(payload) => payload.execution_payload_header.fee_recipient, + } + } + + fn gas_limit(&self) -> u64 { + match self { + Self::Merge(payload) => payload.execution_payload_header.gas_limit, + Self::Capella(payload) => payload.execution_payload_header.gas_limit, + Self::Eip4844(payload) => payload.execution_payload_header.gas_limit, + } + } + + // TODO: can this function be optimized? 
+ fn is_default<'a>(&'a self) -> bool { + match self { + Self::Merge(payload) => { + payload.execution_payload_header == ExecutionPayloadHeaderMerge::default() + } + Self::Capella(payload) => { + payload.execution_payload_header == ExecutionPayloadHeaderCapella::default() + } + Self::Eip4844(payload) => { + payload.execution_payload_header == ExecutionPayloadHeaderEip4844::default() + } + } + } +} + +impl ExecPayload for BlindedPayloadMerge { + fn block_type() -> BlockType { + BlockType::Blinded + } + + fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { + ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge::from( + self.execution_payload_header.clone(), + )) } fn parent_hash(&self) -> ExecutionBlockHash { @@ -125,13 +769,125 @@ impl ExecPayload for BlindedPayload { fn gas_limit(&self) -> u64 { self.execution_payload_header.gas_limit } + + fn is_default(&self) -> bool { + self.execution_payload_header == ExecutionPayloadHeaderMerge::default() + } } +impl ExecPayload for BlindedPayloadCapella { + fn block_type() -> BlockType { + BlockType::Blinded + } -#[derive(Debug, Clone, TestRandom, Serialize, Deserialize, Derivative)] -#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] -#[serde(bound = "T: EthSpec")] -pub struct BlindedPayload { - pub execution_payload_header: ExecutionPayloadHeader, + fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { + ExecutionPayloadHeader::Capella(ExecutionPayloadHeaderCapella::from( + self.execution_payload_header.clone(), + )) + } + + fn parent_hash(&self) -> ExecutionBlockHash { + self.execution_payload_header.parent_hash + } + + fn prev_randao(&self) -> Hash256 { + self.execution_payload_header.prev_randao + } + + fn block_number(&self) -> u64 { + self.execution_payload_header.block_number + } + + fn timestamp(&self) -> u64 { + self.execution_payload_header.timestamp + } + + fn block_hash(&self) -> ExecutionBlockHash { + self.execution_payload_header.block_hash + } + + fn fee_recipient(&self) -> Address { + self.execution_payload_header.fee_recipient + } + + fn gas_limit(&self) -> u64 { + self.execution_payload_header.gas_limit + } + + fn is_default(&self) -> bool { + self.execution_payload_header == ExecutionPayloadHeaderCapella::default() + } +} +impl ExecPayload for BlindedPayloadEip4844 { + fn block_type() -> BlockType { + BlockType::Blinded + } + + fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { + ExecutionPayloadHeader::Eip4844(ExecutionPayloadHeaderEip4844::from( + self.execution_payload_header.clone(), + )) + } + + fn parent_hash(&self) -> ExecutionBlockHash { + self.execution_payload_header.parent_hash + } + + fn prev_randao(&self) -> Hash256 { + self.execution_payload_header.prev_randao + } + + fn block_number(&self) -> u64 { + self.execution_payload_header.block_number + } + + fn timestamp(&self) -> u64 { + self.execution_payload_header.timestamp + } + + fn block_hash(&self) -> ExecutionBlockHash { + self.execution_payload_header.block_hash + } + + fn fee_recipient(&self) -> Address { + self.execution_payload_header.fee_recipient + } + + fn gas_limit(&self) -> u64 { + self.execution_payload_header.gas_limit + } + + fn is_default(&self) -> bool { + self.execution_payload_header == ExecutionPayloadHeaderEip4844::default() + } +} + +impl AbstractExecPayload for BlindedPayload { + type Ref<'a> = BlindedPayloadRef<'a, T>; + type Merge = BlindedPayloadMerge; + type Capella = BlindedPayloadCapella; + type Eip4844 = BlindedPayloadEip4844; +} + +impl Default for FullPayloadMerge { + fn default() -> Self { + Self
{ + execution_payload: ExecutionPayloadMerge::default(), + } + } +} +impl Default for FullPayloadCapella { + fn default() -> Self { + Self { + execution_payload: ExecutionPayloadCapella::default(), + } + } +} +impl Default for FullPayloadEip4844 { + fn default() -> Self { + Self { + execution_payload: ExecutionPayloadEip4844::default(), + } + } } // NOTE: the `Default` implementation for `BlindedPayload` needs to be different from the `Default` @@ -141,6 +897,7 @@ pub struct BlindedPayload { // The default `BlindedPayload` is therefore the payload header that results from blinding the // default `ExecutionPayload`, which differs from the default `ExecutionPayloadHeader` in that // its `transactions_root` is the hash of the empty list rather than 0x0. +/* impl Default for BlindedPayload { fn default() -> Self { Self { @@ -148,9 +905,86 @@ impl Default for BlindedPayload { } } } +*/ + +impl Default for BlindedPayloadMerge { + fn default() -> Self { + Self { + execution_payload_header: ExecutionPayloadHeaderMerge::from( + ExecutionPayloadMerge::default(), + ), + } + } +} + +impl Default for BlindedPayloadCapella { + fn default() -> Self { + Self { + execution_payload_header: ExecutionPayloadHeaderCapella::from( + ExecutionPayloadCapella::default(), + ), + } + } +} + +impl Default for BlindedPayloadEip4844 { + fn default() -> Self { + Self { + execution_payload_header: ExecutionPayloadHeaderEip4844::from( + ExecutionPayloadEip4844::default(), + ), + } + } +} + +impl From> for BlindedPayload { + fn from(payload: ExecutionPayload) -> Self { + match payload { + ExecutionPayload::Merge(payload) => BlindedPayload::Merge(payload.into()), + ExecutionPayload::Capella(payload) => BlindedPayload::Capella(payload.into()), + ExecutionPayload::Eip4844(payload) => BlindedPayload::Eip4844(payload.into()), + } + } +} impl From> for BlindedPayload { fn from(execution_payload_header: ExecutionPayloadHeader) -> Self { + match execution_payload_header { + ExecutionPayloadHeader::Merge(execution_payload_header) => { + Self::Merge(BlindedPayloadMerge { + execution_payload_header, + }) + } + ExecutionPayloadHeader::Capella(execution_payload_header) => { + Self::Capella(BlindedPayloadCapella { + execution_payload_header, + }) + } + ExecutionPayloadHeader::Eip4844(execution_payload_header) => { + Self::Eip4844(BlindedPayloadEip4844 { + execution_payload_header, + }) + } + } + } +} + +impl From> for BlindedPayloadMerge { + fn from(execution_payload_header: ExecutionPayloadHeaderMerge) -> Self { + Self { + execution_payload_header, + } + } +} +impl From> for BlindedPayloadCapella { + fn from(execution_payload_header: ExecutionPayloadHeaderCapella) -> Self { + Self { + execution_payload_header, + } + } +} +impl From> for BlindedPayloadEip4844 { + fn from(execution_payload_header: ExecutionPayloadHeaderEip4844) -> Self { Self { execution_payload_header, } @@ -159,36 +993,79 @@ impl From> for BlindedPayload { impl From> for ExecutionPayloadHeader { fn from(blinded: BlindedPayload) -> Self { - blinded.execution_payload_header + match blinded { + BlindedPayload::Merge(blinded_payload) => { + ExecutionPayloadHeader::Merge(blinded_payload.execution_payload_header) + } + BlindedPayload::Capella(blinded_payload) => { + ExecutionPayloadHeader::Capella(blinded_payload.execution_payload_header) + } + BlindedPayload::Eip4844(blinded_payload) => { + ExecutionPayloadHeader::Eip4844(blinded_payload.execution_payload_header) + } + } } } -impl From> for BlindedPayload { - fn from(execution_payload: ExecutionPayload) -> Self { +// 
FIXME(sproul): consider adding references to these From impls +impl From> for BlindedPayloadMerge { + fn from(execution_payload: ExecutionPayloadMerge) -> Self { Self { - execution_payload_header: ExecutionPayloadHeader::from(&execution_payload), + execution_payload_header: ExecutionPayloadHeaderMerge::from(execution_payload), } } } - -impl TreeHash for BlindedPayload { - fn tree_hash_type() -> tree_hash::TreeHashType { - >::tree_hash_type() +impl From> for BlindedPayloadCapella { + fn from(execution_payload: ExecutionPayloadCapella) -> Self { + Self { + execution_payload_header: ExecutionPayloadHeaderCapella::from(execution_payload), + } } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - self.execution_payload_header.tree_hash_packed_encoding() +} +impl From> for BlindedPayloadEip4844 { + fn from(execution_payload: ExecutionPayloadEip4844) -> Self { + Self { + execution_payload_header: ExecutionPayloadHeaderEip4844::from(execution_payload), + } } +} - fn tree_hash_packing_factor() -> usize { - >::tree_hash_packing_factor() +impl TryFrom> for BlindedPayloadMerge { + type Error = (); + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::Merge(execution_payload_header) => { + Ok(execution_payload_header.into()) + } + _ => Err(()), + } + } +} +impl TryFrom> for BlindedPayloadCapella { + type Error = (); + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::Capella(execution_payload_header) => { + Ok(execution_payload_header.into()) + } + _ => Err(()), + } } +} - fn tree_hash_root(&self) -> tree_hash::Hash256 { - self.execution_payload_header.tree_hash_root() +impl TryFrom> for BlindedPayloadEip4844 { + type Error = (); + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::Eip4844(execution_payload_header) => { + Ok(execution_payload_header.into()) + } + _ => Err(()), + } } } +/* impl Decode for BlindedPayload { fn is_ssz_fixed_len() -> bool { as Decode>::is_ssz_fixed_len() @@ -204,7 +1081,9 @@ impl Decode for BlindedPayload { }) } } + */ +/* impl Encode for BlindedPayload { fn is_ssz_fixed_len() -> bool { as Encode>::is_ssz_fixed_len() @@ -218,28 +1097,9 @@ impl Encode for BlindedPayload { self.execution_payload_header.ssz_bytes_len() } } +*/ -#[derive(Default, Debug, Clone, Serialize, Deserialize, TestRandom, Derivative)] -#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] -#[serde(bound = "T: EthSpec")] -pub struct FullPayload { - pub execution_payload: ExecutionPayload, -} - -impl From> for FullPayload { - fn from(execution_payload: ExecutionPayload) -> Self { - Self { execution_payload } - } -} - -impl TryFrom> for FullPayload { - type Error = (); - - fn try_from(_: ExecutionPayloadHeader) -> Result { - Err(()) - } -} - +/* impl TreeHash for FullPayload { fn tree_hash_type() -> tree_hash::TreeHashType { >::tree_hash_type() @@ -257,7 +1117,9 @@ impl TreeHash for FullPayload { self.execution_payload.tree_hash_root() } } +*/ +/* impl Decode for FullPayload { fn is_ssz_fixed_len() -> bool { as Decode>::is_ssz_fixed_len() @@ -283,3 +1145,4 @@ impl Encode for FullPayload { self.execution_payload.ssz_bytes_len() } } +*/ diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 4b937912ba2..70cc4c1125a 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -38,7 +38,7 @@ impl From for Hash256 { /// A `BeaconBlock` and a signature from its 
proposer. #[superstruct( - variants(Base, Altair, Merge, Eip4844), + variants(Base, Altair, Merge, Capella, Eip4844), variant_attributes( derive( Debug, @@ -65,13 +65,15 @@ impl From for Hash256 { #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] -pub struct SignedBeaconBlock = FullPayload> { +pub struct SignedBeaconBlock = FullPayload> { #[superstruct(only(Base), partial_getter(rename = "message_base"))] pub message: BeaconBlockBase, #[superstruct(only(Altair), partial_getter(rename = "message_altair"))] pub message: BeaconBlockAltair, #[superstruct(only(Merge), partial_getter(rename = "message_merge"))] pub message: BeaconBlockMerge, + #[superstruct(only(Capella), partial_getter(rename = "message_capella"))] + pub message: BeaconBlockCapella, #[superstruct(only(Eip4844), partial_getter(rename = "message_eip4844"))] pub message: BeaconBlockEip4844, pub signature: Signature, @@ -79,7 +81,7 @@ pub struct SignedBeaconBlock = FullPayload = SignedBeaconBlock>; -impl> SignedBeaconBlock { +impl> SignedBeaconBlock { /// Returns the name of the fork pertaining to `self`. /// /// Will return an `Err` if `self` has been instantiated to a variant conflicting with the fork @@ -131,6 +133,9 @@ impl> SignedBeaconBlock { BeaconBlock::Merge(message) => { SignedBeaconBlock::Merge(SignedBeaconBlockMerge { message, signature }) } + BeaconBlock::Capella(message) => { + SignedBeaconBlock::Capella(SignedBeaconBlockCapella { message, signature }) + } BeaconBlock::Eip4844(message) => { SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844 { message, signature }) } @@ -263,7 +268,7 @@ impl From>> impl SignedBeaconBlockMerge> { pub fn into_full_block( self, - execution_payload: ExecutionPayload, + execution_payload: ExecutionPayloadMerge, ) -> SignedBeaconBlockMerge> { let SignedBeaconBlockMerge { message: @@ -283,7 +288,7 @@ impl SignedBeaconBlockMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: BlindedPayload { .. }, + execution_payload: BlindedPayloadMerge { .. }, }, }, signature, @@ -304,7 +309,59 @@ impl SignedBeaconBlockMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayloadMerge { execution_payload }, + }, + }, + signature, + } + } +} + +impl SignedBeaconBlockCapella> { + pub fn into_full_block( + self, + execution_payload: ExecutionPayloadCapella, + ) -> SignedBeaconBlockCapella> { + let SignedBeaconBlockCapella { + message: + BeaconBlockCapella { + slot, + proposer_index, + parent_root, + state_root, + body: + BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadCapella { .. 
}, + }, + }, + signature, + } = self; + SignedBeaconBlockCapella { + message: BeaconBlockCapella { + slot, + proposer_index, + parent_root, + state_root, + body: BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadCapella { execution_payload }, }, }, signature, @@ -315,7 +372,7 @@ impl SignedBeaconBlockMerge> { impl SignedBeaconBlockEip4844> { pub fn into_full_block( self, - execution_payload: ExecutionPayload, + execution_payload: ExecutionPayloadEip4844, ) -> SignedBeaconBlockEip4844> { let SignedBeaconBlockEip4844 { message: @@ -335,7 +392,7 @@ impl SignedBeaconBlockEip4844> { deposits, voluntary_exits, sync_aggregate, - execution_payload: BlindedPayload { .. }, + execution_payload: BlindedPayloadEip4844 { .. }, blob_kzg_commitments, }, }, @@ -357,7 +414,7 @@ impl SignedBeaconBlockEip4844> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayloadEip4844 { execution_payload }, blob_kzg_commitments, }, }, @@ -371,15 +428,23 @@ impl SignedBeaconBlock> { self, execution_payload: Option>, ) -> Option>> { - let full_block = match self { - SignedBeaconBlock::Base(block) => SignedBeaconBlock::Base(block.into()), - SignedBeaconBlock::Altair(block) => SignedBeaconBlock::Altair(block.into()), - SignedBeaconBlock::Merge(block) => { - SignedBeaconBlock::Merge(block.into_full_block(execution_payload?)) + let full_block = match (self, execution_payload) { + (SignedBeaconBlock::Base(block), _) => SignedBeaconBlock::Base(block.into()), + (SignedBeaconBlock::Altair(block), _) => SignedBeaconBlock::Altair(block.into()), + (SignedBeaconBlock::Merge(block), Some(ExecutionPayload::Merge(payload))) => { + SignedBeaconBlock::Merge(block.into_full_block(payload)) + } + (SignedBeaconBlock::Capella(block), Some(ExecutionPayload::Capella(payload))) => { + SignedBeaconBlock::Capella(block.into_full_block(payload)) } - SignedBeaconBlock::Eip4844(block) => { - SignedBeaconBlock::Eip4844(block.into_full_block(execution_payload?)) + (SignedBeaconBlock::Eip4844(block), Some(ExecutionPayload::Eip4844(payload))) => { + SignedBeaconBlock::Eip4844(block.into_full_block(payload)) } + // avoid wildcard matching forks so that compiler will + // direct us here when a new fork has been added + (SignedBeaconBlock::Merge(_), _) => return None, + (SignedBeaconBlock::Capella(_), _) => return None, + (SignedBeaconBlock::Eip4844(_), _) => return None, }; Some(full_block) } diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs new file mode 100644 index 00000000000..73688479132 --- /dev/null +++ b/consensus/types/src/withdrawal.rs @@ -0,0 +1,27 @@ +use crate::test_utils::TestRandom; +use crate::*; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +/// A deposit to potentially become a beacon chain validator. 
+/// +/// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive( + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] +pub struct Withdrawal { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub index: u64, + pub address: Address, + pub amount: u64, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(Withdrawal); +} From c1c5dc0a649a3c6c3de30ed5b58c298331baa305 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Thu, 13 Oct 2022 17:07:32 -0500 Subject: [PATCH 050/263] Fixed some stuff in state processing (#3640) --- consensus/state_processing/src/genesis.rs | 2 +- .../src/per_block_processing.rs | 21 ++++++++-------- .../block_signature_verifier.rs | 24 +++++++++---------- .../process_operations.rs | 4 ++-- .../per_block_processing/signature_sets.rs | 12 +++++----- 5 files changed, 32 insertions(+), 31 deletions(-) diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index fb2c9bfa7d0..32160a48150 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -67,7 +67,7 @@ pub fn initialize_beacon_state_from_eth1( state.fork_mut().previous_version = spec.bellatrix_fork_version; // Override latest execution payload header. - // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/merge/beacon-chain.md#testing + // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing *state.latest_execution_payload_header_mut()? = execution_payload_header.unwrap_or_default(); } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index e409372ddd0..417e963ed2b 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -87,7 +87,7 @@ pub enum VerifyBlockRoot { /// re-calculating the root when it is already known. Note `block_root` should be equal to the /// tree hash root of the block, NOT the signing root of the block. This function takes /// care of mixing in the domain. -pub fn per_block_processing>( +pub fn per_block_processing>( state: &mut BeaconState, signed_block: &SignedBeaconBlock, block_root: Option, @@ -232,7 +232,7 @@ pub fn process_block_header( /// Verifies the signature of a block. /// /// Spec v0.12.1 -pub fn verify_block_signature>( +pub fn verify_block_signature>( state: &BeaconState, block: &SignedBeaconBlock, block_root: Option, @@ -255,7 +255,7 @@ pub fn verify_block_signature>( /// Verifies the `randao_reveal` against the block's proposer pubkey and updates /// `state.latest_randao_mixes`. -pub fn process_randao>( +pub fn process_randao>( state: &mut BeaconState, block: BeaconBlockRef<'_, T, Payload>, verify_signatures: VerifySignatures, @@ -376,30 +376,31 @@ pub fn process_execution_payload>( /// the merge has happened or if we're on the transition block. Thus we don't want to propagate /// errors from the `BeaconState` being an earlier variant than `BeaconStateMerge` as we'd have to /// repeaetedly write code to treat these errors as false. 
-/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#is_merge_transition_complete +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_complete pub fn is_merge_transition_complete(state: &BeaconState) -> bool { state .latest_execution_payload_header() .map(|header| *header != >::default()) .unwrap_or(false) } -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#is_merge_transition_block -pub fn is_merge_transition_block>( +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_block +pub fn is_merge_transition_block>( state: &BeaconState, body: BeaconBlockBodyRef, ) -> bool { body.execution_payload() - .map(|payload| !is_merge_transition_complete(state) && *payload != Payload::default()) + .map(|payload| !is_merge_transition_complete(state) && !payload.is_default()) .unwrap_or(false) } -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#is_execution_enabled -pub fn is_execution_enabled>( +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_execution_enabled +pub fn is_execution_enabled>( state: &BeaconState, body: BeaconBlockBodyRef, ) -> bool { is_merge_transition_block(state, body) || is_merge_transition_complete(state) } -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#compute_timestamp_at_slot + +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot pub fn compute_timestamp_at_slot( state: &BeaconState, spec: &ChainSpec, diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 78205ca92c0..cbfb5eeafe2 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -7,8 +7,8 @@ use bls::{verify_signature_sets, PublicKey, PublicKeyBytes, SignatureSet}; use rayon::prelude::*; use std::borrow::Cow; use types::{ - BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, IndexedAttestation, - SignedBeaconBlock, + AbstractExecPayload, BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256, + IndexedAttestation, SignedBeaconBlock, }; pub type Result = std::result::Result; @@ -117,7 +117,7 @@ where /// contains invalid signatures on deposits._ /// /// See `Self::verify` for more detail. - pub fn verify_entire_block>( + pub fn verify_entire_block>( state: &'a BeaconState, get_pubkey: F, decompressor: D, @@ -131,7 +131,7 @@ where } /// Includes all signatures on the block (except the deposit signatures) for verification. - pub fn include_all_signatures>( + pub fn include_all_signatures>( &mut self, block: &'a SignedBeaconBlock, block_root: Option, @@ -144,7 +144,7 @@ where /// Includes all signatures on the block (except the deposit signatures and the proposal /// signature) for verification. - pub fn include_all_signatures_except_proposal>( + pub fn include_all_signatures_except_proposal>( &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { @@ -160,7 +160,7 @@ where } /// Includes the block signature for `self.block` for verification. 
- pub fn include_block_proposal>( + pub fn include_block_proposal>( &mut self, block: &'a SignedBeaconBlock, block_root: Option, @@ -177,7 +177,7 @@ where } /// Includes the randao signature for `self.block` for verification. - pub fn include_randao_reveal>( + pub fn include_randao_reveal>( &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { @@ -192,7 +192,7 @@ where } /// Includes all signatures in `self.block.body.proposer_slashings` for verification. - pub fn include_proposer_slashings>( + pub fn include_proposer_slashings>( &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { @@ -221,7 +221,7 @@ where } /// Includes all signatures in `self.block.body.attester_slashings` for verification. - pub fn include_attester_slashings>( + pub fn include_attester_slashings>( &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { @@ -250,7 +250,7 @@ where } /// Includes all signatures in `self.block.body.attestations` for verification. - pub fn include_attestations>( + pub fn include_attestations>( &mut self, block: &'a SignedBeaconBlock, ) -> Result>> { @@ -289,7 +289,7 @@ where } /// Includes all signatures in `self.block.body.voluntary_exits` for verification. - pub fn include_exits>( + pub fn include_exits>( &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { @@ -313,7 +313,7 @@ where } /// Include the signature of the block's sync aggregate (if it exists) for verification. - pub fn include_sync_aggregate>( + pub fn include_sync_aggregate>( &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 13c4b912355..b1f857b576b 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -9,7 +9,7 @@ use crate::VerifySignatures; use safe_arith::SafeArith; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; -pub fn process_operations<'a, T: EthSpec, Payload: ExecPayload>( +pub fn process_operations<'a, T: EthSpec, Payload: AbstractExecPayload>( state: &mut BeaconState, block_body: BeaconBlockBodyRef<'a, T, Payload>, proposer_index: u64, @@ -219,7 +219,7 @@ pub fn process_attester_slashings( } /// Wrapper function to handle calling the correct version of `process_attestations` based on /// the fork. 
-pub fn process_attestations<'a, T: EthSpec, Payload: ExecPayload>( +pub fn process_attestations<'a, T: EthSpec, Payload: AbstractExecPayload>( state: &mut BeaconState, block_body: BeaconBlockBodyRef<'a, T, Payload>, proposer_index: u64, diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 5ce1bfddd52..82a33acd73c 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -7,9 +7,9 @@ use ssz::DecodeError; use std::borrow::Cow; use tree_hash::TreeHash; use types::{ - AggregateSignature, AttesterSlashing, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, - DepositData, Domain, Epoch, EthSpec, ExecPayload, Fork, Hash256, InconsistentFork, - IndexedAttestation, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, + AbstractExecPayload, AggregateSignature, AttesterSlashing, BeaconBlockRef, BeaconState, + BeaconStateError, ChainSpec, DepositData, Domain, Epoch, EthSpec, ExecPayload, Fork, Hash256, + InconsistentFork, IndexedAttestation, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned, @@ -71,7 +71,7 @@ where } /// A signature set that is valid if a block was signed by the expected block producer. -pub fn block_proposal_signature_set<'a, T, F, Payload: ExecPayload>( +pub fn block_proposal_signature_set<'a, T, F, Payload: AbstractExecPayload>( state: &'a BeaconState, get_pubkey: F, signed_block: &'a SignedBeaconBlock, @@ -108,7 +108,7 @@ where /// Unlike `block_proposal_signature_set` this does **not** check that the proposer index is /// correct according to the shuffling. It should only be used if no suitable `BeaconState` is /// available. -pub fn block_proposal_signature_set_from_parts<'a, T, F, Payload: ExecPayload>( +pub fn block_proposal_signature_set_from_parts<'a, T, F, Payload: AbstractExecPayload>( signed_block: &'a SignedBeaconBlock, block_root: Option, proposer_index: u64, @@ -152,7 +152,7 @@ where } /// A signature set that is valid if the block proposers randao reveal signature is correct. 
-pub fn randao_signature_set<'a, T, F, Payload: ExecPayload>( +pub fn randao_signature_set<'a, T, F, Payload: AbstractExecPayload>( state: &'a BeaconState, get_pubkey: F, block: BeaconBlockRef<'a, T, Payload>, From 221c433d62dc963bd26d9bcd4f47c6921b54626b Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Fri, 14 Oct 2022 17:35:10 -0500 Subject: [PATCH 051/263] Fixed a ton of state_processing stuff (#3642) FIXME's: * consensus/fork_choice/src/fork_choice.rs * consensus/state_processing/src/per_epoch_processing/capella.rs * consensus/types/src/execution_payload_header.rs TODO's: * consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs * consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs --- beacon_node/store/src/hot_cold_store.rs | 4 +- .../store/src/impls/execution_payload.rs | 39 ++++++++- beacon_node/store/src/partial_beacon_state.rs | 80 +++++++++++++++--- consensus/fork_choice/src/fork_choice.rs | 12 ++- .../fork_choice/src/fork_choice_store.rs | 4 +- .../src/common/slash_validator.rs | 11 +-- consensus/state_processing/src/genesis.rs | 44 +++++++++- .../src/per_block_processing.rs | 39 ++++++--- .../process_operations.rs | 1 + .../per_block_processing/signature_sets.rs | 2 +- .../src/per_epoch_processing.rs | 6 +- .../src/per_epoch_processing/capella.rs | 81 +++++++++++++++++++ .../capella/full_withdrawals.rs | 10 +++ .../capella/partial_withdrawals.rs | 10 +++ consensus/state_processing/src/upgrade.rs | 4 + .../state_processing/src/upgrade/capella.rs | 74 +++++++++++++++++ .../state_processing/src/upgrade/eip4844.rs | 73 +++++++++++++++++ .../state_processing/src/upgrade/merge.rs | 4 +- consensus/types/src/beacon_state.rs | 17 ++++ .../types/src/execution_payload_header.rs | 67 +++++++++++++++ consensus/types/src/payload.rs | 11 --- 21 files changed, 538 insertions(+), 55 deletions(-) create mode 100644 consensus/state_processing/src/per_epoch_processing/capella.rs create mode 100644 consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs create mode 100644 consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs create mode 100644 consensus/state_processing/src/upgrade/capella.rs create mode 100644 consensus/state_processing/src/upgrade/eip4844.rs diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index d44b57258a3..cfba40c0bf0 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -428,7 +428,7 @@ impl, Cold: ItemStore> HotColdDB } /// Fetch a block from the store, ignoring which fork variant it *should* be for. - pub fn get_block_any_variant>( + pub fn get_block_any_variant>( &self, block_root: &Hash256, ) -> Result>, Error> { @@ -439,7 +439,7 @@ impl, Cold: ItemStore> HotColdDB /// /// This is useful for e.g. ignoring the slot-indicated fork to forcefully load a block as if it /// were for a different fork. 
- pub fn get_block_with>( + pub fn get_block_with>( &self, block_root: &Hash256, decoder: impl FnOnce(&[u8]) -> Result, ssz::DecodeError>, diff --git a/beacon_node/store/src/impls/execution_payload.rs b/beacon_node/store/src/impls/execution_payload.rs index ddb9a446280..ad68d1fba09 100644 --- a/beacon_node/store/src/impls/execution_payload.rs +++ b/beacon_node/store/src/impls/execution_payload.rs @@ -1,7 +1,35 @@ use crate::{DBColumn, Error, StoreItem}; use ssz::{Decode, Encode}; -use types::{EthSpec, ExecutionPayload}; +use types::{ + EthSpec, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, + ExecutionPayloadMerge, +}; +macro_rules! impl_store_item { + ($ty_name:ident) => { + impl StoreItem for $ty_name { + fn db_column() -> DBColumn { + DBColumn::ExecPayload + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) + } + } + }; +} +impl_store_item!(ExecutionPayloadMerge); +impl_store_item!(ExecutionPayloadCapella); +impl_store_item!(ExecutionPayloadEip4844); + +/// This fork-agnostic implementation should be only used for writing. +/// +/// It is very inefficient at reading, and decoding the desired fork-specific variant is recommended +/// instead. impl StoreItem for ExecutionPayload { fn db_column() -> DBColumn { DBColumn::ExecPayload @@ -12,6 +40,13 @@ impl StoreItem for ExecutionPayload { } fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) + ExecutionPayloadEip4844::from_ssz_bytes(bytes) + .map(Self::Eip4844) + .or_else(|_| { + ExecutionPayloadCapella::from_ssz_bytes(bytes) + .map(Self::Capella) + .or_else(|_| ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge)) + }) + .map_err(Into::into) } } diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 46bc0274f40..74e63c58ea3 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -14,7 +14,7 @@ use types::*; /// /// Utilises lazy-loading from separate storage for its vector fields. 
#[superstruct( - variants(Base, Altair, Merge, Eip4844), + variants(Base, Altair, Merge, Capella, Eip4844), variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) )] #[derive(Debug, PartialEq, Clone, Encode)] @@ -66,9 +66,9 @@ where pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge, Eip4844))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair, Merge, Eip4844))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub current_epoch_participation: VariableList, // Finality @@ -78,18 +78,39 @@ where pub finalized_checkpoint: Checkpoint, // Inactivity - #[superstruct(only(Altair, Merge, Eip4844))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair, Merge, Eip4844))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge, Eip4844))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub next_sync_committee: Arc>, // Execution - #[superstruct(only(Merge, Eip4844))] - pub latest_execution_payload_header: ExecutionPayloadHeader, + #[superstruct( + only(Merge), + partial_getter(rename = "latest_execution_payload_header_merge") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderMerge, + #[superstruct( + only(Capella), + partial_getter(rename = "latest_execution_payload_header_capella") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderCapella, + #[superstruct( + only(Eip4844), + partial_getter(rename = "latest_execution_payload_header_eip4844") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, + + // Withdrawals + #[superstruct(only(Capella, Eip4844))] + pub withdrawal_queue: VariableList, + #[superstruct(only(Capella, Eip4844))] + pub next_withdrawal_index: u64, + #[superstruct(only(Capella, Eip4844))] + pub next_partial_withdrawal_validator_index: u64, } /// Implement the conversion function from BeaconState -> PartialBeaconState. 
@@ -178,6 +199,23 @@ impl PartialBeaconState { latest_execution_payload_header ] ), + BeaconState::Capella(s) => impl_from_state_forgetful!( + s, + outer, + Capella, + PartialBeaconStateCapella, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + withdrawal_queue, + next_withdrawal_index, + next_partial_withdrawal_validator_index + ] + ), BeaconState::Eip4844(s) => impl_from_state_forgetful!( s, outer, @@ -189,7 +227,10 @@ impl PartialBeaconState { current_sync_committee, next_sync_committee, inactivity_scores, - latest_execution_payload_header + latest_execution_payload_header, + withdrawal_queue, + next_withdrawal_index, + next_partial_withdrawal_validator_index ] ), } @@ -379,6 +420,22 @@ impl TryInto> for PartialBeaconState { latest_execution_payload_header ] ), + PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( + inner, + Capella, + BeaconStateCapella, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + withdrawal_queue, + next_withdrawal_index, + next_partial_withdrawal_validator_index + ] + ), PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!( inner, Eip4844, @@ -389,7 +446,10 @@ impl TryInto> for PartialBeaconState { current_sync_committee, next_sync_committee, inactivity_scores, - latest_execution_payload_header + latest_execution_payload_header, + withdrawal_queue, + next_withdrawal_index, + next_partial_withdrawal_validator_index ] ), }; diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 7b3111ecdaa..5f4c9931f0e 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -12,9 +12,10 @@ use std::collections::BTreeSet; use std::marker::PhantomData; use std::time::Duration; use types::{ - consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, AttesterSlashing, BeaconBlockRef, - BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, - ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, + consts::merge::INTERVALS_PER_SLOT, AbstractExecPayload, AttestationShufflingId, + AttesterSlashing, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, + EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, + SignedBeaconBlock, Slot, }; #[derive(Debug)] @@ -665,7 +666,7 @@ where /// The supplied block **must** pass the `state_transition` function as it will not be run /// here. #[allow(clippy::too_many_arguments)] - pub fn on_block>( + pub fn on_block>( &mut self, system_time_current_slot: Slot, block: BeaconBlockRef, @@ -777,7 +778,10 @@ where (parent_justified, parent_finalized) } else { let justification_and_finalization_state = match block { + // FIXME: verify this is correct for Capella/Eip4844 because + // epoch processing changes in Capella.. 
BeaconBlockRef::Eip4844(_) + | BeaconBlockRef::Capella(_) | BeaconBlockRef::Merge(_) | BeaconBlockRef::Altair(_) => { let participation_cache = diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs index 9604e254754..a9a32173b6a 100644 --- a/consensus/fork_choice/src/fork_choice_store.rs +++ b/consensus/fork_choice/src/fork_choice_store.rs @@ -1,6 +1,6 @@ use std::collections::BTreeSet; use std::fmt::Debug; -use types::{BeaconBlockRef, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot}; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, Checkpoint, EthSpec, Hash256, Slot}; /// Approximates the `Store` in "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice": /// @@ -33,7 +33,7 @@ pub trait ForkChoiceStore: Sized { /// Called whenever `ForkChoice::on_block` has verified a block, but not yet added it to fork /// choice. Allows the implementer to performing caching or other housekeeping duties. - fn on_verified_block>( + fn on_verified_block>( &mut self, block: BeaconBlockRef, block_root: Hash256, diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index 02006d0c230..f085a41b9e7 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -45,11 +45,12 @@ pub fn slash_validator( validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?; let proposer_reward = match state { BeaconState::Base(_) => whistleblower_reward.safe_div(spec.proposer_reward_quotient)?, - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Eip4844(_) => { - whistleblower_reward - .safe_mul(PROPOSER_WEIGHT)? - .safe_div(WEIGHT_DENOMINATOR)? - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => whistleblower_reward + .safe_mul(PROPOSER_WEIGHT)? + .safe_div(WEIGHT_DENOMINATOR)?, }; // Ensure the whistleblower index is in the validator registry. diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 32160a48150..3f9328f4d5c 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -2,7 +2,9 @@ use super::per_block_processing::{ errors::BlockProcessingError, process_operations::process_deposit, }; use crate::common::DepositDataTree; -use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; +use crate::upgrade::{ + upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_eip4844, +}; use safe_arith::{ArithError, SafeArith}; use tree_hash::TreeHash; use types::DEPOSIT_TREE_DEPTH; @@ -61,6 +63,7 @@ pub fn initialize_beacon_state_from_eth1( .bellatrix_fork_epoch .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) { + // this will set state.latest_execution_payload_header = ExecutionPayloadHeaderMerge::default() upgrade_to_bellatrix(&mut state, spec)?; // Remove intermediate Altair fork from `state.fork`. @@ -68,8 +71,43 @@ pub fn initialize_beacon_state_from_eth1( // Override latest execution payload header. // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing - *state.latest_execution_payload_header_mut()? = - execution_payload_header.unwrap_or_default(); + if let Some(ExecutionPayloadHeader::Merge(ref header)) = execution_payload_header { + *state.latest_execution_payload_header_merge_mut()? 
= header.clone(); + } + } + + // Upgrade to capella if configured from genesis + if spec + .capella_fork_epoch + .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) + { + upgrade_to_capella(&mut state, spec)?; + + // Remove intermediate Bellatrix fork from `state.fork`. + state.fork_mut().previous_version = spec.capella_fork_version; + + // Override latest execution payload header. + // See https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#testing + if let Some(ExecutionPayloadHeader::Capella(ref header)) = execution_payload_header { + *state.latest_execution_payload_header_capella_mut()? = header.clone(); + } + } + + // Upgrade to eip4844 if configured from genesis + if spec + .eip4844_fork_epoch + .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) + { + upgrade_to_eip4844(&mut state, spec)?; + + // Remove intermediate Capella fork from `state.fork`. + state.fork_mut().previous_version = spec.eip4844_fork_version; + + // Override latest execution payload header. + // See https://github.com/ethereum/consensus-specs/blob/dev/specs/eip4844/beacon-chain.md#testing + if let Some(ExecutionPayloadHeader::Eip4844(header)) = execution_payload_header { + *state.latest_execution_payload_header_eip4844_mut()? = header; + } } // Now that we have our validators, initialize the caches (including the committees) diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 417e963ed2b..71ceb71c140 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -154,7 +154,7 @@ pub fn per_block_processing>( // previous block. if is_execution_enabled(state, block.body()) { let payload = block.body().execution_payload()?; - process_execution_payload(state, payload, spec)?; + process_execution_payload::(state, payload, spec)?; } process_randao(state, block, verify_randao, spec)?; @@ -319,16 +319,16 @@ pub fn get_new_eth1_data( /// Contains a partial set of checks from the `process_execution_payload` function: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#process_execution_payload -pub fn partially_verify_execution_payload>( +pub fn partially_verify_execution_payload<'payload, T: EthSpec, Payload: AbstractExecPayload>( state: &BeaconState, - payload: &Payload, + payload: Payload::Ref<'payload>, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { if is_merge_transition_complete(state) { block_verify!( - payload.parent_hash() == state.latest_execution_payload_header()?.block_hash, + payload.parent_hash() == *state.latest_execution_payload_header()?.block_hash(), BlockProcessingError::ExecutionHashChainIncontiguous { - expected: state.latest_execution_payload_header()?.block_hash, + expected: *state.latest_execution_payload_header()?.block_hash(), found: payload.parent_hash(), } ); @@ -360,14 +360,33 @@ pub fn partially_verify_execution_payload>( /// Partially equivalent to the `process_execution_payload` function: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#process_execution_payload -pub fn process_execution_payload>( +pub fn process_execution_payload<'payload, T: EthSpec, Payload: AbstractExecPayload>( state: &mut BeaconState, - payload: &Payload, + payload: Payload::Ref<'payload>, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - partially_verify_execution_payload(state, payload, spec)?; + 
partially_verify_execution_payload::(state, payload, spec)?; - *state.latest_execution_payload_header_mut()? = payload.to_execution_payload_header(); + match state.latest_execution_payload_header_mut()? { + ExecutionPayloadHeaderRefMut::Merge(header_mut) => { + match payload.to_execution_payload_header() { + ExecutionPayloadHeader::Merge(header) => *header_mut = header, + _ => return Err(BlockProcessingError::IncorrectStateType), + } + } + ExecutionPayloadHeaderRefMut::Capella(header_mut) => { + match payload.to_execution_payload_header() { + ExecutionPayloadHeader::Capella(header) => *header_mut = header, + _ => return Err(BlockProcessingError::IncorrectStateType), + } + } + ExecutionPayloadHeaderRefMut::Eip4844(header_mut) => { + match payload.to_execution_payload_header() { + ExecutionPayloadHeader::Eip4844(header) => *header_mut = header, + _ => return Err(BlockProcessingError::IncorrectStateType), + } + } + } Ok(()) } @@ -380,7 +399,7 @@ pub fn process_execution_payload>( pub fn is_merge_transition_complete(state: &BeaconState) -> bool { state .latest_execution_payload_header() - .map(|header| *header != >::default()) + .map(|header| !header.is_default()) .unwrap_or(false) } /// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_block diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index b1f857b576b..33e1a19c516 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -232,6 +232,7 @@ pub fn process_attestations<'a, T: EthSpec, Payload: AbstractExecPayload>( } BeaconBlockBodyRef::Altair(_) | BeaconBlockBodyRef::Merge(_) + | BeaconBlockBodyRef::Capella(_) | BeaconBlockBodyRef::Eip4844(_) => { altair::process_attestations( state, diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 82a33acd73c..8b60c145c8b 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -8,7 +8,7 @@ use std::borrow::Cow; use tree_hash::TreeHash; use types::{ AbstractExecPayload, AggregateSignature, AttesterSlashing, BeaconBlockRef, BeaconState, - BeaconStateError, ChainSpec, DepositData, Domain, Epoch, EthSpec, ExecPayload, Fork, Hash256, + BeaconStateError, ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, InconsistentFork, IndexedAttestation, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, SigningData, Slot, SyncAggregate, diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index 5d357dc9660..565fae9db96 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -11,6 +11,7 @@ pub use weigh_justification_and_finalization::weigh_justification_and_finalizati pub mod altair; pub mod base; +pub mod capella; pub mod effective_balance_updates; pub mod epoch_processing_summary; pub mod errors; @@ -37,9 +38,8 @@ pub fn process_epoch( match state { BeaconState::Base(_) => base::process_epoch(state, spec), - BeaconState::Altair(_) | 
BeaconState::Merge(_) | BeaconState::Eip4844(_) => { - altair::process_epoch(state, spec) - } + BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_epoch(state, spec), + BeaconState::Capella(_) | BeaconState::Eip4844(_) => capella::process_epoch(state, spec), } } diff --git a/consensus/state_processing/src/per_epoch_processing/capella.rs b/consensus/state_processing/src/per_epoch_processing/capella.rs new file mode 100644 index 00000000000..4886b280535 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/capella.rs @@ -0,0 +1,81 @@ +use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error}; +use crate::per_epoch_processing::{ + altair, + effective_balance_updates::process_effective_balance_updates, + historical_roots_update::process_historical_roots_update, + resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, +}; +pub use full_withdrawals::process_full_withdrawals; +pub use partial_withdrawals::process_partial_withdrawals; +use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; + +pub mod full_withdrawals; +pub mod partial_withdrawals; + +pub fn process_epoch( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result, Error> { + // Ensure the committee caches are built. + state.build_committee_cache(RelativeEpoch::Previous, spec)?; + state.build_committee_cache(RelativeEpoch::Current, spec)?; + state.build_committee_cache(RelativeEpoch::Next, spec)?; + + // Pre-compute participating indices and total balances. + let participation_cache = altair::ParticipationCache::new(state, spec)?; + let sync_committee = state.current_sync_committee()?.clone(); + + // Justification and finalization. + let justification_and_finalization_state = + altair::process_justification_and_finalization(state, &participation_cache)?; + justification_and_finalization_state.apply_changes_to_state(state); + + altair::process_inactivity_updates(state, &participation_cache, spec)?; + + // Rewards and Penalties. + altair::process_rewards_and_penalties(state, &participation_cache, spec)?; + + // Registry Updates. + process_registry_updates(state, spec)?; + + // Slashings. + process_slashings( + state, + participation_cache.current_epoch_total_active_balance(), + spec, + )?; + + // Reset eth1 data votes. + process_eth1_data_reset(state)?; + + // Update effective balances with hysteresis (lag). + process_effective_balance_updates(state, spec)?; + + // Reset slashings + process_slashings_reset(state)?; + + // Set randao mix + process_randao_mixes_reset(state)?; + + // Set historical root accumulator + process_historical_roots_update(state)?; + + // Rotate current/previous epoch participation + altair::process_participation_flag_updates(state)?; + + altair::process_sync_committee_updates(state, spec)?; + + // Withdrawals + process_full_withdrawals(state)?; + + process_partial_withdrawals(state)?; + + // Rotate the epoch caches to suit the epoch transition. + state.advance_caches(spec)?; + + // FIXME: do we need a Capella variant for this? 
+ Ok(EpochProcessingSummary::Altair { + participation_cache, + sync_committee, + }) +} diff --git a/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs b/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs new file mode 100644 index 00000000000..d7747c1fe92 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs @@ -0,0 +1,10 @@ +use crate::EpochProcessingError; +use types::beacon_state::BeaconState; +use types::eth_spec::EthSpec; + +pub fn process_full_withdrawals( + state: &mut BeaconState, +) -> Result<(), EpochProcessingError> { + todo!("implement this"); + Ok(()) +} diff --git a/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs b/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs new file mode 100644 index 00000000000..2a576700815 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs @@ -0,0 +1,10 @@ +use crate::EpochProcessingError; +use types::beacon_state::BeaconState; +use types::eth_spec::EthSpec; + +pub fn process_partial_withdrawals( + state: &mut BeaconState, +) -> Result<(), EpochProcessingError> { + todo!("implement this"); + Ok(()) +} diff --git a/consensus/state_processing/src/upgrade.rs b/consensus/state_processing/src/upgrade.rs index fdf13c82818..01b65710564 100644 --- a/consensus/state_processing/src/upgrade.rs +++ b/consensus/state_processing/src/upgrade.rs @@ -1,5 +1,9 @@ pub mod altair; +pub mod capella; +pub mod eip4844; pub mod merge; pub use altair::upgrade_to_altair; +pub use capella::upgrade_to_capella; +pub use eip4844::upgrade_to_eip4844; pub use merge::upgrade_to_bellatrix; diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs new file mode 100644 index 00000000000..b2abd3be207 --- /dev/null +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -0,0 +1,74 @@ +use ssz_types::VariableList; +use std::mem; +use types::{BeaconState, BeaconStateCapella, BeaconStateError as Error, ChainSpec, EthSpec, Fork}; + +/// Transform a `Merge` state into an `Capella` state. +pub fn upgrade_to_capella( + pre_state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let epoch = pre_state.current_epoch(); + let pre = pre_state.as_merge_mut()?; + + // Where possible, use something like `mem::take` to move fields from behind the &mut + // reference. For other fields that don't have a good default value, use `clone`. + // + // Fixed size vectors get cloned because replacing them would require the same size + // allocation as cloning. 
+ let post = BeaconState::Capella(BeaconStateCapella { + // Versioning + genesis_time: pre.genesis_time, + genesis_validators_root: pre.genesis_validators_root, + slot: pre.slot, + fork: Fork { + previous_version: pre.fork.current_version, + current_version: spec.capella_fork_version, + epoch, + }, + // History + latest_block_header: pre.latest_block_header.clone(), + block_roots: pre.block_roots.clone(), + state_roots: pre.state_roots.clone(), + historical_roots: mem::take(&mut pre.historical_roots), + // Eth1 + eth1_data: pre.eth1_data.clone(), + eth1_data_votes: mem::take(&mut pre.eth1_data_votes), + eth1_deposit_index: pre.eth1_deposit_index, + // Registry + validators: mem::take(&mut pre.validators), + balances: mem::take(&mut pre.balances), + // Randomness + randao_mixes: pre.randao_mixes.clone(), + // Slashings + slashings: pre.slashings.clone(), + // `Participation + previous_epoch_participation: mem::take(&mut pre.previous_epoch_participation), + current_epoch_participation: mem::take(&mut pre.current_epoch_participation), + // Finality + justification_bits: pre.justification_bits.clone(), + previous_justified_checkpoint: pre.previous_justified_checkpoint, + current_justified_checkpoint: pre.current_justified_checkpoint, + finalized_checkpoint: pre.finalized_checkpoint, + // Inactivity + inactivity_scores: mem::take(&mut pre.inactivity_scores), + // Sync committees + current_sync_committee: pre.current_sync_committee.clone(), + next_sync_committee: pre.next_sync_committee.clone(), + // Execution + latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_capella(), + // Withdrawals + withdrawal_queue: VariableList::empty(), + next_withdrawal_index: 0, + next_partial_withdrawal_validator_index: 0, + // Caches + total_active_balance: pre.total_active_balance, + committee_caches: mem::take(&mut pre.committee_caches), + pubkey_cache: mem::take(&mut pre.pubkey_cache), + exit_cache: mem::take(&mut pre.exit_cache), + tree_hash_cache: mem::take(&mut pre.tree_hash_cache), + }); + + *pre_state = post; + + Ok(()) +} diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs new file mode 100644 index 00000000000..666d5b0c680 --- /dev/null +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -0,0 +1,73 @@ +use std::mem; +use types::{BeaconState, BeaconStateEip4844, BeaconStateError as Error, ChainSpec, EthSpec, Fork}; + +/// Transform a `Capella` state into an `Eip4844` state. +pub fn upgrade_to_eip4844( + pre_state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let epoch = pre_state.current_epoch(); + let pre = pre_state.as_capella_mut()?; + + // Where possible, use something like `mem::take` to move fields from behind the &mut + // reference. For other fields that don't have a good default value, use `clone`. + // + // Fixed size vectors get cloned because replacing them would require the same size + // allocation as cloning. 
+ let post = BeaconState::Eip4844(BeaconStateEip4844 { + // Versioning + genesis_time: pre.genesis_time, + genesis_validators_root: pre.genesis_validators_root, + slot: pre.slot, + fork: Fork { + previous_version: pre.fork.current_version, + current_version: spec.eip4844_fork_version, + epoch, + }, + // History + latest_block_header: pre.latest_block_header.clone(), + block_roots: pre.block_roots.clone(), + state_roots: pre.state_roots.clone(), + historical_roots: mem::take(&mut pre.historical_roots), + // Eth1 + eth1_data: pre.eth1_data.clone(), + eth1_data_votes: mem::take(&mut pre.eth1_data_votes), + eth1_deposit_index: pre.eth1_deposit_index, + // Registry + validators: mem::take(&mut pre.validators), + balances: mem::take(&mut pre.balances), + // Randomness + randao_mixes: pre.randao_mixes.clone(), + // Slashings + slashings: pre.slashings.clone(), + // `Participation + previous_epoch_participation: mem::take(&mut pre.previous_epoch_participation), + current_epoch_participation: mem::take(&mut pre.current_epoch_participation), + // Finality + justification_bits: pre.justification_bits.clone(), + previous_justified_checkpoint: pre.previous_justified_checkpoint, + current_justified_checkpoint: pre.current_justified_checkpoint, + finalized_checkpoint: pre.finalized_checkpoint, + // Inactivity + inactivity_scores: mem::take(&mut pre.inactivity_scores), + // Sync committees + current_sync_committee: pre.current_sync_committee.clone(), + next_sync_committee: pre.next_sync_committee.clone(), + // Execution + latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_eip4844(), + // Withdrawals + withdrawal_queue: mem::take(&mut pre.withdrawal_queue), + next_withdrawal_index: pre.next_withdrawal_index, + next_partial_withdrawal_validator_index: pre.next_partial_withdrawal_validator_index, + // Caches + total_active_balance: pre.total_active_balance, + committee_caches: mem::take(&mut pre.committee_caches), + pubkey_cache: mem::take(&mut pre.pubkey_cache), + exit_cache: mem::take(&mut pre.exit_cache), + tree_hash_cache: mem::take(&mut pre.tree_hash_cache), + }); + + *pre_state = post; + + Ok(()) +} diff --git a/consensus/state_processing/src/upgrade/merge.rs b/consensus/state_processing/src/upgrade/merge.rs index 2e4ed441a47..c172466248a 100644 --- a/consensus/state_processing/src/upgrade/merge.rs +++ b/consensus/state_processing/src/upgrade/merge.rs @@ -1,7 +1,7 @@ use std::mem; use types::{ BeaconState, BeaconStateError as Error, BeaconStateMerge, ChainSpec, EthSpec, - ExecutionPayloadHeader, Fork, + ExecutionPayloadHeaderMerge, Fork, }; /// Transform a `Altair` state into an `Merge` state. 
@@ -57,7 +57,7 @@ pub fn upgrade_to_bellatrix( current_sync_committee: pre.current_sync_committee.clone(), next_sync_committee: pre.next_sync_committee.clone(), // Execution - latest_execution_payload_header: >::default(), + latest_execution_payload_header: >::default(), // Caches total_active_balance: pre.total_active_balance, committee_caches: mem::take(&mut pre.committee_caches), diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 6438a0a7e1b..10596a769ef 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -718,6 +718,23 @@ impl BeaconState { } } + pub fn latest_execution_payload_header_mut( + &mut self, + ) -> Result, Error> { + match self { + BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), + BeaconState::Merge(state) => Ok(ExecutionPayloadHeaderRefMut::Merge( + &mut state.latest_execution_payload_header, + )), + BeaconState::Capella(state) => Ok(ExecutionPayloadHeaderRefMut::Capella( + &mut state.latest_execution_payload_header, + )), + BeaconState::Eip4844(state) => Ok(ExecutionPayloadHeaderRefMut::Eip4844( + &mut state.latest_execution_payload_header, + )), + } + } + /// Return `true` if the validator who produced `slot_signature` is eligible to aggregate. /// /// Spec v0.12.1 diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index f92ab956e1d..7f90f1f57bc 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -65,6 +65,73 @@ pub struct ExecutionPayloadHeader { pub withdrawals_root: Hash256, } +impl<'a, T: EthSpec> ExecutionPayloadHeaderRef<'a, T> { + // FIXME: maybe this could be a derived trait.. + pub fn is_default(self) -> bool { + match self { + ExecutionPayloadHeaderRef::Merge(header) => { + *header == ExecutionPayloadHeaderMerge::default() + } + ExecutionPayloadHeaderRef::Capella(header) => { + *header == ExecutionPayloadHeaderCapella::default() + } + ExecutionPayloadHeaderRef::Eip4844(header) => { + *header == ExecutionPayloadHeaderEip4844::default() + } + } + } +} + +impl ExecutionPayloadHeaderMerge { + pub fn upgrade_to_capella(&self) -> ExecutionPayloadHeaderCapella { + // TODO: if this is correct we should calculate and hardcode this.. + let empty_withdrawals_root = + VariableList::::empty().tree_hash_root(); + ExecutionPayloadHeaderCapella { + parent_hash: self.parent_hash, + fee_recipient: self.fee_recipient, + state_root: self.state_root, + receipts_root: self.receipts_root, + logs_bloom: self.logs_bloom.clone(), + prev_randao: self.prev_randao, + block_number: self.block_number, + gas_limit: self.gas_limit, + gas_used: self.gas_used, + timestamp: self.timestamp, + extra_data: self.extra_data.clone(), + base_fee_per_gas: self.base_fee_per_gas, + block_hash: self.block_hash, + transactions_root: self.transactions_root, + // FIXME: the spec doesn't seem to define what to do here.. 
+ withdrawals_root: empty_withdrawals_root, + } + } +} + +impl ExecutionPayloadHeaderCapella { + pub fn upgrade_to_eip4844(&self) -> ExecutionPayloadHeaderEip4844 { + ExecutionPayloadHeaderEip4844 { + parent_hash: self.parent_hash, + fee_recipient: self.fee_recipient, + state_root: self.state_root, + receipts_root: self.receipts_root, + logs_bloom: self.logs_bloom.clone(), + prev_randao: self.prev_randao, + block_number: self.block_number, + gas_limit: self.gas_limit, + gas_used: self.gas_used, + timestamp: self.timestamp, + extra_data: self.extra_data.clone(), + base_fee_per_gas: self.base_fee_per_gas, + // TODO: verify if this is correct + excess_blobs: 0, + block_hash: self.block_hash, + transactions_root: self.transactions_root, + withdrawals_root: self.withdrawals_root, + } + } +} + impl From> for ExecutionPayloadHeaderMerge { fn from(payload: ExecutionPayloadMerge) -> Self { Self { diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index d3a8fd698a9..db7a6463346 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -621,17 +621,6 @@ impl ExecPayload for BlindedPayload { // TODO: can this function be optimized? fn is_default(&self) -> bool { match self { - /* - Self::Merge(payload) => { - payload.execution_payload_header == ExecutionPayloadHeaderMerge::default() - } - Self::Capella(payload) => { - payload.execution_payload_header == ExecutionPayloadHeaderCapella::default() - } - Self::Eip4844(payload) => { - payload.execution_payload_header == ExecutionPayloadHeaderEip4844::default() - } - */ Self::Merge(payload) => payload.is_default(), Self::Capella(payload) => payload.is_default(), Self::Eip4844(payload) => payload.is_default(), From 137f230344c1d72d1c3ce4f7876a769a82293904 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Wed, 26 Oct 2022 15:15:26 -0400 Subject: [PATCH 052/263] Capella eip 4844 cleanup (#3652) * add capella gossip boiler plate * get everything compiling Co-authored-by: realbigsean * small cleanup * small cleanup * cargo fix + some test cleanup * improve block production * add fixme for potential panic Co-authored-by: Mark Mackey --- Cargo.lock | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 138 ++++-- .../src/beacon_fork_choice_store.rs | 4 +- .../beacon_chain/src/beacon_snapshot.rs | 6 +- beacon_node/beacon_chain/src/block_reward.rs | 4 +- .../beacon_chain/src/block_verification.rs | 3 +- beacon_node/beacon_chain/src/errors.rs | 1 + .../beacon_chain/src/execution_payload.rs | 61 +-- beacon_node/builder_client/src/lib.rs | 4 +- beacon_node/execution_layer/Cargo.toml | 1 + beacon_node/execution_layer/src/engine_api.rs | 128 ++++- .../execution_layer/src/engine_api/http.rs | 120 ++--- .../src/engine_api/json_structures.rs | 465 ++++++++++++------ beacon_node/execution_layer/src/engines.rs | 9 +- beacon_node/execution_layer/src/lib.rs | 349 ++++++++----- .../test_utils/execution_block_generator.rs | 108 ++-- .../src/test_utils/handle_rpc.rs | 8 +- .../src/test_utils/mock_builder.rs | 8 +- .../src/test_utils/mock_execution_layer.rs | 70 ++- beacon_node/http_api/src/publish_blocks.rs | 15 +- beacon_node/lighthouse_network/src/config.rs | 2 +- .../lighthouse_network/src/rpc/codec/base.rs | 5 +- .../src/rpc/codec/ssz_snappy.rs | 22 +- .../lighthouse_network/src/rpc/protocol.rs | 25 +- .../lighthouse_network/src/types/pubsub.rs | 8 +- .../lighthouse_network/tests/common/mod.rs | 5 +- .../beacon_processor/worker/gossip_methods.rs | 2 + common/eth2/src/lib.rs | 17 +- common/eth2/src/types.rs | 4 +- 
.../src/per_block_processing.rs | 4 +- .../capella/full_withdrawals.rs | 2 +- .../capella/partial_withdrawals.rs | 2 +- consensus/types/src/chain_spec.rs | 13 +- consensus/types/src/execution_payload.rs | 18 +- .../types/src/execution_payload_header.rs | 14 + consensus/types/src/fork_context.rs | 7 + consensus/types/src/lib.rs | 8 +- consensus/types/src/payload.rs | 116 ++++- lcli/src/create_payload_header.rs | 20 +- lcli/src/new_testnet.rs | 9 +- testing/ef_tests/src/cases/common.rs | 3 +- .../ef_tests/src/cases/epoch_processing.rs | 55 ++- testing/ef_tests/src/cases/fork.rs | 1 + .../src/cases/genesis_initialization.rs | 14 +- testing/ef_tests/src/cases/operations.rs | 44 +- testing/ef_tests/src/cases/transition.rs | 4 + .../src/test_rig.rs | 39 +- testing/simulator/src/checks.rs | 4 +- validator_client/src/block_service.rs | 8 +- validator_client/src/signing_method.rs | 8 +- .../src/signing_method/web3signer.rs | 12 +- validator_client/src/validator_store.rs | 16 +- 52 files changed, 1388 insertions(+), 626 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 653e0fc3d2f..d0831365d7c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2097,6 +2097,7 @@ dependencies = [ "ssz-rs", "state_processing", "strum", + "superstruct 0.6.0", "task_executor", "tempfile", "tokio", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 943e3a20f36..a937a6ea418 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -57,7 +57,8 @@ use crate::BeaconSnapshot; use crate::{metrics, BeaconChainError}; use eth2::types::{EventKind, SseBlock, SyncDuty}; use execution_layer::{ - BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, + BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, + PayloadAttributes, PayloadAttributesV1, PayloadAttributesV2, PayloadStatus, }; pub use fork_choice::CountUnrealized; use fork_choice::{ @@ -241,7 +242,7 @@ pub trait BeaconChainTypes: Send + Sync + 'static { } /// Used internally to split block production into discrete functions. -struct PartialBeaconBlock { +struct PartialBeaconBlock> { state: BeaconState, slot: Slot, proposer_index: u64, @@ -255,7 +256,7 @@ struct PartialBeaconBlock { deposits: Vec, voluntary_exits: Vec, sync_aggregate: Option>, - prepare_payload_handle: Option>, + prepare_payload_handle: Option>, } pub type BeaconForkChoice = ForkChoice< @@ -928,12 +929,12 @@ impl BeaconChain { // If we only have a blinded block, load the execution payload from the EL. let block_message = blinded_block.message(); - let execution_payload_header = &block_message + let execution_payload_header = block_message .execution_payload() .map_err(|_| Error::BlockVariantLacksExecutionPayload(*block_root))? - .execution_payload_header; + .to_execution_payload_header(); - let exec_block_hash = execution_payload_header.block_hash; + let exec_block_hash = execution_payload_header.block_hash(); let execution_payload = self .execution_layer @@ -944,10 +945,13 @@ impl BeaconChain { .map_err(|e| Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, e))? .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; + //FIXME(sean) avoid the clone by comparing refs to headers (`as_execution_payload_header` method ?) + let full_payload: FullPayload = execution_payload.clone().into(); + // Verify payload integrity. 
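The integrity check that follows rebuilds a header from the payload fetched from the execution layer and compares it against the header committed in the blinded block. A simplified, stand-alone sketch of that idea, with toy types and `DefaultHasher` standing in for SSZ tree hashing:

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

#[derive(Debug, Clone, PartialEq)]
struct Header {
    transactions_root: u64,
}

struct Payload {
    transactions: Vec<Vec<u8>>,
}

fn root_of(transactions: &[Vec<u8>]) -> u64 {
    // Placeholder commitment; the real code uses SSZ tree hashing.
    let mut hasher = DefaultHasher::new();
    transactions.hash(&mut hasher);
    hasher.finish()
}

fn header_from_payload(payload: &Payload) -> Header {
    Header {
        transactions_root: root_of(&payload.transactions),
    }
}

fn verify_payload_integrity(canonical: &Header, payload: &Payload) -> Result<(), String> {
    let reconstructed = header_from_payload(payload);
    if reconstructed == *canonical {
        Ok(())
    } else {
        Err(format!(
            "reconstructed root {:#x} does not match canonical root {:#x}",
            reconstructed.transactions_root, canonical.transactions_root
        ))
    }
}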
- let header_from_payload = ExecutionPayloadHeader::from(&execution_payload); - if header_from_payload != *execution_payload_header { - for txn in &execution_payload.transactions { + let header_from_payload = full_payload.to_execution_payload_header(); + if header_from_payload != execution_payload_header { + for txn in execution_payload.transactions() { debug!( self.log, "Reconstructed txn"; @@ -960,8 +964,8 @@ impl BeaconChain { exec_block_hash, canonical_payload_root: execution_payload_header.tree_hash_root(), reconstructed_payload_root: header_from_payload.tree_hash_root(), - canonical_transactions_root: execution_payload_header.transactions_root, - reconstructed_transactions_root: header_from_payload.transactions_root, + canonical_transactions_root: execution_payload_header.transactions_root(), + reconstructed_transactions_root: header_from_payload.transactions_root(), }); } @@ -3126,7 +3130,7 @@ impl BeaconChain { /// /// The produced block will not be inherently valid, it must be signed by a block producer. /// Block signing is out of the scope of this function and should be done by a separate program. - pub async fn produce_block>( + pub async fn produce_block + 'static>( self: &Arc, randao_reveal: Signature, slot: Slot, @@ -3142,7 +3146,9 @@ impl BeaconChain { } /// Same as `produce_block` but allowing for configuration of RANDAO-verification. - pub async fn produce_block_with_verification>( + pub async fn produce_block_with_verification< + Payload: AbstractExecPayload + 'static, + >( self: &Arc, randao_reveal: Signature, slot: Slot, @@ -3256,7 +3262,7 @@ impl BeaconChain { /// The provided `state_root_opt` should only ever be set to `Some` if the contained value is /// equal to the root of `state`. Providing this value will serve as an optimization to avoid /// performing a tree hash in some scenarios. - pub async fn produce_block_on_state>( + pub async fn produce_block_on_state + 'static>( self: &Arc, state: BeaconState, state_root_opt: Option, @@ -3291,16 +3297,17 @@ impl BeaconChain { // // Wait for the execution layer to return an execution payload (if one is required). let prepare_payload_handle = partial_beacon_block.prepare_payload_handle.take(); - let (execution_payload, kzg_commitments, blobs) = - if let Some(prepare_payload_handle) = prepare_payload_handle { - let (execution_payload, commitments, blobs) = prepare_payload_handle - .await - .map_err(BlockProductionError::TokioJoin)? - .ok_or(BlockProductionError::ShuttingDown)??; - (execution_payload, commitments, blobs) - } else { - return Err(BlockProductionError::MissingExecutionPayload); - }; + let execution_payload = if let Some(prepare_payload_handle) = prepare_payload_handle { + prepare_payload_handle + .await + .map_err(BlockProductionError::TokioJoin)? + .ok_or(BlockProductionError::ShuttingDown)?? + } else { + return Err(BlockProductionError::MissingExecutionPayload); + }; + + //FIXME(sean) waiting for the BN<>EE api for this to stabilize + let kzg_commitments = vec![]; // Part 3/3 (blocking) // @@ -3323,7 +3330,7 @@ impl BeaconChain { .map_err(BlockProductionError::TokioJoin)? } - fn produce_partial_beacon_block>( + fn produce_partial_beacon_block + 'static>( self: &Arc, mut state: BeaconState, state_root_opt: Option, @@ -3383,7 +3390,7 @@ impl BeaconChain { // allows it to run concurrently with things like attestation packing. 
let prepare_payload_handle = match &state { BeaconState::Base(_) | BeaconState::Altair(_) => None, - BeaconState::Merge(_) | BeaconState::Eip4844(_) => { + BeaconState::Merge(_) | BeaconState::Capella(_) | BeaconState::Eip4844(_) => { let prepare_payload_handle = get_execution_payload(self.clone(), &state, proposer_index, builder_params)?; Some(prepare_payload_handle) @@ -3556,10 +3563,10 @@ impl BeaconChain { }) } - fn complete_partial_beacon_block>( + fn complete_partial_beacon_block>( &self, partial_beacon_block: PartialBeaconBlock, - execution_payload: Payload, + block_contents: BlockProposalContents, kzg_commitments: Vec, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { @@ -3636,7 +3643,32 @@ impl BeaconChain { voluntary_exits: voluntary_exits.into(), sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, - execution_payload, + execution_payload: block_contents + .to_payload() + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + }, + }), + BeaconState::Capella(_) => BeaconBlock::Capella(BeaconBlockCapella { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: block_contents + .to_payload() + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, }, }), BeaconState::Eip4844(_) => BeaconBlock::Eip4844(BeaconBlockEip4844 { @@ -3655,7 +3687,10 @@ impl BeaconChain { voluntary_exits: voluntary_exits.into(), sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, - execution_payload, + execution_payload: block_contents + .to_payload() + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, //FIXME(sean) get blobs blob_kzg_commitments: VariableList::from(kzg_commitments), }, @@ -3973,16 +4008,33 @@ impl BeaconChain { return Ok(()); } - let payload_attributes = PayloadAttributes { - timestamp: self - .slot_clock - .start_of(prepare_slot) - .ok_or(Error::InvalidSlot(prepare_slot))? - .as_secs(), - prev_randao: head_random, - suggested_fee_recipient: execution_layer - .get_suggested_fee_recipient(proposer as u64) - .await, + let payload_attributes = match self.spec.fork_name_at_epoch(prepare_epoch) { + ForkName::Base | ForkName::Altair | ForkName::Merge => { + PayloadAttributes::V1(PayloadAttributesV1 { + timestamp: self + .slot_clock + .start_of(prepare_slot) + .ok_or(Error::InvalidSlot(prepare_slot))? + .as_secs(), + prev_randao: head_random, + suggested_fee_recipient: execution_layer + .get_suggested_fee_recipient(proposer as u64) + .await, + }) + } + ForkName::Capella | ForkName::Eip4844 => PayloadAttributes::V2(PayloadAttributesV2 { + timestamp: self + .slot_clock + .start_of(prepare_slot) + .ok_or(Error::InvalidSlot(prepare_slot))? 
+ .as_secs(), + prev_randao: head_random, + suggested_fee_recipient: execution_layer + .get_suggested_fee_recipient(proposer as u64) + .await, + //FIXME(sean) + withdrawals: vec![], + }), }; debug!( @@ -4122,7 +4174,7 @@ impl BeaconChain { { // We are a proposer, check for terminal_pow_block_hash if let Some(terminal_pow_block_hash) = execution_layer - .get_terminal_pow_block_hash(&self.spec, payload_attributes.timestamp) + .get_terminal_pow_block_hash(&self.spec, payload_attributes.timestamp()) .await .map_err(Error::ForkchoiceUpdate)? { @@ -4297,7 +4349,7 @@ impl BeaconChain { /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic` or has /// `ExecutionStatus::Invalid`. - pub fn is_optimistic_or_invalid_block>( + pub fn is_optimistic_or_invalid_block>( &self, block: &SignedBeaconBlock, ) -> Result { @@ -4323,7 +4375,7 @@ impl BeaconChain { /// /// There is a potential race condition when syncing where the block_root of `head_block` could /// be pruned from the fork choice store before being read. - pub fn is_optimistic_or_invalid_head_block>( + pub fn is_optimistic_or_invalid_head_block>( &self, head_block: &SignedBeaconBlock, ) -> Result { diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 4f6003fda1b..5369e168af9 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -14,7 +14,7 @@ use std::sync::Arc; use store::{Error as StoreError, HotColdDB, ItemStore}; use superstruct::superstruct; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, ExecPayload, + AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, Hash256, Slot, }; @@ -268,7 +268,7 @@ where self.time = slot } - fn on_verified_block>( + fn on_verified_block>( &mut self, _block: BeaconBlockRef, block_root: Hash256, diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index 8491622cb09..7d89df98293 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -1,20 +1,20 @@ use serde_derive::Serialize; use std::sync::Arc; use types::{ - beacon_state::CloneConfig, BeaconState, EthSpec, ExecPayload, FullPayload, Hash256, + beacon_state::CloneConfig, AbstractExecPayload, BeaconState, EthSpec, FullPayload, Hash256, SignedBeaconBlock, }; /// Represents some block and its associated state. Generally, this will be used for tracking the /// head, justified head and finalized head. #[derive(Clone, Serialize, PartialEq, Debug)] -pub struct BeaconSnapshot = FullPayload> { +pub struct BeaconSnapshot = FullPayload> { pub beacon_block: Arc>, pub beacon_block_root: Hash256, pub beacon_state: BeaconState, } -impl> BeaconSnapshot { +impl> BeaconSnapshot { /// Create a new checkpoint. 
pub fn new( beacon_block: Arc>, diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs index 3bddd2a5215..fd0cfc7e9bd 100644 --- a/beacon_node/beacon_chain/src/block_reward.rs +++ b/beacon_node/beacon_chain/src/block_reward.rs @@ -5,10 +5,10 @@ use state_processing::{ common::get_attesting_indices_from_state, per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards, }; -use types::{BeaconBlockRef, BeaconState, EthSpec, ExecPayload, Hash256}; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, EthSpec, Hash256}; impl BeaconChain { - pub fn compute_block_reward>( + pub fn compute_block_reward>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, block_root: Hash256, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index fbc9acdb2e7..0ebb8e84fbb 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -81,6 +81,7 @@ use std::time::Duration; use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; use tree_hash::TreeHash; +use types::ExecPayload; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch, EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, @@ -1235,7 +1236,7 @@ impl ExecutionPendingBlock { .message() .body() .execution_payload() - .map(|full_payload| full_payload.execution_payload.block_hash); + .map(|full_payload| full_payload.block_hash()); // Ensure the block is a candidate for optimistic import. if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index db521d4a3d1..da944c102ff 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -263,6 +263,7 @@ pub enum BlockProductionError { MissingExecutionPayload, TokioJoin(tokio::task::JoinError), BeaconChain(BeaconChainError), + InvalidPayloadFork, } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 16c758b377f..134e51e796e 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -12,7 +12,7 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; -use execution_layer::{BuilderParams, PayloadStatus}; +use execution_layer::{BlockProposalContents, BuilderParams, PayloadStatus}; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; @@ -24,14 +24,11 @@ use state_processing::per_block_processing::{ use std::sync::Arc; use tokio::task::JoinHandle; use tree_hash::TreeHash; -use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, Blob, EthSpec, ExecPayload, ExecutionBlockHash, - Hash256, KzgCommitment, SignedBeaconBlock, Slot, -}; +use types::*; -pub type PreparePayloadResult = - Result<(Payload, Vec, Vec>), BlockProductionError>; -pub type PreparePayloadHandle = JoinHandle>>; +pub type PreparePayloadResult = + Result, BlockProductionError>; +pub type PreparePayloadHandle = JoinHandle>>; #[derive(PartialEq)] pub enum AllowOptimisticImport { @@ -57,7 +54,7 @@ impl PayloadNotifier { // 
// We will duplicate these checks again during `per_block_processing`, however these checks // are cheap and doing them here ensures we protect the execution engine from junk. - partially_verify_execution_payload( + partially_verify_execution_payload::>( state, block.message().execution_payload()?, &chain.spec, @@ -107,7 +104,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( .ok_or(ExecutionPayloadError::NoExecutionConnection)?; let new_payload_response = execution_layer - .notify_new_payload(&execution_payload.execution_payload) + .notify_new_payload(&execution_payload.into()) .await; match new_payload_response { @@ -125,7 +122,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( "Invalid execution payload"; "validation_error" => ?validation_error, "latest_valid_hash" => ?latest_valid_hash, - "execution_block_hash" => ?execution_payload.execution_payload.block_hash, + "execution_block_hash" => ?execution_payload.block_hash(), "root" => ?block.tree_hash_root(), "graffiti" => block.body().graffiti().as_utf8_lossy(), "proposer_index" => block.proposer_index(), @@ -158,7 +155,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( chain.log, "Invalid execution payload block hash"; "validation_error" => ?validation_error, - "execution_block_hash" => ?execution_payload.execution_payload.block_hash, + "execution_block_hash" => ?execution_payload.block_hash(), "root" => ?block.tree_hash_root(), "graffiti" => block.body().graffiti().as_utf8_lossy(), "proposer_index" => block.proposer_index(), @@ -311,7 +308,7 @@ pub fn validate_execution_payload_for_gossip( } }; - if is_merge_transition_complete || execution_payload != &<_>::default() { + if is_merge_transition_complete || !execution_payload.is_default() { let expected_timestamp = chain .slot_clock .start_of(block.slot()) @@ -349,13 +346,13 @@ pub fn validate_execution_payload_for_gossip( /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal pub fn get_execution_payload< T: BeaconChainTypes, - Payload: ExecPayload + Default + Send + 'static, + Payload: AbstractExecPayload + 'static, >( chain: Arc>, state: &BeaconState, proposer_index: u64, builder_params: BuilderParams, -) -> Result, BlockProductionError> { +) -> Result, BlockProductionError> { // Compute all required values from the `state` now to avoid needing to pass it into a spawned // task. let spec = &chain.spec; @@ -364,7 +361,7 @@ pub fn get_execution_payload< let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; let random = *state.get_randao_mix(current_epoch)?; let latest_execution_payload_header_block_hash = - state.latest_execution_payload_header()?.block_hash; + state.latest_execution_payload_header()?.block_hash(); // Spawn a task to obtain the execution payload from the EL via a series of async calls. The // `join_handle` can be used to await the result of the function. 
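The comment above describes handing payload preparation to a spawned task and keeping the `JoinHandle` so the rest of block production can continue concurrently. A minimal sketch of that shape, assuming the tokio runtime (with the `rt` and `macros` features) and with hypothetical function names:

use tokio::task::JoinHandle;

// Placeholder for the real chain of async engine-API calls.
async fn prepare_payload_stub(parent_block_number: u64) -> Result<u64, String> {
    Ok(parent_block_number + 1)
}

// Returning the handle lets the caller keep doing other block-production work
// (attestation packing, etc.) while the payload is prepared in the background.
fn start_payload_preparation(parent_block_number: u64) -> JoinHandle<Result<u64, String>> {
    tokio::spawn(prepare_payload_stub(parent_block_number))
}

#[tokio::main]
async fn main() {
    let handle = start_payload_preparation(41);
    // ... other block-production work happens here ...
    let block_number = handle
        .await
        .expect("task panicked")
        .expect("payload preparation failed");
    assert_eq!(block_number, 42);
}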
@@ -414,13 +411,14 @@ pub async fn prepare_execution_payload( proposer_index: u64, latest_execution_payload_header_block_hash: ExecutionBlockHash, builder_params: BuilderParams, -) -> PreparePayloadResult +) -> Result, BlockProductionError> where T: BeaconChainTypes, - Payload: ExecPayload + Default, + Payload: AbstractExecPayload, { let current_epoch = builder_params.slot.epoch(T::EthSpec::slots_per_epoch()); let spec = &chain.spec; + let fork = spec.fork_name_at_slot::(builder_params.slot); let execution_layer = chain .execution_layer .as_ref() @@ -434,7 +432,7 @@ where if is_terminal_block_hash_set && !is_activation_epoch_reached { // Use the "empty" payload if there's a terminal block hash, but we haven't reached the // terminal block epoch yet. - return Ok(<_>::default()); + return Ok(BlockProposalContents::default_at_fork(fork)); } let terminal_pow_block_hash = execution_layer @@ -447,7 +445,7 @@ where } else { // If the merge transition hasn't occurred yet and the EL hasn't found the terminal // block, return an "empty" payload. - return Ok(<_>::default()); + return Ok(BlockProposalContents::default_at_fork(fork)); } } else { latest_execution_payload_header_block_hash @@ -474,8 +472,8 @@ where // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. // // This future is not executed here, it's up to the caller to await it. - let (execution_payload_result, blobs_result) = tokio::join!( - execution_layer.get_payload::( + let block_contents = execution_layer + .get_payload::( parent_hash, timestamp, random, @@ -483,20 +481,9 @@ where forkchoice_update_params, builder_params, &chain.spec, - ), - execution_layer.get_blob_bundles(parent_hash, timestamp, random, proposer_index) - ); - - let execution_payload = - execution_payload_result.map_err(BlockProductionError::GetPayloadFailed)?; - let blobs = blobs_result.map_err(BlockProductionError::GetPayloadFailed)?; - - if execution_payload.block_hash() != blobs.block_hash { - return Err(BlockProductionError::BlobPayloadMismatch { - blob_block_hash: blobs.block_hash, - payload_block_hash: execution_payload.block_hash(), - }); - } + ) + .await + .map_err(BlockProductionError::GetPayloadFailed)?; - Ok((execution_payload, blobs.kzgs, blobs.blobs)) + Ok(block_contents) } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 3517d06b15b..fecf6512ac8 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,6 +1,6 @@ use eth2::types::builder_bid::SignedBuilderBid; use eth2::types::{ - BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, ExecutionPayload, + AbstractExecPayload, BlindedPayload, EthSpec, ExecutionBlockHash, ExecutionPayload, ForkVersionedResponse, PublicKeyBytes, SignedBeaconBlock, SignedValidatorRegistrationData, Slot, }; @@ -160,7 +160,7 @@ impl BuilderHttpClient { } /// `GET /eth/v1/builder/header` - pub async fn get_builder_header>( + pub async fn get_builder_header>( &self, slot: Slot, parent_hash: ExecutionBlockHash, diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 770bc4cf8cc..e3a4a738762 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -26,6 +26,7 @@ eth2_ssz = "0.4.1" eth2_ssz_types = "0.2.2" eth2 = { path = "../../common/eth2" } state_processing = { path = "../../consensus/state_processing" } +superstruct = "0.6.0" lru = "0.7.1" exit-future = "0.2.0" tree_hash = "0.4.1" diff --git 
a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index e11ba52a850..f04a7237892 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,13 +1,16 @@ use crate::engines::ForkChoiceState; pub use ethers_core::types::Transaction; +use ethers_core::utils::rlp::{Decodable, Rlp}; use http::deposit_methods::RpcError; pub use json_structures::TransitionConfigurationV1; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; use strum::IntoStaticStr; +use superstruct::superstruct; +use types::Withdrawal; pub use types::{ - blob::Blob, Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, - FixedVector, Hash256, KzgCommitment, KzgProof, Uint256, VariableList, + Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, + Hash256, Uint256, VariableList, }; pub mod auth; @@ -38,7 +41,9 @@ pub enum Error { PayloadConversionLogicFlaw, DeserializeTransaction(ssz_types::Error), DeserializeTransactions(ssz_types::Error), + DeserializeWithdrawals(ssz_types::Error), BuilderApi(builder_client::Error), + IncorrectStateVariant, } impl From for Error { @@ -111,9 +116,18 @@ pub struct ExecutionBlock { pub timestamp: u64, } -/// Representation of an exection block with enough detail to reconstruct a payload. +/// Representation of an execution block with enough detail to reconstruct a payload. +#[superstruct( + variants(Merge, Capella, Eip4844), + variant_attributes( + derive(Clone, Debug, PartialEq, Serialize, Deserialize,), + serde(bound = "T: EthSpec", rename_all = "camelCase"), + ), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] +#[serde(bound = "T: EthSpec", rename_all = "camelCase", untagged)] pub struct ExecutionBlockWithTransactions { pub parent_hash: ExecutionBlockHash, #[serde(alias = "miner")] @@ -135,16 +149,120 @@ pub struct ExecutionBlockWithTransactions { #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, pub base_fee_per_gas: Uint256, + #[superstruct(only(Eip4844))] + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub excess_blobs: u64, #[serde(rename = "hash")] pub block_hash: ExecutionBlockHash, pub transactions: Vec, + #[superstruct(only(Capella, Eip4844))] + pub withdrawals: Vec, } -#[derive(Clone, Copy, Debug, PartialEq)] +impl From> for ExecutionBlockWithTransactions { + fn from(payload: ExecutionPayload) -> Self { + match payload { + ExecutionPayload::Merge(block) => Self::Merge(ExecutionBlockWithTransactionsMerge { + parent_hash: block.parent_hash, + fee_recipient: block.fee_recipient, + state_root: block.state_root, + receipts_root: block.receipts_root, + logs_bloom: block.logs_bloom, + prev_randao: block.prev_randao, + block_number: block.block_number, + gas_limit: block.gas_limit, + gas_used: block.gas_used, + timestamp: block.timestamp, + extra_data: block.extra_data, + base_fee_per_gas: block.base_fee_per_gas, + block_hash: block.block_hash, + transactions: block + .transactions + .iter() + .map(|tx| Transaction::decode(&Rlp::new(tx))) + .collect::, _>>() + .unwrap_or_else(|_| Vec::new()), + }), + ExecutionPayload::Capella(block) => { + Self::Capella(ExecutionBlockWithTransactionsCapella { + parent_hash: block.parent_hash, + fee_recipient: block.fee_recipient, + state_root: 
block.state_root, + receipts_root: block.receipts_root, + logs_bloom: block.logs_bloom, + prev_randao: block.prev_randao, + block_number: block.block_number, + gas_limit: block.gas_limit, + gas_used: block.gas_used, + timestamp: block.timestamp, + extra_data: block.extra_data, + base_fee_per_gas: block.base_fee_per_gas, + block_hash: block.block_hash, + transactions: block + .transactions + .iter() + .map(|tx| Transaction::decode(&Rlp::new(tx))) + .collect::, _>>() + .unwrap_or_else(|_| Vec::new()), + withdrawals: block.withdrawals.into(), + }) + } + ExecutionPayload::Eip4844(block) => { + Self::Eip4844(ExecutionBlockWithTransactionsEip4844 { + parent_hash: block.parent_hash, + fee_recipient: block.fee_recipient, + state_root: block.state_root, + receipts_root: block.receipts_root, + logs_bloom: block.logs_bloom, + prev_randao: block.prev_randao, + block_number: block.block_number, + gas_limit: block.gas_limit, + gas_used: block.gas_used, + timestamp: block.timestamp, + extra_data: block.extra_data, + base_fee_per_gas: block.base_fee_per_gas, + excess_blobs: block.excess_blobs, + block_hash: block.block_hash, + transactions: block + .transactions + .iter() + .map(|tx| Transaction::decode(&Rlp::new(tx))) + .collect::, _>>() + .unwrap_or_else(|_| Vec::new()), + withdrawals: block.withdrawals.into(), + }) + } + } + } +} + +/* +impl From> for ExecutionPayload { + fn from(block: ExecutionBlockWithTransactions) -> Self { + map_execution_block_with_transactions!(block, |inner, cons| { + let block = inner.into(); + cons(block) + }) + } +} + */ + +#[superstruct( + variants(V1, V2), + variant_attributes(derive(Clone, Debug, PartialEq),), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Clone, Debug, PartialEq)] pub struct PayloadAttributes { + #[superstruct(getter(copy))] pub timestamp: u64, + #[superstruct(getter(copy))] pub prev_randao: Hash256, + #[superstruct(getter(copy))] pub suggested_fee_recipient: Address, + #[superstruct(only(V2))] + pub withdrawals: Vec, } #[derive(Clone, Debug, PartialEq)] diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 9aa8289fc3a..6c96de5e153 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -7,6 +7,7 @@ use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; + use std::time::Duration; use types::EthSpec; @@ -46,7 +47,7 @@ pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Durati /// This error is returned during a `chainId` call by Geth. pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; -/// Contains methods to convert arbitary bytes to an ETH2 deposit contract object. +/// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. 
pub mod deposit_log { use ssz::Decode; use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message; @@ -644,7 +645,7 @@ impl HttpJsonRpc { &self, execution_payload: ExecutionPayload, ) -> Result { - let params = json!([JsonExecutionPayloadV1::from(execution_payload)]); + let params = json!([JsonExecutionPayload::from(execution_payload)]); let response: JsonPayloadStatusV1 = self .rpc_request(ENGINE_NEW_PAYLOAD_V1, params, ENGINE_NEW_PAYLOAD_TIMEOUT) @@ -659,7 +660,7 @@ impl HttpJsonRpc { ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); - let response: JsonExecutionPayloadV1 = self + let response: JsonExecutionPayload = self .rpc_request(ENGINE_GET_PAYLOAD_V1, params, ENGINE_GET_PAYLOAD_TIMEOUT) .await?; @@ -669,10 +670,10 @@ impl HttpJsonRpc { pub async fn get_blobs_bundle_v1( &self, payload_id: PayloadId, - ) -> Result, Error> { + ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); - let response: JsonBlobBundlesV1 = self + let response: JsonBlobBundles = self .rpc_request( ENGINE_GET_BLOBS_BUNDLE_V1, params, @@ -690,7 +691,7 @@ impl HttpJsonRpc { ) -> Result { let params = json!([ JsonForkChoiceStateV1::from(forkchoice_state), - payload_attributes.map(JsonPayloadAttributesV1::from) + payload_attributes.map(JsonPayloadAttributes::from) ]); let response: JsonForkchoiceUpdatedV1Response = self @@ -730,7 +731,10 @@ mod test { use std::future::Future; use std::str::FromStr; use std::sync::Arc; - use types::{MainnetEthSpec, Transactions, Unsigned, VariableList}; + use types::{ + AbstractExecPayload, ExecutionPayloadMerge, ForkName, FullPayload, MainnetEthSpec, + Transactions, Unsigned, VariableList, + }; struct Tester { server: MockServer, @@ -836,10 +840,10 @@ mod test { fn encode_transactions( transactions: Transactions, ) -> Result { - let ep: JsonExecutionPayloadV1 = JsonExecutionPayloadV1 { + let ep: JsonExecutionPayload = JsonExecutionPayload::V1(JsonExecutionPayloadV1 { transactions, ..<_>::default() - }; + }); let json = serde_json::to_value(&ep)?; Ok(json.get("transactions").unwrap().clone()) } @@ -866,8 +870,8 @@ mod test { json.as_object_mut() .unwrap() .insert("transactions".into(), transactions); - let ep: JsonExecutionPayloadV1 = serde_json::from_value(json)?; - Ok(ep.transactions) + let ep: JsonExecutionPayload = serde_json::from_value(json)?; + Ok(ep.transactions().clone()) } fn assert_transactions_serde( @@ -1018,11 +1022,11 @@ mod test { safe_block_hash: ExecutionBlockHash::repeat_byte(1), finalized_block_hash: ExecutionBlockHash::zero(), }, - Some(PayloadAttributes { + Some(PayloadAttributes::V1(PayloadAttributesV1 { timestamp: 5, prev_randao: Hash256::zero(), suggested_fee_recipient: Address::repeat_byte(0), - }), + })), ) .await; }, @@ -1053,11 +1057,11 @@ mod test { safe_block_hash: ExecutionBlockHash::repeat_byte(1), finalized_block_hash: ExecutionBlockHash::zero(), }, - Some(PayloadAttributes { + Some(PayloadAttributes::V1(PayloadAttributesV1 { timestamp: 5, prev_randao: Hash256::zero(), suggested_fee_recipient: Address::repeat_byte(0), - }), + })), ) .await }) @@ -1093,22 +1097,24 @@ mod test { .assert_request_equals( |client| async move { let _ = client - .new_payload_v1::(ExecutionPayload { - parent_hash: ExecutionBlockHash::repeat_byte(0), - fee_recipient: Address::repeat_byte(1), - state_root: Hash256::repeat_byte(1), - receipts_root: Hash256::repeat_byte(0), - logs_bloom: vec![1; 256].into(), - prev_randao: Hash256::repeat_byte(1), - block_number: 0, - gas_limit: 1, 
- gas_used: 2, - timestamp: 42, - extra_data: vec![].into(), - base_fee_per_gas: Uint256::from(1), - block_hash: ExecutionBlockHash::repeat_byte(1), - transactions: vec![].into(), - }) + .new_payload_v1::(ExecutionPayload::Merge( + ExecutionPayloadMerge { + parent_hash: ExecutionBlockHash::repeat_byte(0), + fee_recipient: Address::repeat_byte(1), + state_root: Hash256::repeat_byte(1), + receipts_root: Hash256::repeat_byte(0), + logs_bloom: vec![1; 256].into(), + prev_randao: Hash256::repeat_byte(1), + block_number: 0, + gas_limit: 1, + gas_used: 2, + timestamp: 42, + extra_data: vec![].into(), + base_fee_per_gas: Uint256::from(1), + block_hash: ExecutionBlockHash::repeat_byte(1), + transactions: vec![].into(), + }, + )) .await; }, json!({ @@ -1138,22 +1144,24 @@ mod test { Tester::new(false) .assert_auth_failure(|client| async move { client - .new_payload_v1::(ExecutionPayload { - parent_hash: ExecutionBlockHash::repeat_byte(0), - fee_recipient: Address::repeat_byte(1), - state_root: Hash256::repeat_byte(1), - receipts_root: Hash256::repeat_byte(0), - logs_bloom: vec![1; 256].into(), - prev_randao: Hash256::repeat_byte(1), - block_number: 0, - gas_limit: 1, - gas_used: 2, - timestamp: 42, - extra_data: vec![].into(), - base_fee_per_gas: Uint256::from(1), - block_hash: ExecutionBlockHash::repeat_byte(1), - transactions: vec![].into(), - }) + .new_payload_v1::(ExecutionPayload::Merge( + ExecutionPayloadMerge { + parent_hash: ExecutionBlockHash::repeat_byte(0), + fee_recipient: Address::repeat_byte(1), + state_root: Hash256::repeat_byte(1), + receipts_root: Hash256::repeat_byte(0), + logs_bloom: vec![1; 256].into(), + prev_randao: Hash256::repeat_byte(1), + block_number: 0, + gas_limit: 1, + gas_used: 2, + timestamp: 42, + extra_data: vec![].into(), + base_fee_per_gas: Uint256::from(1), + block_hash: ExecutionBlockHash::repeat_byte(1), + transactions: vec![].into(), + }, + )) .await }) .await; @@ -1236,11 +1244,11 @@ mod test { safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), finalized_block_hash: ExecutionBlockHash::zero(), }, - Some(PayloadAttributes { + Some(PayloadAttributes::V1(PayloadAttributesV1 { timestamp: 5, prev_randao: Hash256::zero(), suggested_fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), - }) + })) ) .await; }, @@ -1283,11 +1291,11 @@ mod test { safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), finalized_block_hash: ExecutionBlockHash::zero(), }, - Some(PayloadAttributes { + Some(PayloadAttributes::V1(PayloadAttributesV1 { timestamp: 5, prev_randao: Hash256::zero(), suggested_fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), - }) + })) ) .await .unwrap(); @@ -1346,7 +1354,7 @@ mod test { .await .unwrap(); - let expected = ExecutionPayload { + let expected = ExecutionPayload::Merge(ExecutionPayloadMerge { parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), @@ -1361,7 +1369,7 @@ mod test { base_fee_per_gas: Uint256::from(7), block_hash: ExecutionBlockHash::from_str("0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c").unwrap(), transactions: vec![].into(), - }; + }); 
assert_eq!(payload, expected); }, @@ -1371,7 +1379,7 @@ mod test { // engine_newPayloadV1 REQUEST validation |client| async move { let _ = client - .new_payload_v1::(ExecutionPayload { + .new_payload_v1::(ExecutionPayload::Merge(ExecutionPayloadMerge{ parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), @@ -1386,7 +1394,7 @@ mod test { base_fee_per_gas: Uint256::from(7), block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), transactions: vec![].into(), - }) + })) .await; }, json!({ @@ -1425,7 +1433,7 @@ mod test { })], |client| async move { let response = client - .new_payload_v1::(ExecutionPayload::default()) + .new_payload_v1::(FullPayload::default_at_fork(ForkName::Merge).into()) .await .unwrap(); diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index fde4f706a20..82d72ca560e 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,6 +1,12 @@ use super::*; use serde::{Deserialize, Serialize}; -use types::{Blob, EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList}; +use superstruct::superstruct; +use types::{ + Blob, EthSpec, ExecutionBlockHash, ExecutionPayloadEip4844, ExecutionPayloadHeaderEip4844, + FixedVector, KzgCommitment, Transaction, Unsigned, VariableList, +}; +use types::{ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge}; +use types::{ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderMerge}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -55,9 +61,19 @@ pub struct JsonPayloadIdResponse { pub payload_id: PayloadId, } -#[derive(Debug, PartialEq, Default, Serialize, Deserialize)] -#[serde(bound = "T: EthSpec", rename_all = "camelCase")] -pub struct JsonExecutionPayloadHeaderV1 { +// (V1,V2,V3) -> (Merge,Capella,EIP4844) +#[superstruct( + variants(V1, V2, V3), + variant_attributes( + derive(Debug, PartialEq, Default, Serialize, Deserialize,), + serde(bound = "T: EthSpec", rename_all = "camelCase"), + ), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(bound = "T: EthSpec", rename_all = "camelCase", untagged)] +pub struct JsonExecutionPayloadHeader { pub parent_hash: ExecutionBlockHash, pub fee_recipient: Address, pub state_root: Hash256, @@ -77,52 +93,144 @@ pub struct JsonExecutionPayloadHeaderV1 { pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[superstruct(only(V3))] + pub excess_blobs: u64, pub block_hash: ExecutionBlockHash, pub transactions_root: Hash256, + #[superstruct(only(V2, V3))] + pub withdrawals_root: Hash256, } -impl From> for ExecutionPayloadHeader { - fn from(e: JsonExecutionPayloadHeaderV1) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. 
- let JsonExecutionPayloadHeaderV1 { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions_root, - } = e; +impl From> for ExecutionPayloadHeader { + fn from(json_header: JsonExecutionPayloadHeader) -> Self { + match json_header { + JsonExecutionPayloadHeader::V1(v1) => Self::Merge(ExecutionPayloadHeaderMerge { + parent_hash: v1.parent_hash, + fee_recipient: v1.fee_recipient, + state_root: v1.state_root, + receipts_root: v1.receipts_root, + logs_bloom: v1.logs_bloom, + prev_randao: v1.prev_randao, + block_number: v1.block_number, + gas_limit: v1.gas_limit, + gas_used: v1.gas_used, + timestamp: v1.timestamp, + extra_data: v1.extra_data, + base_fee_per_gas: v1.base_fee_per_gas, + block_hash: v1.block_hash, + transactions_root: v1.transactions_root, + }), + JsonExecutionPayloadHeader::V2(v2) => Self::Capella(ExecutionPayloadHeaderCapella { + parent_hash: v2.parent_hash, + fee_recipient: v2.fee_recipient, + state_root: v2.state_root, + receipts_root: v2.receipts_root, + logs_bloom: v2.logs_bloom, + prev_randao: v2.prev_randao, + block_number: v2.block_number, + gas_limit: v2.gas_limit, + gas_used: v2.gas_used, + timestamp: v2.timestamp, + extra_data: v2.extra_data, + base_fee_per_gas: v2.base_fee_per_gas, + block_hash: v2.block_hash, + transactions_root: v2.transactions_root, + withdrawals_root: v2.withdrawals_root, + }), + JsonExecutionPayloadHeader::V3(v3) => Self::Eip4844(ExecutionPayloadHeaderEip4844 { + parent_hash: v3.parent_hash, + fee_recipient: v3.fee_recipient, + state_root: v3.state_root, + receipts_root: v3.receipts_root, + logs_bloom: v3.logs_bloom, + prev_randao: v3.prev_randao, + block_number: v3.block_number, + gas_limit: v3.gas_limit, + gas_used: v3.gas_used, + timestamp: v3.timestamp, + extra_data: v3.extra_data, + base_fee_per_gas: v3.base_fee_per_gas, + excess_blobs: v3.excess_blobs, + block_hash: v3.block_hash, + transactions_root: v3.transactions_root, + withdrawals_root: v3.withdrawals_root, + }), + } + } +} - Self { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions_root, +impl From> for JsonExecutionPayloadHeader { + fn from(header: ExecutionPayloadHeader) -> Self { + match header { + ExecutionPayloadHeader::Merge(merge) => Self::V1(JsonExecutionPayloadHeaderV1 { + parent_hash: merge.parent_hash, + fee_recipient: merge.fee_recipient, + state_root: merge.state_root, + receipts_root: merge.receipts_root, + logs_bloom: merge.logs_bloom, + prev_randao: merge.prev_randao, + block_number: merge.block_number, + gas_limit: merge.gas_limit, + gas_used: merge.gas_used, + timestamp: merge.timestamp, + extra_data: merge.extra_data, + base_fee_per_gas: merge.base_fee_per_gas, + block_hash: merge.block_hash, + transactions_root: merge.transactions_root, + }), + ExecutionPayloadHeader::Capella(capella) => Self::V2(JsonExecutionPayloadHeaderV2 { + parent_hash: capella.parent_hash, + fee_recipient: capella.fee_recipient, + state_root: capella.state_root, + receipts_root: capella.receipts_root, + logs_bloom: capella.logs_bloom, + prev_randao: capella.prev_randao, + block_number: capella.block_number, + gas_limit: capella.gas_limit, + gas_used: capella.gas_used, + timestamp: capella.timestamp, + extra_data: capella.extra_data, + base_fee_per_gas: capella.base_fee_per_gas, + 
block_hash: capella.block_hash, + transactions_root: capella.transactions_root, + withdrawals_root: capella.withdrawals_root, + }), + ExecutionPayloadHeader::Eip4844(eip4844) => Self::V3(JsonExecutionPayloadHeaderV3 { + parent_hash: eip4844.parent_hash, + fee_recipient: eip4844.fee_recipient, + state_root: eip4844.state_root, + receipts_root: eip4844.receipts_root, + logs_bloom: eip4844.logs_bloom, + prev_randao: eip4844.prev_randao, + block_number: eip4844.block_number, + gas_limit: eip4844.gas_limit, + gas_used: eip4844.gas_used, + timestamp: eip4844.timestamp, + extra_data: eip4844.extra_data, + base_fee_per_gas: eip4844.base_fee_per_gas, + excess_blobs: eip4844.excess_blobs, + block_hash: eip4844.block_hash, + transactions_root: eip4844.transactions_root, + withdrawals_root: eip4844.withdrawals_root, + }), } } } -#[derive(Debug, PartialEq, Default, Serialize, Deserialize)] -#[serde(bound = "T: EthSpec", rename_all = "camelCase")] -pub struct JsonExecutionPayloadV1 { +// (V1,V2, V2) -> (Merge,Capella,EIP4844) +#[superstruct( + variants(V1, V2, V3), + variant_attributes( + derive(Debug, PartialEq, Default, Serialize, Deserialize,), + serde(bound = "T: EthSpec", rename_all = "camelCase"), + ), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(bound = "T: EthSpec", rename_all = "camelCase", untagged)] +pub struct JsonExecutionPayload { pub parent_hash: ExecutionBlockHash, pub fee_recipient: Address, pub state_root: Hash256, @@ -142,136 +250,219 @@ pub struct JsonExecutionPayloadV1 { pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, + #[superstruct(only(V3))] + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub excess_blobs: u64, pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: VariableList, T::MaxTransactionsPerPayload>, + #[superstruct(only(V2, V3))] + pub withdrawals: VariableList, } -impl From> for JsonExecutionPayloadV1 { - fn from(e: ExecutionPayload) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. 
- let ExecutionPayload { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, - } = e; +impl From> for ExecutionPayload { + fn from(json_payload: JsonExecutionPayload) -> Self { + match json_payload { + JsonExecutionPayload::V1(v1) => Self::Merge(ExecutionPayloadMerge { + parent_hash: v1.parent_hash, + fee_recipient: v1.fee_recipient, + state_root: v1.state_root, + receipts_root: v1.receipts_root, + logs_bloom: v1.logs_bloom, + prev_randao: v1.prev_randao, + block_number: v1.block_number, + gas_limit: v1.gas_limit, + gas_used: v1.gas_used, + timestamp: v1.timestamp, + extra_data: v1.extra_data, + base_fee_per_gas: v1.base_fee_per_gas, + block_hash: v1.block_hash, + transactions: v1.transactions, + }), + JsonExecutionPayload::V2(v2) => Self::Capella(ExecutionPayloadCapella { + parent_hash: v2.parent_hash, + fee_recipient: v2.fee_recipient, + state_root: v2.state_root, + receipts_root: v2.receipts_root, + logs_bloom: v2.logs_bloom, + prev_randao: v2.prev_randao, + block_number: v2.block_number, + gas_limit: v2.gas_limit, + gas_used: v2.gas_used, + timestamp: v2.timestamp, + extra_data: v2.extra_data, + base_fee_per_gas: v2.base_fee_per_gas, + block_hash: v2.block_hash, + transactions: v2.transactions, + withdrawals: v2.withdrawals, + }), + JsonExecutionPayload::V3(v3) => Self::Eip4844(ExecutionPayloadEip4844 { + parent_hash: v3.parent_hash, + fee_recipient: v3.fee_recipient, + state_root: v3.state_root, + receipts_root: v3.receipts_root, + logs_bloom: v3.logs_bloom, + prev_randao: v3.prev_randao, + block_number: v3.block_number, + gas_limit: v3.gas_limit, + gas_used: v3.gas_used, + timestamp: v3.timestamp, + extra_data: v3.extra_data, + base_fee_per_gas: v3.base_fee_per_gas, + excess_blobs: v3.excess_blobs, + block_hash: v3.block_hash, + transactions: v3.transactions, + withdrawals: v3.withdrawals, + }), + } + } +} - Self { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, +impl From> for JsonExecutionPayload { + fn from(payload: ExecutionPayload) -> Self { + match payload { + ExecutionPayload::Merge(merge) => Self::V1(JsonExecutionPayloadV1 { + parent_hash: merge.parent_hash, + fee_recipient: merge.fee_recipient, + state_root: merge.state_root, + receipts_root: merge.receipts_root, + logs_bloom: merge.logs_bloom, + prev_randao: merge.prev_randao, + block_number: merge.block_number, + gas_limit: merge.gas_limit, + gas_used: merge.gas_used, + timestamp: merge.timestamp, + extra_data: merge.extra_data, + base_fee_per_gas: merge.base_fee_per_gas, + block_hash: merge.block_hash, + transactions: merge.transactions, + }), + ExecutionPayload::Capella(capella) => Self::V2(JsonExecutionPayloadV2 { + parent_hash: capella.parent_hash, + fee_recipient: capella.fee_recipient, + state_root: capella.state_root, + receipts_root: capella.receipts_root, + logs_bloom: capella.logs_bloom, + prev_randao: capella.prev_randao, + block_number: capella.block_number, + gas_limit: capella.gas_limit, + gas_used: capella.gas_used, + timestamp: capella.timestamp, + extra_data: capella.extra_data, + base_fee_per_gas: capella.base_fee_per_gas, + block_hash: capella.block_hash, + transactions: capella.transactions, + withdrawals: capella.withdrawals, + }), + ExecutionPayload::Eip4844(eip4844) => 
Self::V3(JsonExecutionPayloadV3 { + parent_hash: eip4844.parent_hash, + fee_recipient: eip4844.fee_recipient, + state_root: eip4844.state_root, + receipts_root: eip4844.receipts_root, + logs_bloom: eip4844.logs_bloom, + prev_randao: eip4844.prev_randao, + block_number: eip4844.block_number, + gas_limit: eip4844.gas_limit, + gas_used: eip4844.gas_used, + timestamp: eip4844.timestamp, + extra_data: eip4844.extra_data, + base_fee_per_gas: eip4844.base_fee_per_gas, + excess_blobs: eip4844.excess_blobs, + block_hash: eip4844.block_hash, + transactions: eip4844.transactions, + withdrawals: eip4844.withdrawals, + }), } } } -impl From> for ExecutionPayload { - fn from(e: JsonExecutionPayloadV1) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. - let JsonExecutionPayloadV1 { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, - } = e; +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct JsonWithdrawal { + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub index: u64, + pub address: Address, + #[serde(with = "eth2_serde_utils::u256_hex_be")] + pub amount: Uint256, +} +impl From for JsonWithdrawal { + fn from(withdrawal: Withdrawal) -> Self { Self { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, + index: withdrawal.index, + address: withdrawal.address, + amount: Uint256::from((withdrawal.amount as u128) * 1000000000u128), } } } +impl From for Withdrawal { + fn from(jw: JsonWithdrawal) -> Self { + Self { + index: jw.index, + address: jw.address, + //FIXME(sean) if EE gives us too large a number this panics + amount: (jw.amount / 1000000000).as_u64(), + } + } +} + +#[superstruct( + variants(V1, V2), + variant_attributes(derive(Clone, Debug, PartialEq, Serialize, Deserialize),), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] #[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct JsonPayloadAttributesV1 { +#[serde(rename_all = "camelCase", untagged)] +pub struct JsonPayloadAttributes { #[serde(with = "eth2_serde_utils::u64_hex_be")] pub timestamp: u64, pub prev_randao: Hash256, pub suggested_fee_recipient: Address, + #[superstruct(only(V2))] + pub withdrawals: Vec, } -impl From for JsonPayloadAttributesV1 { - fn from(p: PayloadAttributes) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. 
- let PayloadAttributes { - timestamp, - prev_randao, - suggested_fee_recipient, - } = p; - - Self { - timestamp, - prev_randao, - suggested_fee_recipient, +impl From for JsonPayloadAttributes { + fn from(payload_atributes: PayloadAttributes) -> Self { + match payload_atributes { + PayloadAttributes::V1(pa) => Self::V1(JsonPayloadAttributesV1 { + timestamp: pa.timestamp, + prev_randao: pa.prev_randao, + suggested_fee_recipient: pa.suggested_fee_recipient, + }), + PayloadAttributes::V2(pa) => Self::V2(JsonPayloadAttributesV2 { + timestamp: pa.timestamp, + prev_randao: pa.prev_randao, + suggested_fee_recipient: pa.suggested_fee_recipient, + withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), + }), } } } -impl From for PayloadAttributes { - fn from(j: JsonPayloadAttributesV1) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. - let JsonPayloadAttributesV1 { - timestamp, - prev_randao, - suggested_fee_recipient, - } = j; - - Self { - timestamp, - prev_randao, - suggested_fee_recipient, +impl From for PayloadAttributes { + fn from(json_payload_attributes: JsonPayloadAttributes) -> Self { + match json_payload_attributes { + JsonPayloadAttributes::V1(jpa) => Self::V1(PayloadAttributesV1 { + timestamp: jpa.timestamp, + prev_randao: jpa.prev_randao, + suggested_fee_recipient: jpa.suggested_fee_recipient, + }), + JsonPayloadAttributes::V2(jpa) => Self::V2(PayloadAttributesV2 { + timestamp: jpa.timestamp, + prev_randao: jpa.prev_randao, + suggested_fee_recipient: jpa.suggested_fee_recipient, + withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), + }), } } } #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(bound = "T: EthSpec", rename_all = "camelCase")] -pub struct JsonBlobBundlesV1 { +pub struct JsonBlobBundles { pub block_hash: ExecutionBlockHash, pub kzgs: Vec, pub blobs: Vec>, diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 339006c1ba6..da77bd9cf89 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -167,7 +167,7 @@ impl Engine { ) -> Result { let response = self .api - .forkchoice_updated_v1(forkchoice_state, payload_attributes) + .forkchoice_updated_v1(forkchoice_state, payload_attributes.clone()) .await?; if let Some(payload_id) = response.payload_id { @@ -347,13 +347,14 @@ impl Engine { } } +// TODO: revisit this - do we need to key on withdrawals as well here? impl PayloadIdCacheKey { fn new(state: &ForkChoiceState, attributes: &PayloadAttributes) -> Self { Self { head_block_hash: state.head_block_hash, - timestamp: attributes.timestamp, - prev_randao: attributes.prev_randao, - suggested_fee_recipient: attributes.suggested_fee_recipient, + timestamp: attributes.timestamp(), + prev_randao: attributes.prev_randao(), + suggested_fee_recipient: attributes.suggested_fee_recipient(), } } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 1078876ef70..08ccb0046d7 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -4,7 +4,7 @@ //! This crate only provides useful functionality for "The Merge", it does not provide any of the //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. 
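The `From<JsonWithdrawal> for Withdrawal` conversion in json_structures.rs above divides the Wei amount by 10^9 and then calls `as_u64()`, which, as the FIXME notes, panics if the execution engine reports an amount whose Gwei value does not fit in a `u64`. A minimal standalone sketch of a checked variant, assuming `Uint256` is `ethereum_types::U256` as elsewhere in the codebase; `wei_to_gwei_checked` is a hypothetical helper, not part of this patch:

    use ethereum_types::U256;

    /// Wei per Gwei, the same 1_000_000_000 factor used in the JsonWithdrawal
    /// conversions above.
    const WEI_PER_GWEI: u64 = 1_000_000_000;

    /// Checked Wei -> Gwei conversion: returns None instead of panicking when
    /// the quotient does not fit in a u64.
    fn wei_to_gwei_checked(amount_wei: U256) -> Option<u64> {
        let gwei = amount_wei / U256::from(WEI_PER_GWEI);
        if gwei > U256::from(u64::MAX) {
            None
        } else {
            // The bound check above guarantees as_u64() cannot panic here.
            Some(gwei.as_u64())
        }
    }

A caller could then map `None` onto an engine API error instead of aborting the process.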
-use crate::json_structures::JsonBlobBundlesV1; +use crate::json_structures::JsonBlobBundles; use crate::payload_cache::PayloadCache; use auth::{strip_prefix, Auth, JwtKey}; use builder_client::BuilderHttpClient; @@ -33,10 +33,12 @@ use tokio::{ time::sleep, }; use tokio_stream::wrappers::WatchStream; +use types::{AbstractExecPayload, Blob, ExecPayload, ExecutionPayloadEip4844, KzgCommitment}; use types::{ - BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, ForkName, + BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName, ProposerPreparationData, PublicKeyBytes, SignedBeaconBlock, Slot, }; +use types::{ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge}; mod engine_api; mod engines; @@ -88,6 +90,70 @@ impl From for Error { } } +pub enum BlockProposalContents> { + Payload(Payload), + PayloadAndBlobs { + payload: Payload, + kzg_commitments: Vec, + blobs: Vec>, + }, +} + +impl> BlockProposalContents { + pub fn payload(&self) -> &Payload { + match self { + Self::Payload(payload) => payload, + Self::PayloadAndBlobs { + payload, + kzg_commitments: _, + blobs: _, + } => payload, + } + } + pub fn to_payload(self) -> Payload { + match self { + Self::Payload(payload) => payload, + Self::PayloadAndBlobs { + payload, + kzg_commitments: _, + blobs: _, + } => payload, + } + } + pub fn kzg_commitments(&self) -> Option<&[KzgCommitment]> { + match self { + Self::Payload(_) => None, + Self::PayloadAndBlobs { + payload: _, + kzg_commitments, + blobs: _, + } => Some(kzg_commitments), + } + } + pub fn blobs(&self) -> Option<&[Blob]> { + match self { + Self::Payload(_) => None, + Self::PayloadAndBlobs { + payload: _, + kzg_commitments: _, + blobs, + } => Some(blobs), + } + } + pub fn default_at_fork(fork_name: ForkName) -> Self { + match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + BlockProposalContents::Payload(Payload::default_at_fork(fork_name)) + } + ForkName::Eip4844 => BlockProposalContents::PayloadAndBlobs { + payload: Payload::default_at_fork(fork_name), + blobs: vec![], + kzg_commitments: vec![], + }, + } + } +} + #[derive(Clone, PartialEq)] pub struct ProposerPreparationDataEntry { update_epoch: Epoch, @@ -536,7 +602,7 @@ impl ExecutionLayer { /// The result will be returned from the first node that returns successfully. No more nodes /// will be contacted. 
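The new `BlockProposalContents` enum above separates pre-4844 proposals (execution payload only) from EIP-4844 proposals (payload plus blobs and KZG commitments). A minimal self-contained sketch of the same shape with simplified stand-in types (the real code is generic over `EthSpec`/`Payload` and uses `KzgCommitment`/`Blob<T>`); it also shows that `..` rest patterns are an equivalent shorthand for the `kzg_commitments: _, blobs: _` fields spelled out in the patch:

    struct Payload(String);
    #[allow(dead_code)]
    struct KzgCommitment([u8; 48]);
    struct Blob(Vec<u8>);

    enum BlockProposalContents {
        Payload(Payload),
        PayloadAndBlobs {
            payload: Payload,
            kzg_commitments: Vec<KzgCommitment>,
            blobs: Vec<Blob>,
        },
    }

    impl BlockProposalContents {
        fn payload(&self) -> &Payload {
            match self {
                Self::Payload(payload) => payload,
                // `..` ignores the remaining fields.
                Self::PayloadAndBlobs { payload, .. } => payload,
            }
        }

        fn blobs(&self) -> Option<&[Blob]> {
            match self {
                Self::Payload(_) => None,
                Self::PayloadAndBlobs { blobs, .. } => Some(blobs),
            }
        }
    }

    /// Example consumer: both cases are handled uniformly, and the caller only
    /// branches when blobs are actually present.
    fn blob_count(contents: &BlockProposalContents) -> usize {
        contents.blobs().map_or(0, |blobs| blobs.len())
    }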
#[allow(clippy::too_many_arguments)] - pub async fn get_payload>( + pub async fn get_payload>( &self, parent_hash: ExecutionBlockHash, timestamp: u64, @@ -545,7 +611,7 @@ impl ExecutionLayer { forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, spec: &ChainSpec, - ) -> Result { + ) -> Result, Error> { let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; match Payload::block_type() { @@ -583,7 +649,7 @@ impl ExecutionLayer { } #[allow(clippy::too_many_arguments)] - async fn get_blinded_payload>( + async fn get_blinded_payload>( &self, parent_hash: ExecutionBlockHash, timestamp: u64, @@ -592,7 +658,7 @@ impl ExecutionLayer { forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, spec: &ChainSpec, - ) -> Result { + ) -> Result, Error> { if let Some(builder) = self.builder() { let slot = builder_params.slot; let pubkey = builder_params.pubkey; @@ -635,6 +701,7 @@ impl ExecutionLayer { Ok(local) } (Ok(Some(relay)), Ok(local)) => { + let local_payload = local.payload(); let is_signature_valid = relay.data.verify_signature(spec); let header = relay.data.message.header; @@ -668,14 +735,14 @@ impl ExecutionLayer { falling back to local execution engine." ); Ok(local) - } else if header.timestamp() != local.timestamp() { + } else if header.timestamp() != local_payload.timestamp() { warn!( self.log(), "Invalid timestamp from connected builder, \ falling back to local execution engine." ); Ok(local) - } else if header.block_number() != local.block_number() { + } else if header.block_number() != local_payload.block_number() { warn!( self.log(), "Invalid block number from connected builder, \ @@ -706,7 +773,8 @@ impl ExecutionLayer { not match, using it anyways." ); } - Ok(header) + //FIXME(sean) the builder API needs to be updated + Ok(BlockProposalContents::Payload(header)) } } (relay_result, Err(local_error)) => { @@ -715,7 +783,10 @@ impl ExecutionLayer { relay_result .map_err(Error::Builder)? .ok_or(Error::NoHeaderFromBuilder) - .map(|d| d.data.message.header) + .map(|d| { + //FIXME(sean) the builder API needs to be updated + BlockProposalContents::Payload(d.data.message.header) + }) } }; } @@ -743,14 +814,14 @@ impl ExecutionLayer { } /// Get a full payload without caching its result in the execution layer's payload cache. - async fn get_full_payload>( + async fn get_full_payload>( &self, parent_hash: ExecutionBlockHash, timestamp: u64, prev_randao: Hash256, suggested_fee_recipient: Address, forkchoice_update_params: ForkchoiceUpdateParameters, - ) -> Result { + ) -> Result, Error> { self.get_full_payload_with( parent_hash, timestamp, @@ -763,14 +834,14 @@ impl ExecutionLayer { } /// Get a full payload and cache its result in the execution layer's payload cache. 
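In the `get_blinded_payload` hunk above, the builder's header is now compared against the locally built payload (via the new getters) and the node falls back to the local execution engine on any mismatch. A standalone sketch of that comparison with stand-in types; the field set here (parent hash, prev randao, timestamp, block number) is a simplification of the checks in the real code:

    #[derive(Clone, Copy, PartialEq, Eq)]
    struct BlockHash([u8; 32]);

    /// Stand-in for the handful of header fields the fallback logic compares.
    struct PayloadSummary {
        parent_hash: BlockHash,
        prev_randao: [u8; 32],
        timestamp: u64,
        block_number: u64,
    }

    /// Returns Ok(()) if the builder header is consistent with the local
    /// payload, or the name of the first mismatching field so the caller can
    /// log it and fall back, mirroring the warn!-and-fall-back pattern above.
    fn check_builder_header(
        builder: &PayloadSummary,
        local: &PayloadSummary,
    ) -> Result<(), &'static str> {
        if builder.parent_hash != local.parent_hash {
            Err("parent_hash")
        } else if builder.prev_randao != local.prev_randao {
            Err("prev_randao")
        } else if builder.timestamp != local.timestamp {
            Err("timestamp")
        } else if builder.block_number != local.block_number {
            Err("block_number")
        } else {
            Ok(())
        }
    }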
- async fn get_full_payload_caching>( + async fn get_full_payload_caching>( &self, parent_hash: ExecutionBlockHash, timestamp: u64, prev_randao: Hash256, suggested_fee_recipient: Address, forkchoice_update_params: ForkchoiceUpdateParameters, - ) -> Result { + ) -> Result, Error> { self.get_full_payload_with( parent_hash, timestamp, @@ -782,51 +853,7 @@ impl ExecutionLayer { .await } - pub async fn get_blob_bundles( - &self, - parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - proposer_index: u64, - ) -> Result, Error> { - let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; - - debug!( - self.log(), - "Issuing engine_getBlobsBundle"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "prev_randao" => ?prev_randao, - "timestamp" => timestamp, - "parent_hash" => ?parent_hash, - ); - self.engine() - .request(|engine| async move { - let payload_id = if let Some(id) = engine - .get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient) - .await - { - // The payload id has been cached for this engine. - metrics::inc_counter_vec( - &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, - &[metrics::HIT], - ); - id - } else { - error!( - self.log(), - "Exec engine unable to produce blobs, did you call get_payload before?", - ); - return Err(ApiError::PayloadIdUnavailable); - }; - - engine.api.get_blobs_bundle_v1::(payload_id).await - }) - .await - .map_err(Box::new) - .map_err(Error::EngineError) - } - - async fn get_full_payload_with>( + async fn get_full_payload_with>( &self, parent_hash: ExecutionBlockHash, timestamp: u64, @@ -834,15 +861,7 @@ impl ExecutionLayer { suggested_fee_recipient: Address, forkchoice_update_params: ForkchoiceUpdateParameters, f: fn(&ExecutionLayer, &ExecutionPayload) -> Option>, - ) -> Result { - debug!( - self.log(), - "Issuing engine_getPayload"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "prev_randao" => ?prev_randao, - "timestamp" => timestamp, - "parent_hash" => ?parent_hash, - ); + ) -> Result, Error> { self.engine() .request(|engine| async move { let payload_id = if let Some(id) = engine @@ -871,11 +890,13 @@ impl ExecutionLayer { .finalized_hash .unwrap_or_else(ExecutionBlockHash::zero), }; - let payload_attributes = PayloadAttributes { + // FIXME: This will have to properly handle forks. To do that, + // withdrawals will need to be passed into this function + let payload_attributes = PayloadAttributes::V1(PayloadAttributesV1 { timestamp, prev_randao, suggested_fee_recipient, - }; + }); let response = engine .notify_forkchoice_updated( @@ -900,33 +921,64 @@ impl ExecutionLayer { } }; - engine - .api - .get_payload_v1::(payload_id) - .await - .map(|full_payload| { - if full_payload.fee_recipient != suggested_fee_recipient { - error!( - self.log(), - "Inconsistent fee recipient"; - "msg" => "The fee recipient returned from the Execution Engine differs \ - from the suggested_fee_recipient set on the beacon node. This could \ - indicate that fees are being diverted to another address. 
Please \ - ensure that the value of suggested_fee_recipient is set correctly and \ - that the Execution Engine is trusted.", - "fee_recipient" => ?full_payload.fee_recipient, - "suggested_fee_recipient" => ?suggested_fee_recipient, - ); - } - if f(self, &full_payload).is_some() { - warn!( - self.log(), - "Duplicate payload cached, this might indicate redundant proposal \ + let blob_fut = async { + //FIXME(sean) do a fork check here and return None otherwise + debug!( + self.log(), + "Issuing engine_getBlobsBundle"; + "suggested_fee_recipient" => ?suggested_fee_recipient, + "prev_randao" => ?prev_randao, + "timestamp" => timestamp, + "parent_hash" => ?parent_hash, + ); + Some(engine.api.get_blobs_bundle_v1::(payload_id).await) + }; + let payload_fut = async { + debug!( + self.log(), + "Issuing engine_getPayload"; + "suggested_fee_recipient" => ?suggested_fee_recipient, + "prev_randao" => ?prev_randao, + "timestamp" => timestamp, + "parent_hash" => ?parent_hash, + ); + engine.api.get_payload_v1::(payload_id).await + }; + + let (blob, payload) = tokio::join!(blob_fut, payload_fut); + let payload = payload.map(|full_payload| { + if full_payload.fee_recipient() != suggested_fee_recipient { + error!( + self.log(), + "Inconsistent fee recipient"; + "msg" => "The fee recipient returned from the Execution Engine differs \ + from the suggested_fee_recipient set on the beacon node. This could \ + indicate that fees are being diverted to another address. Please \ + ensure that the value of suggested_fee_recipient is set correctly and \ + that the Execution Engine is trusted.", + "fee_recipient" => ?full_payload.fee_recipient(), + "suggested_fee_recipient" => ?suggested_fee_recipient, + ); + } + if f(self, &full_payload).is_some() { + warn!( + self.log(), + "Duplicate payload cached, this might indicate redundant proposal \ attempts." - ); - } - full_payload.into() + ); + } + full_payload.into() + })?; + if let Some(blob) = blob.transpose()? { + // FIXME(sean) cache blobs + Ok(BlockProposalContents::PayloadAndBlobs { + payload, + blobs: blob.blobs, + kzg_commitments: blob.kzgs, }) + } else { + Ok(BlockProposalContents::Payload(payload)) + } }) .await .map_err(Box::new) @@ -958,9 +1010,9 @@ impl ExecutionLayer { trace!( self.log(), "Issuing engine_newPayload"; - "parent_hash" => ?execution_payload.parent_hash, - "block_hash" => ?execution_payload.block_hash, - "block_number" => execution_payload.block_number, + "parent_hash" => ?execution_payload.parent_hash(), + "block_hash" => ?execution_payload.block_hash(), + "block_number" => execution_payload.block_number(), ); let result = self @@ -975,7 +1027,7 @@ impl ExecutionLayer { ); } - process_payload_status(execution_payload.block_hash, result, self.log()) + process_payload_status(execution_payload.block_hash(), result, self.log()) .map_err(Box::new) .map_err(Error::EngineError) } @@ -1076,9 +1128,9 @@ impl ExecutionLayer { let payload_attributes = self.payload_attributes(next_slot, head_block_root).await; // Compute the "lookahead", the time between when the payload will be produced and now. 
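The rewritten `get_full_payload_with` above now issues `engine_getPayload` and `engine_getBlobsBundle` concurrently and folds the two results into `BlockProposalContents`. A minimal self-contained sketch of that join-then-combine pattern; the two async functions are placeholders for the engine API calls, and running it requires the `tokio` crate with the `macros` and `rt` features:

    async fn get_payload_stub(payload_id: u64) -> Result<String, String> {
        Ok(format!("payload-{payload_id}"))
    }

    async fn get_blobs_bundle_stub(payload_id: u64) -> Option<Result<Vec<u8>, String>> {
        // A pre-4844 fork would return None here (no blobs request issued).
        Some(Ok(vec![payload_id as u8]))
    }

    enum ProposalContents {
        Payload(String),
        PayloadAndBlobs { payload: String, blobs: Vec<u8> },
    }

    async fn fetch_proposal(payload_id: u64) -> Result<ProposalContents, String> {
        // Both requests run concurrently; join! resolves once both finish.
        let (blobs, payload) = tokio::join!(
            get_blobs_bundle_stub(payload_id),
            get_payload_stub(payload_id)
        );
        let payload = payload?;
        // Option<Result<_, _>> -> Result<Option<_>, _>, like `blob.transpose()?`
        // in the patch.
        match blobs.transpose()? {
            Some(blobs) => Ok(ProposalContents::PayloadAndBlobs { payload, blobs }),
            None => Ok(ProposalContents::Payload(payload)),
        }
    }

    #[tokio::main]
    async fn main() {
        match fetch_proposal(7).await {
            Ok(ProposalContents::PayloadAndBlobs { payload, blobs }) => {
                println!("{payload} with {} blobs", blobs.len());
            }
            Ok(ProposalContents::Payload(payload)) => println!("{payload} only"),
            Err(e) => eprintln!("error: {e}"),
        }
    }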
- if let Some(payload_attributes) = payload_attributes { + if let Some(ref payload_attributes) = payload_attributes { if let Ok(now) = SystemTime::now().duration_since(UNIX_EPOCH) { - let timestamp = Duration::from_secs(payload_attributes.timestamp); + let timestamp = Duration::from_secs(payload_attributes.timestamp()); if let Some(lookahead) = timestamp.checked_sub(now) { metrics::observe_duration( &metrics::EXECUTION_LAYER_PAYLOAD_ATTRIBUTES_LOOKAHEAD, @@ -1105,11 +1157,16 @@ impl ExecutionLayer { .set_latest_forkchoice_state(forkchoice_state) .await; + let payload_attributes_ref = &payload_attributes; let result = self .engine() .request(|engine| async move { engine - .notify_forkchoice_updated(forkchoice_state, payload_attributes, self.log()) + .notify_forkchoice_updated( + forkchoice_state, + payload_attributes_ref.clone(), + self.log(), + ) .await }) .await; @@ -1399,7 +1456,8 @@ impl ExecutionLayer { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH); if hash == ExecutionBlockHash::zero() { - return Ok(Some(ExecutionPayload::default())); + // FIXME: how to handle forks properly here? + return Ok(Some(ExecutionPayloadMerge::default().into())); } let block = if let Some(block) = engine.api.get_block_by_hash_with_txns::(hash).await? { @@ -1410,7 +1468,7 @@ impl ExecutionLayer { let transactions = VariableList::new( block - .transactions + .transactions() .into_iter() .map(|transaction| VariableList::new(transaction.rlp().to_vec())) .collect::>() @@ -1418,22 +1476,73 @@ impl ExecutionLayer { ) .map_err(ApiError::DeserializeTransactions)?; - Ok(Some(ExecutionPayload { - parent_hash: block.parent_hash, - fee_recipient: block.fee_recipient, - state_root: block.state_root, - receipts_root: block.receipts_root, - logs_bloom: block.logs_bloom, - prev_randao: block.prev_randao, - block_number: block.block_number, - gas_limit: block.gas_limit, - gas_used: block.gas_used, - timestamp: block.timestamp, - extra_data: block.extra_data, - base_fee_per_gas: block.base_fee_per_gas, - block_hash: block.block_hash, - transactions, - })) + let payload = match block { + ExecutionBlockWithTransactions::Merge(merge_block) => { + ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: merge_block.parent_hash, + fee_recipient: merge_block.fee_recipient, + state_root: merge_block.state_root, + receipts_root: merge_block.receipts_root, + logs_bloom: merge_block.logs_bloom, + prev_randao: merge_block.prev_randao, + block_number: merge_block.block_number, + gas_limit: merge_block.gas_limit, + gas_used: merge_block.gas_used, + timestamp: merge_block.timestamp, + extra_data: merge_block.extra_data, + base_fee_per_gas: merge_block.base_fee_per_gas, + block_hash: merge_block.block_hash, + transactions, + }) + } + ExecutionBlockWithTransactions::Capella(capella_block) => { + let withdrawals = VariableList::new(capella_block.withdrawals.clone()) + .map_err(ApiError::DeserializeWithdrawals)?; + + ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: capella_block.parent_hash, + fee_recipient: capella_block.fee_recipient, + state_root: capella_block.state_root, + receipts_root: capella_block.receipts_root, + logs_bloom: capella_block.logs_bloom, + prev_randao: capella_block.prev_randao, + block_number: capella_block.block_number, + gas_limit: capella_block.gas_limit, + gas_used: capella_block.gas_used, + timestamp: capella_block.timestamp, + extra_data: capella_block.extra_data, + base_fee_per_gas: capella_block.base_fee_per_gas, + block_hash: 
capella_block.block_hash, + transactions, + withdrawals, + }) + } + ExecutionBlockWithTransactions::Eip4844(eip4844_block) => { + let withdrawals = VariableList::new(eip4844_block.withdrawals.clone()) + .map_err(ApiError::DeserializeWithdrawals)?; + + ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { + parent_hash: eip4844_block.parent_hash, + fee_recipient: eip4844_block.fee_recipient, + state_root: eip4844_block.state_root, + receipts_root: eip4844_block.receipts_root, + logs_bloom: eip4844_block.logs_bloom, + prev_randao: eip4844_block.prev_randao, + block_number: eip4844_block.block_number, + gas_limit: eip4844_block.gas_limit, + gas_used: eip4844_block.gas_used, + timestamp: eip4844_block.timestamp, + extra_data: eip4844_block.extra_data, + base_fee_per_gas: eip4844_block.base_fee_per_gas, + excess_blobs: eip4844_block.excess_blobs, + block_hash: eip4844_block.block_hash, + transactions, + withdrawals, + }) + } + }; + + Ok(Some(payload)) } pub async fn propose_blinded_beacon_block( diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 3620a02dfbb..c492bcd5a55 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -12,7 +12,10 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -use types::{EthSpec, ExecutionBlockHash, ExecutionPayload, Hash256, Uint256}; +use types::{ + EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge, + Hash256, Uint256, +}; const GAS_LIMIT: u64 = 16384; const GAS_USED: u64 = GAS_LIMIT - 1; @@ -28,21 +31,21 @@ impl Block { pub fn block_number(&self) -> u64 { match self { Block::PoW(block) => block.block_number, - Block::PoS(payload) => payload.block_number, + Block::PoS(payload) => payload.block_number(), } } pub fn parent_hash(&self) -> ExecutionBlockHash { match self { Block::PoW(block) => block.parent_hash, - Block::PoS(payload) => payload.parent_hash, + Block::PoS(payload) => payload.parent_hash(), } } pub fn block_hash(&self) -> ExecutionBlockHash { match self { Block::PoW(block) => block.block_hash, - Block::PoS(payload) => payload.block_hash, + Block::PoS(payload) => payload.block_hash(), } } @@ -63,33 +66,18 @@ impl Block { timestamp: block.timestamp, }, Block::PoS(payload) => ExecutionBlock { - block_hash: payload.block_hash, - block_number: payload.block_number, - parent_hash: payload.parent_hash, + block_hash: payload.block_hash(), + block_number: payload.block_number(), + parent_hash: payload.parent_hash(), total_difficulty, - timestamp: payload.timestamp, + timestamp: payload.timestamp(), }, } } pub fn as_execution_block_with_tx(&self) -> Option> { match self { - Block::PoS(payload) => Some(ExecutionBlockWithTransactions { - parent_hash: payload.parent_hash, - fee_recipient: payload.fee_recipient, - state_root: payload.state_root, - receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom.clone(), - prev_randao: payload.prev_randao, - block_number: payload.block_number, - gas_limit: payload.gas_limit, - gas_used: payload.gas_used, - timestamp: payload.timestamp, - extra_data: payload.extra_data.clone(), - base_fee_per_gas: payload.base_fee_per_gas, - block_hash: payload.block_hash, - transactions: vec![], - }), + Block::PoS(payload) => Some(payload.clone().into()), Block::PoW(_) => None, } } @@ -283,7 
+271,9 @@ impl ExecutionBlockGenerator { // Update the block hash after modifying the block match &mut block { Block::PoW(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), - Block::PoS(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), + Block::PoS(b) => { + *b.block_hash_mut() = ExecutionBlockHash::from_root(b.tree_hash_root()) + } } self.block_hashes.insert(block_number, block.block_hash()); self.blocks.insert(block.block_hash(), block); @@ -295,7 +285,7 @@ impl ExecutionBlockGenerator { } pub fn new_payload(&mut self, payload: ExecutionPayload) -> PayloadStatusV1 { - let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash) { + let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash()) { parent } else { return PayloadStatusV1 { @@ -305,7 +295,7 @@ impl ExecutionBlockGenerator { }; }; - if payload.block_number != parent.block_number() + 1 { + if payload.block_number() != parent.block_number() + 1 { return PayloadStatusV1 { status: PayloadStatusV1Status::Invalid, latest_valid_hash: Some(parent.block_hash()), @@ -313,8 +303,8 @@ impl ExecutionBlockGenerator { }; } - let valid_hash = payload.block_hash; - self.pending_payloads.insert(payload.block_hash, payload); + let valid_hash = payload.block_hash(); + self.pending_payloads.insert(payload.block_hash(), payload); PayloadStatusV1 { status: PayloadStatusV1Status::Valid, @@ -379,24 +369,52 @@ impl ExecutionBlockGenerator { let id = payload_id_from_u64(self.next_payload_id); self.next_payload_id += 1; - let mut execution_payload = ExecutionPayload { - parent_hash: forkchoice_state.head_block_hash, - fee_recipient: attributes.suggested_fee_recipient, - receipts_root: Hash256::repeat_byte(42), - state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), - prev_randao: attributes.prev_randao, - block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, - gas_used: GAS_USED, - timestamp: attributes.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), - block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), + // FIXME: think about how to test different forks + let mut execution_payload = match &attributes { + PayloadAttributes::V1(pa) => ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + }), + PayloadAttributes::V2(pa) => { + ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + withdrawals: pa + .withdrawals + .iter() + .cloned() + 
.map(Into::into) + .collect::>() + .into(), + }) + } }; - execution_payload.block_hash = + *execution_payload.block_hash_mut() = ExecutionBlockHash::from_root(execution_payload.tree_hash_root()); self.payload_ids.insert(id, execution_payload); diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 975f09fa5e0..97f7ab12127 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -75,12 +75,12 @@ pub async fn handle_rpc( } } ENGINE_NEW_PAYLOAD_V1 => { - let request: JsonExecutionPayloadV1 = get_param(params, 0)?; + let request: JsonExecutionPayload = get_param(params, 0)?; let (static_response, should_import) = if let Some(mut response) = ctx.static_new_payload_response.lock().clone() { if response.status.status == PayloadStatusV1Status::Valid { - response.status.latest_valid_hash = Some(request.block_hash) + response.status.latest_valid_hash = Some(*request.block_hash()) } (Some(response.status), response.should_import) @@ -112,11 +112,11 @@ pub async fn handle_rpc( .get_payload(&id) .ok_or_else(|| format!("no payload for id {:?}", id))?; - Ok(serde_json::to_value(JsonExecutionPayloadV1::from(response)).unwrap()) + Ok(serde_json::to_value(JsonExecutionPayload::from(response)).unwrap()) } ENGINE_FORKCHOICE_UPDATED_V1 => { let forkchoice_state: JsonForkChoiceStateV1 = get_param(params, 0)?; - let payload_attributes: Option = get_param(params, 1)?; + let payload_attributes: Option = get_param(params, 1)?; let head_block_hash = forkchoice_state.head_block_hash; diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index b8f74c1c93f..1323ea3e406 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,5 +1,5 @@ use crate::test_utils::DEFAULT_JWT_SECRET; -use crate::{Config, ExecutionLayer, PayloadAttributes}; +use crate::{Config, ExecutionLayer, PayloadAttributes, PayloadAttributesV1}; use async_trait::async_trait; use eth2::types::{BlockId, StateId, ValidatorId}; use eth2::{BeaconNodeHttpClient, Timeouts}; @@ -287,11 +287,12 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .get_randao_mix(head_state.current_epoch()) .map_err(convert_err)?; - let payload_attributes = PayloadAttributes { + // FIXME: think about proper fork here + let payload_attributes = PayloadAttributes::V1(PayloadAttributesV1 { timestamp, prev_randao: *prev_randao, suggested_fee_recipient: fee_recipient, - }; + }); self.el .insert_proposer(slot, head_block_root, val_index, payload_attributes) @@ -315,6 +316,7 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { ) .await .map_err(convert_err)? 
+ .to_payload() .to_execution_payload_header(); let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 065abc93609..d0bc2785c30 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -1,7 +1,7 @@ use crate::{ test_utils::{ - MockServer, DEFAULT_BUILDER_THRESHOLD_WEI, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, - DEFAULT_TERMINAL_DIFFICULTY, + Block, MockServer, DEFAULT_BUILDER_THRESHOLD_WEI, DEFAULT_JWT_SECRET, + DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, }, Config, *, }; @@ -99,20 +99,37 @@ impl MockExecutionLayer { finalized_hash: None, }; + // FIXME: this is just best guess for how to deal with forks here.. + let payload_attributes = match &latest_execution_block { + &Block::PoS(ref pos_block) => match pos_block { + &ExecutionPayload::Merge(_) => PayloadAttributes::V1(PayloadAttributesV1 { + timestamp, + prev_randao, + suggested_fee_recipient: Address::repeat_byte(42), + }), + &ExecutionPayload::Capella(_) | &ExecutionPayload::Eip4844(_) => { + PayloadAttributes::V2(PayloadAttributesV2 { + timestamp, + prev_randao, + suggested_fee_recipient: Address::repeat_byte(42), + // FIXME: think about adding withdrawals here.. + withdrawals: vec![], + }) + } + }, + // I guess a PoW blocks means we should use Merge? + &Block::PoW(_) => PayloadAttributes::V1(PayloadAttributesV1 { + timestamp, + prev_randao, + suggested_fee_recipient: Address::repeat_byte(42), + }), + }; + // Insert a proposer to ensure the fork choice updated command works. let slot = Slot::new(0); let validator_index = 0; self.el - .insert_proposer( - slot, - head_block_root, - validator_index, - PayloadAttributes { - timestamp, - prev_randao, - suggested_fee_recipient: Address::repeat_byte(42), - }, - ) + .insert_proposer(slot, head_block_root, validator_index, payload_attributes) .await; self.el @@ -132,7 +149,7 @@ impl MockExecutionLayer { slot, chain_health: ChainHealth::Healthy, }; - let payload = self + let payload: ExecutionPayload = self .el .get_payload::>( parent_hash, @@ -145,12 +162,14 @@ impl MockExecutionLayer { ) .await .unwrap() - .execution_payload; - let block_hash = payload.block_hash; - assert_eq!(payload.parent_hash, parent_hash); - assert_eq!(payload.block_number, block_number); - assert_eq!(payload.timestamp, timestamp); - assert_eq!(payload.prev_randao, prev_randao); + .to_payload() + .into(); + + let block_hash = payload.block_hash(); + assert_eq!(payload.parent_hash(), parent_hash); + assert_eq!(payload.block_number(), block_number); + assert_eq!(payload.timestamp(), timestamp); + assert_eq!(payload.prev_randao(), prev_randao); // Ensure the payload cache is empty. 
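The mock execution layer hunk above (with its "best guess" FIXME) chooses between `PayloadAttributes` V1 and V2 by inspecting the latest block's payload variant. An alternative is to key the decision off the fork active at the proposal epoch. A standalone sketch with simplified stand-in types; the `withdrawals: Vec<u64>` field and the helper name are assumptions for illustration, not the Lighthouse API:

    #[derive(Clone, Copy)]
    enum ForkName { Base, Altair, Merge, Capella, Eip4844 }

    enum PayloadAttributes {
        V1 { timestamp: u64, prev_randao: [u8; 32] },
        V2 { timestamp: u64, prev_randao: [u8; 32], withdrawals: Vec<u64> },
    }

    /// Hypothetical helper: pick the payload-attributes version from the fork
    /// name rather than from the parent block's payload variant.
    fn attributes_for_fork(
        fork: ForkName,
        timestamp: u64,
        prev_randao: [u8; 32],
    ) -> PayloadAttributes {
        match fork {
            // From Capella onwards the engine API call carries withdrawals.
            ForkName::Capella | ForkName::Eip4844 => PayloadAttributes::V2 {
                timestamp,
                prev_randao,
                withdrawals: vec![],
            },
            // Earlier forks use the original V1 shape.
            ForkName::Base | ForkName::Altair | ForkName::Merge => {
                PayloadAttributes::V1 { timestamp, prev_randao }
            }
        }
    }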
assert!(self @@ -175,12 +194,13 @@ impl MockExecutionLayer { ) .await .unwrap() - .execution_payload_header; - assert_eq!(payload_header.block_hash, block_hash); - assert_eq!(payload_header.parent_hash, parent_hash); - assert_eq!(payload_header.block_number, block_number); - assert_eq!(payload_header.timestamp, timestamp); - assert_eq!(payload_header.prev_randao, prev_randao); + .to_payload(); + + assert_eq!(payload_header.block_hash(), block_hash); + assert_eq!(payload_header.parent_hash(), parent_hash); + assert_eq!(payload_header.block_number(), block_number); + assert_eq!(payload_header.timestamp(), timestamp); + assert_eq!(payload_header.prev_randao(), prev_randao); // Ensure the payload cache has the correct payload. assert_eq!( diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 3c50fb95a2d..09159d3c0c7 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -9,8 +9,8 @@ use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ - BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, - SignedBeaconBlock, + AbstractExecPayload, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, FullPayload, + Hash256, SignedBeaconBlock, }; use warp::Rejection; @@ -158,12 +158,17 @@ async fn reconstruct_block( // If the execution block hash is zero, use an empty payload. let full_payload = if payload_header.block_hash() == ExecutionBlockHash::zero() { - ExecutionPayload::default() + FullPayload::default_at_fork( + chain + .spec + .fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())), + ) + .into() // If we already have an execution payload with this transactions root cached, use it. } else if let Some(cached_payload) = el.get_payload_by_root(&payload_header.tree_hash_root()) { - info!(log, "Reconstructing a full block using a local payload"; "block_hash" => ?cached_payload.block_hash); + info!(log, "Reconstructing a full block using a local payload"; "block_hash" => ?cached_payload.block_hash()); cached_payload // Otherwise, this means we are attempting a blind block proposal. } else { @@ -176,7 +181,7 @@ async fn reconstruct_block( e )) })?; - info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash); + info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash()); full_payload }; diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 3cd2c542312..6dd917ca1b1 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -296,7 +296,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos match fork_context.current_fork() { // according to: https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#the-gossip-domain-gossipsub // the derivation of the message-id remains the same in the merge and for eip 4844. 
- ForkName::Altair | ForkName::Merge | ForkName::Eip4844 => { + ForkName::Altair | ForkName::Merge | ForkName::Capella | ForkName::Eip4844 => { let topic_len_bytes = topic_bytes.len().to_le_bytes(); let mut vec = Vec::with_capacity( prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), diff --git a/beacon_node/lighthouse_network/src/rpc/codec/base.rs b/beacon_node/lighthouse_network/src/rpc/codec/base.rs index 5fda9174cb3..164a7c025d9 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/base.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/base.rs @@ -193,16 +193,19 @@ mod tests { let mut chain_spec = Spec::default_spec(); let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); - let eip4844_fork_epoch = Epoch::new(3); + let capella_fork_epoch = Epoch::new(3); + let eip4844_fork_epoch = Epoch::new(4); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + chain_spec.capella_fork_epoch = Some(capella_fork_epoch); chain_spec.eip4844_fork_epoch = Some(eip4844_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Eip4844 => eip4844_fork_epoch.start_slot(Spec::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index c00ff64004e..3c40fdf8b3f 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -17,7 +17,8 @@ use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; use types::{ BlobsSidecar, EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockEip4844, SignedBeaconBlockMerge, + SignedBeaconBlockBase, SignedBeaconBlockCapella, SignedBeaconBlockEip4844, + SignedBeaconBlockMerge, }; use unsigned_varint::codec::Uvi; @@ -413,6 +414,10 @@ fn context_bytes( // Eip4844 context being `None` implies that "merge never happened". fork_context.to_context_bytes(ForkName::Eip4844) } + SignedBeaconBlock::Capella { .. } => { + // Capella context being `None` implies that "merge never happened". + fork_context.to_context_bytes(ForkName::Capella) + } SignedBeaconBlock::Merge { .. } => { // Merge context being `None` implies that "merge never happened". 
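The gossipsub config hunk above only extends the existing post-Altair message-id arm to cover Capella; the derivation itself is unchanged. For reference, a hedged standalone sketch of that derivation using the `sha2` crate, with the domain constant and byte layout taken from the consensus p2p spec rather than from this patch, so treat both as assumptions:

    use sha2::{Digest, Sha256};

    // MESSAGE_DOMAIN_VALID_SNAPPY from the consensus p2p spec (assumption;
    // the constant is not shown in this patch).
    const MESSAGE_DOMAIN_VALID_SNAPPY: [u8; 4] = [0x01, 0x00, 0x00, 0x00];

    /// Post-Altair gossipsub message id: the first 20 bytes of
    /// SHA256(domain || len(topic) as u64 LE || topic || decompressed data).
    fn altair_message_id(topic: &[u8], decompressed_data: &[u8]) -> [u8; 20] {
        let mut hasher = Sha256::new();
        hasher.update(MESSAGE_DOMAIN_VALID_SNAPPY);
        hasher.update((topic.len() as u64).to_le_bytes());
        hasher.update(topic);
        hasher.update(decompressed_data);
        let digest = hasher.finalize();
        let mut id = [0u8; 20];
        id.copy_from_slice(&digest[..20]);
        id
    }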
fork_context.to_context_bytes(ForkName::Merge) @@ -599,6 +604,11 @@ fn handle_v2_response( decoded_buffer, )?), )))), + ForkName::Capella => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( + decoded_buffer, + )?), + )))), ForkName::Eip4844 => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844::from_ssz_bytes( decoded_buffer, @@ -619,6 +629,11 @@ fn handle_v2_response( decoded_buffer, )?), )))), + ForkName::Capella => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( + decoded_buffer, + )?), + )))), ForkName::Eip4844 => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844::from_ssz_bytes( decoded_buffer, @@ -682,16 +697,19 @@ mod tests { let mut chain_spec = Spec::default_spec(); let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); - let eip4844_fork_epoch = Epoch::new(3); + let capella_fork_epoch = Epoch::new(3); + let eip4844_fork_epoch = Epoch::new(4); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + chain_spec.capella_fork_epoch = Some(capella_fork_epoch); chain_spec.eip4844_fork_epoch = Some(eip4844_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Eip4844 => eip4844_fork_epoch.start_slot(Spec::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 3812b6153b2..dc1110954a5 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -70,10 +70,18 @@ lazy_static! { pub static ref SIGNED_BEACON_BLOCK_MERGE_MAX: usize = // Size of a full altair block *SIGNED_BEACON_BLOCK_ALTAIR_MAX - + types::ExecutionPayload::::max_execution_payload_size() // adding max size of execution payload (~16gb) + + types::ExecutionPayload::::max_execution_payload_merge_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field - pub static ref SIGNED_BEACON_BLOCK_EIP4844_MAX: usize = *SIGNED_BEACON_BLOCK_MERGE_MAX + (48 * ::max_blobs_per_block()); + pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX: usize = *SIGNED_BEACON_BLOCK_ALTAIR_MAX + + types::ExecutionPayload::::max_execution_payload_capella_size() // adding max size of execution payload (~16gb) + + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field + + pub static ref SIGNED_BEACON_BLOCK_EIP4844_MAX: usize = *SIGNED_BEACON_BLOCK_ALTAIR_MAX + + types::ExecutionPayload::::max_execution_payload_eip4844_size() // adding max size of execution payload (~16gb) + + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload` + + (::ssz_fixed_len() * ::max_blobs_per_block()) + + ssz::BYTES_PER_LENGTH_OFFSET; // Length offset for the blob commitments field. pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize = VariableList::::from(Vec::::new()) @@ -114,6 +122,8 @@ lazy_static! 
{ pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M /// The maximum bytes that can be sent across the RPC post-merge. pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M + //FIXME(sean) should these be the same? +pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M pub(crate) const MAX_RPC_SIZE_POST_EIP4844: usize = 10 * 1_048_576; // 10M /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; @@ -126,9 +136,10 @@ const REQUEST_TIMEOUT: u64 = 15; /// Returns the maximum bytes that can be sent across the RPC. pub fn max_rpc_size(fork_context: &ForkContext) -> usize { match fork_context.current_fork() { - ForkName::Eip4844 => MAX_RPC_SIZE_POST_EIP4844, - ForkName::Merge => MAX_RPC_SIZE_POST_MERGE, ForkName::Altair | ForkName::Base => MAX_RPC_SIZE, + ForkName::Merge => MAX_RPC_SIZE_POST_MERGE, + ForkName::Capella => MAX_RPC_SIZE_POST_CAPELLA, + ForkName::Eip4844 => MAX_RPC_SIZE_POST_EIP4844, } } @@ -149,9 +160,13 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks *SIGNED_BEACON_BLOCK_MERGE_MAX, // Merge block is larger than base and altair blocks ), + ForkName::Capella => RpcLimits::new( + *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks + *SIGNED_BEACON_BLOCK_CAPELLA_MAX, // Capella block is larger than base, altair and merge blocks + ), ForkName::Eip4844 => RpcLimits::new( *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks - *SIGNED_BEACON_BLOCK_EIP4844_MAX, // EIP 4844 block is larger than base, altair and merge blocks + *SIGNED_BEACON_BLOCK_EIP4844_MAX, // EIP 4844 block is larger than all prior fork blocks ), } } diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 781ac9a12cb..6f6de68ecb4 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -12,8 +12,8 @@ use types::signed_blobs_sidecar::SignedBlobsSidecar; use types::{ Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, - SignedBeaconBlockEip4844, SignedBeaconBlockMerge, SignedContributionAndProof, - SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, + SignedBeaconBlockCapella, SignedBeaconBlockEip4844, SignedBeaconBlockMerge, + SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -175,6 +175,10 @@ impl PubsubMessage { SignedBeaconBlockEip4844::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), + Some(ForkName::Capella) => SignedBeaconBlock::::Capella( + SignedBeaconBlockCapella::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), None => { return Err(format!( "Unknown gossipsub fork digest: {:?}", diff --git a/beacon_node/lighthouse_network/tests/common/mod.rs b/beacon_node/lighthouse_network/tests/common/mod.rs index 7c63b6e51e0..7b3189504b4 100644 --- a/beacon_node/lighthouse_network/tests/common/mod.rs +++ b/beacon_node/lighthouse_network/tests/common/mod.rs @@ -32,16 +32,19 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext { let mut chain_spec = E::default_spec(); let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); - let eip4844_fork_epoch = Epoch::new(3); + 
let capella_fork_epoch = Epoch::new(3); + let eip4844_fork_epoch = Epoch::new(4); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + chain_spec.capella_fork_epoch = Some(capella_fork_epoch); chain_spec.eip4844_fork_epoch = Some(eip4844_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(E::slots_per_epoch()), + ForkName::Capella => capella_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Eip4844 => eip4844_fork_epoch.start_slot(E::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index debfdff3d71..592b92b0380 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -706,6 +706,7 @@ impl Worker { seen_timestamp: Duration, ) { match self.chain.verify_blobs_sidecar_for_gossip(&blob) { + //FIXME(sean) Ok(verified_sidecar) => { // Register with validator monitor // Propagate @@ -2309,6 +2310,7 @@ impl Worker { return; } + &BlobError::UnknownValidator(_) => todo!(), } } } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 52e8922ccbe..81d6e7051bb 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -586,7 +586,7 @@ impl BeaconNodeHttpClient { /// `POST beacon/blocks` /// /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blocks>( + pub async fn post_beacon_blocks>( &self, block: &SignedBeaconBlock, ) -> Result<(), Error> { @@ -627,7 +627,7 @@ impl BeaconNodeHttpClient { /// `POST beacon/blinded_blocks` /// /// Returns `Ok(None)` on a 404 error. 
- pub async fn post_beacon_blinded_blocks>( + pub async fn post_beacon_blinded_blocks>( &self, block: &SignedBeaconBlock, ) -> Result<(), Error> { @@ -1248,7 +1248,7 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blocks/{slot}` - pub async fn get_validator_blocks>( + pub async fn get_validator_blocks>( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1259,7 +1259,7 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blocks/{slot}` - pub async fn get_validator_blocks_modular>( + pub async fn get_validator_blocks_modular>( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1291,7 +1291,7 @@ impl BeaconNodeHttpClient { } /// `GET v1/validator/blocks_and_blobs/{slot}` - pub async fn get_validator_blocks_and_blobs>( + pub async fn get_validator_blocks_and_blobs>( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1317,7 +1317,7 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blinded_blocks/{slot}` - pub async fn get_validator_blinded_blocks>( + pub async fn get_validator_blinded_blocks>( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1333,7 +1333,10 @@ impl BeaconNodeHttpClient { } /// `GET v1/validator/blinded_blocks/{slot}` - pub async fn get_validator_blinded_blocks_modular>( + pub async fn get_validator_blinded_blocks_modular< + T: EthSpec, + Payload: AbstractExecPayload, + >( &self, slot: Slot, randao_reveal: &SignatureBytes, diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 2ac4fcf49be..71e5b40e0ac 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1111,8 +1111,8 @@ pub struct LivenessResponseData { } #[derive(PartialEq, Debug, Serialize, Deserialize)] -#[serde(bound = "T: EthSpec, Payload: ExecPayload")] -pub struct BlocksAndBlobs> { +#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload")] +pub struct BlocksAndBlobs> { pub block: BeaconBlock, pub blobs: Vec>, pub kzg_aggregate_proof: KzgProof, diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 71ceb71c140..84005183f22 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -326,9 +326,9 @@ pub fn partially_verify_execution_payload<'payload, T: EthSpec, Payload: Abstrac ) -> Result<(), BlockProcessingError> { if is_merge_transition_complete(state) { block_verify!( - payload.parent_hash() == *state.latest_execution_payload_header()?.block_hash(), + payload.parent_hash() == state.latest_execution_payload_header()?.block_hash(), BlockProcessingError::ExecutionHashChainIncontiguous { - expected: *state.latest_execution_payload_header()?.block_hash(), + expected: state.latest_execution_payload_header()?.block_hash(), found: payload.parent_hash(), } ); diff --git a/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs b/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs index d7747c1fe92..13bbc67581d 100644 --- a/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs +++ b/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs @@ -3,7 +3,7 @@ use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; pub fn process_full_withdrawals( - state: &mut BeaconState, + _state: &mut BeaconState, ) -> Result<(), EpochProcessingError> { todo!("implement this"); Ok(()) diff --git a/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs 
b/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs index 2a576700815..a648766e218 100644 --- a/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs +++ b/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs @@ -3,7 +3,7 @@ use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; pub fn process_partial_withdrawals( - state: &mut BeaconState, + _state: &mut BeaconState, ) -> Result<(), EpochProcessingError> { todo!("implement this"); Ok(()) diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index af22823d2ef..69346ec1969 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -253,11 +253,14 @@ impl ChainSpec { pub fn fork_name_at_epoch(&self, epoch: Epoch) -> ForkName { match self.eip4844_fork_epoch { Some(fork_epoch) if epoch >= fork_epoch => ForkName::Eip4844, - _ => match self.bellatrix_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, - _ => match self.altair_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, - _ => ForkName::Base, + _ => match self.capella_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Capella, + _ => match self.bellatrix_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, + _ => match self.altair_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, + _ => ForkName::Base, + }, }, }, } diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index f68e563e416..05dadb43467 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -43,28 +43,40 @@ pub type Transactions = VariableList< #[tree_hash(enum_behaviour = "transparent")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct ExecutionPayload { + #[superstruct(getter(copy))] pub parent_hash: ExecutionBlockHash, + #[superstruct(getter(copy))] pub fee_recipient: Address, + #[superstruct(getter(copy))] pub state_root: Hash256, + #[superstruct(getter(copy))] pub receipts_root: Hash256, #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] pub logs_bloom: FixedVector, + #[superstruct(getter(copy))] pub prev_randao: Hash256, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub block_number: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub gas_limit: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub gas_used: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::quoted_u256")] + #[superstruct(getter(copy))] pub base_fee_per_gas: Uint256, #[superstruct(only(Eip4844))] #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub excess_blobs: u64, + #[superstruct(getter(copy))] pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, @@ -94,8 +106,7 @@ impl ExecutionPayload { // Max size of variable length `transactions` field + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) // Max size of variable length `withdrawals` field - // TODO: check this - + (T::max_withdrawals_per_payload() * 
(ssz::BYTES_PER_LENGTH_OFFSET + ::ssz_fixed_len())) + + (T::max_withdrawals_per_payload() * ::ssz_fixed_len()) } #[allow(clippy::integer_arithmetic)] @@ -108,8 +119,7 @@ impl ExecutionPayload { // Max size of variable length `transactions` field + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) // Max size of variable length `withdrawals` field - // TODO: check this - + (T::max_withdrawals_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + ::ssz_fixed_len())) + + (T::max_withdrawals_per_payload() * ::ssz_fixed_len()) } pub fn blob_txns_iter(&self) -> Iter<'_, Transaction> { diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 7f90f1f57bc..41aa2f6d2d5 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -37,31 +37,45 @@ use BeaconStateError; #[ssz(enum_behaviour = "transparent")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct ExecutionPayloadHeader { + #[superstruct(getter(copy))] pub parent_hash: ExecutionBlockHash, + #[superstruct(getter(copy))] pub fee_recipient: Address, + #[superstruct(getter(copy))] pub state_root: Hash256, + #[superstruct(getter(copy))] pub receipts_root: Hash256, #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] pub logs_bloom: FixedVector, + #[superstruct(getter(copy))] pub prev_randao: Hash256, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub block_number: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub gas_limit: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub gas_used: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::quoted_u256")] + #[superstruct(getter(copy))] pub base_fee_per_gas: Uint256, #[superstruct(only(Eip4844))] #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub excess_blobs: u64, + #[superstruct(getter(copy))] pub block_hash: ExecutionBlockHash, + #[superstruct(getter(copy))] pub transactions_root: Hash256, #[superstruct(only(Capella, Eip4844))] + #[superstruct(getter(copy))] pub withdrawals_root: Hash256, } diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index bbd5f6beae2..f5221dd913d 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -47,6 +47,13 @@ impl ForkContext { )); } + if spec.capella_fork_epoch.is_some() { + fork_to_digest.push(( + ForkName::Capella, + ChainSpec::compute_fork_digest(spec.capella_fork_version, genesis_validators_root), + )); + } + if spec.eip4844_fork_epoch.is_some() { fork_to_digest.push(( ForkName::Eip4844, diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 527b54f478e..66db2077305 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -1,5 +1,4 @@ //! 
Ethereum 2.0 types -#![feature(generic_associated_types)] // Required for big type-level numbers #![recursion_limit = "128"] // Clippy lint set up @@ -153,7 +152,7 @@ pub use crate::participation_list::ParticipationList; pub use crate::payload::{ AbstractExecPayload, BlindedPayload, BlindedPayloadCapella, BlindedPayloadEip4844, BlindedPayloadMerge, BlindedPayloadRef, BlockType, ExecPayload, FullPayload, - FullPayloadCapella, FullPayloadEip4844, FullPayloadMerge, FullPayloadRef, + FullPayloadCapella, FullPayloadEip4844, FullPayloadMerge, FullPayloadRef, OwnedExecPayload, }; pub use crate::pending_attestation::PendingAttestation; pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset}; @@ -164,8 +163,9 @@ pub use crate::selection_proof::SelectionProof; pub use crate::shuffling_id::AttestationShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{ - SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockEip4844, - SignedBeaconBlockHash, SignedBeaconBlockMerge, SignedBlindedBeaconBlock, + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, + SignedBeaconBlockEip4844, SignedBeaconBlockHash, SignedBeaconBlockMerge, + SignedBlindedBeaconBlock, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_blobs_sidecar::SignedBlobsSidecar; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index db7a6463346..736f06e22b7 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -57,7 +57,13 @@ impl OwnedExecPayload for P where } pub trait AbstractExecPayload: - ExecPayload + Sized + From> + TryFrom> + ExecPayload + + Sized + + From> + + TryFrom> + + TryInto + + TryInto + + TryInto { type Ref<'a>: ExecPayload + Copy @@ -77,6 +83,8 @@ pub trait AbstractExecPayload: + Into + From> + TryFrom>; + + fn default_at_fork(fork_name: ForkName) -> Self; } #[superstruct( @@ -128,6 +136,22 @@ impl From> for ExecutionPayload { } } +impl<'a, T: EthSpec> From> for ExecutionPayload { + fn from(full_payload: FullPayloadRef<'a, T>) -> Self { + match full_payload { + FullPayloadRef::Merge(payload) => { + ExecutionPayload::Merge(payload.execution_payload.clone()) + } + FullPayloadRef::Capella(payload) => { + ExecutionPayload::Capella(payload.execution_payload.clone()) + } + FullPayloadRef::Eip4844(payload) => { + ExecutionPayload::Eip4844(payload.execution_payload.clone()) + } + } + } +} + impl ExecPayload for FullPayloadMerge { fn block_type() -> BlockType { BlockType::Full @@ -425,6 +449,51 @@ impl AbstractExecPayload for FullPayload { type Merge = FullPayloadMerge; type Capella = FullPayloadCapella; type Eip4844 = FullPayloadEip4844; + + fn default_at_fork(fork_name: ForkName) -> Self { + match fork_name { + //FIXME(sean) error handling + ForkName::Base | ForkName::Altair => panic!(), + ForkName::Merge => FullPayloadMerge::default().into(), + ForkName::Capella => FullPayloadCapella::default().into(), + ForkName::Eip4844 => FullPayloadEip4844::default().into(), + } + } +} + +//FIXME(sean) fix errors +impl TryInto> for FullPayload { + type Error = (); + + fn try_into(self) -> Result, Self::Error> { + match self { + FullPayload::Merge(payload) => Ok(payload), + FullPayload::Capella(_) => Err(()), + FullPayload::Eip4844(_) => Err(()), + } + } +} +impl TryInto> for FullPayload { + type Error = (); + + fn try_into(self) -> Result, Self::Error> { + match self { + FullPayload::Merge(_) => Err(()), + 
FullPayload::Capella(payload) => Ok(payload), + FullPayload::Eip4844(_) => Err(()), + } + } +} +impl TryInto> for FullPayload { + type Error = (); + + fn try_into(self) -> Result, Self::Error> { + match self { + FullPayload::Merge(_) => Err(()), + FullPayload::Capella(_) => Err(()), + FullPayload::Eip4844(payload) => Ok(payload), + } + } } impl From> for FullPayload { @@ -855,6 +924,51 @@ impl AbstractExecPayload for BlindedPayload { type Merge = BlindedPayloadMerge; type Capella = BlindedPayloadCapella; type Eip4844 = BlindedPayloadEip4844; + + fn default_at_fork(fork_name: ForkName) -> Self { + match fork_name { + //FIXME(sean) error handling + ForkName::Base | ForkName::Altair => panic!(), + ForkName::Merge => BlindedPayloadMerge::default().into(), + ForkName::Capella => BlindedPayloadCapella::default().into(), + ForkName::Eip4844 => BlindedPayloadEip4844::default().into(), + } + } +} + +//FIXME(sean) fix errors +impl TryInto> for BlindedPayload { + type Error = (); + + fn try_into(self) -> Result, Self::Error> { + match self { + BlindedPayload::Merge(payload) => Ok(payload), + BlindedPayload::Capella(_) => Err(()), + BlindedPayload::Eip4844(_) => Err(()), + } + } +} +impl TryInto> for BlindedPayload { + type Error = (); + + fn try_into(self) -> Result, Self::Error> { + match self { + BlindedPayload::Merge(_) => Err(()), + BlindedPayload::Capella(payload) => Ok(payload), + BlindedPayload::Eip4844(_) => Err(()), + } + } +} +impl TryInto> for BlindedPayload { + type Error = (); + + fn try_into(self) -> Result, Self::Error> { + match self { + BlindedPayload::Merge(_) => Err(()), + BlindedPayload::Capella(_) => Err(()), + BlindedPayload::Eip4844(payload) => Ok(payload), + } + } } impl Default for FullPayloadMerge { diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs index 9e91f425a73..ebda9361650 100644 --- a/lcli/src/create_payload_header.rs +++ b/lcli/src/create_payload_header.rs @@ -4,7 +4,7 @@ use ssz::Encode; use std::fs::File; use std::io::Write; use std::time::{SystemTime, UNIX_EPOCH}; -use types::{EthSpec, ExecutionPayloadHeader}; +use types::{EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderMerge}; pub fn run(matches: &ArgMatches) -> Result<(), String> { let eth1_block_hash = parse_required(matches, "execution-block-hash")?; @@ -18,14 +18,16 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { let gas_limit = parse_required(matches, "gas-limit")?; let file_name = matches.value_of("file").ok_or("No file supplied")?; - let execution_payload_header: ExecutionPayloadHeader = ExecutionPayloadHeader { - gas_limit, - base_fee_per_gas, - timestamp: genesis_time, - block_hash: eth1_block_hash, - prev_randao: eth1_block_hash.into_root(), - ..ExecutionPayloadHeader::default() - }; + //FIXME(sean) + let execution_payload_header: ExecutionPayloadHeader = + ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge { + gas_limit, + base_fee_per_gas, + timestamp: genesis_time, + block_hash: eth1_block_hash, + prev_randao: eth1_block_hash.into_root(), + ..ExecutionPayloadHeaderMerge::default() + }); let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?; let bytes = execution_payload_header.as_ssz_bytes(); file.write_all(bytes.as_slice()) diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 5254ff5a62e..69356045724 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -10,6 +10,7 @@ use std::path::PathBuf; use std::time::{SystemTime, UNIX_EPOCH}; use types::{ 
test_utils::generate_deterministic_keypairs, Address, Config, EthSpec, ExecutionPayloadHeader, + ExecutionPayloadHeaderMerge, }; pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { @@ -75,7 +76,9 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul .map_err(|e| format!("Unable to open {}: {}", filename, e))?; file.read_to_end(&mut bytes) .map_err(|e| format!("Unable to read {}: {}", filename, e))?; - ExecutionPayloadHeader::::from_ssz_bytes(bytes.as_slice()) + //FIXME(sean) + ExecutionPayloadHeaderMerge::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Merge) .map_err(|e| format!("SSZ decode failed: {:?}", e)) }) .transpose()?; @@ -84,9 +87,9 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul execution_payload_header.as_ref() { let eth1_block_hash = - parse_optional(matches, "eth1-block-hash")?.unwrap_or(payload.block_hash); + parse_optional(matches, "eth1-block-hash")?.unwrap_or(payload.block_hash()); let genesis_time = - parse_optional(matches, "genesis-time")?.unwrap_or(payload.timestamp); + parse_optional(matches, "genesis-time")?.unwrap_or(payload.timestamp()); (eth1_block_hash, genesis_time) } else { let eth1_block_hash = parse_required(matches, "eth1-block-hash").map_err(|_| { diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index b673fa19094..da34a886034 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -78,6 +78,7 @@ pub fn previous_fork(fork_name: ForkName) -> ForkName { ForkName::Base => ForkName::Base, ForkName::Altair => ForkName::Base, ForkName::Merge => ForkName::Altair, // TODO: Check this when tests are released.. - ForkName::Eip4844 => ForkName::Merge, // TODO: Check this when tests are released.. + ForkName::Capella => ForkName::Merge, // TODO: Check this when tests are released.. + ForkName::Eip4844 => ForkName::Capella, // TODO: Check this when tests are released.. 
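The `previous_fork` mapping above now threads Capella between Merge and Eip4844. A standalone sketch of the same ordering, with a simplified `ForkName` stand-in and a small check that walking backwards from Eip4844 visits every earlier fork exactly once:

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum ForkName {
        Base,
        Altair,
        Merge,
        Capella,
        Eip4844,
    }

    fn previous_fork(fork: ForkName) -> ForkName {
        match fork {
            // Base is its own predecessor, which terminates any backwards walk.
            ForkName::Base | ForkName::Altair => ForkName::Base,
            ForkName::Merge => ForkName::Altair,
            ForkName::Capella => ForkName::Merge,
            ForkName::Eip4844 => ForkName::Capella,
        }
    }

    fn main() {
        let mut fork = ForkName::Eip4844;
        let mut chain = vec![fork];
        while previous_fork(fork) != fork {
            fork = previous_fork(fork);
            chain.push(fork);
        }
        assert_eq!(
            chain,
            vec![
                ForkName::Eip4844,
                ForkName::Capella,
                ForkName::Merge,
                ForkName::Altair,
                ForkName::Base
            ]
        );
    }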
} } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 7ad3406c7a8..26a05715b91 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -97,7 +97,10 @@ impl EpochTransition for JustificationAndFinalization { justification_and_finalization_state.apply_changes_to_state(state); Ok(()) } - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Eip4844(_) => { + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => { let justification_and_finalization_state = altair::process_justification_and_finalization( state, @@ -118,13 +121,14 @@ impl EpochTransition for RewardsAndPenalties { validator_statuses.process_attestations(state)?; base::process_rewards_and_penalties(state, &mut validator_statuses, spec) } - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Eip4844(_) => { - altair::process_rewards_and_penalties( - state, - &altair::ParticipationCache::new(state, spec).unwrap(), - spec, - ) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => altair::process_rewards_and_penalties( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + spec, + ), } } } @@ -147,7 +151,10 @@ impl EpochTransition for Slashings { spec, )?; } - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Eip4844(_) => { + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => { process_slashings( state, altair::ParticipationCache::new(state, spec) @@ -205,9 +212,10 @@ impl EpochTransition for SyncCommitteeUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Eip4844(_) => { - altair::process_sync_committee_updates(state, spec) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => altair::process_sync_committee_updates(state, spec), } } } @@ -216,13 +224,14 @@ impl EpochTransition for InactivityUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Eip4844(_) => { - altair::process_inactivity_updates( - state, - &altair::ParticipationCache::new(state, spec).unwrap(), - spec, - ) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => altair::process_inactivity_updates( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + spec, + ), } } } @@ -231,9 +240,10 @@ impl EpochTransition for ParticipationFlagUpdates { fn run(state: &mut BeaconState, _: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Eip4844(_) => { - altair::process_participation_flag_updates(state) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => altair::process_participation_flag_updates(state), } } } @@ -280,6 +290,7 @@ impl> Case for EpochProcessing { } // No phase0 tests for Altair and later. 
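The epoch-processing arms that follow group every post-Altair state variant (Altair, Merge, and now Capella and Eip4844) into a single branch, so adding a fork means extending one arm rather than duplicating logic. A toy sketch of that grouping, with `BeaconStateKind` as an illustrative stand-in for `BeaconState<T>`:

    enum BeaconStateKind {
        Base,
        Altair,
        Merge,
        Capella,
        Eip4844,
    }

    // Everything from Altair onward shares the Altair-style epoch processing.
    fn uses_altair_epoch_processing(state: &BeaconStateKind) -> bool {
        match state {
            BeaconStateKind::Base => false,
            BeaconStateKind::Altair
            | BeaconStateKind::Merge
            | BeaconStateKind::Capella
            | BeaconStateKind::Eip4844 => true,
        }
    }

    fn main() {
        assert!(!uses_altair_epoch_processing(&BeaconStateKind::Base));
        assert!(uses_altair_epoch_processing(&BeaconStateKind::Capella));
    }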
ForkName::Altair | ForkName::Merge => T::name() != "participation_record_updates", + ForkName::Capella => false, // TODO: revisit when tests are out ForkName::Eip4844 => false, // TODO: revisit when tests are out } } diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index d348323e18f..bcc76b85502 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -62,6 +62,7 @@ impl Case for ForkTest { ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| result_state), ForkName::Merge => upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state), ForkName::Eip4844 => panic!("eip4844 not supported"), + ForkName::Capella => panic!("capella not supported"), }; compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/testing/ef_tests/src/cases/genesis_initialization.rs b/testing/ef_tests/src/cases/genesis_initialization.rs index dc139ac0b9f..d447fbd8f47 100644 --- a/testing/ef_tests/src/cases/genesis_initialization.rs +++ b/testing/ef_tests/src/cases/genesis_initialization.rs @@ -4,7 +4,10 @@ use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::initialize_beacon_state_from_eth1; use std::path::PathBuf; -use types::{BeaconState, Deposit, EthSpec, ExecutionPayloadHeader, ForkName, Hash256}; +use types::{ + BeaconState, Deposit, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderMerge, ForkName, + Hash256, +}; #[derive(Debug, Clone, Deserialize)] struct Metadata { @@ -38,9 +41,14 @@ impl LoadCase for GenesisInitialization { let meta: Metadata = yaml_decode_file(&path.join("meta.yaml"))?; let execution_payload_header: Option> = if meta.execution_payload_header.unwrap_or(false) { - Some(ssz_decode_file( + //FIXME(sean) we could decode based on timestamp - we probably don't do decode a payload + // without a block this elsewhere at presetn. But when we support SSZ in the builder api we may need to. + // Although that API should include fork info. Hardcoding this for now + Some(ExecutionPayloadHeader::Merge(ssz_decode_file::< + ExecutionPayloadHeaderMerge, + >( &path.join("execution_payload_header.ssz_snappy"), - )?) 
+ )?)) } else { None }; diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index cd24faa77c3..12ba118ac8b 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -5,6 +5,7 @@ use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yam use crate::testing_spec; use crate::type_name::TypeName; use serde_derive::Deserialize; +use ssz::Decode; use state_processing::per_block_processing::{ errors::BlockProcessingError, process_block_header, process_execution_payload, @@ -18,8 +19,8 @@ use std::fmt::Debug; use std::path::Path; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, - EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedVoluntaryExit, - SyncAggregate, + EthSpec, ExecutionPayload, ExecutionPayloadMerge, ForkName, FullPayload, ProposerSlashing, + SignedVoluntaryExit, SyncAggregate, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -81,16 +82,17 @@ impl Operation for Attestation { BeaconState::Base(_) => { base::process_attestations(state, &[self.clone()], VerifySignatures::True, spec) } - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Eip4844(_) => { - altair::process_attestation( - state, - self, - 0, - proposer_index, - VerifySignatures::True, - spec, - ) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => altair::process_attestation( + state, + self, + 0, + proposer_index, + VerifySignatures::True, + spec, + ), } } } @@ -237,8 +239,13 @@ impl Operation for FullPayload { fork_name != ForkName::Base && fork_name != ForkName::Altair } + //FIXME(sean) we could decode based on timestamp - we probably don't do decode a payload + // without a block this elsewhere at presetn. But when we support SSZ in the builder api we may need to. + // Although that API should include fork info. Hardcoding this for now fn decode(path: &Path, _spec: &ChainSpec) -> Result { - ssz_decode_file(path) + ssz_decode_file::>(path) + .map(ExecutionPayload::Merge) + .map(Into::into) } fn apply_to( @@ -252,7 +259,7 @@ impl Operation for FullPayload { .as_ref() .map_or(false, |e| e.execution_valid); if valid { - process_execution_payload(state, self, spec) + process_execution_payload::>(state, self.to_ref(), spec) } else { Err(BlockProcessingError::ExecutionInvalid) } @@ -272,7 +279,12 @@ impl Operation for BlindedPayload { } fn decode(path: &Path, _spec: &ChainSpec) -> Result { - ssz_decode_file::>(path).map(Into::into) + //FIXME(sean) we could decode based on timestamp - we probably don't do decode a payload + // without a block this elsewhere at presetn. But when we support SSZ in the builder api we may need to. + // Although that API should include fork info. 
Hardcoding this for now + let payload: Result, Error> = + ssz_decode_file::>(path).map(Into::into); + payload.map(Into::into) } fn apply_to( @@ -286,7 +298,7 @@ impl Operation for BlindedPayload { .as_ref() .map_or(false, |e| e.execution_valid); if valid { - process_execution_payload(state, self, spec) + process_execution_payload::>(state, self.to_ref(), spec) } else { Err(BlockProcessingError::ExecutionInvalid) } diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index fb5f74f6f00..e55f6ce1d72 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -46,6 +46,10 @@ impl LoadCase for TransitionTest { spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec.eip4844_fork_epoch = Some(metadata.fork_epoch); } + ForkName::Capella => { + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(metadata.fork_epoch); + } } // Load blocks diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 515e238e97f..bd179be53d6 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -4,7 +4,8 @@ use crate::execution_engine::{ use crate::transactions::transactions; use ethers_providers::Middleware; use execution_layer::{ - BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, PayloadStatus, + BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, PayloadAttributesV1, + PayloadStatus, }; use fork_choice::ForkchoiceUpdateParameters; use reqwest::{header::CONTENT_TYPE, Client}; @@ -278,11 +279,11 @@ impl TestRig { Slot::new(1), // Insert proposer for the next slot head_root, proposer_index, - PayloadAttributes { + PayloadAttributes::V1(PayloadAttributesV1 { timestamp, prev_randao, suggested_fee_recipient: Address::zero(), - }, + }), ) .await; @@ -329,7 +330,8 @@ impl TestRig { ) .await .unwrap() - .execution_payload; + .to_payload() + .execution_payload(); /* * Execution Engine A: @@ -337,7 +339,7 @@ impl TestRig { * Indicate that the payload is the head of the chain, before submitting a * `notify_new_payload`. */ - let head_block_hash = valid_payload.block_hash; + let head_block_hash = valid_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(42); @@ -377,7 +379,7 @@ impl TestRig { * * Do not provide payload attributes (we'll test that later). */ - let head_block_hash = valid_payload.block_hash; + let head_block_hash = valid_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(42); @@ -394,7 +396,7 @@ impl TestRig { .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); - assert_eq!(valid_payload.transactions.len(), pending_txs.len()); + assert_eq!(valid_payload.transactions().len(), pending_txs.len()); // Verify that all submitted txs were successful for pending_tx in pending_txs { @@ -414,7 +416,7 @@ impl TestRig { */ let mut invalid_payload = valid_payload.clone(); - invalid_payload.prev_randao = Hash256::from_low_u64_be(42); + *invalid_payload.prev_randao_mut() = Hash256::from_low_u64_be(42); let status = self .ee_a .execution_layer @@ -429,8 +431,8 @@ impl TestRig { * Produce another payload atop the previous one. 
*/ - let parent_hash = valid_payload.block_hash; - let timestamp = valid_payload.timestamp + 1; + let parent_hash = valid_payload.block_hash(); + let timestamp = valid_payload.timestamp() + 1; let prev_randao = Hash256::zero(); let proposer_index = 0; let builder_params = BuilderParams { @@ -452,7 +454,8 @@ impl TestRig { ) .await .unwrap() - .execution_payload; + .to_payload() + .execution_payload(); /* * Execution Engine A: @@ -474,13 +477,13 @@ impl TestRig { * * Indicate that the payload is the head of the chain, providing payload attributes. */ - let head_block_hash = valid_payload.block_hash; + let head_block_hash = valid_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); - let payload_attributes = PayloadAttributes { - timestamp: second_payload.timestamp + 1, + let payload_attributes = PayloadAttributes::V1(PayloadAttributesV1 { + timestamp: second_payload.timestamp() + 1, prev_randao: Hash256::zero(), suggested_fee_recipient: Address::zero(), - }; + }); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(100); let validator_index = 0; @@ -524,7 +527,7 @@ impl TestRig { * * Set the second payload as the head, without providing payload attributes. */ - let head_block_hash = second_payload.block_hash; + let head_block_hash = second_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(42); @@ -576,7 +579,7 @@ impl TestRig { * * Set the second payload as the head, without providing payload attributes. */ - let head_block_hash = second_payload.block_hash; + let head_block_hash = second_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(42); @@ -605,7 +608,7 @@ async fn check_payload_reconstruction( ) { let reconstructed = ee .execution_layer - .get_payload_by_block_hash(payload.block_hash) + .get_payload_by_block_hash(payload.block_hash()) .await .unwrap() .unwrap(); diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 02f4f76d51e..d34cdbc9ff1 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -1,7 +1,7 @@ use crate::local_network::LocalNetwork; use node_test_rig::eth2::types::{BlockId, StateId}; use std::time::Duration; -use types::{Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, Unsigned}; +use types::{Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Hash256, Slot, Unsigned}; /// Checks that all of the validators have on-boarded by the start of the second eth1 voting /// period. @@ -228,7 +228,7 @@ pub async fn verify_transition_block_finalized( .map_err(|e| format!("Get state root via http failed: {:?}", e))? 
.message() .execution_payload() - .map(|payload| payload.execution_payload.block_hash) + .map(|payload| payload.block_hash()) .map_err(|e| format!("Execution payload does not exist: {:?}", e))?; block_hashes.push(execution_block_hash); } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 3ef0c0e25b7..29926f3780c 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -13,8 +13,8 @@ use std::ops::Deref; use std::sync::Arc; use tokio::sync::mpsc; use types::{ - BlindedPayload, BlobsSidecar, BlockType, EthSpec, ExecPayload, ForkName, FullPayload, - PublicKeyBytes, Slot, + AbstractExecPayload, BlindedPayload, BlobsSidecar, BlockType, EthSpec, ExecPayload, ForkName, + FullPayload, PublicKeyBytes, Slot, }; #[derive(Debug)] @@ -276,7 +276,7 @@ impl BlockService { } /// Produce a block at the given slot for validator_pubkey - async fn publish_block>( + async fn publish_block>( self, slot: Slot, validator_pubkey: PublicKeyBytes, @@ -444,7 +444,7 @@ impl BlockService { "slot" => signed_block.slot().as_u64(), ); } - ForkName::Eip4844 => { + ForkName::Capella | ForkName::Eip4844 => { if matches!(Payload::block_type(), BlockType::Blinded) { //FIXME(sean) crit!( diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index 36467bd1782..2ebca2dfb70 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -34,7 +34,7 @@ pub enum Error { } /// Enumerates all messages that can be signed by a validator. -pub enum SignableMessage<'a, T: EthSpec, Payload: ExecPayload = FullPayload> { +pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload = FullPayload> { RandaoReveal(Epoch), BeaconBlock(&'a BeaconBlock), BlobsSidecar(&'a BlobsSidecar), @@ -50,7 +50,7 @@ pub enum SignableMessage<'a, T: EthSpec, Payload: ExecPayload = FullPayload> SignableMessage<'a, T, Payload> { +impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignableMessage<'a, T, Payload> { /// Returns the `SignedRoot` for the contained message. /// /// The actual `SignedRoot` trait is not used since it also requires a `TreeHash` impl, which is @@ -118,7 +118,7 @@ impl SigningContext { impl SigningMethod { /// Return the signature of `signable_message`, with respect to the `signing_context`. 
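The call sites above switch from direct field access (`payload.block_hash`, `payload.timestamp`) to getter methods, which is what the superstruct `getter(copy)` annotations earlier in this series provide once the payload becomes an enum over forks. A simplified sketch of that accessor pattern, with illustrative types rather than the generated superstruct code:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct BlockHash([u8; 32]);

    struct MergePayload {
        block_hash: BlockHash,
        timestamp: u64,
    }

    struct CapellaPayload {
        block_hash: BlockHash,
        timestamp: u64,
    }

    enum VersionedPayload {
        Merge(MergePayload),
        Capella(CapellaPayload),
    }

    impl VersionedPayload {
        // Copy getters hide which fork variant the payload actually is.
        fn block_hash(&self) -> BlockHash {
            match self {
                VersionedPayload::Merge(p) => p.block_hash,
                VersionedPayload::Capella(p) => p.block_hash,
            }
        }

        fn timestamp(&self) -> u64 {
            match self {
                VersionedPayload::Merge(p) => p.timestamp,
                VersionedPayload::Capella(p) => p.timestamp,
            }
        }
    }

    fn main() {
        let payload = VersionedPayload::Capella(CapellaPayload {
            block_hash: BlockHash([0; 32]),
            timestamp: 42,
        });
        assert_eq!(payload.timestamp(), 42);
        assert_eq!(payload.block_hash(), BlockHash([0; 32]));
    }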
- pub async fn get_signature>( + pub async fn get_signature>( &self, signable_message: SignableMessage<'_, T, Payload>, signing_context: SigningContext, @@ -143,7 +143,7 @@ impl SigningMethod { .await } - pub async fn get_signature_from_root>( + pub async fn get_signature_from_root>( &self, signable_message: SignableMessage<'_, T, Payload>, signing_root: Hash256, diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 6668badb923..5daa42fa3a4 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -27,6 +27,7 @@ pub enum ForkName { Phase0, Altair, Bellatrix, + Capella, Eip4844, } @@ -38,7 +39,7 @@ pub struct ForkInfo { #[derive(Debug, PartialEq, Serialize)] #[serde(bound = "T: EthSpec", rename_all = "snake_case")] -pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload> { +pub enum Web3SignerObject<'a, T: EthSpec, Payload: AbstractExecPayload> { AggregationSlot { slot: Slot, }, @@ -76,7 +77,7 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload> { ValidatorRegistration(&'a ValidatorRegistrationData), } -impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { +impl<'a, T: EthSpec, Payload: AbstractExecPayload> Web3SignerObject<'a, T, Payload> { pub fn beacon_block(block: &'a BeaconBlock) -> Result { match block { BeaconBlock::Base(_) => Ok(Web3SignerObject::BeaconBlock { @@ -94,6 +95,11 @@ impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { block: None, block_header: Some(block.block_header()), }), + BeaconBlock::Capella(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Capella, + block: None, + block_header: Some(block.block_header()), + }), BeaconBlock::Eip4844(_) => Ok(Web3SignerObject::BeaconBlock { version: ForkName::Eip4844, block: None, @@ -126,7 +132,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { #[derive(Debug, PartialEq, Serialize)] #[serde(bound = "T: EthSpec")] -pub struct SigningRequest<'a, T: EthSpec, Payload: ExecPayload> { +pub struct SigningRequest<'a, T: EthSpec, Payload: AbstractExecPayload> { #[serde(rename = "type")] pub message_type: MessageType, #[serde(skip_serializing_if = "Option::is_none")] diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 389bbb8005e..306f22a2fbe 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -18,13 +18,13 @@ use std::path::Path; use std::sync::Arc; use task_executor::TaskExecutor; use types::{ - attestation::Error as AttestationError, graffiti::GraffitiString, Address, AggregateAndProof, - Attestation, BeaconBlock, BlindedPayload, BlobsSidecar, ChainSpec, ContributionAndProof, - Domain, Epoch, EthSpec, ExecPayload, Fork, FullPayload, Graffiti, Hash256, Keypair, - PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, - SignedBlobsSidecar, SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, - Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, - SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, + attestation::Error as AttestationError, graffiti::GraffitiString, AbstractExecPayload, Address, + AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, BlobsSidecar, ChainSpec, + ContributionAndProof, Domain, Epoch, EthSpec, ExecPayload, Fork, FullPayload, Graffiti, + Hash256, Keypair, PublicKeyBytes, 
SelectionProof, Signature, SignedAggregateAndProof, + SignedBeaconBlock, SignedBlobsSidecar, SignedContributionAndProof, SignedRoot, + SignedValidatorRegistrationData, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, + SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, }; use validator_dir::ValidatorDir; @@ -455,7 +455,7 @@ impl ValidatorStore { .unwrap_or(self.builder_proposals) } - pub async fn sign_block>( + pub async fn sign_block>( &self, validator_pubkey: PublicKeyBytes, block: BeaconBlock, From f1a3b3b01c698aeb70a5bf8671217f10c156c2b0 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Thu, 27 Oct 2022 16:41:39 -0500 Subject: [PATCH 053/263] Added Capella Epoch Processing Logic (#3666) --- .../src/engine_api/json_structures.rs | 4 ++ consensus/state_processing/src/common/mod.rs | 2 + .../src/common/withdraw_balance.rs | 28 ++++++++++++++ .../src/per_epoch_processing/capella.rs | 4 +- .../capella/full_withdrawals.rs | 21 +++++++++-- .../capella/partial_withdrawals.rs | 37 +++++++++++++++++-- consensus/types/src/chain_spec.rs | 7 +++- consensus/types/src/validator.rs | 21 +++++++++++ consensus/types/src/withdrawal.rs | 1 + 9 files changed, 113 insertions(+), 12 deletions(-) create mode 100644 consensus/state_processing/src/common/withdraw_balance.rs diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 82d72ca560e..fcbd17d7336 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -381,6 +381,8 @@ impl From> for JsonExecutionPayload { pub struct JsonWithdrawal { #[serde(with = "eth2_serde_utils::u64_hex_be")] pub index: u64, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub validator_index: u64, pub address: Address, #[serde(with = "eth2_serde_utils::u256_hex_be")] pub amount: Uint256, @@ -390,6 +392,7 @@ impl From for JsonWithdrawal { fn from(withdrawal: Withdrawal) -> Self { Self { index: withdrawal.index, + validator_index: withdrawal.validator_index, address: withdrawal.address, amount: Uint256::from((withdrawal.amount as u128) * 1000000000u128), } @@ -400,6 +403,7 @@ impl From for Withdrawal { fn from(jw: JsonWithdrawal) -> Self { Self { index: jw.index, + validator_index: jw.validator_index, address: jw.address, //FIXME(sean) if EE gives us too large a number this panics amount: (jw.amount / 1000000000).as_u64(), diff --git a/consensus/state_processing/src/common/mod.rs b/consensus/state_processing/src/common/mod.rs index 8a2e2439bb6..531891ee957 100644 --- a/consensus/state_processing/src/common/mod.rs +++ b/consensus/state_processing/src/common/mod.rs @@ -4,6 +4,7 @@ mod get_attesting_indices; mod get_indexed_attestation; mod initiate_validator_exit; mod slash_validator; +mod withdraw_balance; pub mod altair; pub mod base; @@ -14,6 +15,7 @@ pub use get_attesting_indices::{get_attesting_indices, get_attesting_indices_fro pub use get_indexed_attestation::get_indexed_attestation; pub use initiate_validator_exit::initiate_validator_exit; pub use slash_validator::slash_validator; +pub use withdraw_balance::withdraw_balance; use safe_arith::SafeArith; use types::{BeaconState, BeaconStateError, EthSpec}; diff --git a/consensus/state_processing/src/common/withdraw_balance.rs b/consensus/state_processing/src/common/withdraw_balance.rs new file mode 100644 index 00000000000..29b09cc0f91 --- /dev/null +++ 
b/consensus/state_processing/src/common/withdraw_balance.rs @@ -0,0 +1,28 @@ +use crate::common::decrease_balance; +use safe_arith::SafeArith; +use types::{BeaconStateError as Error, *}; + +pub fn withdraw_balance( + state: &mut BeaconState, + validator_index: usize, + amount: u64, +) -> Result<(), Error> { + decrease_balance(state, validator_index as usize, amount)?; + + let withdrawal_address = Address::from_slice( + &state + .get_validator(validator_index)? + .withdrawal_credentials + .as_bytes()[12..], + ); + let withdrawal = Withdrawal { + index: *state.next_withdrawal_index()?, + validator_index: validator_index as u64, + address: withdrawal_address, + amount, + }; + state.next_withdrawal_index_mut()?.safe_add_assign(1)?; + state.withdrawal_queue_mut()?.push(withdrawal)?; + + Ok(()) +} diff --git a/consensus/state_processing/src/per_epoch_processing/capella.rs b/consensus/state_processing/src/per_epoch_processing/capella.rs index 4886b280535..d1bf71071dd 100644 --- a/consensus/state_processing/src/per_epoch_processing/capella.rs +++ b/consensus/state_processing/src/per_epoch_processing/capella.rs @@ -66,9 +66,9 @@ pub fn process_epoch( altair::process_sync_committee_updates(state, spec)?; // Withdrawals - process_full_withdrawals(state)?; + process_full_withdrawals(state, spec)?; - process_partial_withdrawals(state)?; + process_partial_withdrawals(state, spec)?; // Rotate the epoch caches to suit the epoch transition. state.advance_caches(spec)?; diff --git a/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs b/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs index 13bbc67581d..62e4b91110d 100644 --- a/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs +++ b/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs @@ -1,10 +1,23 @@ +use crate::common::withdraw_balance; use crate::EpochProcessingError; -use types::beacon_state::BeaconState; -use types::eth_spec::EthSpec; +use types::{beacon_state::BeaconState, eth_spec::EthSpec, ChainSpec}; pub fn process_full_withdrawals( - _state: &mut BeaconState, + state: &mut BeaconState, + spec: &ChainSpec, ) -> Result<(), EpochProcessingError> { - todo!("implement this"); + let current_epoch = state.current_epoch(); + // FIXME: is this the most efficient way to do this? + for validator_index in 0..state.validators().len() { + // TODO: is this the correct way to handle validators not existing? 
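The new `withdraw_balance` helper does three pieces of bookkeeping: decrease the validator's balance, append a `Withdrawal` carrying the current `next_withdrawal_index`, and bump that index. A toy version of the same bookkeeping on a simplified state (these types are stand-ins, not the Lighthouse `BeaconState`):

    #[derive(Debug, PartialEq)]
    struct Withdrawal {
        index: u64,
        validator_index: u64,
        amount: u64,
    }

    struct ToyState {
        balances: Vec<u64>,
        next_withdrawal_index: u64,
        withdrawal_queue: Vec<Withdrawal>,
    }

    fn withdraw_balance(state: &mut ToyState, validator_index: usize, amount: u64) -> Result<(), String> {
        let balance = state
            .balances
            .get_mut(validator_index)
            .ok_or_else(|| format!("unknown validator {}", validator_index))?;
        // Decrease the balance, then enqueue the withdrawal and advance the index.
        *balance = balance.saturating_sub(amount);

        state.withdrawal_queue.push(Withdrawal {
            index: state.next_withdrawal_index,
            validator_index: validator_index as u64,
            amount,
        });
        state.next_withdrawal_index += 1;
        Ok(())
    }

    fn main() {
        let mut state = ToyState {
            balances: vec![32, 40],
            next_withdrawal_index: 7,
            withdrawal_queue: vec![],
        };
        withdraw_balance(&mut state, 1, 8).unwrap();
        assert_eq!(state.balances[1], 32);
        assert_eq!(state.withdrawal_queue[0].index, 7);
        assert_eq!(state.next_withdrawal_index, 8);
    }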
+ if let (Some(validator), Some(balance)) = ( + state.validators().get(validator_index), + state.balances().get(validator_index), + ) { + if validator.is_fully_withdrawable_at(*balance, current_epoch, spec) { + withdraw_balance(state, validator_index, *balance)?; + } + } + } Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs b/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs index a648766e218..75576ef6e76 100644 --- a/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs +++ b/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs @@ -1,10 +1,39 @@ +use crate::common::withdraw_balance; use crate::EpochProcessingError; -use types::beacon_state::BeaconState; -use types::eth_spec::EthSpec; +use safe_arith::SafeArith; +use types::{beacon_state::BeaconState, eth_spec::EthSpec, ChainSpec}; pub fn process_partial_withdrawals( - _state: &mut BeaconState, + state: &mut BeaconState, + spec: &ChainSpec, ) -> Result<(), EpochProcessingError> { - todo!("implement this"); + let mut partial_withdrawals_count = 0; + let mut validator_index = *state.next_partial_withdrawal_validator_index()? as usize; + + let n_validators = state.validators().len(); + // FIXME: is this the most efficient way to do this? + for _ in 0..n_validators { + // TODO: is this the correct way to handle validators not existing? + if let (Some(validator), Some(balance)) = ( + state.validators().get(validator_index), + state.balances().get(validator_index), + ) { + if validator.is_partially_withdrawable_validator(*balance, spec) { + withdraw_balance( + state, + validator_index, + *balance - spec.max_effective_balance, + )?; + partial_withdrawals_count.safe_add_assign(1)?; + + validator_index = validator_index.safe_add(1)? % n_validators; + if partial_withdrawals_count == T::max_partial_withdrawals_per_epoch() { + break; + } + } + } + } + *state.next_partial_withdrawal_validator_index_mut()? 
= validator_index as u64; + Ok(()) } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 69346ec1969..cfb072ecdd9 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -73,6 +73,7 @@ pub struct ChainSpec { */ pub genesis_fork_version: [u8; 4], pub bls_withdrawal_prefix_byte: u8, + pub eth1_address_withdrawal_prefix_byte: u8, /* * Time parameters @@ -519,7 +520,8 @@ impl ChainSpec { * Initial Values */ genesis_fork_version: [0; 4], - bls_withdrawal_prefix_byte: 0, + bls_withdrawal_prefix_byte: 0x00, + eth1_address_withdrawal_prefix_byte: 0x01, /* * Time parameters @@ -748,7 +750,8 @@ impl ChainSpec { * Initial Values */ genesis_fork_version: [0x00, 0x00, 0x00, 0x64], - bls_withdrawal_prefix_byte: 0, + bls_withdrawal_prefix_byte: 0x00, + eth1_address_withdrawal_prefix_byte: 0x01, /* * Time parameters diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 21a6b39b6d0..6e63c943a1a 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -65,6 +65,27 @@ impl Validator { // Has not yet been activated && self.activation_epoch == spec.far_future_epoch } + + /// Returns `true` if the validator has eth1 withdrawal credential + pub fn has_eth1_withdrawal_credential(&self, spec: &ChainSpec) -> bool { + self.withdrawal_credentials + .as_bytes() + .first() + .map(|byte| *byte == spec.eth1_address_withdrawal_prefix_byte) + .unwrap_or(false) + } + + /// Returns `true` if the validator is fully withdrawable at some epoch + pub fn is_fully_withdrawable_at(&self, balance: u64, epoch: Epoch, spec: &ChainSpec) -> bool { + self.has_eth1_withdrawal_credential(spec) && self.withdrawable_epoch <= epoch && balance > 0 + } + + /// Returns `true` if the validator is partially withdrawable + pub fn is_partially_withdrawable_validator(&self, balance: u64, spec: &ChainSpec) -> bool { + self.has_eth1_withdrawal_credential(spec) + && self.effective_balance == spec.max_effective_balance + && balance > spec.max_effective_balance + } } impl Default for Validator { diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs index 73688479132..36ee6396580 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal.rs @@ -15,6 +15,7 @@ use tree_hash_derive::TreeHash; pub struct Withdrawal { #[serde(with = "eth2_serde_utils::quoted_u64")] pub index: u64, + pub validator_index: u64, pub address: Address, pub amount: u64, } From 82eef493f317b7a9b27e6092aa79372e3c07306b Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 28 Oct 2022 10:18:04 -0400 Subject: [PATCH 054/263] clean up types --- consensus/tree_hash/src/impls.rs | 21 ++++++- consensus/types/src/blob.rs | 79 ------------------------ consensus/types/src/bls_field_element.rs | 58 ----------------- consensus/types/src/kzg_commitment.rs | 10 ++- consensus/types/src/kzg_proof.rs | 2 +- consensus/types/src/lib.rs | 3 - 6 files changed, 30 insertions(+), 143 deletions(-) delete mode 100644 consensus/types/src/blob.rs delete mode 100644 consensus/types/src/bls_field_element.rs diff --git a/consensus/tree_hash/src/impls.rs b/consensus/tree_hash/src/impls.rs index f27c5291861..899356f8331 100644 --- a/consensus/tree_hash/src/impls.rs +++ b/consensus/tree_hash/src/impls.rs @@ -81,7 +81,26 @@ macro_rules! 
impl_for_lt_32byte_u8_array { impl_for_lt_32byte_u8_array!(4); impl_for_lt_32byte_u8_array!(32); -impl_for_lt_32byte_u8_array!(48); + +impl TreeHash for [u8; 48] { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Vector + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + unreachable!("Vector should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("Vector should never be packed.") + } + + fn tree_hash_root(&self) -> Hash256 { + let values_per_chunk = BYTES_PER_CHUNK; + let minimum_chunk_count = (48 + values_per_chunk - 1) / values_per_chunk; + merkle_root(self, minimum_chunk_count) + } +} impl TreeHash for U128 { fn tree_hash_type() -> TreeHashType { diff --git a/consensus/types/src/blob.rs b/consensus/types/src/blob.rs deleted file mode 100644 index 89e5e8bbe77..00000000000 --- a/consensus/types/src/blob.rs +++ /dev/null @@ -1,79 +0,0 @@ -use crate::bls_field_element::BlsFieldElement; -use crate::test_utils::RngCore; -use crate::test_utils::TestRandom; -use crate::EthSpec; -use serde::{Deserialize, Serialize}; -use ssz::{Decode, DecodeError, Encode}; -use ssz_types::VariableList; -use tree_hash::{PackedEncoding, TreeHash}; - -#[derive(Default, Debug, PartialEq, Hash, Clone, Serialize, Deserialize)] -#[serde(transparent)] -pub struct Blob(pub VariableList); - -impl TestRandom for Blob { - fn random_for_test(rng: &mut impl RngCore) -> Self { - let mut res = Blob(VariableList::empty()); - for _i in 0..4096 { - let slice = ethereum_types::U256([ - rng.next_u64(), - rng.next_u64(), - rng.next_u64(), - rng.next_u64(), - ]); - let elem = BlsFieldElement(slice); - res.0.push(elem); - } - res - } -} - -impl Encode for Blob { - fn is_ssz_fixed_len() -> bool { - as Encode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - as Encode>::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.0.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.0.ssz_append(buf) - } -} - -impl Decode for Blob { - fn is_ssz_fixed_len() -> bool { - as Decode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - as Decode>::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - >::from_ssz_bytes(bytes).map(Self) - } -} - -impl TreeHash for Blob { - fn tree_hash_type() -> tree_hash::TreeHashType { - >::tree_hash_type() - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - self.0.tree_hash_packed_encoding() - } - - fn tree_hash_packing_factor() -> usize { - >::tree_hash_packing_factor() - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - self.0.tree_hash_root() - } -} diff --git a/consensus/types/src/bls_field_element.rs b/consensus/types/src/bls_field_element.rs deleted file mode 100644 index 7654f65b364..00000000000 --- a/consensus/types/src/bls_field_element.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crate::Uint256; -use serde::{Deserialize, Serialize}; -use ssz::{Decode, DecodeError, Encode}; -use tree_hash::{PackedEncoding, TreeHash}; - -#[derive(Default, Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] -#[serde(transparent)] -pub struct BlsFieldElement(pub Uint256); - -impl Encode for BlsFieldElement { - fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - ::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.0.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.0.ssz_append(buf) - } -} - -impl Decode for BlsFieldElement { - fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> 
usize { - ::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - ::from_ssz_bytes(bytes).map(Self) - } -} - -impl TreeHash for BlsFieldElement { - fn tree_hash_type() -> tree_hash::TreeHashType { - ::tree_hash_type() - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - self.0.tree_hash_packed_encoding() - } - - fn tree_hash_packing_factor() -> usize { - ::tree_hash_packing_factor() - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - self.0.tree_hash_root() - } -} diff --git a/consensus/types/src/kzg_commitment.rs b/consensus/types/src/kzg_commitment.rs index 1fbdb6ca2ca..3b9570cd26b 100644 --- a/consensus/types/src/kzg_commitment.rs +++ b/consensus/types/src/kzg_commitment.rs @@ -3,12 +3,20 @@ use crate::*; use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; +use std::fmt; +use std::fmt::{Display, Formatter}; use tree_hash::{PackedEncoding, TreeHash}; -//TODO: is there a way around this newtype #[derive(Derivative, Debug, Clone, Serialize, Deserialize)] #[derivative(PartialEq, Eq, Hash)] pub struct KzgCommitment(#[serde(with = "BigArray")] [u8; 48]); + +impl Display for KzgCommitment { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{}", eth2_serde_utils::hex::encode(&self.0)) + } +} + impl TreeHash for KzgCommitment { fn tree_hash_type() -> tree_hash::TreeHashType { <[u8; 48] as TreeHash>::tree_hash_type() diff --git a/consensus/types/src/kzg_proof.rs b/consensus/types/src/kzg_proof.rs index cff619ec887..9b917f8e124 100644 --- a/consensus/types/src/kzg_proof.rs +++ b/consensus/types/src/kzg_proof.rs @@ -8,7 +8,7 @@ const KZG_PROOF_BYTES_LEN: usize = 48; #[derive(Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] #[serde(transparent)] -pub struct KzgProof(#[serde(with = "serde_kzg_proof")] pub [u8; KZG_PROOF_BYTES_LEN]); +pub struct KzgProof(#[serde(with = "BigArray")] pub [u8; KZG_PROOF_BYTES_LEN]); impl fmt::Display for KzgProof { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 66db2077305..39ddbf6deec 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -90,9 +90,7 @@ pub mod slot_data; #[cfg(feature = "sqlite")] pub mod sqlite; -pub mod blob; pub mod blobs_sidecar; -pub mod bls_field_element; pub mod kzg_commitment; pub mod kzg_proof; pub mod signed_blobs_sidecar; @@ -197,7 +195,6 @@ pub type Address = H160; pub type ForkVersion = [u8; 4]; pub type BLSFieldElement = Uint256; pub type Blob = FixedVector::FieldElementsPerBlob>; -pub type Polynomial = VariableList::FieldElementsPerBlob>; pub use bls::{ AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, From 9f155eec7a772e9429efe377038fca1e0c52e1b1 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 28 Oct 2022 10:25:48 -0400 Subject: [PATCH 055/263] 48 byte array serde --- consensus/types/src/kzg_proof.rs | 37 ++------------------------------ 1 file changed, 2 insertions(+), 35 deletions(-) diff --git a/consensus/types/src/kzg_proof.rs b/consensus/types/src/kzg_proof.rs index 9b917f8e124..879620bd6f0 100644 --- a/consensus/types/src/kzg_proof.rs +++ b/consensus/types/src/kzg_proof.rs @@ -1,8 +1,9 @@ use crate::test_utils::{RngCore, TestRandom}; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; use tree_hash::{PackedEncoding, TreeHash}; +use 
serde_big_array::BigArray; const KZG_PROOF_BYTES_LEN: usize = 48; @@ -34,40 +35,6 @@ impl Into<[u8; KZG_PROOF_BYTES_LEN]> for KzgProof { } } -pub mod serde_kzg_proof { - use super::*; - use serde::de::Error; - - pub fn serialize(bytes: &[u8; KZG_PROOF_BYTES_LEN], serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(ð2_serde_utils::hex::encode(bytes)) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; KZG_PROOF_BYTES_LEN], D::Error> - where - D: Deserializer<'de>, - { - let s: String = Deserialize::deserialize(deserializer)?; - - let bytes = eth2_serde_utils::hex::decode(&s).map_err(D::Error::custom)?; - - if bytes.len() != KZG_PROOF_BYTES_LEN { - return Err(D::Error::custom(format!( - "incorrect byte length {}, expected {}", - bytes.len(), - KZG_PROOF_BYTES_LEN - ))); - } - - let mut array = [0; KZG_PROOF_BYTES_LEN]; - array[..].copy_from_slice(&bytes); - - Ok(array) - } -} - impl Encode for KzgProof { fn is_ssz_fixed_len() -> bool { <[u8; KZG_PROOF_BYTES_LEN] as Encode>::is_ssz_fixed_len() From 29f2ec46d3d9ccb73799b3e185433acb5dff388b Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 1 Nov 2022 19:58:21 +0530 Subject: [PATCH 056/263] Couple blocks and blobs in gossip (#3670) * Revert "Add more gossip verification conditions" This reverts commit 1430b561c37adb44d5705005de6bf633deb8c16d. * Revert "Add todos" This reverts commit 91efb9d4c780b55025c3793a67bd9dacc1b2c924. * Revert "Reprocess blob sidecar messages" This reverts commit 21bf3d37cdce46632cfa4e3f5abb194f172c6851. * Add the coupled topic * Decode SignedBeaconBlockAndBlobsSidecar correctly * Process Block and Blobs in beacon processor * Remove extra blob publishing logic from vc * Remove blob signing in vc * Ugly hack to compile --- Cargo.lock | 2 + beacon_node/beacon_chain/src/beacon_chain.rs | 4 +- .../beacon_chain/src/blob_verification.rs | 97 +---- beacon_node/http_api/src/lib.rs | 28 +- beacon_node/http_api/src/publish_blobs.rs | 124 ------ beacon_node/http_api/src/publish_blocks.rs | 27 +- beacon_node/lighthouse_network/Cargo.toml | 2 + beacon_node/lighthouse_network/src/lib.rs | 1 + .../lighthouse_network/src/rpc/protocol.rs | 13 +- .../src/service/gossip_cache.rs | 10 +- .../lighthouse_network/src/types/mod.rs | 2 +- .../lighthouse_network/src/types/pubsub.rs | 71 +++- .../lighthouse_network/src/types/topics.rs | 14 +- .../network/src/beacon_processor/mod.rs | 92 ++--- .../work_reprocessing_queue.rs | 159 +------ .../beacon_processor/worker/gossip_methods.rs | 111 +---- beacon_node/network/src/router/mod.rs | 6 +- beacon_node/network/src/router/processor.rs | 11 +- beacon_node/store/src/hot_cold_store.rs | 15 +- beacon_node/store/src/lib.rs | 3 +- common/eth2/src/lib.rs | 2 +- consensus/types/src/lib.rs | 2 - consensus/types/src/signed_blobs_sidecar.rs | 58 --- validator_client/src/block_service.rs | 388 ++++++------------ validator_client/src/validator_store.rs | 42 +- 25 files changed, 294 insertions(+), 990 deletions(-) delete mode 100644 beacon_node/http_api/src/publish_blobs.rs delete mode 100644 consensus/types/src/signed_blobs_sidecar.rs diff --git a/Cargo.lock b/Cargo.lock index d0831365d7c..297fca7117b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3700,6 +3700,8 @@ dependencies = [ "tokio", "tokio-io-timeout", "tokio-util 0.6.10", + "tree_hash", + "tree_hash_derive", "types", "unsigned-varint 0.6.0", "unused_port", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index a937a6ea418..f1d8e5fbe05 100644 --- 
a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1778,11 +1778,11 @@ impl BeaconChain { }) } - /// Accepts some `SignedBlobsSidecar` from the network and attempts to verify it, + /// Accepts some `BlobsSidecar` received over from the network and attempts to verify it, /// returning `Ok(_)` if it is valid to be (re)broadcast on the gossip network. pub fn verify_blobs_sidecar_for_gossip<'a>( &self, - blobs_sidecar: &'a SignedBlobsSidecar, + blobs_sidecar: &'a BlobsSidecar, ) -> Result, BlobError> { metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_REQUESTS); let _timer = metrics::start_timer(&metrics::BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES); diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index be9b0effc17..d3e0d2a17d8 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -1,13 +1,10 @@ use derivative::Derivative; use slot_clock::SlotClock; -use crate::beacon_chain::{ - BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY, - VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, -}; +use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use crate::BeaconChainError; use bls::PublicKey; -use types::{consts::eip4844::BLS_MODULUS, BeaconStateError, Hash256, SignedBlobsSidecar, Slot}; +use types::{consts::eip4844::BLS_MODULUS, BeaconStateError, BlobsSidecar, Hash256, Slot}; pub enum BlobError { /// The blob sidecar is from a slot that is later than the current slot (with respect to the @@ -61,21 +58,6 @@ pub enum BlobError { /// be equal to the given sidecar. RepeatSidecar { proposer: u64, slot: Slot }, - /// The `blobs_sidecar.message.beacon_block_root` block is unknown. - /// - /// ## Peer scoring - /// - /// The attestation points to a block we have not yet imported. It's unclear if the attestation - /// is valid or not. - UnknownHeadBlock { beacon_block_root: Hash256 }, - - /// The proposal_index corresponding to blob.beacon_block_root is not known. - /// - /// ## Peer scoring - /// - /// The block is invalid and the peer is faulty. - UnknownValidator(u64), - /// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid. /// /// ## Peer scoring @@ -97,109 +79,58 @@ impl From for BlobError { } } -/// A wrapper around a `SignedBlobsSidecar` that indicates it has been approved for re-gossiping on -/// the p2p network. +/// A wrapper around a `BlobsSidecar` that indicates it has been verified w.r.t the corresponding +/// `SignedBeaconBlock`. #[derive(Derivative)] #[derivative(Debug(bound = "T: BeaconChainTypes"))] pub struct VerifiedBlobsSidecar<'a, T: BeaconChainTypes> { - pub blob_sidecar: &'a SignedBlobsSidecar, + pub blob_sidecar: &'a BlobsSidecar, } impl<'a, T: BeaconChainTypes> VerifiedBlobsSidecar<'a, T> { pub fn verify( - blob_sidecar: &'a SignedBlobsSidecar, + blob_sidecar: &'a BlobsSidecar, chain: &BeaconChain, ) -> Result { - let block_slot = blob_sidecar.message.beacon_block_slot; - let block_root = blob_sidecar.message.beacon_block_root; + let blob_slot = blob_sidecar.beacon_block_slot; // Do not gossip or process blobs from future or past slots. 
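The checks below accept a blob sidecar only if its slot falls inside a window around the current slot, widened by the gossip clock disparity tolerance. A rough standalone sketch of that window test; the real code goes through the chain's `SlotClock` helpers and `MAXIMUM_GOSSIP_CLOCK_DISPARITY` rather than plain integers:

    type Slot = u64;

    // Accept a message only when its slot lies in [earliest, latest] around `current_slot`.
    fn slot_within_gossip_window(message_slot: Slot, current_slot: Slot, tolerance: Slot) -> bool {
        let latest_permissible = current_slot + tolerance;
        let earliest_permissible = current_slot.saturating_sub(tolerance);
        message_slot >= earliest_permissible && message_slot <= latest_permissible
    }

    fn main() {
        assert!(slot_within_gossip_window(100, 100, 0));
        assert!(slot_within_gossip_window(101, 100, 1));
        assert!(!slot_within_gossip_window(102, 100, 1));
        assert!(!slot_within_gossip_window(98, 100, 1));
    }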
let latest_permissible_slot = chain .slot_clock .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)?; - if block_slot > latest_permissible_slot { + if blob_slot > latest_permissible_slot { return Err(BlobError::FutureSlot { message_slot: latest_permissible_slot, - latest_permissible_slot: block_slot, + latest_permissible_slot: blob_slot, }); } - // TODO: return `UnknownHeadBlock` if blob_root doesn't exist in fork choice - // and wherever it could be found. - let earliest_permissible_slot = chain .slot_clock .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)?; - if block_slot > earliest_permissible_slot { + if blob_slot > earliest_permissible_slot { return Err(BlobError::PastSlot { message_slot: earliest_permissible_slot, - earliest_permissible_slot: block_slot, + earliest_permissible_slot: blob_slot, }); } // Verify that blobs are properly formatted //TODO: add the check while constructing a Blob type from bytes instead of after - for (i, blob) in blob_sidecar.message.blobs.iter().enumerate() { + for (i, blob) in blob_sidecar.blobs.iter().enumerate() { if blob.iter().any(|b| *b >= *BLS_MODULUS) { return Err(BlobError::BlobOutOfRange { blob_index: i }); } } // Verify that the KZG proof is a valid G1 point - // TODO(pawan): KZG commitment can also be point at infinity, use a different check - // (bls.KeyValidate) - if PublicKey::deserialize(&blob_sidecar.message.kzg_aggregate_proof.0).is_err() { + if PublicKey::deserialize(&blob_sidecar.kzg_aggregate_proof.0).is_err() { return Err(BlobError::InvalidKZGCommitment); } - let proposer_shuffling_root = chain - .canonical_head - .cached_head() - .snapshot - .beacon_state - .proposer_shuffling_decision_root(block_root)?; - - let (proposer_index, fork) = match chain - .beacon_proposer_cache - .lock() - .get_slot::(proposer_shuffling_root, block_slot) - { - Some(proposer) => (proposer.index, proposer.fork), - None => { - let state = &chain.canonical_head.cached_head().snapshot.beacon_state; - ( - state.get_beacon_proposer_index(block_slot, &chain.spec)?, - state.fork(), - ) - } - }; - let signature_is_valid = { - let pubkey_cache = chain - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout) - .map_err(BlobError::BeaconChainError)?; - - let pubkey = pubkey_cache - .get(proposer_index as usize) - .ok_or_else(|| BlobError::UnknownValidator(proposer_index as u64))?; - - blob_sidecar.verify_signature( - None, - pubkey, - &fork, - chain.genesis_validators_root, - &chain.spec, - ) - }; - - if !signature_is_valid { - return Err(BlobError::ProposalSignatureInvalid); - } + // TODO: Check that we have not already received a sidecar with a valid signature for this slot. - // TODO(pawan): Check that we have not already received a sidecar with a valid signature for this slot. 
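The loop above rejects a sidecar if any blob element is not a canonical field element, that is, not strictly below the BLS modulus. A toy version of that range check, shrunk to `u64` values so it stays self-contained:

    // Returns Ok(()) if every element is canonical, or the index of the first offender.
    fn blob_elements_in_range(blob: &[u64], modulus: u64) -> Result<(), usize> {
        match blob.iter().position(|element| *element >= modulus) {
            Some(bad_index) => Err(bad_index),
            None => Ok(()),
        }
    }

    fn main() {
        let modulus = 13;
        assert_eq!(blob_elements_in_range(&[0, 5, 12], modulus), Ok(()));
        assert_eq!(blob_elements_in_range(&[0, 13, 12], modulus), Err(1));
    }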
- // TODO(pawan): check if block hash is already known Ok(Self { blob_sidecar }) } } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 203b462b164..edb1ad091f4 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -13,7 +13,6 @@ mod block_rewards; mod database; mod metrics; mod proposer_duties; -mod publish_blobs; mod publish_blocks; mod state_id; mod sync_committees; @@ -49,7 +48,7 @@ use types::{ Attestation, AttestationData, AttesterSlashing, BeaconStateError, BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, - SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlobsSidecar, SignedContributionAndProof, + SignedBeaconBlock, SignedBlindedBeaconBlock, SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, }; @@ -1047,27 +1046,9 @@ pub fn serve( chain: Arc>, network_tx: UnboundedSender>, log: Logger| async move { - publish_blocks::publish_block(None, block, chain, &network_tx, log) - .await - .map(|()| warp::reply()) - }, - ); - - // POST beacon/blobs - let post_beacon_blobs = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blobs")) - .and(warp::path::end()) - .and(warp::body::json()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) - .and(log_filter.clone()) - .and_then( - |blobs: Arc>, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| async move { - publish_blobs::publish_blobs(blobs, chain, &network_tx, log) + // need to have cached the blob sidecar somewhere in the beacon chain + // to publish + publish_blocks::publish_block(None, block, None, chain, &network_tx, log) .await .map(|()| warp::reply()) }, @@ -3183,7 +3164,6 @@ pub fn serve( post_beacon_blocks .boxed() .or(post_beacon_blinded_blocks.boxed()) - .or(post_beacon_blobs.boxed()) .or(post_beacon_pool_attestations.boxed()) .or(post_beacon_pool_attester_slashings.boxed()) .or(post_beacon_pool_proposer_slashings.boxed()) diff --git a/beacon_node/http_api/src/publish_blobs.rs b/beacon_node/http_api/src/publish_blobs.rs deleted file mode 100644 index cf08ac2d2b4..00000000000 --- a/beacon_node/http_api/src/publish_blobs.rs +++ /dev/null @@ -1,124 +0,0 @@ -use crate::metrics; -use beacon_chain::validator_monitor::{get_slot_delay_ms, timestamp_now}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use lighthouse_network::PubsubMessage; -use network::NetworkMessage; -use slog::Logger; -use std::sync::Arc; -use tokio::sync::mpsc::UnboundedSender; -use types::SignedBlobsSidecar; -use warp::Rejection; - -/// Handles a request from the HTTP API for full blocks. -pub async fn publish_blobs( - blobs_sidecar: Arc>, - chain: Arc>, - network_tx: &UnboundedSender>, - log: Logger, -) -> Result<(), Rejection> { - let seen_timestamp = timestamp_now(); - - // Send the blob, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. - crate::publish_pubsub_message( - network_tx, - PubsubMessage::BlobsSidecars(blobs_sidecar.clone()), - )?; - - // Determine the delay after the start of the slot, register it with metrics. 
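The broadcast-delay measurement referenced here is the time elapsed between the start of the message's slot and the moment the API saw it. A simplified sketch using plain `std::time` arithmetic; the real code derives the slot start from the chain's `SlotClock` and records the result in a metrics histogram:

    use std::time::Duration;

    // All times are offsets from the unix epoch; genesis and slot length are assumed inputs.
    fn delay_after_slot_start(
        seen_unix: Duration,
        genesis_unix: Duration,
        slot: u64,
        seconds_per_slot: u64,
    ) -> Duration {
        let slot_start = genesis_unix + Duration::from_secs(slot * seconds_per_slot);
        seen_unix.saturating_sub(slot_start)
    }

    fn main() {
        let genesis = Duration::from_secs(1_600_000_000);
        let seen = Duration::from_secs(1_600_000_000 + 5 * 12 + 3);
        assert_eq!(delay_after_slot_start(seen, genesis, 5, 12), Duration::from_secs(3));
    }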
- let delay = get_slot_delay_ms( - seen_timestamp, - blobs_sidecar.message.beacon_block_slot, - &chain.slot_clock, - ); - metrics::observe_duration(&metrics::HTTP_API_BLOB_BROADCAST_DELAY_TIMES, delay); - - //FIXME(sean) process blobs - // match chain - // .process_block(blobs_sidecar.clone(), CountUnrealized::True) - // .await - // { - // Ok(root) => { - // info!( - // log, - // "Valid block from HTTP API"; - // "block_delay" => ?delay, - // "root" => format!("{}", root), - // "proposer_index" => block.message().proposer_index(), - // "slot" => block.slot(), - // ); - // - // // Notify the validator monitor. - // chain.validator_monitor.read().register_api_block( - // seen_timestamp, - // blobs_sidecar.message(), - // root, - // &chain.slot_clock, - // ); - // - // // Update the head since it's likely this block will become the new - // // head. - // chain.recompute_head_at_current_slot().await; - // - // // Perform some logging to inform users if their blocks are being produced - // // late. - // // - // // Check to see the thresholds are non-zero to avoid logging errors with small - // // slot times (e.g., during testing) - // let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); - // let error_threshold = crit_threshold / 2; - // if delay >= crit_threshold { - // crit!( - // log, - // "Block was broadcast too late"; - // "msg" => "system may be overloaded, block likely to be orphaned", - // "delay_ms" => delay.as_millis(), - // "slot" => block.slot(), - // "root" => ?root, - // ) - // } else if delay >= error_threshold { - // error!( - // log, - // "Block broadcast was delayed"; - // "msg" => "system may be overloaded, block may be orphaned", - // "delay_ms" => delay.as_millis(), - // "slot" => block.slot(), - // "root" => ?root, - // ) - // } - // - // Ok(()) - // } - // Err(BlockError::BlockIsAlreadyKnown) => { - // info!( - // log, - // "Block from HTTP API already known"; - // "block" => ?block.canonical_root(), - // "slot" => block.slot(), - // ); - // Ok(()) - // } - // Err(BlockError::RepeatProposal { proposer, slot }) => { - // warn!( - // log, - // "Block ignored due to repeat proposal"; - // "msg" => "this can happen when a VC uses fallback BNs. 
\ - // whilst this is not necessarily an error, it can indicate issues with a BN \ - // or between the VC and BN.", - // "slot" => slot, - // "proposer" => proposer, - // ); - // Ok(()) - // } - // Err(e) => { - // let msg = format!("{:?}", e); - // error!( - // log, - // "Invalid block provided to HTTP API"; - // "reason" => &msg - // ); - // Err(warp_utils::reject::broadcast_without_import(msg)) - // } - // } - Ok(()) -} diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 09159d3c0c7..0167da8d47d 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -1,7 +1,7 @@ use crate::metrics; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized}; -use lighthouse_network::PubsubMessage; +use lighthouse_network::{PubsubMessage, SignedBeaconBlockAndBlobsSidecar}; use network::NetworkMessage; use slog::{crit, error, info, warn, Logger}; use slot_clock::SlotClock; @@ -9,8 +9,8 @@ use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ - AbstractExecPayload, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, FullPayload, - Hash256, SignedBeaconBlock, + AbstractExecPayload, BlindedPayload, BlobsSidecar, EthSpec, ExecPayload, ExecutionBlockHash, + FullPayload, Hash256, SignedBeaconBlock, SignedBeaconBlockEip4844, }; use warp::Rejection; @@ -18,6 +18,7 @@ use warp::Rejection; pub async fn publish_block( block_root: Option, block: Arc>, + blobs_sidecar: Option>>, chain: Arc>, network_tx: &UnboundedSender>, log: Logger, @@ -26,7 +27,24 @@ pub async fn publish_block( // Send the block, regardless of whether or not it is valid. The API // specification is very clear that this is the desired behaviour. - crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?; + + let message = match &*block { + SignedBeaconBlock::Eip4844(block) => { + if let Some(sidecar) = blobs_sidecar { + PubsubMessage::BeaconBlockAndBlobsSidecars(Arc::new( + SignedBeaconBlockAndBlobsSidecar { + beacon_block: block.clone(), + blobs_sidecar: (*sidecar).clone(), + }, + )) + } else { + //TODO(pawan): return an empty sidecar instead + return Err(warp_utils::reject::broadcast_without_import(format!(""))); + } + } + _ => PubsubMessage::BeaconBlock(block.clone()), + }; + crate::publish_pubsub_message(network_tx, message)?; // Determine the delay after the start of the slot, register it with metrics. 
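// Not from the patch: a hedged sketch of the publish-side decision taken in `publish_block`
// above -- from the EIP-4844 fork a block is only gossiped together with its blobs sidecar,
// while earlier forks keep the block-only message (the hunk above currently rejects a missing
// sidecar, with a TODO to send an empty one instead). `Block`, `GossipMessage` and
// `message_for_publish` are illustrative stand-ins for SignedBeaconBlock, PubsubMessage and
// the real function.
enum Block {
    PreEip4844(Vec<u8>),
    Eip4844(Vec<u8>),
}

enum GossipMessage {
    BlockOnly(Vec<u8>),
    BlockAndBlobs { block: Vec<u8>, blobs_sidecar: Vec<u8> },
}

fn message_for_publish(
    block: Block,
    blobs_sidecar: Option<Vec<u8>>,
) -> Result<GossipMessage, String> {
    match block {
        // EIP-4844: the coupled topic carries the block and its sidecar together.
        Block::Eip4844(block) => match blobs_sidecar {
            Some(blobs_sidecar) => Ok(GossipMessage::BlockAndBlobs { block, blobs_sidecar }),
            None => Err("refusing to publish an EIP-4844 block without its blobs sidecar".into()),
        },
        // Pre-4844 forks are unchanged: the block goes out on the beacon_block topic alone.
        Block::PreEip4844(block) => Ok(GossipMessage::BlockOnly(block)),
    }
}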
let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); @@ -135,6 +153,7 @@ pub async fn publish_blinded_block( publish_block::( Some(block_root), Arc::new(full_block), + None, chain, network_tx, log, diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index e5af0a74999..a8a4af1c8b4 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -13,6 +13,8 @@ serde = { version = "1.0.116", features = ["derive"] } serde_derive = "1.0.116" eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" +tree_hash = "0.4.1" +tree_hash_derive = "0.4.0" slog = { version = "2.5.2", features = ["max_level_trace"] } lighthouse_version = { path = "../../common/lighthouse_version" } tokio = { version = "1.14.0", features = ["time", "macros"] } diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index be4da809cb2..d7733f7cd3d 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -15,6 +15,7 @@ pub mod peer_manager; pub mod rpc; pub mod types; +pub use crate::types::SignedBeaconBlockAndBlobsSidecar; pub use config::gossip_max_size; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index dc1110954a5..8511d262080 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -21,7 +21,6 @@ use tokio_util::{ compat::{Compat, FuturesAsyncReadCompatExt}, }; use types::BlobsSidecar; -use types::SignedBlobsSidecar; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Blob, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, @@ -108,13 +107,11 @@ lazy_static! { .as_ssz_bytes() .len(); - pub static ref SIGNED_BLOBS_SIDECAR_MIN: usize = SignedBlobsSidecar { - message: BlobsSidecar::::empty(), - signature: Signature::empty(), - }.as_ssz_bytes() + pub static ref BLOBS_SIDECAR_MIN: usize = BlobsSidecar::::empty() + .as_ssz_bytes() .len(); - pub static ref SIGNED_BLOBS_SIDECAR_MAX: usize = *SIGNED_BLOBS_SIDECAR_MIN // Max size of variable length `blobs` field + pub static ref BLOBS_SIDECAR_MAX: usize = *BLOBS_SIDECAR_MIN // Max size of variable length `blobs` field + (MainnetEthSpec::max_blobs_per_block() * as Encode>::ssz_fixed_len()); } @@ -343,9 +340,7 @@ impl ProtocolId { Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), - Protocol::BlobsByRange => { - RpcLimits::new(*SIGNED_BLOBS_SIDECAR_MIN, *SIGNED_BLOBS_SIDECAR_MAX) - } + Protocol::BlobsByRange => RpcLimits::new(*BLOBS_SIDECAR_MIN, *BLOBS_SIDECAR_MAX), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 1c6ffd022d0..665e383f206 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -21,7 +21,7 @@ pub struct GossipCache { /// Timeout for blocks. beacon_block: Option, /// Timeout for blobs. 
- blobs_sidecar: Option, + beacon_block_and_blobs_sidecar: Option, /// Timeout for aggregate attestations. aggregates: Option, /// Timeout for attestations. @@ -44,7 +44,7 @@ pub struct GossipCacheBuilder { /// Timeout for blocks. beacon_block: Option, /// Timeout for blob sidecars. - blobs_sidecar: Option, + beacon_block_and_blobs_sidecar: Option, /// Timeout for aggregate attestations. aggregates: Option, /// Timeout for attestations. @@ -121,7 +121,7 @@ impl GossipCacheBuilder { let GossipCacheBuilder { default_timeout, beacon_block, - blobs_sidecar, + beacon_block_and_blobs_sidecar, aggregates, attestation, voluntary_exit, @@ -134,7 +134,7 @@ impl GossipCacheBuilder { expirations: DelayQueue::default(), topic_msgs: HashMap::default(), beacon_block: beacon_block.or(default_timeout), - blobs_sidecar: blobs_sidecar.or(default_timeout), + beacon_block_and_blobs_sidecar: beacon_block_and_blobs_sidecar.or(default_timeout), aggregates: aggregates.or(default_timeout), attestation: attestation.or(default_timeout), voluntary_exit: voluntary_exit.or(default_timeout), @@ -157,7 +157,7 @@ impl GossipCache { pub fn insert(&mut self, topic: GossipTopic, data: Vec) { let expire_timeout = match topic.kind() { GossipKind::BeaconBlock => self.beacon_block, - GossipKind::BlobsSidecar => self.blobs_sidecar, + GossipKind::BeaconBlocksAndBlobsSidecar => self.beacon_block_and_blobs_sidecar, GossipKind::BeaconAggregateAndProof => self.aggregates, GossipKind::Attestation(_) => self.attestation, GossipKind::VoluntaryExit => self.voluntary_exit, diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index ad02e07fb70..404311ac167 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -13,7 +13,7 @@ pub type EnrSyncCommitteeBitfield = BitVector<::SyncCommitteeSu pub type Enr = discv5::enr::Enr; pub use globals::NetworkGlobals; -pub use pubsub::{PubsubMessage, SnappyTransform}; +pub use pubsub::{PubsubMessage, SignedBeaconBlockAndBlobsSidecar, SnappyTransform}; pub use subnet::{Subnet, SubnetDiscovery}; pub use sync_state::{BackFillState, SyncState}; pub use topics::{subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS}; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 6f6de68ecb4..1b14c93c094 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -3,25 +3,37 @@ use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::TopicHash; use libp2p::gossipsub::{DataTransform, GossipsubMessage, RawGossipsubMessage}; +use serde_derive::{Deserialize, Serialize}; use snap::raw::{decompress_len, Decoder, Encoder}; use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; use std::boxed::Box; use std::io::{Error, ErrorKind}; use std::sync::Arc; -use types::signed_blobs_sidecar::SignedBlobsSidecar; +use tree_hash_derive::TreeHash; use types::{ - Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing, + Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ForkContext, ForkName, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, SignedBeaconBlockEip4844, SignedBeaconBlockMerge, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; +/// TODO(pawan): move this to consensus/types? 
strictly not a consensus type +#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq)] +#[serde(bound = "T: EthSpec")] +pub struct SignedBeaconBlockAndBlobsSidecar { + // TODO(pawan): switch to a SignedBeaconBlock and use ssz offsets for decoding to make this + // future proof? + pub beacon_block: SignedBeaconBlockEip4844, + pub blobs_sidecar: BlobsSidecar, +} + #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. BeaconBlock(Arc>), - /// Gossipsub message providing notification of a new blobs sidecar. - BlobsSidecars(Arc>), + /// Gossipsub message providing notification of a new SignedBeaconBlock coupled with a blobs sidecar. + BeaconBlockAndBlobsSidecars(Arc>), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. AggregateAndProofAttestation(Box>), /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id. @@ -109,7 +121,9 @@ impl PubsubMessage { pub fn kind(&self) -> GossipKind { match self { PubsubMessage::BeaconBlock(_) => GossipKind::BeaconBlock, - PubsubMessage::BlobsSidecars(_) => GossipKind::BlobsSidecar, + PubsubMessage::BeaconBlockAndBlobsSidecars(_) => { + GossipKind::BeaconBlocksAndBlobsSidecar + } PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof, PubsubMessage::Attestation(attestation_data) => { GossipKind::Attestation(attestation_data.0) @@ -171,10 +185,12 @@ impl PubsubMessage { SignedBeaconBlockMerge::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), - Some(ForkName::Eip4844) => SignedBeaconBlock::::Eip4844( - SignedBeaconBlockEip4844::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ), + Some(ForkName::Eip4844) => { + return Err( + "beacon_block topic is not used from eip4844 fork onwards" + .to_string(), + ) + } Some(ForkName::Capella) => SignedBeaconBlock::::Capella( SignedBeaconBlockCapella::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, @@ -188,10 +204,29 @@ impl PubsubMessage { }; Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) } - GossipKind::BlobsSidecar => { - let blobs_sidecar = SignedBlobsSidecar::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?; - Ok(PubsubMessage::BlobsSidecars(Arc::new(blobs_sidecar))) + GossipKind::BeaconBlocksAndBlobsSidecar => { + match fork_context.from_context_bytes(gossip_topic.fork_digest) { + Some(ForkName::Eip4844) => { + let block_and_blobs_sidecar = + SignedBeaconBlockAndBlobsSidecar::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::BeaconBlockAndBlobsSidecars(Arc::new( + block_and_blobs_sidecar, + ))) + } + Some( + ForkName::Base + | ForkName::Altair + | ForkName::Merge + | ForkName::Capella, + ) + | None => { + return Err(format!( + "beacon_blobs_and_sidecar topic invalid for given fork digest {:?}", + gossip_topic.fork_digest + )) + } + } } GossipKind::VoluntaryExit => { let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data) @@ -237,7 +272,7 @@ impl PubsubMessage { // messages for us. 
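// Not from the patch: a hedged sketch of the fork gating applied when decoding gossip above --
// the plain block topic stops being decodable at the EIP-4844 fork, and the coupled
// block-and-blobs topic is only decodable at that fork. The trimmed `Fork` and `Topic` enums
// and `topic_allowed` are illustrative, not types from the codebase.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Fork {
    Base,
    Altair,
    Merge,
    Capella,
    Eip4844,
}

#[derive(Debug, Clone, Copy, PartialEq)]
enum Topic {
    BeaconBlock,
    BeaconBlockAndBlobsSidecar,
}

fn topic_allowed(topic: Topic, fork: Fork) -> bool {
    match topic {
        // "beacon_block topic is not used from eip4844 fork onwards".
        Topic::BeaconBlock => fork != Fork::Eip4844,
        // The coupled topic only exists for the EIP-4844 fork digest.
        Topic::BeaconBlockAndBlobsSidecar => fork == Fork::Eip4844,
    }
}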
match &self { PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(), - PubsubMessage::BlobsSidecars(data) => data.as_ssz_bytes(), + PubsubMessage::BeaconBlockAndBlobsSidecars(data) => data.as_ssz_bytes(), PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(), PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(), PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(), @@ -258,11 +293,11 @@ impl std::fmt::Display for PubsubMessage { block.slot(), block.message().proposer_index() ), - PubsubMessage::BlobsSidecars(blobs) => write!( + PubsubMessage::BeaconBlockAndBlobsSidecars(block_and_blob) => write!( f, - "Blobs Sidecar: slot: {}, blobs: {}", - blobs.message.beacon_block_slot, - blobs.message.blobs.len(), + "Beacon block and Blobs Sidecar: slot: {}, blobs: {}", + block_and_blob.beacon_block.message.slot, + block_and_blob.blobs_sidecar.blobs.len(), ), PubsubMessage::AggregateAndProofAttestation(att) => write!( f, diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 3e772648099..1be94a93f12 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -11,7 +11,7 @@ use crate::Subnet; pub const TOPIC_PREFIX: &str = "eth2"; pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; -pub const BLOBS_SIDECAR_TOPIC: &str = "blobs_sidecar"; +pub const BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC: &str = "beacon_blocks_and_blobs_sidecar"; pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof"; pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_"; pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; @@ -22,7 +22,7 @@ pub const SYNC_COMMITTEE_PREFIX_TOPIC: &str = "sync_committee_"; pub const CORE_TOPICS: [GossipKind; 7] = [ GossipKind::BeaconBlock, - GossipKind::BlobsSidecar, + GossipKind::BeaconBlocksAndBlobsSidecar, GossipKind::BeaconAggregateAndProof, GossipKind::VoluntaryExit, GossipKind::ProposerSlashing, @@ -49,8 +49,8 @@ pub struct GossipTopic { pub enum GossipKind { /// Topic for publishing beacon blocks. BeaconBlock, - /// Topic for publishing blob sidecars. - BlobsSidecar, + /// Topic for publishing beacon block coupled with blob sidecars. + BeaconBlocksAndBlobsSidecar, /// Topic for publishing aggregate attestations and proofs. BeaconAggregateAndProof, /// Topic for publishing raw attestations on a particular subnet. 
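// Not from the patch: a hedged sketch of the kind <-> topic-string mapping that the topics.rs
// hunks below and above extend; keeping both directions together shows why a new kind (here the
// coupled block-and-blobs topic) has to be added to decode, encode and CORE_TOPICS alike.
// `Kind` is a trimmed, illustrative stand-in for GossipKind; only the two topic constants shown
// in the hunks are used.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Kind {
    BeaconBlock,
    BeaconBlocksAndBlobsSidecar,
}

const BEACON_BLOCK_TOPIC: &str = "beacon_block";
const BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC: &str = "beacon_blocks_and_blobs_sidecar";

fn kind_to_topic(kind: Kind) -> &'static str {
    match kind {
        Kind::BeaconBlock => BEACON_BLOCK_TOPIC,
        Kind::BeaconBlocksAndBlobsSidecar => BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC,
    }
}

fn topic_to_kind(topic: &str) -> Option<Kind> {
    match topic {
        BEACON_BLOCK_TOPIC => Some(Kind::BeaconBlock),
        BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC => Some(Kind::BeaconBlocksAndBlobsSidecar),
        _ => None,
    }
}

#[cfg(test)]
mod round_trip {
    use super::*;

    #[test]
    fn every_kind_round_trips() {
        for kind in [Kind::BeaconBlock, Kind::BeaconBlocksAndBlobsSidecar] {
            assert_eq!(topic_to_kind(kind_to_topic(kind)), Some(kind));
        }
    }
}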
@@ -136,6 +136,7 @@ impl GossipTopic { let kind = match topic_parts[3] { BEACON_BLOCK_TOPIC => GossipKind::BeaconBlock, BEACON_AGGREGATE_AND_PROOF_TOPIC => GossipKind::BeaconAggregateAndProof, + BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC => GossipKind::BeaconBlocksAndBlobsSidecar, SIGNED_CONTRIBUTION_AND_PROOF_TOPIC => GossipKind::SignedContributionAndProof, VOLUNTARY_EXIT_TOPIC => GossipKind::VoluntaryExit, PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing, @@ -182,7 +183,7 @@ impl From for String { let kind = match topic.kind { GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), - GossipKind::BlobsSidecar => BLOBS_SIDECAR_TOPIC.into(), + GossipKind::BeaconBlocksAndBlobsSidecar => BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC.into(), GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), @@ -211,7 +212,7 @@ impl std::fmt::Display for GossipTopic { let kind = match self.kind { GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), - GossipKind::BlobsSidecar => BLOBS_SIDECAR_TOPIC.into(), + GossipKind::BeaconBlocksAndBlobsSidecar => BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC.into(), GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), @@ -293,6 +294,7 @@ mod tests { VoluntaryExit, ProposerSlashing, AttesterSlashing, + BeaconBlocksAndBlobsSidecar, ] .iter() { diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 87e092332f5..dd28b15c0cf 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -46,6 +46,7 @@ use derivative::Derivative; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; use lighthouse_network::rpc::methods::BlobsByRangeRequest; +use lighthouse_network::SignedBeaconBlockAndBlobsSidecar; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, @@ -61,7 +62,6 @@ use std::time::Duration; use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; use tokio::sync::mpsc; -use types::signed_blobs_sidecar::SignedBlobsSidecar; use types::{ Attestation, AttesterSlashing, Hash256, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, @@ -80,8 +80,6 @@ mod worker; use crate::beacon_processor::work_reprocessing_queue::QueuedGossipBlock; pub use worker::{ChainSegmentProcessId, GossipAggregatePackage, GossipAttestationPackage}; -use self::work_reprocessing_queue::QueuedBlobsSidecar; - /// The maximum size of the channel for work events to the `BeaconProcessor`. /// /// Setting this too low will cause consensus messages to be dropped. @@ -117,9 +115,7 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024; const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; //FIXME(sean) verify -const MAX_GOSSIP_BLOB_QUEUE_LEN: usize = 1_024; -//FIXME(sean) verify -const MAX_BLOBS_SIDECAR_REPROCESS_QUEUE_LEN: usize = 1_024; +const MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN: usize = 1_024; /// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but /// within acceptable clock disparity) that will be queued before we start dropping them. 
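// Not from the patch: a hedged sketch of the bounded work-queue idea behind
// MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN above. `BoundedFifo` is a generic stand-in, not the
// crate's FifoQueue: the real queue also logs and records metrics, and its exact overflow
// policy is not shown in these hunks -- here the incoming item is handed back once the queue
// is full.
use std::collections::VecDeque;

struct BoundedFifo<T> {
    items: VecDeque<T>,
    max_len: usize,
}

impl<T> BoundedFifo<T> {
    fn new(max_len: usize) -> Self {
        Self { items: VecDeque::new(), max_len }
    }

    /// Returns the item to the caller instead of queueing it when the queue is full.
    fn push(&mut self, item: T) -> Result<(), T> {
        if self.items.len() >= self.max_len {
            return Err(item);
        }
        self.items.push_back(item);
        Ok(())
    }

    fn pop(&mut self) -> Option<T> {
        self.items.pop_front()
    }
}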
@@ -195,7 +191,7 @@ pub const GOSSIP_ATTESTATION_BATCH: &str = "gossip_attestation_batch"; pub const GOSSIP_AGGREGATE: &str = "gossip_aggregate"; pub const GOSSIP_AGGREGATE_BATCH: &str = "gossip_aggregate_batch"; pub const GOSSIP_BLOCK: &str = "gossip_block"; -pub const GOSSIP_BLOBS_SIDECAR: &str = "gossip_blobs_sidecar"; +pub const GOSSIP_BLOCK_AND_BLOBS_SIDECAR: &str = "gossip_block_and_blobs_sidecar"; pub const DELAYED_IMPORT_BLOCK: &str = "delayed_import_block"; pub const GOSSIP_VOLUNTARY_EXIT: &str = "gossip_voluntary_exit"; pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing"; @@ -210,7 +206,6 @@ pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; -pub const UNKNOWN_BLOBS_SIDECAR: &str = "unknown_blobs_sidecar"; /// A simple first-in-first-out queue with a maximum length. struct FifoQueue { @@ -415,19 +410,20 @@ impl WorkEvent { } /// Create a new `Work` event for some blobs sidecar. - pub fn gossip_blobs_sidecar( + pub fn gossip_block_and_blobs_sidecar( message_id: MessageId, peer_id: PeerId, - _peer_client: Client, - blobs: Arc>, + peer_client: Client, + block_and_blobs: Arc>, seen_timestamp: Duration, ) -> Self { Self { drop_during_sync: false, - work: Work::GossipBlobsSidecar { + work: Work::GossipBlockAndBlobsSidecar { message_id, peer_id, - blobs, + peer_client, + block_and_blobs, seen_timestamp, }, } @@ -674,20 +670,6 @@ impl std::convert::From> for WorkEvent { seen_timestamp, }, }, - ReadyWork::BlobsSidecar(QueuedBlobsSidecar { - peer_id, - message_id, - blobs_sidecar, - seen_timestamp, - }) => Self { - drop_during_sync: true, - work: Work::UnknownBlobsSidecar { - message_id, - peer_id, - blobs: blobs_sidecar, - seen_timestamp, - }, - }, } } } @@ -737,16 +719,11 @@ pub enum Work { block: Arc>, seen_timestamp: Duration, }, - GossipBlobsSidecar { + GossipBlockAndBlobsSidecar { message_id: MessageId, peer_id: PeerId, - blobs: Arc>, - seen_timestamp: Duration, - }, - UnknownBlobsSidecar { - message_id: MessageId, - peer_id: PeerId, - blobs: Arc>, + peer_client: Client, + block_and_blobs: Arc>, seen_timestamp: Duration, }, DelayedImportBlock { @@ -823,7 +800,7 @@ impl Work { Work::GossipAggregate { .. } => GOSSIP_AGGREGATE, Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH, Work::GossipBlock { .. } => GOSSIP_BLOCK, - Work::GossipBlobsSidecar { .. } => GOSSIP_BLOBS_SIDECAR, + Work::GossipBlockAndBlobsSidecar { .. } => GOSSIP_BLOCK_AND_BLOBS_SIDECAR, Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK, Work::GossipVoluntaryExit { .. } => GOSSIP_VOLUNTARY_EXIT, Work::GossipProposerSlashing { .. } => GOSSIP_PROPOSER_SLASHING, @@ -838,7 +815,6 @@ impl Work { Work::BlobsByRangeRequest { .. } => BLOBS_BY_RANGE_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, - Work::UnknownBlobsSidecar { .. 
} => UNKNOWN_BLOBS_SIDECAR, } } } @@ -955,7 +931,6 @@ impl BeaconProcessor { LifoQueue::new(MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN); let mut unknown_block_attestation_queue = LifoQueue::new(MAX_UNAGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN); - let mut unknown_blobs_sidecar_queue = LifoQueue::new(MAX_BLOBS_SIDECAR_REPROCESS_QUEUE_LEN); let mut sync_message_queue = LifoQueue::new(MAX_SYNC_MESSAGE_QUEUE_LEN); let mut sync_contribution_queue = LifoQueue::new(MAX_SYNC_CONTRIBUTION_QUEUE_LEN); @@ -976,7 +951,8 @@ impl BeaconProcessor { let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); - let mut gossip_blobs_sidecar_queue = FifoQueue::new(MAX_GOSSIP_BLOB_QUEUE_LEN); + let mut gossip_block_and_blobs_sidecar_queue = + FifoQueue::new(MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN); let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN); let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); @@ -1086,7 +1062,7 @@ impl BeaconProcessor { } else if let Some(item) = gossip_block_queue.pop() { self.spawn_worker(item, toolbox); //FIXME(sean) - } else if let Some(item) = gossip_blobs_sidecar_queue.pop() { + } else if let Some(item) = gossip_block_and_blobs_sidecar_queue.pop() { self.spawn_worker(item, toolbox); // Check the aggregates, *then* the unaggregates since we assume that // aggregates are more valuable to local validators and effectively give us @@ -1292,8 +1268,8 @@ impl BeaconProcessor { Work::GossipBlock { .. } => { gossip_block_queue.push(work, work_id, &self.log) } - Work::GossipBlobsSidecar { .. } => { - gossip_blobs_sidecar_queue.push(work, work_id, &self.log) + Work::GossipBlockAndBlobsSidecar { .. } => { + gossip_block_and_blobs_sidecar_queue.push(work, work_id, &self.log) } Work::DelayedImportBlock { .. } => { delayed_block_queue.push(work, work_id, &self.log) @@ -1337,9 +1313,6 @@ impl BeaconProcessor { Work::UnknownBlockAggregate { .. } => { unknown_block_aggregate_queue.push(work) } - Work::UnknownBlobsSidecar { .. } => { - unknown_blobs_sidecar_queue.push(work) - } } } } @@ -1556,19 +1529,22 @@ impl BeaconProcessor { /* * Verification for blobs sidecars received on gossip. */ - Work::GossipBlobsSidecar { + Work::GossipBlockAndBlobsSidecar { message_id, peer_id, - blobs, + peer_client, + block_and_blobs, seen_timestamp, } => task_spawner.spawn_async(async move { - worker.process_gossip_blob( - message_id, - peer_id, - blobs, - Some(work_reprocessing_tx), - seen_timestamp, - ) + worker + .process_gossip_block_and_blobs_sidecar( + message_id, + peer_id, + peer_client, + block_and_blobs, + seen_timestamp, + ) + .await }), /* * Import for blocks that we received earlier than their intended slot. 
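// Not from the patch: a hedged sketch of the priority order used by the event loop above --
// block work (and now coupled block-and-blobs work) is drained before aggregate and
// unaggregated attestation work. `drain_one` is illustrative; the closure stands in for
// spawn_worker and the slice ordering stands in for the long if/else chain.
use std::collections::VecDeque;

/// Pop one item from the highest-priority non-empty queue and hand it to `spawn`.
fn drain_one<T>(
    queues_in_priority_order: &mut [&mut VecDeque<T>],
    mut spawn: impl FnMut(T),
) -> bool {
    for queue in queues_in_priority_order.iter_mut() {
        if let Some(item) = queue.pop_front() {
            spawn(item);
            return true;
        }
    }
    false
}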
@@ -1755,14 +1731,6 @@ impl BeaconProcessor { seen_timestamp, ) }), - Work::UnknownBlobsSidecar { - message_id, - peer_id, - blobs, - seen_timestamp, - } => task_spawner.spawn_blocking(move || { - worker.process_gossip_blob(message_id, peer_id, blobs, None, seen_timestamp) - }), }; } } diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index b08542eeb54..2aeec11c325 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -30,10 +30,7 @@ use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::error::Error as TimeError; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; -use types::{ - Attestation, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, SignedBlobsSidecar, - SubnetId, -}; +use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, SubnetId}; const TASK_NAME: &str = "beacon_processor_reprocess_queue"; const GOSSIP_BLOCKS: &str = "gossip_blocks"; @@ -47,10 +44,6 @@ const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); /// For how long to queue aggregated and unaggregated attestations for re-processing. pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); -/// For how long to queue blob sidecars for re-processing. -/// TODO: rethink duration -pub const QUEUED_BLOBS_SIDECARS_DELAY: Duration = Duration::from_secs(6); - /// For how long to queue rpc blocks before sending them back for reprocessing. pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(3); @@ -62,10 +55,6 @@ const MAXIMUM_QUEUED_BLOCKS: usize = 16; /// How many attestations we keep before new ones get dropped. const MAXIMUM_QUEUED_ATTESTATIONS: usize = 16_384; -/// TODO: fix number -/// How many blobs we keep before new ones get dropped. -const MAXIMUM_QUEUED_BLOB_SIDECARS: usize = 16_384; - /// Messages that the scheduler can receive. pub enum ReprocessQueueMessage { /// A block that has been received early and we should queue for later processing. @@ -80,8 +69,6 @@ pub enum ReprocessQueueMessage { UnknownBlockUnaggregate(QueuedUnaggregate), /// An aggregated attestation that references an unknown block. UnknownBlockAggregate(QueuedAggregate), - /// A blob sidecar that references an unknown block. - UnknownBlobSidecar(QueuedBlobsSidecar), } /// Events sent by the scheduler once they are ready for re-processing. @@ -90,7 +77,6 @@ pub enum ReadyWork { RpcBlock(QueuedRpcBlock), Unaggregate(QueuedUnaggregate), Aggregate(QueuedAggregate), - BlobsSidecar(QueuedBlobsSidecar), } /// An Attestation for which the corresponding block was not seen while processing, queued for @@ -132,15 +118,6 @@ pub struct QueuedRpcBlock { pub should_process: bool, } -/// A blob sidecar for which the corresponding block was not seen while processing, queued for -/// later. -pub struct QueuedBlobsSidecar { - pub peer_id: PeerId, - pub message_id: MessageId, - pub blobs_sidecar: Arc>, - pub seen_timestamp: Duration, -} - /// Unifies the different messages processed by the block delay queue. enum InboundEvent { /// A gossip block that was queued for later processing and is ready for import. @@ -150,8 +127,6 @@ enum InboundEvent { ReadyRpcBlock(QueuedRpcBlock), /// An aggregated or unaggregated attestation is ready for re-processing. 
ReadyAttestation(QueuedAttestationId), - /// A blob sidecar is ready for re-processing. - ReadyBlobsSidecar(QueuedBlobsSidecarId), /// A `DelayQueue` returned an error. DelayQueueError(TimeError, &'static str), /// A message sent to the `ReprocessQueue` @@ -172,7 +147,6 @@ struct ReprocessQueue { rpc_block_delay_queue: DelayQueue>, /// Queue to manage scheduled attestations. attestations_delay_queue: DelayQueue, - blobs_sidecar_delay_queue: DelayQueue, /* Queued items */ /// Queued blocks. @@ -181,19 +155,15 @@ struct ReprocessQueue { queued_aggregates: FnvHashMap, DelayKey)>, /// Queued attestations. queued_unaggregates: FnvHashMap, DelayKey)>, - queued_blob_sidecars: FnvHashMap, DelayKey)>, /// Attestations (aggregated and unaggregated) per root. awaiting_attestations_per_root: HashMap>, - awaiting_blobs_sidecars_per_root: HashMap>, /* Aux */ /// Next attestation id, used for both aggregated and unaggregated attestations next_attestation: usize, - next_sidecar: usize, early_block_debounce: TimeLatch, rpc_block_debounce: TimeLatch, attestation_delay_debounce: TimeLatch, - blobs_sidecar_debounce: TimeLatch, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -202,9 +172,6 @@ enum QueuedAttestationId { Unaggregate(usize), } -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -struct QueuedBlobsSidecarId(usize); - impl QueuedAggregate { pub fn beacon_block_root(&self) -> &Hash256 { &self.attestation.message.aggregate.data.beacon_block_root @@ -268,21 +235,6 @@ impl Stream for ReprocessQueue { Poll::Ready(None) | Poll::Pending => (), } - match self.blobs_sidecar_delay_queue.poll_expired(cx) { - Poll::Ready(Some(Ok(id))) => { - return Poll::Ready(Some(InboundEvent::ReadyBlobsSidecar(id.into_inner()))); - } - Poll::Ready(Some(Err(e))) => { - return Poll::Ready(Some(InboundEvent::DelayQueueError( - e, - "blobs_sidecar_queue", - ))); - } - // `Poll::Ready(None)` means that there are no more entries in the delay queue and we - // will continue to get this result until something else is added into the queue. - Poll::Ready(None) | Poll::Pending => (), - } - // Last empty the messages channel. match self.work_reprocessing_rx.poll_recv(cx) { Poll::Ready(Some(message)) => return Poll::Ready(Some(InboundEvent::Msg(message))), @@ -312,19 +264,14 @@ pub fn spawn_reprocess_scheduler( gossip_block_delay_queue: DelayQueue::new(), rpc_block_delay_queue: DelayQueue::new(), attestations_delay_queue: DelayQueue::new(), - blobs_sidecar_delay_queue: DelayQueue::new(), queued_gossip_block_roots: HashSet::new(), queued_aggregates: FnvHashMap::default(), queued_unaggregates: FnvHashMap::default(), - queued_blob_sidecars: FnvHashMap::default(), awaiting_attestations_per_root: HashMap::new(), - awaiting_blobs_sidecars_per_root: HashMap::new(), next_attestation: 0, - next_sidecar: 0, early_block_debounce: TimeLatch::default(), rpc_block_debounce: TimeLatch::default(), attestation_delay_debounce: TimeLatch::default(), - blobs_sidecar_debounce: TimeLatch::default(), }; executor.spawn( @@ -526,39 +473,6 @@ impl ReprocessQueue { self.next_attestation += 1; } - InboundEvent::Msg(UnknownBlobSidecar(queued_blob_sidecar)) => { - if self.blobs_sidecar_delay_queue.len() >= MAXIMUM_QUEUED_BLOB_SIDECARS { - if self.blobs_sidecar_debounce.elapsed() { - error!( - log, - "Blobs sidecar queue is full"; - "queue_size" => MAXIMUM_QUEUED_BLOB_SIDECARS, - "msg" => "check system clock" - ); - } - // Drop the attestation. - return; - } - - let id = QueuedBlobsSidecarId(self.next_sidecar); - - // Register the delay. 
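// Not from the patch: a hedged sketch of the reprocess-queue shape that the removed
// blob-sidecar plumbing above mirrored for attestations -- deferred work is keyed by the block
// root it is waiting on and is released early when that block is imported, or surfaced again
// once its delay expires. `ReprocessQueue` here is illustrative; std types stand in for
// tokio_util's DelayQueue, the FNV maps and the crate's channels.
use std::collections::HashMap;
use std::time::{Duration, Instant};

struct WaitingWork<T> {
    work: T,
    deadline: Instant,
}

struct ReprocessQueue<T> {
    waiting_per_root: HashMap<[u8; 32], Vec<WaitingWork<T>>>,
}

impl<T> ReprocessQueue<T> {
    fn new() -> Self {
        Self { waiting_per_root: HashMap::new() }
    }

    /// Queue `work` until `root` is imported or `delay` elapses.
    fn defer(&mut self, root: [u8; 32], work: T, delay: Duration) {
        self.waiting_per_root
            .entry(root)
            .or_default()
            .push(WaitingWork { work, deadline: Instant::now() + delay });
    }

    /// A block was imported: everything waiting on it becomes ready immediately.
    fn block_imported(&mut self, root: &[u8; 32]) -> Vec<T> {
        self.waiting_per_root
            .remove(root)
            .unwrap_or_default()
            .into_iter()
            .map(|waiting| waiting.work)
            .collect()
    }

    /// Collect work whose delay has expired (the real code is driven by DelayQueue expiries).
    fn expired(&mut self, now: Instant) -> Vec<T> {
        let mut ready = Vec::new();
        self.waiting_per_root.retain(|_, waiting| {
            let mut keep = Vec::new();
            for item in waiting.drain(..) {
                if item.deadline <= now {
                    ready.push(item.work);
                } else {
                    keep.push(item);
                }
            }
            *waiting = keep;
            !waiting.is_empty()
        });
        ready
    }
}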
- let delay_key = self - .blobs_sidecar_delay_queue - .insert(id, QUEUED_BLOBS_SIDECARS_DELAY); - - // Register this sidecar for the corresponding root. - self.awaiting_blobs_sidecars_per_root - .entry(queued_blob_sidecar.blobs_sidecar.message.beacon_block_root) - .or_default() - .push(id); - - // Store the blob sidecar and its info. - self.queued_blob_sidecars - .insert(self.next_sidecar, (queued_blob_sidecar, delay_key)); - - self.next_sidecar += 1; - } InboundEvent::Msg(BlockImported(root)) => { // Unqueue the attestations we have for this root, if any. if let Some(queued_ids) = self.awaiting_attestations_per_root.remove(&root) { @@ -603,43 +517,6 @@ impl ReprocessQueue { } } } - // Unqueue the blob sidecars we have for this root, if any. - // TODO: merge the 2 data structures. - if let Some(queued_ids) = self.awaiting_blobs_sidecars_per_root.remove(&root) { - for id in queued_ids { - // metrics::inc_counter( - // &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS, - // ); - - if let Some((work, delay_key)) = self - .queued_blob_sidecars - .remove(&id.0) - .map(|(blobs_sidecar, delay_key)| { - (ReadyWork::BlobsSidecar(blobs_sidecar), delay_key) - }) - { - // Remove the delay. - self.blobs_sidecar_delay_queue.remove(&delay_key); - - // Send the work. - if self.ready_work_tx.try_send(work).is_err() { - error!( - log, - "Failed to send scheduled blob sidecar"; - ); - } - } else { - // There is a mismatch between the blob sidecar ids registered for this - // root and the queued blob sidecars. This should never happen. - error!( - log, - "Unknown queued blob sidecar for block root"; - "block_root" => ?root, - "id" => ?id, - ); - } - } - } } // A block that was queued for later processing is now ready to be processed. InboundEvent::ReadyGossipBlock(ready_block) => { @@ -714,40 +591,6 @@ impl ReprocessQueue { } } } - InboundEvent::ReadyBlobsSidecar(queued_blobs_sidecar_id) => { - // metrics::inc_counter( - // &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS, - // ); - - if let Some((root, work)) = self - .queued_blob_sidecars - .remove(&queued_blobs_sidecar_id.0) - .map(|(blobs_sidecar, _delay_key)| { - ( - blobs_sidecar.blobs_sidecar.message.beacon_block_root, - ReadyWork::BlobsSidecar(blobs_sidecar), - ) - }) - { - if self.ready_work_tx.try_send(work).is_err() { - error!( - log, - "Failed to send scheduled attestation"; - ); - } - - if let Some(queued_blob_sidecars) = - self.awaiting_blobs_sidecars_per_root.get_mut(&root) - { - if let Some(index) = queued_blob_sidecars - .iter() - .position(|&id| id == queued_blobs_sidecar_id) - { - queued_blob_sidecars.swap_remove(index); - } - } - } - } } metrics::set_gauge_vec( diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 592b92b0380..5f13e27d7bf 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -1,4 +1,3 @@ -use crate::beacon_processor::work_reprocessing_queue::QueuedBlobsSidecar; use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::store::Error; @@ -11,7 +10,10 @@ use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError, GossipVerifiedBlock, }; -use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; +use lighthouse_network::{ + Client, MessageAcceptance, MessageId, PeerAction, PeerId, 
ReportSource, + SignedBeaconBlockAndBlobsSidecar, +}; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use ssz::Encode; @@ -19,11 +21,10 @@ use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; -use types::signed_blobs_sidecar::SignedBlobsSidecar; use types::{ - Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, - Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, BlobsSidecar, EthSpec, Hash256, IndexedAttestation, + ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, + SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; use super::{ @@ -697,30 +698,15 @@ impl Worker { } #[allow(clippy::too_many_arguments)] - pub fn process_gossip_blob( + pub async fn process_gossip_block_and_blobs_sidecar( self, message_id: MessageId, peer_id: PeerId, - blob: Arc>, - reprocess_tx: Option>>, + peer_client: Client, + block_and_blob: Arc>, seen_timestamp: Duration, ) { - match self.chain.verify_blobs_sidecar_for_gossip(&blob) { - //FIXME(sean) - Ok(verified_sidecar) => { - // Register with validator monitor - // Propagate - // Apply to fork choice - } - Err(error) => self.handle_blobs_verification_failure( - peer_id, - message_id, - reprocess_tx, - error, - blob, - seen_timestamp, - ), - }; + unimplemented!() } /// Process the beacon block received from the gossip network and @@ -2235,82 +2221,7 @@ impl Worker { message_id: MessageId, reprocess_tx: Option>>, error: BlobError, - blobs_sidecar: Arc>, seen_timestamp: Duration, ) { - // TODO: metrics - match &error { - BlobError::FutureSlot { .. } => { - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - } - BlobError::PastSlot { .. } => { - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - } - BlobError::BeaconChainError(_e) => { - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - } - BlobError::BlobOutOfRange { blob_index: _ } => { - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - } - BlobError::InvalidKZGCommitment => { - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - } - BlobError::ProposalSignatureInvalid => { - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - } - BlobError::RepeatSidecar { - proposer: _, - slot: _, - } => { - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - } - BlobError::UnknownHeadBlock { beacon_block_root } => { - debug!( - self.log, - "Blob sidecar for unknown block"; - "peer_id" => %peer_id, - "block" => ?beacon_block_root - ); - if let Some(sender) = reprocess_tx { - // We don't know the block, get the sync manager to handle the block lookup, and - // send the attestation to be scheduled for re-processing. 
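// Not from the patch: a hedged sketch of the propagation policy encoded by the removed blob
// error handler above. Errors that only indicate timing or missing local context are ignored
// (dropped without penalty, possibly retried later), while errors proving the message invalid
// are rejected so gossipsub scoring can penalise the sender. `BlobFailure`, `Acceptance` and
// `acceptance_for` are trimmed, illustrative stand-ins for BlobError and MessageAcceptance.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Acceptance {
    Ignore,
    Reject,
}

enum BlobFailure {
    FutureSlot,
    PastSlot,
    RepeatSidecar,
    UnknownHeadBlock,
    BlobOutOfRange,
    InvalidKzgCommitment,
    InvalidProposalSignature,
}

fn acceptance_for(failure: &BlobFailure) -> Acceptance {
    match failure {
        // Timing or missing-context problems: ignore (the unknown-head case is also queued
        // for re-processing in the removed code).
        BlobFailure::FutureSlot
        | BlobFailure::PastSlot
        | BlobFailure::RepeatSidecar
        | BlobFailure::UnknownHeadBlock => Acceptance::Ignore,
        // Provably invalid content: reject.
        BlobFailure::BlobOutOfRange
        | BlobFailure::InvalidKzgCommitment
        | BlobFailure::InvalidProposalSignature => Acceptance::Reject,
    }
}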
- self.sync_tx - .send(SyncMessage::UnknownBlockHash(peer_id, *beacon_block_root)) - .unwrap_or_else(|_| { - warn!( - self.log, - "Failed to send to sync service"; - "msg" => "UnknownBlockHash" - ) - }); - let msg = ReprocessQueueMessage::UnknownBlobSidecar(QueuedBlobsSidecar { - peer_id, - message_id, - blobs_sidecar, - seen_timestamp, - }); - - if sender.try_send(msg).is_err() { - error!( - self.log, - "Failed to send blob sidecar for re-processing"; - ) - } - } else { - // We shouldn't make any further attempts to process this attestation. - // - // Don't downscore the peer since it's not clear if we requested this head - // block from them or not. - self.propagate_validation_result( - message_id, - peer_id, - MessageAcceptance::Ignore, - ); - } - - return; - } - &BlobError::UnknownValidator(_) => todo!(), - } } } diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index 24a202c4973..cb90813b263 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -236,12 +236,12 @@ impl Router { block, ); } - PubsubMessage::BlobsSidecars(blobs) => { - self.processor.on_blobs_gossip( + PubsubMessage::BeaconBlockAndBlobsSidecars(block_and_blobs) => { + self.processor.on_block_and_blobs_sidecar_gossip( id, peer_id, self.network_globals.client(&peer_id), - blobs, + block_and_blobs, ); } PubsubMessage::VoluntaryExit(exit) => { diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index c2cf483d978..dadaf60c1eb 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -7,7 +7,7 @@ use crate::sync::manager::RequestId as SyncId; use crate::sync::SyncMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::methods::BlobsByRangeRequest; -use lighthouse_network::rpc::*; +use lighthouse_network::{rpc::*, SignedBeaconBlockAndBlobsSidecar}; use lighthouse_network::{ Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response, }; @@ -17,7 +17,6 @@ use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::SyncCommitteeMessage; use tokio::sync::mpsc; -use types::signed_blobs_sidecar::SignedBlobsSidecar; use types::{ Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, @@ -295,18 +294,18 @@ impl Processor { )) } - pub fn on_blobs_gossip( + pub fn on_block_and_blobs_sidecar_gossip( &mut self, message_id: MessageId, peer_id: PeerId, peer_client: Client, - blobs: Arc>, + block_and_blobs: Arc>, ) { - self.send_beacon_processor_work(BeaconWorkEvent::gossip_blobs_sidecar( + self.send_beacon_processor_work(BeaconWorkEvent::gossip_block_and_blobs_sidecar( message_id, peer_id, peer_client, - blobs, + block_and_blobs, timestamp_now(), )) } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index cfba40c0bf0..c0fbef973fe 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -38,7 +38,6 @@ use std::marker::PhantomData; use std::path::Path; use std::sync::Arc; use std::time::Duration; -use types::signed_blobs_sidecar::SignedBlobsSidecar; use types::*; /// On-disk database that stores finalized states efficiently. @@ -62,7 +61,7 @@ pub struct HotColdDB, Cold: ItemStore> { /// The hot database also contains all blocks. pub hot_db: Hot, /// LRU cache of deserialized blobs. 
Updated whenever a blob is loaded. - blob_cache: Mutex>>, + blob_cache: Mutex>>, /// LRU cache of deserialized blocks. Updated whenever a block is loaded. block_cache: Mutex>>, /// Chain spec. @@ -480,11 +479,7 @@ impl, Cold: ItemStore> HotColdDB .key_delete(DBColumn::ExecPayload.into(), block_root.as_bytes()) } - pub fn put_blobs( - &self, - block_root: &Hash256, - blobs: SignedBlobsSidecar, - ) -> Result<(), Error> { + pub fn put_blobs(&self, block_root: &Hash256, blobs: BlobsSidecar) -> Result<(), Error> { self.hot_db.put_bytes( DBColumn::BeaconBlob.into(), block_root.as_bytes(), @@ -494,7 +489,7 @@ impl, Cold: ItemStore> HotColdDB Ok(()) } - pub fn get_blobs(&self, block_root: &Hash256) -> Result>, Error> { + pub fn get_blobs(&self, block_root: &Hash256) -> Result>, Error> { if let Some(blobs) = self.blob_cache.lock().get(block_root) { Ok(Some(blobs.clone())) } else { @@ -502,7 +497,7 @@ impl, Cold: ItemStore> HotColdDB .hot_db .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? { - let ret = SignedBlobsSidecar::from_ssz_bytes(&bytes)?; + let ret = BlobsSidecar::from_ssz_bytes(&bytes)?; self.blob_cache.lock().put(*block_root, ret.clone()); Ok(Some(ret)) } else { @@ -514,7 +509,7 @@ impl, Cold: ItemStore> HotColdDB pub fn blobs_as_kv_store_ops( &self, key: &Hash256, - blobs: &SignedBlobsSidecar, + blobs: &BlobsSidecar, ops: &mut Vec, ) { let db_key = get_key_for_col(DBColumn::BeaconBlob.into(), key.as_bytes()); diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 00e37a18ec8..d9041dd6361 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -41,7 +41,6 @@ pub use metrics::scrape_for_metrics; use parking_lot::MutexGuard; use std::sync::Arc; use strum::{EnumString, IntoStaticStr}; -use types::signed_blobs_sidecar::SignedBlobsSidecar; pub use types::*; pub type ColumnIter<'a> = Box), Error>> + 'a>; @@ -156,7 +155,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati pub enum StoreOp<'a, E: EthSpec> { PutBlock(Hash256, Arc>), PutState(Hash256, &'a BeaconState), - PutBlobs(Hash256, Arc>), + PutBlobs(Hash256, Arc>), PutStateSummary(Hash256, HotStateSummary), PutStateTemporaryFlag(Hash256), DeleteStateTemporaryFlag(Hash256), diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 81d6e7051bb..a6581b0ed43 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -608,7 +608,7 @@ impl BeaconNodeHttpClient { /// Returns `Ok(None)` on a 404 error. 
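// Not from the patch: a hedged sketch of the read-through caching used by put_blobs/get_blobs
// above -- decode from the on-disk byte column only on a cache miss and remember the result.
// `BlobStore` is illustrative: a HashMap with no eviction stands in for the LRU blob cache,
// Vec<u8> stands in for the SSZ-encoded BlobsSidecar, and the second map stands in for the
// hot database column.
use std::collections::HashMap;

#[derive(Default)]
struct BlobStore {
    /// Stand-in for the BeaconBlob column in the hot database, keyed by block root.
    disk: HashMap<[u8; 32], Vec<u8>>,
    /// Stand-in for the in-memory cache of decoded sidecars.
    cache: HashMap<[u8; 32], Vec<u8>>,
}

impl BlobStore {
    fn put_blobs(&mut self, block_root: [u8; 32], blobs: Vec<u8>) {
        self.disk.insert(block_root, blobs);
    }

    fn get_blobs(&mut self, block_root: &[u8; 32]) -> Option<Vec<u8>> {
        // Fast path: already decoded and cached.
        if let Some(blobs) = self.cache.get(block_root) {
            return Some(blobs.clone());
        }
        // Slow path: read ("decode") from disk, then populate the cache.
        let blobs = self.disk.get(block_root)?.clone();
        self.cache.insert(*block_root, blobs.clone());
        Some(blobs)
    }
}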
pub async fn post_beacon_blobs( &self, - block: &SignedBlobsSidecar, + block: &BlobsSidecar, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 39ddbf6deec..cc839001dd4 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -93,7 +93,6 @@ pub mod sqlite; pub mod blobs_sidecar; pub mod kzg_commitment; pub mod kzg_proof; -pub mod signed_blobs_sidecar; use ethereum_types::{H160, H256}; @@ -166,7 +165,6 @@ pub use crate::signed_beacon_block::{ SignedBlindedBeaconBlock, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; -pub use crate::signed_blobs_sidecar::SignedBlobsSidecar; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; pub use crate::signing_data::{SignedRoot, SigningData}; diff --git a/consensus/types/src/signed_blobs_sidecar.rs b/consensus/types/src/signed_blobs_sidecar.rs deleted file mode 100644 index 677b95bd389..00000000000 --- a/consensus/types/src/signed_blobs_sidecar.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crate::{ - signing_data::SignedRoot, BlobsSidecar, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, - SigningData, -}; -use bls::Signature; -use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use tree_hash::TreeHash; -use tree_hash_derive::TreeHash; - -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq)] -#[serde(bound = "T: EthSpec")] -pub struct SignedBlobsSidecar { - pub message: BlobsSidecar, - pub signature: Signature, -} - -impl SignedBlobsSidecar { - pub fn from_blob(blob: BlobsSidecar, signature: Signature) -> Self { - Self { - message: blob, - signature, - } - } - - /// Verify `self.signature`. - /// - /// If the root of `blob_sidecar.message` is already known it can be passed in via `object_root_opt`. - /// Otherwise, it will be computed locally. - pub fn verify_signature( - &self, - object_root_opt: Option, - pubkey: &PublicKey, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> bool { - let domain = spec.get_domain( - self.message.beacon_block_slot.epoch(T::slots_per_epoch()), - Domain::BlobsSideCar, - fork, - genesis_validators_root, - ); - - let message = if let Some(object_root) = object_root_opt { - SigningData { - object_root, - domain, - } - .tree_hash_root() - } else { - self.message.signing_root(domain) - }; - - self.signature.verify(pubkey, message) - } -} diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 29926f3780c..2a50b6d2eaa 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -319,284 +319,126 @@ impl BlockService { let proposer_index = self.validator_store.validator_index(&validator_pubkey); let validator_pubkey_ref = &validator_pubkey; - match self.context.eth2_config.spec.fork_name_at_slot::(slot) { - ForkName::Base | ForkName::Altair | ForkName::Merge => { - // Request block from first responsive beacon node. 
- let block = self - .beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - let block = match Payload::block_type() { - BlockType::Full => { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_GET], - ); - beacon_node - .get_validator_blocks::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })? - .data - } - BlockType::Blinded => { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], - ); - beacon_node - .get_validator_blinded_blocks::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })? - .data - } - }; - - if proposer_index != Some(block.proposer_index()) { - return Err(BlockError::Recoverable( - "Proposer index does not match block proposer. Beacon chain re-orged" - .to_string(), - )); - } - - Ok::<_, BlockError>(block) - }, - ) - .await?; - - let signed_block = self_ref - .validator_store - .sign_block::(*validator_pubkey_ref, block, current_slot) - .await - .map_err(|e| { - BlockError::Recoverable(format!("Unable to sign block: {:?}", e)) - })?; - - // Publish block with first available beacon node. - self.beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async { - match Payload::block_type() { - BlockType::Full => { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_POST], - ); - beacon_node - .post_beacon_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })? - } - BlockType::Blinded => { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], - ); - beacon_node - .post_beacon_blinded_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })? - } - } - Ok::<_, BlockError>(()) - }, - ) - .await?; - - info!( - log, - "Successfully published block"; - "block_type" => ?Payload::block_type(), - "deposits" => signed_block.message().body().deposits().len(), - "attestations" => signed_block.message().body().attestations().len(), - "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), - "slot" => signed_block.slot().as_u64(), - ); - } - ForkName::Capella | ForkName::Eip4844 => { - if matches!(Payload::block_type(), BlockType::Blinded) { - //FIXME(sean) - crit!( - log, - "`--builder-payloads` not yet supported for EIP-4844 fork" - ); - return Ok(()); - } + // Request block from first responsive beacon node. + let block = self + .beacon_nodes + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let block = match Payload::block_type() { + BlockType::Full => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_GET], + ); + beacon_node + .get_validator_blocks::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })? 
+ .data + } + BlockType::Blinded => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], + ); + beacon_node + .get_validator_blinded_blocks::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })? + .data + } + }; - // Request block from first responsive beacon node. - let block_and_blobs = self - .beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_GET], - ); - let block_and_blobs = beacon_node - .get_validator_blocks_and_blobs::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })? - .data; - - if proposer_index != Some(block_and_blobs.block.proposer_index()) { - return Err(BlockError::Recoverable( - "Proposer index does not match block proposer. Beacon chain re-orged" - .to_string(), - )); - } - - Ok::<_, BlockError>(block_and_blobs) - }, - ) - .await?; - - let blobs_sidecar = BlobsSidecar { - beacon_block_root: block_and_blobs.block.canonical_root(), - beacon_block_slot: block_and_blobs.block.slot(), - blobs: VariableList::from(block_and_blobs.blobs), - kzg_aggregate_proof: block_and_blobs.kzg_aggregate_proof, - }; - - let block = block_and_blobs.block; - let block_publish_future = async { - let signed_block = self_ref - .validator_store - .sign_block::(*validator_pubkey_ref, block, current_slot) - .await - .map_err(|e| { - BlockError::Recoverable(format!("Unable to sign block: {:?}", e)) - })?; - - // Publish block with first available beacon node. - self.beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_POST], - ); - beacon_node - .post_beacon_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })?; - Ok::<_, BlockError>(()) - }, - ) - .await?; - - info!( - log, - "Successfully published block"; - "block_type" => ?Payload::block_type(), - "deposits" => signed_block.message().body().deposits().len(), - "attestations" => signed_block.message().body().attestations().len(), - "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), - "slot" => signed_block.slot().as_u64(), - ); + if proposer_index != Some(block.proposer_index()) { + return Err(BlockError::Recoverable( + "Proposer index does not match block proposer. Beacon chain re-orged" + .to_string(), + )); + } - Ok::<_, BlockError>(()) - }; - - let blob_publish_future = async { - let signed_blobs = self_ref - .validator_store - .sign_blobs(*validator_pubkey_ref, blobs_sidecar, current_slot) - .await - .map_err(|e| { - BlockError::Recoverable(format!("Unable to sign blob: {:?}", e)) - })?; - - // Publish block with first available beacon node. 
- self.beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOB_HTTP_POST], - ); - beacon_node.post_beacon_blobs(&signed_blobs).await.map_err( - |e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing blob: {:?}", - e - )) - }, - )?; - Ok::<_, BlockError>(()) - }, - ) - .await?; - - info!( - log, - "Successfully published blobs"; - "block_type" => ?Payload::block_type(), - "slot" => signed_blobs.message.beacon_block_slot.as_u64(), - "block_root" => ?signed_blobs.message.beacon_block_root, - "blobs_len" => signed_blobs.message.blobs.len(), - ); + Ok::<_, BlockError>(block) + }, + ) + .await?; + let signed_block = self_ref + .validator_store + .sign_block::(*validator_pubkey_ref, block, current_slot) + .await + .map_err(|e| BlockError::Recoverable(format!("Unable to sign block: {:?}", e)))?; + + // Publish block with first available beacon node. + self.beacon_nodes + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async { + match Payload::block_type() { + BlockType::Full => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })? + } + BlockType::Blinded => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blinded_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })? 
+ } + } Ok::<_, BlockError>(()) - }; - - let (res_block, res_blob) = tokio::join!(block_publish_future, blob_publish_future); + }, + ) + .await?; - res_block?; - res_blob?; - } - } + info!( + log, + "Successfully published block"; + "block_type" => ?Payload::block_type(), + "deposits" => signed_block.message().body().deposits().len(), + "attestations" => signed_block.message().body().attestations().len(), + "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), + "slot" => signed_block.slot().as_u64(), + ); Ok(()) } diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 306f22a2fbe..692365aeceb 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -22,9 +22,9 @@ use types::{ AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, BlobsSidecar, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, ExecPayload, Fork, FullPayload, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, - SignedBeaconBlock, SignedBlobsSidecar, SignedContributionAndProof, SignedRoot, - SignedValidatorRegistrationData, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, - SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, + SignedBeaconBlock, SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, + Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, + SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, }; use validator_dir::ValidatorDir; @@ -532,42 +532,6 @@ impl ValidatorStore { } } - pub async fn sign_blobs( - &self, - validator_pubkey: PublicKeyBytes, - blobs_sidecar: BlobsSidecar, - current_slot: Slot, - ) -> Result, Error> { - let slot = blobs_sidecar.beacon_block_slot; - - // Make sure the blob slot is not higher than the current slot to avoid potential attacks. 
- if slot > current_slot { - warn!( - self.log, - "Not signing blob with slot greater than current slot"; - "blob_slot" => slot.as_u64(), - "current_slot" => current_slot.as_u64() - ); - return Err(Error::GreaterThanCurrentSlot { slot, current_slot }); - } - - let signing_epoch = slot.epoch(E::slots_per_epoch()); - let signing_context = self.signing_context(Domain::BlobsSideCar, signing_epoch); - - metrics::inc_counter_vec(&metrics::SIGNED_BLOBS_TOTAL, &[metrics::SUCCESS]); - - let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?; - let signature = signing_method - .get_signature::>( - SignableMessage::BlobsSidecar(&blobs_sidecar), - signing_context, - &self.spec, - &self.task_executor, - ) - .await?; - Ok(SignedBlobsSidecar::from_blob(blobs_sidecar, signature)) - } - pub async fn sign_attestation( &self, validator_pubkey: PublicKeyBytes, From 5ad834280be0e803866eb9a4fae45665e61b6bc8 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 1 Nov 2022 13:15:11 -0400 Subject: [PATCH 057/263] Block processing eip4844 (#3673) * add eip4844 block processing * fix blob processing code * consensus logic fixes and cleanup * use safe arith --- .../src/per_block_processing.rs | 4 + .../src/per_block_processing/eip4844.rs | 1 + .../per_block_processing/eip4844/eip4844.rs | 122 ++++++++++++++++++ .../src/per_block_processing/errors.rs | 20 +++ consensus/types/src/consts.rs | 1 + consensus/types/src/kzg_commitment.rs | 2 +- consensus/types/src/kzg_proof.rs | 2 +- consensus/types/src/lib.rs | 1 + consensus/types/src/payload.rs | 49 +++++++ 9 files changed, 200 insertions(+), 2 deletions(-) create mode 100644 consensus/state_processing/src/per_block_processing/eip4844.rs create mode 100644 consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 84005183f22..602851add01 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -12,6 +12,7 @@ pub use self::verify_attester_slashing::{ pub use self::verify_proposer_slashing::verify_proposer_slashing; pub use altair::sync_committee::process_sync_aggregate; pub use block_signature_verifier::{BlockSignatureVerifier, ParallelSignatureSets}; +pub use eip4844::eip4844::process_blob_kzg_commitments; pub use is_valid_indexed_attestation::is_valid_indexed_attestation; pub use process_operations::process_operations; pub use verify_attestation::{ @@ -24,6 +25,7 @@ pub use verify_exit::verify_exit; pub mod altair; pub mod block_signature_verifier; +pub mod eip4844; pub mod errors; mod is_valid_indexed_attestation; pub mod process_operations; @@ -171,6 +173,8 @@ pub fn per_block_processing>( )?; } + process_blob_kzg_commitments(block.body())?; + Ok(()) } diff --git a/consensus/state_processing/src/per_block_processing/eip4844.rs b/consensus/state_processing/src/per_block_processing/eip4844.rs new file mode 100644 index 00000000000..120ba304d0d --- /dev/null +++ b/consensus/state_processing/src/per_block_processing/eip4844.rs @@ -0,0 +1 @@ +pub mod eip4844; diff --git a/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs b/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs new file mode 100644 index 00000000000..56b3ed58a65 --- /dev/null +++ b/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs @@ -0,0 +1,122 @@ +use crate::BlockProcessingError; +use 
eth2_hashing::hash_fixed; +use itertools::{EitherOrBoth, Itertools}; +use safe_arith::SafeArith; +use ssz::Decode; +use ssz_types::VariableList; +use types::consts::eip4844::{BLOB_TX_TYPE, VERSIONED_HASH_VERSION_KZG}; +use types::{ + AbstractExecPayload, BeaconBlockBodyRef, EthSpec, ExecPayload, FullPayload, FullPayloadRef, + KzgCommitment, Transaction, Transactions, VersionedHash, +}; + +pub fn process_blob_kzg_commitments>( + block_body: BeaconBlockBodyRef, +) -> Result<(), BlockProcessingError> { + if let (Ok(payload), Ok(kzg_commitments)) = ( + block_body.execution_payload(), + block_body.blob_kzg_commitments(), + ) { + if let Some(transactions) = payload.transactions() { + if !verify_kzg_commitments_against_transactions::(transactions, kzg_commitments)? { + return Err(BlockProcessingError::BlobVersionHashMismatch); + } + } + } + + Ok(()) +} + +pub fn verify_kzg_commitments_against_transactions( + transactions: &Transactions, + kzg_commitments: &VariableList, +) -> Result { + let nested_iter = transactions + .into_iter() + .filter(|tx| { + tx.get(0) + .map(|tx_type| *tx_type == BLOB_TX_TYPE) + .unwrap_or(false) + }) + .map(|tx| tx_peek_blob_versioned_hashes::(tx)); + + itertools::process_results(nested_iter, |iter| { + let zipped_iter = iter + .flatten() + // Need to use `itertools::zip_longest` here because just zipping hides if one iter is shorter + // and `itertools::zip_eq` panics. + .zip_longest(kzg_commitments.into_iter()) + .enumerate() + .map(|(index, next)| match next { + EitherOrBoth::Both(hash, commitment) => Ok((hash?, commitment)), + // The number of versioned hashes from the blob transactions exceeds the number of + // commitments in the block. + EitherOrBoth::Left(_) => Err(BlockProcessingError::BlobNumCommitmentsMismatch { + commitments_processed_in_block: index, + commitments_processed_in_transactions: index.safe_add(1)?, + }), + // The number of commitments in the block exceeds the number of versioned hashes + // in the blob transactions. + EitherOrBoth::Right(_) => Err(BlockProcessingError::BlobNumCommitmentsMismatch { + commitments_processed_in_block: index.safe_add(1)?, + commitments_processed_in_transactions: index, + }), + }); + + itertools::process_results(zipped_iter, |mut iter| { + iter.all(|(tx_versioned_hash, commitment)| { + tx_versioned_hash == kzg_commitment_to_versioned_hash(commitment) + }) + }) + })? +} + +/// Only transactions of type `BLOB_TX_TYPE` should be passed into this function. +fn tx_peek_blob_versioned_hashes( + opaque_tx: &Transaction, +) -> Result< + impl IntoIterator> + '_, + BlockProcessingError, +> { + let tx_len = opaque_tx.len(); + let message_offset = 1.safe_add(u32::from_ssz_bytes(opaque_tx.get(1..5).ok_or( + BlockProcessingError::BlobVersionHashIndexOutOfBounds { + length: tx_len, + index: 5, + }, + )?)?)?; + + let message_offset_usize = message_offset as usize; + + // field offset: 32 + 8 + 32 + 32 + 8 + 4 + 32 + 4 + 4 + 32 = 188 + let blob_versioned_hashes_offset = message_offset.safe_add(u32::from_ssz_bytes( + opaque_tx + .get(message_offset_usize.safe_add(188)?..message_offset_usize.safe_add(192)?) + .ok_or(BlockProcessingError::BlobVersionHashIndexOutOfBounds { + length: tx_len, + index: message_offset_usize.safe_add(192)?, + })?, + )?)?; + + let num_hashes = tx_len + .safe_sub(blob_versioned_hashes_offset as usize)? 
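+        // The versioned hashes are assumed to occupy the tail of the transaction, so the
+        // remaining bytes after the offset are split into 32-byte hashes.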
+ .safe_div(32)?; + + Ok((0..num_hashes).into_iter().map(move |i| { + let next_version_hash_index = + (blob_versioned_hashes_offset as usize).safe_add(i.safe_mul(32)?)?; + let bytes = opaque_tx + .get(next_version_hash_index..next_version_hash_index.safe_add(32)?) + .ok_or(BlockProcessingError::BlobVersionHashIndexOutOfBounds { + length: tx_len, + index: (next_version_hash_index as usize).safe_add(32)?, + })?; + Ok(VersionedHash::from_slice(bytes)) + })) +} + +fn kzg_commitment_to_versioned_hash(kzg_commitment: &KzgCommitment) -> VersionedHash { + let mut hashed_commitment = hash_fixed(&kzg_commitment.0); + hashed_commitment[0] = VERSIONED_HASH_VERSION_KZG; + VersionedHash::from(hashed_commitment) +} diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index e214b6e63d8..bb006dc25cd 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -1,6 +1,7 @@ use super::signature_sets::Error as SignatureSetError; use merkle_proof::MerkleTreeError; use safe_arith::ArithError; +use ssz::DecodeError; use types::*; /// The error returned from the `per_block_processing` function. Indicates that a block is either @@ -53,6 +54,7 @@ pub enum BlockProcessingError { BeaconStateError(BeaconStateError), SignatureSetError(SignatureSetError), SszTypesError(ssz_types::Error), + SszDecodeError(DecodeError), MerkleTreeError(MerkleTreeError), ArithError(ArithError), InconsistentBlockFork(InconsistentFork), @@ -70,6 +72,18 @@ pub enum BlockProcessingError { found: u64, }, ExecutionInvalid, + BlobVersionHashMismatch, + /// The number of commitments in blob transactions in the payload does not match the number + /// of commitments in the block. 
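+    /// Each counter records how many commitments on its side had been processed when the
+    /// mismatch was detected.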
+ BlobNumCommitmentsMismatch { + commitments_processed_in_block: usize, + /// This number depic + commitments_processed_in_transactions: usize, + }, + BlobVersionHashIndexOutOfBounds { + index: usize, + length: usize, + }, } impl From for BlockProcessingError { @@ -90,6 +104,12 @@ impl From for BlockProcessingError { } } +impl From for BlockProcessingError { + fn from(error: DecodeError) -> Self { + BlockProcessingError::SszDecodeError(error) + } +} + impl From for BlockProcessingError { fn from(e: ArithError) -> Self { BlockProcessingError::ArithError(e) diff --git a/consensus/types/src/consts.rs b/consensus/types/src/consts.rs index 2469f5f9cf0..b13e3aa9c3b 100644 --- a/consensus/types/src/consts.rs +++ b/consensus/types/src/consts.rs @@ -34,4 +34,5 @@ pub mod eip4844 { .expect("should initialize BLS_MODULUS"); } pub const BLOB_TX_TYPE: u8 = 5; + pub const VERSIONED_HASH_VERSION_KZG: u8 = 1; } diff --git a/consensus/types/src/kzg_commitment.rs b/consensus/types/src/kzg_commitment.rs index 3b9570cd26b..64ed24d9acd 100644 --- a/consensus/types/src/kzg_commitment.rs +++ b/consensus/types/src/kzg_commitment.rs @@ -9,7 +9,7 @@ use tree_hash::{PackedEncoding, TreeHash}; #[derive(Derivative, Debug, Clone, Serialize, Deserialize)] #[derivative(PartialEq, Eq, Hash)] -pub struct KzgCommitment(#[serde(with = "BigArray")] [u8; 48]); +pub struct KzgCommitment(#[serde(with = "BigArray")] pub [u8; 48]); impl Display for KzgCommitment { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { diff --git a/consensus/types/src/kzg_proof.rs b/consensus/types/src/kzg_proof.rs index 879620bd6f0..92a994a85c4 100644 --- a/consensus/types/src/kzg_proof.rs +++ b/consensus/types/src/kzg_proof.rs @@ -1,9 +1,9 @@ use crate::test_utils::{RngCore, TestRandom}; use serde::{Deserialize, Serialize}; +use serde_big_array::BigArray; use ssz::{Decode, DecodeError, Encode}; use std::fmt; use tree_hash::{PackedEncoding, TreeHash}; -use serde_big_array::BigArray; const KZG_PROOF_BYTES_LEN: usize = 48; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index cc839001dd4..506bb0f26db 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -193,6 +193,7 @@ pub type Address = H160; pub type ForkVersion = [u8; 4]; pub type BLSFieldElement = Uint256; pub type Blob = FixedVector::FieldElementsPerBlob>; +pub type VersionedHash = Hash256; pub use bls::{ AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 736f06e22b7..ea483efaf6f 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -35,6 +35,9 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn fee_recipient(&self) -> Address; fn gas_limit(&self) -> u64; + /// This will return `None` on blinded blocks or pre-merge blocks. + fn transactions(&self) -> Option<&Transactions>; + // Is this a default payload? (pre-merge) fn is_default(&self) -> bool; } @@ -191,6 +194,10 @@ impl ExecPayload for FullPayloadMerge { self.execution_payload.gas_limit } + fn transactions(&self) -> Option<&Transactions> { + Some(&self.execution_payload.transactions) + } + // TODO: can this function be optimized? 
fn is_default(&self) -> bool { self.execution_payload == ExecutionPayloadMerge::default() @@ -235,6 +242,10 @@ impl ExecPayload for FullPayloadCapella { self.execution_payload.gas_limit } + fn transactions(&self) -> Option<&Transactions> { + Some(&self.execution_payload.transactions) + } + // TODO: can this function be optimized? fn is_default(&self) -> bool { self.execution_payload == ExecutionPayloadCapella::default() @@ -279,6 +290,10 @@ impl ExecPayload for FullPayloadEip4844 { self.execution_payload.gas_limit } + fn transactions(&self) -> Option<&Transactions> { + Some(&self.execution_payload.transactions) + } + // TODO: can this function be optimized? fn is_default(&self) -> bool { self.execution_payload == ExecutionPayloadEip4844::default() @@ -347,6 +362,13 @@ impl ExecPayload for FullPayload { }) } + fn transactions<'a>(&'a self) -> Option<&'a Transactions> { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + Some(&payload.execution_payload.transactions) + }) + } + fn is_default(&self) -> bool { match self { Self::Merge(payload) => payload.is_default(), @@ -428,6 +450,13 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { }) } + fn transactions<'a>(&'a self) -> Option<&'a Transactions> { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + Some(&payload.execution_payload.transactions) + }) + } + // TODO: can this function be optimized? fn is_default<'a>(&'a self) -> bool { match self { @@ -687,6 +716,10 @@ impl ExecPayload for BlindedPayload { } } + fn transactions(&self) -> Option<&Transactions> { + None + } + // TODO: can this function be optimized? fn is_default(&self) -> bool { match self { @@ -773,6 +806,10 @@ impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { } } + fn transactions(&self) -> Option<&Transactions> { + None + } + // TODO: can this function be optimized? 
fn is_default<'a>(&'a self) -> bool { match self { @@ -828,6 +865,10 @@ impl ExecPayload for BlindedPayloadMerge { self.execution_payload_header.gas_limit } + fn transactions(&self) -> Option<&Transactions> { + None + } + fn is_default(&self) -> bool { self.execution_payload_header == ExecutionPayloadHeaderMerge::default() } @@ -871,6 +912,10 @@ impl ExecPayload for BlindedPayloadCapella { self.execution_payload_header.gas_limit } + fn transactions(&self) -> Option<&Transactions> { + None + } + fn is_default(&self) -> bool { self.execution_payload_header == ExecutionPayloadHeaderCapella::default() } @@ -914,6 +959,10 @@ impl ExecPayload for BlindedPayloadEip4844 { self.execution_payload_header.gas_limit } + fn transactions(&self) -> Option<&Transactions> { + None + } + fn is_default(&self) -> bool { self.execution_payload_header == ExecutionPayloadHeaderEip4844::default() } From d8a49aad2ba29fe9c9e9b843bdb58a17b0dfb1ee Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 1 Nov 2022 13:26:56 -0400 Subject: [PATCH 058/263] merge with unstable fixes --- .../execution_layer/src/test_utils/handle_rpc.rs | 2 +- .../src/beacon_processor/worker/gossip_methods.rs | 12 +----------- consensus/state_processing/src/consensus_context.rs | 6 +++--- testing/ef_tests/src/cases/operations.rs | 3 ++- validator_client/src/http_metrics/metrics.rs | 1 - 5 files changed, 7 insertions(+), 17 deletions(-) diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 7ca83ad39bf..ba26591baf2 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -78,7 +78,7 @@ pub async fn handle_rpc( let request: JsonExecutionPayload = get_param(params, 0)?; // Canned responses set by block hash take priority. - if let Some(status) = ctx.get_new_payload_status(&request.block_hash) { + if let Some(status) = ctx.get_new_payload_status(&request.block_hash()) { return Ok(serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap()); } diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 5f13e27d7bf..37cc1903d36 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -706,6 +706,7 @@ impl Worker { block_and_blob: Arc>, seen_timestamp: Duration, ) { + //FIXME unimplemented!() } @@ -2213,15 +2214,4 @@ impl Worker { self.propagate_if_timely(is_timely, message_id, peer_id) } - - /// Handle an error whilst verifying a `SignedBlobsSidecar` from the network. 
- fn handle_blobs_verification_failure( - &self, - peer_id: PeerId, - message_id: MessageId, - reprocess_tx: Option>>, - error: BlobError, - seen_timestamp: Duration, - ) { - } } diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index fdd3f95a65b..18ae5ad3b7a 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -1,8 +1,8 @@ use std::marker::PhantomData; use tree_hash::TreeHash; use types::{ - BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, SignedBeaconBlock, - Slot, + AbstractExecPayload, BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, + SignedBeaconBlock, Slot, }; #[derive(Debug)] @@ -64,7 +64,7 @@ impl ConsensusContext { self } - pub fn get_current_block_root>( + pub fn get_current_block_root>( &mut self, block: &SignedBeaconBlock, ) -> Result { diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 8e96a413925..e3dfb7f67b8 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -92,7 +92,8 @@ impl Operation for Attestation { BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) - | BeaconState::Eip4844(_) => altair::process_attestation( state, + | BeaconState::Eip4844(_) => altair::process_attestation( + state, self, 0, proposer_index, diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index cc71196f4c1..2d2e7da753a 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -13,7 +13,6 @@ pub const BEACON_BLOCK: &str = "beacon_block"; pub const BEACON_BLOCK_HTTP_GET: &str = "beacon_block_http_get"; pub const BLINDED_BEACON_BLOCK_HTTP_GET: &str = "blinded_beacon_block_http_get"; pub const BEACON_BLOCK_HTTP_POST: &str = "beacon_block_http_post"; -pub const BEACON_BLOB_HTTP_POST: &str = "beacon_blob_http_post"; pub const BLINDED_BEACON_BLOCK_HTTP_POST: &str = "blinded_beacon_block_http_post"; pub const ATTESTATIONS: &str = "attestations"; pub const ATTESTATIONS_HTTP_GET: &str = "attestations_http_get"; From c45b809b7617b35faee8b56c235ee3700d8267b0 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Wed, 2 Nov 2022 10:30:41 -0400 Subject: [PATCH 059/263] Cleanup payload types (#3675) * Add transparent support * Add `Config` struct * Deprecate `enum_behaviour` * Partially remove enum_behaviour from project * Revert "Partially remove enum_behaviour from project" This reverts commit 46ffb7fe77622cf420f7ba2fccf432c0050535d6. * Revert "Deprecate `enum_behaviour`" This reverts commit 89b64a6f53d0f68685be88d5b60d39799d9933b5. 
* Add `struct_behaviour` * Tidy * Move tests into `ssz_derive` * Bump ssz derive * Fix comment * newtype transaparent ssz * use ssz transparent and create macros for per fork implementations * use superstruct map macros Co-authored-by: Paul Hauner --- Cargo.lock | 3 +- beacon_node/beacon_chain/Cargo.toml | 2 +- beacon_node/eth1/Cargo.toml | 2 +- beacon_node/operation_pool/Cargo.toml | 2 +- beacon_node/store/Cargo.toml | 2 +- common/eth2/Cargo.toml | 2 +- consensus/cached_tree_hash/Cargo.toml | 2 +- consensus/fork_choice/Cargo.toml | 2 +- consensus/proto_array/Cargo.toml | 2 +- consensus/ssz/Cargo.toml | 2 +- consensus/ssz/tests/tests.rs | 142 --- consensus/ssz_derive/Cargo.toml | 5 +- consensus/ssz_derive/src/lib.rs | 454 ++++++- consensus/ssz_derive/tests/tests.rs | 215 ++++ consensus/state_processing/Cargo.toml | 2 +- consensus/tree_hash/Cargo.toml | 2 +- consensus/types/Cargo.toml | 2 +- consensus/types/src/beacon_state.rs | 1 + consensus/types/src/execution_payload.rs | 4 - .../types/src/execution_payload_header.rs | 34 + consensus/types/src/kzg_commitment.rs | 29 +- consensus/types/src/kzg_proof.rs | 36 +- consensus/types/src/payload.rs | 1086 +++++------------ slasher/Cargo.toml | 2 +- testing/ef_tests/Cargo.toml | 2 +- 25 files changed, 979 insertions(+), 1058 deletions(-) create mode 100644 consensus/ssz_derive/tests/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 08be608267b..2376a71750c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1791,9 +1791,10 @@ dependencies = [ [[package]] name = "eth2_ssz_derive" -version = "0.3.0" +version = "0.3.1" dependencies = [ "darling", + "eth2_ssz", "proc-macro2", "quote", "syn", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index dd185ac7571..5b85833048b 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -33,7 +33,7 @@ slot_clock = { path = "../../common/slot_clock" } eth2_hashing = "0.3.0" eth2_ssz = "0.4.1" eth2_ssz_types = "0.2.2" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" state_processing = { path = "../../consensus/state_processing" } tree_hash = "0.4.1" types = { path = "../../consensus/types" } diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 7e99c43e7db..e0dd797bfaf 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -21,7 +21,7 @@ hex = "0.4.2" types = { path = "../../consensus/types"} merkle_proof = { path = "../../consensus/merkle_proof"} eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" tree_hash = "0.4.1" parking_lot = "0.12.0" slog = "2.5.2" diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 1d67ecdccc2..8483233589f 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -13,7 +13,7 @@ parking_lot = "0.12.0" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" rayon = "1.5.0" serde = "1.0.116" serde_derive = "1.0.116" diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 20ae37b3b14..09d960535e4 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -14,7 +14,7 @@ leveldb = { version = "0.8.6", default-features = false } parking_lot = "0.12.0" itertools = "0.10.0" eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" types = { path = "../../consensus/types" } state_processing = { 
path = "../../consensus/state_processing" } slog = "2.5.2" diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 294f8ec8a3d..eca086d838f 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -21,7 +21,7 @@ bytes = "1.0.1" account_utils = { path = "../../common/account_utils" } sensitive_url = { path = "../../common/sensitive_url" } eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" futures-util = "0.3.8" futures = "0.3.8" store = { path = "../../beacon_node/store", optional = true } diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml index f9433e4a496..c362af83cd9 100644 --- a/consensus/cached_tree_hash/Cargo.toml +++ b/consensus/cached_tree_hash/Cargo.toml @@ -8,7 +8,7 @@ edition = "2021" ethereum-types = "0.12.1" eth2_ssz_types = "0.2.2" eth2_hashing = "0.3.0" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" eth2_ssz = "0.4.1" tree_hash = "0.4.1" smallvec = "1.6.1" diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 52a738351ef..f0381e5ad99 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -11,7 +11,7 @@ types = { path = "../types" } state_processing = { path = "../state_processing" } proto_array = { path = "../proto_array" } eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } [dev-dependencies] diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index ad79ecc1e6b..1c7b19bf1da 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -11,7 +11,7 @@ path = "src/bin.rs" [dependencies] types = { path = "../types" } eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" serde = "1.0.116" serde_derive = "1.0.116" serde_yaml = "0.8.13" diff --git a/consensus/ssz/Cargo.toml b/consensus/ssz/Cargo.toml index a153c2efc14..e521853c218 100644 --- a/consensus/ssz/Cargo.toml +++ b/consensus/ssz/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" name = "ssz" [dev-dependencies] -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" [dependencies] ethereum-types = "0.12.1" diff --git a/consensus/ssz/tests/tests.rs b/consensus/ssz/tests/tests.rs index b4b91da4b58..f52d2c5cdfe 100644 --- a/consensus/ssz/tests/tests.rs +++ b/consensus/ssz/tests/tests.rs @@ -388,145 +388,3 @@ mod round_trip { round_trip(data); } } - -mod derive_macro { - use ssz::{Decode, Encode}; - use ssz_derive::{Decode, Encode}; - use std::fmt::Debug; - - fn assert_encode(item: &T, bytes: &[u8]) { - assert_eq!(item.as_ssz_bytes(), bytes); - } - - fn assert_encode_decode(item: &T, bytes: &[u8]) { - assert_encode(item, bytes); - assert_eq!(T::from_ssz_bytes(bytes).unwrap(), *item); - } - - #[derive(PartialEq, Debug, Encode, Decode)] - #[ssz(enum_behaviour = "union")] - enum TwoFixedUnion { - U8(u8), - U16(u16), - } - - #[derive(PartialEq, Debug, Encode, Decode)] - struct TwoFixedUnionStruct { - a: TwoFixedUnion, - } - - #[test] - fn two_fixed_union() { - let eight = TwoFixedUnion::U8(1); - let sixteen = TwoFixedUnion::U16(1); - - assert_encode_decode(&eight, &[0, 1]); - assert_encode_decode(&sixteen, &[1, 1, 0]); - - assert_encode_decode(&TwoFixedUnionStruct { a: eight }, &[4, 0, 0, 0, 0, 1]); - assert_encode_decode(&TwoFixedUnionStruct { a: sixteen }, &[4, 0, 0, 0, 1, 1, 0]); - } - - #[derive(PartialEq, Debug, Encode, Decode)] - struct VariableA { - a: u8, - b: Vec, - } - - 
#[derive(PartialEq, Debug, Encode, Decode)] - struct VariableB { - a: Vec, - b: u8, - } - - #[derive(PartialEq, Debug, Encode)] - #[ssz(enum_behaviour = "transparent")] - enum TwoVariableTrans { - A(VariableA), - B(VariableB), - } - - #[derive(PartialEq, Debug, Encode)] - struct TwoVariableTransStruct { - a: TwoVariableTrans, - } - - #[derive(PartialEq, Debug, Encode, Decode)] - #[ssz(enum_behaviour = "union")] - enum TwoVariableUnion { - A(VariableA), - B(VariableB), - } - - #[derive(PartialEq, Debug, Encode, Decode)] - struct TwoVariableUnionStruct { - a: TwoVariableUnion, - } - - #[test] - fn two_variable_trans() { - let trans_a = TwoVariableTrans::A(VariableA { - a: 1, - b: vec![2, 3], - }); - let trans_b = TwoVariableTrans::B(VariableB { - a: vec![1, 2], - b: 3, - }); - - assert_encode(&trans_a, &[1, 5, 0, 0, 0, 2, 3]); - assert_encode(&trans_b, &[5, 0, 0, 0, 3, 1, 2]); - - assert_encode( - &TwoVariableTransStruct { a: trans_a }, - &[4, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], - ); - assert_encode( - &TwoVariableTransStruct { a: trans_b }, - &[4, 0, 0, 0, 5, 0, 0, 0, 3, 1, 2], - ); - } - - #[test] - fn two_variable_union() { - let union_a = TwoVariableUnion::A(VariableA { - a: 1, - b: vec![2, 3], - }); - let union_b = TwoVariableUnion::B(VariableB { - a: vec![1, 2], - b: 3, - }); - - assert_encode_decode(&union_a, &[0, 1, 5, 0, 0, 0, 2, 3]); - assert_encode_decode(&union_b, &[1, 5, 0, 0, 0, 3, 1, 2]); - - assert_encode_decode( - &TwoVariableUnionStruct { a: union_a }, - &[4, 0, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], - ); - assert_encode_decode( - &TwoVariableUnionStruct { a: union_b }, - &[4, 0, 0, 0, 1, 5, 0, 0, 0, 3, 1, 2], - ); - } - - #[derive(PartialEq, Debug, Encode, Decode)] - #[ssz(enum_behaviour = "union")] - enum TwoVecUnion { - A(Vec), - B(Vec), - } - - #[test] - fn two_vec_union() { - assert_encode_decode(&TwoVecUnion::A(vec![]), &[0]); - assert_encode_decode(&TwoVecUnion::B(vec![]), &[1]); - - assert_encode_decode(&TwoVecUnion::A(vec![0]), &[0, 0]); - assert_encode_decode(&TwoVecUnion::B(vec![0]), &[1, 0]); - - assert_encode_decode(&TwoVecUnion::A(vec![0, 1]), &[0, 0, 1]); - assert_encode_decode(&TwoVecUnion::B(vec![0, 1]), &[1, 0, 1]); - } -} diff --git a/consensus/ssz_derive/Cargo.toml b/consensus/ssz_derive/Cargo.toml index cac617d3917..d3b2865a61d 100644 --- a/consensus/ssz_derive/Cargo.toml +++ b/consensus/ssz_derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "eth2_ssz_derive" -version = "0.3.0" +version = "0.3.1" authors = ["Paul Hauner "] edition = "2021" description = "Procedural derive macros to accompany the eth2_ssz crate." @@ -15,3 +15,6 @@ syn = "1.0.42" proc-macro2 = "1.0.23" quote = "1.0.7" darling = "0.13.0" + +[dev-dependencies] +eth2_ssz = "0.4.1" diff --git a/consensus/ssz_derive/src/lib.rs b/consensus/ssz_derive/src/lib.rs index a5a5a0dddf2..40d63fd02fa 100644 --- a/consensus/ssz_derive/src/lib.rs +++ b/consensus/ssz_derive/src/lib.rs @@ -1,7 +1,147 @@ #![recursion_limit = "256"] //! Provides procedural derive macros for the `Encode` and `Decode` traits of the `eth2_ssz` crate. //! -//! Supports field attributes, see each derive macro for more information. +//! ## Attributes +//! +//! The following struct/enum attributes are available: +//! +//! - `#[ssz(enum_behaviour = "union")]`: encodes and decodes an `enum` with a one-byte variant selector. +//! - `#[ssz(enum_behaviour = "transparent")]`: allows encoding an `enum` by serializing only the +//! value whilst ignoring outermost the `enum`. +//! 
- `#[ssz(struct_behaviour = "container")]`: encodes and decodes the `struct` as an SSZ +//! "container". +//! - `#[ssz(struct_behaviour = "transparent")]`: encodes and decodes a `struct` with exactly one +//! non-skipped field as if the outermost `struct` does not exist. +//! +//! The following field attributes are available: +//! +//! - `#[ssz(with = "module")]`: uses the methods in `module` to implement `ssz::Encode` and +//! `ssz::Decode`. This is useful when it's not possible to create an `impl` for that type +//! (e.g. the type is defined in another crate). +//! - `#[ssz(skip_serializing)]`: this field will not be included in the serialized SSZ vector. +//! - `#[ssz(skip_deserializing)]`: this field will not be expected in the serialized +//! SSZ vector and it will be initialized from a `Default` implementation. +//! +//! ## Examples +//! +//! ### Structs +//! +//! ```rust +//! use ssz::{Encode, Decode}; +//! use ssz_derive::{Encode, Decode}; +//! +//! /// Represented as an SSZ "list" wrapped in an SSZ "container". +//! #[derive(Debug, PartialEq, Encode, Decode)] +//! #[ssz(struct_behaviour = "container")] // "container" is the default behaviour +//! struct TypicalStruct { +//! foo: Vec +//! } +//! +//! assert_eq!( +//! TypicalStruct { foo: vec![42] }.as_ssz_bytes(), +//! vec![4, 0, 0, 0, 42] +//! ); +//! +//! assert_eq!( +//! TypicalStruct::from_ssz_bytes(&[4, 0, 0, 0, 42]).unwrap(), +//! TypicalStruct { foo: vec![42] }, +//! ); +//! +//! /// Represented as an SSZ "list" *without* an SSZ "container". +//! #[derive(Encode, Decode)] +//! #[ssz(struct_behaviour = "transparent")] +//! struct WrapperStruct { +//! foo: Vec +//! } +//! +//! assert_eq!( +//! WrapperStruct { foo: vec![42] }.as_ssz_bytes(), +//! vec![42] +//! ); +//! +//! /// Represented as an SSZ "list" *without* an SSZ "container". The `bar` byte is ignored. +//! #[derive(Debug, PartialEq, Encode, Decode)] +//! #[ssz(struct_behaviour = "transparent")] +//! struct WrapperStructSkippedField { +//! foo: Vec, +//! #[ssz(skip_serializing, skip_deserializing)] +//! bar: u8, +//! } +//! +//! assert_eq!( +//! WrapperStructSkippedField { foo: vec![42], bar: 99 }.as_ssz_bytes(), +//! vec![42] +//! ); +//! assert_eq!( +//! WrapperStructSkippedField::from_ssz_bytes(&[42]).unwrap(), +//! WrapperStructSkippedField { foo: vec![42], bar: 0 } +//! ); +//! +//! /// Represented as an SSZ "list" *without* an SSZ "container". +//! #[derive(Encode, Decode)] +//! #[ssz(struct_behaviour = "transparent")] +//! struct NewType(Vec); +//! +//! assert_eq!( +//! NewType(vec![42]).as_ssz_bytes(), +//! vec![42] +//! ); +//! +//! /// Represented as an SSZ "list" *without* an SSZ "container". The `bar` byte is ignored. +//! #[derive(Debug, PartialEq, Encode, Decode)] +//! #[ssz(struct_behaviour = "transparent")] +//! struct NewTypeSkippedField(Vec, #[ssz(skip_serializing, skip_deserializing)] u8); +//! +//! assert_eq!( +//! NewTypeSkippedField(vec![42], 99).as_ssz_bytes(), +//! vec![42] +//! ); +//! assert_eq!( +//! NewTypeSkippedField::from_ssz_bytes(&[42]).unwrap(), +//! NewTypeSkippedField(vec![42], 0) +//! ); +//! ``` +//! +//! ### Enums +//! +//! ```rust +//! use ssz::{Encode, Decode}; +//! use ssz_derive::{Encode, Decode}; +//! +//! /// Represented as an SSZ "union". +//! #[derive(Debug, PartialEq, Encode, Decode)] +//! #[ssz(enum_behaviour = "union")] +//! enum UnionEnum { +//! Foo(u8), +//! Bar(Vec), +//! } +//! +//! assert_eq!( +//! UnionEnum::Foo(42).as_ssz_bytes(), +//! vec![0, 42] +//! ); +//! assert_eq!( +//! 
UnionEnum::from_ssz_bytes(&[1, 42, 42]).unwrap(), +//! UnionEnum::Bar(vec![42, 42]), +//! ); +//! +//! /// Represented as only the value in the enum variant. +//! #[derive(Debug, PartialEq, Encode)] +//! #[ssz(enum_behaviour = "transparent")] +//! enum TransparentEnum { +//! Foo(u8), +//! Bar(Vec), +//! } +//! +//! assert_eq!( +//! TransparentEnum::Foo(42).as_ssz_bytes(), +//! vec![42] +//! ); +//! assert_eq!( +//! TransparentEnum::Bar(vec![42, 42]).as_ssz_bytes(), +//! vec![42, 42] +//! ); +//! ``` use darling::{FromDeriveInput, FromMeta}; use proc_macro::TokenStream; @@ -13,11 +153,18 @@ use syn::{parse_macro_input, DataEnum, DataStruct, DeriveInput, Ident}; /// extensions). const MAX_UNION_SELECTOR: u8 = 127; +const ENUM_TRANSPARENT: &str = "transparent"; +const ENUM_UNION: &str = "union"; +const NO_ENUM_BEHAVIOUR_ERROR: &str = "enums require an \"enum_behaviour\" attribute with \ + a \"transparent\" or \"union\" value, e.g., #[ssz(enum_behaviour = \"transparent\")]"; + #[derive(Debug, FromDeriveInput)] #[darling(attributes(ssz))] struct StructOpts { #[darling(default)] enum_behaviour: Option, + #[darling(default)] + struct_behaviour: Option, } /// Field-level configuration. @@ -31,40 +178,87 @@ struct FieldOpts { skip_deserializing: bool, } -const ENUM_TRANSPARENT: &str = "transparent"; -const ENUM_UNION: &str = "union"; -const ENUM_VARIANTS: &[&str] = &[ENUM_TRANSPARENT, ENUM_UNION]; -const NO_ENUM_BEHAVIOUR_ERROR: &str = "enums require an \"enum_behaviour\" attribute, \ - e.g., #[ssz(enum_behaviour = \"transparent\")]"; +enum Procedure<'a> { + Struct { + data: &'a syn::DataStruct, + behaviour: StructBehaviour, + }, + Enum { + data: &'a syn::DataEnum, + behaviour: EnumBehaviour, + }, +} -enum EnumBehaviour { +enum StructBehaviour { + Container, Transparent, +} + +enum EnumBehaviour { Union, + Transparent, } -impl EnumBehaviour { - pub fn new(s: Option) -> Option { - s.map(|s| match s.as_ref() { - ENUM_TRANSPARENT => EnumBehaviour::Transparent, - ENUM_UNION => EnumBehaviour::Union, - other => panic!( - "{} is an invalid enum_behaviour, use either {:?}", - other, ENUM_VARIANTS - ), - }) +impl<'a> Procedure<'a> { + fn read(item: &'a DeriveInput) -> Self { + let opts = StructOpts::from_derive_input(item).unwrap(); + + match &item.data { + syn::Data::Struct(data) => { + if opts.enum_behaviour.is_some() { + panic!("cannot use \"enum_behaviour\" for a struct"); + } + + match opts.struct_behaviour.as_deref() { + Some("container") | None => Procedure::Struct { + data, + behaviour: StructBehaviour::Container, + }, + Some("transparent") => Procedure::Struct { + data, + behaviour: StructBehaviour::Transparent, + }, + Some(other) => panic!( + "{} is not a valid struct behaviour, use \"container\" or \"transparent\"", + other + ), + } + } + syn::Data::Enum(data) => { + if opts.struct_behaviour.is_some() { + panic!("cannot use \"struct_behaviour\" for an enum"); + } + + match opts.enum_behaviour.as_deref() { + Some("union") => Procedure::Enum { + data, + behaviour: EnumBehaviour::Union, + }, + Some("transparent") => Procedure::Enum { + data, + behaviour: EnumBehaviour::Transparent, + }, + Some(other) => panic!( + "{} is not a valid enum behaviour, use \"container\" or \"transparent\"", + other + ), + None => panic!("{}", NO_ENUM_BEHAVIOUR_ERROR), + } + } + _ => panic!("ssz_derive only supports structs and enums"), + } } } -fn parse_ssz_fields(struct_data: &syn::DataStruct) -> Vec<(&syn::Type, &syn::Ident, FieldOpts)> { +fn parse_ssz_fields( + struct_data: &syn::DataStruct, +) -> Vec<(&syn::Type, 
Option<&syn::Ident>, FieldOpts)> { struct_data .fields .iter() .map(|field| { let ty = &field.ty; - let ident = match &field.ident { - Some(ref ident) => ident, - _ => panic!("ssz_derive only supports named struct fields."), - }; + let ident = field.ident.as_ref(); let field_opts_candidates = field .attrs @@ -93,21 +287,17 @@ fn parse_ssz_fields(struct_data: &syn::DataStruct) -> Vec<(&syn::Type, &syn::Ide #[proc_macro_derive(Encode, attributes(ssz))] pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { let item = parse_macro_input!(input as DeriveInput); - let opts = StructOpts::from_derive_input(&item).unwrap(); - let enum_opt = EnumBehaviour::new(opts.enum_behaviour); + let procedure = Procedure::read(&item); - match &item.data { - syn::Data::Struct(s) => { - if enum_opt.is_some() { - panic!("enum_behaviour is invalid for structs"); - } - ssz_encode_derive_struct(&item, s) - } - syn::Data::Enum(s) => match enum_opt.expect(NO_ENUM_BEHAVIOUR_ERROR) { - EnumBehaviour::Transparent => ssz_encode_derive_enum_transparent(&item, s), - EnumBehaviour::Union => ssz_encode_derive_enum_union(&item, s), + match procedure { + Procedure::Struct { data, behaviour } => match behaviour { + StructBehaviour::Transparent => ssz_encode_derive_struct_transparent(&item, data), + StructBehaviour::Container => ssz_encode_derive_struct(&item, data), + }, + Procedure::Enum { data, behaviour } => match behaviour { + EnumBehaviour::Transparent => ssz_encode_derive_enum_transparent(&item, data), + EnumBehaviour::Union => ssz_encode_derive_enum_union(&item, data), }, - _ => panic!("ssz_derive only supports structs and enums"), } } @@ -132,6 +322,13 @@ fn ssz_encode_derive_struct(derive_input: &DeriveInput, struct_data: &DataStruct continue; } + let ident = match ident { + Some(ref ident) => ident, + _ => panic!( + "#[ssz(struct_behaviour = \"container\")] only supports named struct fields." + ), + }; + if let Some(module) = field_opts.with { let module = quote! { #module::encode }; field_is_ssz_fixed_len.push(quote! { #module::is_ssz_fixed_len() }); @@ -219,6 +416,82 @@ fn ssz_encode_derive_struct(derive_input: &DeriveInput, struct_data: &DataStruct output.into() } +/// Derive `ssz::Encode` "transparently" for a struct which has exactly one non-skipped field. +/// +/// The single field is encoded directly, making the outermost `struct` transparent. +/// +/// ## Field attributes +/// +/// - `#[ssz(skip_serializing)]`: the field will not be serialized. +fn ssz_encode_derive_struct_transparent( + derive_input: &DeriveInput, + struct_data: &DataStruct, +) -> TokenStream { + let name = &derive_input.ident; + let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); + let ssz_fields = parse_ssz_fields(struct_data); + let num_fields = ssz_fields + .iter() + .filter(|(_, _, field_opts)| !field_opts.skip_deserializing) + .count(); + + if num_fields != 1 { + panic!( + "A \"transparent\" struct must have exactly one non-skipped field ({} fields found)", + num_fields + ); + } + + let (ty, ident, _field_opts) = ssz_fields + .iter() + .find(|(_, _, field_opts)| !field_opts.skip_deserializing) + .expect("\"transparent\" struct must have at least one non-skipped field"); + + let output = if let Some(field_name) = ident { + quote! 
{ + impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { + fn is_ssz_fixed_len() -> bool { + <#ty as ssz::Encode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <#ty as ssz::Encode>::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.#field_name.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.#field_name.ssz_append(buf) + } + } + } + } else { + quote! { + impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { + fn is_ssz_fixed_len() -> bool { + <#ty as ssz::Encode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <#ty as ssz::Encode>::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.0.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.0.ssz_append(buf) + } + } + } + }; + + output.into() +} + /// Derive `ssz::Encode` for an enum in the "transparent" method. /// /// The "transparent" method is distinct from the "union" method specified in the SSZ specification. @@ -367,24 +640,20 @@ fn ssz_encode_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum #[proc_macro_derive(Decode, attributes(ssz))] pub fn ssz_decode_derive(input: TokenStream) -> TokenStream { let item = parse_macro_input!(input as DeriveInput); - let opts = StructOpts::from_derive_input(&item).unwrap(); - let enum_opt = EnumBehaviour::new(opts.enum_behaviour); + let procedure = Procedure::read(&item); - match &item.data { - syn::Data::Struct(s) => { - if enum_opt.is_some() { - panic!("enum_behaviour is invalid for structs"); - } - ssz_decode_derive_struct(&item, s) - } - syn::Data::Enum(s) => match enum_opt.expect(NO_ENUM_BEHAVIOUR_ERROR) { + match procedure { + Procedure::Struct { data, behaviour } => match behaviour { + StructBehaviour::Transparent => ssz_decode_derive_struct_transparent(&item, data), + StructBehaviour::Container => ssz_decode_derive_struct(&item, data), + }, + Procedure::Enum { data, behaviour } => match behaviour { + EnumBehaviour::Union => ssz_decode_derive_enum_union(&item, data), EnumBehaviour::Transparent => panic!( "Decode cannot be derived for enum_behaviour \"{}\", only \"{}\" is valid.", ENUM_TRANSPARENT, ENUM_UNION ), - EnumBehaviour::Union => ssz_decode_derive_enum_union(&item, s), }, - _ => panic!("ssz_derive only supports structs and enums"), } } @@ -409,6 +678,13 @@ fn ssz_decode_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> Tok let mut fixed_lens = vec![]; for (ty, ident, field_opts) in parse_ssz_fields(struct_data) { + let ident = match ident { + Some(ref ident) => ident, + _ => panic!( + "#[ssz(struct_behaviour = \"container\")] only supports named struct fields." + ), + }; + field_names.push(quote! { #ident }); @@ -545,6 +821,90 @@ fn ssz_decode_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> Tok output.into() } +/// Implements `ssz::Decode` "transparently" for a `struct` with exactly one non-skipped field. +/// +/// The bytes will be decoded as if they are the inner field, without the outermost struct. The +/// outermost struct will then be applied artificially. +/// +/// ## Field attributes +/// +/// - `#[ssz(skip_deserializing)]`: during de-serialization the field will be instantiated from a +/// `Default` implementation. The decoder will assume that the field was not serialized at all +/// (e.g., if it has been serialized, an error will be raised instead of `Default` overriding it). 
+fn ssz_decode_derive_struct_transparent( + item: &DeriveInput, + struct_data: &DataStruct, +) -> TokenStream { + let name = &item.ident; + let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); + let ssz_fields = parse_ssz_fields(struct_data); + let num_fields = ssz_fields + .iter() + .filter(|(_, _, field_opts)| !field_opts.skip_deserializing) + .count(); + + if num_fields != 1 { + panic!( + "A \"transparent\" struct must have exactly one non-skipped field ({} fields found)", + num_fields + ); + } + + let mut fields = vec![]; + let mut wrapped_type = None; + + for (i, (ty, ident, field_opts)) in ssz_fields.into_iter().enumerate() { + if let Some(name) = ident { + if field_opts.skip_deserializing { + fields.push(quote! { + #name: <_>::default(), + }); + } else { + fields.push(quote! { + #name: <_>::from_ssz_bytes(bytes)?, + }); + wrapped_type = Some(ty); + } + } else { + let index = syn::Index::from(i); + if field_opts.skip_deserializing { + fields.push(quote! { + #index:<_>::default(), + }); + } else { + fields.push(quote! { + #index:<_>::from_ssz_bytes(bytes)?, + }); + wrapped_type = Some(ty); + } + } + } + + let ty = wrapped_type.unwrap(); + + let output = quote! { + impl #impl_generics ssz::Decode for #name #ty_generics #where_clause { + fn is_ssz_fixed_len() -> bool { + <#ty as ssz::Decode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <#ty as ssz::Decode>::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> std::result::Result { + Ok(Self { + #( + #fields + )* + + }) + } + } + }; + output.into() +} + /// Derive `ssz::Decode` for an `enum` following the "union" SSZ spec. fn ssz_decode_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { let name = &derive_input.ident; diff --git a/consensus/ssz_derive/tests/tests.rs b/consensus/ssz_derive/tests/tests.rs new file mode 100644 index 00000000000..2eeb3a48db7 --- /dev/null +++ b/consensus/ssz_derive/tests/tests.rs @@ -0,0 +1,215 @@ +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use std::fmt::Debug; +use std::marker::PhantomData; + +fn assert_encode(item: &T, bytes: &[u8]) { + assert_eq!(item.as_ssz_bytes(), bytes); +} + +fn assert_encode_decode(item: &T, bytes: &[u8]) { + assert_encode(item, bytes); + assert_eq!(T::from_ssz_bytes(bytes).unwrap(), *item); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(enum_behaviour = "union")] +enum TwoFixedUnion { + U8(u8), + U16(u16), +} + +#[derive(PartialEq, Debug, Encode, Decode)] +struct TwoFixedUnionStruct { + a: TwoFixedUnion, +} + +#[test] +fn two_fixed_union() { + let eight = TwoFixedUnion::U8(1); + let sixteen = TwoFixedUnion::U16(1); + + assert_encode_decode(&eight, &[0, 1]); + assert_encode_decode(&sixteen, &[1, 1, 0]); + + assert_encode_decode(&TwoFixedUnionStruct { a: eight }, &[4, 0, 0, 0, 0, 1]); + assert_encode_decode(&TwoFixedUnionStruct { a: sixteen }, &[4, 0, 0, 0, 1, 1, 0]); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +struct VariableA { + a: u8, + b: Vec, +} + +#[derive(PartialEq, Debug, Encode, Decode)] +struct VariableB { + a: Vec, + b: u8, +} + +#[derive(PartialEq, Debug, Encode)] +#[ssz(enum_behaviour = "transparent")] +enum TwoVariableTrans { + A(VariableA), + B(VariableB), +} + +#[derive(PartialEq, Debug, Encode)] +struct TwoVariableTransStruct { + a: TwoVariableTrans, +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(enum_behaviour = "union")] +enum TwoVariableUnion { + A(VariableA), + B(VariableB), +} + +#[derive(PartialEq, Debug, Encode, Decode)] +struct 
TwoVariableUnionStruct { + a: TwoVariableUnion, +} + +#[test] +fn two_variable_trans() { + let trans_a = TwoVariableTrans::A(VariableA { + a: 1, + b: vec![2, 3], + }); + let trans_b = TwoVariableTrans::B(VariableB { + a: vec![1, 2], + b: 3, + }); + + assert_encode(&trans_a, &[1, 5, 0, 0, 0, 2, 3]); + assert_encode(&trans_b, &[5, 0, 0, 0, 3, 1, 2]); + + assert_encode( + &TwoVariableTransStruct { a: trans_a }, + &[4, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], + ); + assert_encode( + &TwoVariableTransStruct { a: trans_b }, + &[4, 0, 0, 0, 5, 0, 0, 0, 3, 1, 2], + ); +} + +#[test] +fn two_variable_union() { + let union_a = TwoVariableUnion::A(VariableA { + a: 1, + b: vec![2, 3], + }); + let union_b = TwoVariableUnion::B(VariableB { + a: vec![1, 2], + b: 3, + }); + + assert_encode_decode(&union_a, &[0, 1, 5, 0, 0, 0, 2, 3]); + assert_encode_decode(&union_b, &[1, 5, 0, 0, 0, 3, 1, 2]); + + assert_encode_decode( + &TwoVariableUnionStruct { a: union_a }, + &[4, 0, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], + ); + assert_encode_decode( + &TwoVariableUnionStruct { a: union_b }, + &[4, 0, 0, 0, 1, 5, 0, 0, 0, 3, 1, 2], + ); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(enum_behaviour = "union")] +enum TwoVecUnion { + A(Vec), + B(Vec), +} + +#[test] +fn two_vec_union() { + assert_encode_decode(&TwoVecUnion::A(vec![]), &[0]); + assert_encode_decode(&TwoVecUnion::B(vec![]), &[1]); + + assert_encode_decode(&TwoVecUnion::A(vec![0]), &[0, 0]); + assert_encode_decode(&TwoVecUnion::B(vec![0]), &[1, 0]); + + assert_encode_decode(&TwoVecUnion::A(vec![0, 1]), &[0, 0, 1]); + assert_encode_decode(&TwoVecUnion::B(vec![0, 1]), &[1, 0, 1]); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(struct_behaviour = "transparent")] +struct TransparentStruct { + inner: Vec, +} + +impl TransparentStruct { + fn new(inner: u8) -> Self { + Self { inner: vec![inner] } + } +} + +#[test] +fn transparent_struct() { + assert_encode_decode(&TransparentStruct::new(42), &vec![42_u8].as_ssz_bytes()); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(struct_behaviour = "transparent")] +struct TransparentStructSkippedField { + inner: Vec, + #[ssz(skip_serializing, skip_deserializing)] + skipped: PhantomData, +} + +impl TransparentStructSkippedField { + fn new(inner: u8) -> Self { + Self { + inner: vec![inner], + skipped: PhantomData, + } + } +} + +#[test] +fn transparent_struct_skipped_field() { + assert_encode_decode( + &TransparentStructSkippedField::new(42), + &vec![42_u8].as_ssz_bytes(), + ); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(struct_behaviour = "transparent")] +struct TransparentStructNewType(Vec); + +#[test] +fn transparent_struct_newtype() { + assert_encode_decode( + &TransparentStructNewType(vec![42_u8]), + &vec![42_u8].as_ssz_bytes(), + ); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(struct_behaviour = "transparent")] +struct TransparentStructNewTypeSkippedField( + Vec, + #[ssz(skip_serializing, skip_deserializing)] PhantomData, +); + +impl TransparentStructNewTypeSkippedField { + fn new(inner: Vec) -> Self { + Self(inner, PhantomData) + } +} + +#[test] +fn transparent_struct_newtype_skipped_field() { + assert_encode_decode( + &TransparentStructNewTypeSkippedField::new(vec![42_u8]), + &vec![42_u8].as_ssz_bytes(), + ); +} diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 46ac2bae577..ccb41830be8 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -14,7 +14,7 @@ bls = { path = "../../crypto/bls" } 
integer-sqrt = "0.1.5" itertools = "0.10.0" eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" eth2_ssz_types = "0.2.2" merkle_proof = { path = "../merkle_proof" } safe_arith = { path = "../safe_arith" } diff --git a/consensus/tree_hash/Cargo.toml b/consensus/tree_hash/Cargo.toml index ab080eac065..1f004724fcb 100644 --- a/consensus/tree_hash/Cargo.toml +++ b/consensus/tree_hash/Cargo.toml @@ -12,7 +12,7 @@ tree_hash_derive = "0.4.0" types = { path = "../types" } beacon_chain = { path = "../../beacon_node/beacon_chain" } eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" [dependencies] ethereum-types = "0.12.1" diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 19dbcc92416..d04d9d650fb 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -26,7 +26,7 @@ serde = {version = "1.0.116" , features = ["rc"] } serde_derive = "1.0.116" slog = "2.5.2" eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" eth2_ssz_types = "0.2.2" swap_or_not_shuffle = { path = "../swap_or_not_shuffle" } test_random_derive = { path = "../../common/test_random_derive" } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 10596a769ef..3e7cbba9243 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -120,6 +120,7 @@ pub enum Error { ArithError(ArithError), MissingBeaconBlock(SignedBeaconBlockHash), MissingBeaconState(BeaconStateHash), + PayloadConversionLogicFlaw, SyncCommitteeNotKnown { current_epoch: Epoch, epoch: Epoch, diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 05dadb43467..6110b7f4fd6 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -121,8 +121,4 @@ impl ExecutionPayload { // Max size of variable length `withdrawals` field + (T::max_withdrawals_per_payload() * ::ssz_fixed_len()) } - - pub fn blob_txns_iter(&self) -> Iter<'_, Transaction> { - self.transactions().iter() - } } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 41aa2f6d2d5..342a2d97e76 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -79,6 +79,12 @@ pub struct ExecutionPayloadHeader { pub withdrawals_root: Hash256, } +impl ExecutionPayloadHeader { + pub fn transactions(&self) -> Option<&Transactions> { + None + } +} + impl<'a, T: EthSpec> ExecutionPayloadHeaderRef<'a, T> { // FIXME: maybe this could be a derived trait.. 
pub fn is_default(self) -> bool { @@ -210,6 +216,34 @@ impl From> for ExecutionPayloadHeaderEip4 } } +impl From> for ExecutionPayloadHeader { + fn from(payload: ExecutionPayloadMerge) -> Self { + Self::Merge(ExecutionPayloadHeaderMerge::from(payload)) + } +} + +impl From> for ExecutionPayloadHeader { + fn from(payload: ExecutionPayloadCapella) -> Self { + Self::Capella(ExecutionPayloadHeaderCapella::from(payload)) + } +} + +impl From> for ExecutionPayloadHeader { + fn from(payload: ExecutionPayloadEip4844) -> Self { + Self::Eip4844(ExecutionPayloadHeaderEip4844::from(payload)) + } +} + +impl From> for ExecutionPayloadHeader { + fn from(payload: ExecutionPayload) -> Self { + match payload { + ExecutionPayload::Merge(payload) => Self::from(payload), + ExecutionPayload::Capella(payload) => Self::from(payload), + ExecutionPayload::Eip4844(payload) => Self::from(payload), + } + } +} + impl TryFrom> for ExecutionPayloadHeaderMerge { type Error = BeaconStateError; fn try_from(header: ExecutionPayloadHeader) -> Result { diff --git a/consensus/types/src/kzg_commitment.rs b/consensus/types/src/kzg_commitment.rs index 64ed24d9acd..eaa429a139e 100644 --- a/consensus/types/src/kzg_commitment.rs +++ b/consensus/types/src/kzg_commitment.rs @@ -2,13 +2,14 @@ use crate::test_utils::TestRandom; use crate::*; use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; -use ssz::{Decode, DecodeError, Encode}; +use ssz_derive::{Decode, Encode}; use std::fmt; use std::fmt::{Display, Formatter}; use tree_hash::{PackedEncoding, TreeHash}; -#[derive(Derivative, Debug, Clone, Serialize, Deserialize)] +#[derive(Derivative, Debug, Clone, Encode, Decode, Serialize, Deserialize)] #[derivative(PartialEq, Eq, Hash)] +#[ssz(struct_behaviour = "transparent")] pub struct KzgCommitment(#[serde(with = "BigArray")] pub [u8; 48]); impl Display for KzgCommitment { @@ -40,27 +41,3 @@ impl TestRandom for KzgCommitment { KzgCommitment(<[u8; 48] as TestRandom>::random_for_test(rng)) } } - -impl Decode for KzgCommitment { - fn is_ssz_fixed_len() -> bool { - <[u8; 48] as Decode>::is_ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - <[u8; 48] as Decode>::from_ssz_bytes(bytes).map(KzgCommitment) - } -} - -impl Encode for KzgCommitment { - fn is_ssz_fixed_len() -> bool { - <[u8; 48] as Encode>::is_ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.0.ssz_append(buf) - } - - fn ssz_bytes_len(&self) -> usize { - self.0.ssz_bytes_len() - } -} diff --git a/consensus/types/src/kzg_proof.rs b/consensus/types/src/kzg_proof.rs index 92a994a85c4..7cd6a8e58b4 100644 --- a/consensus/types/src/kzg_proof.rs +++ b/consensus/types/src/kzg_proof.rs @@ -2,13 +2,15 @@ use crate::test_utils::{RngCore, TestRandom}; use serde::{Deserialize, Serialize}; use serde_big_array::BigArray; use ssz::{Decode, DecodeError, Encode}; +use ssz_derive::{Decode, Encode}; use std::fmt; use tree_hash::{PackedEncoding, TreeHash}; const KZG_PROOF_BYTES_LEN: usize = 48; -#[derive(Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Hash, Clone, Copy, Encode, Decode, Serialize, Deserialize)] #[serde(transparent)] +#[ssz(struct_behaviour = "transparent")] pub struct KzgProof(#[serde(with = "BigArray")] pub [u8; KZG_PROOF_BYTES_LEN]); impl fmt::Display for KzgProof { @@ -35,38 +37,6 @@ impl Into<[u8; KZG_PROOF_BYTES_LEN]> for KzgProof { } } -impl Encode for KzgProof { - fn is_ssz_fixed_len() -> bool { - <[u8; KZG_PROOF_BYTES_LEN] as Encode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize 
{ - <[u8; KZG_PROOF_BYTES_LEN] as Encode>::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.0.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.0.ssz_append(buf) - } -} - -impl Decode for KzgProof { - fn is_ssz_fixed_len() -> bool { - <[u8; KZG_PROOF_BYTES_LEN] as Decode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - <[u8; KZG_PROOF_BYTES_LEN] as Decode>::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - <[u8; KZG_PROOF_BYTES_LEN]>::from_ssz_bytes(bytes).map(Self) - } -} - impl TreeHash for KzgProof { fn tree_hash_type() -> tree_hash::TreeHashType { <[u8; KZG_PROOF_BYTES_LEN]>::tree_hash_type() diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index ea483efaf6f..5b457daee3b 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -17,16 +17,17 @@ pub enum BlockType { Full, } -// + TryFrom> +/// A trait representing behavior of an `ExecutionPayload` that either has a full list of transactions +/// or a transaction hash in its place. pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + Send { fn block_type() -> BlockType; /// Convert the payload into a payload header. fn to_execution_payload_header(&self) -> ExecutionPayloadHeader; - // We provide a subset of field accessors, for the fields used in `consensus`. - // - // More fields can be added here if you wish. + /// We provide a subset of field accessors, for the fields used in `consensus`. + /// + /// More fields can be added here if you wish. fn parent_hash(&self) -> ExecutionBlockHash; fn prev_randao(&self) -> Hash256; fn block_number(&self) -> u64; @@ -34,14 +35,13 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn block_hash(&self) -> ExecutionBlockHash; fn fee_recipient(&self) -> Address; fn gas_limit(&self) -> u64; - - /// This will return `None` on blinded blocks or pre-merge blocks. fn transactions(&self) -> Option<&Transactions>; - // Is this a default payload? (pre-merge) + /// Is this a default payload? (pre-merge) fn is_default(&self) -> bool; } +/// `ExecPayload` functionality that requires ownership. 
pub trait OwnedExecPayload: ExecPayload + Default + Serialize + DeserializeOwned + Encode + Decode + TestRandom + 'static { @@ -106,13 +106,15 @@ pub trait AbstractExecPayload: ), derivative(PartialEq, Hash(bound = "T: EthSpec")), serde(bound = "T: EthSpec", deny_unknown_fields), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) + cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), + ssz(struct_behaviour = "transparent"), ), ref_attributes( derive(Debug, Derivative, TreeHash), derivative(PartialEq, Hash(bound = "T: EthSpec")), tree_hash(enum_behaviour = "transparent"), ), + map_into(ExecutionPayload), cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") )] @@ -155,162 +157,16 @@ impl<'a, T: EthSpec> From> for ExecutionPayload { } } -impl ExecPayload for FullPayloadMerge { - fn block_type() -> BlockType { - BlockType::Full - } - - fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge::from( - self.execution_payload.clone(), - )) - } - - fn parent_hash(&self) -> ExecutionBlockHash { - self.execution_payload.parent_hash - } - - fn prev_randao(&self) -> Hash256 { - self.execution_payload.prev_randao - } - - fn block_number(&self) -> u64 { - self.execution_payload.block_number - } - - fn timestamp(&self) -> u64 { - self.execution_payload.timestamp - } - - fn block_hash(&self) -> ExecutionBlockHash { - self.execution_payload.block_hash - } - - fn fee_recipient(&self) -> Address { - self.execution_payload.fee_recipient - } - - fn gas_limit(&self) -> u64 { - self.execution_payload.gas_limit - } - - fn transactions(&self) -> Option<&Transactions> { - Some(&self.execution_payload.transactions) - } - - // TODO: can this function be optimized? - fn is_default(&self) -> bool { - self.execution_payload == ExecutionPayloadMerge::default() - } -} -impl ExecPayload for FullPayloadCapella { - fn block_type() -> BlockType { - BlockType::Full - } - - fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - ExecutionPayloadHeader::Capella(ExecutionPayloadHeaderCapella::from( - self.execution_payload.clone(), - )) - } - - fn parent_hash(&self) -> ExecutionBlockHash { - self.execution_payload.parent_hash - } - - fn prev_randao(&self) -> Hash256 { - self.execution_payload.prev_randao - } - - fn block_number(&self) -> u64 { - self.execution_payload.block_number - } - - fn timestamp(&self) -> u64 { - self.execution_payload.timestamp - } - - fn block_hash(&self) -> ExecutionBlockHash { - self.execution_payload.block_hash - } - - fn fee_recipient(&self) -> Address { - self.execution_payload.fee_recipient - } - - fn gas_limit(&self) -> u64 { - self.execution_payload.gas_limit - } - - fn transactions(&self) -> Option<&Transactions> { - Some(&self.execution_payload.transactions) - } - - // TODO: can this function be optimized? 
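// [Editorial sketch, not part of the patch] The `// TODO: can this function be optimized?`
// question above refers to the `is_default` implementations, which build a fresh
// `ExecutionPayload*::default()` on every call purely to compare against it. One possible
// answer, assuming a concrete (non-generic) payload type, is to construct the default once
// and reuse it. The real Lighthouse payloads are generic over `EthSpec`, so a plain static
// like this would need extra machinery (e.g. a per-spec cache); illustrative only.
use std::sync::OnceLock;

// Hypothetical stand-in for an execution payload; not the Lighthouse type.
#[derive(Default, PartialEq)]
struct ToyPayload {
    block_number: u64,
    transactions: Vec<Vec<u8>>,
}

fn default_toy_payload() -> &'static ToyPayload {
    // Constructed at most once, then shared by every `is_default` check.
    static DEFAULT: OnceLock<ToyPayload> = OnceLock::new();
    DEFAULT.get_or_init(ToyPayload::default)
}

fn is_default(payload: &ToyPayload) -> bool {
    payload == default_toy_payload()
}

fn main() {
    assert!(is_default(&ToyPayload::default()));
    assert!(!is_default(&ToyPayload { block_number: 1, ..Default::default() }));
}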
- fn is_default(&self) -> bool { - self.execution_payload == ExecutionPayloadCapella::default() - } -} -impl ExecPayload for FullPayloadEip4844 { - fn block_type() -> BlockType { - BlockType::Full - } - - fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - ExecutionPayloadHeader::Eip4844(ExecutionPayloadHeaderEip4844::from( - self.execution_payload.clone(), - )) - } - - fn parent_hash(&self) -> ExecutionBlockHash { - self.execution_payload.parent_hash - } - - fn prev_randao(&self) -> Hash256 { - self.execution_payload.prev_randao - } - - fn block_number(&self) -> u64 { - self.execution_payload.block_number - } - - fn timestamp(&self) -> u64 { - self.execution_payload.timestamp - } - - fn block_hash(&self) -> ExecutionBlockHash { - self.execution_payload.block_hash - } - - fn fee_recipient(&self) -> Address { - self.execution_payload.fee_recipient - } - - fn gas_limit(&self) -> u64 { - self.execution_payload.gas_limit - } - - fn transactions(&self) -> Option<&Transactions> { - Some(&self.execution_payload.transactions) - } - - // TODO: can this function be optimized? - fn is_default(&self) -> bool { - self.execution_payload == ExecutionPayloadEip4844::default() - } -} - impl ExecPayload for FullPayload { fn block_type() -> BlockType { BlockType::Full } fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - match self { - Self::Merge(payload) => payload.to_execution_payload_header(), - Self::Capella(payload) => payload.to_execution_payload_header(), - Self::Eip4844(payload) => payload.to_execution_payload_header(), - } + let payload = map_full_payload_into_execution_payload!(self.clone(), |inner, cons| { + cons(inner.execution_payload) + }); + ExecutionPayloadHeader::from(payload) } fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { @@ -362,29 +218,26 @@ impl ExecPayload for FullPayload { }) } - fn transactions<'a>(&'a self) -> Option<&'a Transactions> { + fn transactions<'a>(&'a self) -> Option<&Transactions> { map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); Some(&payload.execution_payload.transactions) }) } - fn is_default(&self) -> bool { - match self { - Self::Merge(payload) => payload.is_default(), - Self::Capella(payload) => payload.is_default(), - Self::Eip4844(payload) => payload.is_default(), - } + fn is_default<'a>(&'a self) -> bool { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload == <_>::default() + }) } } impl FullPayload { pub fn execution_payload(&self) -> ExecutionPayload { - match self { - Self::Merge(full) => ExecutionPayload::Merge(full.execution_payload.clone()), - Self::Capella(full) => ExecutionPayload::Capella(full.execution_payload.clone()), - Self::Eip4844(full) => ExecutionPayload::Eip4844(full.execution_payload.clone()), - } + map_full_payload_into_execution_payload!(self.clone(), |inner, cons| { + cons(inner.execution_payload) + }) } } @@ -393,12 +246,11 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { BlockType::Full } - fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - match self { - Self::Merge(payload) => payload.to_execution_payload_header(), - Self::Capella(payload) => payload.to_execution_payload_header(), - Self::Eip4844(payload) => payload.to_execution_payload_header(), - } + fn to_execution_payload_header<'a>(&'a self) -> ExecutionPayloadHeader { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + ExecutionPayloadHeader::from(payload.to_execution_payload_header()) + 
}) } fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { @@ -450,7 +302,7 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { }) } - fn transactions<'a>(&'a self) -> Option<&'a Transactions> { + fn transactions<'a>(&'a self) -> Option<&Transactions> { map_full_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); Some(&payload.execution_payload.transactions) @@ -459,17 +311,10 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { // TODO: can this function be optimized? fn is_default<'a>(&'a self) -> bool { - match self { - Self::Merge(payload_ref) => { - payload_ref.execution_payload == ExecutionPayloadMerge::default() - } - Self::Capella(payload_ref) => { - payload_ref.execution_payload == ExecutionPayloadCapella::default() - } - Self::Eip4844(payload_ref) => { - payload_ref.execution_payload == ExecutionPayloadEip4844::default() - } - } + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload == <_>::default() + }) } } @@ -490,41 +335,6 @@ impl AbstractExecPayload for FullPayload { } } -//FIXME(sean) fix errors -impl TryInto> for FullPayload { - type Error = (); - - fn try_into(self) -> Result, Self::Error> { - match self { - FullPayload::Merge(payload) => Ok(payload), - FullPayload::Capella(_) => Err(()), - FullPayload::Eip4844(_) => Err(()), - } - } -} -impl TryInto> for FullPayload { - type Error = (); - - fn try_into(self) -> Result, Self::Error> { - match self { - FullPayload::Merge(_) => Err(()), - FullPayload::Capella(payload) => Ok(payload), - FullPayload::Eip4844(_) => Err(()), - } - } -} -impl TryInto> for FullPayload { - type Error = (); - - fn try_into(self) -> Result, Self::Error> { - match self { - FullPayload::Merge(_) => Err(()), - FullPayload::Capella(_) => Err(()), - FullPayload::Eip4844(payload) => Ok(payload), - } - } -} - impl From> for FullPayload { fn from(execution_payload: ExecutionPayload) -> Self { match execution_payload { @@ -548,60 +358,6 @@ impl TryFrom> for FullPayload { } } -impl From> for FullPayloadMerge { - fn from(execution_payload: ExecutionPayloadMerge) -> Self { - Self { execution_payload } - } -} -impl From> for FullPayloadCapella { - fn from(execution_payload: ExecutionPayloadCapella) -> Self { - Self { execution_payload } - } -} -impl From> for FullPayloadEip4844 { - fn from(execution_payload: ExecutionPayloadEip4844) -> Self { - Self { execution_payload } - } -} - -impl TryFrom> for FullPayloadMerge { - type Error = (); - fn try_from(_: ExecutionPayloadHeader) -> Result { - Err(()) - } -} -impl TryFrom> for FullPayloadCapella { - type Error = (); - fn try_from(_: ExecutionPayloadHeader) -> Result { - Err(()) - } -} -impl TryFrom> for FullPayloadEip4844 { - type Error = (); - fn try_from(_: ExecutionPayloadHeader) -> Result { - Err(()) - } -} - -impl TryFrom> for FullPayloadMerge { - type Error = (); - fn try_from(_: ExecutionPayloadHeaderMerge) -> Result { - Err(()) - } -} -impl TryFrom> for FullPayloadCapella { - type Error = (); - fn try_from(_: ExecutionPayloadHeaderCapella) -> Result { - Err(()) - } -} -impl TryFrom> for FullPayloadEip4844 { - type Error = (); - fn try_from(_: ExecutionPayloadHeaderEip4844) -> Result { - Err(()) - } -} - #[superstruct( variants(Merge, Capella, Eip4844), variant_attributes( @@ -618,13 +374,15 @@ impl TryFrom> for FullPayloadEip484 ), derivative(PartialEq, Hash(bound = "T: EthSpec")), serde(bound = "T: EthSpec", deny_unknown_fields), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) + cfg_attr(feature = 
"arbitrary-fuzz", derive(arbitrary::Arbitrary)), + ssz(struct_behaviour = "transparent"), ), ref_attributes( derive(Debug, Derivative, TreeHash), derivative(PartialEq, Hash(bound = "T: EthSpec")), tree_hash(enum_behaviour = "transparent"), ), + map_into(ExecutionPayloadHeader), cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") )] @@ -647,326 +405,340 @@ impl ExecPayload for BlindedPayload { } fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - match self { - Self::Merge(payload) => { - ExecutionPayloadHeader::Merge(payload.execution_payload_header.clone()) - } - Self::Capella(payload) => { - ExecutionPayloadHeader::Capella(payload.execution_payload_header.clone()) - } - Self::Eip4844(payload) => { - ExecutionPayloadHeader::Eip4844(payload.execution_payload_header.clone()) - } - } + map_blinded_payload_into_execution_payload_header!(self.clone(), |inner, cons| { + cons(inner.execution_payload_header) + }) } - fn parent_hash(&self) -> ExecutionBlockHash { - match self { - Self::Merge(payload) => payload.execution_payload_header.parent_hash, - Self::Capella(payload) => payload.execution_payload_header.parent_hash, - Self::Eip4844(payload) => payload.execution_payload_header.parent_hash, - } + fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.parent_hash + }) } - fn prev_randao(&self) -> Hash256 { - match self { - Self::Merge(payload) => payload.execution_payload_header.prev_randao, - Self::Capella(payload) => payload.execution_payload_header.prev_randao, - Self::Eip4844(payload) => payload.execution_payload_header.prev_randao, - } + fn prev_randao<'a>(&'a self) -> Hash256 { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.prev_randao + }) } - fn block_number(&self) -> u64 { - match self { - Self::Merge(payload) => payload.execution_payload_header.block_number, - Self::Capella(payload) => payload.execution_payload_header.block_number, - Self::Eip4844(payload) => payload.execution_payload_header.block_number, - } + fn block_number<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.block_number + }) } - fn timestamp(&self) -> u64 { - match self { - Self::Merge(payload) => payload.execution_payload_header.timestamp, - Self::Capella(payload) => payload.execution_payload_header.timestamp, - Self::Eip4844(payload) => payload.execution_payload_header.timestamp, - } + fn timestamp<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.timestamp + }) } - fn block_hash(&self) -> ExecutionBlockHash { - match self { - Self::Merge(payload) => payload.execution_payload_header.block_hash, - Self::Capella(payload) => payload.execution_payload_header.block_hash, - Self::Eip4844(payload) => payload.execution_payload_header.block_hash, - } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.block_hash + }) } - fn fee_recipient(&self) -> Address { - match self { - Self::Merge(payload) => payload.execution_payload_header.fee_recipient, - Self::Capella(payload) => 
payload.execution_payload_header.fee_recipient, - Self::Eip4844(payload) => payload.execution_payload_header.fee_recipient, - } + fn fee_recipient<'a>(&'a self) -> Address { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.fee_recipient + }) } - fn gas_limit(&self) -> u64 { - match self { - Self::Merge(payload) => payload.execution_payload_header.gas_limit, - Self::Capella(payload) => payload.execution_payload_header.gas_limit, - Self::Eip4844(payload) => payload.execution_payload_header.gas_limit, - } + fn gas_limit<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.gas_limit + }) } - fn transactions(&self) -> Option<&Transactions> { + fn transactions<'a>(&'a self) -> Option<&Transactions> { None } - // TODO: can this function be optimized? - fn is_default(&self) -> bool { - match self { - Self::Merge(payload) => payload.is_default(), - Self::Capella(payload) => payload.is_default(), - Self::Eip4844(payload) => payload.is_default(), - } + fn is_default<'a>(&'a self) -> bool { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header == <_>::default() + }) } } -// FIXME(sproul): deduplicate this impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { fn block_type() -> BlockType { BlockType::Blinded } - fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - match self { - Self::Merge(payload) => { - ExecutionPayloadHeader::Merge(payload.execution_payload_header.clone()) - } - Self::Capella(payload) => { - ExecutionPayloadHeader::Capella(payload.execution_payload_header.clone()) - } - Self::Eip4844(payload) => { - ExecutionPayloadHeader::Eip4844(payload.execution_payload_header.clone()) - } - } + fn to_execution_payload_header<'a>(&'a self) -> ExecutionPayloadHeader { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.to_execution_payload_header() + }) } - fn parent_hash(&self) -> ExecutionBlockHash { - match self { - Self::Merge(payload) => payload.execution_payload_header.parent_hash, - Self::Capella(payload) => payload.execution_payload_header.parent_hash, - Self::Eip4844(payload) => payload.execution_payload_header.parent_hash, - } + fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.parent_hash + }) } - fn prev_randao(&self) -> Hash256 { - match self { - Self::Merge(payload) => payload.execution_payload_header.prev_randao, - Self::Capella(payload) => payload.execution_payload_header.prev_randao, - Self::Eip4844(payload) => payload.execution_payload_header.prev_randao, - } + fn prev_randao<'a>(&'a self) -> Hash256 { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.prev_randao + }) } - fn block_number(&self) -> u64 { - match self { - Self::Merge(payload) => payload.execution_payload_header.block_number, - Self::Capella(payload) => payload.execution_payload_header.block_number, - Self::Eip4844(payload) => payload.execution_payload_header.block_number, - } + fn block_number<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.block_number + }) } - fn timestamp(&self) -> u64 { - match self { - Self::Merge(payload) => 
payload.execution_payload_header.timestamp, - Self::Capella(payload) => payload.execution_payload_header.timestamp, - Self::Eip4844(payload) => payload.execution_payload_header.timestamp, - } + fn timestamp<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.timestamp + }) } - fn block_hash(&self) -> ExecutionBlockHash { - match self { - Self::Merge(payload) => payload.execution_payload_header.block_hash, - Self::Capella(payload) => payload.execution_payload_header.block_hash, - Self::Eip4844(payload) => payload.execution_payload_header.block_hash, - } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.block_hash + }) } - fn fee_recipient(&self) -> Address { - match self { - Self::Merge(payload) => payload.execution_payload_header.fee_recipient, - Self::Capella(payload) => payload.execution_payload_header.fee_recipient, - Self::Eip4844(payload) => payload.execution_payload_header.fee_recipient, - } + fn fee_recipient<'a>(&'a self) -> Address { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.fee_recipient + }) } - fn gas_limit(&self) -> u64 { - match self { - Self::Merge(payload) => payload.execution_payload_header.gas_limit, - Self::Capella(payload) => payload.execution_payload_header.gas_limit, - Self::Eip4844(payload) => payload.execution_payload_header.gas_limit, - } + fn gas_limit<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.gas_limit + }) } - fn transactions(&self) -> Option<&Transactions> { + fn transactions<'a>(&'a self) -> Option<&Transactions> { None } // TODO: can this function be optimized? fn is_default<'a>(&'a self) -> bool { - match self { - Self::Merge(payload) => { - payload.execution_payload_header == ExecutionPayloadHeaderMerge::default() - } - Self::Capella(payload) => { - payload.execution_payload_header == ExecutionPayloadHeaderCapella::default() - } - Self::Eip4844(payload) => { - payload.execution_payload_header == ExecutionPayloadHeaderEip4844::default() - } - } + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header == <_>::default() + }) } } -impl ExecPayload for BlindedPayloadMerge { - fn block_type() -> BlockType { - BlockType::Full - } - - fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge::from( - self.execution_payload_header.clone(), - )) - } - - fn parent_hash(&self) -> ExecutionBlockHash { - self.execution_payload_header.parent_hash - } - - fn prev_randao(&self) -> Hash256 { - self.execution_payload_header.prev_randao - } - - fn block_number(&self) -> u64 { - self.execution_payload_header.block_number - } - - fn timestamp(&self) -> u64 { - self.execution_payload_header.timestamp - } - - fn block_hash(&self) -> ExecutionBlockHash { - self.execution_payload_header.block_hash - } - - fn fee_recipient(&self) -> Address { - self.execution_payload_header.fee_recipient - } +macro_rules! 
impl_exec_payload_common { + ($wrapper_type:ident, $wrapped_type_full:ident, $wrapped_header_type:ident, $wrapped_field:ident, $fork_variant:ident, $block_type_variant:ident, $f:block) => { + impl ExecPayload for $wrapper_type { + fn block_type() -> BlockType { + BlockType::$block_type_variant + } - fn gas_limit(&self) -> u64 { - self.execution_payload_header.gas_limit - } + fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { + ExecutionPayloadHeader::$fork_variant($wrapped_header_type::from( + self.$wrapped_field.clone(), + )) + } - fn transactions(&self) -> Option<&Transactions> { - None - } + fn parent_hash(&self) -> ExecutionBlockHash { + self.$wrapped_field.parent_hash + } - fn is_default(&self) -> bool { - self.execution_payload_header == ExecutionPayloadHeaderMerge::default() - } -} -impl ExecPayload for BlindedPayloadCapella { - fn block_type() -> BlockType { - BlockType::Full - } + fn prev_randao(&self) -> Hash256 { + self.$wrapped_field.prev_randao + } - fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - ExecutionPayloadHeader::Capella(ExecutionPayloadHeaderCapella::from( - self.execution_payload_header.clone(), - )) - } + fn block_number(&self) -> u64 { + self.$wrapped_field.block_number + } - fn parent_hash(&self) -> ExecutionBlockHash { - self.execution_payload_header.parent_hash - } + fn timestamp(&self) -> u64 { + self.$wrapped_field.timestamp + } - fn prev_randao(&self) -> Hash256 { - self.execution_payload_header.prev_randao - } + fn block_hash(&self) -> ExecutionBlockHash { + self.$wrapped_field.block_hash + } - fn block_number(&self) -> u64 { - self.execution_payload_header.block_number - } + fn fee_recipient(&self) -> Address { + self.$wrapped_field.fee_recipient + } - fn timestamp(&self) -> u64 { - self.execution_payload_header.timestamp - } + fn gas_limit(&self) -> u64 { + self.$wrapped_field.gas_limit + } - fn block_hash(&self) -> ExecutionBlockHash { - self.execution_payload_header.block_hash - } + fn is_default(&self) -> bool { + self.$wrapped_field == $wrapped_type_full::default() + } - fn fee_recipient(&self) -> Address { - self.execution_payload_header.fee_recipient - } + fn transactions(&self) -> Option<&Transactions> { + let f = $f; + f(self) + } + } - fn gas_limit(&self) -> u64 { - self.execution_payload_header.gas_limit - } + impl From<$wrapped_type_full> for $wrapper_type { + fn from($wrapped_field: $wrapped_type_full) -> Self { + Self { $wrapped_field } + } + } + }; +} - fn transactions(&self) -> Option<&Transactions> { - None - } +macro_rules! 
impl_exec_payload_for_fork { + ($wrapper_type_header:ident, $wrapper_type_full:ident, $wrapped_type_header:ident, $wrapped_type_full:ident, $fork_variant:ident) => { + //*************** Blinded payload implementations ******************// - fn is_default(&self) -> bool { - self.execution_payload_header == ExecutionPayloadHeaderCapella::default() - } -} -impl ExecPayload for BlindedPayloadEip4844 { - fn block_type() -> BlockType { - BlockType::Full - } + impl_exec_payload_common!( + $wrapper_type_header, + $wrapped_type_header, + $wrapped_type_header, + execution_payload_header, + $fork_variant, + Blinded, + { |_| { None } } + ); + + impl TryInto<$wrapper_type_header> for BlindedPayload { + type Error = Error; + + fn try_into(self) -> Result<$wrapper_type_header, Self::Error> { + match self { + BlindedPayload::$fork_variant(payload) => Ok(payload), + _ => Err(Error::IncorrectStateVariant), + } + } + } - fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - ExecutionPayloadHeader::Eip4844(ExecutionPayloadHeaderEip4844::from( - self.execution_payload_header.clone(), - )) - } + // NOTE: the `Default` implementation for `BlindedPayload` needs to be different from the `Default` + // implementation for `ExecutionPayloadHeader` because payloads are checked for equality against the + // default payload in `is_merge_transition_block` to determine whether the merge has occurred. + // + // The default `BlindedPayload` is therefore the payload header that results from blinding the + // default `ExecutionPayload`, which differs from the default `ExecutionPayloadHeader` in that + // its `transactions_root` is the hash of the empty list rather than 0x0. + impl Default for $wrapper_type_header { + fn default() -> Self { + Self { + execution_payload_header: $wrapped_type_header::from( + $wrapped_type_full::default(), + ), + } + } + } - fn parent_hash(&self) -> ExecutionBlockHash { - self.execution_payload_header.parent_hash - } + impl TryFrom> for $wrapper_type_header { + type Error = Error; + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::$fork_variant(execution_payload_header) => { + Ok(execution_payload_header.into()) + } + _ => Err(Error::PayloadConversionLogicFlaw), + } + } + } - fn prev_randao(&self) -> Hash256 { - self.execution_payload_header.prev_randao - } + // FIXME(sproul): consider adding references to these From impls + impl From<$wrapped_type_full> for $wrapper_type_header { + fn from(execution_payload: $wrapped_type_full) -> Self { + Self { + execution_payload_header: $wrapped_type_header::from(execution_payload), + } + } + } - fn block_number(&self) -> u64 { - self.execution_payload_header.block_number - } + //*************** Full payload implementations ******************// - fn timestamp(&self) -> u64 { - self.execution_payload_header.timestamp - } + impl_exec_payload_common!( + $wrapper_type_full, + $wrapped_type_full, + $wrapped_type_header, + execution_payload, + $fork_variant, + Full, + { + let c: for<'a> fn(&'a $wrapper_type_full) -> Option<&'a Transactions> = + |payload: &$wrapper_type_full| Some(&payload.execution_payload.transactions); + c + } + ); - fn block_hash(&self) -> ExecutionBlockHash { - self.execution_payload_header.block_hash - } + impl Default for $wrapper_type_full { + fn default() -> Self { + Self { + execution_payload: $wrapped_type_full::default(), + } + } + } - fn fee_recipient(&self) -> Address { - self.execution_payload_header.fee_recipient - } + impl TryFrom> for $wrapper_type_full { + type Error 
= Error; + fn try_from(_: ExecutionPayloadHeader) -> Result { + Err(Error::PayloadConversionLogicFlaw) + } + } - fn gas_limit(&self) -> u64 { - self.execution_payload_header.gas_limit - } + impl TryFrom<$wrapped_type_header> for $wrapper_type_full { + type Error = Error; + fn try_from(_: $wrapped_type_header) -> Result { + Err(Error::PayloadConversionLogicFlaw) + } + } - fn transactions(&self) -> Option<&Transactions> { - None - } + impl TryInto<$wrapper_type_full> for FullPayload { + type Error = Error; - fn is_default(&self) -> bool { - self.execution_payload_header == ExecutionPayloadHeaderEip4844::default() - } -} + fn try_into(self) -> Result<$wrapper_type_full, Self::Error> { + match self { + FullPayload::$fork_variant(payload) => Ok(payload), + _ => Err(Error::PayloadConversionLogicFlaw), + } + } + } + }; +} + +impl_exec_payload_for_fork!( + BlindedPayloadMerge, + FullPayloadMerge, + ExecutionPayloadHeaderMerge, + ExecutionPayloadMerge, + Merge +); +impl_exec_payload_for_fork!( + BlindedPayloadCapella, + FullPayloadCapella, + ExecutionPayloadHeaderCapella, + ExecutionPayloadCapella, + Capella +); +impl_exec_payload_for_fork!( + BlindedPayloadEip4844, + FullPayloadEip4844, + ExecutionPayloadHeaderEip4844, + ExecutionPayloadEip4844, + Eip4844 +); impl AbstractExecPayload for BlindedPayload { type Ref<'a> = BlindedPayloadRef<'a, T>; @@ -985,110 +757,6 @@ impl AbstractExecPayload for BlindedPayload { } } -//FIXME(sean) fix errors -impl TryInto> for BlindedPayload { - type Error = (); - - fn try_into(self) -> Result, Self::Error> { - match self { - BlindedPayload::Merge(payload) => Ok(payload), - BlindedPayload::Capella(_) => Err(()), - BlindedPayload::Eip4844(_) => Err(()), - } - } -} -impl TryInto> for BlindedPayload { - type Error = (); - - fn try_into(self) -> Result, Self::Error> { - match self { - BlindedPayload::Merge(_) => Err(()), - BlindedPayload::Capella(payload) => Ok(payload), - BlindedPayload::Eip4844(_) => Err(()), - } - } -} -impl TryInto> for BlindedPayload { - type Error = (); - - fn try_into(self) -> Result, Self::Error> { - match self { - BlindedPayload::Merge(_) => Err(()), - BlindedPayload::Capella(_) => Err(()), - BlindedPayload::Eip4844(payload) => Ok(payload), - } - } -} - -impl Default for FullPayloadMerge { - fn default() -> Self { - Self { - execution_payload: ExecutionPayloadMerge::default(), - } - } -} -impl Default for FullPayloadCapella { - fn default() -> Self { - Self { - execution_payload: ExecutionPayloadCapella::default(), - } - } -} -impl Default for FullPayloadEip4844 { - fn default() -> Self { - Self { - execution_payload: ExecutionPayloadEip4844::default(), - } - } -} - -// NOTE: the `Default` implementation for `BlindedPayload` needs to be different from the `Default` -// implementation for `ExecutionPayloadHeader` because payloads are checked for equality against the -// default payload in `is_merge_transition_block` to determine whether the merge has occurred. -// -// The default `BlindedPayload` is therefore the payload header that results from blinding the -// default `ExecutionPayload`, which differs from the default `ExecutionPayloadHeader` in that -// its `transactions_root` is the hash of the empty list rather than 0x0. 
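// [Editorial aside, not from the patch] The NOTE just above (moved into
// `impl_exec_payload_for_fork!`) captures the invariant the refactor must preserve: the
// default *blinded* payload is "the header obtained by blinding a default full payload",
// whose `transactions_root` is the hash of an empty list, not an all-zero header. A toy,
// self-contained sketch of why those two differ, using a stand-in `root` function instead
// of the real SSZ `tree_hash_root` (assumption: like the real thing, any non-trivial hash
// of the empty transaction list is not the zero value):
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stand-in for `transactions.tree_hash_root()`; deterministic but NOT the SSZ hash.
fn root(transactions: &[Vec<u8>]) -> u64 {
    let mut h = DefaultHasher::new();
    transactions.len().hash(&mut h);
    for tx in transactions {
        tx.hash(&mut h);
    }
    h.finish()
}

fn main() {
    let zero_root = 0u64; // what a field-by-field default header would carry
    let blinded_default_root = root(&[]); // root of the *empty* transaction list
    println!("all-zero header root: {zero_root:#018x}");
    println!("empty-list root:      {blinded_default_root:#018x}");
    // The real `is_merge_transition_block` check depends on the SSZ equivalents of these
    // two values being distinct, which is why the macro keeps the bespoke `Default` impl.
}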
-/* -impl Default for BlindedPayload { - fn default() -> Self { - Self { - execution_payload_header: ExecutionPayloadHeader::from(&ExecutionPayload::default()), - } - } -} -*/ - -impl Default for BlindedPayloadMerge { - fn default() -> Self { - Self { - execution_payload_header: ExecutionPayloadHeaderMerge::from( - ExecutionPayloadMerge::default(), - ), - } - } -} - -impl Default for BlindedPayloadCapella { - fn default() -> Self { - Self { - execution_payload_header: ExecutionPayloadHeaderCapella::from( - ExecutionPayloadCapella::default(), - ), - } - } -} - -impl Default for BlindedPayloadEip4844 { - fn default() -> Self { - Self { - execution_payload_header: ExecutionPayloadHeaderEip4844::from( - ExecutionPayloadEip4844::default(), - ), - } - } -} - impl From> for BlindedPayload { fn from(payload: ExecutionPayload) -> Self { match payload { @@ -1121,28 +789,6 @@ impl From> for BlindedPayload { } } -impl From> for BlindedPayloadMerge { - fn from(execution_payload_header: ExecutionPayloadHeaderMerge) -> Self { - Self { - execution_payload_header, - } - } -} -impl From> for BlindedPayloadCapella { - fn from(execution_payload_header: ExecutionPayloadHeaderCapella) -> Self { - Self { - execution_payload_header, - } - } -} -impl From> for BlindedPayloadEip4844 { - fn from(execution_payload_header: ExecutionPayloadHeaderEip4844) -> Self { - Self { - execution_payload_header, - } - } -} - impl From> for ExecutionPayloadHeader { fn from(blinded: BlindedPayload) -> Self { match blinded { @@ -1158,143 +804,3 @@ impl From> for ExecutionPayloadHeader { } } } - -// FIXME(sproul): consider adding references to these From impls -impl From> for BlindedPayloadMerge { - fn from(execution_payload: ExecutionPayloadMerge) -> Self { - Self { - execution_payload_header: ExecutionPayloadHeaderMerge::from(execution_payload), - } - } -} -impl From> for BlindedPayloadCapella { - fn from(execution_payload: ExecutionPayloadCapella) -> Self { - Self { - execution_payload_header: ExecutionPayloadHeaderCapella::from(execution_payload), - } - } -} -impl From> for BlindedPayloadEip4844 { - fn from(execution_payload: ExecutionPayloadEip4844) -> Self { - Self { - execution_payload_header: ExecutionPayloadHeaderEip4844::from(execution_payload), - } - } -} - -impl TryFrom> for BlindedPayloadMerge { - type Error = (); - fn try_from(header: ExecutionPayloadHeader) -> Result { - match header { - ExecutionPayloadHeader::Merge(execution_payload_header) => { - Ok(execution_payload_header.into()) - } - _ => Err(()), - } - } -} -impl TryFrom> for BlindedPayloadCapella { - type Error = (); - fn try_from(header: ExecutionPayloadHeader) -> Result { - match header { - ExecutionPayloadHeader::Capella(execution_payload_header) => { - Ok(execution_payload_header.into()) - } - _ => Err(()), - } - } -} - -impl TryFrom> for BlindedPayloadEip4844 { - type Error = (); - fn try_from(header: ExecutionPayloadHeader) -> Result { - match header { - ExecutionPayloadHeader::Eip4844(execution_payload_header) => { - Ok(execution_payload_header.into()) - } - _ => Err(()), - } - } -} - -/* -impl Decode for BlindedPayload { - fn is_ssz_fixed_len() -> bool { - as Decode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - as Decode>::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - Ok(Self { - execution_payload_header: ExecutionPayloadHeader::from_ssz_bytes(bytes)?, - }) - } -} - */ - -/* -impl Encode for BlindedPayload { - fn is_ssz_fixed_len() -> bool { - as Encode>::is_ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut 
Vec) { - self.execution_payload_header.ssz_append(buf) - } - - fn ssz_bytes_len(&self) -> usize { - self.execution_payload_header.ssz_bytes_len() - } -} -*/ - -/* -impl TreeHash for FullPayload { - fn tree_hash_type() -> tree_hash::TreeHashType { - >::tree_hash_type() - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - self.execution_payload.tree_hash_packed_encoding() - } - - fn tree_hash_packing_factor() -> usize { - >::tree_hash_packing_factor() - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - self.execution_payload.tree_hash_root() - } -} -*/ - -/* -impl Decode for FullPayload { - fn is_ssz_fixed_len() -> bool { - as Decode>::is_ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - Ok(FullPayload { - execution_payload: Decode::from_ssz_bytes(bytes)?, - }) - } -} - -impl Encode for FullPayload { - fn is_ssz_fixed_len() -> bool { - as Encode>::is_ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.execution_payload.ssz_append(buf) - } - - fn ssz_bytes_len(&self) -> usize { - self.execution_payload.ssz_bytes_len() - } -} -*/ diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 0f24fe9f04f..c5ce8793ad4 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -13,7 +13,7 @@ lmdb = ["lmdb-rkv", "lmdb-rkv-sys"] bincode = "1.3.1" byteorder = "1.3.4" eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } lazy_static = "1.4.0" lighthouse_metrics = { path = "../common/lighthouse_metrics" } diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 04a222c7af2..1f9ed4da357 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -23,7 +23,7 @@ serde_derive = "1.0.116" serde_repr = "0.1.6" serde_yaml = "0.8.13" eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" tree_hash = "0.4.1" tree_hash_derive = "0.4.0" cached_tree_hash = { path = "../../consensus/cached_tree_hash" } From fc0b06a0399935c735f5316e8abda45ba29926f2 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 4 Nov 2022 16:50:26 -0400 Subject: [PATCH 060/263] Feature gate withdrawals (#3684) * start feature gating * feature gate withdrawals --- beacon_node/Cargo.toml | 2 + beacon_node/beacon_chain/Cargo.toml | 2 + beacon_node/beacon_chain/src/beacon_chain.rs | 1 + beacon_node/execution_layer/Cargo.toml | 3 + beacon_node/execution_layer/src/engine_api.rs | 15 ++--- .../src/engine_api/json_structures.rs | 13 ++++ beacon_node/execution_layer/src/lib.rs | 4 ++ .../test_utils/execution_block_generator.rs | 1 + .../src/test_utils/mock_execution_layer.rs | 1 + beacon_node/store/Cargo.toml | 4 ++ beacon_node/store/src/partial_beacon_state.rs | 65 +++++++++++++++++++ consensus/state_processing/Cargo.toml | 2 + consensus/state_processing/src/common/mod.rs | 1 + .../src/common/withdraw_balance.rs | 1 + .../src/per_epoch_processing/capella.rs | 6 ++ .../capella/full_withdrawals.rs | 2 + .../capella/partial_withdrawals.rs | 2 + .../state_processing/src/upgrade/capella.rs | 3 + .../state_processing/src/upgrade/eip4844.rs | 13 +++- consensus/types/Cargo.toml | 1 + consensus/types/src/beacon_state.rs | 3 + consensus/types/src/execution_payload.rs | 1 + .../types/src/execution_payload_header.rs | 6 ++ lighthouse/Cargo.toml | 4 ++ 24 files changed, 144 insertions(+), 12 deletions(-) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index b85aae2f4f4..093f09949c4 100644 --- a/beacon_node/Cargo.toml +++ 
b/beacon_node/Cargo.toml @@ -13,6 +13,8 @@ node_test_rig = { path = "../testing/node_test_rig" } [features] write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing. +withdrawals = ["beacon_chain/withdrawals", "types/withdrawals", "store/withdrawals", "execution_layer/withdrawals"] +withdrawals-processing = ["beacon_chain/withdrawals-processing", "store/withdrawals-processing", "execution_layer/withdrawals-processing"] [dependencies] eth2_config = { path = "../common/eth2_config" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 5b85833048b..39ff16c6b74 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -10,6 +10,8 @@ default = ["participation_metrics"] write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing. participation_metrics = [] # Exposes validator participation metrics to Prometheus. fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable +withdrawals = ["state_processing/withdrawals", "types/withdrawals", "store/withdrawals", "execution_layer/withdrawals"] +withdrawals-processing = ["state_processing/withdrawals-processing", "store/withdrawals-processing", "execution_layer/withdrawals-processing"] [dev-dependencies] maplit = "1.0.2" diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d9be36686c1..a7d0fe5c6c6 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4117,6 +4117,7 @@ impl BeaconChain { .get_suggested_fee_recipient(proposer as u64) .await, //FIXME(sean) + #[cfg(feature = "withdrawals")] withdrawals: vec![], }), }; diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 5d6339996b4..68a4f6a414e 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[features] +withdrawals = ["state_processing/withdrawals", "types/withdrawals"] +withdrawals-processing = ["state_processing/withdrawals-processing"] [dependencies] types = { path = "../../consensus/types"} diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index f04a7237892..ed940d4a88e 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -155,6 +155,7 @@ pub struct ExecutionBlockWithTransactions { #[serde(rename = "hash")] pub block_hash: ExecutionBlockHash, pub transactions: Vec, + #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub withdrawals: Vec, } @@ -204,6 +205,7 @@ impl From> for ExecutionBlockWithTransactions .map(|tx| Transaction::decode(&Rlp::new(tx))) .collect::, _>>() .unwrap_or_else(|_| Vec::new()), + #[cfg(feature = "withdrawals")] withdrawals: block.withdrawals.into(), }) } @@ -229,6 +231,7 @@ impl From> for ExecutionBlockWithTransactions .map(|tx| Transaction::decode(&Rlp::new(tx))) .collect::, _>>() .unwrap_or_else(|_| Vec::new()), + #[cfg(feature = "withdrawals")] withdrawals: block.withdrawals.into(), }) } @@ -236,17 +239,6 @@ impl From> for ExecutionBlockWithTransactions } } -/* -impl From> for ExecutionPayload { - fn from(block: ExecutionBlockWithTransactions) -> Self { - map_execution_block_with_transactions!(block, 
|inner, cons| { - let block = inner.into(); - cons(block) - }) - } -} - */ - #[superstruct( variants(V1, V2), variant_attributes(derive(Clone, Debug, PartialEq),), @@ -261,6 +253,7 @@ pub struct PayloadAttributes { pub prev_randao: Hash256, #[superstruct(getter(copy))] pub suggested_fee_recipient: Address, + #[cfg(feature = "withdrawals")] #[superstruct(only(V2))] pub withdrawals: Vec, } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index e32122ba31a..6d1d70e78dc 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -99,6 +99,7 @@ pub struct JsonExecutionPayloadHeader { pub excess_blobs: u64, pub block_hash: ExecutionBlockHash, pub transactions_root: Hash256, + #[cfg(feature = "withdrawals")] #[superstruct(only(V2, V3))] pub withdrawals_root: Hash256, } @@ -137,6 +138,7 @@ impl From> for ExecutionPayloadHeader< base_fee_per_gas: v2.base_fee_per_gas, block_hash: v2.block_hash, transactions_root: v2.transactions_root, + #[cfg(feature = "withdrawals")] withdrawals_root: v2.withdrawals_root, }), JsonExecutionPayloadHeader::V3(v3) => Self::Eip4844(ExecutionPayloadHeaderEip4844 { @@ -155,6 +157,7 @@ impl From> for ExecutionPayloadHeader< excess_blobs: v3.excess_blobs, block_hash: v3.block_hash, transactions_root: v3.transactions_root, + #[cfg(feature = "withdrawals")] withdrawals_root: v3.withdrawals_root, }), } @@ -195,6 +198,7 @@ impl From> for JsonExecutionPayloadHeader< base_fee_per_gas: capella.base_fee_per_gas, block_hash: capella.block_hash, transactions_root: capella.transactions_root, + #[cfg(feature = "withdrawals")] withdrawals_root: capella.withdrawals_root, }), ExecutionPayloadHeader::Eip4844(eip4844) => Self::V3(JsonExecutionPayloadHeaderV3 { @@ -213,6 +217,7 @@ impl From> for JsonExecutionPayloadHeader< excess_blobs: eip4844.excess_blobs, block_hash: eip4844.block_hash, transactions_root: eip4844.transactions_root, + #[cfg(feature = "withdrawals")] withdrawals_root: eip4844.withdrawals_root, }), } @@ -258,6 +263,7 @@ pub struct JsonExecutionPayload { #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: VariableList, T::MaxTransactionsPerPayload>, + #[cfg(feature = "withdrawals")] #[superstruct(only(V2, V3))] pub withdrawals: VariableList, } @@ -296,6 +302,7 @@ impl From> for ExecutionPayload { base_fee_per_gas: v2.base_fee_per_gas, block_hash: v2.block_hash, transactions: v2.transactions, + #[cfg(feature = "withdrawals")] withdrawals: v2.withdrawals, }), JsonExecutionPayload::V3(v3) => Self::Eip4844(ExecutionPayloadEip4844 { @@ -314,6 +321,7 @@ impl From> for ExecutionPayload { excess_blobs: v3.excess_blobs, block_hash: v3.block_hash, transactions: v3.transactions, + #[cfg(feature = "withdrawals")] withdrawals: v3.withdrawals, }), } @@ -354,6 +362,7 @@ impl From> for JsonExecutionPayload { base_fee_per_gas: capella.base_fee_per_gas, block_hash: capella.block_hash, transactions: capella.transactions, + #[cfg(feature = "withdrawals")] withdrawals: capella.withdrawals, }), ExecutionPayload::Eip4844(eip4844) => Self::V3(JsonExecutionPayloadV3 { @@ -372,6 +381,7 @@ impl From> for JsonExecutionPayload { excess_blobs: eip4844.excess_blobs, block_hash: eip4844.block_hash, transactions: eip4844.transactions, + #[cfg(feature = "withdrawals")] withdrawals: eip4844.withdrawals, }), } @@ -425,6 +435,7 @@ pub struct JsonPayloadAttributes { pub timestamp: u64, pub prev_randao: Hash256, 
pub suggested_fee_recipient: Address, + #[cfg(feature = "withdrawals")] #[superstruct(only(V2))] pub withdrawals: Vec, } @@ -441,6 +452,7 @@ impl From for JsonPayloadAttributes { timestamp: pa.timestamp, prev_randao: pa.prev_randao, suggested_fee_recipient: pa.suggested_fee_recipient, + #[cfg(feature = "withdrawals")] withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), }), } @@ -459,6 +471,7 @@ impl From for PayloadAttributes { timestamp: jpa.timestamp, prev_randao: jpa.prev_randao, suggested_fee_recipient: jpa.suggested_fee_recipient, + #[cfg(feature = "withdrawals")] withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), }), } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 21ee0457d22..04bdb4a20de 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1499,6 +1499,7 @@ impl ExecutionLayer { }) } ExecutionBlockWithTransactions::Capella(capella_block) => { + #[cfg(feature = "withdrawals")] let withdrawals = VariableList::new(capella_block.withdrawals.clone()) .map_err(ApiError::DeserializeWithdrawals)?; @@ -1517,10 +1518,12 @@ impl ExecutionLayer { base_fee_per_gas: capella_block.base_fee_per_gas, block_hash: capella_block.block_hash, transactions, + #[cfg(feature = "withdrawals")] withdrawals, }) } ExecutionBlockWithTransactions::Eip4844(eip4844_block) => { + #[cfg(feature = "withdrawals")] let withdrawals = VariableList::new(eip4844_block.withdrawals.clone()) .map_err(ApiError::DeserializeWithdrawals)?; @@ -1540,6 +1543,7 @@ impl ExecutionLayer { excess_blobs: eip4844_block.excess_blobs, block_hash: eip4844_block.block_hash, transactions, + #[cfg(feature = "withdrawals")] withdrawals, }) } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index c492bcd5a55..37eb8ba8f48 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -403,6 +403,7 @@ impl ExecutionBlockGenerator { base_fee_per_gas: Uint256::one(), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), + #[cfg(feature = "withdrawals")] withdrawals: pa .withdrawals .iter() diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index d0bc2785c30..62336279b06 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -113,6 +113,7 @@ impl MockExecutionLayer { prev_randao, suggested_fee_recipient: Address::repeat_byte(42), // FIXME: think about adding withdrawals here.. 
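// [Editorial sketch, not part of the patch] The `+` lines that follow gate a field of a
// struct *expression* with `#[cfg(feature = "withdrawals")]`, mirroring the `#[cfg]` on the
// corresponding field in the struct definition; the two sides must be gated identically or
// the build fails whenever the feature is disabled. A minimal, self-contained illustration
// (the feature name is assumed to be declared by the enclosing crate, as this commit does
// in the various Cargo.toml files):
struct Attributes {
    timestamp: u64,
    #[cfg(feature = "withdrawals")]
    withdrawals: Vec<u64>,
}

fn build(timestamp: u64) -> Attributes {
    Attributes {
        timestamp,
        // Compiled out entirely unless the feature is on, keeping the initializer in
        // lock-step with the gated field above.
        #[cfg(feature = "withdrawals")]
        withdrawals: vec![],
    }
}

fn main() {
    println!("timestamp = {}", build(42).timestamp);
}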
+ #[cfg(feature = "withdrawals")] withdrawals: vec![], }) } diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 09d960535e4..b3e8e1fc6b5 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -26,3 +26,7 @@ lru = "0.7.1" sloggers = { version = "2.1.1", features = ["json"] } directory = { path = "../../common/directory" } strum = { version = "0.24.0", features = ["derive"] } + +[features] +withdrawals = ["state_processing/withdrawals", "types/withdrawals"] +withdrawals-processing = ["state_processing/withdrawals-processing"] \ No newline at end of file diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 74e63c58ea3..5cff00529e3 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -105,10 +105,13 @@ where pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, // Withdrawals + #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub withdrawal_queue: VariableList, + #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub next_withdrawal_index: u64, + #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub next_partial_withdrawal_validator_index: u64, } @@ -199,6 +202,7 @@ impl PartialBeaconState { latest_execution_payload_header ] ), + #[cfg(feature = "withdrawals")] BeaconState::Capella(s) => impl_from_state_forgetful!( s, outer, @@ -216,6 +220,22 @@ impl PartialBeaconState { next_partial_withdrawal_validator_index ] ), + #[cfg(not(feature = "withdrawals"))] + BeaconState::Capella(s) => impl_from_state_forgetful!( + s, + outer, + Capella, + PartialBeaconStateCapella, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header + ] + ), + #[cfg(feature = "withdrawals")] BeaconState::Eip4844(s) => impl_from_state_forgetful!( s, outer, @@ -233,6 +253,21 @@ impl PartialBeaconState { next_partial_withdrawal_validator_index ] ), + #[cfg(not(feature = "withdrawals"))] + BeaconState::Eip4844(s) => impl_from_state_forgetful!( + s, + outer, + Eip4844, + PartialBeaconStateEip4844, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header + ] + ), } } @@ -420,6 +455,7 @@ impl TryInto> for PartialBeaconState { latest_execution_payload_header ] ), + #[cfg(feature = "withdrawals")] PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( inner, Capella, @@ -436,6 +472,21 @@ impl TryInto> for PartialBeaconState { next_partial_withdrawal_validator_index ] ), + #[cfg(not(feature = "withdrawals"))] + PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( + inner, + Capella, + BeaconStateCapella, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header + ] + ), + #[cfg(feature = "withdrawals")] PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!( inner, Eip4844, @@ -452,6 +503,20 @@ impl TryInto> for PartialBeaconState { next_partial_withdrawal_validator_index ] ), + #[cfg(not(feature = "withdrawals"))] + PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!( + inner, + Eip4844, + BeaconStateEip4844, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + 
next_sync_committee, + inactivity_scores, + latest_execution_payload_header + ] + ), }; Ok(state) } diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index ccb41830be8..39a0be3d9fd 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -43,3 +43,5 @@ arbitrary-fuzz = [ "eth2_ssz_types/arbitrary", "tree_hash/arbitrary", ] +withdrawals = ["types/withdrawals"] +withdrawals-processing = [] diff --git a/consensus/state_processing/src/common/mod.rs b/consensus/state_processing/src/common/mod.rs index 531891ee957..34091127c01 100644 --- a/consensus/state_processing/src/common/mod.rs +++ b/consensus/state_processing/src/common/mod.rs @@ -15,6 +15,7 @@ pub use get_attesting_indices::{get_attesting_indices, get_attesting_indices_fro pub use get_indexed_attestation::get_indexed_attestation; pub use initiate_validator_exit::initiate_validator_exit; pub use slash_validator::slash_validator; +#[cfg(feature = "withdrawals")] pub use withdraw_balance::withdraw_balance; use safe_arith::SafeArith; diff --git a/consensus/state_processing/src/common/withdraw_balance.rs b/consensus/state_processing/src/common/withdraw_balance.rs index 29b09cc0f91..65343f3112c 100644 --- a/consensus/state_processing/src/common/withdraw_balance.rs +++ b/consensus/state_processing/src/common/withdraw_balance.rs @@ -2,6 +2,7 @@ use crate::common::decrease_balance; use safe_arith::SafeArith; use types::{BeaconStateError as Error, *}; +#[cfg(feature = "withdrawals")] pub fn withdraw_balance( state: &mut BeaconState, validator_index: usize, diff --git a/consensus/state_processing/src/per_epoch_processing/capella.rs b/consensus/state_processing/src/per_epoch_processing/capella.rs index d1bf71071dd..ed5665d77a0 100644 --- a/consensus/state_processing/src/per_epoch_processing/capella.rs +++ b/consensus/state_processing/src/per_epoch_processing/capella.rs @@ -5,11 +5,15 @@ use crate::per_epoch_processing::{ historical_roots_update::process_historical_roots_update, resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, }; +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] pub use full_withdrawals::process_full_withdrawals; +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] pub use partial_withdrawals::process_partial_withdrawals; use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] pub mod full_withdrawals; +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] pub mod partial_withdrawals; pub fn process_epoch( @@ -66,8 +70,10 @@ pub fn process_epoch( altair::process_sync_committee_updates(state, spec)?; // Withdrawals + #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] process_full_withdrawals(state, spec)?; + #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] process_partial_withdrawals(state, spec)?; // Rotate the epoch caches to suit the epoch transition. 
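// [Editorial sketch between file diffs, not part of the commit] The capella epoch-processing
// hunk above runs the new withdrawal steps only under
// `cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))`: one feature turns
// the data model (fields, types) on, the other turns the state-transition logic on, and code
// that touches both must require both. A stand-alone illustration of that shape, with toy
// names (the real feature wiring is in the Cargo.toml changes earlier in this commit):
#[derive(Default)]
struct ToyState {
    balance: u64,
    #[cfg(feature = "withdrawals")]
    withdrawal_queue: Vec<u64>,
}

// Needs *both* gates: it reads a field that only exists under `withdrawals`, but it is
// processing logic that should stay off unless `withdrawals-processing` is also enabled.
#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
fn process_full_withdrawals(state: &mut ToyState) {
    if state.balance > 0 {
        state.withdrawal_queue.push(state.balance);
        state.balance = 0;
    }
}

fn process_epoch(state: &mut ToyState) {
    // ...the existing epoch steps would run here...
    #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
    process_full_withdrawals(state);
    let _ = state;
}

fn main() {
    let mut state = ToyState { balance: 32, ..Default::default() };
    process_epoch(&mut state);
    println!("balance after epoch: {}", state.balance);
}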
diff --git a/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs b/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs index 62e4b91110d..619301f16a2 100644 --- a/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs +++ b/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs @@ -1,7 +1,9 @@ +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] use crate::common::withdraw_balance; use crate::EpochProcessingError; use types::{beacon_state::BeaconState, eth_spec::EthSpec, ChainSpec}; +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] pub fn process_full_withdrawals( state: &mut BeaconState, spec: &ChainSpec, diff --git a/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs b/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs index 75576ef6e76..d1ae4fee5a6 100644 --- a/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs +++ b/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs @@ -1,8 +1,10 @@ +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] use crate::common::withdraw_balance; use crate::EpochProcessingError; use safe_arith::SafeArith; use types::{beacon_state::BeaconState, eth_spec::EthSpec, ChainSpec}; +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] pub fn process_partial_withdrawals( state: &mut BeaconState, spec: &ChainSpec, diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index b2abd3be207..e64c8398068 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -57,8 +57,11 @@ pub fn upgrade_to_capella( // Execution latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_capella(), // Withdrawals + #[cfg(feature = "withdrawals")] withdrawal_queue: VariableList::empty(), + #[cfg(feature = "withdrawals")] next_withdrawal_index: 0, + #[cfg(feature = "withdrawals")] next_partial_withdrawal_validator_index: 0, // Caches total_active_balance: pre.total_active_balance, diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index 666d5b0c680..d677fd66667 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -9,6 +9,14 @@ pub fn upgrade_to_eip4844( let epoch = pre_state.current_epoch(); let pre = pre_state.as_capella_mut()?; + // FIXME(sean) This is a hack to let us participate in testnets where capella doesn't exist. + // if we are disabling withdrawals, assume we should fork off of bellatrix. + let previous_fork_version = if cfg!(feature ="withdrawals") { + pre.fork.current_version + } else { + spec.bellatrix_fork_epoch + }; + // Where possible, use something like `mem::take` to move fields from behind the &mut // reference. For other fields that don't have a good default value, use `clone`. 
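// [Editorial aside, not from the patch] The fork-version workaround just above uses the
// `cfg!` macro rather than a `#[cfg]` attribute: `cfg!(feature = "withdrawals")` expands to
// a plain `true`/`false`, so *both* arms of the `if` are type-checked in every build. That
// is presumably why the stray `spec.bellatrix_fork_epoch` (an epoch field, not a fork
// version) broke the build and is swapped for `bellatrix_fork_version` by the later
// "Fix compilation error (#3692)" commit in this series. A small self-contained contrast of
// the two forms, with made-up version constants:
fn version_with_cfg_macro() -> [u8; 4] {
    // Both arms must type-check; only the selected one survives as live code.
    if cfg!(feature = "withdrawals") {
        [3, 0, 0, 0]
    } else {
        [2, 0, 0, 0]
    }
}

fn version_with_cfg_attr() -> [u8; 4] {
    // With `#[cfg]` the disabled statement is stripped before type checking even runs.
    #[cfg(feature = "withdrawals")]
    return [3, 0, 0, 0];
    #[cfg(not(feature = "withdrawals"))]
    return [2, 0, 0, 0];
}

fn main() {
    assert_eq!(version_with_cfg_macro().len(), 4);
    assert_eq!(version_with_cfg_attr().len(), 4);
}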
// @@ -20,7 +28,7 @@ pub fn upgrade_to_eip4844( genesis_validators_root: pre.genesis_validators_root, slot: pre.slot, fork: Fork { - previous_version: pre.fork.current_version, + previous_version: previous_fork_version, current_version: spec.eip4844_fork_version, epoch, }, @@ -56,8 +64,11 @@ pub fn upgrade_to_eip4844( // Execution latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_eip4844(), // Withdrawals + #[cfg(feature = "withdrawals")] withdrawal_queue: mem::take(&mut pre.withdrawal_queue), + #[cfg(feature = "withdrawals")] next_withdrawal_index: pre.next_withdrawal_index, + #[cfg(feature = "withdrawals")] next_partial_withdrawal_validator_index: pre.next_partial_withdrawal_validator_index, // Caches total_active_balance: pre.total_active_balance, diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index d04d9d650fb..c787a7a87aa 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -71,3 +71,4 @@ arbitrary-fuzz = [ "swap_or_not_shuffle/arbitrary", "tree_hash/arbitrary", ] +withdrawals = [] diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 3e7cbba9243..ec5aa9c4f3d 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -295,10 +295,13 @@ where pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, // Withdrawals + #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub withdrawal_queue: VariableList, + #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub next_withdrawal_index: u64, + #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub next_partial_withdrawal_validator_index: u64, diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 6110b7f4fd6..022f378e395 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -80,6 +80,7 @@ pub struct ExecutionPayload { pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, + #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub withdrawals: VariableList, } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 342a2d97e76..7546ca2e5f1 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -74,6 +74,7 @@ pub struct ExecutionPayloadHeader { pub block_hash: ExecutionBlockHash, #[superstruct(getter(copy))] pub transactions_root: Hash256, + #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] #[superstruct(getter(copy))] pub withdrawals_root: Hash256, @@ -104,6 +105,7 @@ impl<'a, T: EthSpec> ExecutionPayloadHeaderRef<'a, T> { impl ExecutionPayloadHeaderMerge { pub fn upgrade_to_capella(&self) -> ExecutionPayloadHeaderCapella { + #[cfg(feature = "withdrawals")] // TODO: if this is correct we should calculate and hardcode this.. let empty_withdrawals_root = VariableList::::empty().tree_hash_root(); @@ -122,6 +124,7 @@ impl ExecutionPayloadHeaderMerge { base_fee_per_gas: self.base_fee_per_gas, block_hash: self.block_hash, transactions_root: self.transactions_root, + #[cfg(feature = "withdrawals")] // FIXME: the spec doesn't seem to define what to do here.. 
withdrawals_root: empty_withdrawals_root, } @@ -147,6 +150,7 @@ impl ExecutionPayloadHeaderCapella { excess_blobs: 0, block_hash: self.block_hash, transactions_root: self.transactions_root, + #[cfg(feature = "withdrawals")] withdrawals_root: self.withdrawals_root, } } @@ -189,6 +193,7 @@ impl From> for ExecutionPayloadHeaderCape base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions_root: payload.transactions.tree_hash_root(), + #[cfg(feature = "withdrawals")] withdrawals_root: payload.withdrawals.tree_hash_root(), } } @@ -211,6 +216,7 @@ impl From> for ExecutionPayloadHeaderEip4 excess_blobs: payload.excess_blobs, block_hash: payload.block_hash, transactions_root: payload.transactions.tree_hash_root(), + #[cfg(feature = "withdrawals")] withdrawals_root: payload.withdrawals.tree_hash_root(), } } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 864869a149b..7864b7e82ba 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -24,6 +24,10 @@ gnosis = [] slasher-mdbx = ["slasher/mdbx"] # Support slasher LMDB backend. slasher-lmdb = ["slasher/lmdb"] +# Support for inclusion of withdrawals fields in all capella consensus types in all APIs. +withdrawals = ["types/withdrawals", "beacon_node/withdrawals"] +# Support for withdrawals consensus processing logic. +withdrawals-processing = ["beacon_node/withdrawals-processing"] [dependencies] beacon_node = { "path" = "../beacon_node" } From cb393f5b7d7af28dc56508e2562e06013493ddd2 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 7 Nov 2022 00:46:48 +0800 Subject: [PATCH 061/263] Fix compilation error (#3692) --- consensus/state_processing/src/upgrade/eip4844.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index d677fd66667..ce88364f08e 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -14,7 +14,7 @@ pub fn upgrade_to_eip4844( let previous_fork_version = if cfg!(feature ="withdrawals") { pre.fork.current_version } else { - spec.bellatrix_fork_epoch + spec.bellatrix_fork_version }; // Where possible, use something like `mem::take` to move fields from behind the &mut From bc0af72c74139951572c44ec42bf3028d500cede Mon Sep 17 00:00:00 2001 From: realbigsean Date: Mon, 7 Nov 2022 12:36:31 -0500 Subject: [PATCH 062/263] fix topic name --- beacon_node/lighthouse_network/src/types/topics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 1be94a93f12..8cecc2e6822 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -11,7 +11,7 @@ use crate::Subnet; pub const TOPIC_PREFIX: &str = "eth2"; pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; -pub const BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC: &str = "beacon_blocks_and_blobs_sidecar"; +pub const BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC: &str = "beacon_block_and_blobs_sidecar"; pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof"; pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_"; pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; From ab13f95db5b934097a02ee43b74da1057bb2b614 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Wed, 9 Nov 2022 18:09:07 -0600 Subject: [PATCH 
063/263] Updated for queueless withdrawals spec --- consensus/state_processing/src/common/mod.rs | 3 - .../src/common/withdraw_balance.rs | 29 ------- .../src/per_block_processing.rs | 4 + .../src/per_block_processing/errors.rs | 20 ++++- .../process_operations.rs | 43 +++++++++ .../per_block_processing/signature_sets.rs | 31 ++++++- .../src/per_epoch_processing.rs | 7 +- .../src/per_epoch_processing/capella.rs | 87 ------------------- .../capella/full_withdrawals.rs | 25 ------ .../capella/partial_withdrawals.rs | 41 --------- .../state_processing/src/upgrade/eip4844.rs | 2 +- consensus/types/presets/mainnet/capella.yaml | 12 +++ consensus/types/presets/minimal/capella.yaml | 12 +++ consensus/types/src/beacon_block_body.rs | 4 + consensus/types/src/beacon_state.rs | 5 +- consensus/types/src/eth_spec.rs | 18 ---- consensus/types/src/execution_payload.rs | 5 +- consensus/types/src/lib.rs | 6 +- consensus/types/src/payload.rs | 66 ++++++++++++-- consensus/types/src/validator.rs | 13 ++- 20 files changed, 209 insertions(+), 224 deletions(-) delete mode 100644 consensus/state_processing/src/common/withdraw_balance.rs delete mode 100644 consensus/state_processing/src/per_epoch_processing/capella.rs delete mode 100644 consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs delete mode 100644 consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs create mode 100644 consensus/types/presets/mainnet/capella.yaml create mode 100644 consensus/types/presets/minimal/capella.yaml diff --git a/consensus/state_processing/src/common/mod.rs b/consensus/state_processing/src/common/mod.rs index 34091127c01..8a2e2439bb6 100644 --- a/consensus/state_processing/src/common/mod.rs +++ b/consensus/state_processing/src/common/mod.rs @@ -4,7 +4,6 @@ mod get_attesting_indices; mod get_indexed_attestation; mod initiate_validator_exit; mod slash_validator; -mod withdraw_balance; pub mod altair; pub mod base; @@ -15,8 +14,6 @@ pub use get_attesting_indices::{get_attesting_indices, get_attesting_indices_fro pub use get_indexed_attestation::get_indexed_attestation; pub use initiate_validator_exit::initiate_validator_exit; pub use slash_validator::slash_validator; -#[cfg(feature = "withdrawals")] -pub use withdraw_balance::withdraw_balance; use safe_arith::SafeArith; use types::{BeaconState, BeaconStateError, EthSpec}; diff --git a/consensus/state_processing/src/common/withdraw_balance.rs b/consensus/state_processing/src/common/withdraw_balance.rs deleted file mode 100644 index 65343f3112c..00000000000 --- a/consensus/state_processing/src/common/withdraw_balance.rs +++ /dev/null @@ -1,29 +0,0 @@ -use crate::common::decrease_balance; -use safe_arith::SafeArith; -use types::{BeaconStateError as Error, *}; - -#[cfg(feature = "withdrawals")] -pub fn withdraw_balance( - state: &mut BeaconState, - validator_index: usize, - amount: u64, -) -> Result<(), Error> { - decrease_balance(state, validator_index as usize, amount)?; - - let withdrawal_address = Address::from_slice( - &state - .get_validator(validator_index)? 
- .withdrawal_credentials - .as_bytes()[12..], - ); - let withdrawal = Withdrawal { - index: *state.next_withdrawal_index()?, - validator_index: validator_index as u64, - address: withdrawal_address, - amount, - }; - state.next_withdrawal_index_mut()?.safe_add_assign(1)?; - state.withdrawal_queue_mut()?.push(withdrawal)?; - - Ok(()) -} diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 255a4892ae6..c39f46b95ba 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -19,6 +19,8 @@ pub use process_operations::process_operations; pub use verify_attestation::{ verify_attestation_for_block_inclusion, verify_attestation_for_state, }; +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +pub use verify_bls_to_execution_change::verify_bls_to_execution_change; pub use verify_deposit::{ get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, }; @@ -34,6 +36,8 @@ pub mod signature_sets; pub mod tests; mod verify_attestation; mod verify_attester_slashing; +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +mod verify_bls_to_execution_change; mod verify_deposit; mod verify_exit; mod verify_proposer_slashing; diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index fdb13f42822..39c740480d0 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -49,6 +49,10 @@ pub enum BlockProcessingError { index: usize, reason: ExitInvalid, }, + BlsExecutionChangeInvalid { + index: usize, + reason: BlsExecutionChangeInvalid, + }, SyncAggregateInvalid { reason: SyncAggregateInvalid, }, @@ -180,7 +184,8 @@ impl_into_block_processing_error_with_index!( IndexedAttestationInvalid, AttestationInvalid, DepositInvalid, - ExitInvalid + ExitInvalid, + BlsExecutionChangeInvalid ); pub type HeaderValidationError = BlockOperationError; @@ -190,6 +195,7 @@ pub type AttestationValidationError = BlockOperationError; pub type SyncCommitteeMessageValidationError = BlockOperationError; pub type DepositValidationError = BlockOperationError; pub type ExitValidationError = BlockOperationError; +pub type BlsExecutionChangeValidationError = BlockOperationError; #[derive(Debug, PartialEq, Clone)] pub enum BlockOperationError { @@ -405,6 +411,18 @@ pub enum ExitInvalid { SignatureSetError(SignatureSetError), } +#[derive(Debug, PartialEq, Clone)] +pub enum BlsExecutionChangeInvalid { + /// The specified validator is not in the state's validator registry. + ValidatorUnknown(u64), + /// Validator does not have BLS Withdrawal credentials before this change + NonBlsWithdrawalCredentials, + /// Provided BLS pubkey does not match withdrawal credentials + WithdrawalCredentialsMismatch, + /// The signature is invalid + BadSignature, +} + #[derive(Debug, PartialEq, Clone)] pub enum SyncAggregateInvalid { /// One or more of the aggregate public keys is invalid. 
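For context on the `NonBlsWithdrawalCredentials` and `WithdrawalCredentialsMismatch` variants introduced above: a BLS-to-execution change is only accepted against withdrawal credentials that still carry the BLS prefix byte and whose remaining 31 bytes commit to the hash of `from_bls_pubkey`; once applied, the credentials are rewritten with the eth1-address prefix, eleven zero bytes, and the 20-byte execution address. The following standalone sketch shows just that 32-byte layout (illustrative only, not part of the patch; the prefix bytes are taken as parameters rather than hard-coded constants).

    // Before the change: byte 0 is the BLS prefix and bytes 1..32 must equal
    // the last 31 bytes of hash(from_bls_pubkey).
    fn is_bls_credential_for(credentials: &[u8; 32], bls_prefix: u8, pubkey_hash: &[u8; 32]) -> bool {
        credentials[0] == bls_prefix && credentials[1..] == pubkey_hash[1..]
    }

    // After the change: byte 0 is the eth1-address prefix, bytes 1..12 stay
    // zero, and bytes 12..32 carry the execution address.
    fn eth1_credentials(eth1_prefix: u8, execution_address: &[u8; 20]) -> [u8; 32] {
        let mut credentials = [0u8; 32];
        credentials[0] = eth1_prefix;
        credentials[12..].copy_from_slice(execution_address);
        credentials
    }

These two helpers mirror, respectively, what `verify_bls_to_execution_change` checks and what `Validator::change_withdrawal_credentials` writes further down in this patch series.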
diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 6aca565b507..a85cbce6e57 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -33,6 +33,9 @@ pub fn process_operations<'a, T: EthSpec, Payload: AbstractExecPayload>( process_attestations(state, block_body, verify_signatures, ctxt, spec)?; process_deposits(state, block_body.deposits(), spec)?; process_exits(state, block_body.voluntary_exits(), verify_signatures, spec)?; + #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] + process_bls_to_execution_changes(state, block_body, verify_signatures, spec)?; + Ok(()) } @@ -279,6 +282,46 @@ pub fn process_exits( Ok(()) } +/// Validates each `bls_to_execution_change` and updates the state +/// +/// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returs +/// an `Err` describing the invalid object or cause of failure. +/// +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-process_bls_to_execution_change +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +pub fn process_bls_to_execution_changes<'a, T: EthSpec, Payload: AbstractExecPayload>( + state: &mut BeaconState, + block_body: BeaconBlockBodyRef<'a, T, Payload>, + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + match block_body { + BeaconBlockBodyRef::Base(_) + | BeaconBlockBodyRef::Altair(_) + | BeaconBlockBodyRef::Merge(_) => Ok(()), + BeaconBlockBodyRef::Capella(_) | BeaconBlockBodyRef::Eip4844(_) => { + for (i, signed_address_change) in block_body.bls_to_execution_changes()?.enumerate() { + verify_bls_to_execution_change( + state, + &signed_address_change, + verify_signatures, + spec, + ) + .map_err(|e| e.into_with_index(i))?; + + state + .get_validator_mut(signed_address_change.message.validator_index)? + .change_withdrawal_credentials( + signed_address_change.message.to_execution_address, + spec, + ); + } + + Ok(()) + } + } +} + /// Validates each `Deposit` and updates the state, short-circuiting on an invalid object. 
/// /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index d07f8bb6e1a..fa37681c766 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -11,8 +11,8 @@ use types::{ BeaconStateError, ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, InconsistentFork, IndexedAttestation, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, - SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, SigningData, Slot, SyncAggregate, - SyncAggregatorSelectionData, Unsigned, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, + SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned, }; pub type Result = std::result::Result; @@ -156,6 +156,33 @@ where )) } +pub fn bls_execution_change_signature_set<'a, T: EthSpec>( + state: &'a BeaconState, + signed_address_change: &'a SignedBlsToExecutionChange, + spec: &'a ChainSpec, +) -> Result> { + let domain = spec.get_domain( + state.current_epoch(), + Domain::BlsToExecutionChange, + &state.fork(), + state.genesis_validators_root(), + ); + let message = signed_address_change.message.signing_root(domain); + let signing_key = Cow::Owned( + signed_address_change + .message + .from_bls_pubkey + .decompress() + .map_err(|_| Error::PublicKeyDecompressionFailed)?, + ); + + Ok(SignatureSet::single_pubkey( + &signed_address_change.signature, + signing_key, + message, + )) +} + /// A signature set that is valid if the block proposers randao reveal signature is correct. 
pub fn randao_signature_set<'a, T, F, Payload: AbstractExecPayload>( state: &'a BeaconState, diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index 565fae9db96..f227b82863c 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -11,7 +11,6 @@ pub use weigh_justification_and_finalization::weigh_justification_and_finalizati pub mod altair; pub mod base; -pub mod capella; pub mod effective_balance_updates; pub mod epoch_processing_summary; pub mod errors; @@ -38,8 +37,10 @@ pub fn process_epoch( match state { BeaconState::Base(_) => base::process_epoch(state, spec), - BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_epoch(state, spec), - BeaconState::Capella(_) | BeaconState::Eip4844(_) => capella::process_epoch(state, spec), + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => altair::process_epoch(state, spec), } } diff --git a/consensus/state_processing/src/per_epoch_processing/capella.rs b/consensus/state_processing/src/per_epoch_processing/capella.rs deleted file mode 100644 index ed5665d77a0..00000000000 --- a/consensus/state_processing/src/per_epoch_processing/capella.rs +++ /dev/null @@ -1,87 +0,0 @@ -use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error}; -use crate::per_epoch_processing::{ - altair, - effective_balance_updates::process_effective_balance_updates, - historical_roots_update::process_historical_roots_update, - resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, -}; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -pub use full_withdrawals::process_full_withdrawals; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -pub use partial_withdrawals::process_partial_withdrawals; -use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; - -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -pub mod full_withdrawals; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -pub mod partial_withdrawals; - -pub fn process_epoch( - state: &mut BeaconState, - spec: &ChainSpec, -) -> Result, Error> { - // Ensure the committee caches are built. - state.build_committee_cache(RelativeEpoch::Previous, spec)?; - state.build_committee_cache(RelativeEpoch::Current, spec)?; - state.build_committee_cache(RelativeEpoch::Next, spec)?; - - // Pre-compute participating indices and total balances. - let participation_cache = altair::ParticipationCache::new(state, spec)?; - let sync_committee = state.current_sync_committee()?.clone(); - - // Justification and finalization. - let justification_and_finalization_state = - altair::process_justification_and_finalization(state, &participation_cache)?; - justification_and_finalization_state.apply_changes_to_state(state); - - altair::process_inactivity_updates(state, &participation_cache, spec)?; - - // Rewards and Penalties. - altair::process_rewards_and_penalties(state, &participation_cache, spec)?; - - // Registry Updates. - process_registry_updates(state, spec)?; - - // Slashings. - process_slashings( - state, - participation_cache.current_epoch_total_active_balance(), - spec, - )?; - - // Reset eth1 data votes. - process_eth1_data_reset(state)?; - - // Update effective balances with hysteresis (lag). 
- process_effective_balance_updates(state, spec)?; - - // Reset slashings - process_slashings_reset(state)?; - - // Set randao mix - process_randao_mixes_reset(state)?; - - // Set historical root accumulator - process_historical_roots_update(state)?; - - // Rotate current/previous epoch participation - altair::process_participation_flag_updates(state)?; - - altair::process_sync_committee_updates(state, spec)?; - - // Withdrawals - #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] - process_full_withdrawals(state, spec)?; - - #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] - process_partial_withdrawals(state, spec)?; - - // Rotate the epoch caches to suit the epoch transition. - state.advance_caches(spec)?; - - // FIXME: do we need a Capella variant for this? - Ok(EpochProcessingSummary::Altair { - participation_cache, - sync_committee, - }) -} diff --git a/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs b/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs deleted file mode 100644 index 619301f16a2..00000000000 --- a/consensus/state_processing/src/per_epoch_processing/capella/full_withdrawals.rs +++ /dev/null @@ -1,25 +0,0 @@ -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -use crate::common::withdraw_balance; -use crate::EpochProcessingError; -use types::{beacon_state::BeaconState, eth_spec::EthSpec, ChainSpec}; - -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -pub fn process_full_withdrawals( - state: &mut BeaconState, - spec: &ChainSpec, -) -> Result<(), EpochProcessingError> { - let current_epoch = state.current_epoch(); - // FIXME: is this the most efficient way to do this? - for validator_index in 0..state.validators().len() { - // TODO: is this the correct way to handle validators not existing? - if let (Some(validator), Some(balance)) = ( - state.validators().get(validator_index), - state.balances().get(validator_index), - ) { - if validator.is_fully_withdrawable_at(*balance, current_epoch, spec) { - withdraw_balance(state, validator_index, *balance)?; - } - } - } - Ok(()) -} diff --git a/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs b/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs deleted file mode 100644 index d1ae4fee5a6..00000000000 --- a/consensus/state_processing/src/per_epoch_processing/capella/partial_withdrawals.rs +++ /dev/null @@ -1,41 +0,0 @@ -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -use crate::common::withdraw_balance; -use crate::EpochProcessingError; -use safe_arith::SafeArith; -use types::{beacon_state::BeaconState, eth_spec::EthSpec, ChainSpec}; - -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -pub fn process_partial_withdrawals( - state: &mut BeaconState, - spec: &ChainSpec, -) -> Result<(), EpochProcessingError> { - let mut partial_withdrawals_count = 0; - let mut validator_index = *state.next_partial_withdrawal_validator_index()? as usize; - - let n_validators = state.validators().len(); - // FIXME: is this the most efficient way to do this? - for _ in 0..n_validators { - // TODO: is this the correct way to handle validators not existing? 
- if let (Some(validator), Some(balance)) = ( - state.validators().get(validator_index), - state.balances().get(validator_index), - ) { - if validator.is_partially_withdrawable_validator(*balance, spec) { - withdraw_balance( - state, - validator_index, - *balance - spec.max_effective_balance, - )?; - partial_withdrawals_count.safe_add_assign(1)?; - - validator_index = validator_index.safe_add(1)? % n_validators; - if partial_withdrawals_count == T::max_partial_withdrawals_per_epoch() { - break; - } - } - } - } - *state.next_partial_withdrawal_validator_index_mut()? = validator_index as u64; - - Ok(()) -} diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index ce88364f08e..78fb16033e9 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -11,7 +11,7 @@ pub fn upgrade_to_eip4844( // FIXME(sean) This is a hack to let us participate in testnets where capella doesn't exist. // if we are disabling withdrawals, assume we should fork off of bellatrix. - let previous_fork_version = if cfg!(feature ="withdrawals") { + let previous_fork_version = if cfg!(feature = "withdrawals") { pre.fork.current_version } else { spec.bellatrix_fork_version diff --git a/consensus/types/presets/mainnet/capella.yaml b/consensus/types/presets/mainnet/capella.yaml new file mode 100644 index 00000000000..0c087255bfb --- /dev/null +++ b/consensus/types/presets/mainnet/capella.yaml @@ -0,0 +1,12 @@ +# Mainnet preset - Capella + +# Misc +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_BLS_TO_EXECUTION_CHANGES: 16 + +# Execution +# --------------------------------------------------------------- +# 2**4 (= 16) withdrawals +MAX_WITHDRAWALS_PER_PAYLOAD: 16 \ No newline at end of file diff --git a/consensus/types/presets/minimal/capella.yaml b/consensus/types/presets/minimal/capella.yaml new file mode 100644 index 00000000000..eacd6c7cbca --- /dev/null +++ b/consensus/types/presets/minimal/capella.yaml @@ -0,0 +1,12 @@ +# Minimal preset - Capella + +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_BLS_TO_EXECUTION_CHANGES: 16 + + +# Execution +# --------------------------------------------------------------- +# [customized] 2**2 (= 4) +MAX_WITHDRAWALS_PER_PAYLOAD: 4 \ No newline at end of file diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 36e0ce77004..ce5c127748b 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -62,6 +62,10 @@ pub struct BeaconBlockBody = FullPay #[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))] #[serde(flatten)] pub execution_payload: Payload::Eip4844, + #[cfg(feature = "withdrawals")] + #[superstruct(only(Capella, Eip4844))] + pub bls_to_execution_changes: + VariableList, #[superstruct(only(Eip4844))] pub blob_kzg_commitments: VariableList, #[superstruct(only(Base, Altair))] diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index ec5aa9c4f3d..0243aa832bd 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -297,13 +297,10 @@ where // Withdrawals #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] - pub withdrawal_queue: VariableList, - #[cfg(feature = "withdrawals")] - #[superstruct(only(Capella, Eip4844))] pub 
next_withdrawal_index: u64, #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] - pub next_partial_withdrawal_validator_index: u64, + pub latest_withdrawal_validator_index: u64, // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 4cf102bd772..4cba9c79503 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -98,8 +98,6 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + /* * New in Capella */ - type MaxPartialWithdrawalsPerEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq; - type WithdrawalQueueLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxBlsToExecutionChanges: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxWithdrawalsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* @@ -235,16 +233,6 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + Self::BytesPerLogsBloom::to_usize() } - /// Returns the `MAX_PARTIAL_WITHDRAWALS_PER_EPOCH` constant for this specification. - fn max_partial_withdrawals_per_epoch() -> usize { - Self::MaxPartialWithdrawalsPerEpoch::to_usize() - } - - /// Returns the `WITHDRAWAL_QUEUE_LIMIT` constant for this specification. - fn withdrawal_queue_limit() -> usize { - Self::WithdrawalQueueLimit::to_usize() - } - /// Returns the `MAX_BLS_TO_EXECUTION_CHANGES` constant for this specification. fn max_bls_to_execution_changes() -> usize { Self::MaxBlsToExecutionChanges::to_usize() @@ -309,8 +297,6 @@ impl EthSpec for MainnetEthSpec { type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch - type MaxPartialWithdrawalsPerEpoch = U256; - type WithdrawalQueueLimit = U1099511627776; type MaxBlsToExecutionChanges = U16; type MaxWithdrawalsPerPayload = U16; @@ -358,8 +344,6 @@ impl EthSpec for MinimalEthSpec { GasLimitDenominator, MinGasLimit, MaxExtraDataBytes, - MaxPartialWithdrawalsPerEpoch, - WithdrawalQueueLimit, MaxBlsToExecutionChanges, MaxWithdrawalsPerPayload, MaxBlobsPerBlock, @@ -408,8 +392,6 @@ impl EthSpec for GnosisEthSpec { type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U2048; // 128 max attestations * 16 slots per epoch type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch - type MaxPartialWithdrawalsPerEpoch = U256; - type WithdrawalQueueLimit = U1099511627776; type MaxBlsToExecutionChanges = U16; type MaxWithdrawalsPerPayload = U16; type MaxBlobsPerBlock = U16; // 2**4 = 16 diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 022f378e395..eec88b97eb0 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -3,7 +3,6 @@ use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use std::slice::Iter; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -13,6 +12,8 @@ pub type Transactions = VariableList< ::MaxTransactionsPerPayload, >; +pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; + #[superstruct( variants(Merge, Capella, Eip4844), variant_attributes( @@ -82,7 +83,7 @@ pub struct ExecutionPayload { pub transactions: 
Transactions, #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] - pub withdrawals: VariableList, + pub withdrawals: Withdrawals, } impl ExecutionPayload { diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index eecfb27c488..a129a22db7d 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -27,6 +27,7 @@ pub mod beacon_block_body; pub mod beacon_block_header; pub mod beacon_committee; pub mod beacon_state; +pub mod bls_to_execution_change; pub mod builder_bid; pub mod chain_spec; pub mod checkpoint; @@ -61,6 +62,7 @@ pub mod shuffling_id; pub mod signed_aggregate_and_proof; pub mod signed_beacon_block; pub mod signed_beacon_block_header; +pub mod signed_bls_to_execution_change; pub mod signed_contribution_and_proof; pub mod signed_voluntary_exit; pub mod signing_data; @@ -117,6 +119,7 @@ pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *}; pub use crate::blobs_sidecar::BlobsSidecar; +pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; pub use crate::config_and_preset::{ @@ -133,7 +136,7 @@ pub use crate::eth_spec::EthSpecId; pub use crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_payload::{ ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, - ExecutionPayloadRef, Transaction, Transactions, + ExecutionPayloadRef, Transaction, Transactions, Withdrawals, }; pub use crate::execution_payload_header::{ ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844, @@ -170,6 +173,7 @@ pub use crate::signed_beacon_block::{ SignedBlindedBeaconBlock, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; +pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; pub use crate::signing_data::{SignedRoot, SigningData}; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 5b457daee3b..ae992002e9e 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -36,6 +36,9 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn fee_recipient(&self) -> Address; fn gas_limit(&self) -> u64; fn transactions(&self) -> Option<&Transactions>; + /// fork-specific fields + #[cfg(feature = "withdrawals")] + fn withdrawals(&self) -> Option, Error>>; /// Is this a default payload? 
(pre-merge) fn is_default(&self) -> bool; @@ -225,6 +228,15 @@ impl ExecPayload for FullPayload { }) } + #[cfg(feature = "withdrawals")] + fn withdrawals(&self) -> Option, Error>> { + match self { + FullPayload::Merge(_) => Some(Err(Error::IncorrectStateVariant)), + FullPayload::Capella(ref inner) => Some(Ok(&inner.execution_payload.withdrawals)), + FullPayload::Eip4844(ref inner) => Some(Ok(&inner.execution_payload.withdrawals)), + } + } + fn is_default<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -309,6 +321,15 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { }) } + #[cfg(feature = "withdrawals")] + fn withdrawals(&self) -> Option, Error>> { + match self { + FullPayloadRef::Merge(_inner) => Some(Err(Error::IncorrectStateVariant)), + FullPayloadRef::Capella(inner) => Some(Ok(&inner.execution_payload.withdrawals)), + FullPayloadRef::Eip4844(inner) => Some(Ok(&inner.execution_payload.withdrawals)), + } + } + // TODO: can this function be optimized? fn is_default<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self, move |payload, cons| { @@ -463,6 +484,11 @@ impl ExecPayload for BlindedPayload { None } + #[cfg(feature = "withdrawals")] + fn withdrawals(&self) -> Option, Error>> { + None + } + fn is_default<'a>(&'a self) -> bool { map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -536,6 +562,11 @@ impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { None } + #[cfg(feature = "withdrawals")] + fn withdrawals<'a>(&'a self) -> Option, Error>> { + None + } + // TODO: can this function be optimized? fn is_default<'a>(&'a self) -> bool { map_blinded_payload_ref!(&'a _, self, move |payload, cons| { @@ -546,7 +577,7 @@ impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { } macro_rules! impl_exec_payload_common { - ($wrapper_type:ident, $wrapped_type_full:ident, $wrapped_header_type:ident, $wrapped_field:ident, $fork_variant:ident, $block_type_variant:ident, $f:block) => { + ($wrapper_type:ident, $wrapped_type_full:ident, $wrapped_header_type:ident, $wrapped_field:ident, $fork_variant:ident, $block_type_variant:ident, $f:block, $g:block) => { impl ExecPayload for $wrapper_type { fn block_type() -> BlockType { BlockType::$block_type_variant @@ -594,6 +625,12 @@ macro_rules! impl_exec_payload_common { let f = $f; f(self) } + + #[cfg(feature = "withdrawals")] + fn withdrawals(&self) -> Option, Error>> { + let g = $g; + g(self) + } } impl From<$wrapped_type_full> for $wrapper_type { @@ -605,7 +642,7 @@ macro_rules! impl_exec_payload_common { } macro_rules! impl_exec_payload_for_fork { - ($wrapper_type_header:ident, $wrapper_type_full:ident, $wrapped_type_header:ident, $wrapped_type_full:ident, $fork_variant:ident) => { + ($wrapper_type_header:ident, $wrapper_type_full:ident, $wrapped_type_header:ident, $wrapped_type_full:ident, $fork_variant:ident, $withdrawal_fn:block) => { //*************** Blinded payload implementations ******************// impl_exec_payload_common!( @@ -615,6 +652,7 @@ macro_rules! impl_exec_payload_for_fork { execution_payload_header, $fork_variant, Blinded, + { |_| { None } }, { |_| { None } } ); @@ -680,7 +718,8 @@ macro_rules! 
impl_exec_payload_for_fork { let c: for<'a> fn(&'a $wrapper_type_full) -> Option<&'a Transactions> = |payload: &$wrapper_type_full| Some(&payload.execution_payload.transactions); c - } + }, + $withdrawal_fn ); impl Default for $wrapper_type_full { @@ -723,21 +762,36 @@ impl_exec_payload_for_fork!( FullPayloadMerge, ExecutionPayloadHeaderMerge, ExecutionPayloadMerge, - Merge + Merge, + { + let c: for<'a> fn(&'a FullPayloadMerge) -> Option, Error>> = + |_| Some(Err(Error::IncorrectStateVariant)); + c + } ); impl_exec_payload_for_fork!( BlindedPayloadCapella, FullPayloadCapella, ExecutionPayloadHeaderCapella, ExecutionPayloadCapella, - Capella + Capella, + { + let c: for<'a> fn(&'a FullPayloadCapella) -> Option, Error>> = + |payload: &FullPayloadCapella| Some(Ok(&payload.execution_payload.withdrawals)); + c + } ); impl_exec_payload_for_fork!( BlindedPayloadEip4844, FullPayloadEip4844, ExecutionPayloadHeaderEip4844, ExecutionPayloadEip4844, - Eip4844 + Eip4844, + { + let c: for<'a> fn(&'a FullPayloadEip4844) -> Option, Error>> = + |payload: &FullPayloadEip4844| Some(Ok(&payload.execution_payload.withdrawals)); + c + } ); impl AbstractExecPayload for BlindedPayload { diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 6e63c943a1a..3e93474927e 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,5 +1,6 @@ use crate::{ - test_utils::TestRandom, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, BlsToExecutionChange, ChainSpec, Epoch, EthSpec, + Hash256, PublicKeyBytes, }; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -75,6 +76,16 @@ impl Validator { .unwrap_or(false) } + /// Changes withdrawal credentials to the provided eth1 execution address + /// + /// WARNING: this function does NO VALIDATION - it just does it! 
+ pub fn change_withdrawal_credentials(&mut self, execution_address: &Address, spec: &ChainSpec) { + let mut bytes = [0u8; 32]; + bytes[0] = spec.eth1_address_withdrawal_prefix_byte; + bytes[12..].copy_from_slice(execution_address.as_bytes()); + self.withdrawal_credentials = Hash256::from(bytes); + } + /// Returns `true` if the validator is fully withdrawable at some epoch pub fn is_fully_withdrawable_at(&self, balance: u64, epoch: Epoch, spec: &ChainSpec) -> bool { self.has_eth1_withdrawal_credential(spec) && self.withdrawable_epoch <= epoch && balance > 0 From 2d01ae60360de4a6de159fd3d5380c5f91b438f7 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Wed, 9 Nov 2022 19:28:14 -0600 Subject: [PATCH 064/263] Fixed compiling with withdrawals enabled --- beacon_node/beacon_chain/src/beacon_chain.rs | 12 ++++ beacon_node/store/src/partial_beacon_state.rs | 17 ++---- .../process_operations.rs | 8 ++- .../verify_bls_to_execution_change.rs | 57 +++++++++++++++++++ .../state_processing/src/upgrade/capella.rs | 4 +- .../state_processing/src/upgrade/eip4844.rs | 4 +- consensus/types/src/beacon_block_body.rs | 16 ++++++ .../types/src/bls_to_execution_change.rs | 30 ++++++++++ consensus/types/src/signed_beacon_block.rs | 8 +++ .../src/signed_bls_to_execution_change.rs | 26 +++++++++ 10 files changed, 161 insertions(+), 21 deletions(-) create mode 100644 consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs create mode 100644 consensus/types/src/bls_to_execution_change.rs create mode 100644 consensus/types/src/signed_bls_to_execution_change.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index a7d0fe5c6c6..fd4cf99f11a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -261,6 +261,8 @@ struct PartialBeaconBlock> { voluntary_exits: Vec, sync_aggregate: Option>, prepare_payload_handle: Option>, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes: Vec, } pub type BeaconForkChoice = ForkChoice< @@ -3485,6 +3487,9 @@ impl BeaconChain { let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; let deposits = eth1_chain.deposits_for_block_inclusion(&state, ð1_data, &self.spec)?; + let bls_to_execution_changes = self + .op_pool + .get_bls_to_execution_changes(&state, &self.spec); // Iterate through the naive aggregation pool and ensure all the attestations from there // are included in the operation pool. @@ -3642,6 +3647,7 @@ impl BeaconChain { voluntary_exits, sync_aggregate, prepare_payload_handle, + bls_to_execution_changes, }) } @@ -3670,6 +3676,8 @@ impl BeaconChain { // this function. We can assume that the handle has already been consumed in order to // produce said `execution_payload`. 
prepare_payload_handle: _, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, } = partial_beacon_block; let inner_block = match &state { @@ -3751,6 +3759,8 @@ impl BeaconChain { .to_payload() .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes: bls_to_execution_changes.into(), }, }), BeaconState::Eip4844(_) => BeaconBlock::Eip4844(BeaconBlockEip4844 { @@ -3773,6 +3783,8 @@ impl BeaconChain { .to_payload() .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes: bls_to_execution_changes.into(), //FIXME(sean) get blobs blob_kzg_commitments: VariableList::from(kzg_commitments), }, diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 5cff00529e3..ad52bc5b89b 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -107,13 +107,10 @@ where // Withdrawals #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] - pub withdrawal_queue: VariableList, - #[cfg(feature = "withdrawals")] - #[superstruct(only(Capella, Eip4844))] pub next_withdrawal_index: u64, #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] - pub next_partial_withdrawal_validator_index: u64, + pub latest_withdrawal_validator_index: u64, } /// Implement the conversion function from BeaconState -> PartialBeaconState. @@ -215,9 +212,8 @@ impl PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header, - withdrawal_queue, next_withdrawal_index, - next_partial_withdrawal_validator_index + latest_withdrawal_validator_index ] ), #[cfg(not(feature = "withdrawals"))] @@ -248,9 +244,8 @@ impl PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header, - withdrawal_queue, next_withdrawal_index, - next_partial_withdrawal_validator_index + latest_withdrawal_validator_index ] ), #[cfg(not(feature = "withdrawals"))] @@ -467,9 +462,8 @@ impl TryInto> for PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header, - withdrawal_queue, next_withdrawal_index, - next_partial_withdrawal_validator_index + latest_withdrawal_validator_index ] ), #[cfg(not(feature = "withdrawals"))] @@ -498,9 +492,8 @@ impl TryInto> for PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header, - withdrawal_queue, next_withdrawal_index, - next_partial_withdrawal_validator_index + latest_withdrawal_validator_index ] ), #[cfg(not(feature = "withdrawals"))] diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index a85cbce6e57..cc8be937d83 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -300,7 +300,9 @@ pub fn process_bls_to_execution_changes<'a, T: EthSpec, Payload: AbstractExecPay | BeaconBlockBodyRef::Altair(_) | BeaconBlockBodyRef::Merge(_) => Ok(()), BeaconBlockBodyRef::Capella(_) | BeaconBlockBodyRef::Eip4844(_) => { - for (i, signed_address_change) in block_body.bls_to_execution_changes()?.enumerate() { + for (i, signed_address_change) in + block_body.bls_to_execution_changes()?.iter().enumerate() + { verify_bls_to_execution_change( state, &signed_address_change, @@ -310,9 +312,9 @@ pub fn 
process_bls_to_execution_changes<'a, T: EthSpec, Payload: AbstractExecPay .map_err(|e| e.into_with_index(i))?; state - .get_validator_mut(signed_address_change.message.validator_index)? + .get_validator_mut(signed_address_change.message.validator_index as usize)? .change_withdrawal_credentials( - signed_address_change.message.to_execution_address, + &signed_address_change.message.to_execution_address, spec, ); } diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs new file mode 100644 index 00000000000..3c15691453c --- /dev/null +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -0,0 +1,57 @@ +use super::errors::{BlockOperationError, BlsExecutionChangeInvalid as Invalid}; +use crate::per_block_processing::signature_sets::bls_execution_change_signature_set; +use crate::VerifySignatures; +use eth2_hashing::hash; +use types::*; + +type Result = std::result::Result>; + +fn error(reason: Invalid) -> BlockOperationError { + BlockOperationError::invalid(reason) +} + +/// Indicates if a `BlsToExecutionChange` is valid to be included in a block in the current epoch of the given +/// state. +/// +/// Returns `Ok(())` if the `SignedBlsToExecutionChange` is valid, otherwise indicates the reason for invalidity. +pub fn verify_bls_to_execution_change( + state: &BeaconState, + signed_address_change: &SignedBlsToExecutionChange, + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<()> { + let address_change = &signed_address_change.message; + + let validator = state + .validators() + .get(address_change.validator_index as usize) + .ok_or_else(|| error(Invalid::ValidatorUnknown(address_change.validator_index)))?; + + verify!( + validator + .withdrawal_credentials + .as_bytes() + .first() + .map(|byte| *byte == spec.bls_withdrawal_prefix_byte) + .unwrap_or(false), + Invalid::NonBlsWithdrawalCredentials + ); + + let pubkey_hash = hash(address_change.from_bls_pubkey.as_serialized()); + + // FIXME: Should this check be put inside the verify_signatures.is_true() condition? + // I believe that's used for fuzzing so this is a Mehdi question.. + verify!( + validator.withdrawal_credentials.as_bytes()[1..] 
== pubkey_hash[1..], + Invalid::WithdrawalCredentialsMismatch + ); + + if verify_signatures.is_true() { + verify!( + bls_execution_change_signature_set(state, signed_address_change, spec,)?.verify(), + Invalid::BadSignature + ); + } + + Ok(()) +} diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index e64c8398068..e4e8ed85337 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -58,11 +58,9 @@ pub fn upgrade_to_capella( latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_capella(), // Withdrawals #[cfg(feature = "withdrawals")] - withdrawal_queue: VariableList::empty(), - #[cfg(feature = "withdrawals")] next_withdrawal_index: 0, #[cfg(feature = "withdrawals")] - next_partial_withdrawal_validator_index: 0, + latest_withdrawal_validator_index: 0, // Caches total_active_balance: pre.total_active_balance, committee_caches: mem::take(&mut pre.committee_caches), diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index 78fb16033e9..8ef3a21b176 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -65,11 +65,9 @@ pub fn upgrade_to_eip4844( latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_eip4844(), // Withdrawals #[cfg(feature = "withdrawals")] - withdrawal_queue: mem::take(&mut pre.withdrawal_queue), - #[cfg(feature = "withdrawals")] next_withdrawal_index: pre.next_withdrawal_index, #[cfg(feature = "withdrawals")] - next_partial_withdrawal_validator_index: pre.next_partial_withdrawal_validator_index, + latest_withdrawal_validator_index: 0, // Caches total_active_balance: pre.total_active_balance, committee_caches: mem::take(&mut pre.committee_caches), diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index ce5c127748b..c0d7b243eb5 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -301,6 +301,8 @@ impl From>> voluntary_exits, sync_aggregate, execution_payload: FullPayloadCapella { execution_payload }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, } = body; ( @@ -317,6 +319,8 @@ impl From>> execution_payload: BlindedPayloadCapella { execution_payload_header: From::from(execution_payload.clone()), }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, }, Some(execution_payload), ) @@ -341,6 +345,8 @@ impl From>> voluntary_exits, sync_aggregate, execution_payload: FullPayloadEip4844 { execution_payload }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, blob_kzg_commitments, } = body; @@ -358,6 +364,8 @@ impl From>> execution_payload: BlindedPayloadEip4844 { execution_payload_header: From::from(execution_payload.clone()), }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, blob_kzg_commitments, }, Some(execution_payload), @@ -425,6 +433,8 @@ impl BeaconBlockBodyCapella> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadCapella { execution_payload }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, } = self; BeaconBlockBodyCapella { @@ -440,6 +450,8 @@ impl BeaconBlockBodyCapella> { execution_payload: BlindedPayloadCapella { execution_payload_header: From::from(execution_payload.clone()), }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes: bls_to_execution_changes.clone(), } } } @@ -457,6 
+469,8 @@ impl BeaconBlockBodyEip4844> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadEip4844 { execution_payload }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, blob_kzg_commitments, } = self; @@ -473,6 +487,8 @@ impl BeaconBlockBodyEip4844> { execution_payload: BlindedPayloadEip4844 { execution_payload_header: From::from(execution_payload.clone()), }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes: bls_to_execution_changes.clone(), blob_kzg_commitments: blob_kzg_commitments.clone(), } } diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs new file mode 100644 index 00000000000..ca8e0ecf708 --- /dev/null +++ b/consensus/types/src/bls_to_execution_change.rs @@ -0,0 +1,30 @@ +use crate::test_utils::TestRandom; +use crate::*; +use bls::PublicKeyBytes; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +/// A deposit to potentially become a beacon chain validator. +/// +/// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive( + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] +pub struct BlsToExecutionChange { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub validator_index: u64, + pub from_bls_pubkey: PublicKeyBytes, + pub to_execution_address: Address, +} + +impl SignedRoot for BlsToExecutionChange {} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(BlsToExecutionChange); +} diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 70cc4c1125a..2a8398f83f3 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -341,6 +341,8 @@ impl SignedBeaconBlockCapella> { voluntary_exits, sync_aggregate, execution_payload: BlindedPayloadCapella { .. }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, }, }, signature, @@ -362,6 +364,8 @@ impl SignedBeaconBlockCapella> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadCapella { execution_payload }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, }, }, signature, @@ -393,6 +397,8 @@ impl SignedBeaconBlockEip4844> { voluntary_exits, sync_aggregate, execution_payload: BlindedPayloadEip4844 { .. }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, blob_kzg_commitments, }, }, @@ -415,6 +421,8 @@ impl SignedBeaconBlockEip4844> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadEip4844 { execution_payload }, + #[cfg(feature = "withdrawals")] + bls_to_execution_changes, blob_kzg_commitments, }, }, diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/signed_bls_to_execution_change.rs new file mode 100644 index 00000000000..fc636bb82dd --- /dev/null +++ b/consensus/types/src/signed_bls_to_execution_change.rs @@ -0,0 +1,26 @@ +use crate::test_utils::TestRandom; +use crate::*; +use bls::Signature; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +/// A deposit to potentially become a beacon chain validator. 
+/// +/// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive( + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] +pub struct SignedBlsToExecutionChange { + pub message: BlsToExecutionChange, + pub signature: Signature, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(SignedBlsToExecutionChange); +} From 219124234150759ee9d7a90c55fb9c44b77ca27c Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Wed, 9 Nov 2022 19:35:01 -0600 Subject: [PATCH 065/263] Added stuff that NEEDS IMPLEMENTING --- beacon_node/operation_pool/src/lib.rs | 15 ++++++++++++++- beacon_node/operation_pool/src/persistence.rs | 2 ++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 4fe5a725458..ba0567277b5 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -32,7 +32,8 @@ use std::ptr; use types::{ sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, Attestation, AttestationData, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ProposerSlashing, - SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, Validator, + SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, SyncAggregate, + SyncCommitteeContribution, Validator, }; type SyncContributions = RwLock>>>; @@ -49,6 +50,8 @@ pub struct OperationPool { proposer_slashings: RwLock>>, /// Map from exiting validator to their exit data. voluntary_exits: RwLock>>, + /// Map from credential changing validator to their execution change data. + bls_to_execution_changes: RwLock>>, /// Reward cache for accelerating attestation packing. reward_cache: RwLock, _phantom: PhantomData, @@ -509,6 +512,16 @@ impl OperationPool { ); } + /// Get a list of execution changes for inclusion in a block. + pub fn get_bls_to_execution_changes( + &self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Vec { + // FIXME: actually implement this + return vec![]; + } + /// Prune all types of transactions given the latest head state and head fork. 
pub fn prune_all(&self, head_state: &BeaconState, current_epoch: Epoch) { self.prune_attestations(current_epoch); diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index ed15369df73..92c5bd92f68 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -142,6 +142,8 @@ impl PersistedOperationPool { attester_slashings, proposer_slashings, voluntary_exits, + // FIXME: IMPLEMENT THIS + bls_to_execution_changes: Default::default(), reward_cache: Default::default(), _phantom: Default::default(), }; From 756e48f5dc0bc1ca10bdd0976b3a19877620aa2c Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Thu, 10 Nov 2022 11:49:55 -0600 Subject: [PATCH 066/263] BeaconState field renamed --- beacon_node/store/src/partial_beacon_state.rs | 10 +++++----- .../src/per_block_processing/process_operations.rs | 2 -- consensus/state_processing/src/upgrade/capella.rs | 2 +- consensus/state_processing/src/upgrade/eip4844.rs | 2 +- consensus/types/src/beacon_state.rs | 2 +- 5 files changed, 8 insertions(+), 10 deletions(-) diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index ad52bc5b89b..12c56284966 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -110,7 +110,7 @@ where pub next_withdrawal_index: u64, #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] - pub latest_withdrawal_validator_index: u64, + pub next_withdrawal_validator_index: u64, } /// Implement the conversion function from BeaconState -> PartialBeaconState. @@ -213,7 +213,7 @@ impl PartialBeaconState { inactivity_scores, latest_execution_payload_header, next_withdrawal_index, - latest_withdrawal_validator_index + next_withdrawal_validator_index ] ), #[cfg(not(feature = "withdrawals"))] @@ -245,7 +245,7 @@ impl PartialBeaconState { inactivity_scores, latest_execution_payload_header, next_withdrawal_index, - latest_withdrawal_validator_index + next_withdrawal_validator_index ] ), #[cfg(not(feature = "withdrawals"))] @@ -463,7 +463,7 @@ impl TryInto> for PartialBeaconState { inactivity_scores, latest_execution_payload_header, next_withdrawal_index, - latest_withdrawal_validator_index + next_withdrawal_validator_index ] ), #[cfg(not(feature = "withdrawals"))] @@ -493,7 +493,7 @@ impl TryInto> for PartialBeaconState { inactivity_scores, latest_execution_payload_header, next_withdrawal_index, - latest_withdrawal_validator_index + next_withdrawal_validator_index ] ), #[cfg(not(feature = "withdrawals"))] diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index cc8be937d83..9b24be39e7a 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -286,8 +286,6 @@ pub fn process_exits( /// /// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returs /// an `Err` describing the invalid object or cause of failure. 
-/// -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-process_bls_to_execution_change #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] pub fn process_bls_to_execution_changes<'a, T: EthSpec, Payload: AbstractExecPayload>( state: &mut BeaconState, diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index e4e8ed85337..ad106a84220 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -60,7 +60,7 @@ pub fn upgrade_to_capella( #[cfg(feature = "withdrawals")] next_withdrawal_index: 0, #[cfg(feature = "withdrawals")] - latest_withdrawal_validator_index: 0, + next_withdrawal_validator_index: 0, // Caches total_active_balance: pre.total_active_balance, committee_caches: mem::take(&mut pre.committee_caches), diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index 8ef3a21b176..478024f17e2 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -67,7 +67,7 @@ pub fn upgrade_to_eip4844( #[cfg(feature = "withdrawals")] next_withdrawal_index: pre.next_withdrawal_index, #[cfg(feature = "withdrawals")] - latest_withdrawal_validator_index: 0, + next_withdrawal_validator_index: 0, // Caches total_active_balance: pre.total_active_balance, committee_caches: mem::take(&mut pre.committee_caches), diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 0243aa832bd..66125c29762 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -300,7 +300,7 @@ where pub next_withdrawal_index: u64, #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] - pub latest_withdrawal_validator_index: u64, + pub next_withdrawal_validator_index: u64, // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] From 81319dfcae2df4c71533f07cbe97d4df9d610a1e Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Thu, 10 Nov 2022 15:33:26 -0600 Subject: [PATCH 067/263] Forgot one feature guard --- beacon_node/beacon_chain/src/beacon_chain.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index fd4cf99f11a..e7c456ef11f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3647,6 +3647,7 @@ impl BeaconChain { voluntary_exits, sync_aggregate, prepare_payload_handle, + #[cfg(feature = "withdrawals")] bls_to_execution_changes, }) } From 276e1845fd9ff467bf0b3dfb473f8c51fd1c5268 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Sun, 13 Nov 2022 18:20:27 -0600 Subject: [PATCH 068/263] Added process_withdrawals --- .../src/per_block_processing.rs | 88 ++++++++++++++++++ .../src/per_block_processing/errors.rs | 4 + consensus/types/src/payload.rs | 93 ++++++++++++------- 3 files changed, 150 insertions(+), 35 deletions(-) diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index c39f46b95ba..2d8a01ff48c 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -42,6 +42,7 @@ mod verify_deposit; mod verify_exit; mod verify_proposer_slashing; +use crate::common::decrease_balance; #[cfg(feature = "arbitrary-fuzz")] use 
arbitrary::Arbitrary; @@ -165,6 +166,8 @@ pub fn per_block_processing>( // previous block. if is_execution_enabled(state, block.body()) { let payload = block.body().execution_payload()?; + #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] + process_withdrawals::(state, payload, spec)?; process_execution_payload::(state, payload, spec)?; } @@ -455,3 +458,88 @@ pub fn compute_timestamp_at_slot( .safe_mul(spec.seconds_per_slot) .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) } + +/// FIXME: add link to this function once the spec is stable +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +pub fn get_expected_withdrawals( + state: &BeaconState, + spec: &ChainSpec, +) -> Result, BlockProcessingError> { + let epoch = state.current_epoch(); + let mut withdrawal_index = *state.next_withdrawal_index()?; + let mut validator_index = *state.next_withdrawal_validator_index()?; + let mut withdrawals = vec![]; + + for _ in 0..state.validators().len() { + let validator = state.get_validator(validator_index as usize)?; + let balance = *state + .balances() + .get(validator_index as usize) + .ok_or_else(|| BeaconStateError::BalancesOutOfBounds(validator_index as usize))?; + if validator.is_fully_withdrawable_at(balance, epoch, spec) { + withdrawals.push(Withdrawal { + index: withdrawal_index, + validator_index, + address: Address::from_slice(&validator.withdrawal_credentials[12..]), + amount: balance, + }); + withdrawal_index.safe_add_assign(1)?; + } else if validator.is_partially_withdrawable_validator(balance, spec) { + withdrawals.push(Withdrawal { + index: withdrawal_index, + validator_index, + address: Address::from_slice(&validator.withdrawal_credentials[12..]), + amount: balance.safe_sub(spec.max_effective_balance)?, + }); + withdrawal_index.safe_add_assign(1)?; + } + if withdrawals.len() == T::max_withdrawals_per_payload() { + break; + } + validator_index = validator_index.safe_add(1)? % state.validators().len() as u64; + } + + Ok(withdrawals.into()) +} + +/// FIXME: add link to this function once the spec is stable +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload>( + state: &mut BeaconState, + payload: Payload::Ref<'payload>, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + match state { + BeaconState::Merge(_) => Ok(()), + BeaconState::Capella(_) | BeaconState::Eip4844(_) => { + let expected_withdrawals = get_expected_withdrawals(state, spec)?; + let withdrawals_root = payload.withdrawals_root()?; + + if expected_withdrawals.tree_hash_root() != payload.withdrawals_root()? { + return Err(BlockProcessingError::WithdrawalsRootMismatch { + expected: expected_withdrawals.tree_hash_root(), + found: payload.withdrawals_root()?, + }); + } + + for withdrawal in expected_withdrawals.iter() { + decrease_balance( + state, + withdrawal.validator_index as usize, + withdrawal.amount, + )?; + } + + if let Some(latest_withdrawal) = expected_withdrawals.last() { + *state.next_withdrawal_index_mut()? = latest_withdrawal.index + 1; + let next_validator_index = + (latest_withdrawal.validator_index + 1) % state.validators().len() as u64; + *state.next_withdrawal_validator_index_mut()? 
= next_validator_index; + } + + Ok(()) + } + // these shouldn't even be encountered but they're here for completeness + BeaconState::Base(_) | BeaconState::Altair(_) => Ok(()), + } +} diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 39c740480d0..8de0fd337ac 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -78,6 +78,10 @@ pub enum BlockProcessingError { }, ExecutionInvalid, ConsensusContext(ContextError), + WithdrawalsRootMismatch { + expected: Hash256, + found: Hash256, + }, BlobVersionHashMismatch, /// The number of commitments in blob transactions in the payload does not match the number /// of commitments in the block. diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index ae992002e9e..84cc70ed8fa 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -38,7 +38,7 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn transactions(&self) -> Option<&Transactions>; /// fork-specific fields #[cfg(feature = "withdrawals")] - fn withdrawals(&self) -> Option, Error>>; + fn withdrawals_root(&self) -> Result; /// Is this a default payload? (pre-merge) fn is_default(&self) -> bool; @@ -229,11 +229,15 @@ impl ExecPayload for FullPayload { } #[cfg(feature = "withdrawals")] - fn withdrawals(&self) -> Option, Error>> { + fn withdrawals_root(&self) -> Result { match self { - FullPayload::Merge(_) => Some(Err(Error::IncorrectStateVariant)), - FullPayload::Capella(ref inner) => Some(Ok(&inner.execution_payload.withdrawals)), - FullPayload::Eip4844(ref inner) => Some(Ok(&inner.execution_payload.withdrawals)), + FullPayload::Merge(_) => Err(Error::IncorrectStateVariant), + FullPayload::Capella(ref inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } + FullPayload::Eip4844(ref inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } } } @@ -322,11 +326,15 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { } #[cfg(feature = "withdrawals")] - fn withdrawals(&self) -> Option, Error>> { + fn withdrawals_root(&self) -> Result { match self { - FullPayloadRef::Merge(_inner) => Some(Err(Error::IncorrectStateVariant)), - FullPayloadRef::Capella(inner) => Some(Ok(&inner.execution_payload.withdrawals)), - FullPayloadRef::Eip4844(inner) => Some(Ok(&inner.execution_payload.withdrawals)), + FullPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), + FullPayloadRef::Capella(inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } + FullPayloadRef::Eip4844(inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } } } @@ -485,8 +493,16 @@ impl ExecPayload for BlindedPayload { } #[cfg(feature = "withdrawals")] - fn withdrawals(&self) -> Option, Error>> { - None + fn withdrawals_root(&self) -> Result { + match self { + BlindedPayload::Merge(_) => Err(Error::IncorrectStateVariant), + BlindedPayload::Capella(ref inner) => { + Ok(inner.execution_payload_header.withdrawals_root) + } + BlindedPayload::Eip4844(ref inner) => { + Ok(inner.execution_payload_header.withdrawals_root) + } + } } fn is_default<'a>(&'a self) -> bool { @@ -563,8 +579,16 @@ impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { } #[cfg(feature = "withdrawals")] - fn withdrawals<'a>(&'a self) -> Option, Error>> { - None + fn withdrawals_root(&self) -> Result { + match self { + 
BlindedPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), + BlindedPayloadRef::Capella(inner) => { + Ok(inner.execution_payload_header.withdrawals_root) + } + BlindedPayloadRef::Eip4844(inner) => { + Ok(inner.execution_payload_header.withdrawals_root) + } + } } // TODO: can this function be optimized? @@ -627,7 +651,7 @@ macro_rules! impl_exec_payload_common { } #[cfg(feature = "withdrawals")] - fn withdrawals(&self) -> Option, Error>> { + fn withdrawals_root(&self) -> Result { let g = $g; g(self) } @@ -642,7 +666,7 @@ macro_rules! impl_exec_payload_common { } macro_rules! impl_exec_payload_for_fork { - ($wrapper_type_header:ident, $wrapper_type_full:ident, $wrapped_type_header:ident, $wrapped_type_full:ident, $fork_variant:ident, $withdrawal_fn:block) => { + ($wrapper_type_header:ident, $wrapper_type_full:ident, $wrapped_type_header:ident, $wrapped_type_full:ident, $fork_variant:ident) => { //*************** Blinded payload implementations ******************// impl_exec_payload_common!( @@ -653,7 +677,14 @@ macro_rules! impl_exec_payload_for_fork { $fork_variant, Blinded, { |_| { None } }, - { |_| { None } } + { + let c: for<'a> fn(&'a $wrapper_type_header) -> Result = + |payload: &$wrapper_type_header| { + let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); + wrapper_ref_type.withdrawals_root() + }; + c + } ); impl TryInto<$wrapper_type_header> for BlindedPayload { @@ -719,7 +750,14 @@ macro_rules! impl_exec_payload_for_fork { |payload: &$wrapper_type_full| Some(&payload.execution_payload.transactions); c }, - $withdrawal_fn + { + let c: for<'a> fn(&'a $wrapper_type_full) -> Result = + |payload: &$wrapper_type_full| { + let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); + wrapper_ref_type.withdrawals_root() + }; + c + } ); impl Default for $wrapper_type_full { @@ -762,36 +800,21 @@ impl_exec_payload_for_fork!( FullPayloadMerge, ExecutionPayloadHeaderMerge, ExecutionPayloadMerge, - Merge, - { - let c: for<'a> fn(&'a FullPayloadMerge) -> Option, Error>> = - |_| Some(Err(Error::IncorrectStateVariant)); - c - } + Merge ); impl_exec_payload_for_fork!( BlindedPayloadCapella, FullPayloadCapella, ExecutionPayloadHeaderCapella, ExecutionPayloadCapella, - Capella, - { - let c: for<'a> fn(&'a FullPayloadCapella) -> Option, Error>> = - |payload: &FullPayloadCapella| Some(Ok(&payload.execution_payload.withdrawals)); - c - } + Capella ); impl_exec_payload_for_fork!( BlindedPayloadEip4844, FullPayloadEip4844, ExecutionPayloadHeaderEip4844, ExecutionPayloadEip4844, - Eip4844, - { - let c: for<'a> fn(&'a FullPayloadEip4844) -> Option, Error>> = - |payload: &FullPayloadEip4844| Some(Ok(&payload.execution_payload.withdrawals)); - c - } + Eip4844 ); impl AbstractExecPayload for BlindedPayload { From 0cdd049da96f9970a057ea1b0555b065ff9066e4 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 15 Nov 2022 06:14:31 +1100 Subject: [PATCH 069/263] Fixes to make EF Capella tests pass (#3719) * Fixes to make EF Capella tests pass * Clippy for state_processing --- Makefile | 9 +- .../src/rpc/codec/ssz_snappy.rs | 4 +- .../lighthouse_network/src/rpc/protocol.rs | 4 +- beacon_node/store/src/hot_cold_store.rs | 18 +-- .../state_processing/src/consensus_context.rs | 2 +- .../src/per_block_processing.rs | 40 +++-- .../src/per_block_processing/eip4844.rs | 1 + .../per_block_processing/eip4844/eip4844.rs | 6 +- .../src/per_block_processing/errors.rs | 1 + .../process_operations.rs | 45 +++--- .../verify_bls_to_execution_change.rs | 4 +- .../src/per_slot_processing.rs | 12 +- 
.../state_processing/src/upgrade/capella.rs | 1 - consensus/types/src/beacon_block.rs | 110 +++++++++++--- consensus/types/src/beacon_block_body.rs | 2 +- consensus/types/src/beacon_state.rs | 6 +- .../types/src/beacon_state/tree_hash_cache.rs | 10 ++ consensus/types/src/blobs_sidecar.rs | 2 +- consensus/types/src/eth_spec.rs | 2 +- consensus/types/src/execution_payload.rs | 13 +- .../types/src/execution_payload_header.rs | 39 ++--- consensus/types/src/kzg_commitment.rs | 2 +- consensus/types/src/kzg_proof.rs | 3 +- consensus/types/src/lib.rs | 2 +- consensus/types/src/payload.rs | 10 +- consensus/types/src/tree_hash_impls.rs | 6 +- consensus/types/src/validator.rs | 16 +- testing/ef_tests/Makefile | 2 +- .../ef_tests/src/cases/epoch_processing.rs | 5 +- testing/ef_tests/src/cases/fork.rs | 4 +- .../src/cases/genesis_initialization.rs | 17 +-- testing/ef_tests/src/cases/operations.rs | 138 +++++++++++++----- testing/ef_tests/src/cases/transition.rs | 11 +- testing/ef_tests/src/handler.rs | 15 +- testing/ef_tests/src/lib.rs | 5 +- testing/ef_tests/src/type_name.rs | 11 ++ testing/ef_tests/tests/tests.rs | 50 ++++++- 37 files changed, 433 insertions(+), 195 deletions(-) diff --git a/Makefile b/Makefile index 33077a6c930..56e05fffcb7 100644 --- a/Makefile +++ b/Makefile @@ -20,6 +20,9 @@ CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx # Cargo profile for Cross builds. Default is for local builds, CI uses an override. CROSS_PROFILE ?= release +# List of features to use when running EF tests. +EF_TEST_FEATURES ?= beacon_chain/withdrawals,beacon_chain/withdrawals-processing + # Cargo profile for regular builds. PROFILE ?= release @@ -108,9 +111,9 @@ check-consensus: # Runs only the ef-test vectors. run-ef-tests: rm -rf $(EF_TESTS)/.accessed_file_log.txt - cargo test --release -p ef_tests --features "ef_tests" - cargo test --release -p ef_tests --features "ef_tests,fake_crypto" - cargo test --release -p ef_tests --features "ef_tests,milagro" + cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)" + cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto" + cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests # Run the tests in the `beacon_chain` crate for all known forks. 
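For orientation, here is a minimal, self-contained sketch of the withdrawal sweep that the `get_expected_withdrawals` / `process_withdrawals` changes above implement: walk the validator set round-robin starting from the state's next withdrawal validator index, emit a full withdrawal for fully withdrawable validators and an excess-balance withdrawal for partially withdrawable ones, and stop at the per-payload cap. The structs, constants, and boolean field below are simplified assumptions for illustration only, standing in for the Lighthouse `Validator`, `Withdrawal`, `is_fully_withdrawable_at`, and `is_partially_withdrawable_validator` definitions; the real code uses checked arithmetic (`safe_add`, `safe_rem`) as in the patch, and `process_withdrawals` then compares the list's hash tree root against the payload's `withdrawals_root` before debiting balances.

// Simplified sketch of the withdrawal sweep (illustrative types, not Lighthouse's).
#[derive(Debug)]
struct Withdrawal {
    index: u64,
    validator_index: u64,
    amount_gwei: u64,
}

struct Validator {
    effective_balance: u64,
    withdrawable_epoch: u64,
    // Stand-in for "has an eth1 (0x01) withdrawal credential".
    has_eth1_credential: bool,
}

const MAX_EFFECTIVE_BALANCE: u64 = 32_000_000_000;
const MAX_WITHDRAWALS_PER_PAYLOAD: usize = 16;

fn expected_withdrawals(
    validators: &[Validator],
    balances: &[u64],
    current_epoch: u64,
    mut next_withdrawal_index: u64,
    mut validator_index: u64,
) -> Vec<Withdrawal> {
    let mut withdrawals = Vec::new();
    for _ in 0..validators.len() {
        let v = &validators[validator_index as usize];
        let balance = balances[validator_index as usize];
        // Fully withdrawable: eth1 credential, withdrawable epoch reached, non-zero balance.
        if v.has_eth1_credential && v.withdrawable_epoch <= current_epoch && balance > 0 {
            withdrawals.push(Withdrawal {
                index: next_withdrawal_index,
                validator_index,
                amount_gwei: balance,
            });
            next_withdrawal_index += 1;
        // Partially withdrawable: skim everything above the max effective balance.
        } else if v.has_eth1_credential
            && v.effective_balance == MAX_EFFECTIVE_BALANCE
            && balance > MAX_EFFECTIVE_BALANCE
        {
            withdrawals.push(Withdrawal {
                index: next_withdrawal_index,
                validator_index,
                amount_gwei: balance - MAX_EFFECTIVE_BALANCE,
            });
            next_withdrawal_index += 1;
        }
        if withdrawals.len() == MAX_WITHDRAWALS_PER_PAYLOAD {
            break;
        }
        // Round-robin sweep over the validator set, wrapping at the end.
        validator_index = (validator_index + 1) % validators.len() as u64;
    }
    withdrawals
}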
diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 3c40fdf8b3f..8f2867e04e0 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -683,8 +683,8 @@ mod tests { }; use std::sync::Arc; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Epoch, ForkContext, - FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, Epoch, + ForkContext, FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, }; use snap::write::FrameEncoder; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 8511d262080..f71b8e6055c 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -22,8 +22,8 @@ use tokio_util::{ }; use types::BlobsSidecar; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Blob, EthSpec, ForkContext, - ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Blob, EmptyBlock, EthSpec, + ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, }; lazy_static! { diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index c0fbef973fe..e8c782b8c51 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -492,17 +492,15 @@ impl, Cold: ItemStore> HotColdDB pub fn get_blobs(&self, block_root: &Hash256) -> Result>, Error> { if let Some(blobs) = self.blob_cache.lock().get(block_root) { Ok(Some(blobs.clone())) + } else if let Some(bytes) = self + .hot_db + .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? + { + let ret = BlobsSidecar::from_ssz_bytes(&bytes)?; + self.blob_cache.lock().put(*block_root, ret.clone()); + Ok(Some(ret)) } else { - if let Some(bytes) = self - .hot_db - .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? 
- { - let ret = BlobsSidecar::from_ssz_bytes(&bytes)?; - self.blob_cache.lock().put(*block_root, ret.clone()); - Ok(Some(ret)) - } else { - Ok(None) - } + Ok(None) } } diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index 18ae5ad3b7a..121a9eccb9c 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -1,7 +1,7 @@ use std::marker::PhantomData; use tree_hash::TreeHash; use types::{ - AbstractExecPayload, BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, + AbstractExecPayload, BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot, }; diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 2d8a01ff48c..5e59a0132c1 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -42,7 +42,9 @@ mod verify_deposit; mod verify_exit; mod verify_proposer_slashing; +#[cfg(feature = "withdrawals-processing")] use crate::common::decrease_balance; + #[cfg(feature = "arbitrary-fuzz")] use arbitrary::Arbitrary; @@ -466,21 +468,22 @@ pub fn get_expected_withdrawals( spec: &ChainSpec, ) -> Result, BlockProcessingError> { let epoch = state.current_epoch(); - let mut withdrawal_index = *state.next_withdrawal_index()?; - let mut validator_index = *state.next_withdrawal_validator_index()?; + let mut withdrawal_index = state.next_withdrawal_index()?; + let mut validator_index = state.next_withdrawal_validator_index()?; let mut withdrawals = vec![]; for _ in 0..state.validators().len() { let validator = state.get_validator(validator_index as usize)?; - let balance = *state - .balances() - .get(validator_index as usize) - .ok_or_else(|| BeaconStateError::BalancesOutOfBounds(validator_index as usize))?; + let balance = *state.balances().get(validator_index as usize).ok_or( + BeaconStateError::BalancesOutOfBounds(validator_index as usize), + )?; if validator.is_fully_withdrawable_at(balance, epoch, spec) { withdrawals.push(Withdrawal { index: withdrawal_index, validator_index, - address: Address::from_slice(&validator.withdrawal_credentials[12..]), + address: validator + .get_eth1_withdrawal_address(spec) + .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?, amount: balance, }); withdrawal_index.safe_add_assign(1)?; @@ -488,7 +491,9 @@ pub fn get_expected_withdrawals( withdrawals.push(Withdrawal { index: withdrawal_index, validator_index, - address: Address::from_slice(&validator.withdrawal_credentials[12..]), + address: validator + .get_eth1_withdrawal_address(spec) + .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?, amount: balance.safe_sub(spec.max_effective_balance)?, }); withdrawal_index.safe_add_assign(1)?; @@ -496,7 +501,9 @@ pub fn get_expected_withdrawals( if withdrawals.len() == T::max_withdrawals_per_payload() { break; } - validator_index = validator_index.safe_add(1)? % state.validators().len() as u64; + validator_index = validator_index + .safe_add(1)? 
+ .safe_rem(state.validators().len() as u64)?; } Ok(withdrawals.into()) @@ -513,12 +520,13 @@ pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload BeaconState::Merge(_) => Ok(()), BeaconState::Capella(_) | BeaconState::Eip4844(_) => { let expected_withdrawals = get_expected_withdrawals(state, spec)?; + let expected_root = expected_withdrawals.tree_hash_root(); let withdrawals_root = payload.withdrawals_root()?; - if expected_withdrawals.tree_hash_root() != payload.withdrawals_root()? { + if expected_root != withdrawals_root { return Err(BlockProcessingError::WithdrawalsRootMismatch { - expected: expected_withdrawals.tree_hash_root(), - found: payload.withdrawals_root()?, + expected: expected_root, + found: withdrawals_root, }); } @@ -531,9 +539,11 @@ pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload } if let Some(latest_withdrawal) = expected_withdrawals.last() { - *state.next_withdrawal_index_mut()? = latest_withdrawal.index + 1; - let next_validator_index = - (latest_withdrawal.validator_index + 1) % state.validators().len() as u64; + *state.next_withdrawal_index_mut()? = latest_withdrawal.index.safe_add(1)?; + let next_validator_index = latest_withdrawal + .validator_index + .safe_add(1)? + .safe_rem(state.validators().len() as u64)?; *state.next_withdrawal_validator_index_mut()? = next_validator_index; } diff --git a/consensus/state_processing/src/per_block_processing/eip4844.rs b/consensus/state_processing/src/per_block_processing/eip4844.rs index 120ba304d0d..23ab3c5c074 100644 --- a/consensus/state_processing/src/per_block_processing/eip4844.rs +++ b/consensus/state_processing/src/per_block_processing/eip4844.rs @@ -1 +1,2 @@ +#[allow(clippy::module_inception)] pub mod eip4844; diff --git a/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs b/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs index 56b3ed58a65..55b1ab967e4 100644 --- a/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs +++ b/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs @@ -6,8 +6,8 @@ use ssz::Decode; use ssz_types::VariableList; use types::consts::eip4844::{BLOB_TX_TYPE, VERSIONED_HASH_VERSION_KZG}; use types::{ - AbstractExecPayload, BeaconBlockBodyRef, EthSpec, ExecPayload, FullPayload, FullPayloadRef, - KzgCommitment, Transaction, Transactions, VersionedHash, + AbstractExecPayload, BeaconBlockBodyRef, EthSpec, ExecPayload, KzgCommitment, Transaction, + Transactions, VersionedHash, }; pub fn process_blob_kzg_commitments>( @@ -34,7 +34,7 @@ pub fn verify_kzg_commitments_against_transactions( let nested_iter = transactions .into_iter() .filter(|tx| { - tx.get(0) + tx.first() .map(|tx_type| *tx_type == BLOB_TX_TYPE) .unwrap_or(false) }) diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 8de0fd337ac..7b355b0ddc6 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -94,6 +94,7 @@ pub enum BlockProcessingError { index: usize, length: usize, }, + WithdrawalCredentialsInvalid, } impl From for BlockProcessingError { diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 9b24be39e7a..32e36c6ce6c 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ 
b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -33,8 +33,11 @@ pub fn process_operations<'a, T: EthSpec, Payload: AbstractExecPayload>( process_attestations(state, block_body, verify_signatures, ctxt, spec)?; process_deposits(state, block_body.deposits(), spec)?; process_exits(state, block_body.voluntary_exits(), verify_signatures, spec)?; + #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] - process_bls_to_execution_changes(state, block_body, verify_signatures, spec)?; + if let Ok(bls_to_execution_changes) = block_body.bls_to_execution_changes() { + process_bls_to_execution_changes(state, bls_to_execution_changes, verify_signatures, spec)?; + } Ok(()) } @@ -287,39 +290,25 @@ pub fn process_exits( /// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returs /// an `Err` describing the invalid object or cause of failure. #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] -pub fn process_bls_to_execution_changes<'a, T: EthSpec, Payload: AbstractExecPayload>( +pub fn process_bls_to_execution_changes( state: &mut BeaconState, - block_body: BeaconBlockBodyRef<'a, T, Payload>, + bls_to_execution_changes: &[SignedBlsToExecutionChange], verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - match block_body { - BeaconBlockBodyRef::Base(_) - | BeaconBlockBodyRef::Altair(_) - | BeaconBlockBodyRef::Merge(_) => Ok(()), - BeaconBlockBodyRef::Capella(_) | BeaconBlockBodyRef::Eip4844(_) => { - for (i, signed_address_change) in - block_body.bls_to_execution_changes()?.iter().enumerate() - { - verify_bls_to_execution_change( - state, - &signed_address_change, - verify_signatures, - spec, - ) - .map_err(|e| e.into_with_index(i))?; - - state - .get_validator_mut(signed_address_change.message.validator_index as usize)? - .change_withdrawal_credentials( - &signed_address_change.message.to_execution_address, - spec, - ); - } + for (i, signed_address_change) in bls_to_execution_changes.iter().enumerate() { + verify_bls_to_execution_change(state, signed_address_change, verify_signatures, spec) + .map_err(|e| e.into_with_index(i))?; - Ok(()) - } + state + .get_validator_mut(signed_address_change.message.validator_index as usize)? + .change_withdrawal_credentials( + &signed_address_change.message.to_execution_address, + spec, + ); } + + Ok(()) } /// Validates each `Deposit` and updates the state, short-circuiting on an invalid object. diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs index 3c15691453c..34700a33e4e 100644 --- a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -42,13 +42,13 @@ pub fn verify_bls_to_execution_change( // FIXME: Should this check be put inside the verify_signatures.is_true() condition? // I believe that's used for fuzzing so this is a Mehdi question.. verify!( - validator.withdrawal_credentials.as_bytes()[1..] == pubkey_hash[1..], + validator.withdrawal_credentials.as_bytes().get(1..) 
== pubkey_hash.get(1..), Invalid::WithdrawalCredentialsMismatch ); if verify_signatures.is_true() { verify!( - bls_execution_change_signature_set(state, signed_address_change, spec,)?.verify(), + bls_execution_change_signature_set(state, signed_address_change, spec)?.verify(), Invalid::BadSignature ); } diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 9018db65bcd..8d2600bb41e 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -1,4 +1,6 @@ -use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; +use crate::upgrade::{ + upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_eip4844, +}; use crate::{per_epoch_processing::EpochProcessingSummary, *}; use safe_arith::{ArithError, SafeArith}; use types::*; @@ -55,6 +57,14 @@ pub fn per_slot_processing( if spec.bellatrix_fork_epoch == Some(state.current_epoch()) { upgrade_to_bellatrix(state, spec)?; } + // Capella. + if spec.capella_fork_epoch == Some(state.current_epoch()) { + upgrade_to_capella(state, spec)?; + } + // Eip4844 + if spec.eip4844_fork_epoch == Some(state.current_epoch()) { + upgrade_to_eip4844(state, spec)?; + } } Ok(summary) diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index ad106a84220..9a883698830 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -1,4 +1,3 @@ -use ssz_types::VariableList; use std::mem; use types::{BeaconState, BeaconStateCapella, BeaconStateError as Error, ChainSpec, EthSpec, Fork}; diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index d58e890c60b..124cb08bcc0 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -78,17 +78,20 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignedRoot { } +/// Empty block trait for each block variant to implement. +pub trait EmptyBlock { + /// Returns an empty block to be used during genesis. + fn empty(spec: &ChainSpec) -> Self; +} + impl> BeaconBlock { - // FIXME: deal with capella / eip4844 forks here as well /// Returns an empty block to be used during genesis. pub fn empty(spec: &ChainSpec) -> Self { - if spec.bellatrix_fork_epoch == Some(T::genesis_epoch()) { - Self::Merge(BeaconBlockMerge::empty(spec)) - } else if spec.altair_fork_epoch == Some(T::genesis_epoch()) { - Self::Altair(BeaconBlockAltair::empty(spec)) - } else { - Self::Base(BeaconBlockBase::empty(spec)) - } + map_fork_name!( + spec.fork_name_at_epoch(T::genesis_epoch()), + Self, + EmptyBlock::empty(spec) + ) } /// Custom SSZ decoder that takes a `ChainSpec` as context. @@ -117,13 +120,12 @@ impl> BeaconBlock { /// Usually it's better to prefer `from_ssz_bytes` which will decode the correct variant based /// on the fork slot. 
pub fn any_from_ssz_bytes(bytes: &[u8]) -> Result { - BeaconBlockMerge::from_ssz_bytes(bytes) - .map(BeaconBlock::Merge) - .or_else(|_| { - BeaconBlockAltair::from_ssz_bytes(bytes) - .map(BeaconBlock::Altair) - .or_else(|_| BeaconBlockBase::from_ssz_bytes(bytes).map(BeaconBlock::Base)) - }) + BeaconBlockEip4844::from_ssz_bytes(bytes) + .map(BeaconBlock::Eip4844) + .or_else(|_| BeaconBlockCapella::from_ssz_bytes(bytes).map(BeaconBlock::Capella)) + .or_else(|_| BeaconBlockMerge::from_ssz_bytes(bytes).map(BeaconBlock::Merge)) + .or_else(|_| BeaconBlockAltair::from_ssz_bytes(bytes).map(BeaconBlock::Altair)) + .or_else(|_| BeaconBlockBase::from_ssz_bytes(bytes).map(BeaconBlock::Base)) } /// Convenience accessor for the `body` as a `BeaconBlockBodyRef`. @@ -266,9 +268,8 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockRefMut<'a, T, P } } -impl> BeaconBlockBase { - /// Returns an empty block to be used during genesis. - pub fn empty(spec: &ChainSpec) -> Self { +impl> EmptyBlock for BeaconBlockBase { + fn empty(spec: &ChainSpec) -> Self { BeaconBlockBase { slot: spec.genesis_slot, proposer_index: 0, @@ -291,7 +292,9 @@ impl> BeaconBlockBase { }, } } +} +impl> BeaconBlockBase { /// Return a block where the block has maximum size. pub fn full(spec: &ChainSpec) -> Self { let header = BeaconBlockHeader { @@ -387,9 +390,9 @@ impl> BeaconBlockBase { } } -impl> BeaconBlockAltair { +impl> EmptyBlock for BeaconBlockAltair { /// Returns an empty Altair block to be used during genesis. - pub fn empty(spec: &ChainSpec) -> Self { + fn empty(spec: &ChainSpec) -> Self { BeaconBlockAltair { slot: spec.genesis_slot, proposer_index: 0, @@ -413,7 +416,9 @@ impl> BeaconBlockAltair }, } } +} +impl> BeaconBlockAltair { /// Return an Altair block where the block has maximum size. pub fn full(spec: &ChainSpec) -> Self { let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); @@ -446,9 +451,9 @@ impl> BeaconBlockAltair } } -impl> BeaconBlockMerge { +impl> EmptyBlock for BeaconBlockMerge { /// Returns an empty Merge block to be used during genesis. - pub fn empty(spec: &ChainSpec) -> Self { + fn empty(spec: &ChainSpec) -> Self { BeaconBlockMerge { slot: spec.genesis_slot, proposer_index: 0, @@ -474,6 +479,67 @@ impl> BeaconBlockMerge { } } +impl> EmptyBlock for BeaconBlockCapella { + /// Returns an empty Capella block to be used during genesis. + fn empty(spec: &ChainSpec) -> Self { + BeaconBlockCapella { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyCapella { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + execution_payload: Payload::Capella::default(), + #[cfg(feature = "withdrawals")] + bls_to_execution_changes: VariableList::empty(), + }, + } + } +} + +impl> EmptyBlock for BeaconBlockEip4844 { + /// Returns an empty Eip4844 block to be used during genesis. 
+ fn empty(spec: &ChainSpec) -> Self { + BeaconBlockEip4844 { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyEip4844 { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + execution_payload: Payload::Eip4844::default(), + #[cfg(feature = "withdrawals")] + bls_to_execution_changes: VariableList::empty(), + blob_kzg_commitments: VariableList::empty(), + }, + } + } +} + // We can convert pre-Bellatrix blocks without payloads into blocks "with" payloads. impl From>> for BeaconBlockBase> diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index c0d7b243eb5..1dd938ac465 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -76,7 +76,7 @@ pub struct BeaconBlockBody = FullPay } impl> BeaconBlockBody { - pub fn execution_payload<'a>(&'a self) -> Result, Error> { + pub fn execution_payload(&self) -> Result, Error> { self.to_ref().execution_payload() } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 66125c29762..000e6f67149 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -296,10 +296,10 @@ where // Withdrawals #[cfg(feature = "withdrawals")] - #[superstruct(only(Capella, Eip4844))] + #[superstruct(only(Capella, Eip4844), partial_getter(copy))] pub next_withdrawal_index: u64, #[cfg(feature = "withdrawals")] - #[superstruct(only(Capella, Eip4844))] + #[superstruct(only(Capella, Eip4844), partial_getter(copy))] pub next_withdrawal_validator_index: u64, // Caching (not in the spec) @@ -1784,6 +1784,8 @@ impl CompareFields for BeaconState { (BeaconState::Base(x), BeaconState::Base(y)) => x.compare_fields(y), (BeaconState::Altair(x), BeaconState::Altair(y)) => x.compare_fields(y), (BeaconState::Merge(x), BeaconState::Merge(y)) => x.compare_fields(y), + (BeaconState::Capella(x), BeaconState::Capella(y)) => x.compare_fields(y), + (BeaconState::Eip4844(x), BeaconState::Eip4844(y)) => x.compare_fields(y), _ => panic!("compare_fields: mismatched state variants",), } } diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index e67d4096dd5..e50265e6607 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -363,6 +363,16 @@ impl BeaconTreeHashCacheInner { hasher.write(payload_header.tree_hash_root().as_bytes())?; } + // Withdrawal indices (Capella and later). 
+ #[cfg(feature = "withdrawals")] + if let Ok(next_withdrawal_index) = state.next_withdrawal_index() { + hasher.write(next_withdrawal_index.tree_hash_root().as_bytes())?; + } + #[cfg(feature = "withdrawals")] + if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() { + hasher.write(next_withdrawal_validator_index.tree_hash_root().as_bytes())?; + } + let root = hasher.finish()?; self.previous_state = Some((root, state.slot())); diff --git a/consensus/types/src/blobs_sidecar.rs b/consensus/types/src/blobs_sidecar.rs index 4e9174d94cc..d4e77960601 100644 --- a/consensus/types/src/blobs_sidecar.rs +++ b/consensus/types/src/blobs_sidecar.rs @@ -4,7 +4,6 @@ use serde_derive::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; -use tree_hash::TreeHash; use tree_hash_derive::TreeHash; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] @@ -23,6 +22,7 @@ impl BlobsSidecar { pub fn empty() -> Self { Self::default() } + #[allow(clippy::integer_arithmetic)] pub fn max_size() -> usize { // Fixed part Self::empty().as_ssz_bytes().len() diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 4cba9c79503..661484fde82 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -324,6 +324,7 @@ impl EthSpec for MinimalEthSpec { type SyncSubcommitteeSize = U8; // 32 committee size / 4 sync committee subnet count type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch type SlotsPerEth1VotingPeriod = U32; // 4 epochs * 8 slots per epoch + type MaxWithdrawalsPerPayload = U4; params_from_eth_spec!(MainnetEthSpec { JustificationBitsLength, @@ -345,7 +346,6 @@ impl EthSpec for MinimalEthSpec { MinGasLimit, MaxExtraDataBytes, MaxBlsToExecutionChanges, - MaxWithdrawalsPerPayload, MaxBlobsPerBlock, FieldElementsPerBlob }); diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index eec88b97eb0..6036973d5e2 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -1,7 +1,7 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; -use ssz::Encode; +use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -87,6 +87,17 @@ pub struct ExecutionPayload { } impl ExecutionPayload { + pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid(format!( + "unsupported fork for ExecutionPayload: {fork_name}", + ))), + ForkName::Merge => ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge), + ForkName::Capella => ExecutionPayloadCapella::from_ssz_bytes(bytes).map(Self::Capella), + ForkName::Eip4844 => ExecutionPayloadEip4844::from_ssz_bytes(bytes).map(Self::Eip4844), + } + } + #[allow(clippy::integer_arithmetic)] /// Returns the maximum size of an execution payload. 
pub fn max_execution_payload_merge_size() -> usize { diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 7546ca2e5f1..6f6b5aa9535 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -1,6 +1,7 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; +use ssz::Decode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; @@ -84,31 +85,34 @@ impl ExecutionPayloadHeader { pub fn transactions(&self) -> Option<&Transactions> { None } -} -impl<'a, T: EthSpec> ExecutionPayloadHeaderRef<'a, T> { - // FIXME: maybe this could be a derived trait.. - pub fn is_default(self) -> bool { - match self { - ExecutionPayloadHeaderRef::Merge(header) => { - *header == ExecutionPayloadHeaderMerge::default() + pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid(format!( + "unsupported fork for ExecutionPayloadHeader: {fork_name}", + ))), + ForkName::Merge => ExecutionPayloadHeaderMerge::from_ssz_bytes(bytes).map(Self::Merge), + ForkName::Capella => { + ExecutionPayloadHeaderCapella::from_ssz_bytes(bytes).map(Self::Capella) } - ExecutionPayloadHeaderRef::Capella(header) => { - *header == ExecutionPayloadHeaderCapella::default() - } - ExecutionPayloadHeaderRef::Eip4844(header) => { - *header == ExecutionPayloadHeaderEip4844::default() + ForkName::Eip4844 => { + ExecutionPayloadHeaderEip4844::from_ssz_bytes(bytes).map(Self::Eip4844) } } } } +impl<'a, T: EthSpec> ExecutionPayloadHeaderRef<'a, T> { + pub fn is_default(self) -> bool { + map_execution_payload_header_ref!(&'a _, self, |inner, cons| { + let _ = cons(inner); + *inner == Default::default() + }) + } +} + impl ExecutionPayloadHeaderMerge { pub fn upgrade_to_capella(&self) -> ExecutionPayloadHeaderCapella { - #[cfg(feature = "withdrawals")] - // TODO: if this is correct we should calculate and hardcode this.. - let empty_withdrawals_root = - VariableList::::empty().tree_hash_root(); ExecutionPayloadHeaderCapella { parent_hash: self.parent_hash, fee_recipient: self.fee_recipient, @@ -125,8 +129,7 @@ impl ExecutionPayloadHeaderMerge { block_hash: self.block_hash, transactions_root: self.transactions_root, #[cfg(feature = "withdrawals")] - // FIXME: the spec doesn't seem to define what to do here.. 
- withdrawals_root: empty_withdrawals_root, + withdrawals_root: Hash256::zero(), } } } diff --git a/consensus/types/src/kzg_commitment.rs b/consensus/types/src/kzg_commitment.rs index eaa429a139e..9844df0282e 100644 --- a/consensus/types/src/kzg_commitment.rs +++ b/consensus/types/src/kzg_commitment.rs @@ -14,7 +14,7 @@ pub struct KzgCommitment(#[serde(with = "BigArray")] pub [u8; 48]); impl Display for KzgCommitment { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{}", eth2_serde_utils::hex::encode(&self.0)) + write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) } } diff --git a/consensus/types/src/kzg_proof.rs b/consensus/types/src/kzg_proof.rs index 7cd6a8e58b4..1c8e49a443b 100644 --- a/consensus/types/src/kzg_proof.rs +++ b/consensus/types/src/kzg_proof.rs @@ -1,7 +1,6 @@ use crate::test_utils::{RngCore, TestRandom}; use serde::{Deserialize, Serialize}; use serde_big_array::BigArray; -use ssz::{Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::fmt; use tree_hash::{PackedEncoding, TreeHash}; @@ -15,7 +14,7 @@ pub struct KzgProof(#[serde(with = "BigArray")] pub [u8; KZG_PROOF_BYTES_LEN]); impl fmt::Display for KzgProof { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", eth2_serde_utils::hex::encode(&self.0)) + write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) } } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index a129a22db7d..ec66070b223 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -109,7 +109,7 @@ pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockEip4844, - BeaconBlockMerge, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, + BeaconBlockMerge, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, EmptyBlock, }; pub use crate::beacon_block_body::{ BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyCapella, diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 84cc70ed8fa..3081dd1cbe1 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -221,7 +221,7 @@ impl ExecPayload for FullPayload { }) } - fn transactions<'a>(&'a self) -> Option<&Transactions> { + fn transactions<'a>(&'a self) -> Option<&'a Transactions> { map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); Some(&payload.execution_payload.transactions) @@ -265,7 +265,7 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { fn to_execution_payload_header<'a>(&'a self) -> ExecutionPayloadHeader { map_full_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); - ExecutionPayloadHeader::from(payload.to_execution_payload_header()) + payload.to_execution_payload_header() }) } @@ -318,7 +318,7 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { }) } - fn transactions<'a>(&'a self) -> Option<&Transactions> { + fn transactions<'a>(&'a self) -> Option<&'a Transactions> { map_full_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); Some(&payload.execution_payload.transactions) @@ -488,7 +488,7 @@ impl ExecPayload for BlindedPayload { }) } - fn transactions<'a>(&'a self) -> Option<&Transactions> { + fn transactions(&self) -> Option<&Transactions> { None } @@ -574,7 +574,7 @@ impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { }) } - fn transactions<'a>(&'a self) -> 
Option<&Transactions> { + fn transactions(&self) -> Option<&Transactions> { None } diff --git a/consensus/types/src/tree_hash_impls.rs b/consensus/types/src/tree_hash_impls.rs index ec23927d30c..34043c0e83f 100644 --- a/consensus/types/src/tree_hash_impls.rs +++ b/consensus/types/src/tree_hash_impls.rs @@ -17,7 +17,7 @@ impl CachedTreeHash for Validator { /// Efficiently tree hash a `Validator`, assuming it was updated by a valid state transition. /// - /// Specifically, we assume that the `pubkey` and `withdrawal_credentials` fields are constant. + /// Specifically, we assume that the `pubkey` field is constant. fn recalculate_tree_hash_root( &self, arena: &mut CacheArena, @@ -29,8 +29,8 @@ impl CachedTreeHash for Validator { .iter_mut(arena)? .enumerate() .flat_map(|(i, leaf)| { - // Fields pubkey and withdrawal_credentials are constant - if (i == 0 || i == 1) && cache.initialized { + // Pubkey field (index 0) is constant. + if i == 0 && cache.initialized { None } else if process_field_by_index(self, i, leaf, !cache.initialized) { Some(i) diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 3e93474927e..e4497c809e0 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,6 +1,6 @@ use crate::{ - test_utils::TestRandom, Address, BeaconState, BlsToExecutionChange, ChainSpec, Epoch, EthSpec, - Hash256, PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, + PublicKeyBytes, }; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -76,6 +76,18 @@ impl Validator { .unwrap_or(false) } + /// Get the eth1 withdrawal address if this validator has one initialized. + pub fn get_eth1_withdrawal_address(&self, spec: &ChainSpec) -> Option
{ + self.has_eth1_withdrawal_credential(spec) + .then(|| { + self.withdrawal_credentials + .as_bytes() + .get(12..) + .map(Address::from_slice) + }) + .flatten() + } + /// Changes withdrawal credentials to the provided eth1 execution address /// /// WARNING: this function does NO VALIDATION - it just does it! diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index e05ef0b06b1..5dd22de8d61 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.2.0 +TESTS_TAG := f5c7cf78 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 26a05715b91..b0e16e12c73 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -289,8 +289,9 @@ impl> Case for EpochProcessing { && T::name() != "participation_flag_updates" } // No phase0 tests for Altair and later. - ForkName::Altair | ForkName::Merge => T::name() != "participation_record_updates", - ForkName::Capella => false, // TODO: revisit when tests are out + ForkName::Altair | ForkName::Merge | ForkName::Capella => { + T::name() != "participation_record_updates" + } ForkName::Eip4844 => false, // TODO: revisit when tests are out } } diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index bcc76b85502..f79e13005a8 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -3,7 +3,7 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::cases::common::previous_fork; use crate::decode::{ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; -use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; +use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella}; use types::{BeaconState, ForkName}; #[derive(Debug, Clone, Default, Deserialize)] @@ -61,8 +61,8 @@ impl Case for ForkTest { ForkName::Base => panic!("phase0 not supported"), ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| result_state), ForkName::Merge => upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state), + ForkName::Capella => upgrade_to_capella(&mut result_state, spec).map(|_| result_state), ForkName::Eip4844 => panic!("eip4844 not supported"), - ForkName::Capella => panic!("capella not supported"), }; compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/testing/ef_tests/src/cases/genesis_initialization.rs b/testing/ef_tests/src/cases/genesis_initialization.rs index d447fbd8f47..dbf6c70b29b 100644 --- a/testing/ef_tests/src/cases/genesis_initialization.rs +++ b/testing/ef_tests/src/cases/genesis_initialization.rs @@ -1,13 +1,10 @@ use super::*; use crate::case_result::compare_beacon_state_results_without_caches; -use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; +use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::initialize_beacon_state_from_eth1; use std::path::PathBuf; -use types::{ - BeaconState, Deposit, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderMerge, ForkName, - Hash256, -}; +use types::{BeaconState, Deposit, EthSpec, ExecutionPayloadHeader, ForkName, Hash256}; #[derive(Debug, Clone, Deserialize)] struct Metadata { @@ -41,14 +38,10 @@ impl LoadCase for 
GenesisInitialization { let meta: Metadata = yaml_decode_file(&path.join("meta.yaml"))?; let execution_payload_header: Option> = if meta.execution_payload_header.unwrap_or(false) { - //FIXME(sean) we could decode based on timestamp - we probably don't do decode a payload - // without a block this elsewhere at presetn. But when we support SSZ in the builder api we may need to. - // Although that API should include fork info. Hardcoding this for now - Some(ExecutionPayloadHeader::Merge(ssz_decode_file::< - ExecutionPayloadHeaderMerge, - >( + Some(ssz_decode_file_with( &path.join("execution_payload_header.ssz_snappy"), - )?)) + |bytes| ExecutionPayloadHeader::from_ssz_bytes(bytes, fork_name), + )?) } else { None }; diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index e3dfb7f67b8..8c2ddd13682 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -3,17 +3,16 @@ use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::testing_spec; -use crate::type_name::TypeName; use serde_derive::Deserialize; use state_processing::{ per_block_processing::{ errors::BlockProcessingError, process_block_header, process_execution_payload, process_operations::{ - altair, base, process_attester_slashings, process_deposits, process_exits, - process_proposer_slashings, + altair, base, process_attester_slashings, process_bls_to_execution_changes, + process_deposits, process_exits, process_proposer_slashings, }, - process_sync_aggregate, VerifyBlockRoot, VerifySignatures, + process_sync_aggregate, process_withdrawals, VerifyBlockRoot, VerifySignatures, }, ConsensusContext, }; @@ -21,7 +20,7 @@ use std::fmt::Debug; use std::path::Path; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, - EthSpec, ExecutionPayload, ExecutionPayloadMerge, ForkName, FullPayload, ProposerSlashing, + EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, SyncAggregate, }; @@ -36,6 +35,12 @@ struct ExecutionMetadata { execution_valid: bool, } +/// Newtype for testing withdrawals. 
+#[derive(Debug, Clone, Deserialize)] +pub struct WithdrawalsPayload { + payload: FullPayload, +} + #[derive(Debug, Clone)] pub struct Operations> { metadata: Metadata, @@ -45,10 +50,8 @@ pub struct Operations> { pub post: Option>, } -pub trait Operation: TypeName + Debug + Sync + Sized { - fn handler_name() -> String { - Self::name().to_lowercase() - } +pub trait Operation: Debug + Sync + Sized { + fn handler_name() -> String; fn filename() -> String { format!("{}.ssz_snappy", Self::handler_name()) @@ -58,7 +61,7 @@ pub trait Operation: TypeName + Debug + Sync + Sized { true } - fn decode(path: &Path, spec: &ChainSpec) -> Result; + fn decode(path: &Path, fork_name: ForkName, spec: &ChainSpec) -> Result; fn apply_to( &self, @@ -69,7 +72,11 @@ pub trait Operation: TypeName + Debug + Sync + Sized { } impl Operation for Attestation { - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn handler_name() -> String { + "attestation".into() + } + + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -109,7 +116,7 @@ impl Operation for AttesterSlashing { "attester_slashing".into() } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -131,7 +138,11 @@ impl Operation for AttesterSlashing { } impl Operation for Deposit { - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn handler_name() -> String { + "deposit".into() + } + + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -155,7 +166,7 @@ impl Operation for ProposerSlashing { "proposer_slashing".into() } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -181,7 +192,7 @@ impl Operation for SignedVoluntaryExit { "voluntary_exit".into() } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -204,7 +215,7 @@ impl Operation for BeaconBlock { "block.ssz_snappy".into() } - fn decode(path: &Path, spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, spec: &ChainSpec) -> Result { ssz_decode_file_with(path, |bytes| BeaconBlock::from_ssz_bytes(bytes, spec)) } @@ -239,7 +250,7 @@ impl Operation for SyncAggregate { fork_name != ForkName::Base } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -267,13 +278,11 @@ impl Operation for FullPayload { fork_name != ForkName::Base && fork_name != ForkName::Altair } - //FIXME(sean) we could decode based on timestamp - we probably don't do decode a payload - // without a block this elsewhere at presetn. But when we support SSZ in the builder api we may need to. - // Although that API should include fork info. 
Hardcoding this for now - fn decode(path: &Path, _spec: &ChainSpec) -> Result { - ssz_decode_file::>(path) - .map(ExecutionPayload::Merge) - .map(Into::into) + fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| { + ExecutionPayload::from_ssz_bytes(bytes, fork_name) + }) + .map(Into::into) } fn apply_to( @@ -306,13 +315,11 @@ impl Operation for BlindedPayload { fork_name != ForkName::Base && fork_name != ForkName::Altair } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { - //FIXME(sean) we could decode based on timestamp - we probably don't do decode a payload - // without a block this elsewhere at presetn. But when we support SSZ in the builder api we may need to. - // Although that API should include fork info. Hardcoding this for now - let payload: Result, Error> = - ssz_decode_file::>(path).map(Into::into); - payload.map(Into::into) + fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| { + ExecutionPayload::from_ssz_bytes(bytes, fork_name) + }) + .map(Into::into) } fn apply_to( @@ -333,6 +340,65 @@ impl Operation for BlindedPayload { } } +impl Operation for WithdrawalsPayload { + fn handler_name() -> String { + "withdrawals".into() + } + + fn filename() -> String { + "execution_payload.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name != ForkName::Base && fork_name != ForkName::Altair && fork_name != ForkName::Merge + } + + fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| { + ExecutionPayload::from_ssz_bytes(bytes, fork_name) + }) + .map(|payload| WithdrawalsPayload { + payload: payload.into(), + }) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + _: &Operations, + ) -> Result<(), BlockProcessingError> { + process_withdrawals::<_, FullPayload<_>>(state, self.payload.to_ref(), spec) + } +} + +impl Operation for SignedBlsToExecutionChange { + fn handler_name() -> String { + "bls_to_execution_change".into() + } + + fn filename() -> String { + "address_change.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name != ForkName::Base && fork_name != ForkName::Altair && fork_name != ForkName::Merge + } + + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file(path) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + _extra: &Operations, + ) -> Result<(), BlockProcessingError> { + process_bls_to_execution_changes(state, &[self.clone()], VerifySignatures::True, spec) + } +} + impl> LoadCase for Operations { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); @@ -356,7 +422,7 @@ impl> LoadCase for Operations { // Check BLS setting here before SSZ deserialization, as most types require signatures // to be valid. let (operation, bls_error) = if metadata.bls_setting.unwrap_or_default().check().is_ok() { - match O::decode(&path.join(O::filename()), spec) { + match O::decode(&path.join(O::filename()), fork_name, spec) { Ok(op) => (Some(op), None), Err(Error::InvalidBLSInput(error)) => (None, Some(error)), Err(e) => return Err(e), @@ -399,9 +465,11 @@ impl> Case for Operations { let mut expected = self.post.clone(); // Processing requires the committee caches. 
- state - .build_all_committee_caches(spec) - .expect("committee caches OK"); + // NOTE: some of the withdrawals tests have 0 active validators, so do not try + // to build the committee cache in this case. + if O::handler_name() != "withdrawals" { + state.build_all_committee_caches(spec).unwrap(); + } let mut result = self .operation diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index 469285ab0f8..fb7ccfea644 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -42,13 +42,16 @@ impl LoadCase for TransitionTest { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(metadata.fork_epoch); } - ForkName::Eip4844 => { + ForkName::Capella => { + spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(0)); - spec.eip4844_fork_epoch = Some(metadata.fork_epoch); + spec.capella_fork_epoch = Some(metadata.fork_epoch); } - ForkName::Capella => { + ForkName::Eip4844 => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec.capella_fork_epoch = Some(Epoch::new(0)); - spec.capella_fork_epoch = Some(metadata.fork_epoch); + spec.eip4844_fork_epoch = Some(metadata.fork_epoch); } } diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index dd5ed82da71..ed376af444f 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -24,6 +24,11 @@ pub trait Handler { fn run(&self) { for fork_name in ForkName::list_all() { + // FIXME(eip4844): enable eip4844 + if fork_name == ForkName::Eip4844 { + continue; + } + if self.is_enabled_for_fork(fork_name) { self.run_for_fork(fork_name) } @@ -218,6 +223,10 @@ impl SszStaticHandler { Self::for_forks(vec![ForkName::Merge]) } + pub fn capella_only() -> Self { + Self::for_forks(vec![ForkName::Capella]) + } + pub fn merge_and_later() -> Self { Self::for_forks(ForkName::list_all()[2..].to_vec()) } @@ -533,10 +542,8 @@ impl Handler for ForkChoiceHandler { } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { - // Merge block tests are only enabled for Bellatrix or later. - if self.handler_name == "on_merge_block" - && (fork_name == ForkName::Base || fork_name == ForkName::Altair) - { + // Merge block tests are only enabled for Bellatrix.
+ if self.handler_name == "on_merge_block" && fork_name != ForkName::Merge { return false; } diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index 5c2ca3fb55e..d45b1e15c7a 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,10 +1,9 @@ pub use case_result::CaseResult; -pub use cases::Case; pub use cases::{ - EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates, + Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates, JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates, RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, - SyncCommitteeUpdates, + SyncCommitteeUpdates, WithdrawalsPayload, }; pub use decode::log_file_access; pub use error::Error; diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index c075e89b3fe..bee2d9b03df 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -45,6 +45,8 @@ type_name_generic!(BeaconBlockBody); type_name_generic!(BeaconBlockBodyBase, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyAltair, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyMerge, "BeaconBlockBody"); +type_name_generic!(BeaconBlockBodyCapella, "BeaconBlockBody"); +type_name_generic!(BeaconBlockBodyEip4844, "BeaconBlockBody"); type_name!(BeaconBlockHeader); type_name_generic!(BeaconState); type_name!(Checkpoint); @@ -54,8 +56,14 @@ type_name!(DepositData); type_name!(DepositMessage); type_name!(Eth1Data); type_name_generic!(ExecutionPayload); +type_name_generic!(ExecutionPayloadMerge, "ExecutionPayload"); +type_name_generic!(ExecutionPayloadCapella, "ExecutionPayload"); +type_name_generic!(ExecutionPayloadEip4844, "ExecutionPayload"); type_name_generic!(FullPayload, "ExecutionPayload"); type_name_generic!(ExecutionPayloadHeader); +type_name_generic!(ExecutionPayloadHeaderMerge, "ExecutionPayloadHeader"); +type_name_generic!(ExecutionPayloadHeaderCapella, "ExecutionPayloadHeader"); +type_name_generic!(ExecutionPayloadHeaderEip4844, "ExecutionPayloadHeader"); type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); type_name!(ForkData); @@ -76,3 +84,6 @@ type_name_generic!(SyncAggregate); type_name_generic!(SyncCommittee); type_name!(Validator); type_name!(VoluntaryExit); +type_name!(Withdrawal); +type_name!(BlsToExecutionChange, "BLSToExecutionChange"); +type_name!(SignedBlsToExecutionChange, "SignedBLSToExecutionChange"); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 28c57028cf7..338a56b9f0c 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -82,6 +82,18 @@ fn operations_execution_payload_blinded() { OperationsHandler::>::default().run(); } +#[test] +fn operations_withdrawals() { + OperationsHandler::>::default().run(); + OperationsHandler::>::default().run(); +} + +#[test] +fn operations_bls_to_execution_change() { + OperationsHandler::::default().run(); + OperationsHandler::::default().run(); +} + #[test] fn sanity_blocks() { SanityBlocksHandler::::default().run(); @@ -250,6 +262,10 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::merge_only() .run(); + SszStaticHandler::, MinimalEthSpec>::capella_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::capella_only() + .run(); } // Altair and later @@ -302,18 +318,44 @@ mod ssz_static { // Merge and later #[test] fn execution_payload() { - SszStaticHandler::, 
MinimalEthSpec>::merge_and_later() + SszStaticHandler::, MinimalEthSpec>::merge_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::merge_only() + .run(); + SszStaticHandler::, MinimalEthSpec>::capella_only() .run(); - SszStaticHandler::, MainnetEthSpec>::merge_and_later() + SszStaticHandler::, MainnetEthSpec>::capella_only() .run(); } #[test] fn execution_payload_header() { - SszStaticHandler::, MinimalEthSpec>::merge_and_later() + SszStaticHandler::, MinimalEthSpec>::merge_only() .run(); - SszStaticHandler::, MainnetEthSpec>::merge_and_later() + SszStaticHandler::, MainnetEthSpec>::merge_only() .run(); + SszStaticHandler::, MinimalEthSpec> + ::capella_only().run(); + SszStaticHandler::, MainnetEthSpec> + ::capella_only().run(); + } + + #[test] + fn withdrawal() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); + } + + #[test] + fn bls_to_execution_change() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); + } + + #[test] + fn signed_bls_to_execution_change() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); } } From 61b4bbf870dfe02e011bfc4268835612ed9235a2 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 23 Nov 2022 04:29:47 +1100 Subject: [PATCH 070/263] Fix BlocksByRoot response types (#3743) --- beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 8f2867e04e0..01e40326c49 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -629,12 +629,12 @@ fn handle_v2_response( decoded_buffer, )?), )))), - ForkName::Capella => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + ForkName::Capella => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( decoded_buffer, )?), )))), - ForkName::Eip4844 => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + ForkName::Eip4844 => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844::from_ssz_bytes( decoded_buffer, )?), From 24e5252a55148f8a49e806f2e0943cb834042b31 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Tue, 22 Nov 2022 12:27:48 -0600 Subject: [PATCH 071/263] Massive Update to Engine API (#3740) * Massive Update to Engine API * Update beacon_node/execution_layer/src/engine_api/json_structures.rs Co-authored-by: Michael Sproul * Update beacon_node/execution_layer/src/engine_api/json_structures.rs Co-authored-by: Michael Sproul * Update beacon_node/beacon_chain/src/execution_payload.rs Co-authored-by: realbigsean * Update beacon_node/execution_layer/src/engine_api.rs Co-authored-by: realbigsean Co-authored-by: Michael Sproul Co-authored-by: realbigsean --- beacon_node/beacon_chain/src/beacon_chain.rs | 75 +-- beacon_node/beacon_chain/src/errors.rs | 2 +- .../beacon_chain/src/execution_payload.rs | 17 + .../tests/payload_invalidation.rs | 8 +- beacon_node/execution_layer/Cargo.toml | 4 +- beacon_node/execution_layer/src/engine_api.rs | 46 +- .../execution_layer/src/engine_api/http.rs | 196 +++++++- .../src/engine_api/json_structures.rs | 463 ++++++++---------- beacon_node/execution_layer/src/engines.rs | 16 +- beacon_node/execution_layer/src/lib.rs | 59 ++- .../test_utils/execution_block_generator.rs 
| 19 +- .../src/test_utils/handle_rpc.rs | 9 +- .../src/test_utils/mock_builder.rs | 7 +- .../src/test_utils/mock_execution_layer.rs | 10 +- common/eth2/Cargo.toml | 2 + .../src/per_block_processing.rs | 6 +- testing/ef_tests/src/cases/operations.rs | 12 +- .../execution_engine_integration/Cargo.toml | 4 + .../src/test_rig.rs | 12 +- 19 files changed, 614 insertions(+), 353 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index e7c456ef11f..efedfdae539 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -77,6 +77,8 @@ use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; +#[cfg(feature = "withdrawals")] +use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::{ common::{get_attesting_indices_from_state, get_indexed_attestation}, per_block_processing, @@ -4105,35 +4107,52 @@ impl BeaconChain { return Ok(()); } - let payload_attributes = match self.spec.fork_name_at_epoch(prepare_epoch) { + #[cfg(feature = "withdrawals")] + let head_state = &self.canonical_head.cached_head().snapshot.beacon_state; + #[cfg(feature = "withdrawals")] + let withdrawals = match self.spec.fork_name_at_epoch(prepare_epoch) { ForkName::Base | ForkName::Altair | ForkName::Merge => { - PayloadAttributes::V1(PayloadAttributesV1 { - timestamp: self - .slot_clock - .start_of(prepare_slot) - .ok_or(Error::InvalidSlot(prepare_slot))? - .as_secs(), - prev_randao: head_random, - suggested_fee_recipient: execution_layer - .get_suggested_fee_recipient(proposer as u64) - .await, - }) - } - ForkName::Capella | ForkName::Eip4844 => PayloadAttributes::V2(PayloadAttributesV2 { - timestamp: self - .slot_clock - .start_of(prepare_slot) - .ok_or(Error::InvalidSlot(prepare_slot))? - .as_secs(), - prev_randao: head_random, - suggested_fee_recipient: execution_layer - .get_suggested_fee_recipient(proposer as u64) - .await, - //FIXME(sean) - #[cfg(feature = "withdrawals")] - withdrawals: vec![], - }), - }; + None + }, + ForkName::Capella | ForkName::Eip4844 => match &head_state { + &BeaconState::Capella(_) | &BeaconState::Eip4844(_) => { + // The head_state is already BeaconState::Capella or later + // FIXME(mark) + // Might implement caching here in the future.. + Some(get_expected_withdrawals(head_state, &self.spec)) + } + &BeaconState::Base(_) | &BeaconState::Altair(_) | &BeaconState::Merge(_) => { + // We are the Capella transition block proposer, need advanced state + let mut prepare_state = self + .state_at_slot(prepare_slot, StateSkipConfig::WithoutStateRoots) + .or_else(|e| { + error!(self.log, "Capella Transition Proposer"; "Error Advancing State: " => ?e); + Err(e) + })?; + // FIXME(mark) + // Might implement caching here in the future.. + Some(get_expected_withdrawals(&prepare_state, &self.spec)) + } + }, + }.transpose().or_else(|e| { + error!(self.log, "Error preparing beacon proposer"; "while calculating expected withdrawals" => ?e); + Err(e) + }).map(|withdrawals_opt| withdrawals_opt.map(|w| w.into())) + .map_err(Error::PrepareProposerFailed)?; + + let payload_attributes = PayloadAttributes::V2(PayloadAttributesV2 { + timestamp: self + .slot_clock + .start_of(prepare_slot) + .ok_or(Error::InvalidSlot(prepare_slot))? 
+ .as_secs(), + prev_randao: head_random, + suggested_fee_recipient: execution_layer + .get_suggested_fee_recipient(proposer as u64) + .await, + #[cfg(feature = "withdrawals")] + withdrawals, + }); debug!( self.log, diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index da944c102ff..e4d00d9ca6e 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -153,7 +153,7 @@ pub enum BeaconChainError { }, AddPayloadLogicError, ExecutionForkChoiceUpdateFailed(execution_layer::Error), - PrepareProposerBlockingFailed(execution_layer::Error), + PrepareProposerFailed(BlockProcessingError), ExecutionForkChoiceUpdateInvalid { status: PayloadStatus, }, diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 134e51e796e..bf920a6dab7 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -17,6 +17,8 @@ use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; use slot_clock::SlotClock; +#[cfg(feature = "withdrawals")] +use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_block_processing::{ compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, partially_verify_execution_payload, @@ -362,6 +364,15 @@ pub fn get_execution_payload< let random = *state.get_randao_mix(current_epoch)?; let latest_execution_payload_header_block_hash = state.latest_execution_payload_header()?.block_hash(); + #[cfg(feature = "withdrawals")] + let withdrawals = match state { + &BeaconState::Capella(_) | &BeaconState::Eip4844(_) => { + Some(get_expected_withdrawals(state, spec)?.into()) + } + &BeaconState::Merge(_) => None, + // These shouldn't happen but they're here to make the pattern irrefutable + &BeaconState::Base(_) | &BeaconState::Altair(_) => None, + }; // Spawn a task to obtain the execution payload from the EL via a series of async calls. The // `join_handle` can be used to await the result of the function. 
@@ -378,6 +389,8 @@ pub fn get_execution_payload< proposer_index, latest_execution_payload_header_block_hash, builder_params, + #[cfg(feature = "withdrawals")] + withdrawals, ) .await }, @@ -411,6 +424,7 @@ pub async fn prepare_execution_payload( proposer_index: u64, latest_execution_payload_header_block_hash: ExecutionBlockHash, builder_params: BuilderParams, + #[cfg(feature = "withdrawals")] withdrawals: Option>, ) -> Result, BlockProductionError> where T: BeaconChainTypes, @@ -480,6 +494,9 @@ where proposer_index, forkchoice_update_params, builder_params, + fork, + #[cfg(feature = "withdrawals")] + withdrawals, &chain.spec, ) .await diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 2336c3ba994..611b2098845 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -12,9 +12,9 @@ use beacon_chain::{ INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ - json_structures::{JsonForkChoiceStateV1, JsonPayloadAttributesV1}, + json_structures::{JsonForkchoiceStateV1, JsonPayloadAttributesV1}, test_utils::ExecutionBlockGenerator, - ExecutionLayer, ForkChoiceState, PayloadAttributes, + ExecutionLayer, ForkchoiceState, PayloadAttributes, }; use fork_choice::{ CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus, @@ -117,7 +117,7 @@ impl InvalidPayloadRig { &self.harness.chain.canonical_head } - fn previous_forkchoice_update_params(&self) -> (ForkChoiceState, PayloadAttributes) { + fn previous_forkchoice_update_params(&self) -> (ForkchoiceState, PayloadAttributes) { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); let json = mock_execution_layer .server @@ -126,7 +126,7 @@ impl InvalidPayloadRig { let params = json.get("params").expect("no params"); let fork_choice_state_json = params.get(0).expect("no payload param"); - let fork_choice_state: JsonForkChoiceStateV1 = + let fork_choice_state: JsonForkchoiceStateV1 = serde_json::from_value(fork_choice_state_json.clone()).unwrap(); let payload_param_json = params.get(1).expect("no payload param"); diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 68a4f6a414e..b3bdc54d02a 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -5,8 +5,8 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] -withdrawals = ["state_processing/withdrawals", "types/withdrawals"] -withdrawals-processing = ["state_processing/withdrawals-processing"] +withdrawals = ["state_processing/withdrawals", "types/withdrawals", "eth2/withdrawals"] +withdrawals-processing = ["state_processing/withdrawals-processing", "eth2/withdrawals-processing"] [dependencies] types = { path = "../../consensus/types"} diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index ed940d4a88e..128f23386fb 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,4 +1,4 @@ -use crate::engines::ForkChoiceState; +use crate::engines::ForkchoiceState; pub use ethers_core::types::Transaction; use ethers_core::utils::rlp::{Decodable, Rlp}; use http::deposit_methods::RpcError; @@ -7,10 +7,11 @@ use reqwest::StatusCode; use serde::{Deserialize, Serialize}; use strum::IntoStaticStr; use 
superstruct::superstruct; +#[cfg(feature = "withdrawals")] use types::Withdrawal; pub use types::{ Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, - Hash256, Uint256, VariableList, + ForkName, Hash256, Uint256, VariableList, }; pub mod auth; @@ -44,6 +45,9 @@ pub enum Error { DeserializeWithdrawals(ssz_types::Error), BuilderApi(builder_client::Error), IncorrectStateVariant, + RequiredMethodUnsupported(&'static str), + UnsupportedForkVariant(String), + BadConversion(String), } impl From for Error { @@ -255,7 +259,29 @@ pub struct PayloadAttributes { pub suggested_fee_recipient: Address, #[cfg(feature = "withdrawals")] #[superstruct(only(V2))] - pub withdrawals: Vec, + pub withdrawals: Option>, +} + +impl PayloadAttributes { + pub fn downgrade_to_v1(self) -> Result { + match self { + PayloadAttributes::V1(_) => Ok(self), + PayloadAttributes::V2(v2) => { + #[cfg(feature = "withdrawals")] + if v2.withdrawals.is_some() { + return Err(Error::BadConversion( + "Downgrading from PayloadAttributesV2 with non-null withdrawals" + .to_string(), + )); + } + Ok(PayloadAttributes::V1(PayloadAttributesV1 { + timestamp: v2.timestamp, + prev_randao: v2.prev_randao, + suggested_fee_recipient: v2.suggested_fee_recipient, + })) + } + } + } } #[derive(Clone, Debug, PartialEq)] @@ -277,3 +303,17 @@ pub struct ProposeBlindedBlockResponse { pub latest_valid_hash: Option, pub validation_error: Option, } + +// This name is work in progress, it could +// change when this method is actually proposed +// but I'm writing this as it has been described +#[derive(Clone, Copy)] +pub struct SupportedApis { + pub new_payload_v1: bool, + pub new_payload_v2: bool, + pub forkchoice_updated_v1: bool, + pub forkchoice_updated_v2: bool, + pub get_payload_v1: bool, + pub get_payload_v2: bool, + pub exchange_transition_configuration_v1: bool, +} diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index bd9f387e595..446623744e4 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -7,6 +7,7 @@ use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; +use tokio::sync::RwLock; use std::time::Duration; use types::EthSpec; @@ -29,15 +30,18 @@ pub const ETH_SYNCING: &str = "eth_syncing"; pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_secs(1); pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1"; +pub const ENGINE_NEW_PAYLOAD_V2: &str = "engine_newPayloadV2"; pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; +pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_GET_BLOBS_BUNDLE_V1: &str = "engine_getBlobsBundleV1"; pub const ENGINE_GET_BLOBS_BUNDLE_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; +pub const ENGINE_FORKCHOICE_UPDATED_V2: &str = "engine_forkchoiceUpdatedV2"; pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = @@ -526,6 +530,7 @@ pub struct HttpJsonRpc { pub client: Client, pub url: SensitiveUrl, pub execution_timeout_multiplier: u32, + pub cached_supported_apis: RwLock>, auth: Option, } @@ -538,6 +543,7 @@ impl HttpJsonRpc
{ client: Client::builder().build()?, url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), + cached_supported_apis: Default::default(), auth: None, }) } @@ -551,6 +557,7 @@ impl HttpJsonRpc { client: Client::builder().build()?, url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), + cached_supported_apis: Default::default(), auth: Some(auth), }) } @@ -671,7 +678,7 @@ impl HttpJsonRpc { &self, execution_payload: ExecutionPayload, ) -> Result { - let params = json!([JsonExecutionPayload::from(execution_payload)]); + let params = json!([JsonExecutionPayloadV1::try_from(execution_payload)?]); let response: JsonPayloadStatusV1 = self .rpc_request( @@ -684,13 +691,31 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn new_payload_v2( + &self, + execution_payload: ExecutionPayload, + ) -> Result { + let params = json!([JsonExecutionPayloadV2::try_from(execution_payload)?]); + + let response: JsonPayloadStatusV1 = self + .rpc_request( + ENGINE_NEW_PAYLOAD_V2, + params, + ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + pub async fn get_payload_v1( &self, + fork_name: ForkName, payload_id: PayloadId, ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); - let response: JsonExecutionPayload = self + let payload_v1: JsonExecutionPayloadV1 = self .rpc_request( ENGINE_GET_PAYLOAD_V1, params, @@ -698,7 +723,25 @@ impl HttpJsonRpc { ) .await?; - Ok(response.into()) + JsonExecutionPayload::V1(payload_v1).try_into_execution_payload(fork_name) + } + + pub async fn get_payload_v2( + &self, + fork_name: ForkName, + payload_id: PayloadId, + ) -> Result, Error> { + let params = json!([JsonPayloadIdRequest::from(payload_id)]); + + let payload_v2: JsonExecutionPayloadV2 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V2, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + JsonExecutionPayload::V2(payload_v2).try_into_execution_payload(fork_name) } pub async fn get_blobs_bundle_v1( @@ -720,11 +763,11 @@ impl HttpJsonRpc { pub async fn forkchoice_updated_v1( &self, - forkchoice_state: ForkChoiceState, + forkchoice_state: ForkchoiceState, payload_attributes: Option, ) -> Result { let params = json!([ - JsonForkChoiceStateV1::from(forkchoice_state), + JsonForkchoiceStateV1::from(forkchoice_state), payload_attributes.map(JsonPayloadAttributes::from) ]); @@ -739,6 +782,27 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn forkchoice_updated_v2( + &self, + forkchoice_state: ForkchoiceState, + payload_attributes: Option, + ) -> Result { + let params = json!([ + JsonForkchoiceStateV1::from(forkchoice_state), + payload_attributes.map(JsonPayloadAttributes::from) + ]); + + let response: JsonForkchoiceUpdatedV1Response = self + .rpc_request( + ENGINE_FORKCHOICE_UPDATED_V2, + params, + ENGINE_FORKCHOICE_UPDATED_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + pub async fn exchange_transition_configuration_v1( &self, transition_configuration: TransitionConfigurationV1, @@ -756,6 +820,94 @@ impl HttpJsonRpc { Ok(response) } + + // this is a stub as this method hasn't been defined yet + pub async fn supported_apis_v1(&self) -> Result { + Ok(SupportedApis { + new_payload_v1: true, + new_payload_v2: cfg!(all(feature = "withdrawals", not(test))), + forkchoice_updated_v1: true, + forkchoice_updated_v2: cfg!(all(feature = "withdrawals", not(test))), + get_payload_v1: true, + get_payload_v2: 
cfg!(all(feature = "withdrawals", not(test))), + exchange_transition_configuration_v1: true, + }) + } + + pub async fn set_cached_supported_apis(&self, supported_apis: SupportedApis) { + *self.cached_supported_apis.write().await = Some(supported_apis); + } + + pub async fn get_cached_supported_apis(&self) -> Result { + let cached_opt = *self.cached_supported_apis.read().await; + if let Some(supported_apis) = cached_opt { + Ok(supported_apis) + } else { + let supported_apis = self.supported_apis_v1().await?; + self.set_cached_supported_apis(supported_apis).await; + Ok(supported_apis) + } + } + + // automatically selects the latest version of + // new_payload that the execution engine supports + pub async fn new_payload( + &self, + execution_payload: ExecutionPayload, + ) -> Result { + let supported_apis = self.get_cached_supported_apis().await?; + if supported_apis.new_payload_v2 { + // FIXME: I haven't thought at all about how to handle 4844.. + self.new_payload_v2(execution_payload).await + } else if supported_apis.new_payload_v1 { + self.new_payload_v1(execution_payload).await + } else { + Err(Error::RequiredMethodUnsupported("engine_newPayload")) + } + } + + // automatically selects the latest version of + // get_payload that the execution engine supports + pub async fn get_payload( + &self, + fork_name: ForkName, + payload_id: PayloadId, + ) -> Result, Error> { + let supported_apis = self.get_cached_supported_apis().await?; + if supported_apis.get_payload_v2 { + // FIXME: I haven't thought at all about how to handle 4844.. + self.get_payload_v2(fork_name, payload_id).await + } else if supported_apis.new_payload_v1 { + self.get_payload_v1(fork_name, payload_id).await + } else { + Err(Error::RequiredMethodUnsupported("engine_getPayload")) + } + } + + // automatically selects the latest version of + // forkchoice_updated that the execution engine supports + pub async fn forkchoice_updated( + &self, + forkchoice_state: ForkchoiceState, + payload_attributes: Option, + ) -> Result { + let supported_apis = self.get_cached_supported_apis().await?; + if supported_apis.forkchoice_updated_v2 { + // FIXME: I haven't thought at all about how to handle 4844.. 
+ self.forkchoice_updated_v2(forkchoice_state, payload_attributes) + .await + } else if supported_apis.forkchoice_updated_v1 { + self.forkchoice_updated_v1( + forkchoice_state, + payload_attributes + .map(|pa| pa.downgrade_to_v1()) + .transpose()?, + ) + .await + } else { + Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated")) + } + } } #[cfg(test)] @@ -767,8 +919,8 @@ mod test { use std::str::FromStr; use std::sync::Arc; use types::{ - AbstractExecPayload, ExecutionPayloadMerge, ForkName, FullPayload, MainnetEthSpec, - Transactions, Unsigned, VariableList, + ExecutionPayloadMerge, ForkName, FullPayload, MainnetEthSpec, Transactions, Unsigned, + VariableList, }; struct Tester { @@ -1052,7 +1204,7 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(1), safe_block_hash: ExecutionBlockHash::repeat_byte(1), finalized_block_hash: ExecutionBlockHash::zero(), @@ -1087,7 +1239,7 @@ mod test { .assert_auth_failure(|client| async move { client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(1), safe_block_hash: ExecutionBlockHash::repeat_byte(1), finalized_block_hash: ExecutionBlockHash::zero(), @@ -1108,7 +1260,9 @@ mod test { Tester::new(true) .assert_request_equals( |client| async move { - let _ = client.get_payload_v1::([42; 8]).await; + let _ = client + .get_payload_v1::(ForkName::Merge, [42; 8]) + .await; }, json!({ "id": STATIC_ID, @@ -1121,7 +1275,9 @@ mod test { Tester::new(false) .assert_auth_failure(|client| async move { - client.get_payload_v1::([42; 8]).await + client + .get_payload_v1::(ForkName::Merge, [42; 8]) + .await }) .await; } @@ -1209,7 +1365,7 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(0), safe_block_hash: ExecutionBlockHash::repeat_byte(0), finalized_block_hash: ExecutionBlockHash::repeat_byte(1), @@ -1235,7 +1391,7 @@ mod test { .assert_auth_failure(|client| async move { client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(0), safe_block_hash: ExecutionBlockHash::repeat_byte(0), finalized_block_hash: ExecutionBlockHash::repeat_byte(1), @@ -1274,7 +1430,7 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), finalized_block_hash: ExecutionBlockHash::zero(), @@ -1321,7 +1477,7 @@ mod test { |client| async move { let response = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), finalized_block_hash: ExecutionBlockHash::zero(), @@ -1350,7 +1506,7 @@ mod test { // engine_getPayloadV1 REQUEST validation |client| async move { let _ = client - .get_payload_v1::(str_to_payload_id("0xa247243752eb10b4")) + .get_payload_v1::(ForkName::Merge,str_to_payload_id("0xa247243752eb10b4")) .await; }, json!({ @@ -1385,7 +1541,7 @@ mod test { })], 
|client| async move { let payload = client - .get_payload_v1::(str_to_payload_id("0xa247243752eb10b4")) + .get_payload_v1::(ForkName::Merge,str_to_payload_id("0xa247243752eb10b4")) .await .unwrap(); @@ -1468,7 +1624,7 @@ mod test { })], |client| async move { let response = client - .new_payload_v1::(FullPayload::default_at_fork(ForkName::Merge).into()) + .new_payload_v1::(ExecutionPayload::Merge(ExecutionPayloadMerge::default())) .await .unwrap(); @@ -1487,7 +1643,7 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), @@ -1526,7 +1682,7 @@ mod test { |client| async move { let response = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 6d1d70e78dc..99459ec2b13 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -3,11 +3,12 @@ use serde::{Deserialize, Serialize}; use strum::EnumString; use superstruct::superstruct; use types::{ - Blob, EthSpec, ExecutionBlockHash, ExecutionPayloadEip4844, ExecutionPayloadHeaderEip4844, - FixedVector, KzgCommitment, Transaction, Unsigned, VariableList, + Blob, EthSpec, ExecutionBlockHash, FixedVector, KzgCommitment, Transaction, Unsigned, + VariableList, Withdrawal, +}; +use types::{ + ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, }; -use types::{ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge}; -use types::{ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderMerge}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -62,7 +63,6 @@ pub struct JsonPayloadIdResponse { pub payload_id: PayloadId, } -// (V1,V2,V3) -> (Merge,Capella,EIP4844) #[superstruct( variants(V1, V2, V3), variant_attributes( @@ -74,7 +74,7 @@ pub struct JsonPayloadIdResponse { )] #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(bound = "T: EthSpec", rename_all = "camelCase", untagged)] -pub struct JsonExecutionPayloadHeader { +pub struct JsonExecutionPayload { pub parent_hash: ExecutionBlockHash, pub fee_recipient: Address, pub state_root: Hash256, @@ -94,80 +94,174 @@ pub struct JsonExecutionPayloadHeader { pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, - #[serde(with = "eth2_serde_utils::u64_hex_be")] #[superstruct(only(V3))] + // FIXME: can't easily make this an option because of custom deserialization.. 
+ #[serde(with = "eth2_serde_utils::u64_hex_be")] pub excess_blobs: u64, pub block_hash: ExecutionBlockHash, - pub transactions_root: Hash256, - #[cfg(feature = "withdrawals")] + #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] + pub transactions: + VariableList, T::MaxTransactionsPerPayload>, #[superstruct(only(V2, V3))] - pub withdrawals_root: Hash256, -} - -impl From> for ExecutionPayloadHeader { - fn from(json_header: JsonExecutionPayloadHeader) -> Self { - match json_header { - JsonExecutionPayloadHeader::V1(v1) => Self::Merge(ExecutionPayloadHeaderMerge { - parent_hash: v1.parent_hash, - fee_recipient: v1.fee_recipient, - state_root: v1.state_root, - receipts_root: v1.receipts_root, - logs_bloom: v1.logs_bloom, - prev_randao: v1.prev_randao, - block_number: v1.block_number, - gas_limit: v1.gas_limit, - gas_used: v1.gas_used, - timestamp: v1.timestamp, - extra_data: v1.extra_data, - base_fee_per_gas: v1.base_fee_per_gas, - block_hash: v1.block_hash, - transactions_root: v1.transactions_root, - }), - JsonExecutionPayloadHeader::V2(v2) => Self::Capella(ExecutionPayloadHeaderCapella { - parent_hash: v2.parent_hash, - fee_recipient: v2.fee_recipient, - state_root: v2.state_root, - receipts_root: v2.receipts_root, - logs_bloom: v2.logs_bloom, - prev_randao: v2.prev_randao, - block_number: v2.block_number, - gas_limit: v2.gas_limit, - gas_used: v2.gas_used, - timestamp: v2.timestamp, - extra_data: v2.extra_data, - base_fee_per_gas: v2.base_fee_per_gas, - block_hash: v2.block_hash, - transactions_root: v2.transactions_root, - #[cfg(feature = "withdrawals")] - withdrawals_root: v2.withdrawals_root, - }), - JsonExecutionPayloadHeader::V3(v3) => Self::Eip4844(ExecutionPayloadHeaderEip4844 { - parent_hash: v3.parent_hash, - fee_recipient: v3.fee_recipient, - state_root: v3.state_root, - receipts_root: v3.receipts_root, - logs_bloom: v3.logs_bloom, - prev_randao: v3.prev_randao, - block_number: v3.block_number, - gas_limit: v3.gas_limit, - gas_used: v3.gas_used, - timestamp: v3.timestamp, - extra_data: v3.extra_data, - base_fee_per_gas: v3.base_fee_per_gas, - excess_blobs: v3.excess_blobs, - block_hash: v3.block_hash, - transactions_root: v3.transactions_root, - #[cfg(feature = "withdrawals")] - withdrawals_root: v3.withdrawals_root, - }), + pub withdrawals: Option>, +} + +impl JsonExecutionPayload { + pub fn try_into_execution_payload( + self, + fork_name: ForkName, + ) -> Result, Error> { + match self { + JsonExecutionPayload::V1(v1) => match fork_name { + ForkName::Merge => Ok(ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: v1.parent_hash, + fee_recipient: v1.fee_recipient, + state_root: v1.state_root, + receipts_root: v1.receipts_root, + logs_bloom: v1.logs_bloom, + prev_randao: v1.prev_randao, + block_number: v1.block_number, + gas_limit: v1.gas_limit, + gas_used: v1.gas_used, + timestamp: v1.timestamp, + extra_data: v1.extra_data, + base_fee_per_gas: v1.base_fee_per_gas, + block_hash: v1.block_hash, + transactions: v1.transactions, + })), + _ => Err(Error::UnsupportedForkVariant(format!("Unsupported conversion from JsonExecutionPayloadV1 for {}", fork_name))), + } + JsonExecutionPayload::V2(v2) => match fork_name { + ForkName::Merge => Ok(ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: v2.parent_hash, + fee_recipient: v2.fee_recipient, + state_root: v2.state_root, + receipts_root: v2.receipts_root, + logs_bloom: v2.logs_bloom, + prev_randao: v2.prev_randao, + block_number: v2.block_number, + gas_limit: v2.gas_limit, + gas_used: v2.gas_used, + 
timestamp: v2.timestamp, + extra_data: v2.extra_data, + base_fee_per_gas: v2.base_fee_per_gas, + block_hash: v2.block_hash, + transactions: v2.transactions, + })), + ForkName::Capella => Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: v2.parent_hash, + fee_recipient: v2.fee_recipient, + state_root: v2.state_root, + receipts_root: v2.receipts_root, + logs_bloom: v2.logs_bloom, + prev_randao: v2.prev_randao, + block_number: v2.block_number, + gas_limit: v2.gas_limit, + gas_used: v2.gas_used, + timestamp: v2.timestamp, + extra_data: v2.extra_data, + base_fee_per_gas: v2.base_fee_per_gas, + block_hash: v2.block_hash, + transactions: v2.transactions, + #[cfg(feature = "withdrawals")] + withdrawals: v2 + .withdrawals + .map(|v| { + Into::>::into(v) + .into_iter() + .map(Into::into) + .collect::>() + .into() + }) + .ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadCapella".to_string()))? + })), + ForkName::Eip4844 => Err(Error::UnsupportedForkVariant("JsonExecutionPayloadV2 -> ExecutionPayloadEip4844 not implemented yet as it might never be".to_string())), + _ => Err(Error::UnsupportedForkVariant(format!("Unsupported conversion from JsonExecutionPayloadV2 for {}", fork_name))), + } + JsonExecutionPayload::V3(v3) => match fork_name { + ForkName::Merge => Ok(ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: v3.parent_hash, + fee_recipient: v3.fee_recipient, + state_root: v3.state_root, + receipts_root: v3.receipts_root, + logs_bloom: v3.logs_bloom, + prev_randao: v3.prev_randao, + block_number: v3.block_number, + gas_limit: v3.gas_limit, + gas_used: v3.gas_used, + timestamp: v3.timestamp, + extra_data: v3.extra_data, + base_fee_per_gas: v3.base_fee_per_gas, + block_hash: v3.block_hash, + transactions: v3.transactions, + })), + ForkName::Capella => Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: v3.parent_hash, + fee_recipient: v3.fee_recipient, + state_root: v3.state_root, + receipts_root: v3.receipts_root, + logs_bloom: v3.logs_bloom, + prev_randao: v3.prev_randao, + block_number: v3.block_number, + gas_limit: v3.gas_limit, + gas_used: v3.gas_used, + timestamp: v3.timestamp, + extra_data: v3.extra_data, + base_fee_per_gas: v3.base_fee_per_gas, + block_hash: v3.block_hash, + transactions: v3.transactions, + #[cfg(feature = "withdrawals")] + withdrawals: v3 + .withdrawals + .map(|v| { + Into::>::into(v) + .into_iter() + .map(Into::into) + .collect::>() + .into() + }) + .ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV3 -> ExecutionPayloadCapella".to_string()))? 
+ })), + ForkName::Eip4844 => Ok(ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { + parent_hash: v3.parent_hash, + fee_recipient: v3.fee_recipient, + state_root: v3.state_root, + receipts_root: v3.receipts_root, + logs_bloom: v3.logs_bloom, + prev_randao: v3.prev_randao, + block_number: v3.block_number, + gas_limit: v3.gas_limit, + gas_used: v3.gas_used, + timestamp: v3.timestamp, + extra_data: v3.extra_data, + base_fee_per_gas: v3.base_fee_per_gas, + // FIXME: excess_blobs probably will be an option whenever the engine API is finalized + excess_blobs: v3.excess_blobs, + block_hash: v3.block_hash, + transactions: v3.transactions, + #[cfg(feature = "withdrawals")] + withdrawals: v3 + .withdrawals + .map(|v| { + Vec::from(v) + .into_iter() + .map(Into::into) + .collect::>() + .into() + }) + .ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV3 -> ExecutionPayloadEip4844".to_string()))?, + })), + _ => Err(Error::UnsupportedForkVariant(format!("Unsupported conversion from JsonExecutionPayloadV2 for {}", fork_name))), + } } } } -impl From> for JsonExecutionPayloadHeader { - fn from(header: ExecutionPayloadHeader) -> Self { - match header { - ExecutionPayloadHeader::Merge(merge) => Self::V1(JsonExecutionPayloadHeaderV1 { +impl TryFrom> for JsonExecutionPayloadV1 { + type Error = Error; + fn try_from(payload: ExecutionPayload) -> Result { + match payload { + ExecutionPayload::Merge(merge) => Ok(JsonExecutionPayloadV1 { parent_hash: merge.parent_hash, fee_recipient: merge.fee_recipient, state_root: merge.state_root, @@ -181,157 +275,25 @@ impl From> for JsonExecutionPayloadHeader< extra_data: merge.extra_data, base_fee_per_gas: merge.base_fee_per_gas, block_hash: merge.block_hash, - transactions_root: merge.transactions_root, - }), - ExecutionPayloadHeader::Capella(capella) => Self::V2(JsonExecutionPayloadHeaderV2 { - parent_hash: capella.parent_hash, - fee_recipient: capella.fee_recipient, - state_root: capella.state_root, - receipts_root: capella.receipts_root, - logs_bloom: capella.logs_bloom, - prev_randao: capella.prev_randao, - block_number: capella.block_number, - gas_limit: capella.gas_limit, - gas_used: capella.gas_used, - timestamp: capella.timestamp, - extra_data: capella.extra_data, - base_fee_per_gas: capella.base_fee_per_gas, - block_hash: capella.block_hash, - transactions_root: capella.transactions_root, - #[cfg(feature = "withdrawals")] - withdrawals_root: capella.withdrawals_root, - }), - ExecutionPayloadHeader::Eip4844(eip4844) => Self::V3(JsonExecutionPayloadHeaderV3 { - parent_hash: eip4844.parent_hash, - fee_recipient: eip4844.fee_recipient, - state_root: eip4844.state_root, - receipts_root: eip4844.receipts_root, - logs_bloom: eip4844.logs_bloom, - prev_randao: eip4844.prev_randao, - block_number: eip4844.block_number, - gas_limit: eip4844.gas_limit, - gas_used: eip4844.gas_used, - timestamp: eip4844.timestamp, - extra_data: eip4844.extra_data, - base_fee_per_gas: eip4844.base_fee_per_gas, - excess_blobs: eip4844.excess_blobs, - block_hash: eip4844.block_hash, - transactions_root: eip4844.transactions_root, - #[cfg(feature = "withdrawals")] - withdrawals_root: eip4844.withdrawals_root, - }), - } - } -} - -// (V1,V2, V2) -> (Merge,Capella,EIP4844) -#[superstruct( - variants(V1, V2, V3), - variant_attributes( - derive(Debug, PartialEq, Default, Serialize, Deserialize,), - serde(bound = "T: EthSpec", rename_all = "camelCase"), - ), - cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = 
"Error::IncorrectStateVariant") -)] -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(bound = "T: EthSpec", rename_all = "camelCase", untagged)] -pub struct JsonExecutionPayload { - pub parent_hash: ExecutionBlockHash, - pub fee_recipient: Address, - pub state_root: Hash256, - pub receipts_root: Hash256, - #[serde(with = "serde_logs_bloom")] - pub logs_bloom: FixedVector, - pub prev_randao: Hash256, - #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub block_number: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub gas_used: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub timestamp: u64, - #[serde(with = "ssz_types::serde_utils::hex_var_list")] - pub extra_data: VariableList, - #[serde(with = "eth2_serde_utils::u256_hex_be")] - pub base_fee_per_gas: Uint256, - #[superstruct(only(V3))] - #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub excess_blobs: u64, - pub block_hash: ExecutionBlockHash, - #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] - pub transactions: - VariableList, T::MaxTransactionsPerPayload>, - #[cfg(feature = "withdrawals")] - #[superstruct(only(V2, V3))] - pub withdrawals: VariableList, -} - -impl From> for ExecutionPayload { - fn from(json_payload: JsonExecutionPayload) -> Self { - match json_payload { - JsonExecutionPayload::V1(v1) => Self::Merge(ExecutionPayloadMerge { - parent_hash: v1.parent_hash, - fee_recipient: v1.fee_recipient, - state_root: v1.state_root, - receipts_root: v1.receipts_root, - logs_bloom: v1.logs_bloom, - prev_randao: v1.prev_randao, - block_number: v1.block_number, - gas_limit: v1.gas_limit, - gas_used: v1.gas_used, - timestamp: v1.timestamp, - extra_data: v1.extra_data, - base_fee_per_gas: v1.base_fee_per_gas, - block_hash: v1.block_hash, - transactions: v1.transactions, - }), - JsonExecutionPayload::V2(v2) => Self::Capella(ExecutionPayloadCapella { - parent_hash: v2.parent_hash, - fee_recipient: v2.fee_recipient, - state_root: v2.state_root, - receipts_root: v2.receipts_root, - logs_bloom: v2.logs_bloom, - prev_randao: v2.prev_randao, - block_number: v2.block_number, - gas_limit: v2.gas_limit, - gas_used: v2.gas_used, - timestamp: v2.timestamp, - extra_data: v2.extra_data, - base_fee_per_gas: v2.base_fee_per_gas, - block_hash: v2.block_hash, - transactions: v2.transactions, - #[cfg(feature = "withdrawals")] - withdrawals: v2.withdrawals, - }), - JsonExecutionPayload::V3(v3) => Self::Eip4844(ExecutionPayloadEip4844 { - parent_hash: v3.parent_hash, - fee_recipient: v3.fee_recipient, - state_root: v3.state_root, - receipts_root: v3.receipts_root, - logs_bloom: v3.logs_bloom, - prev_randao: v3.prev_randao, - block_number: v3.block_number, - gas_limit: v3.gas_limit, - gas_used: v3.gas_used, - timestamp: v3.timestamp, - extra_data: v3.extra_data, - base_fee_per_gas: v3.base_fee_per_gas, - excess_blobs: v3.excess_blobs, - block_hash: v3.block_hash, - transactions: v3.transactions, - #[cfg(feature = "withdrawals")] - withdrawals: v3.withdrawals, + transactions: merge.transactions, }), + ExecutionPayload::Capella(_) => Err(Error::UnsupportedForkVariant(format!( + "Unsupported conversion to JsonExecutionPayloadV1 for {}", + ForkName::Capella + ))), + ExecutionPayload::Eip4844(_) => Err(Error::UnsupportedForkVariant(format!( + "Unsupported conversion to JsonExecutionPayloadV1 for {}", + ForkName::Eip4844 + ))), } } } -impl From> for JsonExecutionPayload { - fn from(payload: ExecutionPayload) -> Self { +impl TryFrom> for 
JsonExecutionPayloadV2 { + type Error = Error; + fn try_from(payload: ExecutionPayload) -> Result { match payload { - ExecutionPayload::Merge(merge) => Self::V1(JsonExecutionPayloadV1 { + ExecutionPayload::Merge(merge) => Ok(JsonExecutionPayloadV2 { parent_hash: merge.parent_hash, fee_recipient: merge.fee_recipient, state_root: merge.state_root, @@ -346,8 +308,9 @@ impl From> for JsonExecutionPayload { base_fee_per_gas: merge.base_fee_per_gas, block_hash: merge.block_hash, transactions: merge.transactions, + withdrawals: None, }), - ExecutionPayload::Capella(capella) => Self::V2(JsonExecutionPayloadV2 { + ExecutionPayload::Capella(capella) => Ok(JsonExecutionPayloadV2 { parent_hash: capella.parent_hash, fee_recipient: capella.fee_recipient, state_root: capella.state_root, @@ -363,27 +326,20 @@ impl From> for JsonExecutionPayload { block_hash: capella.block_hash, transactions: capella.transactions, #[cfg(feature = "withdrawals")] - withdrawals: capella.withdrawals, - }), - ExecutionPayload::Eip4844(eip4844) => Self::V3(JsonExecutionPayloadV3 { - parent_hash: eip4844.parent_hash, - fee_recipient: eip4844.fee_recipient, - state_root: eip4844.state_root, - receipts_root: eip4844.receipts_root, - logs_bloom: eip4844.logs_bloom, - prev_randao: eip4844.prev_randao, - block_number: eip4844.block_number, - gas_limit: eip4844.gas_limit, - gas_used: eip4844.gas_used, - timestamp: eip4844.timestamp, - extra_data: eip4844.extra_data, - base_fee_per_gas: eip4844.base_fee_per_gas, - excess_blobs: eip4844.excess_blobs, - block_hash: eip4844.block_hash, - transactions: eip4844.transactions, - #[cfg(feature = "withdrawals")] - withdrawals: eip4844.withdrawals, + withdrawals: Some( + Vec::from(capella.withdrawals) + .into_iter() + .map(Into::into) + .collect::>() + .into(), + ), + #[cfg(not(feature = "withdrawals"))] + withdrawals: None, }), + ExecutionPayload::Eip4844(_) => Err(Error::UnsupportedForkVariant(format!( + "Unsupported conversion to JsonExecutionPayloadV1 for {}", + ForkName::Eip4844 + ))), } } } @@ -424,12 +380,15 @@ impl From for Withdrawal { #[superstruct( variants(V1, V2), - variant_attributes(derive(Clone, Debug, PartialEq, Serialize, Deserialize),), + variant_attributes( + derive(Clone, Debug, PartialEq, Serialize, Deserialize), + serde(rename_all = "camelCase") + ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] #[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase", untagged)] +#[serde(untagged)] pub struct JsonPayloadAttributes { #[serde(with = "eth2_serde_utils::u64_hex_be")] pub timestamp: u64, @@ -437,7 +396,7 @@ pub struct JsonPayloadAttributes { pub suggested_fee_recipient: Address, #[cfg(feature = "withdrawals")] #[superstruct(only(V2))] - pub withdrawals: Vec, + pub withdrawals: Option>, } impl From for JsonPayloadAttributes { @@ -453,7 +412,9 @@ impl From for JsonPayloadAttributes { prev_randao: pa.prev_randao, suggested_fee_recipient: pa.suggested_fee_recipient, #[cfg(feature = "withdrawals")] - withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), + withdrawals: pa + .withdrawals + .map(|w| w.into_iter().map(Into::into).collect()), }), } } @@ -472,7 +433,9 @@ impl From for PayloadAttributes { prev_randao: jpa.prev_randao, suggested_fee_recipient: jpa.suggested_fee_recipient, #[cfg(feature = "withdrawals")] - withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), + withdrawals: jpa + .withdrawals + .map(|jw| 
jw.into_iter().map(Into::into).collect()), }), } } @@ -488,16 +451,16 @@ pub struct JsonBlobBundles { #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct JsonForkChoiceStateV1 { +pub struct JsonForkchoiceStateV1 { pub head_block_hash: ExecutionBlockHash, pub safe_block_hash: ExecutionBlockHash, pub finalized_block_hash: ExecutionBlockHash, } -impl From for JsonForkChoiceStateV1 { - fn from(f: ForkChoiceState) -> Self { +impl From for JsonForkchoiceStateV1 { + fn from(f: ForkchoiceState) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. - let ForkChoiceState { + let ForkchoiceState { head_block_hash, safe_block_hash, finalized_block_hash, @@ -511,10 +474,10 @@ impl From for JsonForkChoiceStateV1 { } } -impl From for ForkChoiceState { - fn from(j: JsonForkChoiceStateV1) -> Self { +impl From for ForkchoiceState { + fn from(j: JsonForkchoiceStateV1) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. - let JsonForkChoiceStateV1 { + let JsonForkchoiceStateV1 { head_block_hash, safe_block_hash, finalized_block_hash, diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index da77bd9cf89..264303b5d3b 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -88,7 +88,7 @@ impl State { } #[derive(Copy, Clone, PartialEq, Debug)] -pub struct ForkChoiceState { +pub struct ForkchoiceState { pub head_block_hash: ExecutionBlockHash, pub safe_block_hash: ExecutionBlockHash, pub finalized_block_hash: ExecutionBlockHash, @@ -115,7 +115,7 @@ pub struct Engine { pub api: HttpJsonRpc, payload_id_cache: Mutex>, state: RwLock, - latest_forkchoice_state: RwLock>, + latest_forkchoice_state: RwLock>, executor: TaskExecutor, log: Logger, } @@ -161,13 +161,13 @@ impl Engine { pub async fn notify_forkchoice_updated( &self, - forkchoice_state: ForkChoiceState, + forkchoice_state: ForkchoiceState, payload_attributes: Option, log: &Logger, ) -> Result { let response = self .api - .forkchoice_updated_v1(forkchoice_state, payload_attributes.clone()) + .forkchoice_updated(forkchoice_state, payload_attributes.clone()) .await?; if let Some(payload_id) = response.payload_id { @@ -187,11 +187,11 @@ impl Engine { Ok(response) } - async fn get_latest_forkchoice_state(&self) -> Option { + async fn get_latest_forkchoice_state(&self) -> Option { *self.latest_forkchoice_state.read().await } - pub async fn set_latest_forkchoice_state(&self, state: ForkChoiceState) { + pub async fn set_latest_forkchoice_state(&self, state: ForkchoiceState) { *self.latest_forkchoice_state.write().await = Some(state); } @@ -216,7 +216,7 @@ impl Engine { // For simplicity, payload attributes are never included in this call. It may be // reasonable to include them in the future. - if let Err(e) = self.api.forkchoice_updated_v1(forkchoice_state, None).await { + if let Err(e) = self.api.forkchoice_updated(forkchoice_state, None).await { debug!( self.log, "Failed to issue latest head to engine"; @@ -349,7 +349,7 @@ impl Engine { // TODO: revisit this - do we need to key on withdrawals as well here? 
impl PayloadIdCacheKey { - fn new(state: &ForkChoiceState, attributes: &PayloadAttributes) -> Self { + fn new(state: &ForkchoiceState, attributes: &PayloadAttributes) -> Self { Self { head_block_hash: state.head_block_hash, timestamp: attributes.timestamp(), diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 04bdb4a20de..2a19c4165e2 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -12,7 +12,7 @@ use engine_api::Error as ApiError; pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; -pub use engines::{EngineState, ForkChoiceState}; +pub use engines::{EngineState, ForkchoiceState}; use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; use payload_status::process_payload_status; @@ -33,6 +33,8 @@ use tokio::{ time::sleep, }; use tokio_stream::wrappers::WatchStream; +#[cfg(feature = "withdrawals")] +use types::Withdrawal; use types::{AbstractExecPayload, Blob, ExecPayload, ExecutionPayloadEip4844, KzgCommitment}; use types::{ BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName, @@ -613,6 +615,8 @@ impl ExecutionLayer { proposer_index: u64, forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, + current_fork: ForkName, + #[cfg(feature = "withdrawals")] withdrawals: Option>, spec: &ChainSpec, ) -> Result, Error> { let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; @@ -630,6 +634,9 @@ impl ExecutionLayer { suggested_fee_recipient, forkchoice_update_params, builder_params, + current_fork, + #[cfg(feature = "withdrawals")] + withdrawals, spec, ) .await @@ -645,6 +652,9 @@ impl ExecutionLayer { prev_randao, suggested_fee_recipient, forkchoice_update_params, + current_fork, + #[cfg(feature = "withdrawals")] + withdrawals, ) .await } @@ -660,6 +670,8 @@ impl ExecutionLayer { suggested_fee_recipient: Address, forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, + current_fork: ForkName, + #[cfg(feature = "withdrawals")] withdrawals: Option>, spec: &ChainSpec, ) -> Result, Error> { if let Some(builder) = self.builder() { @@ -683,6 +695,9 @@ impl ExecutionLayer { prev_randao, suggested_fee_recipient, forkchoice_update_params, + current_fork, + #[cfg(feature = "withdrawals")] + withdrawals, ) ); @@ -812,6 +827,9 @@ impl ExecutionLayer { prev_randao, suggested_fee_recipient, forkchoice_update_params, + current_fork, + #[cfg(feature = "withdrawals")] + withdrawals, ) .await } @@ -824,6 +842,8 @@ impl ExecutionLayer { prev_randao: Hash256, suggested_fee_recipient: Address, forkchoice_update_params: ForkchoiceUpdateParameters, + current_fork: ForkName, + #[cfg(feature = "withdrawals")] withdrawals: Option>, ) -> Result, Error> { self.get_full_payload_with( parent_hash, @@ -831,6 +851,9 @@ impl ExecutionLayer { prev_randao, suggested_fee_recipient, forkchoice_update_params, + current_fork, + #[cfg(feature = "withdrawals")] + withdrawals, noop, ) .await @@ -844,6 +867,8 @@ impl ExecutionLayer { prev_randao: Hash256, suggested_fee_recipient: Address, forkchoice_update_params: ForkchoiceUpdateParameters, + current_fork: ForkName, + #[cfg(feature = "withdrawals")] withdrawals: Option>, ) -> Result, Error> { self.get_full_payload_with( parent_hash, @@ -851,6 +876,9 @@ impl ExecutionLayer { prev_randao, suggested_fee_recipient, forkchoice_update_params, + current_fork, + #[cfg(feature = "withdrawals")] + withdrawals, 
Self::cache_payload, ) .await @@ -863,10 +891,14 @@ impl ExecutionLayer { prev_randao: Hash256, suggested_fee_recipient: Address, forkchoice_update_params: ForkchoiceUpdateParameters, + current_fork: ForkName, + #[cfg(feature = "withdrawals")] withdrawals: Option>, f: fn(&ExecutionLayer, &ExecutionPayload) -> Option>, ) -> Result, Error> { + #[cfg(feature = "withdrawals")] + let withdrawals_ref = &withdrawals; self.engine() - .request(|engine| async move { + .request(move |engine| async move { let payload_id = if let Some(id) = engine .get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient) .await @@ -884,7 +916,7 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, &[metrics::MISS], ); - let fork_choice_state = ForkChoiceState { + let fork_choice_state = ForkchoiceState { head_block_hash: parent_hash, safe_block_hash: forkchoice_update_params .justified_hash @@ -893,12 +925,14 @@ impl ExecutionLayer { .finalized_hash .unwrap_or_else(ExecutionBlockHash::zero), }; - // FIXME: This will have to properly handle forks. To do that, - // withdrawals will need to be passed into this function - let payload_attributes = PayloadAttributes::V1(PayloadAttributesV1 { + // This must always be the latest PayloadAttributes + // FIXME: How to non-capella EIP4844 testnets handle this? + let payload_attributes = PayloadAttributes::V2(PayloadAttributesV2 { timestamp, prev_randao, suggested_fee_recipient, + #[cfg(feature = "withdrawals")] + withdrawals: withdrawals_ref.clone(), }); let response = engine @@ -925,7 +959,11 @@ impl ExecutionLayer { }; let blob_fut = async { - //FIXME(sean) do a fork check here and return None otherwise + //FIXME(sean) do a fork check here and return None otherwise + // ^ + // well now we have the fork in this function so + // it should be easier to do that now + // - Mark debug!( self.log(), "Issuing engine_getBlobsBundle"; @@ -945,9 +983,8 @@ impl ExecutionLayer { "timestamp" => timestamp, "parent_hash" => ?parent_hash, ); - engine.api.get_payload_v1::(payload_id).await + engine.api.get_payload::(current_fork, payload_id).await }; - let (blob, payload) = tokio::join!(blob_fut, payload_fut); let payload = payload.map(|full_payload| { if full_payload.fee_recipient() != suggested_fee_recipient { @@ -1020,7 +1057,7 @@ impl ExecutionLayer { let result = self .engine() - .request(|engine| engine.api.new_payload_v1(execution_payload.clone())) + .request(|engine| engine.api.new_payload(execution_payload.clone())) .await; if let Ok(status) = &result { @@ -1150,7 +1187,7 @@ impl ExecutionLayer { } } - let forkchoice_state = ForkChoiceState { + let forkchoice_state = ForkchoiceState { head_block_hash, safe_block_hash: justified_block_hash, finalized_block_hash, diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 37eb8ba8f48..f2282c6039d 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,4 +1,4 @@ -use crate::engines::ForkChoiceState; +use crate::engines::ForkchoiceState; use crate::{ engine_api::{ json_structures::{ @@ -13,8 +13,7 @@ use std::collections::HashMap; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use types::{ - EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge, - Hash256, Uint256, + EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadMerge, 
Hash256, Uint256, }; const GAS_LIMIT: u64 = 16384; @@ -315,7 +314,7 @@ impl ExecutionBlockGenerator { pub fn forkchoice_updated_v1( &mut self, - forkchoice_state: ForkChoiceState, + forkchoice_state: ForkchoiceState, payload_attributes: Option, ) -> Result { if let Some(payload) = self @@ -369,7 +368,6 @@ impl ExecutionBlockGenerator { let id = payload_id_from_u64(self.next_payload_id); self.next_payload_id += 1; - // FIXME: think about how to test different forks let mut execution_payload = match &attributes { PayloadAttributes::V1(pa) => ExecutionPayload::Merge(ExecutionPayloadMerge { parent_hash: forkchoice_state.head_block_hash, @@ -388,7 +386,8 @@ impl ExecutionBlockGenerator { transactions: vec![].into(), }), PayloadAttributes::V2(pa) => { - ExecutionPayload::Capella(ExecutionPayloadCapella { + // FIXME: think about how to test different forks + ExecutionPayload::Merge(ExecutionPayloadMerge { parent_hash: forkchoice_state.head_block_hash, fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), @@ -403,14 +402,6 @@ impl ExecutionBlockGenerator { base_fee_per_gas: Uint256::one(), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), - #[cfg(feature = "withdrawals")] - withdrawals: pa - .withdrawals - .iter() - .cloned() - .map(Into::into) - .collect::>() - .into(), }) } }; diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index ba26591baf2..fe765cc9495 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -4,7 +4,7 @@ use crate::json_structures::*; use serde::de::DeserializeOwned; use serde_json::Value as JsonValue; use std::sync::Arc; -use types::EthSpec; +use types::{EthSpec, ForkName}; pub async fn handle_rpc( body: JsonValue, @@ -97,7 +97,8 @@ pub async fn handle_rpc( Some( ctx.execution_block_generator .write() - .new_payload(request.into()), + // FIXME: should this worry about other forks? + .new_payload(request.try_into_execution_payload(ForkName::Merge).unwrap()), ) } else { None @@ -117,10 +118,10 @@ pub async fn handle_rpc( .get_payload(&id) .ok_or_else(|| format!("no payload for id {:?}", id))?; - Ok(serde_json::to_value(JsonExecutionPayload::from(response)).unwrap()) + Ok(serde_json::to_value(JsonExecutionPayloadV1::try_from(response).unwrap()).unwrap()) } ENGINE_FORKCHOICE_UPDATED_V1 => { - let forkchoice_state: JsonForkChoiceStateV1 = get_param(params, 0)?; + let forkchoice_state: JsonForkchoiceStateV1 = get_param(params, 0)?; let payload_attributes: Option = get_param(params, 1)?; let head_block_hash = forkchoice_state.head_block_hash; diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 1323ea3e406..5c69fffbf69 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -26,7 +26,8 @@ use task_executor::TaskExecutor; use tempfile::NamedTempFile; use tree_hash::TreeHash; use types::{ - Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, Hash256, Slot, Uint256, + Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, ForkName, Hash256, Slot, + Uint256, }; #[derive(Clone)] @@ -313,6 +314,10 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { *prev_randao, fee_recipient, forkchoice_update_params, + // TODO: do we need to write a test for this if this is Capella fork? 
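    // For now the mock builder always asks for a Merge-fork payload and passes no
    // withdrawals, so Capella/EIP-4844 builder flows are not exercised here yet.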
+ ForkName::Merge, + #[cfg(feature = "withdrawals")] + None, ) .await .map_err(convert_err)? diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 62336279b06..cadeec1b3d4 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -114,7 +114,7 @@ impl MockExecutionLayer { suggested_fee_recipient: Address::repeat_byte(42), // FIXME: think about adding withdrawals here.. #[cfg(feature = "withdrawals")] - withdrawals: vec![], + withdrawals: Some(vec![]), }) } }, @@ -159,6 +159,10 @@ impl MockExecutionLayer { validator_index, forkchoice_update_params, builder_params, + // FIXME: do we need to consider other forks somehow? What about withdrawals? + ForkName::Merge, + #[cfg(feature = "withdrawals")] + Some(vec![]), &self.spec, ) .await @@ -191,6 +195,10 @@ impl MockExecutionLayer { validator_index, forkchoice_update_params, builder_params, + // FIXME: do we need to consider other forks somehow? What about withdrawals? + ForkName::Merge, + #[cfg(feature = "withdrawals")] + Some(vec![]), &self.spec, ) .await diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index eca086d838f..6ee02b71ba6 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -35,3 +35,5 @@ procinfo = { version = "0.4.2", optional = true } [features] default = ["lighthouse"] lighthouse = ["proto_array", "psutil", "procinfo", "store", "slashing_protection"] +withdrawals = ["store/withdrawals"] +withdrawals-processing = ["store/withdrawals-processing"] \ No newline at end of file diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 5e59a0132c1..753a1939871 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -462,7 +462,7 @@ pub fn compute_timestamp_at_slot( } /// FIXME: add link to this function once the spec is stable -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals")] pub fn get_expected_withdrawals( state: &BeaconState, spec: &ChainSpec, @@ -472,6 +472,10 @@ pub fn get_expected_withdrawals( let mut validator_index = state.next_withdrawal_validator_index()?; let mut withdrawals = vec![]; + if cfg!(not(feature = "withdrawals-processing")) { + return Ok(withdrawals.into()); + } + for _ in 0..state.validators().len() { let validator = state.get_validator(validator_index as usize)?; let balance = *state.balances().get(validator_index as usize).ok_or( diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 8c2ddd13682..9e3562bc717 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -4,15 +4,19 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::testing_spec; use serde_derive::Deserialize; +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +use state_processing::per_block_processing::process_operations::{ + process_bls_to_execution_changes, process_bls_to_execution_changes, +}; use state_processing::{ per_block_processing::{ errors::BlockProcessingError, process_block_header, process_execution_payload, process_operations::{ - altair, base, 
process_attester_slashings, process_bls_to_execution_changes, - process_deposits, process_exits, process_proposer_slashings, + altair, base, process_attester_slashings, process_deposits, process_exits, + process_proposer_slashings, }, - process_sync_aggregate, process_withdrawals, VerifyBlockRoot, VerifySignatures, + process_sync_aggregate, VerifyBlockRoot, VerifySignatures, }, ConsensusContext, }; @@ -340,6 +344,7 @@ impl Operation for BlindedPayload { } } +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] impl Operation for WithdrawalsPayload { fn handler_name() -> String { "withdrawals".into() @@ -372,6 +377,7 @@ impl Operation for WithdrawalsPayload { } } +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] impl Operation for SignedBlsToExecutionChange { fn handler_name() -> String { "bls_to_execution_change".into() diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index a85138be95b..b5923aafe5d 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -21,3 +21,7 @@ deposit_contract = { path = "../../common/deposit_contract" } reqwest = { version = "0.11.0", features = ["json"] } hex = "0.4.2" fork_choice = { path = "../../consensus/fork_choice" } + +[features] +default = [] +withdrawals = [] \ No newline at end of file diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index b3464ec98a3..9ef96687a56 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -16,8 +16,8 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; use tokio::time::sleep; use types::{ - Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, - MainnetEthSpec, PublicKeyBytes, Slot, Uint256, + Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ForkName, FullPayload, + Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256, }; const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(20); @@ -326,6 +326,10 @@ impl TestRig { proposer_index, forkchoice_update_params, builder_params, + // FIXME: think about how to test other forks + ForkName::Merge, + #[cfg(feature = "withdrawals")] + None, &self.spec, ) .await @@ -450,6 +454,10 @@ impl TestRig { proposer_index, forkchoice_update_params, builder_params, + // FIXME: think about how to test other forks + ForkName::Merge, + #[cfg(feature = "withdrawals")] + None, &self.spec, ) .await From 0228b2b42d04915df1df9fafc13fa94302a2a8ae Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 22 Nov 2022 18:10:40 -0500 Subject: [PATCH 072/263] - fix pre-merge block production (#3746) - return `None` on pre-4844 blob requests --- beacon_node/beacon_chain/src/beacon_chain.rs | 21 ++++++++------ beacon_node/execution_layer/src/lib.rs | 30 +++++++++++--------- 2 files changed, 29 insertions(+), 22 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index efedfdae539..c243d50cb3a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3384,13 +3384,15 @@ impl BeaconChain { // // Wait for the execution layer to return an execution payload (if one is required). 
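        // Pre-merge blocks have no payload handle, so the payload is now carried as an
        // `Option` and only unwrapped by the post-Bellatrix block variants further down.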
let prepare_payload_handle = partial_beacon_block.prepare_payload_handle.take(); - let execution_payload = if let Some(prepare_payload_handle) = prepare_payload_handle { - prepare_payload_handle - .await - .map_err(BlockProductionError::TokioJoin)? - .ok_or(BlockProductionError::ShuttingDown)?? + let block_contents = if let Some(prepare_payload_handle) = prepare_payload_handle { + Some( + prepare_payload_handle + .await + .map_err(BlockProductionError::TokioJoin)? + .ok_or(BlockProductionError::ShuttingDown)??, + ) } else { - return Err(BlockProductionError::MissingExecutionPayload); + None }; //FIXME(sean) waiting for the BN<>EE api for this to stabilize @@ -3405,7 +3407,7 @@ impl BeaconChain { move || { chain.complete_partial_beacon_block( partial_beacon_block, - execution_payload, + block_contents, kzg_commitments, verification, ) @@ -3657,7 +3659,7 @@ impl BeaconChain { fn complete_partial_beacon_block>( &self, partial_beacon_block: PartialBeaconBlock, - block_contents: BlockProposalContents, + block_contents: Option>, kzg_commitments: Vec, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { @@ -3737,6 +3739,7 @@ impl BeaconChain { sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? .to_payload() .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, @@ -3759,6 +3762,7 @@ impl BeaconChain { sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? .to_payload() .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, @@ -3783,6 +3787,7 @@ impl BeaconChain { sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? 
.to_payload() .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 2a19c4165e2..0cdce4f129d 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -959,20 +959,22 @@ impl ExecutionLayer { }; let blob_fut = async { - //FIXME(sean) do a fork check here and return None otherwise - // ^ - // well now we have the fork in this function so - // it should be easier to do that now - // - Mark - debug!( - self.log(), - "Issuing engine_getBlobsBundle"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "prev_randao" => ?prev_randao, - "timestamp" => timestamp, - "parent_hash" => ?parent_hash, - ); - Some(engine.api.get_blobs_bundle_v1::(payload_id).await) + match current_fork { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + None + } + ForkName::Eip4844 => { + debug!( + self.log(), + "Issuing engine_getBlobsBundle"; + "suggested_fee_recipient" => ?suggested_fee_recipient, + "prev_randao" => ?prev_randao, + "timestamp" => timestamp, + "parent_hash" => ?parent_hash, + ); + Some(engine.api.get_blobs_bundle_v1::(payload_id).await) + } + } }; let payload_fut = async { debug!( From 28c96035053d6709762e0cbb8f5e934d1f8bc2e7 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Tue, 22 Nov 2022 21:42:58 -0600 Subject: [PATCH 073/263] Stuuupid camelCase (#3748) --- beacon_node/execution_layer/src/engine_api/json_structures.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 99459ec2b13..4f372beda51 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -345,6 +345,7 @@ impl TryFrom> for JsonExecutionPayloadV2 { } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] pub struct JsonWithdrawal { #[serde(with = "eth2_serde_utils::u64_hex_be")] pub index: u64, From e3ccd8fd4abfa0863ddcad660d599450e18ba9a4 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 24 Nov 2022 15:14:06 +1100 Subject: [PATCH 074/263] Two Capella bugfixes (#3749) * Two Capella bugfixes * fix payload default check in fork choice * Revert "fix payload default check in fork choice" This reverts commit e56fefbd05811526af4499711045275db366aa09. 
Co-authored-by: realbigsean --- beacon_node/beacon_chain/src/beacon_chain.rs | 50 ++++++------- .../beacon_chain/src/execution_payload.rs | 2 +- .../src/per_block_processing.rs | 10 ++- .../types/src/execution_payload_header.rs | 4 +- consensus/types/src/payload.rs | 75 ++++++++++++++----- 5 files changed, 88 insertions(+), 53 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index c243d50cb3a..89ccd96b159 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4112,38 +4112,30 @@ impl BeaconChain { return Ok(()); } - #[cfg(feature = "withdrawals")] - let head_state = &self.canonical_head.cached_head().snapshot.beacon_state; #[cfg(feature = "withdrawals")] let withdrawals = match self.spec.fork_name_at_epoch(prepare_epoch) { - ForkName::Base | ForkName::Altair | ForkName::Merge => { - None - }, - ForkName::Capella | ForkName::Eip4844 => match &head_state { - &BeaconState::Capella(_) | &BeaconState::Eip4844(_) => { - // The head_state is already BeaconState::Capella or later - // FIXME(mark) - // Might implement caching here in the future.. - Some(get_expected_withdrawals(head_state, &self.spec)) - } - &BeaconState::Base(_) | &BeaconState::Altair(_) | &BeaconState::Merge(_) => { - // We are the Capella transition block proposer, need advanced state - let mut prepare_state = self - .state_at_slot(prepare_slot, StateSkipConfig::WithoutStateRoots) - .or_else(|e| { - error!(self.log, "Capella Transition Proposer"; "Error Advancing State: " => ?e); - Err(e) - })?; - // FIXME(mark) - // Might implement caching here in the future.. - Some(get_expected_withdrawals(&prepare_state, &self.spec)) - } - }, - }.transpose().or_else(|e| { - error!(self.log, "Error preparing beacon proposer"; "while calculating expected withdrawals" => ?e); + ForkName::Base | ForkName::Altair | ForkName::Merge => None, + ForkName::Capella | ForkName::Eip4844 => { + // We must use the advanced state because balances can change at epoch boundaries + // and balances affect withdrawals. + // FIXME(mark) + // Might implement caching here in the future.. 
+ let prepare_state = self + .state_at_slot(prepare_slot, StateSkipConfig::WithoutStateRoots) + .or_else(|e| { + error!(self.log, "State advance for withdrawals failed"; "error" => ?e); + Err(e) + })?; + Some(get_expected_withdrawals(&prepare_state, &self.spec)) + } + } + .transpose() + .or_else(|e| { + error!(self.log, "Error preparing beacon proposer"; "error" => ?e); Err(e) - }).map(|withdrawals_opt| withdrawals_opt.map(|w| w.into())) - .map_err(Error::PrepareProposerFailed)?; + }) + .map(|withdrawals_opt| withdrawals_opt.map(|w| w.into())) + .map_err(Error::PrepareProposerFailed)?; let payload_attributes = PayloadAttributes::V2(PayloadAttributesV2 { timestamp: self diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index bf920a6dab7..85aedc6592e 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -310,7 +310,7 @@ pub fn validate_execution_payload_for_gossip( } }; - if is_merge_transition_complete || !execution_payload.is_default() { + if is_merge_transition_complete || !execution_payload.is_default_with_empty_roots() { let expected_timestamp = chain .slot_clock .start_of(block.slot()) diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 753a1939871..d1c4cf12ac7 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -428,9 +428,11 @@ pub fn process_execution_payload<'payload, T: EthSpec, Payload: AbstractExecPayl /// repeaetedly write code to treat these errors as false. /// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_complete pub fn is_merge_transition_complete(state: &BeaconState) -> bool { + // We must check defaultness against the payload header with 0x0 roots, as that's what's meant + // by `ExecutionPayloadHeader()` in the spec. state .latest_execution_payload_header() - .map(|header| !header.is_default()) + .map(|header| !header.is_default_with_zero_roots()) .unwrap_or(false) } /// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_block @@ -438,8 +440,12 @@ pub fn is_merge_transition_block>( state: &BeaconState, body: BeaconBlockBodyRef, ) -> bool { + // For execution payloads in blocks (which may be headers) we must check defaultness against + // the payload with `transactions_root` equal to the tree hash of the empty list. 
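    // Otherwise a blinded pre-merge block, whose header carries the hash of the empty
    // transactions list rather than 0x0, would wrongly be treated as the transition block.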
body.execution_payload() - .map(|payload| !is_merge_transition_complete(state) && !payload.is_default()) + .map(|payload| { + !is_merge_transition_complete(state) && !payload.is_default_with_empty_roots() + }) .unwrap_or(false) } /// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_execution_enabled diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 6f6b5aa9535..37547614de4 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -103,9 +103,9 @@ impl ExecutionPayloadHeader { } impl<'a, T: EthSpec> ExecutionPayloadHeaderRef<'a, T> { - pub fn is_default(self) -> bool { + pub fn is_default_with_zero_roots(self) -> bool { map_execution_payload_header_ref!(&'a _, self, |inner, cons| { - let _ = cons(inner); + cons(inner); *inner == Default::default() }) } diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 3081dd1cbe1..2507a9f0eb2 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -40,8 +40,11 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result; - /// Is this a default payload? (pre-merge) - fn is_default(&self) -> bool; + /// Is this a default payload with 0x0 roots for transactions and withdrawals? + fn is_default_with_zero_roots(&self) -> bool; + + /// Is this a default payload with the hash of the empty list for transactions and withdrawals? + fn is_default_with_empty_roots(&self) -> bool; } /// `ExecPayload` functionality the requires ownership. @@ -241,12 +244,17 @@ impl ExecPayload for FullPayload { } } - fn is_default<'a>(&'a self) -> bool { + fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); payload.execution_payload == <_>::default() }) } + + fn is_default_with_empty_roots<'a>(&'a self) -> bool { + // For full payloads the empty/zero distinction does not exist. + self.is_default_with_zero_roots() + } } impl FullPayload { @@ -338,13 +346,17 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { } } - // TODO: can this function be optimized? - fn is_default<'a>(&'a self) -> bool { + fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); payload.execution_payload == <_>::default() }) } + + fn is_default_with_empty_roots(&self) -> bool { + // For full payloads the empty/zero distinction does not exist. + self.is_default_with_zero_roots() + } } impl AbstractExecPayload for FullPayload { @@ -505,11 +517,16 @@ impl ExecPayload for BlindedPayload { } } - fn is_default<'a>(&'a self) -> bool { - map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { - cons(payload); - payload.execution_payload_header == <_>::default() - }) + fn is_default_with_zero_roots<'a>(&'a self) -> bool { + self.to_ref().is_default_with_zero_roots() + } + + // For blinded payloads we must check "defaultness" against the default `ExecutionPayload` + // which has been blinded into an `ExecutionPayloadHeader`, NOT against the default + // `ExecutionPayloadHeader` which has a zeroed out `transactions_root`. The transactions root + // should be the root of the empty list. 
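    // The macro-generated impls below encode this by comparing against
    // `$wrapped_type::from($wrapped_type_full::default())`, i.e. a header derived from a
    // default full payload rather than a default header.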
+ fn is_default_with_empty_roots(&self) -> bool { + self.to_ref().is_default_with_empty_roots() } } @@ -591,24 +608,38 @@ impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { } } - // TODO: can this function be optimized? - fn is_default<'a>(&'a self) -> bool { - map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + fn is_default_with_zero_roots<'a>(&'a self) -> bool { + map_blinded_payload_ref!(&'b _, self, move |payload, cons| { cons(payload); payload.execution_payload_header == <_>::default() }) } + + fn is_default_with_empty_roots<'a>(&'a self) -> bool { + map_blinded_payload_ref!(&'b _, self, move |payload, cons| { + cons(payload); + payload.is_default_with_empty_roots() + }) + } } macro_rules! impl_exec_payload_common { - ($wrapper_type:ident, $wrapped_type_full:ident, $wrapped_header_type:ident, $wrapped_field:ident, $fork_variant:ident, $block_type_variant:ident, $f:block, $g:block) => { + ($wrapper_type:ident, + $wrapped_type:ident, + $wrapped_type_full:ident, + $wrapped_type_header:ident, + $wrapped_field:ident, + $fork_variant:ident, + $block_type_variant:ident, + $f:block, + $g:block) => { impl ExecPayload for $wrapper_type { fn block_type() -> BlockType { BlockType::$block_type_variant } fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - ExecutionPayloadHeader::$fork_variant($wrapped_header_type::from( + ExecutionPayloadHeader::$fork_variant($wrapped_type_header::from( self.$wrapped_field.clone(), )) } @@ -641,8 +672,12 @@ macro_rules! impl_exec_payload_common { self.$wrapped_field.gas_limit } - fn is_default(&self) -> bool { - self.$wrapped_field == $wrapped_type_full::default() + fn is_default_with_zero_roots(&self) -> bool { + self.$wrapped_field == $wrapped_type::default() + } + + fn is_default_with_empty_roots(&self) -> bool { + self.$wrapped_field == $wrapped_type::from($wrapped_type_full::default()) } fn transactions(&self) -> Option<&Transactions> { @@ -657,8 +692,8 @@ macro_rules! impl_exec_payload_common { } } - impl From<$wrapped_type_full> for $wrapper_type { - fn from($wrapped_field: $wrapped_type_full) -> Self { + impl From<$wrapped_type> for $wrapper_type { + fn from($wrapped_field: $wrapped_type) -> Self { Self { $wrapped_field } } } @@ -672,6 +707,7 @@ macro_rules! impl_exec_payload_for_fork { impl_exec_payload_common!( $wrapper_type_header, $wrapped_type_header, + $wrapped_type_full, $wrapped_type_header, execution_payload_header, $fork_variant, @@ -741,6 +777,7 @@ macro_rules! 
impl_exec_payload_for_fork { impl_exec_payload_common!( $wrapper_type_full, $wrapped_type_full, + $wrapped_type_full, $wrapped_type_header, execution_payload, $fork_variant, From 58b54f0a53093395e3d5854e0318423771796bd0 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Thu, 24 Nov 2022 00:41:35 -0500 Subject: [PATCH 075/263] Rename excess blobs and update 4844 json RPC serialization/deserialization (#3745) * rename excess blobs and fix json serialization/deserialization * remove coments --- beacon_node/execution_layer/src/engine_api.rs | 6 +- .../execution_layer/src/engine_api/http.rs | 3 - .../src/engine_api/json_structures.rs | 132 ++++++-------- beacon_node/execution_layer/src/lib.rs | 2 +- consensus/serde_utils/src/lib.rs | 1 + consensus/serde_utils/src/u256_hex_be_opt.rs | 169 ++++++++++++++++++ consensus/types/src/execution_payload.rs | 4 +- .../types/src/execution_payload_header.rs | 8 +- 8 files changed, 236 insertions(+), 89 deletions(-) create mode 100644 consensus/serde_utils/src/u256_hex_be_opt.rs diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 128f23386fb..b1a3cfa4138 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -154,8 +154,8 @@ pub struct ExecutionBlockWithTransactions { pub extra_data: VariableList, pub base_fee_per_gas: Uint256, #[superstruct(only(Eip4844))] - #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub excess_blobs: u64, + #[serde(with = "eth2_serde_utils::u256_hex_be")] + pub excess_data_gas: Uint256, #[serde(rename = "hash")] pub block_hash: ExecutionBlockHash, pub transactions: Vec, @@ -227,7 +227,7 @@ impl From> for ExecutionBlockWithTransactions timestamp: block.timestamp, extra_data: block.extra_data, base_fee_per_gas: block.base_fee_per_gas, - excess_blobs: block.excess_blobs, + excess_data_gas: block.excess_data_gas, block_hash: block.block_hash, transactions: block .transactions diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 446623744e4..2b7728b98d0 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -857,7 +857,6 @@ impl HttpJsonRpc { ) -> Result { let supported_apis = self.get_cached_supported_apis().await?; if supported_apis.new_payload_v2 { - // FIXME: I haven't thought at all about how to handle 4844.. self.new_payload_v2(execution_payload).await } else if supported_apis.new_payload_v1 { self.new_payload_v1(execution_payload).await @@ -875,7 +874,6 @@ impl HttpJsonRpc { ) -> Result, Error> { let supported_apis = self.get_cached_supported_apis().await?; if supported_apis.get_payload_v2 { - // FIXME: I haven't thought at all about how to handle 4844.. self.get_payload_v2(fork_name, payload_id).await } else if supported_apis.new_payload_v1 { self.get_payload_v1(fork_name, payload_id).await @@ -893,7 +891,6 @@ impl HttpJsonRpc { ) -> Result { let supported_apis = self.get_cached_supported_apis().await?; if supported_apis.forkchoice_updated_v2 { - // FIXME: I haven't thought at all about how to handle 4844.. 
self.forkchoice_updated_v2(forkchoice_state, payload_attributes) .await } else if supported_apis.forkchoice_updated_v1 { diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 4f372beda51..0e53a3b0605 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -64,7 +64,7 @@ pub struct JsonPayloadIdResponse { } #[superstruct( - variants(V1, V2, V3), + variants(V1, V2), variant_attributes( derive(Debug, PartialEq, Default, Serialize, Deserialize,), serde(bound = "T: EthSpec", rename_all = "camelCase"), @@ -94,15 +94,18 @@ pub struct JsonExecutionPayload { pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, - #[superstruct(only(V3))] - // FIXME: can't easily make this an option because of custom deserialization.. - #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub excess_blobs: u64, + #[superstruct(only(V2))] + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + #[serde(with = "eth2_serde_utils::u256_hex_be_opt")] + pub excess_data_gas: Option, pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: VariableList, T::MaxTransactionsPerPayload>, - #[superstruct(only(V2, V3))] + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + #[superstruct(only(V2))] pub withdrawals: Option>, } @@ -175,81 +178,33 @@ impl JsonExecutionPayload { }) .ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadCapella".to_string()))? })), - ForkName::Eip4844 => Err(Error::UnsupportedForkVariant("JsonExecutionPayloadV2 -> ExecutionPayloadEip4844 not implemented yet as it might never be".to_string())), - _ => Err(Error::UnsupportedForkVariant(format!("Unsupported conversion from JsonExecutionPayloadV2 for {}", fork_name))), - } - JsonExecutionPayload::V3(v3) => match fork_name { - ForkName::Merge => Ok(ExecutionPayload::Merge(ExecutionPayloadMerge { - parent_hash: v3.parent_hash, - fee_recipient: v3.fee_recipient, - state_root: v3.state_root, - receipts_root: v3.receipts_root, - logs_bloom: v3.logs_bloom, - prev_randao: v3.prev_randao, - block_number: v3.block_number, - gas_limit: v3.gas_limit, - gas_used: v3.gas_used, - timestamp: v3.timestamp, - extra_data: v3.extra_data, - base_fee_per_gas: v3.base_fee_per_gas, - block_hash: v3.block_hash, - transactions: v3.transactions, - })), - ForkName::Capella => Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { - parent_hash: v3.parent_hash, - fee_recipient: v3.fee_recipient, - state_root: v3.state_root, - receipts_root: v3.receipts_root, - logs_bloom: v3.logs_bloom, - prev_randao: v3.prev_randao, - block_number: v3.block_number, - gas_limit: v3.gas_limit, - gas_used: v3.gas_used, - timestamp: v3.timestamp, - extra_data: v3.extra_data, - base_fee_per_gas: v3.base_fee_per_gas, - block_hash: v3.block_hash, - transactions: v3.transactions, - #[cfg(feature = "withdrawals")] - withdrawals: v3 - .withdrawals - .map(|v| { - Into::>::into(v) - .into_iter() - .map(Into::into) - .collect::>() - .into() - }) - .ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV3 -> ExecutionPayloadCapella".to_string()))? 
- })), ForkName::Eip4844 => Ok(ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { - parent_hash: v3.parent_hash, - fee_recipient: v3.fee_recipient, - state_root: v3.state_root, - receipts_root: v3.receipts_root, - logs_bloom: v3.logs_bloom, - prev_randao: v3.prev_randao, - block_number: v3.block_number, - gas_limit: v3.gas_limit, - gas_used: v3.gas_used, - timestamp: v3.timestamp, - extra_data: v3.extra_data, - base_fee_per_gas: v3.base_fee_per_gas, - // FIXME: excess_blobs probably will be an option whenever the engine API is finalized - excess_blobs: v3.excess_blobs, - block_hash: v3.block_hash, - transactions: v3.transactions, + parent_hash: v2.parent_hash, + fee_recipient: v2.fee_recipient, + state_root: v2.state_root, + receipts_root: v2.receipts_root, + logs_bloom: v2.logs_bloom, + prev_randao: v2.prev_randao, + block_number: v2.block_number, + gas_limit: v2.gas_limit, + gas_used: v2.gas_used, + timestamp: v2.timestamp, + extra_data: v2.extra_data, + base_fee_per_gas: v2.base_fee_per_gas, + excess_data_gas: v2.excess_data_gas.ok_or(Error::BadConversion("Null `excess_data_gas` field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))?, + block_hash: v2.block_hash, + transactions: v2.transactions, #[cfg(feature = "withdrawals")] - withdrawals: v3 + withdrawals: v2 .withdrawals .map(|v| { - Vec::from(v) + Into::>::into(v) .into_iter() .map(Into::into) .collect::>() .into() }) - .ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV3 -> ExecutionPayloadEip4844".to_string()))?, + .ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))? })), _ => Err(Error::UnsupportedForkVariant(format!("Unsupported conversion from JsonExecutionPayloadV2 for {}", fork_name))), } @@ -306,6 +261,7 @@ impl TryFrom> for JsonExecutionPayloadV2 { timestamp: merge.timestamp, extra_data: merge.extra_data, base_fee_per_gas: merge.base_fee_per_gas, + excess_data_gas: None, block_hash: merge.block_hash, transactions: merge.transactions, withdrawals: None, @@ -323,6 +279,7 @@ impl TryFrom> for JsonExecutionPayloadV2 { timestamp: capella.timestamp, extra_data: capella.extra_data, base_fee_per_gas: capella.base_fee_per_gas, + excess_data_gas: None, block_hash: capella.block_hash, transactions: capella.transactions, #[cfg(feature = "withdrawals")] @@ -336,10 +293,33 @@ impl TryFrom> for JsonExecutionPayloadV2 { #[cfg(not(feature = "withdrawals"))] withdrawals: None, }), - ExecutionPayload::Eip4844(_) => Err(Error::UnsupportedForkVariant(format!( - "Unsupported conversion to JsonExecutionPayloadV1 for {}", - ForkName::Eip4844 - ))), + ExecutionPayload::Eip4844(eip4844) => Ok(JsonExecutionPayloadV2 { + parent_hash: eip4844.parent_hash, + fee_recipient: eip4844.fee_recipient, + state_root: eip4844.state_root, + receipts_root: eip4844.receipts_root, + logs_bloom: eip4844.logs_bloom, + prev_randao: eip4844.prev_randao, + block_number: eip4844.block_number, + gas_limit: eip4844.gas_limit, + gas_used: eip4844.gas_used, + timestamp: eip4844.timestamp, + extra_data: eip4844.extra_data, + base_fee_per_gas: eip4844.base_fee_per_gas, + excess_data_gas: Some(eip4844.excess_data_gas), + block_hash: eip4844.block_hash, + transactions: eip4844.transactions, + #[cfg(feature = "withdrawals")] + withdrawals: Some( + Vec::from(eip4844.withdrawals) + .into_iter() + .map(Into::into) + .collect::>() + .into(), + ), + #[cfg(not(feature = "withdrawals"))] + withdrawals: None, + }), } } } diff --git 
a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 0cdce4f129d..c90ed291d56 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1579,7 +1579,7 @@ impl ExecutionLayer { timestamp: eip4844_block.timestamp, extra_data: eip4844_block.extra_data, base_fee_per_gas: eip4844_block.base_fee_per_gas, - excess_blobs: eip4844_block.excess_blobs, + excess_data_gas: eip4844_block.excess_data_gas, block_hash: eip4844_block.block_hash, transactions, #[cfg(feature = "withdrawals")] diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs index 92b5966c9a0..75fd6009b75 100644 --- a/consensus/serde_utils/src/lib.rs +++ b/consensus/serde_utils/src/lib.rs @@ -7,6 +7,7 @@ pub mod json_str; pub mod list_of_bytes_lists; pub mod quoted_u64_vec; pub mod u256_hex_be; +pub mod u256_hex_be_opt; pub mod u32_hex; pub mod u64_hex_be; pub mod u8_hex; diff --git a/consensus/serde_utils/src/u256_hex_be_opt.rs b/consensus/serde_utils/src/u256_hex_be_opt.rs new file mode 100644 index 00000000000..8eadbf0243f --- /dev/null +++ b/consensus/serde_utils/src/u256_hex_be_opt.rs @@ -0,0 +1,169 @@ +use ethereum_types::U256; + +use serde::de::Visitor; +use serde::{de, Deserializer, Serialize, Serializer}; +use std::fmt; +use std::str::FromStr; + +pub fn serialize(num: &Option, serializer: S) -> Result +where + S: Serializer, +{ + num.serialize(serializer) +} + +pub struct U256Visitor; + +impl<'de> Visitor<'de> for U256Visitor { + type Value = String; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a well formatted hex string") + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + if !value.starts_with("0x") { + return Err(de::Error::custom("must start with 0x")); + } + let stripped = &value[2..]; + if stripped.is_empty() { + Err(de::Error::custom(format!( + "quantity cannot be {:?}", + stripped + ))) + } else if stripped == "0" { + Ok(value.to_string()) + } else if stripped.starts_with('0') { + Err(de::Error::custom("cannot have leading zero")) + } else { + Ok(value.to_string()) + } + } +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let decoded = deserializer.deserialize_string(U256Visitor)?; + + Some( + U256::from_str(&decoded) + .map_err(|e| de::Error::custom(format!("Invalid U256 string: {}", e))), + ) + .transpose() +} + +#[cfg(test)] +mod test { + use ethereum_types::U256; + use serde::{Deserialize, Serialize}; + use serde_json; + + #[derive(Debug, PartialEq, Serialize, Deserialize)] + #[serde(transparent)] + struct Wrapper { + #[serde(with = "super")] + val: Option, + } + + #[test] + fn encoding() { + assert_eq!( + &serde_json::to_string(&Wrapper { + val: Some(0.into()) + }) + .unwrap(), + "\"0x0\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { + val: Some(1.into()) + }) + .unwrap(), + "\"0x1\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { + val: Some(256.into()) + }) + .unwrap(), + "\"0x100\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { + val: Some(65.into()) + }) + .unwrap(), + "\"0x41\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { + val: Some(1024.into()) + }) + .unwrap(), + "\"0x400\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { + val: Some(U256::max_value() - 1) + }) + .unwrap(), + "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { + val: 
Some(U256::max_value()) + }) + .unwrap(), + "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" + ); + } + + #[test] + fn decoding() { + assert_eq!( + serde_json::from_str::("\"0x0\"").unwrap(), + Wrapper { + val: Some(0.into()) + }, + ); + assert_eq!( + serde_json::from_str::("\"0x41\"").unwrap(), + Wrapper { + val: Some(65.into()) + }, + ); + assert_eq!( + serde_json::from_str::("\"0x400\"").unwrap(), + Wrapper { + val: Some(1024.into()) + }, + ); + assert_eq!( + serde_json::from_str::( + "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" + ) + .unwrap(), + Wrapper { + val: Some(U256::max_value() - 1) + }, + ); + assert_eq!( + serde_json::from_str::( + "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" + ) + .unwrap(), + Wrapper { + val: Some(U256::max_value()) + }, + ); + serde_json::from_str::("\"0x\"").unwrap_err(); + serde_json::from_str::("\"0x0400\"").unwrap_err(); + serde_json::from_str::("\"400\"").unwrap_err(); + serde_json::from_str::("\"ff\"").unwrap_err(); + } +} diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 6036973d5e2..fa6348bdce3 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -74,9 +74,9 @@ pub struct ExecutionPayload { #[superstruct(getter(copy))] pub base_fee_per_gas: Uint256, #[superstruct(only(Eip4844))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "eth2_serde_utils::quoted_u256")] #[superstruct(getter(copy))] - pub excess_blobs: u64, + pub excess_data_gas: Uint256, #[superstruct(getter(copy))] pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 37547614de4..a9708153ca3 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -68,9 +68,9 @@ pub struct ExecutionPayloadHeader { #[superstruct(getter(copy))] pub base_fee_per_gas: Uint256, #[superstruct(only(Eip4844))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "eth2_serde_utils::quoted_u256")] #[superstruct(getter(copy))] - pub excess_blobs: u64, + pub excess_data_gas: Uint256, #[superstruct(getter(copy))] pub block_hash: ExecutionBlockHash, #[superstruct(getter(copy))] @@ -150,7 +150,7 @@ impl ExecutionPayloadHeaderCapella { extra_data: self.extra_data.clone(), base_fee_per_gas: self.base_fee_per_gas, // TODO: verify if this is correct - excess_blobs: 0, + excess_data_gas: Uint256::zero(), block_hash: self.block_hash, transactions_root: self.transactions_root, #[cfg(feature = "withdrawals")] @@ -216,7 +216,7 @@ impl From> for ExecutionPayloadHeaderEip4 timestamp: payload.timestamp, extra_data: payload.extra_data, base_fee_per_gas: payload.base_fee_per_gas, - excess_blobs: payload.excess_blobs, + excess_data_gas: payload.excess_data_gas, block_hash: payload.block_hash, transactions_root: payload.transactions.tree_hash_root(), #[cfg(feature = "withdrawals")] From 788b337951b85c3db3564cba8cc9a2115d82eeaf Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 25 Nov 2022 07:09:26 +1100 Subject: [PATCH 076/263] Op pool and gossip for BLS to execution changes (#3726) --- beacon_node/Cargo.toml | 7 +- beacon_node/beacon_chain/Cargo.toml | 7 +- beacon_node/beacon_chain/src/beacon_chain.rs | 42 ++++++ beacon_node/beacon_chain/src/builder.rs | 2 + .../beacon_chain/src/canonical_head.rs | 
8 +- beacon_node/beacon_chain/src/errors.rs | 7 +- .../beacon_chain/src/observed_operations.rs | 18 ++- beacon_node/http_api/Cargo.toml | 3 + beacon_node/http_api/src/lib.rs | 61 ++++++++- .../src/service/gossip_cache.rs | 13 ++ .../lighthouse_network/src/service/mod.rs | 1 + .../lighthouse_network/src/service/utils.rs | 1 + .../lighthouse_network/src/types/pubsub.rs | 22 ++- .../lighthouse_network/src/types/topics.rs | 34 ++--- .../network/src/beacon_processor/mod.rs | 62 ++++++++- .../beacon_processor/worker/gossip_methods.rs | 64 ++++++++- beacon_node/network/src/metrics.rs | 13 ++ beacon_node/network/src/router/mod.rs | 12 ++ beacon_node/network/src/router/processor.rs | 17 ++- beacon_node/operation_pool/Cargo.toml | 3 + beacon_node/operation_pool/src/lib.rs | 127 ++++++++++++++++-- beacon_node/operation_pool/src/persistence.rs | 3 +- .../block_signature_verifier.rs | 22 +++ .../state_processing/src/verify_operation.rs | 53 +++++++- .../types/src/bls_to_execution_change.rs | 2 - .../src/signed_bls_to_execution_change.rs | 2 - testing/ef_tests/Makefile | 2 +- 27 files changed, 539 insertions(+), 69 deletions(-) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 093f09949c4..18973cb9d45 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -14,7 +14,12 @@ node_test_rig = { path = "../testing/node_test_rig" } [features] write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing. withdrawals = ["beacon_chain/withdrawals", "types/withdrawals", "store/withdrawals", "execution_layer/withdrawals"] -withdrawals-processing = ["beacon_chain/withdrawals-processing", "store/withdrawals-processing", "execution_layer/withdrawals-processing"] +withdrawals-processing = [ + "beacon_chain/withdrawals-processing", + "store/withdrawals-processing", + "execution_layer/withdrawals-processing", + "http_api/withdrawals-processing", +] [dependencies] eth2_config = { path = "../common/eth2_config" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 39ff16c6b74..6d768476e6a 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -11,7 +11,12 @@ write_ssz_files = [] # Writes debugging .ssz files to /tmp during block process participation_metrics = [] # Exposes validator participation metrics to Prometheus. fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable withdrawals = ["state_processing/withdrawals", "types/withdrawals", "store/withdrawals", "execution_layer/withdrawals"] -withdrawals-processing = ["state_processing/withdrawals-processing", "store/withdrawals-processing", "execution_layer/withdrawals-processing"] +withdrawals-processing = [ + "state_processing/withdrawals-processing", + "store/withdrawals-processing", + "execution_layer/withdrawals-processing", + "operation_pool/withdrawals-processing" +] [dev-dependencies] maplit = "1.0.2" diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 89ccd96b159..51aed941f11 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -341,6 +341,10 @@ pub struct BeaconChain { /// Maintains a record of which validators we've seen attester slashings for. pub(crate) observed_attester_slashings: Mutex, T::EthSpec>>, + /// Maintains a record of which validators we've seen BLS to execution changes for. 
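    /// At most one change per validator is observed; any repeat is reported as already
    /// known during gossip verification.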
+ #[cfg(feature = "withdrawals-processing")] + pub(crate) observed_bls_to_execution_changes: + Mutex>, /// Provides information from the Ethereum 1 (PoW) chain. pub eth1_chain: Option>, /// Interfaces with the execution client. @@ -2181,6 +2185,42 @@ impl BeaconChain { } } + /// Verify a signed BLS to exection change before allowing it to propagate on the gossip network. + pub fn verify_bls_to_execution_change_for_gossip( + &self, + bls_to_execution_change: SignedBlsToExecutionChange, + ) -> Result, Error> { + #[cfg(feature = "withdrawals-processing")] + { + let wall_clock_state = self.wall_clock_state()?; + Ok(self + .observed_bls_to_execution_changes + .lock() + .verify_and_observe(bls_to_execution_change, &wall_clock_state, &self.spec)?) + } + + #[cfg(not(feature = "withdrawals-processing"))] + { + drop(bls_to_execution_change); + Ok(ObservationOutcome::AlreadyKnown) + } + } + + /// Import a BLS to execution change to the op pool. + pub fn import_bls_to_execution_change( + &self, + bls_to_execution_change: SigVerifiedOp, + ) { + if self.eth1_chain.is_some() { + #[cfg(feature = "withdrawals-processing")] + self.op_pool + .insert_bls_to_execution_change(bls_to_execution_change); + + #[cfg(not(feature = "withdrawals-processing"))] + drop(bls_to_execution_change); + } + } + /// Attempt to obtain sync committee duties from the head. pub fn sync_committee_duties_from_head( &self, @@ -3491,6 +3531,8 @@ impl BeaconChain { let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; let deposits = eth1_chain.deposits_for_block_inclusion(&state, ð1_data, &self.spec)?; + + #[cfg(feature = "withdrawals")] let bls_to_execution_changes = self .op_pool .get_bls_to_execution_changes(&state, &self.spec); diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 58bbb2b5c6a..116a0c39800 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -780,6 +780,8 @@ where observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), + #[cfg(feature = "withdrawals-processing")] + observed_bls_to_execution_changes: <_>::default(), eth1_chain: self.eth1_chain, execution_layer: self.execution_layer, genesis_validators_root, diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index c9bd6db0e67..1aa8d8715fd 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -907,8 +907,12 @@ impl BeaconChain { .execution_status .is_optimistic_or_invalid(); - self.op_pool - .prune_all(&new_snapshot.beacon_state, self.epoch()?); + self.op_pool.prune_all( + &new_snapshot.beacon_block, + &new_snapshot.beacon_state, + self.epoch()?, + &self.spec, + ); self.observed_block_producers.write().prune( new_view diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index e4d00d9ca6e..60282426a5a 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -17,8 +17,9 @@ use ssz_types::Error as SszTypesError; use state_processing::{ block_signature_verifier::Error as BlockSignatureVerifierError, per_block_processing::errors::{ - AttestationValidationError, AttesterSlashingValidationError, ExitValidationError, - ProposerSlashingValidationError, SyncCommitteeMessageValidationError, + AttestationValidationError, AttesterSlashingValidationError, + BlsExecutionChangeValidationError, 
ExitValidationError, ProposerSlashingValidationError, + SyncCommitteeMessageValidationError, }, signature_sets::Error as SignatureSetError, state_advance::Error as StateAdvanceError, @@ -70,6 +71,7 @@ pub enum BeaconChainError { ExitValidationError(ExitValidationError), ProposerSlashingValidationError(ProposerSlashingValidationError), AttesterSlashingValidationError(AttesterSlashingValidationError), + BlsExecutionChangeValidationError(BlsExecutionChangeValidationError), StateSkipTooLarge { start_slot: Slot, requested_slot: Slot, @@ -212,6 +214,7 @@ easy_from_to!(SyncCommitteeMessageValidationError, BeaconChainError); easy_from_to!(ExitValidationError, BeaconChainError); easy_from_to!(ProposerSlashingValidationError, BeaconChainError); easy_from_to!(AttesterSlashingValidationError, BeaconChainError); +easy_from_to!(BlsExecutionChangeValidationError, BeaconChainError); easy_from_to!(SszTypesError, BeaconChainError); easy_from_to!(OpPoolError, BeaconChainError); easy_from_to!(NaiveAggregationError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/observed_operations.rs b/beacon_node/beacon_chain/src/observed_operations.rs index 8d8272b67d7..5781f9b5b10 100644 --- a/beacon_node/beacon_chain/src/observed_operations.rs +++ b/beacon_node/beacon_chain/src/observed_operations.rs @@ -1,5 +1,5 @@ use derivative::Derivative; -use smallvec::SmallVec; +use smallvec::{smallvec, SmallVec}; use ssz::{Decode, Encode}; use state_processing::{SigVerifiedOp, VerifyOperation}; use std::collections::HashSet; @@ -9,6 +9,9 @@ use types::{ SignedVoluntaryExit, Slot, }; +#[cfg(feature = "withdrawals-processing")] +use types::SignedBlsToExecutionChange; + /// Number of validator indices to store on the stack in `observed_validators`. pub const SMALL_VEC_SIZE: usize = 8; @@ -39,7 +42,7 @@ pub enum ObservationOutcome { AlreadyKnown, } -/// Trait for exits and slashings which can be observed using `ObservedOperations`. +/// Trait for operations which can be observed using `ObservedOperations`. pub trait ObservableOperation: VerifyOperation + Sized { /// The set of validator indices involved in this operation. 
/// @@ -49,13 +52,13 @@ pub trait ObservableOperation: VerifyOperation + Sized { impl ObservableOperation for SignedVoluntaryExit { fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> { - std::iter::once(self.message.validator_index).collect() + smallvec![self.message.validator_index] } } impl ObservableOperation for ProposerSlashing { fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> { - std::iter::once(self.signed_header_1.message.proposer_index).collect() + smallvec![self.signed_header_1.message.proposer_index] } } @@ -80,6 +83,13 @@ impl ObservableOperation for AttesterSlashing { } } +#[cfg(feature = "withdrawals-processing")] +impl ObservableOperation for SignedBlsToExecutionChange { + fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> { + smallvec![self.message.validator_index] + } +} + impl, E: EthSpec> ObservedOperations { pub fn verify_and_observe( &mut self, diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index fedd66c5404..cfd572083d4 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -5,6 +5,9 @@ authors = ["Paul Hauner "] edition = "2021" autotests = false # using a single test binary compiles faster +[features] +withdrawals-processing = [] + [dependencies] warp = { version = "0.3.2", features = ["tls"] } serde = { version = "1.0.116", features = ["derive"] } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index a747430eee1..e26bbe6b334 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -49,9 +49,9 @@ use types::{ Attestation, AttestationData, AttesterSlashing, BeaconStateError, BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, - SignedBeaconBlock, SignedBlindedBeaconBlock, SignedContributionAndProof, - SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, - SyncContributionData, + SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, + SyncCommitteeMessage, SyncContributionData, }; use version::{ add_consensus_version_header, execution_optimistic_fork_versioned_response, @@ -1536,6 +1536,59 @@ pub fn serve( }, ); + // GET beacon/pool/bls_to_execution_changes + let get_beacon_pool_bls_to_execution_changes = beacon_pool_path + .clone() + .and(warp::path("bls_to_execution_changes")) + .and(warp::path::end()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let address_changes = chain.op_pool.get_all_bls_to_execution_changes(); + Ok(api_types::GenericResponse::from(address_changes)) + }) + }); + + // POST beacon/pool/bls_to_execution_changes + let post_beacon_pool_bls_to_execution_changes = beacon_pool_path + .clone() + .and(warp::path("bls_to_execution_changes")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc>, + address_change: SignedBlsToExecutionChange, + network_tx: UnboundedSender>| { + blocking_json_task(move || { + let outcome = chain + .verify_bls_to_execution_change_for_gossip(address_change) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + if let ObservationOutcome::New(address_change) = outcome { + #[cfg(feature = "withdrawals-processing")] + { + publish_pubsub_message( + &network_tx, + 
PubsubMessage::BlsToExecutionChange(Box::new( + address_change.as_inner().clone(), + )), + )?; + } + drop(network_tx); + + chain.import_bls_to_execution_change(address_change); + } + + Ok(()) + }) + }, + ); + // GET beacon/deposit_snapshot let get_beacon_deposit_snapshot = eth_v1 .and(warp::path("beacon")) @@ -3170,6 +3223,7 @@ pub fn serve( .or(get_beacon_pool_attester_slashings.boxed()) .or(get_beacon_pool_proposer_slashings.boxed()) .or(get_beacon_pool_voluntary_exits.boxed()) + .or(get_beacon_pool_bls_to_execution_changes.boxed()) .or(get_beacon_deposit_snapshot.boxed()) .or(get_config_fork_schedule.boxed()) .or(get_config_spec.boxed()) @@ -3218,6 +3272,7 @@ pub fn serve( .or(post_beacon_pool_proposer_slashings.boxed()) .or(post_beacon_pool_voluntary_exits.boxed()) .or(post_beacon_pool_sync_committees.boxed()) + .or(post_beacon_pool_bls_to_execution_changes.boxed()) .or(post_validator_duties_attester.boxed()) .or(post_validator_duties_sync.boxed()) .or(post_validator_aggregate_and_proofs.boxed()) diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 665e383f206..58816251b88 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -36,6 +36,8 @@ pub struct GossipCache { signed_contribution_and_proof: Option, /// Timeout for sync committee messages. sync_committee_message: Option, + /// Timeout for signed BLS to execution changes. + bls_to_execution_change: Option, } #[derive(Default)] @@ -59,6 +61,8 @@ pub struct GossipCacheBuilder { signed_contribution_and_proof: Option, /// Timeout for sync committee messages. sync_committee_message: Option, + /// Timeout for signed BLS to execution changes. + bls_to_execution_change: Option, } #[allow(dead_code)] @@ -117,6 +121,12 @@ impl GossipCacheBuilder { self } + /// Timeout for BLS to execution change messages. 
+ pub fn bls_to_execution_change_timeout(mut self, timeout: Duration) -> Self { + self.bls_to_execution_change = Some(timeout); + self + } + pub fn build(self) -> GossipCache { let GossipCacheBuilder { default_timeout, @@ -129,6 +139,7 @@ impl GossipCacheBuilder { attester_slashing, signed_contribution_and_proof, sync_committee_message, + bls_to_execution_change, } = self; GossipCache { expirations: DelayQueue::default(), @@ -142,6 +153,7 @@ impl GossipCacheBuilder { attester_slashing: attester_slashing.or(default_timeout), signed_contribution_and_proof: signed_contribution_and_proof.or(default_timeout), sync_committee_message: sync_committee_message.or(default_timeout), + bls_to_execution_change: bls_to_execution_change.or(default_timeout), } } } @@ -165,6 +177,7 @@ impl GossipCache { GossipKind::AttesterSlashing => self.attester_slashing, GossipKind::SignedContributionAndProof => self.signed_contribution_and_proof, GossipKind::SyncCommitteeMessage(_) => self.sync_committee_message, + GossipKind::BlsToExecutionChange => self.bls_to_execution_change, }; let expire_timeout = match expire_timeout { Some(expire_timeout) => expire_timeout, diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 5e770db2e98..65e805ca8b8 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -199,6 +199,7 @@ impl Network { .attester_slashing_timeout(half_epoch * 2) // .signed_contribution_and_proof_timeout(timeout) // Do not retry // .sync_committee_message_timeout(timeout) // Do not retry + .bls_to_execution_change_timeout(half_epoch * 2) .build() }; diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 8073ae77683..4e81138489f 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -253,6 +253,7 @@ pub(crate) fn create_whitelist_filter( add(ProposerSlashing); add(AttesterSlashing); add(SignedContributionAndProof); + add(BlsToExecutionChange); for id in 0..attestation_subnet_count { add(Attestation(SubnetId::new(id))); } diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 1b14c93c094..02f2bfff1df 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -15,7 +15,8 @@ use types::{ Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ForkContext, ForkName, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, SignedBeaconBlockEip4844, SignedBeaconBlockMerge, - SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; /// TODO(pawan): move this to consensus/types? strictly not a consensus type @@ -48,6 +49,8 @@ pub enum PubsubMessage { SignedContributionAndProof(Box>), /// Gossipsub message providing notification of unaggregated sync committee signatures with its subnet id. SyncCommitteeMessage(Box<(SyncSubnetId, SyncCommitteeMessage)>), + /// Gossipsub message for BLS to execution change messages. 
+ BlsToExecutionChange(Box), } // Implements the `DataTransform` trait of gossipsub to employ snappy compression @@ -133,6 +136,7 @@ impl PubsubMessage { PubsubMessage::AttesterSlashing(_) => GossipKind::AttesterSlashing, PubsubMessage::SignedContributionAndProof(_) => GossipKind::SignedContributionAndProof, PubsubMessage::SyncCommitteeMessage(data) => GossipKind::SyncCommitteeMessage(data.0), + PubsubMessage::BlsToExecutionChange(_) => GossipKind::BlsToExecutionChange, } } @@ -258,6 +262,14 @@ impl PubsubMessage { sync_committee, )))) } + GossipKind::BlsToExecutionChange => { + let bls_to_execution_change = + SignedBlsToExecutionChange::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::BlsToExecutionChange(Box::new( + bls_to_execution_change, + ))) + } } } } @@ -280,6 +292,7 @@ impl PubsubMessage { PubsubMessage::Attestation(data) => data.1.as_ssz_bytes(), PubsubMessage::SignedContributionAndProof(data) => data.as_ssz_bytes(), PubsubMessage::SyncCommitteeMessage(data) => data.1.as_ssz_bytes(), + PubsubMessage::BlsToExecutionChange(data) => data.as_ssz_bytes(), } } } @@ -320,6 +333,13 @@ impl std::fmt::Display for PubsubMessage { PubsubMessage::SyncCommitteeMessage(data) => { write!(f, "Sync committee message: subnet_id: {}", *data.0) } + PubsubMessage::BlsToExecutionChange(data) => { + write!( + f, + "Signed BLS to execution change: validator_index: {}, address: {:?}", + data.message.validator_index, data.message.to_execution_address + ) + } } } } diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 8cecc2e6822..5d020e132cb 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -19,8 +19,9 @@ pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; pub const SIGNED_CONTRIBUTION_AND_PROOF_TOPIC: &str = "sync_committee_contribution_and_proof"; pub const SYNC_COMMITTEE_PREFIX_TOPIC: &str = "sync_committee_"; +pub const BLS_TO_EXECUTION_CHANGE_TOPIC: &str = "bls_to_execution_change"; -pub const CORE_TOPICS: [GossipKind; 7] = [ +pub const CORE_TOPICS: [GossipKind; 8] = [ GossipKind::BeaconBlock, GossipKind::BeaconBlocksAndBlobsSidecar, GossipKind::BeaconAggregateAndProof, @@ -28,6 +29,7 @@ pub const CORE_TOPICS: [GossipKind; 7] = [ GossipKind::ProposerSlashing, GossipKind::AttesterSlashing, GossipKind::SignedContributionAndProof, + GossipKind::BlsToExecutionChange, ]; /// A gossipsub topic which encapsulates the type of messages that should be sent and received over @@ -67,6 +69,8 @@ pub enum GossipKind { /// Topic for publishing unaggregated sync committee signatures on a particular subnet. #[strum(serialize = "sync_committee")] SyncCommitteeMessage(SyncSubnetId), + /// Topic for validator messages which change their withdrawal address. 
+ BlsToExecutionChange, } impl std::fmt::Display for GossipKind { @@ -141,6 +145,7 @@ impl GossipTopic { VOLUNTARY_EXIT_TOPIC => GossipKind::VoluntaryExit, PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing, ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing, + BLS_TO_EXECUTION_CHANGE_TOPIC => GossipKind::BlsToExecutionChange, topic => match committee_topic_index(topic) { Some(subnet) => match subnet { Subnet::Attestation(s) => GossipKind::Attestation(s), @@ -177,30 +182,8 @@ impl From for Topic { impl From for String { fn from(topic: GossipTopic) -> String { - let encoding = match topic.encoding { - GossipEncoding::SSZSnappy => SSZ_SNAPPY_ENCODING_POSTFIX, - }; - - let kind = match topic.kind { - GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), - GossipKind::BeaconBlocksAndBlobsSidecar => BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC.into(), - GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), - GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), - GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), - GossipKind::AttesterSlashing => ATTESTER_SLASHING_TOPIC.into(), - GossipKind::Attestation(index) => format!("{}{}", BEACON_ATTESTATION_PREFIX, *index,), - GossipKind::SignedContributionAndProof => SIGNED_CONTRIBUTION_AND_PROOF_TOPIC.into(), - GossipKind::SyncCommitteeMessage(index) => { - format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index) - } - }; - format!( - "/{}/{}/{}/{}", - TOPIC_PREFIX, - hex::encode(topic.fork_digest), - kind, - encoding - ) + // Use the `Display` implementation below. + topic.to_string() } } @@ -222,6 +205,7 @@ impl std::fmt::Display for GossipTopic { GossipKind::SyncCommitteeMessage(index) => { format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index) } + GossipKind::BlsToExecutionChange => BLS_TO_EXECUTION_CHANGE_TOPIC.into(), }; write!( f, diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index dd28b15c0cf..ba71f0d95d5 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -64,8 +64,8 @@ use task_executor::TaskExecutor; use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, Hash256, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, - SyncCommitteeMessage, SyncSubnetId, + SignedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, + SubnetId, SyncCommitteeMessage, SyncSubnetId, }; use work_reprocessing_queue::{ spawn_reprocess_scheduler, QueuedAggregate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, @@ -163,6 +163,12 @@ const MAX_BLOBS_BY_RANGE_QUEUE_LEN: usize = 1_024; /// will be stored before we start dropping them. const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024; +/// Maximum number of `SignedBlsToExecutionChange` messages to queue before dropping them. +/// +/// This value is set high to accommodate the large spike that is expected immediately after Capella +/// is activated. +const MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN: usize = 16_384; + /// The name of the manager tokio task. 
const MANAGER_TASK_NAME: &str = "beacon_processor_manager"; @@ -206,6 +212,7 @@ pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; +pub const GOSSIP_BLS_TO_EXECUTION_CHANGE: &str = "gossip_bls_to_execution_change"; /// A simple first-in-first-out queue with a maximum length. struct FifoQueue { @@ -515,6 +522,22 @@ impl WorkEvent { } } + /// Create a new `Work` event for some BLS to execution change. + pub fn gossip_bls_to_execution_change( + message_id: MessageId, + peer_id: PeerId, + bls_to_execution_change: Box, + ) -> Self { + Self { + drop_during_sync: false, + work: Work::GossipBlsToExecutionChange { + message_id, + peer_id, + bls_to_execution_change, + }, + } + } + /// Create a new `Work` event for some block, where the result from computation (if any) is /// sent to the other side of `result_tx`. pub fn rpc_beacon_block( @@ -789,6 +812,11 @@ pub enum Work { request_id: PeerRequestId, request: BlobsByRangeRequest, }, + GossipBlsToExecutionChange { + message_id: MessageId, + peer_id: PeerId, + bls_to_execution_change: Box, + }, } impl Work { @@ -815,6 +843,7 @@ impl Work { Work::BlobsByRangeRequest { .. } => BLOBS_BY_RANGE_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, + Work::GossipBlsToExecutionChange { .. } => GOSSIP_BLS_TO_EXECUTION_CHANGE, } } } @@ -960,6 +989,9 @@ impl BeaconProcessor { let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); let mut blbrange_queue = FifoQueue::new(MAX_BLOBS_BY_RANGE_QUEUE_LEN); + let mut gossip_bls_to_execution_change_queue = + FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN); + // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). let (ready_work_tx, ready_work_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); @@ -1194,9 +1226,12 @@ impl BeaconProcessor { self.spawn_worker(item, toolbox); } else if let Some(item) = gossip_proposer_slashing_queue.pop() { self.spawn_worker(item, toolbox); - // Check exits last since our validators don't get rewards from them. + // Check exits and address changes late since our validators don't get + // rewards from them. } else if let Some(item) = gossip_voluntary_exit_queue.pop() { self.spawn_worker(item, toolbox); + } else if let Some(item) = gossip_bls_to_execution_change_queue.pop() { + self.spawn_worker(item, toolbox); // Handle backfill sync chain segments. } else if let Some(item) = backfill_chain_segment.pop() { self.spawn_worker(item, toolbox); @@ -1313,6 +1348,9 @@ impl BeaconProcessor { Work::UnknownBlockAggregate { .. } => { unknown_block_aggregate_queue.push(work) } + Work::GossipBlsToExecutionChange { .. } => { + gossip_bls_to_execution_change_queue.push(work, work_id, &self.log) + } } } } @@ -1365,6 +1403,10 @@ impl BeaconProcessor { &metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL, gossip_attester_slashing_queue.len() as i64, ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL, + gossip_bls_to_execution_change_queue.len() as i64, + ); if aggregate_queue.is_full() && aggregate_debounce.elapsed() { error!( @@ -1623,6 +1665,20 @@ impl BeaconProcessor { seen_timestamp, ) }), + /* + * BLS to execution change verification. 
+ */ + Work::GossipBlsToExecutionChange { + message_id, + peer_id, + bls_to_execution_change, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_bls_to_execution_change( + message_id, + peer_id, + *bls_to_execution_change, + ) + }), /* * Verification for beacon blocks received during syncing via RPC. */ diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 37cc1903d36..59f157e21bd 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -23,8 +23,9 @@ use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, BlobsSidecar, EthSpec, Hash256, IndexedAttestation, - ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, - SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, + ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, + SyncSubnetId, }; use super::{ @@ -1192,6 +1193,65 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL); } + pub fn process_gossip_bls_to_execution_change( + self, + message_id: MessageId, + peer_id: PeerId, + bls_to_execution_change: SignedBlsToExecutionChange, + ) { + let validator_index = bls_to_execution_change.message.validator_index; + let address = bls_to_execution_change.message.to_execution_address; + + let change = match self + .chain + .verify_bls_to_execution_change_for_gossip(bls_to_execution_change) + { + Ok(ObservationOutcome::New(change)) => change, + Ok(ObservationOutcome::AlreadyKnown) => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + debug!( + self.log, + "Dropping BLS to execution change"; + "validator_index" => validator_index, + "peer" => %peer_id + ); + return; + } + Err(e) => { + debug!( + self.log, + "Dropping invalid BLS to execution change"; + "validator_index" => validator_index, + "peer" => %peer_id, + "error" => ?e + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + // We penalize the peer slightly to prevent overuse of invalids. + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "invalid_bls_to_execution_change", + ); + return; + } + }; + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_VERIFIED_TOTAL); + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + self.chain.import_bls_to_execution_change(change); + + debug!( + self.log, + "Successfully imported BLS to execution change"; + "validator_index" => validator_index, + "address" => ?address, + ); + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_IMPORTED_TOTAL); + } + /// Process the sync committee signature received from the gossip network and: /// /// - If it passes gossip propagation criteria, tell the network thread to forward it. diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 94de2988c8d..f23ab46a6f4 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -143,6 +143,19 @@ lazy_static! { "beacon_processor_attester_slashing_imported_total", "Total number of attester slashings imported to the op pool." 
); + // Gossip BLS to execution changes. + pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_bls_to_execution_change_queue_total", + "Count of address changes from gossip waiting to be verified." + ); + pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_VERIFIED_TOTAL: Result = try_create_int_counter( + "beacon_processor_bls_to_execution_change_verified_total", + "Total number of address changes verified for propagation." + ); + pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_bls_to_execution_change_imported_total", + "Total number of address changes imported to the op pool." + ); // Rpc blocks. pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( "beacon_processor_rpc_block_queue_total", diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index cb90813b263..75986ff3f25 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -291,6 +291,18 @@ impl Router { sync_committtee_msg.0, ); } + PubsubMessage::BlsToExecutionChange(bls_to_execution_change) => { + trace!( + self.log, + "Received BLS to execution change"; + "peer_id" => %peer_id + ); + self.processor.on_bls_to_execution_change_gossip( + id, + peer_id, + bls_to_execution_change, + ); + } } } } diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index dadaf60c1eb..b8bcab84769 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -19,8 +19,8 @@ use store::SyncCommitteeMessage; use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, - SubnetId, SyncSubnetId, + SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId, }; /// Processes validated messages from the network. 
It relays necessary data to the syncing thread @@ -411,6 +411,19 @@ impl Processor { )) } + pub fn on_bls_to_execution_change_gossip( + &mut self, + message_id: MessageId, + peer_id: PeerId, + bls_to_execution_change: Box, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::gossip_bls_to_execution_change( + message_id, + peer_id, + bls_to_execution_change, + )) + } + fn send_beacon_processor_work(&mut self, work: BeaconWorkEvent) { self.beacon_processor_send .try_send(work) diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 8483233589f..d7523544376 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -4,6 +4,9 @@ version = "0.2.0" authors = ["Michael Sproul "] edition = "2021" +[features] +withdrawals-processing = [] + [dependencies] derivative = "2.1.1" itertools = "0.10.0" diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index ba0567277b5..159454b9e98 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -30,10 +30,10 @@ use std::collections::{hash_map::Entry, HashMap, HashSet}; use std::marker::PhantomData; use std::ptr; use types::{ - sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, Attestation, AttestationData, - AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ProposerSlashing, - SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, SyncAggregate, - SyncCommitteeContribution, Validator, + sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, AbstractExecPayload, + Attestation, AttestationData, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, + Epoch, EthSpec, ProposerSlashing, SignedBeaconBlock, SignedBlsToExecutionChange, + SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, Validator, }; type SyncContributions = RwLock>>>; @@ -51,6 +51,7 @@ pub struct OperationPool { /// Map from exiting validator to their exit data. voluntary_exits: RwLock>>, /// Map from credential changing validator to their execution change data. + #[cfg(feature = "withdrawals-processing")] bls_to_execution_changes: RwLock>>, /// Reward cache for accelerating attestation packing. reward_cache: RwLock, @@ -432,7 +433,7 @@ impl OperationPool { pub fn prune_proposer_slashings(&self, head_state: &BeaconState) { prune_validator_hash_map( &mut self.proposer_slashings.write(), - |validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, + |_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, head_state, ); } @@ -507,28 +508,115 @@ impl OperationPool { // // We choose simplicity over the gain of pruning more exits since they are small and // should not be seen frequently. - |validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, + |_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, head_state, ); } + /// Insert a BLS to execution change into the pool. + pub fn insert_bls_to_execution_change( + &self, + verified_change: SigVerifiedOp, + ) { + #[cfg(feature = "withdrawals-processing")] + { + self.bls_to_execution_changes.write().insert( + verified_change.as_inner().message.validator_index, + verified_change, + ); + } + #[cfg(not(feature = "withdrawals-processing"))] + { + drop(verified_change); + } + } + /// Get a list of execution changes for inclusion in a block. + /// + /// They're in random `HashMap` order, which isn't exactly fair, but isn't unfair either. 
pub fn get_bls_to_execution_changes( &self, state: &BeaconState, spec: &ChainSpec, ) -> Vec { - // FIXME: actually implement this - return vec![]; + #[cfg(feature = "withdrawals-processing")] + { + filter_limit_operations( + self.bls_to_execution_changes.read().values(), + |address_change| { + address_change.signature_is_still_valid(&state.fork()) + && state + .get_validator( + address_change.as_inner().message.validator_index as usize, + ) + .map_or(false, |validator| { + !validator.has_eth1_withdrawal_credential(spec) + }) + }, + |address_change| address_change.as_inner().clone(), + T::MaxBlsToExecutionChanges::to_usize(), + ) + } + + #[cfg(not(feature = "withdrawals-processing"))] + { + drop((state, spec)); + vec![] + } + } + + /// Prune BLS to execution changes that have been applied to the state more than 1 block ago. + /// + /// The block check is necessary to avoid pruning too eagerly and losing the ability to include + /// address changes during re-orgs. This isn't *perfect*, so some address changes could + /// still get stuck if there are gnarly re-orgs and the changes can't be widely republished + /// due to the gossip duplicate rules. + pub fn prune_bls_to_execution_changes>( + &self, + head_block: &SignedBeaconBlock, + head_state: &BeaconState, + spec: &ChainSpec, + ) { + #[cfg(feature = "withdrawals-processing")] + { + prune_validator_hash_map( + &mut self.bls_to_execution_changes.write(), + |validator_index, validator| { + validator.has_eth1_withdrawal_credential(spec) + && head_block + .message() + .body() + .bls_to_execution_changes() + .map_or(true, |recent_changes| { + !recent_changes + .iter() + .any(|c| c.message.validator_index == validator_index) + }) + }, + head_state, + ); + } + + #[cfg(not(feature = "withdrawals-processing"))] + { + drop((head_block, head_state, spec)); + } } /// Prune all types of transactions given the latest head state and head fork. - pub fn prune_all(&self, head_state: &BeaconState, current_epoch: Epoch) { + pub fn prune_all>( + &self, + head_block: &SignedBeaconBlock, + head_state: &BeaconState, + current_epoch: Epoch, + spec: &ChainSpec, + ) { self.prune_attestations(current_epoch); self.prune_sync_contributions(head_state.slot()); self.prune_proposer_slashings(head_state); self.prune_attester_slashings(head_state); self.prune_voluntary_exits(head_state); + self.prune_bls_to_execution_changes(head_block, head_state, spec); } /// Total number of voluntary exits in the pool. @@ -594,6 +682,23 @@ impl OperationPool { .map(|(_, exit)| exit.as_inner().clone()) .collect() } + + /// Returns all known `SignedBlsToExecutionChange` objects. + /// + /// This method may return objects that are invalid for block inclusion. + pub fn get_all_bls_to_execution_changes(&self) -> Vec { + #[cfg(feature = "withdrawals-processing")] + { + self.bls_to_execution_changes + .read() + .iter() + .map(|(_, address_change)| address_change.as_inner().clone()) + .collect() + } + + #[cfg(not(feature = "withdrawals-processing"))] + vec![] + } } /// Filter up to a maximum number of operations out of an iterator.
@@ -627,7 +732,7 @@ fn prune_validator_hash_map( prune_if: F, head_state: &BeaconState, ) where - F: Fn(&Validator) -> bool, + F: Fn(u64, &Validator) -> bool, T: VerifyOperation, { map.retain(|&validator_index, op| { @@ -635,7 +740,7 @@ fn prune_validator_hash_map( && head_state .validators() .get(validator_index as usize) - .map_or(true, |validator| !prune_if(validator)) + .map_or(true, |validator| !prune_if(validator_index, validator)) }); } diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 92c5bd92f68..184b967dbee 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -142,7 +142,8 @@ impl PersistedOperationPool { attester_slashings, proposer_slashings, voluntary_exits, - // FIXME: IMPLEMENT THIS + // FIXME(capella): implement schema migration for address changes in op pool + #[cfg(feature = "withdrawals-processing")] bls_to_execution_changes: Default::default(), reward_cache: Default::default(), _phantom: Default::default(), diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index a919f5f5ea9..a8d0acc555a 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -158,6 +158,8 @@ where // Deposits are not included because they can legally have invalid signatures. self.include_exits(block)?; self.include_sync_aggregate(block)?; + #[cfg(feature = "withdrawals")] + self.include_bls_to_execution_changes(block)?; Ok(()) } @@ -339,6 +341,26 @@ where Ok(()) } + /// Include the signature of the block's BLS to execution changes for verification. + #[cfg(feature = "withdrawals")] + pub fn include_bls_to_execution_changes>( + &mut self, + block: &'a SignedBeaconBlock, + ) -> Result<()> { + // FIXME(capella): to improve performance we might want to decompress the withdrawal pubkeys + // in parallel. + if let Ok(bls_to_execution_changes) = block.message().body().bls_to_execution_changes() { + for bls_to_execution_change in bls_to_execution_changes { + self.sets.push(bls_execution_change_signature_set( + self.state, + bls_to_execution_change, + self.spec, + )?); + } + } + Ok(()) + } + /// Verify all the signatures that have been included in `self`, returning `true` if and only if /// all the signatures are valid. /// diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index 80dee28f621..e2e434417e5 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -15,6 +15,14 @@ use types::{ SignedVoluntaryExit, }; +#[cfg(feature = "withdrawals-processing")] +use { + crate::per_block_processing::{ + errors::BlsExecutionChangeValidationError, verify_bls_to_execution_change, + }, + types::SignedBlsToExecutionChange, +}; + const MAX_FORKS_VERIFIED_AGAINST: usize = 2; /// Wrapper around an operation type that acts as proof that its signature has been checked. 
@@ -65,7 +73,7 @@ where fn new(op: T, state: &BeaconState) -> Self { let verified_against = VerifiedAgainst { fork_versions: op - .verification_epochs() + .verification_epochs(state.current_epoch()) .into_iter() .map(|epoch| state.fork().get_fork_version(epoch)) .collect(), @@ -87,8 +95,13 @@ where } pub fn signature_is_still_valid(&self, current_fork: &Fork) -> bool { + // Pass the fork's epoch as the effective current epoch. If the message is a current-epoch + // style message like `SignedBlsToExecutionChange` then `get_fork_version` will return the + // current fork version and we'll check it matches the fork version the message was checked + // against. + let effective_current_epoch = current_fork.epoch; self.as_inner() - .verification_epochs() + .verification_epochs(effective_current_epoch) .into_iter() .zip(self.verified_against.fork_versions.iter()) .all(|(epoch, verified_fork_version)| { @@ -118,7 +131,13 @@ pub trait VerifyOperation: Encode + Decode + Sized { /// Return the epochs at which parts of this message were verified. /// /// These need to map 1-to-1 to the `SigVerifiedOp::verified_against` for this type. - fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]>; + /// + /// If the message contains no inherent epoch it should return the `current_epoch` that is + /// passed in, as that's the epoch at which it was verified. + fn verification_epochs( + &self, + current_epoch: Epoch, + ) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]>; } impl VerifyOperation for SignedVoluntaryExit { @@ -134,7 +153,7 @@ impl VerifyOperation for SignedVoluntaryExit { } #[allow(clippy::integer_arithmetic)] - fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + fn verification_epochs(&self, _: Epoch) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![self.message.epoch] } } @@ -152,7 +171,7 @@ impl VerifyOperation for AttesterSlashing { } #[allow(clippy::integer_arithmetic)] - fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + fn verification_epochs(&self, _: Epoch) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![ self.attestation_1.data.target.epoch, self.attestation_2.data.target.epoch @@ -173,7 +192,7 @@ impl VerifyOperation for ProposerSlashing { } #[allow(clippy::integer_arithmetic)] - fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + fn verification_epochs(&self, _: Epoch) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { // Only need a single epoch because the slots of the two headers must be equal. 
smallvec![self .signed_header_1 @@ -182,3 +201,25 @@ impl VerifyOperation for ProposerSlashing { .epoch(E::slots_per_epoch())] } } + +#[cfg(feature = "withdrawals-processing")] +impl VerifyOperation for SignedBlsToExecutionChange { + type Error = BlsExecutionChangeValidationError; + + fn validate( + self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result, Self::Error> { + verify_bls_to_execution_change(state, &self, VerifySignatures::True, spec)?; + Ok(SigVerifiedOp::new(self, state)) + } + + #[allow(clippy::integer_arithmetic)] + fn verification_epochs( + &self, + current_epoch: Epoch, + ) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + smallvec![current_epoch] + } +} diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index ca8e0ecf708..fa15a0132b4 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -7,8 +7,6 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; /// A deposit to potentially become a beacon chain validator. -/// -/// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/signed_bls_to_execution_change.rs index fc636bb82dd..d7cce693b86 100644 --- a/consensus/types/src/signed_bls_to_execution_change.rs +++ b/consensus/types/src/signed_bls_to_execution_change.rs @@ -7,8 +7,6 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; /// A deposit to potentially become a beacon chain validator. -/// -/// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 5dd22de8d61..717ff13c976 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := f5c7cf78 +TESTS_TAG := v1.3.0-alpha.1 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) From 342489a0c38005a77f6c9339df33dd36b38b5267 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Tue, 29 Nov 2022 17:27:13 -0600 Subject: [PATCH 077/263] Fixed Payload Deserialization in DB (#3758) --- beacon_node/store/src/errors.rs | 9 ++++++++- beacon_node/store/src/hot_cold_store.rs | 23 +++++++++++++++++++++-- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 30ee66074f8..3e1c6d012d4 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -3,7 +3,7 @@ use crate::config::StoreConfigError; use crate::hot_cold_store::HotColdDBError; use ssz::DecodeError; use state_processing::BlockReplayError; -use types::{BeaconStateError, Hash256, Slot}; +use types::{BeaconStateError, Hash256, InconsistentFork, Slot}; pub type Result = std::result::Result; @@ -45,6 +45,7 @@ pub enum Error { ResyncRequiredForExecutionPayloadSeparation, SlotClockUnavailableForMigration, V9MigrationFailure(Hash256), + InconsistentFork(InconsistentFork), } pub trait HandleUnavailable { @@ -103,6 +104,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: InconsistentFork) -> Error { + Error::InconsistentFork(e) + } +} + #[derive(Debug)] pub struct DBError { pub 
message: String, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index e8c782b8c51..0f0d1460fa6 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -358,7 +358,8 @@ impl, Cold: ItemStore> HotColdDB } else if !self.config.prune_payloads { // If payload pruning is disabled there's a chance we may have the payload of // this finalized block. Attempt to load it but don't error in case it's missing. - if let Some(payload) = self.get_execution_payload(block_root)? { + let fork_name = blinded_block.fork_name(&self.spec)?; + if let Some(payload) = self.get_execution_payload(block_root, fork_name)? { DatabaseBlock::Full( blinded_block .try_into_full_block(Some(payload)) @@ -407,8 +408,9 @@ impl, Cold: ItemStore> HotColdDB blinded_block: SignedBeaconBlock>, ) -> Result, Error> { if blinded_block.message().execution_payload().is_ok() { + let fork_name = blinded_block.fork_name(&self.spec)?; let execution_payload = self - .get_execution_payload(block_root)? + .get_execution_payload(block_root, fork_name)? .ok_or(HotColdDBError::MissingExecutionPayload(*block_root))?; blinded_block.try_into_full_block(Some(execution_payload)) } else { @@ -451,9 +453,26 @@ impl, Cold: ItemStore> HotColdDB } /// Load the execution payload for a block from disk. + /// This method deserializes with the proper fork. pub fn get_execution_payload( &self, block_root: &Hash256, + fork_name: ForkName, + ) -> Result>, Error> { + let column = ExecutionPayload::::db_column().into(); + let key = block_root.as_bytes(); + + match self.hot_db.get_bytes(column, key)? { + Some(bytes) => Ok(Some(ExecutionPayload::from_ssz_bytes(&bytes, fork_name)?)), + None => Ok(None), + } + } + + /// Load the execution payload for a block from disk. + /// DANGEROUS: this method just guesses the fork. 
+ pub fn get_execution_payload_dangerous_fork_agnostic( + &self, + block_root: &Hash256, ) -> Result>, Error> { self.get_item(block_root) } From e0ea26c228053e40d4f0079860c6d95847d0756a Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Mon, 28 Nov 2022 11:43:54 -0600 Subject: [PATCH 078/263] Remove withdrawals guard for PayloadAttributesV2 --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 ++ beacon_node/execution_layer/src/engine_api.rs | 5 +---- .../execution_layer/src/engine_api/json_structures.rs | 5 ++--- beacon_node/execution_layer/src/lib.rs | 2 ++ .../execution_layer/src/test_utils/mock_execution_layer.rs | 2 ++ 5 files changed, 9 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 51aed941f11..dc0b6e71777 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4191,6 +4191,8 @@ impl BeaconChain { .await, #[cfg(feature = "withdrawals")] withdrawals, + #[cfg(not(feature = "withdrawals"))] + withdrawals: None, }); debug!( diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index b1a3cfa4138..65d3b656b32 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -7,11 +7,9 @@ use reqwest::StatusCode; use serde::{Deserialize, Serialize}; use strum::IntoStaticStr; use superstruct::superstruct; -#[cfg(feature = "withdrawals")] -use types::Withdrawal; pub use types::{ Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, - ForkName, Hash256, Uint256, VariableList, + ForkName, Hash256, Uint256, VariableList, Withdrawal, }; pub mod auth; @@ -257,7 +255,6 @@ pub struct PayloadAttributes { pub prev_randao: Hash256, #[superstruct(getter(copy))] pub suggested_fee_recipient: Address, - #[cfg(feature = "withdrawals")] #[superstruct(only(V2))] pub withdrawals: Option>, } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 0e53a3b0605..1b125cde44d 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -375,8 +375,9 @@ pub struct JsonPayloadAttributes { pub timestamp: u64, pub prev_randao: Hash256, pub suggested_fee_recipient: Address, - #[cfg(feature = "withdrawals")] #[superstruct(only(V2))] + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] pub withdrawals: Option>, } @@ -392,7 +393,6 @@ impl From for JsonPayloadAttributes { timestamp: pa.timestamp, prev_randao: pa.prev_randao, suggested_fee_recipient: pa.suggested_fee_recipient, - #[cfg(feature = "withdrawals")] withdrawals: pa .withdrawals .map(|w| w.into_iter().map(Into::into).collect()), @@ -413,7 +413,6 @@ impl From for PayloadAttributes { timestamp: jpa.timestamp, prev_randao: jpa.prev_randao, suggested_fee_recipient: jpa.suggested_fee_recipient, - #[cfg(feature = "withdrawals")] withdrawals: jpa .withdrawals .map(|jw| jw.into_iter().map(Into::into).collect()), diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index c90ed291d56..96c1e09060f 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -933,6 +933,8 @@ impl ExecutionLayer { suggested_fee_recipient, #[cfg(feature = "withdrawals")] withdrawals: withdrawals_ref.clone(), + #[cfg(not(feature = 
"withdrawals"))] + withdrawals: None, }); let response = engine diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index cadeec1b3d4..6a9070ca421 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -115,6 +115,8 @@ impl MockExecutionLayer { // FIXME: think about adding withdrawals here.. #[cfg(feature = "withdrawals")] withdrawals: Some(vec![]), + #[cfg(not(feature = "withdrawals"))] + withdrawals: None, }) } }, From 36170ec428a799bf8b1f97dbbcd2864d919022b4 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Mon, 28 Nov 2022 16:56:54 -0600 Subject: [PATCH 079/263] Fixed some BeaconChain Tests --- beacon_node/beacon_chain/tests/merge.rs | 53 ++++++++++++------- .../tests/payload_invalidation.rs | 23 ++++---- consensus/types/src/payload.rs | 21 +++++++- 3 files changed, 65 insertions(+), 32 deletions(-) diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index 19e8902a3e8..ea4e7cb8816 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -12,17 +12,17 @@ fn verify_execution_payload_chain(chain: &[FullPayload]) { let mut prev_ep: Option> = None; for ep in chain { - assert!(*ep != FullPayload::default()); + assert!(!ep.is_default_with_empty_roots()); assert!(ep.block_hash() != ExecutionBlockHash::zero()); // Check against previous `ExecutionPayload`. if let Some(prev_ep) = prev_ep { - assert_eq!(prev_ep.block_hash(), ep.execution_payload.parent_hash); + assert_eq!(prev_ep.block_hash(), ep.execution_payload().parent_hash()); assert_eq!( - prev_ep.execution_payload.block_number + 1, - ep.execution_payload.block_number + prev_ep.execution_payload().block_number() + 1, + ep.execution_payload().block_number() ); - assert!(ep.execution_payload.timestamp > prev_ep.execution_payload.timestamp); + assert!(ep.execution_payload().timestamp() > prev_ep.execution_payload().timestamp()); } prev_ep = Some(ep.clone()); } @@ -88,7 +88,7 @@ async fn merge_with_terminal_block_hash_override() { if i == 0 { assert_eq!(execution_payload.block_hash(), genesis_pow_block_hash); } - execution_payloads.push(execution_payload); + execution_payloads.push(execution_payload.into()); } verify_execution_payload_chain(execution_payloads.as_slice()); @@ -139,9 +139,14 @@ async fn base_altair_merge_with_terminal_block_after_fork() { let merge_head = &harness.chain.head_snapshot().beacon_block; assert!(merge_head.as_merge().is_ok()); assert_eq!(merge_head.slot(), merge_fork_slot); - assert_eq!( - *merge_head.message().body().execution_payload().unwrap(), - FullPayload::default() + assert!( + merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "Merge head is default payload" ); /* @@ -151,13 +156,14 @@ async fn base_altair_merge_with_terminal_block_after_fork() { harness.extend_slots(1).await; let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; - assert_eq!( - *one_after_merge_head + assert!( + one_after_merge_head .message() .body() .execution_payload() - .unwrap(), - FullPayload::default() + .unwrap() + .is_default_with_empty_roots(), + "One after merge head is default payload" ); assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1); @@ -184,25 +190,34 @@ async fn base_altair_merge_with_terminal_block_after_fork() { harness.extend_slots(1).await; let 
one_after_merge_head = &harness.chain.head_snapshot().beacon_block; - assert_eq!( - *one_after_merge_head + // FIXME: why is this being tested twice? + assert!( + one_after_merge_head .message() .body() .execution_payload() - .unwrap(), - FullPayload::default() + .unwrap() + .is_default_with_empty_roots(), + "One after merge head is default payload" ); assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 2); /* * Next merge block should include an exec payload. */ - for _ in 0..4 { harness.extend_slots(1).await; let block = &harness.chain.head_snapshot().beacon_block; - execution_payloads.push(block.message().body().execution_payload().unwrap().clone()); + execution_payloads.push( + block + .message() + .body() + .execution_payload() + .unwrap() + .clone() + .into(), + ); } verify_execution_payload_chain(execution_payloads.as_slice()); diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 611b2098845..5c470d4dd39 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -12,9 +12,9 @@ use beacon_chain::{ INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ - json_structures::{JsonForkchoiceStateV1, JsonPayloadAttributesV1}, + json_structures::{JsonForkchoiceStateV1, JsonPayloadAttributes, JsonPayloadAttributesV1}, test_utils::ExecutionBlockGenerator, - ExecutionLayer, ForkchoiceState, PayloadAttributes, + ExecutionLayer, ForkchoiceState, PayloadAttributes, PayloadAttributesV1, }; use fork_choice::{ CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus, @@ -133,7 +133,10 @@ impl InvalidPayloadRig { let attributes: JsonPayloadAttributesV1 = serde_json::from_value(payload_param_json.clone()).unwrap(); - (fork_choice_state.into(), attributes.into()) + ( + fork_choice_state.into(), + JsonPayloadAttributes::V1(attributes).into(), + ) } fn previous_payload_attributes(&self) -> PayloadAttributes { @@ -982,7 +985,7 @@ async fn payload_preparation() { .await .unwrap(); - let payload_attributes = PayloadAttributes { + let payload_attributes = PayloadAttributes::V1(PayloadAttributesV1 { timestamp: rig .harness .chain @@ -995,7 +998,7 @@ async fn payload_preparation() { .get_randao_mix(head.beacon_state.current_epoch()) .unwrap(), suggested_fee_recipient: fee_recipient, - }; + }); assert_eq!(rig.previous_payload_attributes(), payload_attributes); } @@ -1125,7 +1128,7 @@ async fn payload_preparation_before_transition_block() { let (fork_choice_state, payload_attributes) = rig.previous_forkchoice_update_params(); let latest_block_hash = rig.latest_execution_block_hash(); - assert_eq!(payload_attributes.suggested_fee_recipient, fee_recipient); + assert_eq!(payload_attributes.suggested_fee_recipient(), fee_recipient); assert_eq!(fork_choice_state.head_block_hash, latest_block_hash); } @@ -1367,18 +1370,16 @@ async fn build_optimistic_chain( .body() .execution_payload() .unwrap() - .execution_payload - == <_>::default(), + .is_default_with_empty_roots(), "the block *has not* undergone the merge transition" ); assert!( - post_transition_block + !post_transition_block .message() .body() .execution_payload() .unwrap() - .execution_payload - != <_>::default(), + .is_default_with_empty_roots(), "the block *has* undergone the merge transition" ); diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 2507a9f0eb2..e7bba25ff5c 100644 --- a/consensus/types/src/payload.rs +++ 
b/consensus/types/src/payload.rs @@ -148,8 +148,8 @@ impl From> for ExecutionPayload { } impl<'a, T: EthSpec> From> for ExecutionPayload { - fn from(full_payload: FullPayloadRef<'a, T>) -> Self { - match full_payload { + fn from(full_payload_ref: FullPayloadRef<'a, T>) -> Self { + match full_payload_ref { FullPayloadRef::Merge(payload) => { ExecutionPayload::Merge(payload.execution_payload.clone()) } @@ -163,6 +163,23 @@ impl<'a, T: EthSpec> From> for ExecutionPayload { } } +// FIXME: can this be implemented as Deref or Clone somehow? +impl<'a, T: EthSpec> From> for FullPayload { + fn from(full_payload_ref: FullPayloadRef<'a, T>) -> Self { + match full_payload_ref { + FullPayloadRef::Merge(payload_ref) => { + FullPayload::Merge(payload_ref.clone()) + } + FullPayloadRef::Capella(payload_ref) => { + FullPayload::Capella(payload_ref.clone()) + } + FullPayloadRef::Eip4844(payload_ref) => { + FullPayload::Eip4844(payload_ref.clone()) + } + } + } +} + impl ExecPayload for FullPayload { fn block_type() -> BlockType { BlockType::Full From f5e6a54f0597d0bf74d761c7167d14f7d89c10ce Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Tue, 29 Nov 2022 18:01:47 -0600 Subject: [PATCH 080/263] Refactored Execution Layer & Fixed Some Tests --- beacon_node/beacon_chain/src/beacon_chain.rs | 15 ++- .../beacon_chain/src/execution_payload.rs | 21 ++-- .../tests/payload_invalidation.rs | 16 +-- beacon_node/execution_layer/src/engine_api.rs | 22 +++- beacon_node/execution_layer/src/engines.rs | 31 ++---- beacon_node/execution_layer/src/lib.rs | 102 ++++-------------- .../src/test_utils/mock_builder.rs | 18 ++-- .../src/test_utils/mock_execution_layer.rs | 57 ++++------ consensus/types/src/payload.rs | 12 +-- consensus/types/src/withdrawal.rs | 2 +- .../src/test_rig.rs | 44 ++++---- 11 files changed, 133 insertions(+), 207 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index dc0b6e71777..20edab090d4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -59,7 +59,7 @@ use crate::{metrics, BeaconChainError}; use eth2::types::{EventKind, SseBlock, SyncDuty}; use execution_layer::{ BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, - PayloadAttributes, PayloadAttributesV1, PayloadAttributesV2, PayloadStatus, + PayloadAttributes, PayloadStatus, }; pub use fork_choice::CountUnrealized; use fork_choice::{ @@ -4179,21 +4179,20 @@ impl BeaconChain { .map(|withdrawals_opt| withdrawals_opt.map(|w| w.into())) .map_err(Error::PrepareProposerFailed)?; - let payload_attributes = PayloadAttributes::V2(PayloadAttributesV2 { - timestamp: self - .slot_clock + let payload_attributes = PayloadAttributes::new( + self.slot_clock .start_of(prepare_slot) .ok_or(Error::InvalidSlot(prepare_slot))? 
.as_secs(), - prev_randao: head_random, - suggested_fee_recipient: execution_layer + head_random, + execution_layer .get_suggested_fee_recipient(proposer as u64) .await, #[cfg(feature = "withdrawals")] withdrawals, #[cfg(not(feature = "withdrawals"))] - withdrawals: None, - }); + None, + ); debug!( self.log, diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 85aedc6592e..ff3167c7042 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -12,7 +12,7 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; -use execution_layer::{BlockProposalContents, BuilderParams, PayloadStatus}; +use execution_layer::{BlockProposalContents, BuilderParams, PayloadAttributes, PayloadStatus}; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; @@ -483,20 +483,29 @@ where .await .map_err(BlockProductionError::BeaconChain)?; + let suggested_fee_recipient = execution_layer + .get_suggested_fee_recipient(proposer_index) + .await; + let payload_attributes = PayloadAttributes::new( + timestamp, + random, + suggested_fee_recipient, + #[cfg(feature = "withdrawals")] + withdrawals, + #[cfg(not(feature = "withdrawals"))] + None, + ); + // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. // // This future is not executed here, it's up to the caller to await it. let block_contents = execution_layer .get_payload::( parent_hash, - timestamp, - random, - proposer_index, + &payload_attributes, forkchoice_update_params, builder_params, fork, - #[cfg(feature = "withdrawals")] - withdrawals, &chain.spec, ) .await diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 5c470d4dd39..a963f071a13 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -14,7 +14,7 @@ use beacon_chain::{ use execution_layer::{ json_structures::{JsonForkchoiceStateV1, JsonPayloadAttributes, JsonPayloadAttributesV1}, test_utils::ExecutionBlockGenerator, - ExecutionLayer, ForkchoiceState, PayloadAttributes, PayloadAttributesV1, + ExecutionLayer, ForkchoiceState, PayloadAttributes, }; use fork_choice::{ CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus, @@ -985,20 +985,22 @@ async fn payload_preparation() { .await .unwrap(); - let payload_attributes = PayloadAttributes::V1(PayloadAttributesV1 { - timestamp: rig - .harness + let payload_attributes = PayloadAttributes::new( + rig.harness .chain .slot_clock .start_of(next_slot) .unwrap() .as_secs(), - prev_randao: *head + *head .beacon_state .get_randao_mix(head.beacon_state.current_epoch()) .unwrap(), - suggested_fee_recipient: fee_recipient, - }); + fee_recipient, + None, + ) + .downgrade_to_v1() + .unwrap(); assert_eq!(rig.previous_payload_attributes(), payload_attributes); } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 65d3b656b32..988b04826eb 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -243,11 +243,11 @@ impl From> for ExecutionBlockWithTransactions #[superstruct( variants(V1, V2), - variant_attributes(derive(Clone, Debug, PartialEq),), + 
variant_attributes(derive(Clone, Debug, Eq, Hash, PartialEq),), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, Eq, Hash, PartialEq)] pub struct PayloadAttributes { #[superstruct(getter(copy))] pub timestamp: u64, @@ -260,14 +260,28 @@ pub struct PayloadAttributes { } impl PayloadAttributes { + pub fn new( + timestamp: u64, + prev_randao: Hash256, + suggested_fee_recipient: Address, + withdrawals: Option>, + ) -> Self { + // this should always return the highest version + PayloadAttributes::V2(PayloadAttributesV2 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + }) + } + pub fn downgrade_to_v1(self) -> Result { match self { PayloadAttributes::V1(_) => Ok(self), PayloadAttributes::V2(v2) => { - #[cfg(features = "withdrawals")] if v2.withdrawals.is_some() { return Err(Error::BadConversion( - "Downgrading from PayloadAttributesV2 with non-null withdrawaals" + "Downgrading from PayloadAttributesV2 with non-null withdrawals" .to_string(), )); } diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 264303b5d3b..b5792e3835a 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -16,6 +16,7 @@ use types::{Address, ExecutionBlockHash, Hash256}; /// The number of payload IDs that will be stored for each `Engine`. /// /// Since the size of each value is small (~100 bytes) a large number is used for safety. +/// FIXME: check this assumption now that the key includes entire payload attributes which now includes withdrawals const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; /// Stores the remembered state of a engine. @@ -97,9 +98,7 @@ pub struct ForkchoiceState { #[derive(Hash, PartialEq, std::cmp::Eq)] struct PayloadIdCacheKey { pub head_block_hash: ExecutionBlockHash, - pub timestamp: u64, - pub prev_randao: Hash256, - pub suggested_fee_recipient: Address, + pub payload_attributes: PayloadAttributes, } #[derive(Debug)] @@ -142,20 +141,13 @@ impl Engine { pub async fn get_payload_id( &self, - head_block_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, + head_block_hash: &ExecutionBlockHash, + payload_attributes: &PayloadAttributes, ) -> Option { self.payload_id_cache .lock() .await - .get(&PayloadIdCacheKey { - head_block_hash, - timestamp, - prev_randao, - suggested_fee_recipient, - }) + .get(&PayloadIdCacheKey::new(head_block_hash, payload_attributes)) .cloned() } @@ -171,8 +163,8 @@ impl Engine { .await?; if let Some(payload_id) = response.payload_id { - if let Some(key) = - payload_attributes.map(|pa| PayloadIdCacheKey::new(&forkchoice_state, &pa)) + if let Some(key) = payload_attributes + .map(|pa| PayloadIdCacheKey::new(&forkchoice_state.head_block_hash, &pa)) { self.payload_id_cache.lock().await.put(key, payload_id); } else { @@ -347,14 +339,11 @@ impl Engine { } } -// TODO: revisit this - do we need to key on withdrawals as well here? 
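The hunks above establish the pattern used through the rest of this series: `PayloadAttributes::new` always constructs the newest variant, `downgrade_to_v1` refuses to silently drop data, and the payload-ID cache now keys on the entire attributes struct (hence the extra `Eq`/`Hash` derives). A minimal standalone sketch of that pattern, using simplified placeholder types rather than the real Lighthouse/`superstruct` definitions:

    #[derive(Clone, Debug, PartialEq)]
    enum PayloadAttributes {
        V1 {
            timestamp: u64,
            prev_randao: [u8; 32],
            suggested_fee_recipient: [u8; 20],
        },
        V2 {
            timestamp: u64,
            prev_randao: [u8; 32],
            suggested_fee_recipient: [u8; 20],
            withdrawals: Option<Vec<u64>>,
        },
    }

    impl PayloadAttributes {
        // The constructor always builds the highest supported version.
        fn new(
            timestamp: u64,
            prev_randao: [u8; 32],
            suggested_fee_recipient: [u8; 20],
            withdrawals: Option<Vec<u64>>,
        ) -> Self {
            PayloadAttributes::V2 {
                timestamp,
                prev_randao,
                suggested_fee_recipient,
                withdrawals,
            }
        }

        // Downgrading is only legal when no V2-only data would be lost.
        fn downgrade_to_v1(self) -> Result<Self, String> {
            match self {
                v1 @ PayloadAttributes::V1 { .. } => Ok(v1),
                PayloadAttributes::V2 {
                    timestamp,
                    prev_randao,
                    suggested_fee_recipient,
                    withdrawals,
                } => {
                    if withdrawals.is_some() {
                        return Err(
                            "downgrading PayloadAttributesV2 with non-null withdrawals".to_string(),
                        );
                    }
                    Ok(PayloadAttributes::V1 {
                        timestamp,
                        prev_randao,
                        suggested_fee_recipient,
                    })
                }
            }
        }
    }

    fn main() {
        let with_withdrawals =
            PayloadAttributes::new(1_700_000_000, [0u8; 32], [0u8; 20], Some(vec![]));
        // Even an empty withdrawals list blocks the downgrade.
        assert!(with_withdrawals.downgrade_to_v1().is_err());

        let without = PayloadAttributes::new(1_700_000_000, [0u8; 32], [0u8; 20], None);
        assert!(without.downgrade_to_v1().is_ok());
    }

Keying the LRU cache on the full attributes (withdrawals included) is also what the FIXME about `PAYLOAD_ID_LRU_CACHE_SIZE` above refers to: each cache entry is now larger than the roughly 100 bytes originally assumed.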
impl PayloadIdCacheKey { - fn new(state: &ForkchoiceState, attributes: &PayloadAttributes) -> Self { + fn new(head_block_hash: &ExecutionBlockHash, attributes: &PayloadAttributes) -> Self { Self { - head_block_hash: state.head_block_hash, - timestamp: attributes.timestamp(), - prev_randao: attributes.prev_randao(), - suggested_fee_recipient: attributes.suggested_fee_recipient(), + head_block_hash: head_block_hash.clone(), + payload_attributes: attributes.clone(), } } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 96c1e09060f..25a19eb0b9b 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -606,21 +606,15 @@ impl ExecutionLayer { /// /// The result will be returned from the first node that returns successfully. No more nodes /// will be contacted. - #[allow(clippy::too_many_arguments)] pub async fn get_payload>( &self, parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - proposer_index: u64, + payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, current_fork: ForkName, - #[cfg(feature = "withdrawals")] withdrawals: Option>, spec: &ChainSpec, ) -> Result, Error> { - let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; - match Payload::block_type() { BlockType::Blinded => { let _timer = metrics::start_timer_vec( @@ -629,14 +623,10 @@ impl ExecutionLayer { ); self.get_blinded_payload( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, builder_params, current_fork, - #[cfg(feature = "withdrawals")] - withdrawals, spec, ) .await @@ -648,30 +638,22 @@ impl ExecutionLayer { ); self.get_full_payload( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, current_fork, - #[cfg(feature = "withdrawals")] - withdrawals, ) .await } } } - #[allow(clippy::too_many_arguments)] async fn get_blinded_payload>( &self, parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, + payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, current_fork: ForkName, - #[cfg(feature = "withdrawals")] withdrawals: Option>, spec: &ChainSpec, ) -> Result, Error> { if let Some(builder) = self.builder() { @@ -691,13 +673,9 @@ impl ExecutionLayer { builder.get_builder_header::(slot, parent_hash, &pubkey), self.get_full_payload_caching( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, current_fork, - #[cfg(feature = "withdrawals")] - withdrawals, ) ); @@ -746,7 +724,7 @@ impl ExecutionLayer { falling back to local execution engine." 
); Ok(local) - } else if header.prev_randao() != prev_randao { + } else if header.prev_randao() != payload_attributes.prev_randao() { warn!( self.log(), "Invalid prev randao from connected builder, \ @@ -784,7 +762,7 @@ impl ExecutionLayer { bid from connected builder, falling back to local execution engine."); Ok(local) } else { - if header.fee_recipient() != suggested_fee_recipient { + if header.fee_recipient() != payload_attributes.suggested_fee_recipient() { info!( self.log(), "Fee recipient from connected builder does \ @@ -823,13 +801,9 @@ impl ExecutionLayer { } self.get_full_payload_caching( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, current_fork, - #[cfg(feature = "withdrawals")] - withdrawals, ) .await } @@ -838,22 +812,15 @@ impl ExecutionLayer { async fn get_full_payload>( &self, parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, + payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, current_fork: ForkName, - #[cfg(feature = "withdrawals")] withdrawals: Option>, ) -> Result, Error> { self.get_full_payload_with( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, current_fork, - #[cfg(feature = "withdrawals")] - withdrawals, noop, ) .await @@ -863,22 +830,15 @@ impl ExecutionLayer { async fn get_full_payload_caching>( &self, parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, + payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, current_fork: ForkName, - #[cfg(feature = "withdrawals")] withdrawals: Option>, ) -> Result, Error> { self.get_full_payload_with( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, current_fork, - #[cfg(feature = "withdrawals")] - withdrawals, Self::cache_payload, ) .await @@ -887,20 +847,15 @@ impl ExecutionLayer { async fn get_full_payload_with>( &self, parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, + payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, current_fork: ForkName, - #[cfg(feature = "withdrawals")] withdrawals: Option>, f: fn(&ExecutionLayer, &ExecutionPayload) -> Option>, ) -> Result, Error> { - #[cfg(feature = "withdrawals")] - let withdrawals_ref = &withdrawals; self.engine() .request(move |engine| async move { let payload_id = if let Some(id) = engine - .get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient) + .get_payload_id(&parent_hash, payload_attributes) .await { // The payload id has been cached for this engine. @@ -925,22 +880,11 @@ impl ExecutionLayer { .finalized_hash .unwrap_or_else(ExecutionBlockHash::zero), }; - // This must always be the latest PayloadAttributes - // FIXME: How to non-capella EIP4844 testnets handle this? 
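The blinded-payload path above races the builder bid against a locally built payload and keeps the bid only when it is consistent with the requested attributes: a `prev_randao` mismatch falls back to the local payload, while a differing fee recipient is only reported. A self-contained sketch of that decision, with stand-in types instead of the real header and block-proposal types (names here are placeholders, not Lighthouse APIs):

    #[derive(Clone, Debug, PartialEq)]
    struct Attributes {
        prev_randao: [u8; 32],
        suggested_fee_recipient: [u8; 20],
    }

    #[derive(Clone, Debug)]
    struct Header {
        prev_randao: [u8; 32],
        fee_recipient: [u8; 20],
    }

    #[derive(Debug)]
    enum Chosen {
        Builder(Header),
        Local(Header),
    }

    // Keep the builder bid only if it matches what was requested: a prev_randao
    // mismatch means falling back to the local payload, while a differing fee
    // recipient is merely logged (the builder may pay the proposer directly).
    fn choose_payload(builder: Option<Header>, local: Header, requested: &Attributes) -> Chosen {
        match builder {
            Some(header) if header.prev_randao == requested.prev_randao => {
                if header.fee_recipient != requested.suggested_fee_recipient {
                    eprintln!("builder fee recipient differs from the requested one");
                }
                Chosen::Builder(header)
            }
            _ => Chosen::Local(local),
        }
    }

    fn main() {
        let requested = Attributes {
            prev_randao: [1u8; 32],
            suggested_fee_recipient: [2u8; 20],
        };
        let local = Header {
            prev_randao: [1u8; 32],
            fee_recipient: [2u8; 20],
        };
        let builder = Header {
            prev_randao: [9u8; 32],
            fee_recipient: [2u8; 20],
        };
        // prev_randao mismatch: the bid is discarded in favour of the local payload.
        println!("{:?}", choose_payload(Some(builder), local, &requested));
    }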
- let payload_attributes = PayloadAttributes::V2(PayloadAttributesV2 { - timestamp, - prev_randao, - suggested_fee_recipient, - #[cfg(feature = "withdrawals")] - withdrawals: withdrawals_ref.clone(), - #[cfg(not(feature = "withdrawals"))] - withdrawals: None, - }); let response = engine .notify_forkchoice_updated( fork_choice_state, - Some(payload_attributes), + Some(payload_attributes.clone()), self.log(), ) .await?; @@ -969,9 +913,9 @@ impl ExecutionLayer { debug!( self.log(), "Issuing engine_getBlobsBundle"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "prev_randao" => ?prev_randao, - "timestamp" => timestamp, + "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), + "prev_randao" => ?payload_attributes.prev_randao(), + "timestamp" => payload_attributes.timestamp(), "parent_hash" => ?parent_hash, ); Some(engine.api.get_blobs_bundle_v1::(payload_id).await) @@ -982,16 +926,16 @@ impl ExecutionLayer { debug!( self.log(), "Issuing engine_getPayload"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "prev_randao" => ?prev_randao, - "timestamp" => timestamp, + "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), + "prev_randao" => ?payload_attributes.prev_randao(), + "timestamp" => payload_attributes.timestamp(), "parent_hash" => ?parent_hash, ); engine.api.get_payload::(current_fork, payload_id).await }; let (blob, payload) = tokio::join!(blob_fut, payload_fut); let payload = payload.map(|full_payload| { - if full_payload.fee_recipient() != suggested_fee_recipient { + if full_payload.fee_recipient() != payload_attributes.suggested_fee_recipient() { error!( self.log(), "Inconsistent fee recipient"; @@ -1001,7 +945,7 @@ impl ExecutionLayer { ensure that the value of suggested_fee_recipient is set correctly and \ that the Execution Engine is trusted.", "fee_recipient" => ?full_payload.fee_recipient(), - "suggested_fee_recipient" => ?suggested_fee_recipient, + "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), ); } if f(self, &full_payload).is_some() { diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 5c69fffbf69..06b5e81eb31 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,5 +1,5 @@ use crate::test_utils::DEFAULT_JWT_SECRET; -use crate::{Config, ExecutionLayer, PayloadAttributes, PayloadAttributesV1}; +use crate::{Config, ExecutionLayer, PayloadAttributes}; use async_trait::async_trait; use eth2::types::{BlockId, StateId, ValidatorId}; use eth2::{BeaconNodeHttpClient, Timeouts}; @@ -289,11 +289,8 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)?; // FIXME: think about proper fork here - let payload_attributes = PayloadAttributes::V1(PayloadAttributesV1 { - timestamp, - prev_randao: *prev_randao, - suggested_fee_recipient: fee_recipient, - }); + let payload_attributes = + PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None); self.el .insert_proposer(slot, head_block_root, val_index, payload_attributes) @@ -306,18 +303,17 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { finalized_hash: Some(finalized_execution_hash), }; + let payload_attributes = + PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None); + let payload = self .el .get_full_payload_caching::>( head_execution_hash, - timestamp, - *prev_randao, - fee_recipient, + &payload_attributes, 
forkchoice_update_params, // TODO: do we need to write a test for this if this is Capella fork? ForkName::Merge, - #[cfg(feature = "withdrawals")] - None, ) .await .map_err(convert_err)? diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 6a9070ca421..e6da676a871 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -98,35 +98,16 @@ impl MockExecutionLayer { justified_hash: None, finalized_hash: None, }; - - // FIXME: this is just best guess for how to deal with forks here.. - let payload_attributes = match &latest_execution_block { - &Block::PoS(ref pos_block) => match pos_block { - &ExecutionPayload::Merge(_) => PayloadAttributes::V1(PayloadAttributesV1 { - timestamp, - prev_randao, - suggested_fee_recipient: Address::repeat_byte(42), - }), - &ExecutionPayload::Capella(_) | &ExecutionPayload::Eip4844(_) => { - PayloadAttributes::V2(PayloadAttributesV2 { - timestamp, - prev_randao, - suggested_fee_recipient: Address::repeat_byte(42), - // FIXME: think about adding withdrawals here.. - #[cfg(feature = "withdrawals")] - withdrawals: Some(vec![]), - #[cfg(not(feature = "withdrawals"))] - withdrawals: None, - }) - } - }, - // I guess a PoW blocks means we should use Merge? - &Block::PoW(_) => PayloadAttributes::V1(PayloadAttributesV1 { - timestamp, - prev_randao, - suggested_fee_recipient: Address::repeat_byte(42), - }), - }; + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + Address::repeat_byte(42), + // FIXME: think about how to handle different forks / withdrawals here.. + #[cfg(feature = "withdrawals")] + Some(vec![]), + #[cfg(not(feature = "withdrawals"))] + None, + ); // Insert a proposer to ensure the fork choice updated command works. let slot = Slot::new(0); @@ -152,19 +133,18 @@ impl MockExecutionLayer { slot, chain_health: ChainHealth::Healthy, }; + let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await; + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); let payload: ExecutionPayload = self .el .get_payload::>( parent_hash, - timestamp, - prev_randao, - validator_index, + &payload_attributes, forkchoice_update_params, builder_params, // FIXME: do we need to consider other forks somehow? What about withdrawals? ForkName::Merge, - #[cfg(feature = "withdrawals")] - Some(vec![]), &self.spec, ) .await @@ -188,19 +168,18 @@ impl MockExecutionLayer { slot, chain_health: ChainHealth::Healthy, }; + let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await; + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); let payload_header = self .el .get_payload::>( parent_hash, - timestamp, - prev_randao, - validator_index, + &payload_attributes, forkchoice_update_params, builder_params, // FIXME: do we need to consider other forks somehow? What about withdrawals? 
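Once a `payload_id` is known, the code above issues `engine_getPayload` and (for EIP-4844) `engine_getBlobsBundleV1` concurrently and joins the results. A minimal sketch of that concurrency shape, assuming the `tokio` crate (with the `rt` and `macros` features) and using stub async functions in place of the real engine API calls:

    // Stub standing in for engine_getBlobsBundleV1.
    async fn get_blobs_bundle(payload_id: u64) -> Result<Vec<u8>, String> {
        Ok(vec![payload_id as u8])
    }

    // Stub standing in for engine_getPayload.
    async fn get_payload(payload_id: u64) -> Result<String, String> {
        Ok(format!("payload for id {payload_id}"))
    }

    #[tokio::main]
    async fn main() {
        let payload_id = 42;
        // Only request a blobs bundle when the fork requires one (EIP-4844 here).
        let wants_blobs = true;

        let blob_fut = async {
            if wants_blobs {
                Some(get_blobs_bundle(payload_id).await)
            } else {
                None
            }
        };
        let payload_fut = get_payload(payload_id);

        // Drive both engine requests to completion concurrently.
        let (blobs, payload) = tokio::join!(blob_fut, payload_fut);
        println!("blobs: {:?}, payload: {:?}", blobs, payload);
    }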
ForkName::Merge, - #[cfg(feature = "withdrawals")] - Some(vec![]), &self.spec, ) .await diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index e7bba25ff5c..33fa2273725 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -167,15 +167,9 @@ impl<'a, T: EthSpec> From> for ExecutionPayload { impl<'a, T: EthSpec> From> for FullPayload { fn from(full_payload_ref: FullPayloadRef<'a, T>) -> Self { match full_payload_ref { - FullPayloadRef::Merge(payload_ref) => { - FullPayload::Merge(payload_ref.clone()) - } - FullPayloadRef::Capella(payload_ref) => { - FullPayload::Capella(payload_ref.clone()) - } - FullPayloadRef::Eip4844(payload_ref) => { - FullPayload::Eip4844(payload_ref.clone()) - } + FullPayloadRef::Merge(payload_ref) => FullPayload::Merge(payload_ref.clone()), + FullPayloadRef::Capella(payload_ref) => FullPayload::Capella(payload_ref.clone()), + FullPayloadRef::Eip4844(payload_ref) => FullPayload::Eip4844(payload_ref.clone()), } } } diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs index 36ee6396580..c2529747c4d 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal.rs @@ -10,7 +10,7 @@ use tree_hash_derive::TreeHash; /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] pub struct Withdrawal { #[serde(with = "eth2_serde_utils::quoted_u64")] diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 9ef96687a56..1b280d14826 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -4,8 +4,7 @@ use crate::execution_engine::{ use crate::transactions::transactions; use ethers_providers::Middleware; use execution_layer::{ - BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, PayloadAttributesV1, - PayloadStatus, + BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, PayloadStatus, }; use fork_choice::ForkchoiceUpdateParameters; use reqwest::{header::CONTENT_TYPE, Client}; @@ -279,11 +278,8 @@ impl TestRig { Slot::new(1), // Insert proposer for the next slot head_root, proposer_index, - PayloadAttributes::V1(PayloadAttributesV1 { - timestamp, - prev_randao, - suggested_fee_recipient: Address::zero(), - }), + // TODO: think about how to test different forks + PayloadAttributes::new(timestamp, prev_randao, Address::zero(), None), ) .await; @@ -316,20 +312,23 @@ impl TestRig { slot: Slot::new(0), chain_health: ChainHealth::Healthy, }; + let suggested_fee_recipient = self + .ee_a + .execution_layer + .get_suggested_fee_recipient(proposer_index) + .await; + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); let valid_payload = self .ee_a .execution_layer .get_payload::>( parent_hash, - timestamp, - prev_randao, - proposer_index, + &payload_attributes, forkchoice_update_params, builder_params, // FIXME: think about how to test other forks ForkName::Merge, - #[cfg(feature = "withdrawals")] - None, &self.spec, ) .await @@ -444,20 +443,23 @@ impl TestRig { slot: Slot::new(0), chain_health: ChainHealth::Healthy, }; + let suggested_fee_recipient = self + .ee_a + .execution_layer + .get_suggested_fee_recipient(proposer_index) + .await; + let 
payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); let second_payload = self .ee_a .execution_layer .get_payload::>( parent_hash, - timestamp, - prev_randao, - proposer_index, + &payload_attributes, forkchoice_update_params, builder_params, // FIXME: think about how to test other forks ForkName::Merge, - #[cfg(feature = "withdrawals")] - None, &self.spec, ) .await @@ -487,11 +489,9 @@ impl TestRig { */ let head_block_hash = valid_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); - let payload_attributes = PayloadAttributes::V1(PayloadAttributesV1 { - timestamp: second_payload.timestamp() + 1, - prev_randao: Hash256::zero(), - suggested_fee_recipient: Address::zero(), - }); + // TODO: think about how to handle different forks + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, Address::zero(), None); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(100); let validator_index = 0; From 1a399767157e53555fe54186152c8a9ca58978d4 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Fri, 2 Dec 2022 17:42:12 -0600 Subject: [PATCH 081/263] Fixed Compiler Warnings & Failing Tests (#3771) --- beacon_node/beacon_chain/src/blob_verification.rs | 2 +- .../execution_layer/src/engine_api/http.rs | 3 +-- beacon_node/execution_layer/src/engines.rs | 2 +- beacon_node/execution_layer/src/lib.rs | 3 --- .../src/test_utils/mock_execution_layer.rs | 4 ++-- beacon_node/http_api/src/publish_blocks.rs | 2 +- beacon_node/lighthouse_network/src/service/mod.rs | 1 - beacon_node/lighthouse_network/tests/rpc_tests.rs | 4 ++-- .../src/beacon_processor/worker/gossip_methods.rs | 15 +++++++-------- .../built_in_network_configs/gnosis/config.yaml | 6 ++++++ .../built_in_network_configs/mainnet/config.yaml | 6 ++++++ .../block_signature_verifier.rs | 2 +- consensus/types/src/chain_spec.rs | 3 ++- consensus/types/src/fork_name.rs | 2 +- testing/ef_tests/src/cases/operations.rs | 11 +++++++++-- validator_client/src/block_service.rs | 5 ++--- validator_client/src/validator_store.rs | 11 +++++------ 17 files changed, 47 insertions(+), 35 deletions(-) diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index d3e0d2a17d8..f7928820332 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -4,7 +4,7 @@ use slot_clock::SlotClock; use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use crate::BeaconChainError; use bls::PublicKey; -use types::{consts::eip4844::BLS_MODULUS, BeaconStateError, BlobsSidecar, Hash256, Slot}; +use types::{consts::eip4844::BLS_MODULUS, BeaconStateError, BlobsSidecar, Slot}; pub enum BlobError { /// The blob sidecar is from a slot that is later than the current slot (with respect to the diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 2b7728b98d0..8eef7aece3f 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -916,8 +916,7 @@ mod test { use std::str::FromStr; use std::sync::Arc; use types::{ - ExecutionPayloadMerge, ForkName, FullPayload, MainnetEthSpec, Transactions, Unsigned, - VariableList, + ExecutionPayloadMerge, ForkName, MainnetEthSpec, Transactions, Unsigned, VariableList, }; struct Tester { diff --git 
a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index b5792e3835a..16562267ca4 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use task_executor::TaskExecutor; use tokio::sync::{watch, Mutex, RwLock}; use tokio_stream::wrappers::WatchStream; -use types::{Address, ExecutionBlockHash, Hash256}; +use types::ExecutionBlockHash; /// The number of payload IDs that will be stored for each `Engine`. /// diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 996d98385d5..b6e85f67dcd 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -4,7 +4,6 @@ //! This crate only provides useful functionality for "The Merge", it does not provide any of the //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. -use crate::json_structures::JsonBlobBundles; use crate::payload_cache::PayloadCache; use auth::{strip_prefix, Auth, JwtKey}; use builder_client::BuilderHttpClient; @@ -36,8 +35,6 @@ use tokio::{ time::sleep, }; use tokio_stream::wrappers::WatchStream; -#[cfg(feature = "withdrawals")] -use types::Withdrawal; use types::{AbstractExecPayload, Blob, ExecPayload, KzgCommitment}; use types::{ BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName, diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index e6da676a871..ddddc2c9cea 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -1,7 +1,7 @@ use crate::{ test_utils::{ - Block, MockServer, DEFAULT_BUILDER_THRESHOLD_WEI, DEFAULT_JWT_SECRET, - DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, + MockServer, DEFAULT_BUILDER_THRESHOLD_WEI, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, + DEFAULT_TERMINAL_DIFFICULTY, }, Config, *, }; diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 139bb355805..fb296168db0 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -12,7 +12,7 @@ use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ AbstractExecPayload, BlindedPayload, BlobsSidecar, EthSpec, ExecPayload, ExecutionBlockHash, - FullPayload, Hash256, SignedBeaconBlock, SignedBeaconBlockEip4844, + FullPayload, Hash256, SignedBeaconBlock, }; use warp::Rejection; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index e5fbfe0b02a..7b464650373 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -34,7 +34,6 @@ use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent}; use libp2p::PeerId; use slog::{crit, debug, info, o, trace, warn}; -use std::io::Write; use std::path::PathBuf; use std::pin::Pin; use std::{ diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 9183453492c..ebdbb67421f 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -9,8 +9,8 @@ use std::time::Duration; use tokio::runtime::Runtime; use tokio::time::sleep; use types::{ 
- BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Epoch, EthSpec, ForkContext, - ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, Epoch, EthSpec, + ForkContext, ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, }; mod common; diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 327a3f6835c..a38f4d843ba 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -3,7 +3,6 @@ use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::store::Error; use beacon_chain::{ attestation_verification::{self, Error as AttnError, VerifiedAttestation}, - blob_verification::BlobError, observed_operations::ObservationOutcome, sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::get_block_delay_ms, @@ -22,8 +21,8 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, BlobsSidecar, EthSpec, Hash256, IndexedAttestation, - ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, + Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; @@ -701,11 +700,11 @@ impl Worker { #[allow(clippy::too_many_arguments)] pub async fn process_gossip_block_and_blobs_sidecar( self, - message_id: MessageId, - peer_id: PeerId, - peer_client: Client, - block_and_blob: Arc>, - seen_timestamp: Duration, + _message_id: MessageId, + _peer_id: PeerId, + _peer_client: Client, + _block_and_blob: Arc>, + _seen_timestamp: Duration, ) { //FIXME unimplemented!() diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index d55ef3f3b5f..6aa2c9590a5 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -36,6 +36,12 @@ ALTAIR_FORK_EPOCH: 512 # Merge BELLATRIX_FORK_VERSION: 0x02000064 BELLATRIX_FORK_EPOCH: 385536 +# Capella +CAPELLA_FORK_VERSION: 0x03000064 +CAPELLA_FORK_EPOCH: 18446744073709551615 +# Eip4844 +EIP4844_FORK_VERSION: 0x04000064 +EIP4844_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03000064 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 6e87a708f82..83e6de79064 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -36,6 +36,12 @@ ALTAIR_FORK_EPOCH: 74240 # Merge BELLATRIX_FORK_VERSION: 0x02000000 BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC +# Capella +CAPELLA_FORK_VERSION: 0x03000000 +CAPELLA_FORK_EPOCH: 18446744073709551615 +# Eip4844 +EIP4844_FORK_VERSION: 0x04000000 +EIP4844_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03000000 SHARDING_FORK_EPOCH: 
18446744073709551615 diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index fe654fcaaf5..50bfbfdc454 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -8,7 +8,7 @@ use rayon::prelude::*; use std::borrow::Cow; use types::{ AbstractExecPayload, BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256, - IndexedAttestation, SignedBeaconBlock, + SignedBeaconBlock, }; pub type Result = std::result::Result; diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 9a8c4bb6f5c..d16c9b8091c 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -633,7 +633,7 @@ impl ChainSpec { * Capella hard fork params */ capella_fork_version: [0x03, 00, 00, 00], - capella_fork_epoch: Some(Epoch::new(18446744073709551615)), + capella_fork_epoch: None, /* * Eip4844 hard fork params @@ -970,6 +970,7 @@ pub struct Config { #[serde(default = "default_eip4844_fork_version")] #[serde(with = "eth2_serde_utils::bytes_4_hex")] eip4844_fork_version: [u8; 4], + #[serde(default)] #[serde(serialize_with = "serialize_fork_epoch")] #[serde(deserialize_with = "deserialize_fork_epoch")] pub eip4844_fork_epoch: Option>, diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 42b8bdded7e..89eaff7985d 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -199,7 +199,7 @@ mod test { #[test] fn previous_and_next_fork_consistent() { - assert_eq!(ForkName::Merge.next_fork(), None); + assert_eq!(ForkName::Eip4844.next_fork(), None); assert_eq!(ForkName::Base.previous_fork(), None); for (prev_fork, fork) in ForkName::list_all().into_iter().tuple_windows() { diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index e99728ed205..431fd829f67 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -21,11 +21,15 @@ use state_processing::{ ConsensusContext, }; use std::fmt::Debug; +#[cfg(not(all(feature = "withdrawals", feature = "withdrawals-processing")))] +use std::marker::PhantomData; use std::path::Path; +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +use types::SignedBlsToExecutionChange; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, - EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedBlsToExecutionChange, - SignedVoluntaryExit, SyncAggregate, + EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedVoluntaryExit, + SyncAggregate, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -42,7 +46,10 @@ struct ExecutionMetadata { /// Newtype for testing withdrawals. 
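The config changes above schedule Capella and EIP-4844 at epoch 18446744073709551615 (`u64::MAX`, i.e. far future / not yet scheduled), and the `#[serde(default)]` added to `eip4844_fork_epoch` lets configs written before the field existed keep deserializing. A simplified standalone sketch of why the default matters, assuming the `serde` (with `derive`) and `serde_json` crates; the deserializer below is a stand-in for Lighthouse's custom fork-epoch handling, and JSON is used here instead of YAML:

    use serde::{Deserialize, Deserializer};

    // Stand-in for the custom fork-epoch deserializer: u64::MAX means the fork
    // is not scheduled, so it maps to None.
    fn deserialize_fork_epoch<'de, D>(deserializer: D) -> Result<Option<u64>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let epoch = u64::deserialize(deserializer)?;
        Ok(if epoch == u64::MAX { None } else { Some(epoch) })
    }

    #[derive(Debug, Deserialize)]
    struct Config {
        #[serde(rename = "ALTAIR_FORK_EPOCH")]
        altair_fork_epoch: u64,
        // Without #[serde(default)], the `deserialize_with` attribute would make
        // this field mandatory and older config files would fail to parse.
        #[serde(
            default,
            rename = "EIP4844_FORK_EPOCH",
            deserialize_with = "deserialize_fork_epoch"
        )]
        eip4844_fork_epoch: Option<u64>,
    }

    fn main() {
        // A legacy config without the new key still parses; the epoch is None.
        let legacy = r#"{ "ALTAIR_FORK_EPOCH": 74240 }"#;
        let config: Config = serde_json::from_str(legacy).unwrap();
        assert_eq!(config.eip4844_fork_epoch, None);

        // A newer config can mark the fork far-future explicitly with u64::MAX.
        let newer = r#"{ "ALTAIR_FORK_EPOCH": 74240, "EIP4844_FORK_EPOCH": 18446744073709551615 }"#;
        let config: Config = serde_json::from_str(newer).unwrap();
        assert_eq!(config.eip4844_fork_epoch, None);
    }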
#[derive(Debug, Clone, Deserialize)] pub struct WithdrawalsPayload { + #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] payload: FullPayload, + #[cfg(not(all(feature = "withdrawals", feature = "withdrawals-processing")))] + _phantom_data: PhantomData, } #[derive(Debug, Clone)] diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 2a50b6d2eaa..42f62681980 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -6,15 +6,14 @@ use crate::{ }; use crate::{http_metrics::metrics, validator_store::ValidatorStore}; use environment::RuntimeContext; -use eth2::types::{Graffiti, VariableList}; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; use tokio::sync::mpsc; use types::{ - AbstractExecPayload, BlindedPayload, BlobsSidecar, BlockType, EthSpec, ExecPayload, ForkName, - FullPayload, PublicKeyBytes, Slot, + AbstractExecPayload, BlindedPayload, BlockType, EthSpec, FullPayload, Graffiti, PublicKeyBytes, + Slot, }; #[derive(Debug)] diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 692365aeceb..36a0d057342 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -19,12 +19,11 @@ use std::sync::Arc; use task_executor::TaskExecutor; use types::{ attestation::Error as AttestationError, graffiti::GraffitiString, AbstractExecPayload, Address, - AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, BlobsSidecar, ChainSpec, - ContributionAndProof, Domain, Epoch, EthSpec, ExecPayload, Fork, FullPayload, Graffiti, - Hash256, Keypair, PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, - SignedBeaconBlock, SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, - Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, - SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, + AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, + Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, + Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, + SignedValidatorRegistrationData, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, + SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, }; use validator_dir::ValidatorDir; From 5282e200be1a6455f9a71ab7252cad3bdaeaade5 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Sat, 3 Dec 2022 14:05:25 -0600 Subject: [PATCH 082/263] Merge 'upstream/unstable' into capella (#3773) * Add API endpoint to count statuses of all validators (#3756) * Delete DB schema migrations for v11 and earlier (#3761) Co-authored-by: Mac L Co-authored-by: Michael Sproul --- beacon_node/beacon_chain/src/beacon_chain.rs | 40 -- .../src/beacon_fork_choice_store.rs | 22 +- .../beacon_chain/src/persisted_fork_choice.rs | 23 +- beacon_node/beacon_chain/src/schema_change.rs | 163 +-------- .../src/schema_change/migration_schema_v10.rs | 97 ----- .../src/schema_change/migration_schema_v11.rs | 77 ---- .../src/schema_change/migration_schema_v6.rs | 28 -- .../src/schema_change/migration_schema_v7.rs | 341 ------------------ .../src/schema_change/migration_schema_v8.rs | 50 --- .../src/schema_change/migration_schema_v9.rs | 176 --------- 
.../beacon_chain/src/schema_change/types.rs | 315 ---------------- .../beacon_chain/src/snapshot_cache.rs | 21 -- beacon_node/http_api/src/lib.rs | 14 + beacon_node/http_api/src/ui.rs | 71 ++++ beacon_node/store/src/hot_cold_store.rs | 10 - book/src/api-lighthouse.md | 22 ++ book/src/database-migrations.md | 6 +- 17 files changed, 122 insertions(+), 1354 deletions(-) delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v6.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/types.rs create mode 100644 beacon_node/http_api/src/ui.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 520cec1264c..1968a38a5b2 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1009,46 +1009,6 @@ impl BeaconChain { Ok(self.store.get_state(state_root, slot)?) } - /// Run a function with mutable access to a state for `block_root`. - /// - /// The primary purpose of this function is to borrow a state with its tree hash cache - /// from the snapshot cache *without moving it*. This means that calls to this function should - /// be kept to an absolute minimum, because holding the snapshot cache lock has the ability - /// to delay block import. - /// - /// If there is no appropriate state in the snapshot cache then one will be loaded from disk. - /// If no state is found on disk then `Ok(None)` will be returned. - /// - /// The 2nd parameter to the closure is a bool indicating whether the snapshot cache was used, - /// which can inform logging/metrics. - /// - /// NOTE: the medium-term plan is to delete this function and the snapshot cache in favour - /// of `tree-states`, where all caches are CoW and everything is good in the world. - pub fn with_mutable_state_for_block>( - &self, - block: &SignedBeaconBlock, - block_root: Hash256, - f: F, - ) -> Result, Error> - where - F: FnOnce(&mut BeaconState, bool) -> Result, - { - if let Some(state) = self - .snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .ok_or(Error::SnapshotCacheLockTimeout)? - .borrow_unadvanced_state_mut(block_root) - { - let cache_hit = true; - f(state, cache_hit).map(Some) - } else if let Some(mut state) = self.get_state(&block.state_root(), Some(block.slot()))? { - let cache_hit = false; - f(&mut state, cache_hit).map(Some) - } else { - Ok(None) - } - } - /// Return the sync committee at `slot + 1` from the canonical chain. 
/// /// This is useful when dealing with sync committee messages, because messages are signed diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 5369e168af9..d3b17ba1ea0 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -61,7 +61,7 @@ pub fn get_effective_balances(state: &BeaconState) -> Vec { } #[superstruct( - variants(V1, V8), + variants(V8), variant_attributes(derive(PartialEq, Clone, Debug, Encode, Decode)), no_enum )] @@ -75,13 +75,11 @@ pub(crate) struct CacheItem { pub(crate) type CacheItem = CacheItemV8; #[superstruct( - variants(V1, V8), + variants(V8), variant_attributes(derive(PartialEq, Clone, Default, Debug, Encode, Decode)), no_enum )] pub struct BalancesCache { - #[superstruct(only(V1))] - pub(crate) items: Vec, #[superstruct(only(V8))] pub(crate) items: Vec, } @@ -366,26 +364,20 @@ where } /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database. -#[superstruct( - variants(V1, V7, V8, V10, V11), - variant_attributes(derive(Encode, Decode)), - no_enum -)] +#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)] pub struct PersistedForkChoiceStore { - #[superstruct(only(V1, V7))] - pub balances_cache: BalancesCacheV1, - #[superstruct(only(V8, V10, V11))] + #[superstruct(only(V11))] pub balances_cache: BalancesCacheV8, pub time: Slot, pub finalized_checkpoint: Checkpoint, pub justified_checkpoint: Checkpoint, pub justified_balances: Vec, pub best_justified_checkpoint: Checkpoint, - #[superstruct(only(V10, V11))] + #[superstruct(only(V11))] pub unrealized_justified_checkpoint: Checkpoint, - #[superstruct(only(V10, V11))] + #[superstruct(only(V11))] pub unrealized_finalized_checkpoint: Checkpoint, - #[superstruct(only(V7, V8, V10, V11))] + #[superstruct(only(V11))] pub proposer_boost_root: Hash256, #[superstruct(only(V11))] pub equivocating_indices: BTreeSet, diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index a60dacdc7c0..829dc2a8a77 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -1,7 +1,4 @@ -use crate::beacon_fork_choice_store::{ - PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11, - PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8, -}; +use crate::beacon_fork_choice_store::PersistedForkChoiceStoreV11; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use store::{DBColumn, Error, StoreItem}; @@ -10,21 +7,9 @@ use superstruct::superstruct; // If adding a new version you should update this type alias and fix the breakages. 
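The `superstruct` pruning above relies on the convention spelled out in the comment just before the alias: every on-disk format gets its own versioned struct, the `PersistedForkChoice` alias always points at the newest one, and retired versions are deleted together with their migrations. A simplified illustration of that convention, written without the `superstruct` crate and with only a few representative fields:

    use std::collections::BTreeSet;

    #[derive(Debug, Default)]
    struct PersistedForkChoiceStoreV11 {
        time: u64,
        proposer_boost_root: [u8; 32],
        equivocating_indices: BTreeSet<u64>,
    }

    // Callers only ever name the alias. Bumping the on-disk schema means adding
    // a V12 struct, repointing this alias, and fixing whatever breaks; once a
    // version's migration is retired, its struct can be deleted outright.
    type PersistedForkChoiceStore = PersistedForkChoiceStoreV11;

    fn main() {
        let store = PersistedForkChoiceStore::default();
        println!("{store:?}");
    }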
pub type PersistedForkChoice = PersistedForkChoiceV11; -#[superstruct( - variants(V1, V7, V8, V10, V11), - variant_attributes(derive(Encode, Decode)), - no_enum -)] +#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)] pub struct PersistedForkChoice { pub fork_choice: fork_choice::PersistedForkChoice, - #[superstruct(only(V1))] - pub fork_choice_store: PersistedForkChoiceStoreV1, - #[superstruct(only(V7))] - pub fork_choice_store: PersistedForkChoiceStoreV7, - #[superstruct(only(V8))] - pub fork_choice_store: PersistedForkChoiceStoreV8, - #[superstruct(only(V10))] - pub fork_choice_store: PersistedForkChoiceStoreV10, #[superstruct(only(V11))] pub fork_choice_store: PersistedForkChoiceStoreV11, } @@ -47,8 +32,4 @@ macro_rules! impl_store_item { }; } -impl_store_item!(PersistedForkChoiceV1); -impl_store_item!(PersistedForkChoiceV7); -impl_store_item!(PersistedForkChoiceV8); -impl_store_item!(PersistedForkChoiceV10); impl_store_item!(PersistedForkChoiceV11); diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index fd55048c388..73906b1b586 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,20 +1,9 @@ //! Utilities for managing database schema changes. -mod migration_schema_v10; -mod migration_schema_v11; mod migration_schema_v12; mod migration_schema_v13; -mod migration_schema_v6; -mod migration_schema_v7; -mod migration_schema_v8; -mod migration_schema_v9; -mod types; -use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY}; +use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; use crate::eth1_chain::SszEth1; -use crate::persisted_fork_choice::{ - PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV11, PersistedForkChoiceV7, - PersistedForkChoiceV8, -}; use crate::types::ChainSpec; use slog::{warn, Logger}; use std::sync::Arc; @@ -23,6 +12,7 @@ use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; use store::{Error as StoreError, StoreItem}; /// Migrate the database from one schema version to another, applying all requisite mutations. +#[allow(clippy::only_used_in_recursion)] // spec is not used but likely to be used in future pub fn migrate_schema( db: Arc>, deposit_contract_deploy_block: u64, @@ -62,156 +52,9 @@ pub fn migrate_schema( } // - // Migrations from before SchemaVersion(5) are deprecated. + // Migrations from before SchemaVersion(11) are deprecated. // - // Migration for adding `execution_status` field to the fork choice store. - (SchemaVersion(5), SchemaVersion(6)) => { - // Database operations to be done atomically - let mut ops = vec![]; - - // The top-level `PersistedForkChoice` struct is still V1 but will have its internal - // bytes for the fork choice updated to V6. - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(mut persisted_fork_choice) = fork_choice_opt { - migration_schema_v6::update_execution_statuses::(&mut persisted_fork_choice) - .map_err(StoreError::SchemaMigrationError)?; - - // Store the converted fork choice store under the same key. - ops.push(persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - // 1. Add `proposer_boost_root`. - // 2. Update `justified_epoch` to `justified_checkpoint` and `finalized_epoch` to - // `finalized_checkpoint`. - // 3. 
This migration also includes a potential update to the justified - // checkpoint in case the fork choice store's justified checkpoint and finalized checkpoint - // combination does not actually exist for any blocks in fork choice. This was possible in - // the consensus spec prior to v1.1.6. - // - // Relevant issues: - // - // https://github.com/sigp/lighthouse/issues/2741 - // https://github.com/ethereum/consensus-specs/pull/2727 - // https://github.com/ethereum/consensus-specs/pull/2730 - (SchemaVersion(6), SchemaVersion(7)) => { - // Database operations to be done atomically - let mut ops = vec![]; - - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(persisted_fork_choice_v1) = fork_choice_opt { - // This migrates the `PersistedForkChoiceStore`, adding the `proposer_boost_root` field. - let mut persisted_fork_choice_v7 = persisted_fork_choice_v1.into(); - - let result = migration_schema_v7::update_fork_choice::( - &mut persisted_fork_choice_v7, - db.clone(), - ); - - // Fall back to re-initializing fork choice from an anchor state if necessary. - if let Err(e) = result { - warn!(log, "Unable to migrate to database schema 7, re-initializing fork choice"; "error" => ?e); - migration_schema_v7::update_with_reinitialized_fork_choice::( - &mut persisted_fork_choice_v7, - db.clone(), - spec, - ) - .map_err(StoreError::SchemaMigrationError)?; - } - - // Store the converted fork choice store under the same key. - ops.push(persisted_fork_choice_v7.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - // Migration to add an `epoch` key to the fork choice's balances cache. - (SchemaVersion(7), SchemaVersion(8)) => { - let mut ops = vec![]; - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = - migration_schema_v8::update_fork_choice::(fork_choice, db.clone())?; - - ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - // Upgrade from v8 to v9 to separate the execution payloads into their own column. - (SchemaVersion(8), SchemaVersion(9)) => { - migration_schema_v9::upgrade_to_v9::(db.clone(), log)?; - db.store_schema_version(to) - } - // Downgrade from v9 to v8 to ignore the separation of execution payloads - // NOTE: only works before the Bellatrix fork epoch. - (SchemaVersion(9), SchemaVersion(8)) => { - migration_schema_v9::downgrade_from_v9::(db.clone(), log)?; - db.store_schema_version(to) - } - (SchemaVersion(9), SchemaVersion(10)) => { - let mut ops = vec![]; - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = migration_schema_v10::update_fork_choice(fork_choice)?; - - ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - (SchemaVersion(10), SchemaVersion(9)) => { - let mut ops = vec![]; - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = migration_schema_v10::downgrade_fork_choice(fork_choice)?; - - ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - // Upgrade from v10 to v11 adding support for equivocating indices to fork choice. 
- (SchemaVersion(10), SchemaVersion(11)) => { - let mut ops = vec![]; - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = migration_schema_v11::update_fork_choice(fork_choice); - - ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - // Downgrade from v11 to v10 removing support for equivocating indices from fork choice. - (SchemaVersion(11), SchemaVersion(10)) => { - let mut ops = vec![]; - let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; - if let Some(fork_choice) = fork_choice_opt { - let updated_fork_choice = - migration_schema_v11::downgrade_fork_choice(fork_choice, log); - - ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } // Upgrade from v11 to v12 to store richer metadata in the attestation op pool. (SchemaVersion(11), SchemaVersion(12)) => { let ops = migration_schema_v12::upgrade_to_v12::(db.clone(), log)?; diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs deleted file mode 100644 index 70e0007851c..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs +++ /dev/null @@ -1,97 +0,0 @@ -use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV8}; -use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV8}; -use crate::schema_change::{ - types::{SszContainerV10, SszContainerV7}, - StoreError, -}; -use proto_array::core::SszContainer; -use ssz::{Decode, Encode}; - -pub fn update_fork_choice( - mut fork_choice: PersistedForkChoiceV8, -) -> Result { - let ssz_container_v7 = SszContainerV7::from_ssz_bytes( - &fork_choice.fork_choice.proto_array_bytes, - ) - .map_err(|e| { - StoreError::SchemaMigrationError(format!( - "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", - e - )) - })?; - - // These transformations instantiate `node.unrealized_justified_checkpoint` and - // `node.unrealized_finalized_checkpoint` to `None`. 
- let ssz_container_v10: SszContainerV10 = ssz_container_v7.into(); - let ssz_container: SszContainer = ssz_container_v10.into(); - fork_choice.fork_choice.proto_array_bytes = ssz_container.as_ssz_bytes(); - - Ok(fork_choice.into()) -} - -pub fn downgrade_fork_choice( - mut fork_choice: PersistedForkChoiceV10, -) -> Result { - let ssz_container_v10 = SszContainerV10::from_ssz_bytes( - &fork_choice.fork_choice.proto_array_bytes, - ) - .map_err(|e| { - StoreError::SchemaMigrationError(format!( - "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", - e - )) - })?; - - let ssz_container_v7: SszContainerV7 = ssz_container_v10.into(); - fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes(); - - Ok(fork_choice.into()) -} - -impl From for PersistedForkChoiceStoreV10 { - fn from(other: PersistedForkChoiceStoreV8) -> Self { - Self { - balances_cache: other.balances_cache, - time: other.time, - finalized_checkpoint: other.finalized_checkpoint, - justified_checkpoint: other.justified_checkpoint, - justified_balances: other.justified_balances, - best_justified_checkpoint: other.best_justified_checkpoint, - unrealized_justified_checkpoint: other.best_justified_checkpoint, - unrealized_finalized_checkpoint: other.finalized_checkpoint, - proposer_boost_root: other.proposer_boost_root, - } - } -} - -impl From for PersistedForkChoiceV10 { - fn from(other: PersistedForkChoiceV8) -> Self { - Self { - fork_choice: other.fork_choice, - fork_choice_store: other.fork_choice_store.into(), - } - } -} - -impl From for PersistedForkChoiceStoreV8 { - fn from(other: PersistedForkChoiceStoreV10) -> Self { - Self { - balances_cache: other.balances_cache, - time: other.time, - finalized_checkpoint: other.finalized_checkpoint, - justified_checkpoint: other.justified_checkpoint, - justified_balances: other.justified_balances, - best_justified_checkpoint: other.best_justified_checkpoint, - proposer_boost_root: other.proposer_boost_root, - } - } -} - -impl From for PersistedForkChoiceV8 { - fn from(other: PersistedForkChoiceV10) -> Self { - Self { - fork_choice: other.fork_choice, - fork_choice_store: other.fork_choice_store.into(), - } - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs deleted file mode 100644 index dde80a5cac7..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11}; -use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV11}; -use slog::{warn, Logger}; -use std::collections::BTreeSet; - -/// Add the equivocating indices field. 
-pub fn update_fork_choice(fork_choice_v10: PersistedForkChoiceV10) -> PersistedForkChoiceV11 { - let PersistedForkChoiceStoreV10 { - balances_cache, - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - proposer_boost_root, - } = fork_choice_v10.fork_choice_store; - - PersistedForkChoiceV11 { - fork_choice: fork_choice_v10.fork_choice, - fork_choice_store: PersistedForkChoiceStoreV11 { - balances_cache, - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - proposer_boost_root, - equivocating_indices: BTreeSet::new(), - }, - } -} - -pub fn downgrade_fork_choice( - fork_choice_v11: PersistedForkChoiceV11, - log: Logger, -) -> PersistedForkChoiceV10 { - let PersistedForkChoiceStoreV11 { - balances_cache, - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - proposer_boost_root, - equivocating_indices, - } = fork_choice_v11.fork_choice_store; - - if !equivocating_indices.is_empty() { - warn!( - log, - "Deleting slashed validators from fork choice store"; - "count" => equivocating_indices.len(), - "message" => "this may make your node more susceptible to following the wrong chain", - ); - } - - PersistedForkChoiceV10 { - fork_choice: fork_choice_v11.fork_choice, - fork_choice_store: PersistedForkChoiceStoreV10 { - balances_cache, - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - proposer_boost_root, - }, - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v6.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v6.rs deleted file mode 100644 index 231da838cdc..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v6.rs +++ /dev/null @@ -1,28 +0,0 @@ -///! These functions and structs are only relevant to the database migration from schema 5 to 6. -use crate::persisted_fork_choice::PersistedForkChoiceV1; -use crate::schema_change::types::{SszContainerV1, SszContainerV6}; -use crate::BeaconChainTypes; -use ssz::four_byte_option_impl; -use ssz::{Decode, Encode}; - -// Define a "legacy" implementation of `Option` which uses four bytes for encoding the union -// selector. -four_byte_option_impl!(four_byte_option_usize, usize); - -pub(crate) fn update_execution_statuses( - persisted_fork_choice: &mut PersistedForkChoiceV1, -) -> Result<(), String> { - let ssz_container_v1 = - SszContainerV1::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes) - .map_err(|e| { - format!( - "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", - e - ) - })?; - - let ssz_container_v6: SszContainerV6 = ssz_container_v1.into(); - - persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v6.as_ssz_bytes(); - Ok(()) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs deleted file mode 100644 index d953d30027f..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs +++ /dev/null @@ -1,341 +0,0 @@ -///! These functions and structs are only relevant to the database migration from schema 6 to 7. 
-use crate::beacon_chain::BeaconChainTypes; -use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7}; -use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; -use crate::schema_change::types::{ProtoNodeV6, SszContainerV10, SszContainerV6, SszContainerV7}; -use crate::types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; -use crate::{BeaconForkChoiceStore, BeaconSnapshot}; -use fork_choice::ForkChoice; -use proto_array::{core::ProtoNode, core::SszContainer, CountUnrealizedFull, ProtoArrayForkChoice}; -use ssz::four_byte_option_impl; -use ssz::{Decode, Encode}; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use store::hot_cold_store::HotColdDB; -use store::iter::BlockRootsIterator; -use store::Error as StoreError; - -// Define a "legacy" implementation of `Option` which uses four bytes for encoding the union -// selector. -four_byte_option_impl!(four_byte_option_usize, usize); - -/// This method is used to re-initialize fork choice from the finalized state in case we hit an -/// error during this migration. -pub(crate) fn update_with_reinitialized_fork_choice( - persisted_fork_choice: &mut PersistedForkChoiceV7, - db: Arc>, - spec: &ChainSpec, -) -> Result<(), String> { - let anchor_block_root = persisted_fork_choice - .fork_choice_store - .finalized_checkpoint - .root; - let anchor_block = db - .get_full_block_prior_to_v9(&anchor_block_root) - .map_err(|e| format!("{:?}", e))? - .ok_or_else(|| "Missing anchor beacon block".to_string())?; - let anchor_state = db - .get_state(&anchor_block.state_root(), Some(anchor_block.slot())) - .map_err(|e| format!("{:?}", e))? - .ok_or_else(|| "Missing anchor beacon state".to_string())?; - let snapshot = BeaconSnapshot { - beacon_block: Arc::new(anchor_block), - beacon_block_root: anchor_block_root, - beacon_state: anchor_state, - }; - let store = BeaconForkChoiceStore::get_forkchoice_store(db, &snapshot); - let fork_choice = ForkChoice::from_anchor( - store, - anchor_block_root, - &snapshot.beacon_block, - &snapshot.beacon_state, - // Don't provide the current slot here, just use what's in the store. We don't need to know - // the head here, plus it's nice to avoid mutating fork choice during this process. - None, - // This config will get overwritten on startup. - CountUnrealizedFull::default(), - spec, - ) - .map_err(|e| format!("{:?}", e))?; - persisted_fork_choice.fork_choice = fork_choice.to_persisted(); - Ok(()) -} - -pub(crate) fn update_fork_choice( - persisted_fork_choice: &mut PersistedForkChoiceV7, - db: Arc>, -) -> Result<(), StoreError> { - // `PersistedForkChoice` stores the `ProtoArray` as a `Vec`. Deserialize these - // bytes assuming the legacy struct, and transform them to the new struct before - // re-serializing. - let ssz_container_v6 = - SszContainerV6::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes) - .map_err(|e| { - StoreError::SchemaMigrationError(format!( - "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", - e - )) - })?; - - // Clone the V6 proto nodes in order to maintain information about `node.justified_epoch` - // and `node.finalized_epoch`. - let nodes_v6 = ssz_container_v6.nodes.clone(); - - let justified_checkpoint = persisted_fork_choice.fork_choice_store.justified_checkpoint; - let finalized_checkpoint = persisted_fork_choice.fork_choice_store.finalized_checkpoint; - - // These transformations instantiate `node.justified_checkpoint` and `node.finalized_checkpoint` - // to `None`. 
- let ssz_container_v7: SszContainerV7 = - ssz_container_v6.into_ssz_container_v7(justified_checkpoint, finalized_checkpoint); - let ssz_container_v10: SszContainerV10 = ssz_container_v7.into(); - let ssz_container: SszContainer = ssz_container_v10.into(); - // `CountUnrealizedFull::default()` represents the count-unrealized-full config which will be overwritten on startup. - let mut fork_choice: ProtoArrayForkChoice = - (ssz_container, CountUnrealizedFull::default()).into(); - - update_checkpoints::(finalized_checkpoint.root, &nodes_v6, &mut fork_choice, db) - .map_err(StoreError::SchemaMigrationError)?; - - // Update the justified checkpoint in the store in case we have a discrepancy - // between the store and the proto array nodes. - update_store_justified_checkpoint(persisted_fork_choice, &mut fork_choice) - .map_err(StoreError::SchemaMigrationError)?; - - // Need to downgrade the SSZ container to V7 so that all migrations can be applied in sequence. - let ssz_container = SszContainer::from(&fork_choice); - let ssz_container_v7 = SszContainerV7::from(ssz_container); - - persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes(); - persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint; - - Ok(()) -} - -struct HeadInfo { - index: usize, - root: Hash256, - slot: Slot, -} - -fn update_checkpoints( - finalized_root: Hash256, - nodes_v6: &[ProtoNodeV6], - fork_choice: &mut ProtoArrayForkChoice, - db: Arc>, -) -> Result<(), String> { - let heads = find_finalized_descendant_heads(finalized_root, fork_choice); - - // For each head, first gather all epochs we will need to find justified or finalized roots for. - for head in heads { - // `relevant_epochs` are epochs for which we will need to find the root at the start slot. - // We don't need to worry about whether the are finalized or justified epochs. - let mut relevant_epochs = HashSet::new(); - let relevant_epoch_finder = |index, _: &mut ProtoNode| { - let (justified_epoch, finalized_epoch) = nodes_v6 - .get(index) - .map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch)) - .ok_or_else(|| "Index not found in legacy proto nodes".to_string())?; - relevant_epochs.insert(justified_epoch); - relevant_epochs.insert(finalized_epoch); - Ok(()) - }; - - apply_to_chain_of_ancestors( - finalized_root, - head.index, - fork_choice, - relevant_epoch_finder, - )?; - - // find the block roots associated with each relevant epoch. - let roots_by_epoch = - map_relevant_epochs_to_roots::(head.root, head.slot, relevant_epochs, db.clone())?; - - // Apply this mutator to the chain of descendants from this head, adding justified - // and finalized checkpoints for each. - let node_mutator = |index, node: &mut ProtoNode| { - let (justified_epoch, finalized_epoch) = nodes_v6 - .get(index) - .map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch)) - .ok_or_else(|| "Index not found in legacy proto nodes".to_string())?; - - // Update the checkpoints only if they haven't already been populated. 
- if node.justified_checkpoint.is_none() { - let justified_checkpoint = - roots_by_epoch - .get(&justified_epoch) - .map(|&root| Checkpoint { - epoch: justified_epoch, - root, - }); - node.justified_checkpoint = justified_checkpoint; - } - if node.finalized_checkpoint.is_none() { - let finalized_checkpoint = - roots_by_epoch - .get(&finalized_epoch) - .map(|&root| Checkpoint { - epoch: finalized_epoch, - root, - }); - node.finalized_checkpoint = finalized_checkpoint; - } - - Ok(()) - }; - - apply_to_chain_of_ancestors(finalized_root, head.index, fork_choice, node_mutator)?; - } - Ok(()) -} - -/// Coverts the given `HashSet` to a `Vec` then reverse sorts by `Epoch`. Next, a -/// single `BlockRootsIterator` is created which is used to iterate backwards from the given -/// `head_root` and `head_slot`, finding the block root at the start slot of each epoch. -fn map_relevant_epochs_to_roots( - head_root: Hash256, - head_slot: Slot, - epochs: HashSet, - db: Arc>, -) -> Result, String> { - // Convert the `HashSet` to a `Vec` and reverse sort the epochs. - let mut relevant_epochs = epochs.into_iter().collect::>(); - relevant_epochs.sort_unstable_by(|a, b| b.cmp(a)); - - // Iterate backwards from the given `head_root` and `head_slot` and find the block root at each epoch. - let mut iter = std::iter::once(Ok((head_root, head_slot))) - .chain(BlockRootsIterator::from_block(&db, head_root).map_err(|e| format!("{:?}", e))?); - let mut roots_by_epoch = HashMap::new(); - for epoch in relevant_epochs { - let start_slot = epoch.start_slot(T::EthSpec::slots_per_epoch()); - - let root = iter - .find_map(|next| match next { - Ok((root, slot)) => (slot == start_slot).then_some(Ok(root)), - Err(e) => Some(Err(format!("{:?}", e))), - }) - .transpose()? - .ok_or_else(|| "Justified root not found".to_string())?; - roots_by_epoch.insert(epoch, root); - } - Ok(roots_by_epoch) -} - -/// Applies a mutator to every node in a chain, starting from the node at the given -/// `head_index` and iterating through ancestors until the `finalized_root` is reached. -fn apply_to_chain_of_ancestors( - finalized_root: Hash256, - head_index: usize, - fork_choice: &mut ProtoArrayForkChoice, - mut node_mutator: F, -) -> Result<(), String> -where - F: FnMut(usize, &mut ProtoNode) -> Result<(), String>, -{ - let head = fork_choice - .core_proto_array_mut() - .nodes - .get_mut(head_index) - .ok_or_else(|| "Head index not found in proto nodes".to_string())?; - - node_mutator(head_index, head)?; - - let mut parent_index_opt = head.parent; - let mut parent_opt = - parent_index_opt.and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index)); - - // Iterate backwards through all parents until there is no reference to a parent or we reach - // the `finalized_root` node. - while let (Some(parent), Some(parent_index)) = (parent_opt, parent_index_opt) { - node_mutator(parent_index, parent)?; - - // Break out of this while loop *after* the `node_mutator` has been applied to the finalized - // node. - if parent.root == finalized_root { - break; - } - - // Update parent values - parent_index_opt = parent.parent; - parent_opt = parent_index_opt - .and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index)); - } - Ok(()) -} - -/// Finds all heads by finding all nodes in the proto array that are not referenced as parents. Then -/// checks that these nodes are descendants of the finalized root in order to determine if they are -/// relevant. 
-fn find_finalized_descendant_heads( - finalized_root: Hash256, - fork_choice: &ProtoArrayForkChoice, -) -> Vec { - let nodes_referenced_as_parents: HashSet = fork_choice - .core_proto_array() - .nodes - .iter() - .filter_map(|node| node.parent) - .collect::>(); - - fork_choice - .core_proto_array() - .nodes - .iter() - .enumerate() - .filter_map(|(index, node)| { - (!nodes_referenced_as_parents.contains(&index) - && fork_choice.is_descendant(finalized_root, node.root)) - .then_some(HeadInfo { - index, - root: node.root, - slot: node.slot, - }) - }) - .collect::>() -} - -fn update_store_justified_checkpoint( - persisted_fork_choice: &mut PersistedForkChoiceV7, - fork_choice: &mut ProtoArrayForkChoice, -) -> Result<(), String> { - let justified_checkpoint = fork_choice - .core_proto_array() - .nodes - .iter() - .filter_map(|node| { - (node.finalized_checkpoint - == Some(persisted_fork_choice.fork_choice_store.finalized_checkpoint)) - .then_some(node.justified_checkpoint) - .flatten() - }) - .max_by_key(|justified_checkpoint| justified_checkpoint.epoch) - .ok_or("Proto node with current finalized checkpoint not found")?; - - fork_choice.core_proto_array_mut().justified_checkpoint = justified_checkpoint; - Ok(()) -} - -// Add a zero `proposer_boost_root` when migrating from V1-6 to V7. -impl From for PersistedForkChoiceStoreV7 { - fn from(other: PersistedForkChoiceStoreV1) -> Self { - Self { - balances_cache: other.balances_cache, - time: other.time, - finalized_checkpoint: other.finalized_checkpoint, - justified_checkpoint: other.justified_checkpoint, - justified_balances: other.justified_balances, - best_justified_checkpoint: other.best_justified_checkpoint, - proposer_boost_root: Hash256::zero(), - } - } -} - -impl From for PersistedForkChoiceV7 { - fn from(other: PersistedForkChoiceV1) -> Self { - Self { - fork_choice: other.fork_choice, - fork_choice_store: other.fork_choice_store.into(), - } - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs deleted file mode 100644 index ef3f7857f9a..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs +++ /dev/null @@ -1,50 +0,0 @@ -use crate::beacon_chain::BeaconChainTypes; -use crate::beacon_fork_choice_store::{ - BalancesCacheV8, CacheItemV8, PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8, -}; -use crate::persisted_fork_choice::{PersistedForkChoiceV7, PersistedForkChoiceV8}; -use std::sync::Arc; -use store::{Error as StoreError, HotColdDB}; -use types::EthSpec; - -pub fn update_fork_choice( - fork_choice: PersistedForkChoiceV7, - db: Arc>, -) -> Result { - let PersistedForkChoiceStoreV7 { - balances_cache, - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - proposer_boost_root, - } = fork_choice.fork_choice_store; - let mut fork_choice_store = PersistedForkChoiceStoreV8 { - balances_cache: BalancesCacheV8::default(), - time, - finalized_checkpoint, - justified_checkpoint, - justified_balances, - best_justified_checkpoint, - proposer_boost_root, - }; - - // Add epochs to the balances cache. It's safe to just use the block's epoch because - // before schema v8 the cache would always miss on skipped slots. - for item in balances_cache.items { - // Drop any blocks that aren't found, they're presumably too old and this is only a cache. - if let Some(block) = db.get_full_block_prior_to_v9(&item.block_root)? 
{ - fork_choice_store.balances_cache.items.push(CacheItemV8 { - block_root: item.block_root, - epoch: block.slot().epoch(T::EthSpec::slots_per_epoch()), - balances: item.balances, - }); - } - } - - Ok(PersistedForkChoiceV8 { - fork_choice: fork_choice.fork_choice, - fork_choice_store, - }) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs deleted file mode 100644 index e2c48d5c89d..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs +++ /dev/null @@ -1,176 +0,0 @@ -use crate::beacon_chain::BeaconChainTypes; -use slog::{debug, error, info, Logger}; -use slot_clock::SlotClock; -use std::sync::Arc; -use std::time::Duration; -use store::{DBColumn, Error, HotColdDB, KeyValueStore}; -use types::{EthSpec, Hash256, Slot}; - -const OPS_PER_BLOCK_WRITE: usize = 2; - -/// The slot clock isn't usually available before the database is initialized, so we construct a -/// temporary slot clock by reading the genesis state. It should always exist if the database is -/// initialized at a prior schema version, however we still handle the lack of genesis state -/// gracefully. -fn get_slot_clock( - db: &HotColdDB, - log: &Logger, -) -> Result, Error> { - // At schema v8 the genesis block must be a *full* block (with payload). In all likeliness it - // actually has no payload. - let spec = db.get_chain_spec(); - let genesis_block = if let Some(block) = db.get_full_block_prior_to_v9(&Hash256::zero())? { - block - } else { - error!(log, "Missing genesis block"); - return Ok(None); - }; - let genesis_state = - if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? { - state - } else { - error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root()); - return Ok(None); - }; - Ok(Some(T::SlotClock::new( - spec.genesis_slot, - Duration::from_secs(genesis_state.genesis_time()), - Duration::from_secs(spec.seconds_per_slot), - ))) -} - -pub fn upgrade_to_v9( - db: Arc>, - log: Logger, -) -> Result<(), Error> { - // This upgrade is a no-op if the Bellatrix fork epoch has not already passed. This migration - // was implemented before the activation of Bellatrix on all networks except Kiln, so the only - // users who will need to wait for the slow copying migration are Kiln users. - let slot_clock = if let Some(slot_clock) = get_slot_clock::(&db, &log)? { - slot_clock - } else { - error!( - log, - "Unable to complete migration because genesis state or genesis block is missing" - ); - return Err(Error::SlotClockUnavailableForMigration); - }; - - let current_epoch = if let Some(slot) = slot_clock.now() { - slot.epoch(T::EthSpec::slots_per_epoch()) - } else { - return Ok(()); - }; - - let bellatrix_fork_epoch = if let Some(fork_epoch) = db.get_chain_spec().bellatrix_fork_epoch { - fork_epoch - } else { - info!( - log, - "Upgrading database schema to v9 (no-op)"; - "info" => "To downgrade before the merge run `lighthouse db migrate`" - ); - return Ok(()); - }; - - if current_epoch >= bellatrix_fork_epoch { - info!( - log, - "Upgrading database schema to v9"; - "info" => "This will take several minutes. Each block will be read from and \ - re-written to the database. You may safely exit now (Ctrl-C) and resume \ - the migration later. Downgrading is no longer possible." 
- ); - - for res in db.hot_db.iter_column_keys(DBColumn::BeaconBlock) { - let block_root = res?; - let block = match db.get_full_block_prior_to_v9(&block_root) { - // A pre-v9 block is present. - Ok(Some(block)) => block, - // A block is missing. - Ok(None) => return Err(Error::BlockNotFound(block_root)), - // There was an error reading a pre-v9 block. Try reading it as a post-v9 block. - Err(_) => { - if db.try_get_full_block(&block_root)?.is_some() { - // The block is present as a post-v9 block, assume that it was already - // correctly migrated. - continue; - } else { - // This scenario should not be encountered since a prior check has ensured - // that this block exists. - return Err(Error::V9MigrationFailure(block_root)); - } - } - }; - - if block.message().execution_payload().is_ok() { - // Overwrite block with blinded block and store execution payload separately. - debug!( - log, - "Rewriting Bellatrix block"; - "block_root" => ?block_root, - ); - - let mut kv_batch = Vec::with_capacity(OPS_PER_BLOCK_WRITE); - db.block_as_kv_store_ops(&block_root, block, &mut kv_batch)?; - db.hot_db.do_atomically(kv_batch)?; - } - } - } else { - info!( - log, - "Upgrading database schema to v9 (no-op)"; - "info" => "To downgrade before the merge run `lighthouse db migrate`" - ); - } - - Ok(()) -} - -// This downgrade is conditional and will only succeed if the Bellatrix fork epoch hasn't been -// reached. -pub fn downgrade_from_v9( - db: Arc>, - log: Logger, -) -> Result<(), Error> { - let slot_clock = if let Some(slot_clock) = get_slot_clock::(&db, &log)? { - slot_clock - } else { - error!( - log, - "Unable to complete migration because genesis state or genesis block is missing" - ); - return Err(Error::SlotClockUnavailableForMigration); - }; - - let current_epoch = if let Some(slot) = slot_clock.now() { - slot.epoch(T::EthSpec::slots_per_epoch()) - } else { - return Ok(()); - }; - - let bellatrix_fork_epoch = if let Some(fork_epoch) = db.get_chain_spec().bellatrix_fork_epoch { - fork_epoch - } else { - info!( - log, - "Downgrading database schema from v9"; - "info" => "You need to upgrade to v9 again before the merge" - ); - return Ok(()); - }; - - if current_epoch >= bellatrix_fork_epoch { - error!( - log, - "Downgrading from schema v9 after the Bellatrix fork epoch is not supported"; - "current_epoch" => current_epoch, - "bellatrix_fork_epoch" => bellatrix_fork_epoch, - "reason" => "You need a v9 schema database to run on a merged version of Prater or \ - mainnet. On Kiln, you have to re-sync", - ); - Err(Error::ResyncRequiredForExecutionPayloadSeparation) - } else { - Ok(()) - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/types.rs b/beacon_node/beacon_chain/src/schema_change/types.rs deleted file mode 100644 index 02a54c1a3f8..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/types.rs +++ /dev/null @@ -1,315 +0,0 @@ -use crate::types::{AttestationShufflingId, Checkpoint, Epoch, Hash256, Slot}; -use proto_array::core::{ProposerBoost, ProtoNode, SszContainer, VoteTracker}; -use proto_array::ExecutionStatus; -use ssz::four_byte_option_impl; -use ssz::Encode; -use ssz_derive::{Decode, Encode}; -use superstruct::superstruct; - -// Define a "legacy" implementation of `Option` which uses four bytes for encoding the union -// selector. 
-four_byte_option_impl!(four_byte_option_usize, usize); -four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); - -#[superstruct( - variants(V1, V6, V7, V10), - variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode)), - no_enum -)] -pub struct ProtoNode { - pub slot: Slot, - pub state_root: Hash256, - pub target_root: Hash256, - pub current_epoch_shuffling_id: AttestationShufflingId, - pub next_epoch_shuffling_id: AttestationShufflingId, - pub root: Hash256, - #[ssz(with = "four_byte_option_usize")] - pub parent: Option, - #[superstruct(only(V1, V6))] - pub justified_epoch: Epoch, - #[superstruct(only(V1, V6))] - pub finalized_epoch: Epoch, - #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V7, V10))] - pub justified_checkpoint: Option, - #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V7, V10))] - pub finalized_checkpoint: Option, - pub weight: u64, - #[ssz(with = "four_byte_option_usize")] - pub best_child: Option, - #[ssz(with = "four_byte_option_usize")] - pub best_descendant: Option, - #[superstruct(only(V6, V7, V10))] - pub execution_status: ExecutionStatus, - #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V10))] - pub unrealized_justified_checkpoint: Option, - #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V10))] - pub unrealized_finalized_checkpoint: Option, -} - -impl Into for ProtoNodeV1 { - fn into(self) -> ProtoNodeV6 { - ProtoNodeV6 { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_epoch: self.justified_epoch, - finalized_epoch: self.finalized_epoch, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - // We set the following execution value as if the block is a pre-merge-fork block. This - // is safe as long as we never import a merge block with the old version of proto-array. - // This will be safe since we can't actually process merge blocks until we've made this - // change to fork choice. 
- execution_status: ExecutionStatus::irrelevant(), - } - } -} - -impl Into for ProtoNodeV6 { - fn into(self) -> ProtoNodeV7 { - ProtoNodeV7 { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_checkpoint: None, - finalized_checkpoint: None, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - execution_status: self.execution_status, - } - } -} - -impl Into for ProtoNodeV7 { - fn into(self) -> ProtoNodeV10 { - ProtoNodeV10 { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - execution_status: self.execution_status, - unrealized_justified_checkpoint: None, - unrealized_finalized_checkpoint: None, - } - } -} - -impl Into for ProtoNodeV10 { - fn into(self) -> ProtoNodeV7 { - ProtoNodeV7 { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - execution_status: self.execution_status, - } - } -} - -impl Into for ProtoNodeV10 { - fn into(self) -> ProtoNode { - ProtoNode { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - execution_status: self.execution_status, - unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, - } - } -} - -impl From for ProtoNodeV7 { - fn from(container: ProtoNode) -> Self { - Self { - slot: container.slot, - state_root: container.state_root, - target_root: container.target_root, - current_epoch_shuffling_id: container.current_epoch_shuffling_id, - next_epoch_shuffling_id: container.next_epoch_shuffling_id, - root: container.root, - parent: container.parent, - justified_checkpoint: container.justified_checkpoint, - finalized_checkpoint: container.finalized_checkpoint, - weight: container.weight, - best_child: container.best_child, - best_descendant: container.best_descendant, - execution_status: container.execution_status, - } - } -} - -#[superstruct( - variants(V1, V6, V7, V10), - variant_attributes(derive(Encode, Decode)), - no_enum -)] -#[derive(Encode, Decode)] -pub struct SszContainer { - pub votes: Vec, - pub balances: Vec, - pub prune_threshold: usize, - #[superstruct(only(V1, V6))] - pub justified_epoch: Epoch, - #[superstruct(only(V1, V6))] - pub finalized_epoch: Epoch, - 
#[superstruct(only(V7, V10))] - pub justified_checkpoint: Checkpoint, - #[superstruct(only(V7, V10))] - pub finalized_checkpoint: Checkpoint, - #[superstruct(only(V1))] - pub nodes: Vec, - #[superstruct(only(V6))] - pub nodes: Vec, - #[superstruct(only(V7))] - pub nodes: Vec, - #[superstruct(only(V10))] - pub nodes: Vec, - pub indices: Vec<(Hash256, usize)>, - #[superstruct(only(V7, V10))] - pub previous_proposer_boost: ProposerBoost, -} - -impl Into for SszContainerV1 { - fn into(self) -> SszContainerV6 { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainerV6 { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_epoch: self.justified_epoch, - finalized_epoch: self.finalized_epoch, - nodes, - indices: self.indices, - } - } -} - -impl SszContainerV6 { - pub(crate) fn into_ssz_container_v7( - self, - justified_checkpoint: Checkpoint, - finalized_checkpoint: Checkpoint, - ) -> SszContainerV7 { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainerV7 { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_checkpoint, - finalized_checkpoint, - nodes, - indices: self.indices, - previous_proposer_boost: ProposerBoost::default(), - } - } -} - -impl Into for SszContainerV7 { - fn into(self) -> SszContainerV10 { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainerV10 { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - nodes, - indices: self.indices, - previous_proposer_boost: self.previous_proposer_boost, - } - } -} - -impl Into for SszContainerV10 { - fn into(self) -> SszContainerV7 { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainerV7 { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - nodes, - indices: self.indices, - previous_proposer_boost: self.previous_proposer_boost, - } - } -} - -impl Into for SszContainerV10 { - fn into(self) -> SszContainer { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainer { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - nodes, - indices: self.indices, - previous_proposer_boost: self.previous_proposer_boost, - } - } -} - -impl From for SszContainerV7 { - fn from(container: SszContainer) -> Self { - let nodes = container.nodes.into_iter().map(Into::into).collect(); - - Self { - votes: container.votes, - balances: container.balances, - prune_threshold: container.prune_threshold, - justified_checkpoint: container.justified_checkpoint, - finalized_checkpoint: container.finalized_checkpoint, - nodes, - indices: container.indices, - previous_proposer_boost: container.previous_proposer_boost, - } - } -} diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index 33447bc2efd..40b73451cb0 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -298,27 +298,6 @@ impl SnapshotCache { }) } - /// Borrow the state corresponding to `block_root` if it exists in the cache *unadvanced*. 
- /// - /// Care must be taken not to mutate the state in an invalid way. This function should only - /// be used to mutate the *caches* of the state, for example the tree hash cache when - /// calculating a light client merkle proof. - pub fn borrow_unadvanced_state_mut( - &mut self, - block_root: Hash256, - ) -> Option<&mut BeaconState> { - self.snapshots - .iter_mut() - .find(|snapshot| { - // If the pre-state exists then state advance has already taken the state for - // `block_root` and mutated its tree hash cache. Rather than re-building it while - // holding the snapshot cache lock (>1 second), prefer to return `None` from this - // function and force the caller to load it from disk. - snapshot.beacon_block_root == block_root && snapshot.pre_state.is_none() - }) - .map(|snapshot| &mut snapshot.beacon_state) - } - /// If there is a snapshot with `block_root`, clone it and return the clone. pub fn get_cloned( &self, diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 7746bba213f..b75e583fc76 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -17,6 +17,7 @@ mod proposer_duties; mod publish_blocks; mod state_id; mod sync_committees; +mod ui; mod validator_inclusion; mod version; @@ -2941,6 +2942,18 @@ pub fn serve( }, ); + // GET lighthouse/ui/validator_count + let get_lighthouse_ui_validator_count = warp::path("lighthouse") + .and(warp::path("ui")) + .and(warp::path("validator_count")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + ui::get_validator_count(chain).map(api_types::GenericResponse::from) + }) + }); + // GET lighthouse/syncing let get_lighthouse_syncing = warp::path("lighthouse") .and(warp::path("syncing")) @@ -3409,6 +3422,7 @@ pub fn serve( .or(get_lighthouse_attestation_performance.boxed()) .or(get_lighthouse_block_packing_efficiency.boxed()) .or(get_lighthouse_merge_readiness.boxed()) + .or(get_lighthouse_ui_validator_count.boxed()) .or(get_events.boxed()), ) .boxed() diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs new file mode 100644 index 00000000000..8f9400dbbd0 --- /dev/null +++ b/beacon_node/http_api/src/ui.rs @@ -0,0 +1,71 @@ +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::types::ValidatorStatus; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; +use warp_utils::reject::beacon_chain_error; + +#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] +pub struct ValidatorCountResponse { + pub active_ongoing: u64, + pub active_exiting: u64, + pub active_slashed: u64, + pub pending_initialized: u64, + pub pending_queued: u64, + pub withdrawal_possible: u64, + pub withdrawal_done: u64, + pub exited_unslashed: u64, + pub exited_slashed: u64, +} + +pub fn get_validator_count( + chain: Arc>, +) -> Result { + let spec = &chain.spec; + let mut active_ongoing = 0; + let mut active_exiting = 0; + let mut active_slashed = 0; + let mut pending_initialized = 0; + let mut pending_queued = 0; + let mut withdrawal_possible = 0; + let mut withdrawal_done = 0; + let mut exited_unslashed = 0; + let mut exited_slashed = 0; + + chain + .with_head(|head| { + let state = &head.beacon_state; + let epoch = state.current_epoch(); + for validator in state.validators() { + let status = + ValidatorStatus::from_validator(validator, epoch, spec.far_future_epoch); + + match status { + ValidatorStatus::ActiveOngoing => active_ongoing += 1, + ValidatorStatus::ActiveExiting => 
active_exiting += 1, + ValidatorStatus::ActiveSlashed => active_slashed += 1, + ValidatorStatus::PendingInitialized => pending_initialized += 1, + ValidatorStatus::PendingQueued => pending_queued += 1, + ValidatorStatus::WithdrawalPossible => withdrawal_possible += 1, + ValidatorStatus::WithdrawalDone => withdrawal_done += 1, + ValidatorStatus::ExitedUnslashed => exited_unslashed += 1, + ValidatorStatus::ExitedSlashed => exited_slashed += 1, + // Since we are not invoking `superset`, all other variants will be 0. + _ => (), + } + } + Ok::<(), BeaconChainError>(()) + }) + .map_err(beacon_chain_error)?; + + Ok(ValidatorCountResponse { + active_ongoing, + active_exiting, + active_slashed, + pending_initialized, + pending_queued, + withdrawal_possible, + withdrawal_done, + exited_unslashed, + exited_slashed, + }) +} diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 0f0d1460fa6..4600e5e8269 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -391,16 +391,6 @@ impl, Cold: ItemStore> HotColdDB } } - /// Get a schema V8 or earlier full block by reading it and its payload from disk. - pub fn get_full_block_prior_to_v9( - &self, - block_root: &Hash256, - ) -> Result>, Error> { - self.get_block_with(block_root, |bytes| { - SignedBeaconBlock::from_ssz_bytes(bytes, &self.spec) - }) - } - /// Convert a blinded block into a full block by loading its execution payload if necessary. pub fn make_full_block( &self, diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index c1ba6a2dcc6..763372692ee 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -99,6 +99,28 @@ curl -X GET "http://localhost:5052/lighthouse/ui/health" -H "accept: applicatio } ``` +### `/lighthouse/ui/validator_count` + +```bash +curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: application/json" | jq +``` + +```json +{ + "data": { + "active_ongoing":479508, + "active_exiting":0, + "active_slashed":0, + "pending_initialized":28, + "pending_queued":0, + "withdrawal_possible":933, + "withdrawal_done":0, + "exited_unslashed":0, + "exited_slashed":3 + } +} +``` + ### `/lighthouse/syncing` ```bash diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index 2b0ac836a4e..0982e10ab90 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -19,13 +19,13 @@ validator client or the slasher**. | v2.0.0 | Oct 2021 | v5 | no | | v2.1.0 | Jan 2022 | v8 | no | | v2.2.0 | Apr 2022 | v8 | no | -| v2.3.0 | May 2022 | v9 | yes (pre Bellatrix) | -| v2.4.0 | Jul 2022 | v9 | yes (pre Bellatrix) | +| v2.3.0 | May 2022 | v9 | yes from <= v3.3.0 | +| v2.4.0 | Jul 2022 | v9 | yes from <= v3.3.0 | | v2.5.0 | Aug 2022 | v11 | yes | | v3.0.0 | Aug 2022 | v11 | yes | | v3.1.0 | Sep 2022 | v12 | yes | | v3.2.0 | Oct 2022 | v12 | yes | -| v3.3.0 | TBD | v13 | yes | +| v3.3.0 | Nov 2022 | v13 | yes | > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release > (e.g. v2.3.0). 
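The database-migrations table above maps each Lighthouse release to its on-disk schema version and notes whether a downgrade is possible. Downgrades are driven by the `lighthouse db migrate` subcommand while the beacon node is stopped; the sketch below is illustrative only, since the exact flags (such as `--datadir` and `--network`) and the supported target versions depend on the release in use.

```bash
# Illustrative only: roll the on-disk schema back to v11 before starting an
# older release. Stop the beacon node first, and add --datadir/--network
# flags to match your setup if they differ from the defaults.
lighthouse db migrate --to 11
```
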
From b6486e809d4e2e7a860e0a12c19b1a8edca675c0 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Sun, 4 Dec 2022 16:08:55 -0600 Subject: [PATCH 083/263] Fixed moar tests (#3774) --- beacon_node/http_api/tests/tests.rs | 115 ++++++++---------- consensus/types/src/execution_payload.rs | 3 +- .../types/src/execution_payload_header.rs | 2 +- consensus/types/src/payload.rs | 18 ++- 4 files changed, 63 insertions(+), 75 deletions(-) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 2e795e522d5..8644dcbf1ad 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -2122,7 +2122,7 @@ impl ApiTester { self } - pub async fn test_blinded_block_production>(&self) { + pub async fn test_blinded_block_production>(&self) { let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; @@ -2182,7 +2182,7 @@ impl ApiTester { } } - pub async fn test_blinded_block_production_no_verify_randao>( + pub async fn test_blinded_block_production_no_verify_randao>( self, ) -> Self { for _ in 0..E::slots_per_epoch() { @@ -2206,7 +2206,9 @@ impl ApiTester { self } - pub async fn test_blinded_block_production_verify_randao_invalid>( + pub async fn test_blinded_block_production_verify_randao_invalid< + Payload: AbstractExecPayload, + >( self, ) -> Self { let fork = self.chain.canonical_head.cached_head().head_fork(); @@ -2664,7 +2666,7 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2673,14 +2675,11 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); - assert_eq!( - payload.execution_payload_header.fee_recipient, - expected_fee_recipient - ); - assert_eq!(payload.execution_payload_header.gas_limit, 11_111_111); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); + assert_eq!(payload.gas_limit(), 11_111_111); // If this cache is empty, it indicates fallback was not used, so the payload came from the // mock builder. @@ -2707,7 +2706,7 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2716,14 +2715,11 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); - assert_eq!( - payload.execution_payload_header.fee_recipient, - expected_fee_recipient - ); - assert_eq!(payload.execution_payload_header.gas_limit, 30_000_000); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); + assert_eq!(payload.gas_limit(), 30_000_000); // This cache should not be populated because fallback should not have been used. 
assert!(self @@ -2753,7 +2749,7 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2762,12 +2758,9 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); - assert_eq!( - payload.execution_payload_header.fee_recipient, - test_fee_recipient - ); + assert_eq!(payload.fee_recipient(), test_fee_recipient); // This cache should not be populated because fallback should not have been used. assert!(self @@ -2801,11 +2794,11 @@ impl ApiTester { .beacon_state .latest_execution_payload_header() .unwrap() - .block_hash; + .block_hash(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2814,12 +2807,9 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); - assert_eq!( - payload.execution_payload_header.parent_hash, - expected_parent_hash - ); + assert_eq!(payload.parent_hash(), expected_parent_hash); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -2856,7 +2846,7 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2865,12 +2855,9 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); - assert_eq!( - payload.execution_payload_header.prev_randao, - expected_prev_randao - ); + assert_eq!(payload.prev_randao(), expected_prev_randao); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -2901,12 +2888,12 @@ impl ApiTester { .beacon_state .latest_execution_payload_header() .unwrap() - .block_number + .block_number() + 1; let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2915,12 +2902,9 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); - assert_eq!( - payload.execution_payload_header.block_number, - expected_block_number - ); + assert_eq!(payload.block_number(), expected_block_number); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -2951,11 +2935,11 @@ impl ApiTester { .beacon_state .latest_execution_payload_header() .unwrap() - .timestamp; + .timestamp(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2964,9 +2948,9 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); - assert!(payload.execution_payload_header.timestamp > min_expected_timestamp); + assert!(payload.timestamp() > min_expected_timestamp); // If this cache is populated, it indicates fallback to the local EE was correctly used. 
assert!(self @@ -2991,7 +2975,7 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -3000,7 +2984,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -3028,7 +3012,7 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -3037,7 +3021,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -3071,7 +3055,7 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) .await @@ -3080,7 +3064,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // This cache should not be populated because fallback should not have been used. assert!(self @@ -3100,7 +3084,7 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) .await @@ -3109,7 +3093,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -3149,7 +3133,7 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) .await @@ -3158,7 +3142,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -3188,7 +3172,7 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) .await @@ -3197,7 +3181,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // This cache should not be populated because fallback should not have been used. assert!(self @@ -3231,7 +3215,7 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -3240,13 +3224,10 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); - assert_eq!( - payload.execution_payload_header.fee_recipient, - expected_fee_recipient - ); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); // If this cache is populated, it indicates fallback to the local EE was correctly used. 
assert!(self @@ -3275,7 +3256,7 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -3284,7 +3265,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index fa6348bdce3..18005094e4b 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -38,8 +38,7 @@ pub type Withdrawals = VariableList::MaxWithdrawal )] #[derive(Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] -#[serde(untagged)] -#[serde(bound = "T: EthSpec")] +#[serde(bound = "T: EthSpec", untagged)] #[ssz(enum_behaviour = "transparent")] #[tree_hash(enum_behaviour = "transparent")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index a9708153ca3..a98a68e3e55 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -33,7 +33,7 @@ use BeaconStateError; )] #[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] -#[serde(bound = "T: EthSpec")] +#[serde(bound = "T: EthSpec", untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 33fa2273725..dba94cfd7c1 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -166,11 +166,10 @@ impl<'a, T: EthSpec> From> for ExecutionPayload { // FIXME: can this be implemented as Deref or Clone somehow? 
impl<'a, T: EthSpec> From> for FullPayload { fn from(full_payload_ref: FullPayloadRef<'a, T>) -> Self { - match full_payload_ref { - FullPayloadRef::Merge(payload_ref) => FullPayload::Merge(payload_ref.clone()), - FullPayloadRef::Capella(payload_ref) => FullPayload::Capella(payload_ref.clone()), - FullPayloadRef::Eip4844(payload_ref) => FullPayload::Eip4844(payload_ref.clone()), - } + map_full_payload_ref!(&'a _, full_payload_ref, move |payload, cons| { + cons(payload); + payload.clone().into() + }) } } @@ -451,6 +450,15 @@ pub struct BlindedPayload { pub execution_payload_header: ExecutionPayloadHeaderEip4844, } +impl<'a, T: EthSpec> From> for BlindedPayload { + fn from(blinded_payload_ref: BlindedPayloadRef<'a, T>) -> Self { + map_blinded_payload_ref!(&'a _, blinded_payload_ref, move |payload, cons| { + cons(payload); + payload.clone().into() + }) + } +} + impl ExecPayload for BlindedPayload { fn block_type() -> BlockType { BlockType::Blinded From f7a54afde5cf91f8fb4ac67a1dd2689b3e481d8a Mon Sep 17 00:00:00 2001 From: Justin Traglia <95511699+jtraglia@users.noreply.github.com> Date: Mon, 12 Dec 2022 00:40:44 +0000 Subject: [PATCH 084/263] Fix some capella nits (#3782) --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- .../state_processing/src/per_block_processing/errors.rs | 8 ++++---- .../src/per_block_processing/process_operations.rs | 2 +- consensus/state_processing/src/upgrade/eip4844.rs | 2 +- consensus/types/src/bls_to_execution_change.rs | 1 - consensus/types/src/signed_bls_to_execution_change.rs | 1 - consensus/types/src/validator.rs | 8 ++++---- consensus/types/src/withdrawal.rs | 3 --- 8 files changed, 11 insertions(+), 16 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 1968a38a5b2..7841f53e74b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2184,7 +2184,7 @@ impl BeaconChain { } } - /// Verify a signed BLS to exection change before allowing it to propagate on the gossip network. + /// Verify a signed BLS to execution change before allowing it to propagate on the gossip network. pub fn verify_bls_to_execution_change_for_gossip( &self, bls_to_execution_change: SignedBlsToExecutionChange, diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 7b355b0ddc6..5c34afd593e 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -305,7 +305,7 @@ pub enum AttesterSlashingInvalid { /// Describes why an object is invalid. #[derive(Debug, PartialEq, Clone)] pub enum AttestationInvalid { - /// Commmittee index exceeds number of committees in that slot. + /// Committee index exceeds number of committees in that slot. BadCommitteeIndex, /// Attestation included before the inclusion delay. IncludedTooEarly { @@ -420,11 +420,11 @@ pub enum ExitInvalid { pub enum BlsExecutionChangeInvalid { /// The specified validator is not in the state's validator registry. ValidatorUnknown(u64), - /// Validator does not have BLS Withdrawal credentials before this change + /// Validator does not have BLS Withdrawal credentials before this change. NonBlsWithdrawalCredentials, - /// Provided BLS pubkey does not match withdrawal credentials + /// Provided BLS pubkey does not match withdrawal credentials. 
WithdrawalCredentialsMismatch, - /// The signature is invalid + /// The signature is invalid. BadSignature, } diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index d0e855b7ade..105faba83bd 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -293,7 +293,7 @@ pub fn process_exits( /// Validates each `bls_to_execution_change` and updates the state /// -/// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returs +/// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returns /// an `Err` describing the invalid object or cause of failure. #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] pub fn process_bls_to_execution_changes( diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index 478024f17e2..6d66fd8412c 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -67,7 +67,7 @@ pub fn upgrade_to_eip4844( #[cfg(feature = "withdrawals")] next_withdrawal_index: pre.next_withdrawal_index, #[cfg(feature = "withdrawals")] - next_withdrawal_validator_index: 0, + next_withdrawal_validator_index: pre.next_withdrawal_validator_index, // Caches total_active_balance: pre.total_active_balance, committee_caches: mem::take(&mut pre.committee_caches), diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index fa15a0132b4..497e9aa1405 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -6,7 +6,6 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -/// A deposit to potentially become a beacon chain validator. #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/signed_bls_to_execution_change.rs index d7cce693b86..8c8131c1e0e 100644 --- a/consensus/types/src/signed_bls_to_execution_change.rs +++ b/consensus/types/src/signed_bls_to_execution_change.rs @@ -6,7 +6,6 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -/// A deposit to potentially become a beacon chain validator. #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index e4497c809e0..ebe3ca046cf 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -67,7 +67,7 @@ impl Validator { && self.activation_epoch == spec.far_future_epoch } - /// Returns `true` if the validator has eth1 withdrawal credential + /// Returns `true` if the validator has eth1 withdrawal credential. 
pub fn has_eth1_withdrawal_credential(&self, spec: &ChainSpec) -> bool { self.withdrawal_credentials .as_bytes() @@ -88,7 +88,7 @@ impl Validator { .flatten() } - /// Changes withdrawal credentials to the provided eth1 execution address + /// Changes withdrawal credentials to the provided eth1 execution address. /// /// WARNING: this function does NO VALIDATION - it just does it! pub fn change_withdrawal_credentials(&mut self, execution_address: &Address, spec: &ChainSpec) { @@ -98,12 +98,12 @@ impl Validator { self.withdrawal_credentials = Hash256::from(bytes); } - /// Returns `true` if the validator is fully withdrawable at some epoch + /// Returns `true` if the validator is fully withdrawable at some epoch. pub fn is_fully_withdrawable_at(&self, balance: u64, epoch: Epoch, spec: &ChainSpec) -> bool { self.has_eth1_withdrawal_credential(spec) && self.withdrawable_epoch <= epoch && balance > 0 } - /// Returns `true` if the validator is partially withdrawable + /// Returns `true` if the validator is partially withdrawable. pub fn is_partially_withdrawable_validator(&self, balance: u64, spec: &ChainSpec) -> bool { self.has_eth1_withdrawal_credential(spec) && self.effective_balance == spec.max_effective_balance diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs index c2529747c4d..10530dcb507 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal.rs @@ -5,9 +5,6 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -/// A deposit to potentially become a beacon chain validator. -/// -/// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, From 173a0abab4cde6f30b6263372884eda77c986423 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 13 Dec 2022 17:03:21 +1100 Subject: [PATCH 085/263] Fix `Withdrawal` serialisation and check address change fork (#3789) * Disallow address changes before Capella * Quote u64s in Withdrawal serialisation --- beacon_node/beacon_chain/src/beacon_chain.rs | 7 +++++++ beacon_node/beacon_chain/src/errors.rs | 1 + consensus/types/src/withdrawal.rs | 2 ++ 3 files changed, 10 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 7841f53e74b..0bbbe92356c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2191,7 +2191,14 @@ impl BeaconChain { ) -> Result, Error> { #[cfg(feature = "withdrawals-processing")] { + let current_fork = self.spec.fork_name_at_slot::(self.slot()?); + if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork { + // Disallow BLS to execution changes prior to the Capella fork. 
+ return Err(Error::BlsToExecutionChangeBadFork(current_fork)); + } + let wall_clock_state = self.wall_clock_state()?; + Ok(self .observed_bls_to_execution_changes .lock() diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 60282426a5a..3a2e4a0bc53 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -206,6 +206,7 @@ pub enum BeaconChainError { MissingPersistedForkChoice, CommitteePromiseFailed(oneshot_broadcast::Error), MaxCommitteePromises(usize), + BlsToExecutionChangeBadFork(ForkName), } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs index 10530dcb507..6f14cf1c52e 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal.rs @@ -12,8 +12,10 @@ use tree_hash_derive::TreeHash; pub struct Withdrawal { #[serde(with = "eth2_serde_utils::quoted_u64")] pub index: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] pub validator_index: u64, pub address: Address, + #[serde(with = "eth2_serde_utils::quoted_u64")] pub amount: u64, } From b1c33361ea1559fd32bd23f1c3e5940b320a3ae9 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Tue, 13 Dec 2022 10:50:24 -0600 Subject: [PATCH 086/263] Fixed Clippy Complaints & Some Failing Tests (#3791) * Fixed Clippy Complaints & Some Failing Tests * Update Dockerfile to Rust-1.65 * EF test file renamed * Touch up comments based on feedback --- Dockerfile | 2 +- beacon_node/beacon_chain/src/beacon_chain.rs | 10 ++-- .../src/engine_api/json_structures.rs | 6 +-- beacon_node/execution_layer/src/engines.rs | 2 +- beacon_node/execution_layer/src/lib.rs | 2 +- .../src/test_utils/handle_rpc.rs | 27 ++++++++-- beacon_node/http_api/src/publish_blocks.rs | 2 +- .../lighthouse_network/src/types/pubsub.rs | 6 +-- beacon_node/operation_pool/src/lib.rs | 4 ++ consensus/types/src/payload.rs | 49 +++++++++++-------- lcli/src/new_testnet.rs | 4 +- .../src/cases/merkle_proof_validity.rs | 2 +- testing/ef_tests/src/cases/operations.rs | 6 +-- testing/ef_tests/src/lib.rs | 4 +- testing/ef_tests/tests/tests.rs | 2 + 15 files changed, 79 insertions(+), 49 deletions(-) diff --git a/Dockerfile b/Dockerfile index 72423b17c68..7a0602a2213 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.62.1-bullseye AS builder +FROM rust:1.65.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse ARG FEATURES diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0bbbe92356c..fcd097d4d3f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2205,8 +2205,10 @@ impl BeaconChain { .verify_and_observe(bls_to_execution_change, &wall_clock_state, &self.spec)?) } + // TODO: remove this whole block once withdrawals-processing is removed #[cfg(not(feature = "withdrawals-processing"))] { + #[allow(clippy::drop_non_drop)] drop(bls_to_execution_change); Ok(ObservationOutcome::AlreadyKnown) } @@ -4342,17 +4344,17 @@ impl BeaconChain { // Might implement caching here in the future.. 
let prepare_state = self .state_at_slot(prepare_slot, StateSkipConfig::WithoutStateRoots) - .or_else(|e| { + .map_err(|e| { error!(self.log, "State advance for withdrawals failed"; "error" => ?e); - Err(e) + e })?; Some(get_expected_withdrawals(&prepare_state, &self.spec)) } } .transpose() - .or_else(|e| { + .map_err(|e| { error!(self.log, "Error preparing beacon proposer"; "error" => ?e); - Err(e) + e }) .map(|withdrawals_opt| withdrawals_opt.map(|w| w.into())) .map_err(Error::PrepareProposerFailed)?; diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 1b125cde44d..ea2bb4941d1 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -176,7 +176,7 @@ impl JsonExecutionPayload { .collect::>() .into() }) - .ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadCapella".to_string()))? + .ok_or_else(|| Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadCapella".to_string()))? })), ForkName::Eip4844 => Ok(ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { parent_hash: v2.parent_hash, @@ -191,7 +191,7 @@ impl JsonExecutionPayload { timestamp: v2.timestamp, extra_data: v2.extra_data, base_fee_per_gas: v2.base_fee_per_gas, - excess_data_gas: v2.excess_data_gas.ok_or(Error::BadConversion("Null `excess_data_gas` field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))?, + excess_data_gas: v2.excess_data_gas.ok_or_else(|| Error::BadConversion("Null `excess_data_gas` field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))?, block_hash: v2.block_hash, transactions: v2.transactions, #[cfg(feature = "withdrawals")] @@ -204,7 +204,7 @@ impl JsonExecutionPayload { .collect::>() .into() }) - .ok_or(Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))? + .ok_or_else(|| Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))? 
})), _ => Err(Error::UnsupportedForkVariant(format!("Unsupported conversion from JsonExecutionPayloadV2 for {}", fork_name))), } diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 16562267ca4..271cca26cba 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -342,7 +342,7 @@ impl Engine { impl PayloadIdCacheKey { fn new(head_block_hash: &ExecutionBlockHash, attributes: &PayloadAttributes) -> Self { Self { - head_block_hash: head_block_hash.clone(), + head_block_hash: *head_block_hash, payload_attributes: attributes.clone(), } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index b6e85f67dcd..a97bbc4faf0 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1582,7 +1582,7 @@ impl ExecutionLayer { let transactions = VariableList::new( block .transactions() - .into_iter() + .iter() .map(|transaction| VariableList::new(transaction.rlp().to_vec())) .collect::>() .map_err(ApiError::DeserializeTransaction)?, diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index fe765cc9495..c83aeccdc5f 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -74,11 +74,29 @@ pub async fn handle_rpc( .unwrap()) } } - ENGINE_NEW_PAYLOAD_V1 => { - let request: JsonExecutionPayload = get_param(params, 0)?; + ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 => { + let request = match method { + ENGINE_NEW_PAYLOAD_V1 => { + JsonExecutionPayload::V1(get_param::>(params, 0)?) + } + ENGINE_NEW_PAYLOAD_V2 => { + JsonExecutionPayload::V2(get_param::>(params, 0)?) + } + _ => unreachable!(), + }; + let fork = match request { + JsonExecutionPayload::V1(_) => ForkName::Merge, + JsonExecutionPayload::V2(ref payload) => { + if payload.withdrawals.is_none() { + ForkName::Merge + } else { + ForkName::Capella + } + } + }; // Canned responses set by block hash take priority. - if let Some(status) = ctx.get_new_payload_status(&request.block_hash()) { + if let Some(status) = ctx.get_new_payload_status(request.block_hash()) { return Ok(serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap()); } @@ -97,8 +115,7 @@ pub async fn handle_rpc( Some( ctx.execution_block_generator .write() - // FIXME: should this worry about other forks? 
- .new_payload(request.try_into_execution_payload(ForkName::Merge).unwrap()), + .new_payload(request.try_into_execution_payload(fork).unwrap()), ) } else { None diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index fb296168db0..c471da7d584 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -41,7 +41,7 @@ pub async fn publish_block( )) } else { //TODO(pawan): return an empty sidecar instead - return Err(warp_utils::reject::broadcast_without_import(format!(""))); + return Err(warp_utils::reject::broadcast_without_import(String::new())); } } _ => PubsubMessage::BeaconBlock(block.clone()), diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 02f2bfff1df..9cce98db946 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -224,12 +224,10 @@ impl PubsubMessage { | ForkName::Merge | ForkName::Capella, ) - | None => { - return Err(format!( + | None => Err(format!( "beacon_blobs_and_sidecar topic invalid for given fork digest {:?}", gossip_topic.fork_digest - )) - } + )), } } GossipKind::VoluntaryExit => { diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 159454b9e98..37fa6893873 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -558,8 +558,10 @@ impl OperationPool { ) } + // TODO: remove this whole block once withdrwals-processing is removed #[cfg(not(feature = "withdrawals-processing"))] { + #[allow(clippy::drop_copy)] drop((state, spec)); vec![] } @@ -597,8 +599,10 @@ impl OperationPool { ); } + // TODO: remove this whole block once withdrwals-processing is removed #[cfg(not(feature = "withdrawals-processing"))] { + #[allow(clippy::drop_copy)] drop((head_block, head_state, spec)); } } diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index dba94cfd7c1..2d9e37b81ab 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -261,7 +261,7 @@ impl ExecPayload for FullPayload { }) } - fn is_default_with_empty_roots<'a>(&'a self) -> bool { + fn is_default_with_empty_roots(&self) -> bool { // For full payloads the empty/zero distinction does not exist. self.is_default_with_zero_roots() } @@ -536,7 +536,7 @@ impl ExecPayload for BlindedPayload { } } - fn is_default_with_zero_roots<'a>(&'a self) -> bool { + fn is_default_with_zero_roots(&self) -> bool { self.to_ref().is_default_with_zero_roots() } @@ -643,13 +643,13 @@ impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { } macro_rules! impl_exec_payload_common { - ($wrapper_type:ident, - $wrapped_type:ident, - $wrapped_type_full:ident, - $wrapped_type_header:ident, - $wrapped_field:ident, - $fork_variant:ident, - $block_type_variant:ident, + ($wrapper_type:ident, // BlindedPayloadMerge | FullPayloadMerge + $wrapped_type:ident, // ExecutionPayloadHeaderMerge | ExecutionPayloadMerge + $wrapped_type_full:ident, // ExecutionPayloadMerge | ExecutionPayloadMerge + $wrapped_type_header:ident, // ExecutionPayloadHeaderMerge | ExecutionPayloadHeaderMerge + $wrapped_field:ident, // execution_payload_header | execution_payload + $fork_variant:ident, // Merge | Merge + $block_type_variant:ident, // Blinded | Full $f:block, $g:block) => { impl ExecPayload for $wrapper_type { @@ -696,7 +696,15 @@ macro_rules! 
impl_exec_payload_common { } fn is_default_with_empty_roots(&self) -> bool { - self.$wrapped_field == $wrapped_type::from($wrapped_type_full::default()) + // FIXME: is there a better way than ignoring this lint? + // This is necessary because the first invocation of this macro might expand to: + // self.execution_payload_header == ExecutionPayloadHeaderMerge::from(ExecutionPayloadMerge::default()) + // but the second invocation might expand to: + // self.execution_payload == ExecutionPayloadMerge::from(ExecutionPayloadMerge::default()) + #[allow(clippy::cmp_owned)] + { + self.$wrapped_field == $wrapped_type::from($wrapped_type_full::default()) + } } fn transactions(&self) -> Option<&Transactions> { @@ -720,16 +728,17 @@ macro_rules! impl_exec_payload_common { } macro_rules! impl_exec_payload_for_fork { + // BlindedPayloadMerge, FullPayloadMerge, ExecutionPayloadHeaderMerge, ExecutionPayloadMerge, Merge ($wrapper_type_header:ident, $wrapper_type_full:ident, $wrapped_type_header:ident, $wrapped_type_full:ident, $fork_variant:ident) => { //*************** Blinded payload implementations ******************// impl_exec_payload_common!( - $wrapper_type_header, - $wrapped_type_header, - $wrapped_type_full, - $wrapped_type_header, + $wrapper_type_header, // BlindedPayloadMerge + $wrapped_type_header, // ExecutionPayloadHeaderMerge + $wrapped_type_full, // ExecutionPayloadMerge + $wrapped_type_header, // ExecutionPayloadHeaderMerge execution_payload_header, - $fork_variant, + $fork_variant, // Merge Blinded, { |_| { None } }, { @@ -794,12 +803,12 @@ macro_rules! impl_exec_payload_for_fork { //*************** Full payload implementations ******************// impl_exec_payload_common!( - $wrapper_type_full, - $wrapped_type_full, - $wrapped_type_full, - $wrapped_type_header, + $wrapper_type_full, // FullPayloadMerge + $wrapped_type_full, // ExecutionPayloadMerge + $wrapped_type_full, // ExecutionPayloadMerge + $wrapped_type_header, // ExecutionPayloadHeaderMerge execution_payload, - $fork_variant, + $fork_variant, // Merge Full, { let c: for<'a> fn(&'a $wrapper_type_full) -> Option<&'a Transactions> = diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 69356045724..58a7c49b397 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -87,9 +87,9 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul execution_payload_header.as_ref() { let eth1_block_hash = - parse_optional(matches, "eth1-block-hash")?.unwrap_or(payload.block_hash()); + parse_optional(matches, "eth1-block-hash")?.unwrap_or_else(|| payload.block_hash()); let genesis_time = - parse_optional(matches, "genesis-time")?.unwrap_or(payload.timestamp()); + parse_optional(matches, "genesis-time")?.unwrap_or_else(|| payload.timestamp()); (eth1_block_hash, genesis_time) } else { let eth1_block_hash = parse_required(matches, "eth1-block-hash").map_err(|_| { diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index a57abc2e070..c180774bb64 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -29,7 +29,7 @@ pub struct MerkleProofValidity { impl LoadCase for MerkleProofValidity { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); - let state = ssz_decode_state(&path.join("state.ssz_snappy"), spec)?; + let state = ssz_decode_state(&path.join("object.ssz_snappy"), spec)?; let merkle_proof = 
yaml_decode_file(&path.join("proof.yaml"))?; // Metadata does not exist in these tests but it is left like this just in case. let meta_path = path.join("meta.yaml"); diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 431fd829f67..f5487a6940d 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -21,8 +21,6 @@ use state_processing::{ ConsensusContext, }; use std::fmt::Debug; -#[cfg(not(all(feature = "withdrawals", feature = "withdrawals-processing")))] -use std::marker::PhantomData; use std::path::Path; #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] use types::SignedBlsToExecutionChange; @@ -44,12 +42,10 @@ struct ExecutionMetadata { } /// Newtype for testing withdrawals. +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] #[derive(Debug, Clone, Deserialize)] pub struct WithdrawalsPayload { - #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] payload: FullPayload, - #[cfg(not(all(feature = "withdrawals", feature = "withdrawals-processing")))] - _phantom_data: PhantomData, } #[derive(Debug, Clone)] diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index d45b1e15c7a..fd3bf2bd1b5 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,9 +1,11 @@ pub use case_result::CaseResult; +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +pub use cases::WithdrawalsPayload; pub use cases::{ Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates, JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates, RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, - SyncCommitteeUpdates, WithdrawalsPayload, + SyncCommitteeUpdates, }; pub use decode::log_file_access; pub use error::Error; diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index f84be64dad9..0227b92ec86 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -82,12 +82,14 @@ fn operations_execution_payload_blinded() { OperationsHandler::>::default().run(); } +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] #[test] fn operations_withdrawals() { OperationsHandler::>::default().run(); OperationsHandler::>::default().run(); } +#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] #[test] fn operations_bls_to_execution_change() { OperationsHandler::::default().run(); From 07d6ef749a6adfcf5e7476449a59b13a40deced8 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Tue, 13 Dec 2022 18:49:30 -0600 Subject: [PATCH 087/263] Fixed Payload Reconstruction Bug (#3796) --- beacon_node/beacon_chain/src/beacon_chain.rs | 3 +- beacon_node/beacon_chain/src/errors.rs | 2 + .../execution_layer/src/engine_api/http.rs | 39 ++++++++++++++++--- beacon_node/execution_layer/src/lib.rs | 20 ++++++++-- .../src/test_rig.rs | 3 +- 5 files changed, 55 insertions(+), 12 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index fcd097d4d3f..e51cdacf6a5 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -939,6 +939,7 @@ impl BeaconChain { Some(DatabaseBlock::Blinded(block)) => block, None => return Ok(None), }; + let fork = blinded_block.fork_name(&self.spec)?; // If we only have a blinded 
block, load the execution payload from the EL. let block_message = blinded_block.message(); @@ -953,7 +954,7 @@ impl BeaconChain { .execution_layer .as_ref() .ok_or(Error::ExecutionLayerMissing)? - .get_payload_by_block_hash(exec_block_hash) + .get_payload_by_block_hash(exec_block_hash, fork) .await .map_err(|e| Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, e))? .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 3a2e4a0bc53..5f1f0595ca7 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -207,6 +207,7 @@ pub enum BeaconChainError { CommitteePromiseFailed(oneshot_broadcast::Error), MaxCommitteePromises(usize), BlsToExecutionChangeBadFork(ForkName), + InconsistentFork(InconsistentFork), } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -230,6 +231,7 @@ easy_from_to!(ForkChoiceStoreError, BeaconChainError); easy_from_to!(HistoricalBlockError, BeaconChainError); easy_from_to!(StateAdvanceError, BeaconChainError); easy_from_to!(BlockReplayError, BeaconChainError); +easy_from_to!(InconsistentFork, BeaconChainError); #[derive(Debug)] pub enum BlockProductionError { diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 8eef7aece3f..c71cfa0c04b 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -664,14 +664,41 @@ impl HttpJsonRpc { pub async fn get_block_by_hash_with_txns( &self, block_hash: ExecutionBlockHash, + fork: ForkName, ) -> Result>, Error> { let params = json!([block_hash, true]); - self.rpc_request( - ETH_GET_BLOCK_BY_HASH, - params, - ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, - ) - .await + Ok(Some(match fork { + ForkName::Merge => ExecutionBlockWithTransactions::Merge( + self.rpc_request( + ETH_GET_BLOCK_BY_HASH, + params, + ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?, + ), + ForkName::Capella => ExecutionBlockWithTransactions::Capella( + self.rpc_request( + ETH_GET_BLOCK_BY_HASH, + params, + ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?, + ), + ForkName::Eip4844 => ExecutionBlockWithTransactions::Eip4844( + self.rpc_request( + ETH_GET_BLOCK_BY_HASH, + params, + ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?, + ), + ForkName::Base | ForkName::Altair => { + return Err(Error::UnsupportedForkVariant(format!( + "called get_block_by_hash_with_txns with fork {:?}", + fork + ))) + } + })) } pub async fn new_payload_v1( diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index a97bbc4faf0..2aaa7608e30 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1550,10 +1550,11 @@ impl ExecutionLayer { pub async fn get_payload_by_block_hash( &self, hash: ExecutionBlockHash, + fork: ForkName, ) -> Result>, Error> { self.engine() .request(|engine| async move { - self.get_payload_by_block_hash_from_engine(engine, hash) + self.get_payload_by_block_hash_from_engine(engine, hash, fork) .await }) .await @@ -1565,15 +1566,26 @@ impl ExecutionLayer { &self, engine: &Engine, hash: ExecutionBlockHash, + fork: ForkName, ) -> Result>, ApiError> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH); if hash == ExecutionBlockHash::zero() 
{ - // FIXME: how to handle forks properly here? - return Ok(Some(ExecutionPayloadMerge::default().into())); + return match fork { + ForkName::Merge => Ok(Some(ExecutionPayloadMerge::default().into())), + ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())), + ForkName::Eip4844 => Ok(Some(ExecutionPayloadEip4844::default().into())), + ForkName::Base | ForkName::Altair => Err(ApiError::UnsupportedForkVariant( + format!("called get_payload_by_block_hash_from_engine with {}", fork), + )), + }; } - let block = if let Some(block) = engine.api.get_block_by_hash_with_txns::(hash).await? { + let block = if let Some(block) = engine + .api + .get_block_by_hash_with_txns::(hash, fork) + .await? + { block } else { return Ok(None); diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 944e2fef6fe..4dab00689c9 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -616,7 +616,8 @@ async fn check_payload_reconstruction( ) { let reconstructed = ee .execution_layer - .get_payload_by_block_hash(payload.block_hash()) + // FIXME: handle other forks here? + .get_payload_by_block_hash(payload.block_hash(), ForkName::Merge) .await .unwrap() .unwrap(); From 75dd8780e0a7d8e095a7d92dba8aab30308b40de Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 14 Dec 2022 11:52:46 +1100 Subject: [PATCH 088/263] Use JsonPayload for payload reconstruction (#3797) --- beacon_node/execution_layer/src/engine_api.rs | 44 ++++++++++++------- beacon_node/execution_layer/src/lib.rs | 20 +++++++-- .../test_utils/execution_block_generator.rs | 2 +- 3 files changed, 46 insertions(+), 20 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 988b04826eb..424ca30d137 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,10 +1,11 @@ use crate::engines::ForkchoiceState; pub use ethers_core::types::Transaction; -use ethers_core::utils::rlp::{Decodable, Rlp}; +use ethers_core::utils::rlp::{self, Decodable, Rlp}; use http::deposit_methods::RpcError; -pub use json_structures::TransitionConfigurationV1; +pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1}; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; +use std::convert::TryFrom; use strum::IntoStaticStr; use superstruct::superstruct; pub use types::{ @@ -46,6 +47,7 @@ pub enum Error { RequiredMethodUnsupported(&'static str), UnsupportedForkVariant(String), BadConversion(String), + RlpDecoderError(rlp::DecoderError), } impl From for Error { @@ -79,6 +81,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: rlp::DecoderError) -> Self { + Error::RlpDecoderError(e) + } +} + #[derive(Clone, Copy, Debug, PartialEq, IntoStaticStr)] #[strum(serialize_all = "snake_case")] pub enum PayloadStatusV1Status { @@ -159,12 +167,14 @@ pub struct ExecutionBlockWithTransactions { pub transactions: Vec, #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] - pub withdrawals: Vec, + pub withdrawals: Vec, } -impl From> for ExecutionBlockWithTransactions { - fn from(payload: ExecutionPayload) -> Self { - match payload { +impl TryFrom> for ExecutionBlockWithTransactions { + type Error = Error; + + fn try_from(payload: ExecutionPayload) -> Result { + let json_payload = match payload { ExecutionPayload::Merge(block) => 
Self::Merge(ExecutionBlockWithTransactionsMerge { parent_hash: block.parent_hash, fee_recipient: block.fee_recipient, @@ -183,8 +193,7 @@ impl From> for ExecutionBlockWithTransactions .transactions .iter() .map(|tx| Transaction::decode(&Rlp::new(tx))) - .collect::, _>>() - .unwrap_or_else(|_| Vec::new()), + .collect::, _>>()?, }), ExecutionPayload::Capella(block) => { Self::Capella(ExecutionBlockWithTransactionsCapella { @@ -205,10 +214,12 @@ impl From> for ExecutionBlockWithTransactions .transactions .iter() .map(|tx| Transaction::decode(&Rlp::new(tx))) - .collect::, _>>() - .unwrap_or_else(|_| Vec::new()), + .collect::, _>>()?, #[cfg(feature = "withdrawals")] - withdrawals: block.withdrawals.into(), + withdrawals: Vec::from(block.withdrawals) + .into_iter() + .map(|withdrawal| withdrawal.into()) + .collect(), }) } ExecutionPayload::Eip4844(block) => { @@ -231,13 +242,16 @@ impl From> for ExecutionBlockWithTransactions .transactions .iter() .map(|tx| Transaction::decode(&Rlp::new(tx))) - .collect::, _>>() - .unwrap_or_else(|_| Vec::new()), + .collect::, _>>()?, #[cfg(feature = "withdrawals")] - withdrawals: block.withdrawals.into(), + withdrawals: Vec::from(block.withdrawals) + .into_iter() + .map(|withdrawal| withdrawal.into()) + .collect(), }) } - } + }; + Ok(json_payload) } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 2aaa7608e30..1980e82ce30 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1622,8 +1622,14 @@ impl ExecutionLayer { } ExecutionBlockWithTransactions::Capella(capella_block) => { #[cfg(feature = "withdrawals")] - let withdrawals = VariableList::new(capella_block.withdrawals.clone()) - .map_err(ApiError::DeserializeWithdrawals)?; + let withdrawals = VariableList::new( + capella_block + .withdrawals + .into_iter() + .map(|w| w.into()) + .collect(), + ) + .map_err(ApiError::DeserializeWithdrawals)?; ExecutionPayload::Capella(ExecutionPayloadCapella { parent_hash: capella_block.parent_hash, @@ -1646,8 +1652,14 @@ impl ExecutionLayer { } ExecutionBlockWithTransactions::Eip4844(eip4844_block) => { #[cfg(feature = "withdrawals")] - let withdrawals = VariableList::new(eip4844_block.withdrawals.clone()) - .map_err(ApiError::DeserializeWithdrawals)?; + let withdrawals = VariableList::new( + eip4844_block + .withdrawals + .into_iter() + .map(|w| w.into()) + .collect(), + ) + .map_err(ApiError::DeserializeWithdrawals)?; ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { parent_hash: eip4844_block.parent_hash, diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index f2282c6039d..a7ec429e456 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -76,7 +76,7 @@ impl Block { pub fn as_execution_block_with_tx(&self) -> Option> { match self { - Block::PoS(payload) => Some(payload.clone().into()), + Block::PoS(payload) => Some(payload.clone().try_into().unwrap()), Block::PoW(_) => None, } } From 63d3dd27fc2cc050083861a7a82ed15c3bb08ca9 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 14 Dec 2022 12:01:33 +1100 Subject: [PATCH 089/263] Batch API for address changes (#3798) --- beacon_node/http_api/src/lib.rs | 72 ++++++++++++++++++++++----------- 1 file changed, 49 insertions(+), 23 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs 
b/beacon_node/http_api/src/lib.rs index b75e583fc76..47ea99c8733 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1673,36 +1673,62 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(network_tx_filter.clone()) + .and(log_filter.clone()) .and_then( |chain: Arc>, - address_change: SignedBlsToExecutionChange, - network_tx: UnboundedSender>| { + address_changes: Vec, + network_tx: UnboundedSender>, + log: Logger| { blocking_json_task(move || { - let outcome = chain - .verify_bls_to_execution_change_for_gossip(address_change) - .map_err(|e| { - warp_utils::reject::object_invalid(format!( - "gossip verification failed: {:?}", - e - )) - })?; + let mut failures = vec![]; + + for (index, address_change) in address_changes.into_iter().enumerate() { + let validator_index = address_change.message.validator_index; + + match chain.verify_bls_to_execution_change_for_gossip(address_change) { + Ok(ObservationOutcome::New(verified_address_change)) => { + #[cfg(feature = "withdrawals-processing")] + { + publish_pubsub_message( + &network_tx, + PubsubMessage::BlsToExecutionChange(Box::new( + verified_address_change.as_inner().clone(), + )), + )?; + } - if let ObservationOutcome::New(address_change) = outcome { - #[cfg(feature = "withdrawals-processing")] - { - publish_pubsub_message( - &network_tx, - PubsubMessage::BlsToExecutionChange(Box::new( - address_change.as_inner().clone(), - )), - )?; + chain.import_bls_to_execution_change(verified_address_change); + } + Ok(ObservationOutcome::AlreadyKnown) => { + debug!( + log, + "BLS to execution change already known"; + "validator_index" => validator_index, + ); + } + Err(e) => { + error!( + log, + "Invalid BLS to execution change"; + "validator_index" => validator_index, + "source" => "HTTP API", + ); + failures.push(api_types::Failure::new( + index, + format!("invalid: {e:?}"), + )); + } } - drop(network_tx); - - chain.import_bls_to_execution_change(address_change); } - Ok(()) + if failures.is_empty() { + Ok(()) + } else { + Err(warp_utils::reject::indexed_bad_request( + "some BLS to execution changes failed to verify".into(), + failures, + )) + } }) }, ); From f3e8ca852e9003a6b9aaadb8c81c7021e996dced Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 14 Dec 2022 14:04:13 +1100 Subject: [PATCH 090/263] Fix Clippy --- beacon_node/http_api/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index e573e6a5d8b..783b8b68f8d 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1677,7 +1677,7 @@ pub fn serve( .and_then( |chain: Arc>, address_changes: Vec, - network_tx: UnboundedSender>, + #[allow(unused)] network_tx: UnboundedSender>, log: Logger| { blocking_json_task(move || { let mut failures = vec![]; From d48460782bfa3738f20e7823e6c304313fbf14e5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 15 Dec 2022 11:42:35 +1100 Subject: [PATCH 091/263] Publish capella images on push (#3803) --- .github/workflows/docker.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 13b84116955..0643165c9fd 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -5,6 +5,7 @@ on: branches: - unstable - stable + - capella tags: - v* @@ -34,6 +35,11 @@ jobs: run: | echo "VERSION=latest" >> $GITHUB_ENV echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV + - name: Extract version (if capella) + if: 
github.event.ref == 'refs/heads/capella' + run: | + echo "VERSION=capella" >> $GITHUB_ENV + echo "VERSION_SUFFIX=" >> $GITHUB_ENV - name: Extract version (if tagged release) if: startsWith(github.event.ref, 'refs/tags') run: | From 2c7ebc7278aa46b997f0a1959fdaebdf41719f6f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 15 Dec 2022 12:25:45 +1100 Subject: [PATCH 092/263] Enable withdrawals features in Capella docker images (#3805) --- .github/workflows/docker.yml | 1 + lcli/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 0643165c9fd..25d2cdab302 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -66,6 +66,7 @@ jobs: DOCKER_CLI_EXPERIMENTAL: enabled VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} + CROSS_FEATURES: withdrawals,withdrawals-processing steps: - uses: actions/checkout@v3 - name: Update Rust diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 1129e710f46..feda81d0302 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.62.1-bullseye AS builder +FROM rust:1.65.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse ARG PORTABLE From 558367ab8c5c2b99c9c5aaca1b4561bbddc9b017 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 16 Dec 2022 09:20:45 +1100 Subject: [PATCH 093/263] Bounded withdrawals and spec v1.3.0-alpha.2 (#3802) --- .../src/per_block_processing.rs | 33 +++++++++++++++---- consensus/types/presets/gnosis/capella.yaml | 17 ++++++++++ consensus/types/presets/mainnet/capella.yaml | 7 +++- consensus/types/presets/minimal/capella.yaml | 7 +++- consensus/types/src/chain_spec.rs | 6 +++- consensus/types/src/config_and_preset.rs | 25 ++++++++------ consensus/types/src/lib.rs | 4 +-- consensus/types/src/preset.rs | 24 ++++++++++++++ testing/ef_tests/Makefile | 2 +- 9 files changed, 103 insertions(+), 22 deletions(-) create mode 100644 consensus/types/presets/gnosis/capella.yaml diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 52699222600..7af74428b59 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -466,7 +466,9 @@ pub fn compute_timestamp_at_slot( .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) } -/// FIXME: add link to this function once the spec is stable +/// Compute the next batch of withdrawals which should be included in a block. 
+/// +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-get_expected_withdrawals #[cfg(feature = "withdrawals")] pub fn get_expected_withdrawals( state: &BeaconState, @@ -481,7 +483,11 @@ pub fn get_expected_withdrawals( return Ok(withdrawals.into()); } - for _ in 0..state.validators().len() { + let bound = std::cmp::min( + state.validators().len() as u64, + spec.max_validators_per_withdrawals_sweep, + ); + for _ in 0..bound { let validator = state.get_validator(validator_index as usize)?; let balance = *state.balances().get(validator_index as usize).ok_or( BeaconStateError::BalancesOutOfBounds(validator_index as usize), @@ -518,7 +524,7 @@ pub fn get_expected_withdrawals( Ok(withdrawals.into()) } -/// FIXME: add link to this function once the spec is stable +/// Apply withdrawals to the state. #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload>( state: &mut BeaconState, @@ -547,11 +553,26 @@ pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload )?; } + // Update the next withdrawal index if this block contained withdrawals if let Some(latest_withdrawal) = expected_withdrawals.last() { *state.next_withdrawal_index_mut()? = latest_withdrawal.index.safe_add(1)?; - let next_validator_index = latest_withdrawal - .validator_index - .safe_add(1)? + + // Update the next validator index to start the next withdrawal sweep + if expected_withdrawals.len() == T::max_withdrawals_per_payload() { + // Next sweep starts after the latest withdrawal's validator index + let next_validator_index = latest_withdrawal + .validator_index + .safe_add(1)? + .safe_rem(state.validators().len() as u64)?; + *state.next_withdrawal_validator_index_mut()? = next_validator_index; + } + } + + // Advance sweep by the max length of the sweep if there was not a full set of withdrawals + if expected_withdrawals.len() != T::max_withdrawals_per_payload() { + let next_validator_index = state + .next_withdrawal_validator_index()? + .safe_add(spec.max_validators_per_withdrawals_sweep)? .safe_rem(state.validators().len() as u64)?; *state.next_withdrawal_validator_index_mut()? 
= next_validator_index; } diff --git a/consensus/types/presets/gnosis/capella.yaml b/consensus/types/presets/gnosis/capella.yaml new file mode 100644 index 00000000000..913c2956ba7 --- /dev/null +++ b/consensus/types/presets/gnosis/capella.yaml @@ -0,0 +1,17 @@ +# Mainnet preset - Capella + +# Misc +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_BLS_TO_EXECUTION_CHANGES: 16 + +# Execution +# --------------------------------------------------------------- +# 2**4 (= 16) withdrawals +MAX_WITHDRAWALS_PER_PAYLOAD: 16 + +# Withdrawals processing +# --------------------------------------------------------------- +# 2**14 (= 16384) validators +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16384 diff --git a/consensus/types/presets/mainnet/capella.yaml b/consensus/types/presets/mainnet/capella.yaml index 0c087255bfb..913c2956ba7 100644 --- a/consensus/types/presets/mainnet/capella.yaml +++ b/consensus/types/presets/mainnet/capella.yaml @@ -9,4 +9,9 @@ MAX_BLS_TO_EXECUTION_CHANGES: 16 # Execution # --------------------------------------------------------------- # 2**4 (= 16) withdrawals -MAX_WITHDRAWALS_PER_PAYLOAD: 16 \ No newline at end of file +MAX_WITHDRAWALS_PER_PAYLOAD: 16 + +# Withdrawals processing +# --------------------------------------------------------------- +# 2**14 (= 16384) validators +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16384 diff --git a/consensus/types/presets/minimal/capella.yaml b/consensus/types/presets/minimal/capella.yaml index eacd6c7cbca..d27253de871 100644 --- a/consensus/types/presets/minimal/capella.yaml +++ b/consensus/types/presets/minimal/capella.yaml @@ -9,4 +9,9 @@ MAX_BLS_TO_EXECUTION_CHANGES: 16 # Execution # --------------------------------------------------------------- # [customized] 2**2 (= 4) -MAX_WITHDRAWALS_PER_PAYLOAD: 4 \ No newline at end of file +MAX_WITHDRAWALS_PER_PAYLOAD: 4 + +# Withdrawals processing +# --------------------------------------------------------------- +# [customized] 2**4 (= 16) validators +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16 diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index d16c9b8091c..bf9a7ed34db 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -158,8 +158,9 @@ pub struct ChainSpec { * Capella hard fork params */ pub capella_fork_version: [u8; 4], - /// The Capella fork epoch is optional, with `None` representing "Merge never happens". + /// The Capella fork epoch is optional, with `None` representing "Capella never happens". 
pub capella_fork_epoch: Option, + pub max_validators_per_withdrawals_sweep: u64, /* * Eip4844 hard fork params @@ -634,6 +635,7 @@ impl ChainSpec { */ capella_fork_version: [0x03, 00, 00, 00], capella_fork_epoch: None, + max_validators_per_withdrawals_sweep: 16384, /* * Eip4844 hard fork params @@ -707,6 +709,7 @@ impl ChainSpec { // Capella capella_fork_version: [0x03, 0x00, 0x00, 0x01], capella_fork_epoch: None, + max_validators_per_withdrawals_sweep: 16, // Eip4844 eip4844_fork_version: [0x04, 0x00, 0x00, 0x01], eip4844_fork_epoch: None, @@ -869,6 +872,7 @@ impl ChainSpec { */ capella_fork_version: [0x03, 0x00, 0x00, 0x64], capella_fork_epoch: None, + max_validators_per_withdrawals_sweep: 16384, /* * Eip4844 hard fork params diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index f72b1710de3..9a618f7cc38 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,5 +1,6 @@ use crate::{ - consts::altair, AltairPreset, BasePreset, BellatrixPreset, ChainSpec, Config, EthSpec, ForkName, + consts::altair, AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, + EthSpec, ForkName, }; use maplit::hashmap; use serde_derive::{Deserialize, Serialize}; @@ -11,7 +12,7 @@ use superstruct::superstruct; /// /// Mostly useful for the API. #[superstruct( - variants(Altair, Bellatrix), + variants(Bellatrix, Capella), variant_attributes(derive(Serialize, Deserialize, Debug, PartialEq, Clone)) )] #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] @@ -24,9 +25,10 @@ pub struct ConfigAndPreset { pub base_preset: BasePreset, #[serde(flatten)] pub altair_preset: AltairPreset, - #[superstruct(only(Bellatrix))] #[serde(flatten)] pub bellatrix_preset: BellatrixPreset, + #[superstruct(only(Capella))] + pub capella_preset: CapellaPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. 
#[serde(flatten)] pub extra_fields: HashMap, @@ -37,26 +39,29 @@ impl ConfigAndPreset { let config = Config::from_chain_spec::(spec); let base_preset = BasePreset::from_chain_spec::(spec); let altair_preset = AltairPreset::from_chain_spec::(spec); + let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); let extra_fields = get_extra_fields(spec); - if spec.bellatrix_fork_epoch.is_some() + if spec.capella_fork_epoch.is_some() || fork_name.is_none() - || fork_name == Some(ForkName::Merge) + || fork_name == Some(ForkName::Capella) { - let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); + let capella_preset = CapellaPreset::from_chain_spec::(spec); - ConfigAndPreset::Bellatrix(ConfigAndPresetBellatrix { + ConfigAndPreset::Capella(ConfigAndPresetCapella { config, base_preset, altair_preset, bellatrix_preset, + capella_preset, extra_fields, }) } else { - ConfigAndPreset::Altair(ConfigAndPresetAltair { + ConfigAndPreset::Bellatrix(ConfigAndPresetBellatrix { config, base_preset, altair_preset, + bellatrix_preset, extra_fields, }) } @@ -131,8 +136,8 @@ mod test { .write(false) .open(tmp_file.as_ref()) .expect("error while opening the file"); - let from: ConfigAndPresetBellatrix = + let from: ConfigAndPresetCapella = serde_yaml::from_reader(reader).expect("error while deserializing"); - assert_eq!(ConfigAndPreset::Bellatrix(from), yamlconfig); + assert_eq!(ConfigAndPreset::Capella(from), yamlconfig); } } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 92f87f01dd6..6cbb9568dad 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -124,7 +124,7 @@ pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; pub use crate::config_and_preset::{ - ConfigAndPreset, ConfigAndPresetAltair, ConfigAndPresetBellatrix, + ConfigAndPreset, ConfigAndPresetBellatrix, ConfigAndPresetCapella, }; pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; @@ -163,7 +163,7 @@ pub use crate::payload::{ FullPayloadCapella, FullPayloadEip4844, FullPayloadMerge, FullPayloadRef, OwnedExecPayload, }; pub use crate::pending_attestation::PendingAttestation; -pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset}; +pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset, CapellaPreset}; pub use crate::proposer_preparation_data::ProposerPreparationData; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 8ee38e46a6d..7d7db228cef 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -184,6 +184,27 @@ impl BellatrixPreset { } } +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub struct CapellaPreset { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_bls_to_execution_changes: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_withdrawals_per_payload: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_validators_per_withdrawals_sweep: u64, +} + +impl CapellaPreset { + pub fn from_chain_spec(spec: &ChainSpec) -> Self { + Self { + max_bls_to_execution_changes: T::max_bls_to_execution_changes() as u64, + max_withdrawals_per_payload: T::max_withdrawals_per_payload() as u64, + max_validators_per_withdrawals_sweep: 
spec.max_validators_per_withdrawals_sweep, + } + } +} + #[cfg(test)] mod test { use super::*; @@ -219,6 +240,9 @@ mod test { let bellatrix: BellatrixPreset = preset_from_file(&preset_name, "bellatrix.yaml"); assert_eq!(bellatrix, BellatrixPreset::from_chain_spec::(&spec)); + + let capella: CapellaPreset = preset_from_file(&preset_name, "capella.yaml"); + assert_eq!(capella, CapellaPreset::from_chain_spec::(&spec)); } #[test] diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 717ff13c976..d52f546dc26 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.3.0-alpha.1 +TESTS_TAG := v1.3.0-alpha.2 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) From 3a08c7634e68875b40cdee34ba58c6566d397c9f Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Fri, 16 Dec 2022 14:56:07 -0600 Subject: [PATCH 094/263] Make engine_getPayloadV2 accept local block value --- beacon_node/execution_layer/src/engine_api/http.rs | 4 ++-- .../execution_layer/src/engine_api/json_structures.rs | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index c71cfa0c04b..1616b216340 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -760,7 +760,7 @@ impl HttpJsonRpc { ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); - let payload_v2: JsonExecutionPayloadV2 = self + let response: JsonGetPayloadResponse = self .rpc_request( ENGINE_GET_PAYLOAD_V2, params, @@ -768,7 +768,7 @@ impl HttpJsonRpc { ) .await?; - JsonExecutionPayload::V2(payload_v2).try_into_execution_payload(fork_name) + JsonExecutionPayload::V2(response.execution_payload).try_into_execution_payload(fork_name) } pub async fn get_blobs_bundle_v1( diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index feed6215896..18e52eb06f6 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -324,6 +324,15 @@ impl TryFrom> for JsonExecutionPayloadV2 { } } +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(bound = "T: EthSpec", rename_all = "camelCase")] +pub struct JsonGetPayloadResponse { + pub execution_payload: JsonExecutionPayloadV2, + // uncomment this when geth fixes its serialization + //#[serde(with = "eth2_serde_utils::u256_hex_be")] + //pub block_value: Uint256, +} + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonWithdrawal { From b75ca74222ac1b8dda5efd6a48b1388cd622f372 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Mon, 19 Dec 2022 15:10:50 -0600 Subject: [PATCH 095/263] Removed `withdrawals` feature flag --- .github/workflows/docker.yml | 2 +- Makefile | 2 +- beacon_node/Cargo.toml | 1 - beacon_node/beacon_chain/Cargo.toml | 1 - beacon_node/beacon_chain/src/beacon_chain.rs | 16 +---- .../beacon_chain/src/execution_payload.rs | 21 ++---- beacon_node/execution_layer/Cargo.toml | 1 - beacon_node/execution_layer/src/engine_api.rs | 3 - .../execution_layer/src/engine_api/http.rs | 6 +- .../src/engine_api/json_structures.rs | 8 --- beacon_node/execution_layer/src/lib.rs | 6 -- .../src/test_utils/mock_execution_layer.rs | 3 - beacon_node/store/Cargo.toml | 1 - 
beacon_node/store/src/partial_beacon_state.rs | 64 ------------------- common/eth2/Cargo.toml | 1 - consensus/state_processing/Cargo.toml | 1 - .../src/per_block_processing.rs | 9 ++- .../block_signature_verifier.rs | 2 - .../process_operations.rs | 4 +- .../state_processing/src/upgrade/capella.rs | 2 - .../state_processing/src/upgrade/eip4844.rs | 10 +-- consensus/types/Cargo.toml | 1 - consensus/types/src/beacon_block.rs | 2 - consensus/types/src/beacon_block_body.rs | 9 --- consensus/types/src/beacon_state.rs | 2 - .../types/src/beacon_state/tree_hash_cache.rs | 2 - consensus/types/src/execution_payload.rs | 1 - .../types/src/execution_payload_header.rs | 5 -- consensus/types/src/payload.rs | 6 -- consensus/types/src/signed_beacon_block.rs | 4 -- lighthouse/Cargo.toml | 2 - testing/ef_tests/src/cases/operations.rs | 10 +-- testing/ef_tests/src/lib.rs | 2 +- testing/ef_tests/tests/tests.rs | 4 +- .../execution_engine_integration/Cargo.toml | 3 +- 35 files changed, 29 insertions(+), 188 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 25d2cdab302..c0a02adf4ed 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -66,7 +66,7 @@ jobs: DOCKER_CLI_EXPERIMENTAL: enabled VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} - CROSS_FEATURES: withdrawals,withdrawals-processing + CROSS_FEATURES: withdrawals-processing steps: - uses: actions/checkout@v3 - name: Update Rust diff --git a/Makefile b/Makefile index 56e05fffcb7..15d09c5867f 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx CROSS_PROFILE ?= release # List of features to use when running EF tests. -EF_TEST_FEATURES ?= beacon_chain/withdrawals,beacon_chain/withdrawals-processing +EF_TEST_FEATURES ?= beacon_chain/withdrawals-processing # Cargo profile for regular builds. PROFILE ?= release diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 309d7a83f78..bed32011f1b 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -13,7 +13,6 @@ node_test_rig = { path = "../testing/node_test_rig" } [features] write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing. -withdrawals = ["beacon_chain/withdrawals", "types/withdrawals", "store/withdrawals", "execution_layer/withdrawals"] withdrawals-processing = [ "beacon_chain/withdrawals-processing", "store/withdrawals-processing", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 6d768476e6a..a6ac6603791 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -10,7 +10,6 @@ default = ["participation_metrics"] write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing. participation_metrics = [] # Exposes validator participation metrics to Prometheus. 
fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable -withdrawals = ["state_processing/withdrawals", "types/withdrawals", "store/withdrawals", "execution_layer/withdrawals"] withdrawals-processing = [ "state_processing/withdrawals-processing", "store/withdrawals-processing", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 17e0a6f121a..5c0311736ca 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -79,14 +79,12 @@ use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; -#[cfg(feature = "withdrawals")] -use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::{ common::get_attesting_indices_from_state, per_block_processing, per_block_processing::{ - errors::AttestationValidationError, verify_attestation_for_block_inclusion, - VerifySignatures, + errors::AttestationValidationError, get_expected_withdrawals, + verify_attestation_for_block_inclusion, VerifySignatures, }, per_slot_processing, state_advance::{complete_state_advance, partial_state_advance}, @@ -287,7 +285,6 @@ struct PartialBeaconBlock> { voluntary_exits: Vec, sync_aggregate: Option>, prepare_payload_handle: Option>, - #[cfg(feature = "withdrawals")] bls_to_execution_changes: Vec, } @@ -4182,7 +4179,6 @@ impl BeaconChain { let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; let deposits = eth1_chain.deposits_for_block_inclusion(&state, ð1_data, &self.spec)?; - #[cfg(feature = "withdrawals")] let bls_to_execution_changes = self .op_pool .get_bls_to_execution_changes(&state, &self.spec); @@ -4345,7 +4341,6 @@ impl BeaconChain { voluntary_exits, sync_aggregate, prepare_payload_handle, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, }) } @@ -4375,7 +4370,6 @@ impl BeaconChain { // this function. We can assume that the handle has already been consumed in order to // produce said `execution_payload`. 
prepare_payload_handle: _, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, } = partial_beacon_block; @@ -4460,7 +4454,6 @@ impl BeaconChain { .to_payload() .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - #[cfg(feature = "withdrawals")] bls_to_execution_changes: bls_to_execution_changes.into(), }, }), @@ -4485,7 +4478,6 @@ impl BeaconChain { .to_payload() .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - #[cfg(feature = "withdrawals")] bls_to_execution_changes: bls_to_execution_changes.into(), //FIXME(sean) get blobs blob_kzg_commitments: VariableList::from(kzg_commitments), @@ -4743,7 +4735,6 @@ impl BeaconChain { return Ok(()); } - #[cfg(feature = "withdrawals")] let withdrawals = match self.spec.fork_name_at_slot::(prepare_slot) { ForkName::Base | ForkName::Altair | ForkName::Merge => None, ForkName::Capella | ForkName::Eip4844 => { @@ -4778,10 +4769,7 @@ impl BeaconChain { execution_layer .get_suggested_fee_recipient(proposer as u64) .await, - #[cfg(feature = "withdrawals")] withdrawals, - #[cfg(not(feature = "withdrawals"))] - None, ); debug!( diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 1982bdbf022..d52df4853df 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -17,11 +17,9 @@ use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; use slot_clock::SlotClock; -#[cfg(feature = "withdrawals")] -use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_block_processing::{ - compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, - partially_verify_execution_payload, + compute_timestamp_at_slot, get_expected_withdrawals, is_execution_enabled, + is_merge_transition_complete, partially_verify_execution_payload, }; use std::sync::Arc; use tokio::task::JoinHandle; @@ -382,7 +380,6 @@ pub fn get_execution_payload< let random = *state.get_randao_mix(current_epoch)?; let latest_execution_payload_header_block_hash = state.latest_execution_payload_header()?.block_hash(); - #[cfg(feature = "withdrawals")] let withdrawals = match state { &BeaconState::Capella(_) | &BeaconState::Eip4844(_) => { Some(get_expected_withdrawals(state, spec)?.into()) @@ -407,7 +404,6 @@ pub fn get_execution_payload< proposer_index, latest_execution_payload_header_block_hash, builder_params, - #[cfg(feature = "withdrawals")] withdrawals, ) .await @@ -442,7 +438,7 @@ pub async fn prepare_execution_payload( proposer_index: u64, latest_execution_payload_header_block_hash: ExecutionBlockHash, builder_params: BuilderParams, - #[cfg(feature = "withdrawals")] withdrawals: Option>, + withdrawals: Option>, ) -> Result, BlockProductionError> where T: BeaconChainTypes, @@ -504,15 +500,8 @@ where let suggested_fee_recipient = execution_layer .get_suggested_fee_recipient(proposer_index) .await; - let payload_attributes = PayloadAttributes::new( - timestamp, - random, - suggested_fee_recipient, - #[cfg(feature = "withdrawals")] - withdrawals, - #[cfg(not(feature = "withdrawals"))] - None, - ); + let payload_attributes = + PayloadAttributes::new(timestamp, random, suggested_fee_recipient, withdrawals); // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. 
// diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index b3bdc54d02a..47c1e0341b6 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -5,7 +5,6 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] -withdrawals = ["state_processing/withdrawals", "types/withdrawals", "eth2/withdrawals"] withdrawals-processing = ["state_processing/withdrawals-processing", "eth2/withdrawals-processing"] [dependencies] diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 424ca30d137..80cdeacb34f 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -165,7 +165,6 @@ pub struct ExecutionBlockWithTransactions { #[serde(rename = "hash")] pub block_hash: ExecutionBlockHash, pub transactions: Vec, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub withdrawals: Vec, } @@ -215,7 +214,6 @@ impl TryFrom> for ExecutionBlockWithTransactions .iter() .map(|tx| Transaction::decode(&Rlp::new(tx))) .collect::, _>>()?, - #[cfg(feature = "withdrawals")] withdrawals: Vec::from(block.withdrawals) .into_iter() .map(|withdrawal| withdrawal.into()) @@ -243,7 +241,6 @@ impl TryFrom> for ExecutionBlockWithTransactions .iter() .map(|tx| Transaction::decode(&Rlp::new(tx))) .collect::, _>>()?, - #[cfg(feature = "withdrawals")] withdrawals: Vec::from(block.withdrawals) .into_iter() .map(|withdrawal| withdrawal.into()) diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 1616b216340..29f66393e5c 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -852,11 +852,11 @@ impl HttpJsonRpc { pub async fn supported_apis_v1(&self) -> Result { Ok(SupportedApis { new_payload_v1: true, - new_payload_v2: cfg!(all(feature = "withdrawals", not(test))), + new_payload_v2: cfg!(not(test)), forkchoice_updated_v1: true, - forkchoice_updated_v2: cfg!(all(feature = "withdrawals", not(test))), + forkchoice_updated_v2: cfg!(not(test)), get_payload_v1: true, - get_payload_v2: cfg!(all(feature = "withdrawals", not(test))), + get_payload_v2: cfg!(not(test)), exchange_transition_configuration_v1: true, }) } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 18e52eb06f6..13948affb55 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -166,7 +166,6 @@ impl JsonExecutionPayload { base_fee_per_gas: v2.base_fee_per_gas, block_hash: v2.block_hash, transactions: v2.transactions, - #[cfg(feature = "withdrawals")] withdrawals: v2 .withdrawals .map(|v| { @@ -194,7 +193,6 @@ impl JsonExecutionPayload { excess_data_gas: v2.excess_data_gas.ok_or_else(|| Error::BadConversion("Null `excess_data_gas` field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))?, block_hash: v2.block_hash, transactions: v2.transactions, - #[cfg(feature = "withdrawals")] withdrawals: v2 .withdrawals .map(|v| { @@ -282,7 +280,6 @@ impl TryFrom> for JsonExecutionPayloadV2 { excess_data_gas: None, block_hash: capella.block_hash, transactions: capella.transactions, - #[cfg(feature = "withdrawals")] withdrawals: Some( Vec::from(capella.withdrawals) 
.into_iter() @@ -290,8 +287,6 @@ impl TryFrom> for JsonExecutionPayloadV2 { .collect::>() .into(), ), - #[cfg(not(feature = "withdrawals"))] - withdrawals: None, }), ExecutionPayload::Eip4844(eip4844) => Ok(JsonExecutionPayloadV2 { parent_hash: eip4844.parent_hash, @@ -309,7 +304,6 @@ impl TryFrom> for JsonExecutionPayloadV2 { excess_data_gas: Some(eip4844.excess_data_gas), block_hash: eip4844.block_hash, transactions: eip4844.transactions, - #[cfg(feature = "withdrawals")] withdrawals: Some( Vec::from(eip4844.withdrawals) .into_iter() @@ -317,8 +311,6 @@ impl TryFrom> for JsonExecutionPayloadV2 { .collect::>() .into(), ), - #[cfg(not(feature = "withdrawals"))] - withdrawals: None, }), } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 1761af09e8e..e22da42a72c 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1633,7 +1633,6 @@ impl ExecutionLayer { }) } ExecutionBlockWithTransactions::Capella(capella_block) => { - #[cfg(feature = "withdrawals")] let withdrawals = VariableList::new( capella_block .withdrawals @@ -1642,7 +1641,6 @@ impl ExecutionLayer { .collect(), ) .map_err(ApiError::DeserializeWithdrawals)?; - ExecutionPayload::Capella(ExecutionPayloadCapella { parent_hash: capella_block.parent_hash, fee_recipient: capella_block.fee_recipient, @@ -1658,12 +1656,10 @@ impl ExecutionLayer { base_fee_per_gas: capella_block.base_fee_per_gas, block_hash: capella_block.block_hash, transactions, - #[cfg(feature = "withdrawals")] withdrawals, }) } ExecutionBlockWithTransactions::Eip4844(eip4844_block) => { - #[cfg(feature = "withdrawals")] let withdrawals = VariableList::new( eip4844_block .withdrawals @@ -1672,7 +1668,6 @@ impl ExecutionLayer { .collect(), ) .map_err(ApiError::DeserializeWithdrawals)?; - ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { parent_hash: eip4844_block.parent_hash, fee_recipient: eip4844_block.fee_recipient, @@ -1689,7 +1684,6 @@ impl ExecutionLayer { excess_data_gas: eip4844_block.excess_data_gas, block_hash: eip4844_block.block_hash, transactions, - #[cfg(feature = "withdrawals")] withdrawals, }) } diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index f0f84491258..e552b7ca7ab 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -103,10 +103,7 @@ impl MockExecutionLayer { prev_randao, Address::repeat_byte(42), // FIXME: think about how to handle different forks / withdrawals here.. - #[cfg(feature = "withdrawals")] Some(vec![]), - #[cfg(not(feature = "withdrawals"))] - None, ); // Insert a proposer to ensure the fork choice updated command works. 
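With the feature gone, `PayloadAttributes::new` always takes a withdrawals argument: the `beacon_chain.rs` and `execution_payload.rs` hunks earlier in this diff, and the mock above, simply pass `None` for pre-Capella forks and `Some(...)` from Capella onwards. A self-contained sketch of that calling pattern, using stand-in types rather than the Lighthouse definitions:

struct Withdrawal { index: u64, validator_index: u64, amount_gwei: u64 }

struct PayloadAttributes {
    timestamp: u64,
    prev_randao: [u8; 32],
    suggested_fee_recipient: [u8; 20],
    withdrawals: Option<Vec<Withdrawal>>,
}

impl PayloadAttributes {
    // The withdrawals parameter is always present; callers decide per fork.
    fn new(
        timestamp: u64,
        prev_randao: [u8; 32],
        suggested_fee_recipient: [u8; 20],
        withdrawals: Option<Vec<Withdrawal>>,
    ) -> Self {
        Self { timestamp, prev_randao, suggested_fee_recipient, withdrawals }
    }
}

enum ForkName { Base, Altair, Merge, Capella, Eip4844 }

fn attributes_for(fork: ForkName, timestamp: u64) -> PayloadAttributes {
    let withdrawals = match fork {
        // Pre-Capella forks have no withdrawals, so pass `None` rather than
        // compiling the argument in and out behind a feature flag.
        ForkName::Base | ForkName::Altair | ForkName::Merge => None,
        ForkName::Capella | ForkName::Eip4844 => Some(Vec::new()),
    };
    PayloadAttributes::new(timestamp, [0u8; 32], [0u8; 20], withdrawals)
}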
diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index b3e8e1fc6b5..897f6b020c7 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -28,5 +28,4 @@ directory = { path = "../../common/directory" } strum = { version = "0.24.0", features = ["derive"] } [features] -withdrawals = ["state_processing/withdrawals", "types/withdrawals"] withdrawals-processing = ["state_processing/withdrawals-processing"] \ No newline at end of file diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 12c56284966..ca35bc0b222 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -105,10 +105,8 @@ where pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, // Withdrawals - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub next_withdrawal_index: u64, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub next_withdrawal_validator_index: u64, } @@ -199,7 +197,6 @@ impl PartialBeaconState { latest_execution_payload_header ] ), - #[cfg(feature = "withdrawals")] BeaconState::Capella(s) => impl_from_state_forgetful!( s, outer, @@ -216,22 +213,6 @@ impl PartialBeaconState { next_withdrawal_validator_index ] ), - #[cfg(not(feature = "withdrawals"))] - BeaconState::Capella(s) => impl_from_state_forgetful!( - s, - outer, - Capella, - PartialBeaconStateCapella, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ] - ), - #[cfg(feature = "withdrawals")] BeaconState::Eip4844(s) => impl_from_state_forgetful!( s, outer, @@ -248,21 +229,6 @@ impl PartialBeaconState { next_withdrawal_validator_index ] ), - #[cfg(not(feature = "withdrawals"))] - BeaconState::Eip4844(s) => impl_from_state_forgetful!( - s, - outer, - Eip4844, - PartialBeaconStateEip4844, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ] - ), } } @@ -450,7 +416,6 @@ impl TryInto> for PartialBeaconState { latest_execution_payload_header ] ), - #[cfg(feature = "withdrawals")] PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( inner, Capella, @@ -466,21 +431,6 @@ impl TryInto> for PartialBeaconState { next_withdrawal_validator_index ] ), - #[cfg(not(feature = "withdrawals"))] - PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( - inner, - Capella, - BeaconStateCapella, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ] - ), - #[cfg(feature = "withdrawals")] PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!( inner, Eip4844, @@ -496,20 +446,6 @@ impl TryInto> for PartialBeaconState { next_withdrawal_validator_index ] ), - #[cfg(not(feature = "withdrawals"))] - PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!( - inner, - Eip4844, - BeaconStateEip4844, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ] - ), }; Ok(state) } diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 6ee02b71ba6..fc5eba98e29 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -35,5 +35,4 @@ procinfo 
= { version = "0.4.2", optional = true } [features] default = ["lighthouse"] lighthouse = ["proto_array", "psutil", "procinfo", "store", "slashing_protection"] -withdrawals = ["store/withdrawals"] withdrawals-processing = ["store/withdrawals-processing"] \ No newline at end of file diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 39a0be3d9fd..0b79539877a 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -43,5 +43,4 @@ arbitrary-fuzz = [ "eth2_ssz_types/arbitrary", "tree_hash/arbitrary", ] -withdrawals = ["types/withdrawals"] withdrawals-processing = [] diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 7af74428b59..f1a544099f3 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -19,7 +19,7 @@ pub use process_operations::process_operations; pub use verify_attestation::{ verify_attestation_for_block_inclusion, verify_attestation_for_state, }; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] pub use verify_bls_to_execution_change::verify_bls_to_execution_change; pub use verify_deposit::{ get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, @@ -36,7 +36,7 @@ pub mod signature_sets; pub mod tests; mod verify_attestation; mod verify_attester_slashing; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] mod verify_bls_to_execution_change; mod verify_deposit; mod verify_exit; @@ -165,7 +165,7 @@ pub fn per_block_processing>( // previous block. if is_execution_enabled(state, block.body()) { let payload = block.body().execution_payload()?; - #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] + #[cfg(feature = "withdrawals-processing")] process_withdrawals::(state, payload, spec)?; process_execution_payload::(state, payload, spec)?; } @@ -469,7 +469,6 @@ pub fn compute_timestamp_at_slot( /// Compute the next batch of withdrawals which should be included in a block. /// /// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-get_expected_withdrawals -#[cfg(feature = "withdrawals")] pub fn get_expected_withdrawals( state: &BeaconState, spec: &ChainSpec, @@ -525,7 +524,7 @@ pub fn get_expected_withdrawals( } /// Apply withdrawals to the state. -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload>( state: &mut BeaconState, payload: Payload::Ref<'payload>, diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 50bfbfdc454..bbf2c1caa51 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -170,7 +170,6 @@ where // Deposits are not included because they can legally have invalid signatures. 
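In `per_block_processing.rs` above, `get_expected_withdrawals` loses its `withdrawals` gate and is compiled unconditionally, while `process_withdrawals` stays behind `withdrawals-processing`. For readers who have not seen the routine behind the linked spec section, a heavily simplified, self-contained sketch of the sweep follows; the field names, the credential check, and the constants are stand-ins, and this is not the Lighthouse implementation:

const MAX_WITHDRAWALS_PER_PAYLOAD: usize = 16;
const MAX_EFFECTIVE_BALANCE: u64 = 32_000_000_000; // Gwei

struct Validator { withdrawable_epoch: u64, effective_balance: u64, has_execution_credential: bool }

struct StateSketch {
    current_epoch: u64,
    next_withdrawal_index: u64,
    next_withdrawal_validator_index: usize,
    validators: Vec<Validator>,
    balances: Vec<u64>,
}

struct Withdrawal { index: u64, validator_index: usize, amount: u64 }

// Sweep validators starting at `next_withdrawal_validator_index`: fully withdrawable
// validators withdraw their whole balance, validators above the effective-balance cap
// withdraw the excess, and the batch stops at MAX_WITHDRAWALS_PER_PAYLOAD.
fn expected_withdrawals(state: &StateSketch) -> Vec<Withdrawal> {
    let mut withdrawals = Vec::new();
    let mut index = state.next_withdrawal_index;
    let mut vi = state.next_withdrawal_validator_index;
    for _ in 0..state.validators.len() {
        let v = &state.validators[vi];
        let balance = state.balances[vi];
        if v.has_execution_credential && v.withdrawable_epoch <= state.current_epoch && balance > 0 {
            withdrawals.push(Withdrawal { index, validator_index: vi, amount: balance });
            index += 1;
        } else if v.has_execution_credential
            && v.effective_balance == MAX_EFFECTIVE_BALANCE
            && balance > MAX_EFFECTIVE_BALANCE
        {
            withdrawals.push(Withdrawal { index, validator_index: vi, amount: balance - MAX_EFFECTIVE_BALANCE });
            index += 1;
        }
        if withdrawals.len() == MAX_WITHDRAWALS_PER_PAYLOAD {
            break;
        }
        vi = (vi + 1) % state.validators.len();
    }
    withdrawals
}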
self.include_exits(block)?; self.include_sync_aggregate(block)?; - #[cfg(feature = "withdrawals")] self.include_bls_to_execution_changes(block)?; Ok(()) @@ -345,7 +344,6 @@ where } /// Include the signature of the block's BLS to execution changes for verification. - #[cfg(feature = "withdrawals")] pub fn include_bls_to_execution_changes>( &mut self, block: &'a SignedBeaconBlock, diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 105faba83bd..f27fd48b4f5 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -34,7 +34,7 @@ pub fn process_operations<'a, T: EthSpec, Payload: AbstractExecPayload>( process_deposits(state, block_body.deposits(), spec)?; process_exits(state, block_body.voluntary_exits(), verify_signatures, spec)?; - #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] + #[cfg(feature = "withdrawals-processing")] if let Ok(bls_to_execution_changes) = block_body.bls_to_execution_changes() { process_bls_to_execution_changes(state, bls_to_execution_changes, verify_signatures, spec)?; } @@ -295,7 +295,7 @@ pub fn process_exits( /// /// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returns /// an `Err` describing the invalid object or cause of failure. -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] pub fn process_bls_to_execution_changes( state: &mut BeaconState, bls_to_execution_changes: &[SignedBlsToExecutionChange], diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index 9a883698830..dc759b384d8 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -56,9 +56,7 @@ pub fn upgrade_to_capella( // Execution latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_capella(), // Withdrawals - #[cfg(feature = "withdrawals")] next_withdrawal_index: 0, - #[cfg(feature = "withdrawals")] next_withdrawal_validator_index: 0, // Caches total_active_balance: pre.total_active_balance, diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index 6d66fd8412c..131100bb384 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -10,12 +10,8 @@ pub fn upgrade_to_eip4844( let pre = pre_state.as_capella_mut()?; // FIXME(sean) This is a hack to let us participate in testnets where capella doesn't exist. - // if we are disabling withdrawals, assume we should fork off of bellatrix. - let previous_fork_version = if cfg!(feature = "withdrawals") { - pre.fork.current_version - } else { - spec.bellatrix_fork_version - }; + // let previous_fork_version = spec.bellatrix_fork_version; + let previous_fork_version = pre.fork.current_version; // Where possible, use something like `mem::take` to move fields from behind the &mut // reference. For other fields that don't have a good default value, use `clone`. 
@@ -64,9 +60,7 @@ pub fn upgrade_to_eip4844( // Execution latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_eip4844(), // Withdrawals - #[cfg(feature = "withdrawals")] next_withdrawal_index: pre.next_withdrawal_index, - #[cfg(feature = "withdrawals")] next_withdrawal_validator_index: pre.next_withdrawal_validator_index, // Caches total_active_balance: pre.total_active_balance, diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index b3ef3ae3825..671cacfa2eb 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -72,4 +72,3 @@ arbitrary-fuzz = [ "swap_or_not_shuffle/arbitrary", "tree_hash/arbitrary", ] -withdrawals = [] diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 124cb08bcc0..fd38e9faf26 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -502,7 +502,6 @@ impl> EmptyBlock for BeaconBlockCape voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), execution_payload: Payload::Capella::default(), - #[cfg(feature = "withdrawals")] bls_to_execution_changes: VariableList::empty(), }, } @@ -532,7 +531,6 @@ impl> EmptyBlock for BeaconBlockEip4 voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), execution_payload: Payload::Eip4844::default(), - #[cfg(feature = "withdrawals")] bls_to_execution_changes: VariableList::empty(), blob_kzg_commitments: VariableList::empty(), }, diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 1dd938ac465..dbdbcddb1b8 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -62,7 +62,6 @@ pub struct BeaconBlockBody = FullPay #[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))] #[serde(flatten)] pub execution_payload: Payload::Eip4844, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub bls_to_execution_changes: VariableList, @@ -301,7 +300,6 @@ impl From>> voluntary_exits, sync_aggregate, execution_payload: FullPayloadCapella { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, } = body; @@ -319,7 +317,6 @@ impl From>> execution_payload: BlindedPayloadCapella { execution_payload_header: From::from(execution_payload.clone()), }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, }, Some(execution_payload), @@ -345,7 +342,6 @@ impl From>> voluntary_exits, sync_aggregate, execution_payload: FullPayloadEip4844 { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, blob_kzg_commitments, } = body; @@ -364,7 +360,6 @@ impl From>> execution_payload: BlindedPayloadEip4844 { execution_payload_header: From::from(execution_payload.clone()), }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, blob_kzg_commitments, }, @@ -433,7 +428,6 @@ impl BeaconBlockBodyCapella> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadCapella { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, } = self; @@ -450,7 +444,6 @@ impl BeaconBlockBodyCapella> { execution_payload: BlindedPayloadCapella { execution_payload_header: From::from(execution_payload.clone()), }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes: bls_to_execution_changes.clone(), } } @@ -469,7 +462,6 @@ impl BeaconBlockBodyEip4844> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadEip4844 { execution_payload }, - #[cfg(feature = 
"withdrawals")] bls_to_execution_changes, blob_kzg_commitments, } = self; @@ -487,7 +479,6 @@ impl BeaconBlockBodyEip4844> { execution_payload: BlindedPayloadEip4844 { execution_payload_header: From::from(execution_payload.clone()), }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes: bls_to_execution_changes.clone(), blob_kzg_commitments: blob_kzg_commitments.clone(), } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 48a83f94f45..b3eff7374b2 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -297,10 +297,8 @@ where pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, // Withdrawals - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844), partial_getter(copy))] pub next_withdrawal_index: u64, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844), partial_getter(copy))] pub next_withdrawal_validator_index: u64, diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index 30dd9f8d6bc..4cfc684f4d3 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -336,11 +336,9 @@ impl BeaconTreeHashCacheInner { } // Withdrawal indices (Capella and later). - #[cfg(feature = "withdrawals")] if let Ok(next_withdrawal_index) = state.next_withdrawal_index() { hasher.write(next_withdrawal_index.tree_hash_root().as_bytes())?; } - #[cfg(feature = "withdrawals")] if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() { hasher.write(next_withdrawal_validator_index.tree_hash_root().as_bytes())?; } diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 18005094e4b..45f52fb65a7 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -80,7 +80,6 @@ pub struct ExecutionPayload { pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub withdrawals: Withdrawals, } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index a98a68e3e55..e2c23389a1f 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -75,7 +75,6 @@ pub struct ExecutionPayloadHeader { pub block_hash: ExecutionBlockHash, #[superstruct(getter(copy))] pub transactions_root: Hash256, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] #[superstruct(getter(copy))] pub withdrawals_root: Hash256, @@ -128,7 +127,6 @@ impl ExecutionPayloadHeaderMerge { base_fee_per_gas: self.base_fee_per_gas, block_hash: self.block_hash, transactions_root: self.transactions_root, - #[cfg(feature = "withdrawals")] withdrawals_root: Hash256::zero(), } } @@ -153,7 +151,6 @@ impl ExecutionPayloadHeaderCapella { excess_data_gas: Uint256::zero(), block_hash: self.block_hash, transactions_root: self.transactions_root, - #[cfg(feature = "withdrawals")] withdrawals_root: self.withdrawals_root, } } @@ -196,7 +193,6 @@ impl From> for ExecutionPayloadHeaderCape base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions_root: payload.transactions.tree_hash_root(), - #[cfg(feature = "withdrawals")] withdrawals_root: payload.withdrawals.tree_hash_root(), } } @@ -219,7 +215,6 @@ 
impl From> for ExecutionPayloadHeaderEip4 excess_data_gas: payload.excess_data_gas, block_hash: payload.block_hash, transactions_root: payload.transactions.tree_hash_root(), - #[cfg(feature = "withdrawals")] withdrawals_root: payload.withdrawals.tree_hash_root(), } } diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 2d9e37b81ab..8bba00b46df 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -37,7 +37,6 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn gas_limit(&self) -> u64; fn transactions(&self) -> Option<&Transactions>; /// fork-specific fields - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result; /// Is this a default payload with 0x0 roots for transactions and withdrawals? @@ -241,7 +240,6 @@ impl ExecPayload for FullPayload { }) } - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result { match self { FullPayload::Merge(_) => Err(Error::IncorrectStateVariant), @@ -343,7 +341,6 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { }) } - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result { match self { FullPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), @@ -523,7 +520,6 @@ impl ExecPayload for BlindedPayload { None } - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result { match self { BlindedPayload::Merge(_) => Err(Error::IncorrectStateVariant), @@ -614,7 +610,6 @@ impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { None } - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result { match self { BlindedPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), @@ -712,7 +707,6 @@ macro_rules! impl_exec_payload_common { f(self) } - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result { let g = $g; g(self) diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 2a8398f83f3..14f9358f611 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -341,7 +341,6 @@ impl SignedBeaconBlockCapella> { voluntary_exits, sync_aggregate, execution_payload: BlindedPayloadCapella { .. }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, }, }, @@ -364,7 +363,6 @@ impl SignedBeaconBlockCapella> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadCapella { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, }, }, @@ -397,7 +395,6 @@ impl SignedBeaconBlockEip4844> { voluntary_exits, sync_aggregate, execution_payload: BlindedPayloadEip4844 { .. }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, blob_kzg_commitments, }, @@ -421,7 +418,6 @@ impl SignedBeaconBlockEip4844> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadEip4844 { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, blob_kzg_commitments, }, diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 3b4dd57533b..2db42d6ec3f 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -24,8 +24,6 @@ gnosis = [] slasher-mdbx = ["slasher/mdbx"] # Support slasher LMDB backend. slasher-lmdb = ["slasher/lmdb"] -# Support for inclusion of withdrawals fields in all capella consensus types in all APIs. -withdrawals = ["types/withdrawals", "beacon_node/withdrawals"] # Support for withdrawals consensus processing logic. 
withdrawals-processing = ["beacon_node/withdrawals-processing"] diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index f5487a6940d..a08ee1996ac 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -4,7 +4,7 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::testing_spec; use serde_derive::Deserialize; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] use state_processing::per_block_processing::process_operations::{ process_bls_to_execution_changes, process_bls_to_execution_changes, }; @@ -22,7 +22,7 @@ use state_processing::{ }; use std::fmt::Debug; use std::path::Path; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] use types::SignedBlsToExecutionChange; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, @@ -42,7 +42,7 @@ struct ExecutionMetadata { } /// Newtype for testing withdrawals. -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] #[derive(Debug, Clone, Deserialize)] pub struct WithdrawalsPayload { payload: FullPayload, @@ -341,7 +341,7 @@ impl Operation for BlindedPayload { } } -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] impl Operation for WithdrawalsPayload { fn handler_name() -> String { "withdrawals".into() @@ -374,7 +374,7 @@ impl Operation for WithdrawalsPayload { } } -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] impl Operation for SignedBlsToExecutionChange { fn handler_name() -> String { "bls_to_execution_change".into() diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index fd3bf2bd1b5..a4d4f2d52d4 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,5 +1,5 @@ pub use case_result::CaseResult; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] pub use cases::WithdrawalsPayload; pub use cases::{ Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates, diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 0227b92ec86..66c4f83ecea 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -82,14 +82,14 @@ fn operations_execution_payload_blinded() { OperationsHandler::>::default().run(); } -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] #[test] fn operations_withdrawals() { OperationsHandler::>::default().run(); OperationsHandler::>::default().run(); } -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] #[test] fn operations_bls_to_execution_change() { OperationsHandler::::default().run(); diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index b5923aafe5d..e058d58afb6 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -23,5 +23,4 @@ hex = "0.4.2" fork_choice = { path = 
"../../consensus/fork_choice" } [features] -default = [] -withdrawals = [] \ No newline at end of file +default = [] \ No newline at end of file From 0c22d69e1546bbc35193c602dc55348e343c2a79 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Mon, 19 Dec 2022 19:35:08 -0600 Subject: [PATCH 096/263] Update consensus/state_processing/src/upgrade/eip4844.rs Co-authored-by: realbigsean --- consensus/state_processing/src/upgrade/eip4844.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index 131100bb384..92a102ce96d 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -10,7 +10,6 @@ pub fn upgrade_to_eip4844( let pre = pre_state.as_capella_mut()?; // FIXME(sean) This is a hack to let us participate in testnets where capella doesn't exist. - // let previous_fork_version = spec.bellatrix_fork_version; let previous_fork_version = pre.fork.current_version; // Where possible, use something like `mem::take` to move fields from behind the &mut From b224ed81515167c8632ea6f6c66ef94ead4fdd81 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Mon, 19 Dec 2022 19:35:17 -0600 Subject: [PATCH 097/263] Update consensus/state_processing/src/upgrade/eip4844.rs Co-authored-by: realbigsean --- consensus/state_processing/src/upgrade/eip4844.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index 92a102ce96d..e829c01e7e2 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -9,7 +9,6 @@ pub fn upgrade_to_eip4844( let epoch = pre_state.current_epoch(); let pre = pre_state.as_capella_mut()?; - // FIXME(sean) This is a hack to let us participate in testnets where capella doesn't exist. let previous_fork_version = pre.fork.current_version; // Where possible, use something like `mem::take` to move fields from behind the &mut From 3d253abadcb9381e95ca6d517ceef80cf707be0b Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Wed, 21 Dec 2022 11:40:21 -0600 Subject: [PATCH 098/263] Fixed spec serialization bug --- consensus/types/src/config_and_preset.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index 9a618f7cc38..ac93818b9c3 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -28,6 +28,7 @@ pub struct ConfigAndPreset { #[serde(flatten)] pub bellatrix_preset: BellatrixPreset, #[superstruct(only(Capella))] + #[serde(flatten)] pub capella_preset: CapellaPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. 
#[serde(flatten)] From 96da8b938337256496d20f542fd98e7ee644cb7e Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Tue, 27 Dec 2022 15:55:43 -0600 Subject: [PATCH 099/263] Feature Guard V2 Engine API Methods --- beacon_node/execution_layer/src/engine_api/http.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 29f66393e5c..bf1da078e74 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -852,11 +852,11 @@ impl HttpJsonRpc { pub async fn supported_apis_v1(&self) -> Result { Ok(SupportedApis { new_payload_v1: true, - new_payload_v2: cfg!(not(test)), + new_payload_v2: cfg!(feature = "withdrawals-processing"), forkchoice_updated_v1: true, - forkchoice_updated_v2: cfg!(not(test)), + forkchoice_updated_v2: cfg!(feature = "withdrawals-processing"), get_payload_v1: true, - get_payload_v2: cfg!(not(test)), + get_payload_v2: cfg!(feature = "withdrawals-processing"), exchange_transition_configuration_v1: true, }) } From c922566fbc42d1795cd7e99f348931faeb641bc5 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Tue, 27 Dec 2022 15:59:34 -0600 Subject: [PATCH 100/263] Fixed Some Tests --- .../http_api/tests/interactive_tests.rs | 20 +++++++++++-------- beacon_node/http_api/tests/tests.rs | 4 ++-- validator_client/src/http_api/tests.rs | 4 ++-- 3 files changed, 16 insertions(+), 12 deletions(-) diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 17a3624afed..04d527d531c 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -5,7 +5,7 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy}, }; use eth2::types::DepositContractData; -use execution_layer::{ForkChoiceState, PayloadAttributes}; +use execution_layer::{ForkchoiceState, PayloadAttributes}; use parking_lot::Mutex; use slot_clock::SlotClock; use state_processing::state_advance::complete_state_advance; @@ -55,7 +55,7 @@ struct ForkChoiceUpdates { #[derive(Debug, Clone)] struct ForkChoiceUpdateMetadata { received_at: Duration, - state: ForkChoiceState, + state: ForkchoiceState, payload_attributes: Option, } @@ -86,7 +86,7 @@ impl ForkChoiceUpdates { .payload_attributes .as_ref() .map_or(false, |payload_attributes| { - payload_attributes.timestamp == proposal_timestamp + payload_attributes.timestamp() == proposal_timestamp }) }) .cloned() @@ -342,7 +342,7 @@ pub async fn proposer_boost_re_org_test( .lock() .set_forkchoice_updated_hook(Box::new(move |state, payload_attributes| { let received_at = chain_inner.slot_clock.now_duration().unwrap(); - let state = ForkChoiceState::from(state); + let state = ForkchoiceState::from(state); let payload_attributes = payload_attributes.map(Into::into); let update = ForkChoiceUpdateMetadata { received_at, @@ -521,16 +521,20 @@ pub async fn proposer_boost_re_org_test( if !misprediction { assert_eq!( - lookahead, payload_lookahead, + lookahead, + payload_lookahead, "lookahead={lookahead:?}, timestamp={}, prev_randao={:?}", - payload_attribs.timestamp, payload_attribs.prev_randao, + payload_attribs.timestamp(), + payload_attribs.prev_randao(), ); } else { // On a misprediction we issue the first fcU 500ms before creating a block! 
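The one-line `#[serde(flatten)]` addition in the "Fixed spec serialization bug" commit above is the whole fix: without `flatten`, serde nests the Capella preset under the `capella_preset` key instead of merging its fields into the top-level spec object the way the other presets are flattened. A self-contained toy illustration (toy field names; `serde` with the `derive` feature assumed):

use serde::Serialize;

#[derive(Serialize)]
struct CapellaPreset { max_withdrawals_per_payload: u64 }

#[derive(Serialize)]
struct ConfigAndPresetSketch {
    seconds_per_slot: u64,
    #[serde(flatten)]
    capella_preset: CapellaPreset,
}

// With `flatten`:    {"seconds_per_slot":12,"max_withdrawals_per_payload":16}
// Without `flatten`: {"seconds_per_slot":12,"capella_preset":{"max_withdrawals_per_payload":16}}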
assert_eq!( - lookahead, fork_choice_lookahead, + lookahead, + fork_choice_lookahead, "timestamp={}, prev_randao={:?}", - payload_attribs.timestamp, payload_attribs.prev_randao, + payload_attribs.timestamp(), + payload_attribs.prev_randao(), ); } } diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 8644dcbf1ad..86733cf63ad 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1372,9 +1372,9 @@ impl ApiTester { pub async fn test_get_config_spec(self) -> Self { let result = self .client - .get_config_spec::() + .get_config_spec::() .await - .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .map(|res| ConfigAndPreset::Capella(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&self.chain.spec, None); diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 5aa24a2b022..d453d7038ad 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -212,9 +212,9 @@ impl ApiTester { pub async fn test_get_lighthouse_spec(self) -> Self { let result = self .client - .get_lighthouse_spec::() + .get_lighthouse_spec::() .await - .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .map(|res| ConfigAndPreset::Capella(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&E::default_spec(), None); From 986ae4360a4a2d4635a7373be957e927c62fe2bf Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Wed, 28 Dec 2022 14:46:53 -0600 Subject: [PATCH 101/263] Fix clippy complaints --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 +--- .../src/per_block_processing/eip4844/eip4844.rs | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5c0311736ca..69889014a59 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4766,9 +4766,7 @@ impl BeaconChain { .ok_or(Error::InvalidSlot(prepare_slot))? .as_secs(), pre_payload_attributes.prev_randao, - execution_layer - .get_suggested_fee_recipient(proposer as u64) - .await, + execution_layer.get_suggested_fee_recipient(proposer).await, withdrawals, ); diff --git a/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs b/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs index 55b1ab967e4..7826057a439 100644 --- a/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs +++ b/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs @@ -109,7 +109,7 @@ fn tx_peek_blob_versioned_hashes( .get(next_version_hash_index..next_version_hash_index.safe_add(32)?) 
.ok_or(BlockProcessingError::BlobVersionHashIndexOutOfBounds { length: tx_len, - index: (next_version_hash_index as usize).safe_add(32)?, + index: (next_version_hash_index).safe_add(32)?, })?; Ok(VersionedHash::from_slice(bytes)) })) From d8f7277bebac17f6b56bee0aab4ab3f7eb98981a Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 30 Dec 2022 11:00:14 -0500 Subject: [PATCH 102/263] cleanup --- beacon_node/beacon_chain/src/beacon_chain.rs | 19 --- .../beacon_chain/src/execution_payload.rs | 4 +- .../src/engine_api/json_structures.rs | 6 +- beacon_node/execution_layer/src/lib.rs | 31 ++-- beacon_node/http_api/src/publish_blocks.rs | 5 + .../lighthouse_network/src/rpc/protocol.rs | 2 +- .../network/src/beacon_processor/mod.rs | 40 +---- .../beacon_processor/worker/gossip_methods.rs | 18 +-- .../beacon_processor/worker/rpc_methods.rs | 149 ------------------ beacon_node/network/src/sync/manager.rs | 7 +- common/eth2/src/lib.rs | 21 --- consensus/types/src/eth_spec.rs | 5 - consensus/types/src/payload.rs | 24 ++- lcli/src/create_payload_header.rs | 32 +++- lcli/src/main.rs | 13 +- lcli/src/new_testnet.rs | 28 +++- validator_client/src/signing_method.rs | 3 - .../src/signing_method/web3signer.rs | 4 - 18 files changed, 109 insertions(+), 302 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 69889014a59..edf0e149c7b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6,7 +6,6 @@ use crate::attestation_verification::{ use crate::attester_cache::{AttesterCache, AttesterCacheKey}; use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::beacon_proposer_cache::BeaconProposerCache; -use crate::blob_verification::{BlobError, VerifiedBlobsSidecar}; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::{ check_block_is_finalized_descendant, check_block_relevancy, get_block_root, @@ -1818,23 +1817,6 @@ impl BeaconChain { }) } - /// Accepts some `BlobsSidecar` received over from the network and attempts to verify it, - /// returning `Ok(_)` if it is valid to be (re)broadcast on the gossip network. 
- pub fn verify_blobs_sidecar_for_gossip<'a>( - &self, - blobs_sidecar: &'a BlobsSidecar, - ) -> Result, BlobError> { - metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_REQUESTS); - let _timer = metrics::start_timer(&metrics::BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES); - VerifiedBlobsSidecar::verify(blobs_sidecar, self).map(|v| { - if let Some(_event_handler) = self.event_handler.as_ref() { - // TODO: Handle sse events - } - metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_SUCCESSES); - v - }) - } - /// Accepts some 'LightClientFinalityUpdate' from the network and attempts to verify it pub fn verify_finality_update_for_gossip( self: &Arc, @@ -4479,7 +4461,6 @@ impl BeaconChain { .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, bls_to_execution_changes: bls_to_execution_changes.into(), - //FIXME(sean) get blobs blob_kzg_commitments: VariableList::from(kzg_commitments), }, }), diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index d52df4853df..619b713a332 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -460,7 +460,7 @@ where if is_terminal_block_hash_set && !is_activation_epoch_reached { // Use the "empty" payload if there's a terminal block hash, but we haven't reached the // terminal block epoch yet. - return Ok(BlockProposalContents::default_at_fork(fork)); + return BlockProposalContents::default_at_fork(fork).map_err(Into::into); } let terminal_pow_block_hash = execution_layer @@ -473,7 +473,7 @@ where } else { // If the merge transition hasn't occurred yet and the EL hasn't found the terminal // block, return an "empty" payload. - return Ok(BlockProposalContents::default_at_fork(fork)); + return BlockProposalContents::default_at_fork(fork).map_err(Into::into); } } else { latest_execution_payload_header_block_hash diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 13948affb55..c09541f3bd9 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -350,12 +350,14 @@ impl From for JsonWithdrawal { impl From for Withdrawal { fn from(jw: JsonWithdrawal) -> Self { + // This comparison is to avoid a scenario where the EE gives us too large a number; this + // panics when it attempts to cast to a `u64`.
+ let amount = std::cmp::min(jw.amount / 1000000000, Uint256::from(u64::MAX)); Self { index: jw.index, validator_index: jw.validator_index, address: jw.address, //FIXME(sean) if EE gives us too large a number this panics - amount: (jw.amount / 1000000000).as_u64(), + amount: amount.as_u64(), } } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index e22da42a72c..d79ac0c3645 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -35,7 +35,7 @@ use tokio::{ time::sleep, }; use tokio_stream::wrappers::WatchStream; -use types::{AbstractExecPayload, Blob, ExecPayload, KzgCommitment}; +use types::{AbstractExecPayload, BeaconStateError, Blob, ExecPayload, KzgCommitment}; use types::{ BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName, ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256, @@ -95,6 +95,13 @@ pub enum Error { FeeRecipientUnspecified, MissingLatestValidHash, InvalidJWTSecret(String), + BeaconStateError(BeaconStateError), +} + +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Error::BeaconStateError(e) + } } impl From for Error { @@ -153,17 +160,17 @@ impl> BlockProposalContents Some(blobs), } } - pub fn default_at_fork(fork_name: ForkName) -> Self { - match fork_name { + pub fn default_at_fork(fork_name: ForkName) -> Result { + Ok(match fork_name { ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - BlockProposalContents::Payload(Payload::default_at_fork(fork_name)) + BlockProposalContents::Payload(Payload::default_at_fork(fork_name)?) } ForkName::Eip4844 => BlockProposalContents::PayloadAndBlobs { - payload: Payload::default_at_fork(fork_name), + payload: Payload::default_at_fork(fork_name)?, blobs: vec![], kzg_commitments: vec![], }, - } + }) } } @@ -803,10 +810,6 @@ impl ExecutionLayer { spec, ) { Ok(()) => Ok(ProvenancedPayload::Builder( - //FIXME(sean) the builder API needs to be updated - // NOTE the comment above was removed in the - // rebase with unstable.. I think it goes - // here now? BlockProposalContents::Payload(relay.data.message.header), )), Err(reason) if !reason.payload_invalid() => { @@ -858,19 +861,11 @@ impl ExecutionLayer { spec, ) { Ok(()) => Ok(ProvenancedPayload::Builder( - //FIXME(sean) the builder API needs to be updated - // NOTE the comment above was removed in the - // rebase with unstable.. I think it goes - // here now? BlockProposalContents::Payload(relay.data.message.header), )), // If the payload is valid then use it. The local EE failed // to produce a payload so we have no alternative. Err(e) if !e.payload_invalid() => Ok(ProvenancedPayload::Builder( - //FIXME(sean) the builder API needs to be updated - // NOTE the comment above was removed in the - // rebase with unstable.. I think it goes - // here now? BlockProposalContents::Payload(relay.data.message.header), )), Err(reason) => { diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 9e85a8b5c6c..83ab8ceee61 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -189,6 +189,11 @@ async fn reconstruct_block( .spec .fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())), ) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Default payload construction error: {e:?}" + )) + })? .into() // If we already have an execution payload with this transactions root cached, use it.
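The `JsonWithdrawal` conversion at the start of this hunk divides the JSON amount by 10^9 and caps the result so the later cast to `u64` cannot panic. A tiny self-contained sketch of that clamp, assuming `Uint256` is the re-exported `ethereum_types::U256` and that the division is a Wei-to-Gwei conversion (an inference, not something the diff states):

use ethereum_types::U256;

// Cap at u64::MAX before calling `as_u64()`, which panics on overflow.
fn wei_to_gwei_clamped(amount_wei: U256) -> u64 {
    let gwei = amount_wei / U256::from(1_000_000_000u64);
    std::cmp::min(gwei, U256::from(u64::MAX)).as_u64()
}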
} else if let Some(cached_payload) = diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 8bf72834629..691b16e419e 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -119,8 +119,8 @@ lazy_static! { pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M /// The maximum bytes that can be sent across the RPC post-merge. pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M - //FIXME(sean) should these be the same? pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M + // FIXME(sean) should this be increased to account for blobs? pub(crate) const MAX_RPC_SIZE_POST_EIP4844: usize = 10 * 1_048_576; // 10M /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 445d144ac74..158379b7e1d 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -115,7 +115,8 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024; /// before we start dropping them. const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; -//FIXME(sean) verify +/// The maximum number of queued `SignedBeaconBlockAndBlobsSidecar` objects received on gossip that +/// will be stored before we start dropping them. const MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN: usize = 1_024; /// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but @@ -1186,7 +1187,6 @@ impl BeaconProcessor { // required to verify some attestations. } else if let Some(item) = gossip_block_queue.pop() { self.spawn_worker(item, toolbox); - //FIXME(sean) } else if let Some(item) = gossip_block_and_blobs_sidecar_queue.pop() { self.spawn_worker(item, toolbox); // Check the aggregates, *then* the unaggregates since we assume that @@ -1675,23 +1675,9 @@ impl BeaconProcessor { /* * Verification for blobs sidecars received on gossip. */ - Work::GossipBlockAndBlobsSidecar { - message_id, - peer_id, - peer_client, - block_and_blobs, - seen_timestamp, - } => task_spawner.spawn_async(async move { - worker - .process_gossip_block_and_blobs_sidecar( - message_id, - peer_id, - peer_client, - block_and_blobs, - seen_timestamp, - ) - .await - }), + Work::GossipBlockAndBlobsSidecar { .. } => { + warn!(self.log, "Unexpected block and blobs on gossip") + } /* * Import for blocks that we received earlier than their intended slot. */ @@ -1892,19 +1878,9 @@ impl BeaconProcessor { request, ) }), - Work::BlobsByRangeRequest { - peer_id, - request_id, - request, - } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { - worker.handle_blobs_by_range_request( - sub_executor, - send_idle_on_drop, - peer_id, - request_id, - request, - ) - }), + Work::BlobsByRangeRequest { .. } => { + warn!(self.log.clone(), "Unexpected BlobsByRange Request") + } /* * Processing of lightclient bootstrap requests from other peers. 
*/ diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 14d69898f73..589d7e9b475 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -11,10 +11,7 @@ use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError, GossipVerifiedBlock, NotifyExecutionLayer, }; -use lighthouse_network::{ - Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource, - SignedBeaconBlockAndBlobsSidecar, -}; +use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use ssz::Encode; @@ -699,19 +696,6 @@ impl Worker { } } - #[allow(clippy::too_many_arguments)] - pub async fn process_gossip_block_and_blobs_sidecar( - self, - _message_id: MessageId, - _peer_id: PeerId, - _peer_client: Client, - _block_and_blob: Arc>, - _seen_timestamp: Duration, - ) { - //FIXME - unimplemented!() - } - /// Process the beacon block received from the gossip network and /// if it passes gossip propagation criteria, tell the network thread to forward it. /// diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index c3a452acd4f..bfa0ea516fa 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -4,7 +4,6 @@ use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; use itertools::process_results; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MAX_REQUEST_BLOBS_SIDECARS}; use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; @@ -455,152 +454,4 @@ impl Worker { "load_blocks_by_range_blocks", ); } - - /// Handle a `BlobsByRange` request from the peer. - pub fn handle_blobs_by_range_request( - self, - _executor: TaskExecutor, - _send_on_drop: SendOnDrop, - peer_id: PeerId, - _request_id: PeerRequestId, - mut req: BlobsByRangeRequest, - ) { - debug!(self.log, "Received BlobsByRange Request"; - "peer_id" => %peer_id, - "count" => req.count, - "start_slot" => req.start_slot, - ); - - // Should not send more than max request blocks - if req.count > MAX_REQUEST_BLOBS_SIDECARS { - req.count = MAX_REQUEST_BLOBS_SIDECARS; - } - - //FIXME(sean) create the blobs iter - - // let forwards_block_root_iter = match self - // .chain - // .forwards_iter_block_roots(Slot::from(req.start_slot)) - // { - // Ok(iter) => iter, - // Err(BeaconChainError::HistoricalBlockError( - // HistoricalBlockError::BlockOutOfRange { - // slot, - // oldest_block_slot, - // }, - // )) => { - // debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot); - // return self.send_error_response( - // peer_id, - // RPCResponseErrorCode::ResourceUnavailable, - // "Backfilling".into(), - // request_id, - // ); - // } - // Err(e) => return error!(self.log, "Unable to obtain root iter"; "error" => ?e), - // }; - // - // // Pick out the required blocks, ignoring skip-slots. 
- // let mut last_block_root = None; - // let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { - // iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) - // // map skip slots to None - // .map(|(root, _)| { - // let result = if Some(root) == last_block_root { - // None - // } else { - // Some(root) - // }; - // last_block_root = Some(root); - // result - // }) - // .collect::>>() - // }); - // - // let block_roots = match maybe_block_roots { - // Ok(block_roots) => block_roots, - // Err(e) => return error!(self.log, "Error during iteration over blocks"; "error" => ?e), - // }; - // - // // remove all skip slots - // let block_roots = block_roots.into_iter().flatten().collect::>(); - // - // // Fetching blocks is async because it may have to hit the execution layer for payloads. - // executor.spawn( - // async move { - // let mut blocks_sent = 0; - // let mut send_response = true; - // - // for root in block_roots { - // match self.chain.store.get_blobs(&root) { - // Ok(Some(blob)) => { - // blocks_sent += 1; - // self.send_network_message(NetworkMessage::SendResponse { - // peer_id, - // response: Response::BlobsByRange(Some(Arc::new(VariableList::new(vec![blob.message]).unwrap()))), - // id: request_id, - // }); - // } - // Ok(None) => { - // error!( - // self.log, - // "Blob in the chain is not in the store"; - // "request_root" => ?root - // ); - // break; - // } - // Err(e) => { - // error!( - // self.log, - // "Error fetching block for peer"; - // "block_root" => ?root, - // "error" => ?e - // ); - // break; - // } - // } - // } - // - // let current_slot = self - // .chain - // .slot() - // .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); - // - // if blocks_sent < (req.count as usize) { - // debug!( - // self.log, - // "BlocksByRange Response processed"; - // "peer" => %peer_id, - // "msg" => "Failed to return all requested blocks", - // "start_slot" => req.start_slot, - // "current_slot" => current_slot, - // "requested" => req.count, - // "returned" => blocks_sent - // ); - // } else { - // debug!( - // self.log, - // "BlocksByRange Response processed"; - // "peer" => %peer_id, - // "start_slot" => req.start_slot, - // "current_slot" => current_slot, - // "requested" => req.count, - // "returned" => blocks_sent - // ); - // } - // - // if send_response { - // // send the stream terminator - // self.send_network_message(NetworkMessage::SendResponse { - // peer_id, - // response: Response::BlobsByRange(None), - // id: request_id, - // }); - // } - // - // drop(send_on_drop); - // }, - // "load_blocks_by_range_blocks", - // ); - } } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index a1eeda84ed1..0548b0906b3 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -47,7 +47,7 @@ use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; use lighthouse_network::{PeerAction, PeerId}; -use slog::{crit, debug, error, info, trace, Logger}; +use slog::{crit, debug, error, info, trace, warn, Logger}; use std::boxed::Box; use std::ops::Sub; use std::sync::Arc; @@ -592,8 +592,9 @@ impl SyncManager { .block_lookups .parent_chain_processed(chain_hash, result, &mut self.network), }, - //FIXME(sean) - SyncMessage::RpcBlob { .. } => todo!(), + SyncMessage::RpcBlob { .. 
} => { + warn!(self.log, "Unexpected blob message received"); + } } } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index fcfff7284a4..752e472e24d 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -628,27 +628,6 @@ impl BeaconNodeHttpClient { Ok(()) } - /// `POST beacon/blobs` - /// - /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blobs( - &self, - block: &BlobsSidecar, - ) -> Result<(), Error> { - let mut path = self.eth_path(V1)?; - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("beacon") - .push("blobs"); - - //FIXME(sean) should we re-use the proposal timeout? seems reasonable to.. - self.post_with_timeout(path, block, self.timeouts.proposal) - .await?; - - Ok(()) - } - /// `POST beacon/blinded_blocks` /// /// Returns `Ok(None)` on a 404 error. diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 661484fde82..5ed5307ffdf 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -247,11 +247,6 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + fn max_blobs_per_block() -> usize { Self::MaxBlobsPerBlock::to_usize() } - - /// FIXME: why is this called chunks_per_blob?? - fn chunks_per_blob() -> usize { - Self::FieldElementsPerBlob::to_usize() - } } /// Macro to inherit some type values from another EthSpec. diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 8bba00b46df..f56b88fc927 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -92,7 +92,7 @@ pub trait AbstractExecPayload: + From> + TryFrom>; - fn default_at_fork(fork_name: ForkName) -> Self; + fn default_at_fork(fork_name: ForkName) -> Result; } #[superstruct( @@ -372,13 +372,12 @@ impl AbstractExecPayload for FullPayload { type Capella = FullPayloadCapella; type Eip4844 = FullPayloadEip4844; - fn default_at_fork(fork_name: ForkName) -> Self { + fn default_at_fork(fork_name: ForkName) -> Result { match fork_name { - //FIXME(sean) error handling - ForkName::Base | ForkName::Altair => panic!(), - ForkName::Merge => FullPayloadMerge::default().into(), - ForkName::Capella => FullPayloadCapella::default().into(), - ForkName::Eip4844 => FullPayloadEip4844::default().into(), + ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Merge => Ok(FullPayloadMerge::default().into()), + ForkName::Capella => Ok(FullPayloadCapella::default().into()), + ForkName::Eip4844 => Ok(FullPayloadEip4844::default().into()), } } } @@ -882,13 +881,12 @@ impl AbstractExecPayload for BlindedPayload { type Capella = BlindedPayloadCapella; type Eip4844 = BlindedPayloadEip4844; - fn default_at_fork(fork_name: ForkName) -> Self { + fn default_at_fork(fork_name: ForkName) -> Result { match fork_name { - //FIXME(sean) error handling - ForkName::Base | ForkName::Altair => panic!(), - ForkName::Merge => BlindedPayloadMerge::default().into(), - ForkName::Capella => BlindedPayloadCapella::default().into(), - ForkName::Eip4844 => BlindedPayloadEip4844::default().into(), + ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Merge => Ok(BlindedPayloadMerge::default().into()), + ForkName::Capella => Ok(BlindedPayloadCapella::default().into()), + ForkName::Eip4844 => Ok(BlindedPayloadEip4844::default().into()), } } } diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs index ebda9361650..7700f23d9dd 100644 --- 
a/lcli/src/create_payload_header.rs +++ b/lcli/src/create_payload_header.rs @@ -4,7 +4,10 @@ use ssz::Encode; use std::fs::File; use std::io::Write; use std::time::{SystemTime, UNIX_EPOCH}; -use types::{EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderMerge}; +use types::{ + EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844, + ExecutionPayloadHeaderMerge, ForkName, +}; pub fn run(matches: &ArgMatches) -> Result<(), String> { let eth1_block_hash = parse_required(matches, "execution-block-hash")?; @@ -17,17 +20,36 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { let base_fee_per_gas = parse_required(matches, "base-fee-per-gas")?; let gas_limit = parse_required(matches, "gas-limit")?; let file_name = matches.value_of("file").ok_or("No file supplied")?; + let fork_name: ForkName = parse_optional(matches, "fork")?.unwrap_or(ForkName::Merge); - //FIXME(sean) - let execution_payload_header: ExecutionPayloadHeader = - ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge { + let execution_payload_header: ExecutionPayloadHeader = match fork_name { + ForkName::Base | ForkName::Altair => return Err("invalid fork name".to_string()), + ForkName::Merge => ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge { gas_limit, base_fee_per_gas, timestamp: genesis_time, block_hash: eth1_block_hash, prev_randao: eth1_block_hash.into_root(), ..ExecutionPayloadHeaderMerge::default() - }); + }), + ForkName::Capella => ExecutionPayloadHeader::Capella(ExecutionPayloadHeaderCapella { + gas_limit, + base_fee_per_gas, + timestamp: genesis_time, + block_hash: eth1_block_hash, + prev_randao: eth1_block_hash.into_root(), + ..ExecutionPayloadHeaderCapella::default() + }), + ForkName::Eip4844 => ExecutionPayloadHeader::Eip4844(ExecutionPayloadHeaderEip4844 { + gas_limit, + base_fee_per_gas, + timestamp: genesis_time, + block_hash: eth1_block_hash, + prev_randao: eth1_block_hash.into_root(), + ..ExecutionPayloadHeaderEip4844::default() + }), + }; + let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?; let bytes = execution_payload_header.as_ssz_bytes(); file.write_all(bytes.as_slice()) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index de6039f35a0..238c7e9f167 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -371,7 +371,8 @@ fn main() { .subcommand( SubCommand::with_name("create-payload-header") .about("Generates an SSZ file containing bytes for an `ExecutionPayloadHeader`. \ - Useful as input for `lcli new-testnet --execution-payload-header FILE`. ") + Useful as input for `lcli new-testnet --execution-payload-header FILE`. 
If `--fork` \ + is not provided, a payload header for the `Bellatrix` fork will be created.") .arg( Arg::with_name("execution-block-hash") .long("execution-block-hash") @@ -417,7 +418,15 @@ fn main() { .takes_value(true) .required(true) .help("Output file"), - ) + ).arg( + Arg::with_name("fork") + .long("fork") + .value_name("FORK") + .takes_value(true) + .default_value("bellatrix") + .help("The fork for which the execution payload header should be created.") + .possible_values(&["merge", "bellatrix", "capella", "eip4844"]) + ) ) .subcommand( SubCommand::with_name("new-testnet") diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 650addc18ca..4d194ff10b8 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -9,8 +9,9 @@ use std::io::Read; use std::path::PathBuf; use std::time::{SystemTime, UNIX_EPOCH}; use types::{ - test_utils::generate_deterministic_keypairs, Address, Config, EthSpec, ExecutionPayloadHeader, - ExecutionPayloadHeaderMerge, + test_utils::generate_deterministic_keypairs, Address, Config, Epoch, EthSpec, + ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844, + ExecutionPayloadHeaderMerge, ForkName, }; pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { @@ -80,10 +81,25 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul .map_err(|e| format!("Unable to open {}: {}", filename, e))?; file.read_to_end(&mut bytes) .map_err(|e| format!("Unable to read {}: {}", filename, e))?; - //FIXME(sean) - ExecutionPayloadHeaderMerge::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Merge) - .map_err(|e| format!("SSZ decode failed: {:?}", e)) + let fork_name = spec.fork_name_at_epoch(Epoch::new(0)); + match fork_name { + ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid( + "genesis fork must be post-merge".to_string(), + )), + ForkName::Merge => { + ExecutionPayloadHeaderMerge::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Merge) + } + ForkName::Capella => { + ExecutionPayloadHeaderCapella::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Capella) + } + ForkName::Eip4844 => { + ExecutionPayloadHeaderEip4844::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Eip4844) + } + } + .map_err(|e| format!("SSZ decode failed: {:?}", e)) }) .transpose()?; diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index 2ebca2dfb70..ae9df080965 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -37,7 +37,6 @@ pub enum Error { pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload = FullPayload> { RandaoReveal(Epoch), BeaconBlock(&'a BeaconBlock), - BlobsSidecar(&'a BlobsSidecar), AttestationData(&'a AttestationData), SignedAggregateAndProof(&'a AggregateAndProof), SelectionProof(Slot), @@ -59,7 +58,6 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignableMessage<'a, T, Pay match self { SignableMessage::RandaoReveal(epoch) => epoch.signing_root(domain), SignableMessage::BeaconBlock(b) => b.signing_root(domain), - SignableMessage::BlobsSidecar(b) => b.signing_root(domain), SignableMessage::AttestationData(a) => a.signing_root(domain), SignableMessage::SignedAggregateAndProof(a) => a.signing_root(domain), SignableMessage::SelectionProof(slot) => slot.signing_root(domain), @@ -182,7 +180,6 @@ impl SigningMethod { Web3SignerObject::RandaoReveal { epoch } } SignableMessage::BeaconBlock(block) => 
Web3SignerObject::beacon_block(block)?, - SignableMessage::BlobsSidecar(blob) => Web3SignerObject::BlobsSidecar(blob), SignableMessage::AttestationData(a) => Web3SignerObject::Attestation(a), SignableMessage::SignedAggregateAndProof(a) => { Web3SignerObject::AggregateAndProof(a) diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 5daa42fa3a4..512cbc7d023 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -11,7 +11,6 @@ pub enum MessageType { AggregateAndProof, Attestation, BlockV2, - BlobsSidecar, Deposit, RandaoReveal, VoluntaryExit, @@ -52,8 +51,6 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: AbstractExecPayload> { #[serde(skip_serializing_if = "Option::is_none")] block_header: Option, }, - //FIXME(sean) just guessing here - BlobsSidecar(&'a BlobsSidecar), #[allow(dead_code)] Deposit { pubkey: PublicKeyBytes, @@ -114,7 +111,6 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> Web3SignerObject<'a, T, Pa Web3SignerObject::AggregateAndProof(_) => MessageType::AggregateAndProof, Web3SignerObject::Attestation(_) => MessageType::Attestation, Web3SignerObject::BeaconBlock { .. } => MessageType::BlockV2, - Web3SignerObject::BlobsSidecar(_) => MessageType::BlobsSidecar, Web3SignerObject::Deposit { .. } => MessageType::Deposit, Web3SignerObject::RandaoReveal { .. } => MessageType::RandaoReveal, Web3SignerObject::VoluntaryExit(_) => MessageType::VoluntaryExit, From 4353c49855dc016928eb6a255bfb12fda044249a Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 3 Jan 2023 08:55:19 -0500 Subject: [PATCH 103/263] Update beacon_node/execution_layer/src/engine_api/json_structures.rs --- beacon_node/execution_layer/src/engine_api/json_structures.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index c09541f3bd9..728150a2082 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -350,8 +350,8 @@ impl From for JsonWithdrawal { impl From for Withdrawal { fn from(jw: JsonWithdrawal) -> Self { - // This comparison is to avoid a scenarion where the EE gives us too large a number this - // panics when it attempts to case to a `u64`. + // This comparison is done to avoid a scenario where the EE gives us too large a number and we + // panic when attempting to cast to a `u64`. 
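As a minimal standalone illustration of the clamping described in the comment above (not code from this patch series; `u128` stands in for the 256-bit `Uint256`), note that it is `std::cmp::min` that caps a value at `u64::MAX` and keeps the subsequent cast safe:

// Standalone sketch: wei -> Gwei conversion with a saturating cast to u64.
const WEI_PER_GWEI: u128 = 1_000_000_000;

fn wei_to_gwei_saturating(amount_wei: u128) -> u64 {
    let gwei = amount_wei / WEI_PER_GWEI;
    // Taking the minimum of the two values caps the result at u64::MAX, so the cast cannot overflow.
    std::cmp::min(gwei, u64::MAX as u128) as u64
}

fn main() {
    // 32 ETH in wei converts to 32_000_000_000 Gwei, well within u64 range.
    assert_eq!(wei_to_gwei_saturating(32_000_000_000_000_000_000), 32_000_000_000);
    // An absurdly large amount saturates instead of panicking on the cast.
    assert_eq!(wei_to_gwei_saturating(u128::MAX), u64::MAX);
}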
let amount = std::cmp::max(jw.amount / 1000000000, Uint256::from(u64::MAX)); Self { index: jw.index, From be232c4587075b1be6c36dc4272b4fb03e15f0fc Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Tue, 3 Jan 2023 16:42:34 -0600 Subject: [PATCH 104/263] Update Execution Layer Tests for Capella --- Makefile | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 20 ++- beacon_node/beacon_chain/tests/capella.rs | 170 ++++++++++++++++++ beacon_node/beacon_chain/tests/main.rs | 1 + beacon_node/beacon_chain/tests/merge.rs | 9 +- .../test_utils/execution_block_generator.rs | 104 +++++++++-- .../src/test_utils/handle_rpc.rs | 122 +++++++++++-- .../src/test_utils/mock_execution_layer.rs | 7 + .../execution_layer/src/test_utils/mod.rs | 21 ++- 9 files changed, 408 insertions(+), 48 deletions(-) create mode 100644 beacon_node/beacon_chain/tests/capella.rs diff --git a/Makefile b/Makefile index 15d09c5867f..40db61de781 100644 --- a/Makefile +++ b/Makefile @@ -28,7 +28,7 @@ PROFILE ?= release # List of all hard forks. This list is used to set env variables for several tests so that # they run for different forks. -FORKS=phase0 altair merge +FORKS=phase0 altair merge capella # Builds the Lighthouse binary in release (optimized). # diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 9183583fb1b..3c4ab1ca12f 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -11,11 +11,11 @@ use crate::{ StateSkipConfig, }; use bls::get_withdrawal_credentials; -use execution_layer::test_utils::DEFAULT_JWT_SECRET; use execution_layer::{ auth::JwtKey, test_utils::{ - ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_TERMINAL_BLOCK, + ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_JWT_SECRET, + DEFAULT_TERMINAL_BLOCK, }, ExecutionLayer, }; @@ -385,12 +385,20 @@ where pub fn mock_execution_layer(mut self) -> Self { let spec = self.spec.clone().expect("cannot build without spec"); + let shanghai_time = spec.capella_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); + let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); let mock = MockExecutionLayer::new( self.runtime.task_executor.clone(), spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK, spec.terminal_block_hash, spec.terminal_block_hash_activation_epoch, + shanghai_time, + eip4844_time, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), None, ); @@ -405,12 +413,20 @@ where let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); let spec = self.spec.clone().expect("cannot build without spec"); + let shanghai_time = spec.capella_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); + let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); let mock_el = MockExecutionLayer::new( self.runtime.task_executor.clone(), spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK, spec.terminal_block_hash, spec.terminal_block_hash_activation_epoch, + shanghai_time, + eip4844_time, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), Some(builder_url.clone()), ) diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs new file mode 100644 
index 00000000000..1e39d075d86 --- /dev/null +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -0,0 +1,170 @@ +#![cfg(not(debug_assertions))] // Tests run too slow in debug. + +use beacon_chain::test_utils::BeaconChainHarness; +use execution_layer::test_utils::Block; +use types::*; + +const VALIDATOR_COUNT: usize = 32; +type E = MainnetEthSpec; + +fn verify_execution_payload_chain(chain: &[FullPayload]) { + let mut prev_ep: Option> = None; + + for ep in chain { + assert!(!ep.is_default_with_empty_roots()); + assert!(ep.block_hash() != ExecutionBlockHash::zero()); + + // Check against previous `ExecutionPayload`. + if let Some(prev_ep) = prev_ep { + assert_eq!(prev_ep.block_hash(), ep.execution_payload().parent_hash()); + assert_eq!( + prev_ep.execution_payload().block_number() + 1, + ep.execution_payload().block_number() + ); + assert!(ep.execution_payload().timestamp() > prev_ep.execution_payload().timestamp()); + } + prev_ep = Some(ep.clone()); + } +} + +#[tokio::test] +async fn base_altair_merge_capella() { + let altair_fork_epoch = Epoch::new(4); + let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); + let bellatrix_fork_epoch = Epoch::new(8); + let merge_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); + let capella_fork_epoch = Epoch::new(12); + let capella_fork_slot = capella_fork_epoch.start_slot(E::slots_per_epoch()); + + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(altair_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); + spec.capella_fork_epoch = Some(capella_fork_epoch); + + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .logger(logging::test_logger()) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + /* + * Start with the base fork. + */ + assert!(harness.chain.head_snapshot().beacon_block.as_base().is_ok()); + + /* + * Do the Altair fork. + */ + harness.extend_to_slot(altair_fork_slot).await; + + let altair_head = &harness.chain.head_snapshot().beacon_block; + assert!(altair_head.as_altair().is_ok()); + assert_eq!(altair_head.slot(), altair_fork_slot); + + /* + * Do the merge fork, without a terminal PoW block. + */ + harness.extend_to_slot(merge_fork_slot).await; + + let merge_head = &harness.chain.head_snapshot().beacon_block; + assert!(merge_head.as_merge().is_ok()); + assert_eq!(merge_head.slot(), merge_fork_slot); + assert!( + merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "Merge head is default payload" + ); + + /* + * Next merge block shouldn't include an exec payload. + */ + harness.extend_slots(1).await; + + let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; + assert!( + one_after_merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "One after merge head is default payload" + ); + assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1); + + /* + * Trigger the terminal PoW block. 
+ */ + harness + .execution_block_generator() + .move_to_terminal_block() + .unwrap(); + + // Add a slot duration to get to the next slot + let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot; + harness + .execution_block_generator() + .modify_last_block(|block| { + if let Block::PoW(terminal_block) = block { + terminal_block.timestamp = timestamp; + } + }); + harness.extend_slots(1).await; + + let two_after_merge_head = &harness.chain.head_snapshot().beacon_block; + assert!( + two_after_merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "Two after merge head is default payload" + ); + assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2); + + /* + * Next merge block should include an exec payload. + */ + let mut execution_payloads = vec![]; + for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() { + harness.extend_slots(1).await; + let block = &harness.chain.head_snapshot().beacon_block; + let full_payload: FullPayload = block + .message() + .body() + .execution_payload() + .unwrap() + .clone() + .into(); + // pre-capella shouldn't have withdrawals + assert!(full_payload.withdrawals_root().is_err()); + execution_payloads.push(full_payload); + } + + /* + * Should enter capella fork now. + */ + for _ in 0..16 { + harness.extend_slots(1).await; + let block = &harness.chain.head_snapshot().beacon_block; + let full_payload: FullPayload = block + .message() + .body() + .execution_payload() + .unwrap() + .clone() + .into(); + // post-capella should have withdrawals + assert!(full_payload.withdrawals_root().is_ok()); + execution_payloads.push(full_payload); + } + + verify_execution_payload_chain(execution_payloads.as_slice()); +} diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs index 1c61e9927fc..3b8c83594bb 100644 --- a/beacon_node/beacon_chain/tests/main.rs +++ b/beacon_node/beacon_chain/tests/main.rs @@ -1,6 +1,7 @@ mod attestation_production; mod attestation_verification; mod block_verification; +mod capella; mod merge; mod op_verification; mod payload_invalidation; diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index 80ffc57be11..cd6e0e2ba3b 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -191,18 +191,17 @@ async fn base_altair_merge_with_terminal_block_after_fork() { harness.extend_slots(1).await; - let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; - // FIXME: why is this being tested twice? + let two_after_merge_head = &harness.chain.head_snapshot().beacon_block; assert!( - one_after_merge_head + two_after_merge_head .message() .body() .execution_payload() .unwrap() .is_default_with_empty_roots(), - "One after merge head is default payload" + "Two after merge head is default payload" ); - assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 2); + assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2); /* * Next merge block should include an exec payload. 
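For reference, a standalone sketch (not from this patch series) of the epoch-to-timestamp arithmetic the harness uses above to derive the mock execution layer's Capella/EIP-4844 activation times; mainnet's 12-second slots and 32 slots per epoch are assumed:

// Standalone sketch: fork epoch -> wall-clock activation time, as computed in `mock_execution_layer`.
fn fork_activation_time(genesis_time: u64, seconds_per_slot: u64, slots_per_epoch: u64, fork_epoch: u64) -> u64 {
    genesis_time + seconds_per_slot * slots_per_epoch * fork_epoch
}

fn main() {
    // With the Capella fork at epoch 12 (as in `base_altair_merge_capella` above), the mock EL
    // starts treating payloads as Capella 12 * 32 * 12 = 4608 seconds after genesis.
    assert_eq!(fork_activation_time(0, 12, 32, 12), 4608);
}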
diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index b7206cbf878..7790dcbedd7 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -13,7 +13,8 @@ use std::collections::HashMap; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use types::{ - EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadMerge, Hash256, Uint256, + EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, + ExecutionPayloadEip4844, ExecutionPayloadMerge, ForkName, Hash256, Uint256, }; const GAS_LIMIT: u64 = 16384; @@ -113,6 +114,11 @@ pub struct ExecutionBlockGenerator { pub pending_payloads: HashMap>, pub next_payload_id: u64, pub payload_ids: HashMap>, + /* + * Post-merge fork triggers + */ + pub shanghai_time: Option, // withdrawals + pub eip4844_time: Option, // 4844 } impl ExecutionBlockGenerator { @@ -120,6 +126,8 @@ impl ExecutionBlockGenerator { terminal_total_difficulty: Uint256, terminal_block_number: u64, terminal_block_hash: ExecutionBlockHash, + shanghai_time: Option, + eip4844_time: Option, ) -> Self { let mut gen = Self { head_block: <_>::default(), @@ -132,6 +140,8 @@ impl ExecutionBlockGenerator { pending_payloads: <_>::default(), next_payload_id: 0, payload_ids: <_>::default(), + shanghai_time, + eip4844_time, }; gen.insert_pow_block(0).unwrap(); @@ -163,6 +173,16 @@ impl ExecutionBlockGenerator { } } + pub fn get_fork_at_timestamp(&self, timestamp: u64) -> ForkName { + match self.eip4844_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Eip4844, + _ => match self.shanghai_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Capella, + _ => ForkName::Merge, + }, + } + } + pub fn execution_block_by_number(&self, number: u64) -> Option { self.block_by_number(number) .map(|block| block.as_execution_block(self.terminal_total_difficulty)) @@ -395,7 +415,9 @@ impl ExecutionBlockGenerator { } } - pub fn forkchoice_updated_v1( + // This function expects payload_attributes to already be validated with respect to + // the current fork [obtained by self.get_fork_at_timestamp(payload_attributes.timestamp)] + pub fn forkchoice_updated( &mut self, forkchoice_state: ForkchoiceState, payload_attributes: Option, @@ -469,23 +491,65 @@ impl ExecutionBlockGenerator { transactions: vec![].into(), }), PayloadAttributes::V2(pa) => { - // FIXME: think about how to test different forks - ExecutionPayload::Merge(ExecutionPayloadMerge { - parent_hash: forkchoice_state.head_block_hash, - fee_recipient: pa.suggested_fee_recipient, - receipts_root: Hash256::repeat_byte(42), - state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), - prev_randao: pa.prev_randao, - block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, - gas_used: GAS_USED, - timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), - block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - }) + match self.get_fork_at_timestamp(pa.timestamp) { + ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: 
parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + }), + ForkName::Capella => { + ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + withdrawals: pa.withdrawals.as_ref().unwrap().clone().into(), + }) + } + ForkName::Eip4844 => { + ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + // FIXME(4844): maybe this should be set to something? + excess_data_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + withdrawals: pa.withdrawals.as_ref().unwrap().clone().into(), + }) + } + _ => unreachable!(), + } } }; @@ -576,6 +640,8 @@ mod test { TERMINAL_DIFFICULTY.into(), TERMINAL_BLOCK, ExecutionBlockHash::zero(), + None, + None, ); for i in 0..=TERMINAL_BLOCK { diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index e8ece97f3f0..f01ae00e86c 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -82,17 +82,40 @@ pub async fn handle_rpc( ENGINE_NEW_PAYLOAD_V2 => { JsonExecutionPayload::V2(get_param::>(params, 0)?) } + // TODO(4844) add that here.. _ => unreachable!(), }; - let fork = match request { - JsonExecutionPayload::V1(_) => ForkName::Merge, - JsonExecutionPayload::V2(ref payload) => { - if payload.withdrawals.is_none() { - ForkName::Merge - } else { - ForkName::Capella + + let fork = ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(*request.timestamp()); + // validate method called correctly according to shanghai fork time + match fork { + ForkName::Merge => { + if request.withdrawals().is_ok() && request.withdrawals().unwrap().is_some() { + return Err(format!( + "{} called with `withdrawals` before capella fork!", + method + )); } } + ForkName::Capella => { + if method == ENGINE_NEW_PAYLOAD_V1 { + return Err(format!("{} called after capella fork!", method)); + } + if request.withdrawals().is_err() + || (request.withdrawals().is_ok() + && request.withdrawals().unwrap().is_none()) + { + return Err(format!( + "{} called without `withdrawals` after capella fork!", + method + )); + } + } + // TODO(4844) add 4844 error checking here + _ => unreachable!(), }; // Canned responses set by block hash take priority. 
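The version checks above hinge on the block generator's `get_fork_at_timestamp`. A standalone sketch of that timestamp-to-fork selection (a local `Fork` enum stands in for `ForkName`, and the fork times in `main` are arbitrary example values):

// Standalone sketch: choose the fork the mock server should validate against, based on timestamp.
#[derive(Debug, PartialEq)]
enum Fork {
    Merge,
    Capella,
    Eip4844,
}

fn fork_at_timestamp(timestamp: u64, shanghai_time: Option<u64>, eip4844_time: Option<u64>) -> Fork {
    match eip4844_time {
        Some(t) if timestamp >= t => Fork::Eip4844,
        _ => match shanghai_time {
            Some(t) if timestamp >= t => Fork::Capella,
            _ => Fork::Merge,
        },
    }
}

fn main() {
    // Before the shanghai time, withdrawals must be absent; at or after it, the V2 methods
    // with withdrawals are required.
    assert_eq!(fork_at_timestamp(100, Some(1_000), None), Fork::Merge);
    assert_eq!(fork_at_timestamp(1_000, Some(1_000), None), Fork::Capella);
    assert_eq!(fork_at_timestamp(2_000, Some(1_000), Some(2_000)), Fork::Eip4844);
}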
@@ -125,7 +148,7 @@ pub async fn handle_rpc( Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap()) } - ENGINE_GET_PAYLOAD_V1 => { + ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 => { let request: JsonPayloadIdRequest = get_param(params, 0)?; let id = request.into(); @@ -135,12 +158,76 @@ pub async fn handle_rpc( .get_payload(&id) .ok_or_else(|| format!("no payload for id {:?}", id))?; - Ok(serde_json::to_value(JsonExecutionPayloadV1::try_from(response).unwrap()).unwrap()) + // validate method called correctly according to shanghai fork time + if ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(response.timestamp()) + == ForkName::Capella + && method == ENGINE_GET_PAYLOAD_V1 + { + return Err(format!("{} called after capella fork!", method)); + } + // TODO(4844) add 4844 error checking here + + match method { + ENGINE_GET_PAYLOAD_V1 => Ok(serde_json::to_value( + JsonExecutionPayloadV1::try_from(response).unwrap(), + ) + .unwrap()), + ENGINE_GET_PAYLOAD_V2 => Ok(serde_json::to_value(JsonGetPayloadResponse { + execution_payload: JsonExecutionPayloadV2::try_from(response).unwrap(), + }) + .unwrap()), + _ => unreachable!(), + } } - // FIXME(capella): handle fcu version 2 - ENGINE_FORKCHOICE_UPDATED_V1 => { + ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 => { let forkchoice_state: JsonForkchoiceStateV1 = get_param(params, 0)?; - let payload_attributes: Option = get_param(params, 1)?; + let payload_attributes = match method { + ENGINE_FORKCHOICE_UPDATED_V1 => { + let jpa1: Option = get_param(params, 1)?; + jpa1.map(JsonPayloadAttributes::V1) + } + ENGINE_FORKCHOICE_UPDATED_V2 => { + let jpa2: Option = get_param(params, 1)?; + jpa2.map(JsonPayloadAttributes::V2) + } + _ => unreachable!(), + }; + + // validate method called correctly according to shanghai fork time + if let Some(pa) = payload_attributes.as_ref() { + match ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(*pa.timestamp()) + { + ForkName::Merge => { + if pa.withdrawals().is_ok() && pa.withdrawals().unwrap().is_some() { + return Err(format!( + "{} called with `withdrawals` before capella fork!", + method + )); + } + } + ForkName::Capella => { + if method == ENGINE_FORKCHOICE_UPDATED_V1 { + return Err(format!("{} called after capella fork!", method)); + } + if pa.withdrawals().is_err() + || (pa.withdrawals().is_ok() && pa.withdrawals().unwrap().is_none()) + { + return Err(format!( + "{} called without `withdrawals` after capella fork!", + method + )); + } + } + // TODO(4844) add 4844 error checking here + _ => unreachable!(), + }; + } if let Some(hook_response) = ctx .hook @@ -161,13 +248,10 @@ pub async fn handle_rpc( return Ok(serde_json::to_value(response).unwrap()); } - let mut response = ctx - .execution_block_generator - .write() - .forkchoice_updated_v1( - forkchoice_state.into(), - payload_attributes.map(|json| json.into()), - )?; + let mut response = ctx.execution_block_generator.write().forkchoice_updated( + forkchoice_state.into(), + payload_attributes.map(|json| json.into()), + )?; if let Some(mut status) = ctx.static_forkchoice_updated_response.lock().clone() { if status.status == PayloadStatusV1Status::Valid { diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index e552b7ca7ab..89e0344d9ac 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -26,17 +26,22 @@ 
impl MockExecutionLayer { DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), Epoch::new(0), + None, + None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), None, ) } + #[allow(clippy::too_many_arguments)] pub fn new( executor: TaskExecutor, terminal_total_difficulty: Uint256, terminal_block: u64, terminal_block_hash: ExecutionBlockHash, terminal_block_hash_activation_epoch: Epoch, + shanghai_time: Option, + eip4844_time: Option, jwt_key: Option, builder_url: Option, ) -> Self { @@ -54,6 +59,8 @@ impl MockExecutionLayer { terminal_total_difficulty, terminal_block, terminal_block_hash, + shanghai_time, + eip4844_time, ); let url = SensitiveUrl::parse(&server.url()).unwrap(); diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index f18ecbe6226..bad02e36980 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -45,6 +45,8 @@ pub struct MockExecutionConfig { pub terminal_difficulty: Uint256, pub terminal_block: u64, pub terminal_block_hash: ExecutionBlockHash, + pub shanghai_time: Option, + pub eip4844_time: Option, } impl Default for MockExecutionConfig { @@ -55,6 +57,8 @@ impl Default for MockExecutionConfig { terminal_block: DEFAULT_TERMINAL_BLOCK, terminal_block_hash: ExecutionBlockHash::zero(), server_config: Config::default(), + shanghai_time: None, + eip4844_time: None, } } } @@ -74,6 +78,8 @@ impl MockServer { DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), + None, // FIXME(capella): should this be the default? + None, // FIXME(eip4844): should this be the default? ) } @@ -84,11 +90,18 @@ impl MockServer { terminal_block, terminal_block_hash, server_config, + shanghai_time, + eip4844_time, } = config; let last_echo_request = Arc::new(RwLock::new(None)); let preloaded_responses = Arc::new(Mutex::new(vec![])); - let execution_block_generator = - ExecutionBlockGenerator::new(terminal_difficulty, terminal_block, terminal_block_hash); + let execution_block_generator = ExecutionBlockGenerator::new( + terminal_difficulty, + terminal_block, + terminal_block_hash, + shanghai_time, + eip4844_time, + ); let ctx: Arc> = Arc::new(Context { config: server_config, @@ -140,6 +153,8 @@ impl MockServer { terminal_difficulty: Uint256, terminal_block: u64, terminal_block_hash: ExecutionBlockHash, + shanghai_time: Option, + eip4844_time: Option, ) -> Self { Self::new_with_config( handle, @@ -149,6 +164,8 @@ impl MockServer { terminal_difficulty, terminal_block, terminal_block_hash, + shanghai_time, + eip4844_time, }, ) } From 933772dd0678380383fbad4f4953ba26622b5035 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Tue, 3 Jan 2023 18:40:35 -0600 Subject: [PATCH 105/263] Fixed Operation Pool Tests --- beacon_node/beacon_chain/src/test_utils.rs | 18 ++++++++----- beacon_node/operation_pool/src/lib.rs | 31 +++++++++++++++++----- 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 3c4ab1ca12f..fec04bd540b 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -120,19 +120,23 @@ fn make_rng() -> Mutex { Mutex::new(StdRng::seed_from_u64(0x0DDB1A5E5BAD5EEDu64)) } +pub fn get_fork_from_env() -> ForkName { + let fork_string = std::env::var(FORK_NAME_ENV_VAR).unwrap_or_else(|e| { + panic!( + "{} env var must be defined when using fork_from_env: {:?}", + FORK_NAME_ENV_VAR, e + ) + }); 
+ ForkName::from_str(fork_string.as_str()).unwrap() +} + /// Return a `ChainSpec` suitable for test usage. /// /// If the `fork_from_env` feature is enabled, read the fork to use from the FORK_NAME environment /// variable. Otherwise use the default spec. pub fn test_spec() -> ChainSpec { let mut spec = if cfg!(feature = "fork_from_env") { - let fork_name = std::env::var(FORK_NAME_ENV_VAR).unwrap_or_else(|e| { - panic!( - "{} env var must be defined when using fork_from_env: {:?}", - FORK_NAME_ENV_VAR, e - ) - }); - let fork = ForkName::from_str(fork_name.as_str()).unwrap(); + let fork = get_fork_from_env(); fork.make_genesis_spec(E::default_spec()) } else { E::default_spec() diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 37fa6893873..4a895391f0f 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -8,19 +8,18 @@ mod persistence; mod reward_cache; mod sync_aggregate_id; +use crate::attestation_storage::{AttestationMap, CheckpointKey}; +use crate::sync_aggregate_id::SyncAggregateId; pub use attestation::AttMaxCover; pub use attestation_storage::{AttestationRef, SplitAttestation}; +use attester_slashing::AttesterSlashingMaxCover; +use max_cover::maximum_cover; pub use max_cover::MaxCover; +use parking_lot::{RwLock, RwLockWriteGuard}; pub use persistence::{ PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5, }; pub use reward_cache::RewardCache; - -use crate::attestation_storage::{AttestationMap, CheckpointKey}; -use crate::sync_aggregate_id::SyncAggregateId; -use attester_slashing::AttesterSlashingMaxCover; -use max_cover::maximum_cover; -use parking_lot::{RwLock, RwLockWriteGuard}; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::per_block_processing::{ get_slashable_indices_modular, verify_exit, VerifySignatures, @@ -767,7 +766,8 @@ mod release_tests { use super::attestation::earliest_attestation_validators; use super::*; use beacon_chain::test_utils::{ - test_spec, BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee, + get_fork_from_env, test_spec, BeaconChainHarness, EphemeralHarnessType, + RelativeSyncCommittee, }; use lazy_static::lazy_static; use maplit::hashset; @@ -1789,9 +1789,26 @@ mod release_tests { { let mut spec = test_spec::(); + if cfg!(feature = "fork_from_env") { + let fork = get_fork_from_env(); + match fork { + ForkName::Altair | ForkName::Merge | ForkName::Capella | ForkName::Eip4844 => {} + _ => panic!( + "Unknown fork {}, add it above AND below so this test doesn't panic", + fork + ), + } + } + // Give some room to sign surround slashings. + // It appears we need to set _every_ fork to some non-zero value + // here. Otherwise if we set FORK_NAME_ENV_VAR to some fork that + // isn't listed here, tests that use this function will panic in + // non-trivial ways spec.altair_fork_epoch = Some(Epoch::new(3)); spec.bellatrix_fork_epoch = Some(Epoch::new(6)); + spec.capella_fork_epoch = Some(Epoch::new(9)); + spec.eip4844_fork_epoch = Some(Epoch::new(12)); // To make exits immediately valid. 
spec.shard_committee_period = 0; From 8711db2f3b7dbe27756d93f3853c36c3780495ce Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Wed, 4 Jan 2023 15:14:43 -0600 Subject: [PATCH 106/263] Fix EF Tests --- beacon_node/beacon_chain/src/test_utils.rs | 21 +++++++++++++++++++++ testing/ef_tests/src/cases/fork_choice.rs | 1 + 2 files changed, 22 insertions(+) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index fec04bd540b..53b333c5fd1 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -387,6 +387,27 @@ where self } + pub fn recalculate_fork_times_with_genesis(mut self, genesis_time: u64) -> Self { + let mock = self + .mock_execution_layer + .as_mut() + .expect("must have mock execution layer to recalculate fork times"); + let spec = self + .spec + .clone() + .expect("cannot recalculate fork times without spec"); + mock.server.execution_block_generator().shanghai_time = + spec.capella_fork_epoch.map(|epoch| { + genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); + mock.server.execution_block_generator().eip4844_time = + spec.eip4844_fork_epoch.map(|epoch| { + genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); + + self + } + pub fn mock_execution_layer(mut self) -> Self { let spec = self.spec.clone().expect("cannot build without spec"); let shanghai_time = spec.capella_fork_epoch.map(|epoch| { diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 039efb36845..31165d6329c 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -311,6 +311,7 @@ impl Tester { .keypairs(vec![]) .genesis_state_ephemeral_store(case.anchor_state.clone()) .mock_execution_layer() + .recalculate_fork_times_with_genesis(0) .mock_execution_layer_all_payloads_valid() .build(); From 2ac609b64e20a98b2d8e1368771ed4fa82f6df2e Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Thu, 5 Jan 2023 13:00:44 -0600 Subject: [PATCH 107/263] Fixing Moar Failing Tests --- Makefile | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 18 ++++------ .../execution_layer/src/engine_api/http.rs | 6 ++-- beacon_node/operation_pool/src/lib.rs | 33 +++++-------------- 4 files changed, 19 insertions(+), 40 deletions(-) diff --git a/Makefile b/Makefile index 40db61de781..e1fba5b4237 100644 --- a/Makefile +++ b/Makefile @@ -120,7 +120,7 @@ run-ef-tests: test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS)) test-beacon-chain-%: - env FORK_NAME=$* cargo test --release --features fork_from_env -p beacon_chain + env FORK_NAME=$* cargo test --release --features fork_from_env,withdrawals-processing -p beacon_chain # Run the tests in the `operation_pool` crate for all known forks. 
test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS)) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 53b333c5fd1..c4bb9f3d860 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -120,23 +120,19 @@ fn make_rng() -> Mutex { Mutex::new(StdRng::seed_from_u64(0x0DDB1A5E5BAD5EEDu64)) } -pub fn get_fork_from_env() -> ForkName { - let fork_string = std::env::var(FORK_NAME_ENV_VAR).unwrap_or_else(|e| { - panic!( - "{} env var must be defined when using fork_from_env: {:?}", - FORK_NAME_ENV_VAR, e - ) - }); - ForkName::from_str(fork_string.as_str()).unwrap() -} - /// Return a `ChainSpec` suitable for test usage. /// /// If the `fork_from_env` feature is enabled, read the fork to use from the FORK_NAME environment /// variable. Otherwise use the default spec. pub fn test_spec() -> ChainSpec { let mut spec = if cfg!(feature = "fork_from_env") { - let fork = get_fork_from_env(); + let fork_name = std::env::var(FORK_NAME_ENV_VAR).unwrap_or_else(|e| { + panic!( + "{} env var must be defined when using fork_from_env: {:?}", + FORK_NAME_ENV_VAR, e + ) + }); + let fork = ForkName::from_str(fork_name.as_str()).unwrap(); fork.make_genesis_spec(E::default_spec()) } else { E::default_spec() diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index bf1da078e74..06abe7274f0 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -852,11 +852,11 @@ impl HttpJsonRpc { pub async fn supported_apis_v1(&self) -> Result { Ok(SupportedApis { new_payload_v1: true, - new_payload_v2: cfg!(feature = "withdrawals-processing"), + new_payload_v2: cfg!(any(feature = "withdrawals-processing", test)), forkchoice_updated_v1: true, - forkchoice_updated_v2: cfg!(feature = "withdrawals-processing"), + forkchoice_updated_v2: cfg!(any(feature = "withdrawals-processing", test)), get_payload_v1: true, - get_payload_v2: cfg!(feature = "withdrawals-processing"), + get_payload_v2: cfg!(any(feature = "withdrawals-processing", test)), exchange_transition_configuration_v1: true, }) } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 4a895391f0f..1f4660fc285 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -8,18 +8,19 @@ mod persistence; mod reward_cache; mod sync_aggregate_id; -use crate::attestation_storage::{AttestationMap, CheckpointKey}; -use crate::sync_aggregate_id::SyncAggregateId; pub use attestation::AttMaxCover; pub use attestation_storage::{AttestationRef, SplitAttestation}; -use attester_slashing::AttesterSlashingMaxCover; -use max_cover::maximum_cover; pub use max_cover::MaxCover; -use parking_lot::{RwLock, RwLockWriteGuard}; pub use persistence::{ PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5, }; pub use reward_cache::RewardCache; + +use crate::attestation_storage::{AttestationMap, CheckpointKey}; +use crate::sync_aggregate_id::SyncAggregateId; +use attester_slashing::AttesterSlashingMaxCover; +use max_cover::maximum_cover; +use parking_lot::{RwLock, RwLockWriteGuard}; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::per_block_processing::{ get_slashable_indices_modular, verify_exit, VerifySignatures, @@ -766,8 +767,7 @@ mod release_tests { use super::attestation::earliest_attestation_validators; use super::*; 
use beacon_chain::test_utils::{ - get_fork_from_env, test_spec, BeaconChainHarness, EphemeralHarnessType, - RelativeSyncCommittee, + test_spec, BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee, }; use lazy_static::lazy_static; use maplit::hashset; @@ -1787,28 +1787,11 @@ mod release_tests { fn cross_fork_harness() -> (BeaconChainHarness>, ChainSpec) { - let mut spec = test_spec::(); - - if cfg!(feature = "fork_from_env") { - let fork = get_fork_from_env(); - match fork { - ForkName::Altair | ForkName::Merge | ForkName::Capella | ForkName::Eip4844 => {} - _ => panic!( - "Unknown fork {}, add it above AND below so this test doesn't panic", - fork - ), - } - } + let mut spec = E::default_spec(); // Give some room to sign surround slashings. - // It appears we need to set _every_ fork to some non-zero value - // here. Otherwise if we set FORK_NAME_ENV_VAR to some fork that - // isn't listed here, tests that use this function will panic in - // non-trivial ways spec.altair_fork_epoch = Some(Epoch::new(3)); spec.bellatrix_fork_epoch = Some(Epoch::new(6)); - spec.capella_fork_epoch = Some(Epoch::new(9)); - spec.eip4844_fork_epoch = Some(Epoch::new(12)); // To make exits immediately valid. spec.shard_committee_period = 0; From cb94f639b00b168c49cc60f216f1bbede1ee5f9f Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Sun, 8 Jan 2023 18:05:28 -0600 Subject: [PATCH 108/263] Isolate withdrawals-processing Feature (#3854) --- Makefile | 4 +- beacon_node/Cargo.toml | 1 - beacon_node/beacon_chain/Cargo.toml | 2 - beacon_node/beacon_chain/src/beacon_chain.rs | 36 ++---- beacon_node/beacon_chain/src/builder.rs | 1 - .../beacon_chain/src/observed_operations.rs | 6 +- beacon_node/execution_layer/Cargo.toml | 2 +- beacon_node/http_api/Cargo.toml | 3 - beacon_node/http_api/src/lib.rs | 16 +-- beacon_node/operation_pool/Cargo.toml | 3 - beacon_node/operation_pool/src/lib.rs | 113 ++++++------------ beacon_node/operation_pool/src/persistence.rs | 1 - common/eth2/Cargo.toml | 1 - .../src/per_block_processing.rs | 8 +- .../process_operations.rs | 5 +- .../state_processing/src/verify_operation.rs | 17 +-- testing/ef_tests/src/cases/operations.rs | 19 +-- testing/ef_tests/src/lib.rs | 1 - testing/ef_tests/tests/tests.rs | 2 - 19 files changed, 74 insertions(+), 167 deletions(-) diff --git a/Makefile b/Makefile index e1fba5b4237..41721c2d656 100644 --- a/Makefile +++ b/Makefile @@ -89,12 +89,12 @@ build-release-tarballs: # Runs the full workspace tests in **release**, without downloading any additional # test vectors. test-release: - cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher + cargo test --workspace --features withdrawals-processing --release --exclude ef_tests --exclude beacon_chain --exclude slasher # Runs the full workspace tests in **debug**, without downloading any additional test # vectors. test-debug: - cargo test --workspace --exclude ef_tests --exclude beacon_chain + cargo test --workspace --features withdrawals-processing --exclude ef_tests --exclude beacon_chain # Runs cargo-fmt (linter). 
cargo-fmt: diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index bed32011f1b..d6b0b643a44 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -17,7 +17,6 @@ withdrawals-processing = [ "beacon_chain/withdrawals-processing", "store/withdrawals-processing", "execution_layer/withdrawals-processing", - "http_api/withdrawals-processing", ] [dependencies] diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index a6ac6603791..c89f1650e31 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -12,9 +12,7 @@ participation_metrics = [] # Exposes validator participation metrics to Prometh fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable withdrawals-processing = [ "state_processing/withdrawals-processing", - "store/withdrawals-processing", "execution_layer/withdrawals-processing", - "operation_pool/withdrawals-processing" ] [dev-dependencies] diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index edf0e149c7b..798a9b80823 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -362,7 +362,6 @@ pub struct BeaconChain { pub(crate) observed_attester_slashings: Mutex, T::EthSpec>>, /// Maintains a record of which validators we've seen BLS to execution changes for. - #[cfg(feature = "withdrawals-processing")] pub(crate) observed_bls_to_execution_changes: Mutex>, /// The most recently validated light client finality update received on gossip. @@ -2232,29 +2231,18 @@ impl BeaconChain { &self, bls_to_execution_change: SignedBlsToExecutionChange, ) -> Result, Error> { - #[cfg(feature = "withdrawals-processing")] - { - let current_fork = self.spec.fork_name_at_slot::(self.slot()?); - if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork { - // Disallow BLS to execution changes prior to the Capella fork. - return Err(Error::BlsToExecutionChangeBadFork(current_fork)); - } - - let wall_clock_state = self.wall_clock_state()?; - - Ok(self - .observed_bls_to_execution_changes - .lock() - .verify_and_observe(bls_to_execution_change, &wall_clock_state, &self.spec)?) + let current_fork = self.spec.fork_name_at_slot::(self.slot()?); + if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork { + // Disallow BLS to execution changes prior to the Capella fork. + return Err(Error::BlsToExecutionChangeBadFork(current_fork)); } - // TODO: remove this whole block once withdrawals-processing is removed - #[cfg(not(feature = "withdrawals-processing"))] - { - #[allow(clippy::drop_non_drop)] - drop(bls_to_execution_change); - Ok(ObservationOutcome::AlreadyKnown) - } + let wall_clock_state = self.wall_clock_state()?; + + Ok(self + .observed_bls_to_execution_changes + .lock() + .verify_and_observe(bls_to_execution_change, &wall_clock_state, &self.spec)?) } /// Import a BLS to execution change to the op pool. 
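For context, `verify_and_observe` above both verifies the change and records its validator index, so a repeat message from the same validator is reported as already known rather than re-imported. A standalone sketch of that outcome (a plain `HashSet` stands in for the observation cache; signature and fork checks are omitted):

// Standalone sketch: per-validator de-duplication of BLS-to-execution-change messages.
use std::collections::HashSet;

#[derive(Debug, PartialEq)]
enum Outcome {
    New,
    AlreadyKnown,
}

fn observe(seen: &mut HashSet<u64>, validator_index: u64) -> Outcome {
    // `insert` returns false if the index was already present.
    if seen.insert(validator_index) {
        Outcome::New
    } else {
        Outcome::AlreadyKnown
    }
}

fn main() {
    let mut seen = HashSet::new();
    // The first change seen for validator 7 is imported and republished on gossip;
    // a second one from the same validator is ignored.
    assert_eq!(observe(&mut seen, 7), Outcome::New);
    assert_eq!(observe(&mut seen, 7), Outcome::AlreadyKnown);
}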
@@ -2263,12 +2251,8 @@ impl BeaconChain { bls_to_execution_change: SigVerifiedOp, ) { if self.eth1_chain.is_some() { - #[cfg(feature = "withdrawals-processing")] self.op_pool .insert_bls_to_execution_change(bls_to_execution_change); - - #[cfg(not(feature = "withdrawals-processing"))] - drop(bls_to_execution_change); } } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 3073de9ca7f..b73ad2f25ec 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -798,7 +798,6 @@ where observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), - #[cfg(feature = "withdrawals-processing")] observed_bls_to_execution_changes: <_>::default(), latest_seen_finality_update: <_>::default(), latest_seen_optimistic_update: <_>::default(), diff --git a/beacon_node/beacon_chain/src/observed_operations.rs b/beacon_node/beacon_chain/src/observed_operations.rs index 5781f9b5b10..6e53373939a 100644 --- a/beacon_node/beacon_chain/src/observed_operations.rs +++ b/beacon_node/beacon_chain/src/observed_operations.rs @@ -6,12 +6,9 @@ use std::collections::HashSet; use std::marker::PhantomData; use types::{ AttesterSlashing, BeaconState, ChainSpec, EthSpec, ForkName, ProposerSlashing, - SignedVoluntaryExit, Slot, + SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, }; -#[cfg(feature = "withdrawals-processing")] -use types::SignedBlsToExecutionChange; - /// Number of validator indices to store on the stack in `observed_validators`. pub const SMALL_VEC_SIZE: usize = 8; @@ -83,7 +80,6 @@ impl ObservableOperation for AttesterSlashing { } } -#[cfg(feature = "withdrawals-processing")] impl ObservableOperation for SignedBlsToExecutionChange { fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> { smallvec![self.message.validator_index] diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 47c1e0341b6..30312939d6f 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] -withdrawals-processing = ["state_processing/withdrawals-processing", "eth2/withdrawals-processing"] +withdrawals-processing = ["state_processing/withdrawals-processing"] [dependencies] types = { path = "../../consensus/types"} diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index e8a97fd0beb..077e3aa7cda 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -5,9 +5,6 @@ authors = ["Paul Hauner "] edition = "2021" autotests = false # using a single test binary compiles faster -[features] -withdrawals-processing = [] - [dependencies] warp = { version = "0.3.2", features = ["tls"] } serde = { version = "1.0.116", features = ["derive"] } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 2d6c4468049..9d04938adcd 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1687,16 +1687,12 @@ pub fn serve( match chain.verify_bls_to_execution_change_for_gossip(address_change) { Ok(ObservationOutcome::New(verified_address_change)) => { - #[cfg(feature = "withdrawals-processing")] - { - publish_pubsub_message( - &network_tx, - PubsubMessage::BlsToExecutionChange(Box::new( - verified_address_change.as_inner().clone(), - )), - )?; - } - + 
publish_pubsub_message( + &network_tx, + PubsubMessage::BlsToExecutionChange(Box::new( + verified_address_change.as_inner().clone(), + )), + )?; chain.import_bls_to_execution_change(verified_address_change); } Ok(ObservationOutcome::AlreadyKnown) => { diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index d7523544376..8483233589f 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -4,9 +4,6 @@ version = "0.2.0" authors = ["Michael Sproul "] edition = "2021" -[features] -withdrawals-processing = [] - [dependencies] derivative = "2.1.1" itertools = "0.10.0" diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 1f4660fc285..a69e0a750fc 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -51,7 +51,6 @@ pub struct OperationPool { /// Map from exiting validator to their exit data. voluntary_exits: RwLock>>, /// Map from credential changing validator to their execution change data. - #[cfg(feature = "withdrawals-processing")] bls_to_execution_changes: RwLock>>, /// Reward cache for accelerating attestation packing. reward_cache: RwLock, @@ -518,17 +517,10 @@ impl OperationPool { &self, verified_change: SigVerifiedOp, ) { - #[cfg(feature = "withdrawals-processing")] - { - self.bls_to_execution_changes.write().insert( - verified_change.as_inner().message.validator_index, - verified_change, - ); - } - #[cfg(not(feature = "withdrawals-processing"))] - { - drop(verified_change); - } + self.bls_to_execution_changes.write().insert( + verified_change.as_inner().message.validator_index, + verified_change, + ); } /// Get a list of execution changes for inclusion in a block. @@ -539,32 +531,19 @@ impl OperationPool { state: &BeaconState, spec: &ChainSpec, ) -> Vec { - #[cfg(feature = "withdrawals-processing")] - { - filter_limit_operations( - self.bls_to_execution_changes.read().values(), - |address_change| { - address_change.signature_is_still_valid(&state.fork()) - && state - .get_validator( - address_change.as_inner().message.validator_index as usize, - ) - .map_or(false, |validator| { - !validator.has_eth1_withdrawal_credential(spec) - }) - }, - |address_change| address_change.as_inner().clone(), - T::MaxBlsToExecutionChanges::to_usize(), - ) - } - - // TODO: remove this whole block once withdrwals-processing is removed - #[cfg(not(feature = "withdrawals-processing"))] - { - #[allow(clippy::drop_copy)] - drop((state, spec)); - vec![] - } + filter_limit_operations( + self.bls_to_execution_changes.read().values(), + |address_change| { + address_change.signature_is_still_valid(&state.fork()) + && state + .get_validator(address_change.as_inner().message.validator_index as usize) + .map_or(false, |validator| { + !validator.has_eth1_withdrawal_credential(spec) + }) + }, + |address_change| address_change.as_inner().clone(), + T::MaxBlsToExecutionChanges::to_usize(), + ) } /// Prune BLS to execution changes that have been applied to the state more than 1 block ago. 
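Further down this patch, the remaining `#[cfg(feature = "withdrawals-processing")]` attributes in `state_processing` are swapped for runtime `cfg!` checks, so the functions compile under both feature settings. A small sketch of the difference between the two gating styles, using a hypothetical `demo` feature flag rather than a real Lighthouse one:

    // Compile-time gating: the item only exists when the feature is enabled,
    // so every caller must carry a matching #[cfg] attribute.
    #[cfg(feature = "demo")]
    #[allow(dead_code)]
    fn process_gated() -> Result<(), String> {
        Ok(())
    }

    // Runtime gating: the function always compiles and callers need no
    // attributes; it simply becomes a no-op when the feature is off.
    fn process_ungated() -> Result<(), String> {
        if cfg!(not(feature = "demo")) {
            return Ok(());
        }
        // ... feature-specific processing would happen here ...
        Ok(())
    }

    fn main() {
        assert!(process_ungated().is_ok());
    }
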
@@ -579,32 +558,22 @@ impl OperationPool { head_state: &BeaconState, spec: &ChainSpec, ) { - #[cfg(feature = "withdrawals-processing")] - { - prune_validator_hash_map( - &mut self.bls_to_execution_changes.write(), - |validator_index, validator| { - validator.has_eth1_withdrawal_credential(spec) - && head_block - .message() - .body() - .bls_to_execution_changes() - .map_or(true, |recent_changes| { - !recent_changes - .iter() - .any(|c| c.message.validator_index == validator_index) - }) - }, - head_state, - ); - } - - // TODO: remove this whole block once withdrwals-processing is removed - #[cfg(not(feature = "withdrawals-processing"))] - { - #[allow(clippy::drop_copy)] - drop((head_block, head_state, spec)); - } + prune_validator_hash_map( + &mut self.bls_to_execution_changes.write(), + |validator_index, validator| { + validator.has_eth1_withdrawal_credential(spec) + && head_block + .message() + .body() + .bls_to_execution_changes() + .map_or(true, |recent_changes| { + !recent_changes + .iter() + .any(|c| c.message.validator_index == validator_index) + }) + }, + head_state, + ); } /// Prune all types of transactions given the latest head state and head fork. @@ -691,17 +660,11 @@ impl OperationPool { /// /// This method may return objects that are invalid for block inclusion. pub fn get_all_bls_to_execution_changes(&self) -> Vec { - #[cfg(feature = "withdrawals-processing")] - { - self.bls_to_execution_changes - .read() - .iter() - .map(|(_, address_change)| address_change.as_inner().clone()) - .collect() - } - - #[cfg(not(feature = "withdrawals-processing"))] - vec![] + self.bls_to_execution_changes + .read() + .iter() + .map(|(_, address_change)| address_change.as_inner().clone()) + .collect() } } diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 184b967dbee..b232e5a5546 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -143,7 +143,6 @@ impl PersistedOperationPool { proposer_slashings, voluntary_exits, // FIXME(capella): implement schema migration for address changes in op pool - #[cfg(feature = "withdrawals-processing")] bls_to_execution_changes: Default::default(), reward_cache: Default::default(), _phantom: Default::default(), diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index fc5eba98e29..eca086d838f 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -35,4 +35,3 @@ procinfo = { version = "0.4.2", optional = true } [features] default = ["lighthouse"] lighthouse = ["proto_array", "psutil", "procinfo", "store", "slashing_protection"] -withdrawals-processing = ["store/withdrawals-processing"] \ No newline at end of file diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index f1a544099f3..0192fe0cec2 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -19,7 +19,6 @@ pub use process_operations::process_operations; pub use verify_attestation::{ verify_attestation_for_block_inclusion, verify_attestation_for_state, }; -#[cfg(feature = "withdrawals-processing")] pub use verify_bls_to_execution_change::verify_bls_to_execution_change; pub use verify_deposit::{ get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, @@ -36,13 +35,11 @@ pub mod signature_sets; pub mod tests; mod verify_attestation; mod verify_attester_slashing; -#[cfg(feature = 
"withdrawals-processing")] mod verify_bls_to_execution_change; mod verify_deposit; mod verify_exit; mod verify_proposer_slashing; -#[cfg(feature = "withdrawals-processing")] use crate::common::decrease_balance; #[cfg(feature = "arbitrary-fuzz")] @@ -165,7 +162,6 @@ pub fn per_block_processing>( // previous block. if is_execution_enabled(state, block.body()) { let payload = block.body().execution_payload()?; - #[cfg(feature = "withdrawals-processing")] process_withdrawals::(state, payload, spec)?; process_execution_payload::(state, payload, spec)?; } @@ -524,12 +520,14 @@ pub fn get_expected_withdrawals( } /// Apply withdrawals to the state. -#[cfg(feature = "withdrawals-processing")] pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload>( state: &mut BeaconState, payload: Payload::Ref<'payload>, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { + if cfg!(not(feature = "withdrawals-processing")) { + return Ok(()); + } match state { BeaconState::Merge(_) => Ok(()), BeaconState::Capella(_) | BeaconState::Eip4844(_) => { diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index f27fd48b4f5..eacb7617c78 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -34,7 +34,6 @@ pub fn process_operations<'a, T: EthSpec, Payload: AbstractExecPayload>( process_deposits(state, block_body.deposits(), spec)?; process_exits(state, block_body.voluntary_exits(), verify_signatures, spec)?; - #[cfg(feature = "withdrawals-processing")] if let Ok(bls_to_execution_changes) = block_body.bls_to_execution_changes() { process_bls_to_execution_changes(state, bls_to_execution_changes, verify_signatures, spec)?; } @@ -295,13 +294,15 @@ pub fn process_exits( /// /// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returns /// an `Err` describing the invalid object or cause of failure. 
-#[cfg(feature = "withdrawals-processing")] pub fn process_bls_to_execution_changes( state: &mut BeaconState, bls_to_execution_changes: &[SignedBlsToExecutionChange], verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { + if cfg!(not(feature = "withdrawals-processing")) { + return Ok(()); + } for (i, signed_address_change) in bls_to_execution_changes.iter().enumerate() { verify_bls_to_execution_change(state, signed_address_change, verify_signatures, spec) .map_err(|e| e.into_with_index(i))?; diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index e2e434417e5..efd356462da 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -1,8 +1,10 @@ use crate::per_block_processing::{ errors::{ - AttesterSlashingValidationError, ExitValidationError, ProposerSlashingValidationError, + AttesterSlashingValidationError, BlsExecutionChangeValidationError, ExitValidationError, + ProposerSlashingValidationError, }, - verify_attester_slashing, verify_exit, verify_proposer_slashing, + verify_attester_slashing, verify_bls_to_execution_change, verify_exit, + verify_proposer_slashing, }; use crate::VerifySignatures; use derivative::Derivative; @@ -12,15 +14,7 @@ use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; use types::{ AttesterSlashing, BeaconState, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, ProposerSlashing, - SignedVoluntaryExit, -}; - -#[cfg(feature = "withdrawals-processing")] -use { - crate::per_block_processing::{ - errors::BlsExecutionChangeValidationError, verify_bls_to_execution_change, - }, - types::SignedBlsToExecutionChange, + SignedBlsToExecutionChange, SignedVoluntaryExit, }; const MAX_FORKS_VERIFIED_AGAINST: usize = 2; @@ -202,7 +196,6 @@ impl VerifyOperation for ProposerSlashing { } } -#[cfg(feature = "withdrawals-processing")] impl VerifyOperation for SignedBlsToExecutionChange { type Error = BlsExecutionChangeValidationError; diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index a08ee1996ac..71954405c0c 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -4,30 +4,24 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::testing_spec; use serde_derive::Deserialize; -#[cfg(feature = "withdrawals-processing")] -use state_processing::per_block_processing::process_operations::{ - process_bls_to_execution_changes, process_bls_to_execution_changes, -}; use state_processing::{ per_block_processing::{ errors::BlockProcessingError, process_block_header, process_execution_payload, process_operations::{ - altair, base, process_attester_slashings, process_deposits, process_exits, - process_proposer_slashings, + altair, base, process_attester_slashings, process_bls_to_execution_changes, + process_deposits, process_exits, process_proposer_slashings, }, - process_sync_aggregate, VerifyBlockRoot, VerifySignatures, + process_sync_aggregate, process_withdrawals, VerifyBlockRoot, VerifySignatures, }, ConsensusContext, }; use std::fmt::Debug; use std::path::Path; -#[cfg(feature = "withdrawals-processing")] -use types::SignedBlsToExecutionChange; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, - EthSpec, ExecutionPayload, ForkName, 
FullPayload, ProposerSlashing, SignedVoluntaryExit, - SyncAggregate, + EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedBlsToExecutionChange, + SignedVoluntaryExit, SyncAggregate, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -42,7 +36,6 @@ struct ExecutionMetadata { } /// Newtype for testing withdrawals. -#[cfg(feature = "withdrawals-processing")] #[derive(Debug, Clone, Deserialize)] pub struct WithdrawalsPayload { payload: FullPayload, @@ -341,7 +334,6 @@ impl Operation for BlindedPayload { } } -#[cfg(feature = "withdrawals-processing")] impl Operation for WithdrawalsPayload { fn handler_name() -> String { "withdrawals".into() @@ -374,7 +366,6 @@ impl Operation for WithdrawalsPayload { } } -#[cfg(feature = "withdrawals-processing")] impl Operation for SignedBlsToExecutionChange { fn handler_name() -> String { "bls_to_execution_change".into() diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index a4d4f2d52d4..f52b7bca17b 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,5 +1,4 @@ pub use case_result::CaseResult; -#[cfg(feature = "withdrawals-processing")] pub use cases::WithdrawalsPayload; pub use cases::{ Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates, diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 66c4f83ecea..f84be64dad9 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -82,14 +82,12 @@ fn operations_execution_payload_blinded() { OperationsHandler::>::default().run(); } -#[cfg(feature = "withdrawals-processing")] #[test] fn operations_withdrawals() { OperationsHandler::>::default().run(); OperationsHandler::>::default().run(); } -#[cfg(feature = "withdrawals-processing")] #[test] fn operations_bls_to_execution_change() { OperationsHandler::::default().run(); From 11f4784ae6bbcfa00cd4c8ffd59266385a337add Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Sun, 8 Jan 2023 19:38:02 -0600 Subject: [PATCH 109/263] Added bls_to_execution_changes to PersistedOpPool (#3857) * Added bls_to_execution_changes to PersistedOpPool --- beacon_node/beacon_chain/src/schema_change.rs | 9 +++ .../src/schema_change/migration_schema_v12.rs | 8 +- .../src/schema_change/migration_schema_v14.rs | 75 +++++++++++++++++++ beacon_node/operation_pool/src/lib.rs | 3 +- beacon_node/operation_pool/src/persistence.rs | 69 +++++++++++++---- beacon_node/store/src/metadata.rs | 2 +- 6 files changed, 146 insertions(+), 20 deletions(-) create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 73906b1b586..8684bafe2d0 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,6 +1,7 @@ //! Utilities for managing database schema changes. 
mod migration_schema_v12; mod migration_schema_v13; +mod migration_schema_v14; use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; use crate::eth1_chain::SszEth1; @@ -114,6 +115,14 @@ pub fn migrate_schema( Ok(()) } + (SchemaVersion(13), SchemaVersion(14)) => { + let ops = migration_schema_v14::upgrade_to_v14::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(14), SchemaVersion(13)) => { + let ops = migration_schema_v14::downgrade_from_v14::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs index bb72b28c0ec..c9aa2097f8a 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs @@ -168,16 +168,14 @@ pub fn downgrade_from_v12( log: Logger, ) -> Result, Error> { // Load a V12 op pool and transform it to V5. - let PersistedOperationPoolV12 { + let PersistedOperationPoolV12:: { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, - } = if let Some(PersistedOperationPool::::V12(op_pool)) = - db.get_item(&OP_POOL_DB_KEY)? - { - op_pool + } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? { + op_pool_v12 } else { debug!(log, "Nothing to do, no operation pool stored"); return Ok(vec![]); diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs new file mode 100644 index 00000000000..02422a403b5 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs @@ -0,0 +1,75 @@ +use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; +use operation_pool::{ + PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, +}; +use slog::{debug, info, Logger}; +use std::sync::Arc; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; + +pub fn upgrade_to_v14( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V12 op pool and transform it to V14. + let PersistedOperationPoolV12:: { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? { + op_pool_v12 + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + // initialize with empty vector + let bls_to_execution_changes = vec![]; + let v14 = PersistedOperationPool::V14(PersistedOperationPoolV14 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + }); + Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)]) +} + +pub fn downgrade_from_v14( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V14 op pool and transform it to V12. + let PersistedOperationPoolV14 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + } = if let Some(PersistedOperationPool::::V14(op_pool)) = + db.get_item(&OP_POOL_DB_KEY)? 
+ { + op_pool + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + info!( + log, + "Dropping bls_to_execution_changes from pool"; + "count" => bls_to_execution_changes.len(), + ); + + let v12 = PersistedOperationPoolV12 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + }; + Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)]) +} diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index a69e0a750fc..70e0d56bc91 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -12,7 +12,8 @@ pub use attestation::AttMaxCover; pub use attestation_storage::{AttestationRef, SplitAttestation}; pub use max_cover::MaxCover; pub use persistence::{ - PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5, + PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, + PersistedOperationPoolV5, }; pub use reward_cache::RewardCache; diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index b232e5a5546..043e6fb7fd8 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -18,7 +18,7 @@ type PersistedSyncContributions = Vec<(SyncAggregateId, Vec { #[superstruct(only(V5))] pub attestations_v5: Vec<(AttestationId, Vec>)>, /// Attestations and their attesting indices. - #[superstruct(only(V12))] + #[superstruct(only(V12, V14))] pub attestations: Vec<(Attestation, Vec)>, /// Mapping from sync contribution ID to sync contributions and aggregate. pub sync_contributions: PersistedSyncContributions, @@ -40,20 +40,23 @@ pub struct PersistedOperationPool { #[superstruct(only(V5))] pub attester_slashings_v5: Vec<(AttesterSlashing, ForkVersion)>, /// Attester slashings. - #[superstruct(only(V12))] + #[superstruct(only(V12, V14))] pub attester_slashings: Vec, T>>, /// [DEPRECATED] Proposer slashings. #[superstruct(only(V5))] pub proposer_slashings_v5: Vec, /// Proposer slashings with fork information. - #[superstruct(only(V12))] + #[superstruct(only(V12, V14))] pub proposer_slashings: Vec>, /// [DEPRECATED] Voluntary exits. #[superstruct(only(V5))] pub voluntary_exits_v5: Vec, /// Voluntary exits with fork information. 
- #[superstruct(only(V12))] + #[superstruct(only(V12, V14))] pub voluntary_exits: Vec>, + /// BLS to Execution Changes + #[superstruct(only(V14))] + pub bls_to_execution_changes: Vec>, } impl PersistedOperationPool { @@ -99,12 +102,20 @@ impl PersistedOperationPool { .map(|(_, exit)| exit.clone()) .collect(); - PersistedOperationPool::V12(PersistedOperationPoolV12 { + let bls_to_execution_changes = operation_pool + .bls_to_execution_changes + .read() + .iter() + .map(|(_, bls_to_execution_change)| bls_to_execution_change.clone()) + .collect(); + + PersistedOperationPool::V14(PersistedOperationPoolV14 { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, + bls_to_execution_changes, }) } @@ -127,23 +138,41 @@ impl PersistedOperationPool { ); let sync_contributions = RwLock::new(self.sync_contributions().iter().cloned().collect()); let attestations = match self { - PersistedOperationPool::V5(_) => return Err(OpPoolError::IncorrectOpPoolVariant), - PersistedOperationPool::V12(pool) => { + PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => { + return Err(OpPoolError::IncorrectOpPoolVariant) + } + PersistedOperationPool::V14(ref pool) => { let mut map = AttestationMap::default(); - for (att, attesting_indices) in pool.attestations { + for (att, attesting_indices) in pool.attestations.clone() { map.insert(att, attesting_indices); } RwLock::new(map) } }; + let bls_to_execution_changes = match self { + PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => { + return Err(OpPoolError::IncorrectOpPoolVariant) + } + PersistedOperationPool::V14(pool) => RwLock::new( + pool.bls_to_execution_changes + .iter() + .cloned() + .map(|bls_to_execution_change| { + ( + bls_to_execution_change.as_inner().message.validator_index, + bls_to_execution_change, + ) + }) + .collect(), + ), + }; let op_pool = OperationPool { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, - // FIXME(capella): implement schema migration for address changes in op pool - bls_to_execution_changes: Default::default(), + bls_to_execution_changes, reward_cache: Default::default(), _phantom: Default::default(), }; @@ -165,6 +194,20 @@ impl StoreItem for PersistedOperationPoolV5 { } } +impl StoreItem for PersistedOperationPoolV12 { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + PersistedOperationPoolV12::from_ssz_bytes(bytes).map_err(Into::into) + } +} + /// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`. impl StoreItem for PersistedOperationPool { fn db_column() -> DBColumn { @@ -177,8 +220,8 @@ impl StoreItem for PersistedOperationPool { fn from_store_bytes(bytes: &[u8]) -> Result { // Default deserialization to the latest variant. 
- PersistedOperationPoolV12::from_ssz_bytes(bytes) - .map(Self::V12) + PersistedOperationPoolV14::from_ssz_bytes(bytes) + .map(Self::V14) .map_err(Into::into) } } diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 5cb3f122008..fb5769635d2 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(13); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(14); // All the keys that get stored under the `BeaconMeta` column. // From 87c44697d090347d32a093d5bd0be88c38519290 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 9 Jan 2023 14:40:09 +1100 Subject: [PATCH 110/263] Bump MSRV to 1.65 (#3860) --- lighthouse/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 2db42d6ec3f..be022e311d3 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -4,7 +4,7 @@ version = "3.3.0" authors = ["Sigma Prime "] edition = "2021" autotests = false -rust-version = "1.62" +rust-version = "1.65" [features] default = ["slasher-mdbx"] From 98b11bbd3fec8b78602a80243eb889d75e1f7d67 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 10 Jan 2023 20:40:21 -0500 Subject: [PATCH 111/263] add historical summaries (#3865) * add historical summaries * fix tree hash caching, disable the sanity slots test with fake crypto * add ssz static HistoricalSummary * only store historical summaries after capella * Teach `UpdatePattern` about Capella * Tidy EF tests * Clippy Co-authored-by: Michael Sproul --- beacon_node/store/src/chunked_vector.rs | 78 ++++++++++++++-- beacon_node/store/src/hot_cold_store.rs | 4 +- beacon_node/store/src/lib.rs | 2 + beacon_node/store/src/partial_beacon_state.rs | 72 +++++++++++---- .../src/per_epoch_processing.rs | 12 +-- .../src/per_epoch_processing/capella.rs | 78 ++++++++++++++++ .../capella/historical_summaries_update.rs | 23 +++++ .../state_processing/src/upgrade/capella.rs | 4 +- .../state_processing/src/upgrade/eip4844.rs | 3 +- consensus/types/src/beacon_state.rs | 8 +- .../types/src/beacon_state/tree_hash_cache.rs | 38 ++++++-- consensus/types/src/historical_summary.rs | 88 +++++++++++++++++++ consensus/types/src/lib.rs | 1 + lcli/src/main.rs | 1 - testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 2 - .../ef_tests/src/cases/epoch_processing.rs | 30 ++++++- testing/ef_tests/src/handler.rs | 5 ++ testing/ef_tests/src/lib.rs | 8 +- testing/ef_tests/src/type_name.rs | 2 + testing/ef_tests/tests/tests.rs | 13 +++ 21 files changed, 424 insertions(+), 50 deletions(-) create mode 100644 consensus/state_processing/src/per_epoch_processing/capella.rs create mode 100644 consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs create mode 100644 consensus/types/src/historical_summary.rs diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index 8c64d4bcc05..73edfbb0744 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -18,6 +18,7 @@ use self::UpdatePattern::*; use crate::*; use ssz::{Decode, Encode}; use typenum::Unsigned; +use types::historical_summary::HistoricalSummary; /// Description of how a `BeaconState` field is updated during state processing. 
/// @@ -26,7 +27,18 @@ use typenum::Unsigned; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum UpdatePattern { /// The value is updated once per `n` slots. - OncePerNSlots { n: u64 }, + OncePerNSlots { + n: u64, + /// The slot at which the field begins to accumulate values. + /// + /// The field should not be read or written until `activation_slot` is reached, and the + /// activation slot should act as an offset when converting slots to vector indices. + activation_slot: Option, + /// The slot at which the field ceases to accumulate values. + /// + /// If this is `None` then the field is continually updated. + deactivation_slot: Option, + }, /// The value is updated once per epoch, for the epoch `current_epoch - lag`. OncePerEpoch { lag: u64 }, } @@ -98,12 +110,30 @@ pub trait Field: Copy { fn start_and_end_vindex(current_slot: Slot, spec: &ChainSpec) -> (usize, usize) { // We take advantage of saturating subtraction on slots and epochs match Self::update_pattern(spec) { - OncePerNSlots { n } => { + OncePerNSlots { + n, + activation_slot, + deactivation_slot, + } => { // Per-slot changes exclude the index for the current slot, because // it won't be set until the slot completes (think of `state_roots`, `block_roots`). // This also works for the `historical_roots` because at the `n`th slot, the 0th // entry of the list is created, and before that the list is empty. - let end_vindex = current_slot / n; + // + // To account for the switch from historical roots to historical summaries at + // Capella we also modify the current slot by the activation and deactivation slots. + // The activation slot acts as an offset (subtraction) while the deactivation slot + // acts as a clamp (min). + let slot_with_clamp = deactivation_slot.map_or(current_slot, |deactivation_slot| { + std::cmp::min(current_slot, deactivation_slot) + }); + let slot_with_clamp_and_offset = if let Some(activation_slot) = activation_slot { + slot_with_clamp - activation_slot + } else { + // Return (0, 0) to indicate that the field should not be read/written. 
+ return (0, 0); + }; + let end_vindex = slot_with_clamp_and_offset / n; let start_vindex = end_vindex - Self::Length::to_u64(); (start_vindex.as_usize(), end_vindex.as_usize()) } @@ -295,7 +325,11 @@ field!( Hash256, T::SlotsPerHistoricalRoot, DBColumn::BeaconBlockRoots, - |_| OncePerNSlots { n: 1 }, + |_| OncePerNSlots { + n: 1, + activation_slot: Some(Slot::new(0)), + deactivation_slot: None + }, |state: &BeaconState<_>, index, _| safe_modulo_index(state.block_roots(), index) ); @@ -305,7 +339,11 @@ field!( Hash256, T::SlotsPerHistoricalRoot, DBColumn::BeaconStateRoots, - |_| OncePerNSlots { n: 1 }, + |_| OncePerNSlots { + n: 1, + activation_slot: Some(Slot::new(0)), + deactivation_slot: None, + }, |state: &BeaconState<_>, index, _| safe_modulo_index(state.state_roots(), index) ); @@ -315,8 +353,12 @@ field!( Hash256, T::HistoricalRootsLimit, DBColumn::BeaconHistoricalRoots, - |_| OncePerNSlots { - n: T::SlotsPerHistoricalRoot::to_u64() + |spec: &ChainSpec| OncePerNSlots { + n: T::SlotsPerHistoricalRoot::to_u64(), + activation_slot: Some(Slot::new(0)), + deactivation_slot: spec + .capella_fork_epoch + .map(|fork_epoch| fork_epoch.start_slot(T::slots_per_epoch())), }, |state: &BeaconState<_>, index, _| safe_modulo_index(state.historical_roots(), index) ); @@ -331,6 +373,27 @@ field!( |state: &BeaconState<_>, index, _| safe_modulo_index(state.randao_mixes(), index) ); +field!( + HistoricalSummaries, + VariableLengthField, + HistoricalSummary, + T::HistoricalRootsLimit, + DBColumn::BeaconHistoricalSummaries, + |spec: &ChainSpec| OncePerNSlots { + n: T::SlotsPerHistoricalRoot::to_u64(), + activation_slot: spec + .capella_fork_epoch + .map(|fork_epoch| fork_epoch.start_slot(T::slots_per_epoch())), + deactivation_slot: None, + }, + |state: &BeaconState<_>, index, _| safe_modulo_index( + state + .historical_summaries() + .map_err(|_| ChunkError::InvalidFork)?, + index + ) +); + pub fn store_updated_vector, E: EthSpec, S: KeyValueStore>( field: F, store: &S, @@ -679,6 +742,7 @@ pub enum ChunkError { end_vindex: usize, length: usize, }, + InvalidFork, } #[cfg(test)] diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 4600e5e8269..93af0ca184e 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1,5 +1,5 @@ use crate::chunked_vector::{ - store_updated_vector, BlockRoots, HistoricalRoots, RandaoMixes, StateRoots, + store_updated_vector, BlockRoots, HistoricalRoots, HistoricalSummaries, RandaoMixes, StateRoots, }; use crate::config::{ OnDiskStoreConfig, StoreConfig, DEFAULT_SLOTS_PER_RESTORE_POINT, @@ -948,6 +948,7 @@ impl, Cold: ItemStore> HotColdDB store_updated_vector(StateRoots, db, state, &self.spec, ops)?; store_updated_vector(HistoricalRoots, db, state, &self.spec, ops)?; store_updated_vector(RandaoMixes, db, state, &self.spec, ops)?; + store_updated_vector(HistoricalSummaries, db, state, &self.spec, ops)?; // 3. Store restore point. 
let restore_point_index = state.slot().as_u64() / self.config.slots_per_restore_point; @@ -1002,6 +1003,7 @@ impl, Cold: ItemStore> HotColdDB partial_state.load_state_roots(&self.cold_db, &self.spec)?; partial_state.load_historical_roots(&self.cold_db, &self.spec)?; partial_state.load_randao_mixes(&self.cold_db, &self.spec)?; + partial_state.load_historical_summaries(&self.cold_db, &self.spec)?; partial_state.try_into() } diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index d9041dd6361..9db5f45ecee 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -214,6 +214,8 @@ pub enum DBColumn { /// For Optimistically Imported Merge Transition Blocks #[strum(serialize = "otb")] OptimisticTransitionBlock, + #[strum(serialize = "bhs")] + BeaconHistoricalSummaries, } /// A block from the database, which might have an execution payload or not. diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index ca35bc0b222..55697bd3160 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -1,12 +1,13 @@ use crate::chunked_vector::{ - load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, RandaoMixes, - StateRoots, + load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, + HistoricalSummaries, RandaoMixes, StateRoots, }; use crate::{get_key_for_col, DBColumn, Error, KeyValueStore, KeyValueStoreOp}; use ssz::{Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::convert::TryInto; use std::sync::Arc; +use types::historical_summary::HistoricalSummary; use types::superstruct; use types::*; @@ -104,16 +105,20 @@ where )] pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, - // Withdrawals + // Capella #[superstruct(only(Capella, Eip4844))] pub next_withdrawal_index: u64, #[superstruct(only(Capella, Eip4844))] pub next_withdrawal_validator_index: u64, + + #[ssz(skip_serializing, skip_deserializing)] + #[superstruct(only(Capella, Eip4844))] + pub historical_summaries: Option>, } /// Implement the conversion function from BeaconState -> PartialBeaconState. macro_rules! impl_from_state_forgetful { - ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => { + ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_fields_opt:ident),*]) => { PartialBeaconState::$variant_name($struct_name { // Versioning genesis_time: $s.genesis_time, @@ -154,6 +159,11 @@ macro_rules! 
impl_from_state_forgetful { // Variant-specific fields $( $extra_fields: $s.$extra_fields.clone() + ),*, + + // Variant-specific optional + $( + $extra_fields_opt: None ),* }) } @@ -168,7 +178,8 @@ impl PartialBeaconState { outer, Base, PartialBeaconStateBase, - [previous_epoch_attestations, current_epoch_attestations] + [previous_epoch_attestations, current_epoch_attestations], + [] ), BeaconState::Altair(s) => impl_from_state_forgetful!( s, @@ -181,7 +192,8 @@ impl PartialBeaconState { current_sync_committee, next_sync_committee, inactivity_scores - ] + ], + [] ), BeaconState::Merge(s) => impl_from_state_forgetful!( s, @@ -195,7 +207,8 @@ impl PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header - ] + ], + [] ), BeaconState::Capella(s) => impl_from_state_forgetful!( s, @@ -211,7 +224,8 @@ impl PartialBeaconState { latest_execution_payload_header, next_withdrawal_index, next_withdrawal_validator_index - ] + ], + [historical_summaries] ), BeaconState::Eip4844(s) => impl_from_state_forgetful!( s, @@ -227,7 +241,8 @@ impl PartialBeaconState { latest_execution_payload_header, next_withdrawal_index, next_withdrawal_validator_index - ] + ], + [historical_summaries] ), } } @@ -303,6 +318,23 @@ impl PartialBeaconState { Ok(()) } + pub fn load_historical_summaries>( + &mut self, + store: &S, + spec: &ChainSpec, + ) -> Result<(), Error> { + let slot = self.slot(); + if let Ok(historical_summaries) = self.historical_summaries_mut() { + if historical_summaries.is_none() { + *historical_summaries = + Some(load_variable_list_from_db::( + store, slot, spec, + )?); + } + } + Ok(()) + } + pub fn load_randao_mixes>( &mut self, store: &S, @@ -326,7 +358,7 @@ impl PartialBeaconState { /// Implement the conversion from PartialBeaconState -> BeaconState. macro_rules! impl_try_into_beacon_state { - ($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => { + ($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_opt_fields:ident),*]) => { BeaconState::$variant_name($struct_name { // Versioning genesis_time: $inner.genesis_time, @@ -371,6 +403,11 @@ macro_rules! impl_try_into_beacon_state { // Variant-specific fields $( $extra_fields: $inner.$extra_fields + ),*, + + // Variant-specific optional fields + $( + $extra_opt_fields: unpack_field($inner.$extra_opt_fields)? 
),* }) } @@ -389,7 +426,8 @@ impl TryInto> for PartialBeaconState { inner, Base, BeaconStateBase, - [previous_epoch_attestations, current_epoch_attestations] + [previous_epoch_attestations, current_epoch_attestations], + [] ), PartialBeaconState::Altair(inner) => impl_try_into_beacon_state!( inner, @@ -401,7 +439,8 @@ impl TryInto> for PartialBeaconState { current_sync_committee, next_sync_committee, inactivity_scores - ] + ], + [] ), PartialBeaconState::Merge(inner) => impl_try_into_beacon_state!( inner, @@ -414,7 +453,8 @@ impl TryInto> for PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header - ] + ], + [] ), PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( inner, @@ -429,7 +469,8 @@ impl TryInto> for PartialBeaconState { latest_execution_payload_header, next_withdrawal_index, next_withdrawal_validator_index - ] + ], + [historical_summaries] ), PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!( inner, @@ -444,7 +485,8 @@ impl TryInto> for PartialBeaconState { latest_execution_payload_header, next_withdrawal_index, next_withdrawal_validator_index - ] + ], + [historical_summaries] ), }; Ok(state) diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index f227b82863c..996e39c27fb 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -3,14 +3,16 @@ pub use epoch_processing_summary::EpochProcessingSummary; use errors::EpochProcessingError as Error; pub use justification_and_finalization_state::JustificationAndFinalizationState; -pub use registry_updates::process_registry_updates; use safe_arith::SafeArith; -pub use slashings::process_slashings; use types::{BeaconState, ChainSpec, EthSpec}; + +pub use registry_updates::process_registry_updates; +pub use slashings::process_slashings; pub use weigh_justification_and_finalization::weigh_justification_and_finalization; pub mod altair; pub mod base; +pub mod capella; pub mod effective_balance_updates; pub mod epoch_processing_summary; pub mod errors; @@ -37,10 +39,8 @@ pub fn process_epoch( match state { BeaconState::Base(_) => base::process_epoch(state, spec), - BeaconState::Altair(_) - | BeaconState::Merge(_) - | BeaconState::Capella(_) - | BeaconState::Eip4844(_) => altair::process_epoch(state, spec), + BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_epoch(state, spec), + BeaconState::Capella(_) | BeaconState::Eip4844(_) => capella::process_epoch(state, spec), } } diff --git a/consensus/state_processing/src/per_epoch_processing/capella.rs b/consensus/state_processing/src/per_epoch_processing/capella.rs new file mode 100644 index 00000000000..aaf301f29ec --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/capella.rs @@ -0,0 +1,78 @@ +use super::altair::inactivity_updates::process_inactivity_updates; +use super::altair::justification_and_finalization::process_justification_and_finalization; +use super::altair::participation_cache::ParticipationCache; +use super::altair::participation_flag_updates::process_participation_flag_updates; +use super::altair::rewards_and_penalties::process_rewards_and_penalties; +use super::altair::sync_committee_updates::process_sync_committee_updates; +use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error}; +use crate::per_epoch_processing::{ + effective_balance_updates::process_effective_balance_updates, + 
resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, +}; +use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; + +pub use historical_summaries_update::process_historical_summaries_update; + +mod historical_summaries_update; + +pub fn process_epoch( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result, Error> { + // Ensure the committee caches are built. + state.build_committee_cache(RelativeEpoch::Previous, spec)?; + state.build_committee_cache(RelativeEpoch::Current, spec)?; + state.build_committee_cache(RelativeEpoch::Next, spec)?; + + // Pre-compute participating indices and total balances. + let participation_cache = ParticipationCache::new(state, spec)?; + let sync_committee = state.current_sync_committee()?.clone(); + + // Justification and finalization. + let justification_and_finalization_state = + process_justification_and_finalization(state, &participation_cache)?; + justification_and_finalization_state.apply_changes_to_state(state); + + process_inactivity_updates(state, &participation_cache, spec)?; + + // Rewards and Penalties. + process_rewards_and_penalties(state, &participation_cache, spec)?; + + // Registry Updates. + process_registry_updates(state, spec)?; + + // Slashings. + process_slashings( + state, + participation_cache.current_epoch_total_active_balance(), + spec, + )?; + + // Reset eth1 data votes. + process_eth1_data_reset(state)?; + + // Update effective balances with hysteresis (lag). + process_effective_balance_updates(state, spec)?; + + // Reset slashings + process_slashings_reset(state)?; + + // Set randao mix + process_randao_mixes_reset(state)?; + + // Set historical summaries accumulator + process_historical_summaries_update(state)?; + + // Rotate current/previous epoch participation + process_participation_flag_updates(state)?; + + process_sync_committee_updates(state, spec)?; + + // Rotate the epoch caches to suit the epoch transition. + state.advance_caches(spec)?; + + Ok(EpochProcessingSummary::Altair { + participation_cache, + sync_committee, + }) +} diff --git a/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs b/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs new file mode 100644 index 00000000000..9a87ceb6050 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs @@ -0,0 +1,23 @@ +use crate::EpochProcessingError; +use safe_arith::SafeArith; +use types::historical_summary::HistoricalSummary; +use types::{BeaconState, EthSpec}; + +pub fn process_historical_summaries_update( + state: &mut BeaconState, +) -> Result<(), EpochProcessingError> { + // Set historical block root accumulator. + let next_epoch = state.next_epoch()?; + if next_epoch + .as_u64() + .safe_rem((T::slots_per_historical_root() as u64).safe_div(T::slots_per_epoch())?)? + == 0 + { + let summary = HistoricalSummary::new(state); + return state + .historical_summaries_mut()? 
+ .push(summary) + .map_err(Into::into); + } + Ok(()) +} diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index dc759b384d8..3b933fac37a 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -1,3 +1,4 @@ +use ssz_types::VariableList; use std::mem; use types::{BeaconState, BeaconStateCapella, BeaconStateError as Error, ChainSpec, EthSpec, Fork}; @@ -55,9 +56,10 @@ pub fn upgrade_to_capella( next_sync_committee: pre.next_sync_committee.clone(), // Execution latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_capella(), - // Withdrawals + // Capella next_withdrawal_index: 0, next_withdrawal_validator_index: 0, + historical_summaries: VariableList::default(), // Caches total_active_balance: pre.total_active_balance, committee_caches: mem::take(&mut pre.committee_caches), diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index e829c01e7e2..4f6ff9d1943 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -57,9 +57,10 @@ pub fn upgrade_to_eip4844( next_sync_committee: pre.next_sync_committee.clone(), // Execution latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_eip4844(), - // Withdrawals + // Capella next_withdrawal_index: pre.next_withdrawal_index, next_withdrawal_validator_index: pre.next_withdrawal_validator_index, + historical_summaries: pre.historical_summaries.clone(), // Caches total_active_balance: pre.total_active_balance, committee_caches: mem::take(&mut pre.committee_caches), diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 625ff3d1745..f51a7bf9fb2 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -14,6 +14,7 @@ use ssz::{ssz_encode, Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::Unsigned, BitVector, FixedVector}; use std::convert::TryInto; +use std::hash::Hash; use std::{fmt, mem, sync::Arc}; use superstruct::superstruct; use swap_or_not_shuffle::compute_shuffled_index; @@ -25,6 +26,7 @@ pub use self::committee_cache::{ compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, CommitteeCache, }; +use crate::historical_summary::HistoricalSummary; pub use clone_config::CloneConfig; pub use eth_spec::*; pub use iter::BlockRootsIter; @@ -223,6 +225,7 @@ where pub block_roots: FixedVector, #[compare_fields(as_slice)] pub state_roots: FixedVector, + // Frozen in Capella, replaced by historical_summaries pub historical_roots: VariableList, // Ethereum 1.0 chain data @@ -296,11 +299,14 @@ where )] pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, - // Withdrawals + // Capella #[superstruct(only(Capella, Eip4844), partial_getter(copy))] pub next_withdrawal_index: u64, #[superstruct(only(Capella, Eip4844), partial_getter(copy))] pub next_withdrawal_validator_index: u64, + // Deep history valid from Capella onwards. 
+ #[superstruct(only(Capella, Eip4844))] + pub historical_summaries: VariableList, // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index 4cfc684f4d3..5515fb753af 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -3,6 +3,7 @@ #![allow(clippy::indexing_slicing)] use super::Error; +use crate::historical_summary::HistoricalSummaryCache; use crate::{BeaconState, EthSpec, Hash256, ParticipationList, Slot, Unsigned, Validator}; use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; use rayon::prelude::*; @@ -142,6 +143,7 @@ pub struct BeaconTreeHashCacheInner { block_roots: TreeHashCache, state_roots: TreeHashCache, historical_roots: TreeHashCache, + historical_summaries: OptionalTreeHashCache, balances: TreeHashCache, randao_mixes: TreeHashCache, slashings: TreeHashCache, @@ -164,6 +166,14 @@ impl BeaconTreeHashCacheInner { let historical_roots = state .historical_roots() .new_tree_hash_cache(&mut fixed_arena); + let historical_summaries = OptionalTreeHashCache::new( + state + .historical_summaries() + .ok() + .map(HistoricalSummaryCache::new) + .as_ref(), + ); + let randao_mixes = state.randao_mixes().new_tree_hash_cache(&mut fixed_arena); let validators = ValidatorsListTreeHashCache::new::(state.validators()); @@ -200,6 +210,7 @@ impl BeaconTreeHashCacheInner { block_roots, state_roots, historical_roots, + historical_summaries, balances, randao_mixes, slashings, @@ -249,6 +260,7 @@ impl BeaconTreeHashCacheInner { .slashings() .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)?, ]; + // Participation if let BeaconState::Base(state) = state { leaves.push(state.previous_epoch_attestations.tree_hash_root()); @@ -291,6 +303,24 @@ impl BeaconTreeHashCacheInner { if let Ok(payload_header) = state.latest_execution_payload_header() { leaves.push(payload_header.tree_hash_root()); } + + // Withdrawal indices (Capella and later). + if let Ok(next_withdrawal_index) = state.next_withdrawal_index() { + leaves.push(next_withdrawal_index.tree_hash_root()); + } + if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() { + leaves.push(next_withdrawal_validator_index.tree_hash_root()); + } + + // Historical roots/summaries (Capella and later). + if let Ok(historical_summaries) = state.historical_summaries() { + leaves.push( + self.historical_summaries.recalculate_tree_hash_root( + &HistoricalSummaryCache::new(historical_summaries), + )?, + ); + } + Ok(leaves) } @@ -335,14 +365,6 @@ impl BeaconTreeHashCacheInner { hasher.write(leaf.as_bytes())?; } - // Withdrawal indices (Capella and later). 
- if let Ok(next_withdrawal_index) = state.next_withdrawal_index() { - hasher.write(next_withdrawal_index.tree_hash_root().as_bytes())?; - } - if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() { - hasher.write(next_withdrawal_validator_index.tree_hash_root().as_bytes())?; - } - let root = hasher.finish()?; self.previous_state = Some((root, state.slot())); diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/historical_summary.rs new file mode 100644 index 00000000000..1f79ee49fad --- /dev/null +++ b/consensus/types/src/historical_summary.rs @@ -0,0 +1,88 @@ +use crate::test_utils::TestRandom; +use crate::Unsigned; +use crate::{BeaconState, EthSpec, Hash256}; +use cached_tree_hash::Error; +use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; +use compare_fields_derive::CompareFields; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use test_random_derive::TestRandom; +use tree_hash::{mix_in_length, TreeHash, BYTES_PER_CHUNK}; +use tree_hash_derive::TreeHash; + +/// `HistoricalSummary` matches the components of the phase0 `HistoricalBatch` +/// making the two hash_tree_root-compatible. This struct is introduced into the beacon state +/// in the Capella hard fork. +/// +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#historicalsummary +#[derive( + Debug, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + CompareFields, + Clone, + Copy, + Default, +)] +pub struct HistoricalSummary { + block_summary_root: Hash256, + state_summary_root: Hash256, +} + +impl HistoricalSummary { + pub fn new(state: &BeaconState) -> Self { + Self { + block_summary_root: state.block_roots().tree_hash_root(), + state_summary_root: state.state_roots().tree_hash_root(), + } + } +} + +/// Wrapper type allowing the implementation of `CachedTreeHash`. 
+#[derive(Debug)] +pub struct HistoricalSummaryCache<'a, N: Unsigned> { + pub inner: &'a VariableList, +} + +impl<'a, N: Unsigned> HistoricalSummaryCache<'a, N> { + pub fn new(inner: &'a VariableList) -> Self { + Self { inner } + } + + #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + self.inner.len() + } +} + +impl<'a, N: Unsigned> CachedTreeHash for HistoricalSummaryCache<'a, N> { + fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache { + TreeHashCache::new(arena, int_log(N::to_usize()), self.len()) + } + + fn recalculate_tree_hash_root( + &self, + arena: &mut CacheArena, + cache: &mut TreeHashCache, + ) -> Result { + Ok(mix_in_length( + &cache.recalculate_merkle_root(arena, leaf_iter(self.inner))?, + self.len(), + )) + } +} + +pub fn leaf_iter( + values: &[HistoricalSummary], +) -> impl Iterator + ExactSizeIterator + '_ { + values + .iter() + .map(|value| value.tree_hash_root()) + .map(Hash256::to_fixed_bytes) +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 6cbb9568dad..f0806e35ad8 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -49,6 +49,7 @@ pub mod fork_name; pub mod free_attestation; pub mod graffiti; pub mod historical_batch; +pub mod historical_summary; pub mod indexed_attestation; pub mod light_client_bootstrap; pub mod light_client_finality_update; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 238c7e9f167..3af485be108 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -741,7 +741,6 @@ fn main() { .value_name("PATH") .takes_value(true) .conflicts_with("beacon-url") - .requires("pre-state-path") .help("Path to load a SignedBeaconBlock from file as SSZ."), ) .arg( diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 69b18ee747a..354631e5c37 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.3.0-alpha.2 +TESTS_TAG := v1.3.0-rc.0 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 04f45ac5da3..b0982413665 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -41,8 +41,6 @@ "tests/.*/.*/ssz_static/LightClientFinalityUpdate", # Eip4844 tests are disabled for now. "tests/.*/eip4844", - # Capella tests are disabled for now. - "tests/.*/capella", # One of the EF researchers likes to pack the tarballs on a Mac ".*\.DS_Store.*", # More Mac weirdness. 
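The freezer-database change at the top of this patch clamps the current slot to the deactivation slot and subtracts the activation slot before dividing by `n`, which is what lets `HistoricalSummaries` begin indexing at the Capella fork slot while `HistoricalRoots` stops there. A stripped-down sketch of that end-index arithmetic (illustrative only; the real `Field` implementation also derives a start index from the vector length and returns an empty range rather than `None` when there is no activation slot):

    /// Simplified end-index calculation for a once-per-`n`-slots field: clamp the
    /// current slot to `deactivation_slot` (if any), then offset by `activation_slot`.
    fn end_vindex(
        current_slot: u64,
        n: u64,
        activation_slot: Option<u64>,
        deactivation_slot: Option<u64>,
    ) -> Option<u64> {
        let clamped = deactivation_slot.map_or(current_slot, |d| current_slot.min(d));
        // Saturating subtraction mirrors slot arithmetic: before activation the index is 0.
        Some(clamped.saturating_sub(activation_slot?) / n)
    }

    fn main() {
        // Active from genesis, never deactivated (e.g. block roots with n = 1).
        assert_eq!(end_vindex(100, 1, Some(0), None), Some(100));
        // Only starts accumulating at slot 64, one entry per 32 slots.
        assert_eq!(end_vindex(200, 32, Some(64), None), Some(4));
        // Frozen at slot 64: slots beyond that no longer advance the index.
        assert_eq!(end_vindex(500, 32, Some(0), Some(64)), Some(2));
    }
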
diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index b0e16e12c73..59a8ebd41c8 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -5,6 +5,7 @@ use crate::decode::{ssz_decode_state, yaml_decode_file}; use crate::type_name; use crate::type_name::TypeName; use serde_derive::Deserialize; +use state_processing::per_epoch_processing::capella::process_historical_summaries_update; use state_processing::per_epoch_processing::{ altair, base, effective_balance_updates::process_effective_balance_updates, @@ -57,6 +58,8 @@ pub struct RandaoMixesReset; #[derive(Debug)] pub struct HistoricalRootsUpdate; #[derive(Debug)] +pub struct HistoricalSummariesUpdate; +#[derive(Debug)] pub struct ParticipationRecordUpdates; #[derive(Debug)] pub struct SyncCommitteeUpdates; @@ -77,6 +80,7 @@ type_name!(EffectiveBalanceUpdates, "effective_balance_updates"); type_name!(SlashingsReset, "slashings_reset"); type_name!(RandaoMixesReset, "randao_mixes_reset"); type_name!(HistoricalRootsUpdate, "historical_roots_update"); +type_name!(HistoricalSummariesUpdate, "historical_summaries_update"); type_name!(ParticipationRecordUpdates, "participation_record_updates"); type_name!(SyncCommitteeUpdates, "sync_committee_updates"); type_name!(InactivityUpdates, "inactivity_updates"); @@ -194,7 +198,23 @@ impl EpochTransition for RandaoMixesReset { impl EpochTransition for HistoricalRootsUpdate { fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { - process_historical_roots_update(state) + match state { + BeaconState::Base(_) | BeaconState::Altair(_) | BeaconState::Merge(_) => { + process_historical_roots_update(state) + } + _ => Ok(()), + } + } +} + +impl EpochTransition for HistoricalSummariesUpdate { + fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { + match state { + BeaconState::Capella(_) | BeaconState::Eip4844(_) => { + process_historical_summaries_update(state) + } + _ => Ok(()), + } } } @@ -287,10 +307,16 @@ impl> Case for EpochProcessing { T::name() != "sync_committee_updates" && T::name() != "inactivity_updates" && T::name() != "participation_flag_updates" + && T::name() != "historical_summaries_update" } // No phase0 tests for Altair and later. - ForkName::Altair | ForkName::Merge | ForkName::Capella => { + ForkName::Altair | ForkName::Merge => { + T::name() != "participation_record_updates" + && T::name() != "historical_summaries_update" + } + ForkName::Capella => { T::name() != "participation_record_updates" + && T::name() != "historical_roots_update" } ForkName::Eip4844 => false, // TODO: revisit when tests are out } diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 66f81616b75..07db7cd2a1d 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -374,6 +374,11 @@ impl Handler for SanitySlotsHandler { fn handler_name(&self) -> String { "slots".into() } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + // Some sanity tests compute sync committees, which requires real crypto. 
+ fork_name == ForkName::Base || cfg!(not(feature = "fake_crypto")) + } } #[derive(Derivative)] diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index f52b7bca17b..5ab2b4b7b43 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,10 +1,10 @@ pub use case_result::CaseResult; pub use cases::WithdrawalsPayload; pub use cases::{ - Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates, - JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates, - RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, - SyncCommitteeUpdates, + Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, HistoricalSummariesUpdate, + InactivityUpdates, JustificationAndFinalization, ParticipationFlagUpdates, + ParticipationRecordUpdates, RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, + SlashingsReset, SyncCommitteeUpdates, }; pub use decode::log_file_access; pub use error::Error; diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index bee2d9b03df..0239293e098 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -1,4 +1,5 @@ //! Mapping from types to canonical string identifiers used in testing. +use types::historical_summary::HistoricalSummary; use types::*; pub trait TypeName { @@ -87,3 +88,4 @@ type_name!(VoluntaryExit); type_name!(Withdrawal); type_name!(BlsToExecutionChange, "BLSToExecutionChange"); type_name!(SignedBlsToExecutionChange, "SignedBLSToExecutionChange"); +type_name!(HistoricalSummary); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index f84be64dad9..8a7209b89b1 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -215,6 +215,7 @@ macro_rules! 
ssz_static_test_no_run { #[cfg(feature = "fake_crypto")] mod ssz_static { use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler}; + use types::historical_summary::HistoricalSummary; use types::*; ssz_static_test!(aggregate_and_proof, AggregateAndProof<_>); @@ -357,6 +358,12 @@ mod ssz_static { SszStaticHandler::::capella_only().run(); SszStaticHandler::::capella_only().run(); } + + #[test] + fn historical_summary() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); + } } #[test] @@ -423,6 +430,12 @@ fn epoch_processing_historical_roots_update() { EpochProcessingHandler::::default().run(); } +#[test] +fn epoch_processing_historical_summaries_update() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + #[test] fn epoch_processing_participation_record_updates() { EpochProcessingHandler::::default().run(); From 52c1055fdcf2c416948ea0ea12833f50195f3c8e Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Wed, 11 Jan 2023 22:15:08 -0600 Subject: [PATCH 112/263] Remove `withdrawals-processing` feature (#3864) * Use spec to Determine Supported Engine APIs * Remove `withdrawals-processing` feature * Fixed Tests * Missed Some Spots * Fixed Another Test * Stupid Clippy --- .github/workflows/docker.yml | 2 +- Makefile | 8 +-- beacon_node/Cargo.toml | 5 -- beacon_node/beacon_chain/Cargo.toml | 4 -- beacon_node/beacon_chain/src/test_utils.rs | 10 ++- beacon_node/client/src/builder.rs | 1 + beacon_node/eth1/src/inner.rs | 2 +- beacon_node/eth1/src/service.rs | 17 +++-- beacon_node/eth1/tests/test.rs | 17 +++-- beacon_node/execution_layer/Cargo.toml | 2 - beacon_node/execution_layer/src/engine_api.rs | 2 +- .../execution_layer/src/engine_api/http.rs | 67 ++++++++++++++----- beacon_node/execution_layer/src/lib.rs | 12 +++- .../src/test_utils/mock_builder.rs | 3 +- .../src/test_utils/mock_execution_layer.rs | 26 +++---- beacon_node/store/Cargo.toml | 5 +- consensus/state_processing/Cargo.toml | 1 - .../src/per_block_processing.rs | 7 -- .../process_operations.rs | 3 - lighthouse/Cargo.toml | 2 - .../src/test_rig.rs | 9 ++- 21 files changed, 113 insertions(+), 92 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index c0a02adf4ed..ad1bb0d79aa 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -66,7 +66,7 @@ jobs: DOCKER_CLI_EXPERIMENTAL: enabled VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} - CROSS_FEATURES: withdrawals-processing + CROSS_FEATURES: null steps: - uses: actions/checkout@v3 - name: Update Rust diff --git a/Makefile b/Makefile index 41721c2d656..c81f43b0e02 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx CROSS_PROFILE ?= release # List of features to use when running EF tests. -EF_TEST_FEATURES ?= beacon_chain/withdrawals-processing +EF_TEST_FEATURES ?= # Cargo profile for regular builds. PROFILE ?= release @@ -89,12 +89,12 @@ build-release-tarballs: # Runs the full workspace tests in **release**, without downloading any additional # test vectors. 
test-release: - cargo test --workspace --features withdrawals-processing --release --exclude ef_tests --exclude beacon_chain --exclude slasher + cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher # Runs the full workspace tests in **debug**, without downloading any additional test # vectors. test-debug: - cargo test --workspace --features withdrawals-processing --exclude ef_tests --exclude beacon_chain + cargo test --workspace --exclude ef_tests --exclude beacon_chain # Runs cargo-fmt (linter). cargo-fmt: @@ -120,7 +120,7 @@ run-ef-tests: test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS)) test-beacon-chain-%: - env FORK_NAME=$* cargo test --release --features fork_from_env,withdrawals-processing -p beacon_chain + env FORK_NAME=$* cargo test --release --features fork_from_env -p beacon_chain # Run the tests in the `operation_pool` crate for all known forks. test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS)) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index d6b0b643a44..d47f77da930 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -13,11 +13,6 @@ node_test_rig = { path = "../testing/node_test_rig" } [features] write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing. -withdrawals-processing = [ - "beacon_chain/withdrawals-processing", - "store/withdrawals-processing", - "execution_layer/withdrawals-processing", -] [dependencies] eth2_config = { path = "../common/eth2_config" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index c89f1650e31..5b85833048b 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -10,10 +10,6 @@ default = ["participation_metrics"] write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing. participation_metrics = [] # Exposes validator participation metrics to Prometheus. 
fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable -withdrawals-processing = [ - "state_processing/withdrawals-processing", - "execution_layer/withdrawals-processing", -] [dev-dependencies] maplit = "1.0.2" diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index c4bb9f3d860..7de76d2351e 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -366,6 +366,7 @@ where .collect::>() .unwrap(); + let spec = MainnetEthSpec::default_spec(); let config = execution_layer::Config { execution_endpoints: urls, secret_files: vec![], @@ -376,6 +377,7 @@ where config, self.runtime.task_executor.clone(), self.log.clone(), + &spec, ) .unwrap(); @@ -414,13 +416,11 @@ where }); let mock = MockExecutionLayer::new( self.runtime.task_executor.clone(), - spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK, - spec.terminal_block_hash, - spec.terminal_block_hash_activation_epoch, shanghai_time, eip4844_time, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + spec, None, ); self.execution_layer = Some(mock.el.clone()); @@ -442,13 +442,11 @@ where }); let mock_el = MockExecutionLayer::new( self.runtime.task_executor.clone(), - spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK, - spec.terminal_block_hash, - spec.terminal_block_hash_activation_epoch, shanghai_time, eip4844_time, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + spec.clone(), Some(builder_url.clone()), ) .move_to_terminal_block(); diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index f3e937b2e5f..028a160e873 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -154,6 +154,7 @@ where config, context.executor.clone(), context.log().clone(), + &spec, ) .map_err(|e| format!("unable to start execution layer endpoints: {:?}", e))?; Some(execution_layer) diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs index 0468a02d2e3..a44b31050fe 100644 --- a/beacon_node/eth1/src/inner.rs +++ b/beacon_node/eth1/src/inner.rs @@ -122,7 +122,7 @@ impl SszEth1Cache { cache: self.deposit_cache.to_deposit_cache()?, last_processed_block: self.last_processed_block, }), - endpoint: endpoint_from_config(&config) + endpoint: endpoint_from_config(&config, &spec) .map_err(|e| format!("Failed to create endpoint: {:?}", e))?, to_finalize: RwLock::new(None), // Set the remote head_block zero when creating a new instance. 
We only care about diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 31082394baf..56c2411ba18 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -363,7 +363,7 @@ impl Default for Config { } } -pub fn endpoint_from_config(config: &Config) -> Result { +pub fn endpoint_from_config(config: &Config, spec: &ChainSpec) -> Result { match config.endpoint.clone() { Eth1Endpoint::Auth { endpoint, @@ -373,11 +373,16 @@ pub fn endpoint_from_config(config: &Config) -> Result { } => { let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version) .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?; - HttpJsonRpc::new_with_auth(endpoint, auth, Some(config.execution_timeout_multiplier)) - .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) + HttpJsonRpc::new_with_auth( + endpoint, + auth, + Some(config.execution_timeout_multiplier), + spec, + ) + .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) } Eth1Endpoint::NoAuth(endpoint) => { - HttpJsonRpc::new(endpoint, Some(config.execution_timeout_multiplier)) + HttpJsonRpc::new(endpoint, Some(config.execution_timeout_multiplier), spec) .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) } } @@ -404,7 +409,7 @@ impl Service { deposit_cache: RwLock::new(DepositUpdater::new( config.deposit_contract_deploy_block, )), - endpoint: endpoint_from_config(&config)?, + endpoint: endpoint_from_config(&config, &spec)?, to_finalize: RwLock::new(None), remote_head_block: RwLock::new(None), config: RwLock::new(config), @@ -433,7 +438,7 @@ impl Service { inner: Arc::new(Inner { block_cache: <_>::default(), deposit_cache: RwLock::new(deposit_cache), - endpoint: endpoint_from_config(&config) + endpoint: endpoint_from_config(&config, &spec) .map_err(Error::FailedToInitializeFromSnapshot)?, to_finalize: RwLock::new(None), remote_head_block: RwLock::new(None), diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 069a6e4aade..eb0d2371cb0 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -494,7 +494,8 @@ mod deposit_tree { let mut deposit_counts = vec![]; let client = - HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); + HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None, spec) + .unwrap(); // Perform deposits to the smart contract, recording it's state along the way. 
for deposit in &deposits { @@ -598,8 +599,12 @@ mod http { .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); - let client = - HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); + let client = HttpJsonRpc::new( + SensitiveUrl::parse(ð1.endpoint()).unwrap(), + None, + &MainnetEthSpec::default_spec(), + ) + .unwrap(); let block_number = get_block_number(&web3).await; let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; @@ -697,6 +702,7 @@ mod fast { let web3 = eth1.web3(); let now = get_block_number(&web3).await; + let spec = MainnetEthSpec::default_spec(); let service = Service::new( Config { endpoint: Eth1Endpoint::NoAuth( @@ -710,11 +716,12 @@ mod fast { ..Config::default() }, log, - MainnetEthSpec::default_spec(), + spec.clone(), ) .unwrap(); let client = - HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); + HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None, &spec) + .unwrap(); let n = 10; let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); for deposit in &deposits { diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 30312939d6f..5d6339996b4 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -4,8 +4,6 @@ version = "0.1.0" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html -[features] -withdrawals-processing = ["state_processing/withdrawals-processing"] [dependencies] types = { path = "../../consensus/types"} diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 80cdeacb34f..4970361a5a9 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -329,7 +329,7 @@ pub struct ProposeBlindedBlockResponse { // This name is work in progress, it could // change when this method is actually proposed // but I'm writing this as it has been described -#[derive(Clone, Copy)] +#[derive(Clone, Copy, Debug)] pub struct SupportedApis { pub new_payload_v1: bool, pub new_payload_v2: bool, diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 06abe7274f0..8ad3066f7f4 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -10,7 +10,7 @@ use serde_json::json; use tokio::sync::RwLock; use std::time::Duration; -use types::EthSpec; +use types::{ChainSpec, EthSpec}; pub use deposit_log::{DepositLog, Log}; pub use reqwest::Client; @@ -538,12 +538,27 @@ impl HttpJsonRpc { pub fn new( url: SensitiveUrl, execution_timeout_multiplier: Option, + spec: &ChainSpec, ) -> Result { + // FIXME: remove this `cached_supported_apis` spec hack once the `engine_getCapabilities` + // method is implemented in all execution clients: + // https://github.com/ethereum/execution-apis/issues/321 + let cached_supported_apis = RwLock::new(Some(SupportedApis { + new_payload_v1: true, + new_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(), + forkchoice_updated_v1: true, + forkchoice_updated_v2: spec.capella_fork_epoch.is_some() + || spec.eip4844_fork_epoch.is_some(), + get_payload_v1: true, + get_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(), + exchange_transition_configuration_v1: true, + })); + 
Ok(Self { client: Client::builder().build()?, url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), - cached_supported_apis: Default::default(), + cached_supported_apis, auth: None, }) } @@ -552,12 +567,27 @@ impl HttpJsonRpc { url: SensitiveUrl, auth: Auth, execution_timeout_multiplier: Option, + spec: &ChainSpec, ) -> Result { + // FIXME: remove this `cached_supported_apis` spec hack once the `engine_getCapabilities` + // method is implemented in all execution clients: + // https://github.com/ethereum/execution-apis/issues/321 + let cached_supported_apis = RwLock::new(Some(SupportedApis { + new_payload_v1: true, + new_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(), + forkchoice_updated_v1: true, + forkchoice_updated_v2: spec.capella_fork_epoch.is_some() + || spec.eip4844_fork_epoch.is_some(), + get_payload_v1: true, + get_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(), + exchange_transition_configuration_v1: true, + })); + Ok(Self { client: Client::builder().build()?, url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), - cached_supported_apis: Default::default(), + cached_supported_apis, auth: Some(auth), }) } @@ -848,21 +878,25 @@ impl HttpJsonRpc { Ok(response) } - // this is a stub as this method hasn't been defined yet - pub async fn supported_apis_v1(&self) -> Result { + // TODO: This is currently a stub for the `engine_getCapabilities` + // method. This stub is unused because we set cached_supported_apis + // in the constructor based on the `spec` + // Implement this once the execution clients support it + // https://github.com/ethereum/execution-apis/issues/321 + pub async fn get_capabilities(&self) -> Result { Ok(SupportedApis { new_payload_v1: true, - new_payload_v2: cfg!(any(feature = "withdrawals-processing", test)), + new_payload_v2: true, forkchoice_updated_v1: true, - forkchoice_updated_v2: cfg!(any(feature = "withdrawals-processing", test)), + forkchoice_updated_v2: true, get_payload_v1: true, - get_payload_v2: cfg!(any(feature = "withdrawals-processing", test)), + get_payload_v2: true, exchange_transition_configuration_v1: true, }) } - pub async fn set_cached_supported_apis(&self, supported_apis: SupportedApis) { - *self.cached_supported_apis.write().await = Some(supported_apis); + pub async fn set_cached_supported_apis(&self, supported_apis: Option) { + *self.cached_supported_apis.write().await = supported_apis; } pub async fn get_cached_supported_apis(&self) -> Result { @@ -870,8 +904,8 @@ impl HttpJsonRpc { if let Some(supported_apis) = cached_opt { Ok(supported_apis) } else { - let supported_apis = self.supported_apis_v1().await?; - self.set_cached_supported_apis(supported_apis).await; + let supported_apis = self.get_capabilities().await?; + self.set_cached_supported_apis(Some(supported_apis)).await; Ok(supported_apis) } } @@ -955,6 +989,7 @@ mod test { impl Tester { pub fn new(with_auth: bool) -> Self { let server = MockServer::unit_testing(); + let spec = MainnetEthSpec::default_spec(); let rpc_url = SensitiveUrl::parse(&server.url()).unwrap(); let echo_url = SensitiveUrl::parse(&format!("{}/echo", server.url())).unwrap(); @@ -965,13 +1000,13 @@ mod test { let echo_auth = Auth::new(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), None, None); ( - Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth, None).unwrap()), - Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth, None).unwrap()), + Arc::new(HttpJsonRpc::new_with_auth(rpc_url, 
rpc_auth, None, &spec).unwrap()), + Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth, None, &spec).unwrap()), ) } else { ( - Arc::new(HttpJsonRpc::new(rpc_url, None).unwrap()), - Arc::new(HttpJsonRpc::new(echo_url, None).unwrap()), + Arc::new(HttpJsonRpc::new(rpc_url, None, &spec).unwrap()), + Arc::new(HttpJsonRpc::new(echo_url, None, &spec).unwrap()), ) }; diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index d79ac0c3645..355977b6a2c 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -258,7 +258,12 @@ pub struct ExecutionLayer { impl ExecutionLayer { /// Instantiate `Self` with an Execution engine specified in `Config`, using JSON-RPC via HTTP. - pub fn from_config(config: Config, executor: TaskExecutor, log: Logger) -> Result { + pub fn from_config( + config: Config, + executor: TaskExecutor, + log: Logger, + spec: &ChainSpec, + ) -> Result { let Config { execution_endpoints: urls, builder_url, @@ -313,8 +318,9 @@ impl ExecutionLayer { let engine: Engine = { let auth = Auth::new(jwt_key, jwt_id, jwt_version); debug!(log, "Loaded execution endpoint"; "endpoint" => %execution_url, "jwt_path" => ?secret_file.as_path()); - let api = HttpJsonRpc::new_with_auth(execution_url, auth, execution_timeout_multiplier) - .map_err(Error::ApiError)?; + let api = + HttpJsonRpc::new_with_auth(execution_url, auth, execution_timeout_multiplier, spec) + .map_err(Error::ApiError)?; Engine::new(api, executor.clone(), &log) }; diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 06b5e81eb31..8ce4a65564a 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -84,7 +84,8 @@ impl TestingBuilder { }; let el = - ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); + ExecutionLayer::from_config(config, executor.clone(), executor.log().clone(), &spec) + .unwrap(); // This should probably be done for all fields, we only update ones we are testing with so far. 
let mut context = Context::for_mainnet(); diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 89e0344d9ac..d061f13a6b5 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -9,7 +9,7 @@ use sensitive_url::SensitiveUrl; use task_executor::TaskExecutor; use tempfile::NamedTempFile; use tree_hash::TreeHash; -use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, Uint256}; +use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, MainnetEthSpec}; pub struct MockExecutionLayer { pub server: MockServer, @@ -20,15 +20,17 @@ pub struct MockExecutionLayer { impl MockExecutionLayer { pub fn default_params(executor: TaskExecutor) -> Self { + let mut spec = MainnetEthSpec::default_spec(); + spec.terminal_total_difficulty = DEFAULT_TERMINAL_DIFFICULTY.into(); + spec.terminal_block_hash = ExecutionBlockHash::zero(); + spec.terminal_block_hash_activation_epoch = Epoch::new(0); Self::new( executor, - DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, - ExecutionBlockHash::zero(), - Epoch::new(0), None, None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + spec, None, ) } @@ -36,29 +38,22 @@ impl MockExecutionLayer { #[allow(clippy::too_many_arguments)] pub fn new( executor: TaskExecutor, - terminal_total_difficulty: Uint256, terminal_block: u64, - terminal_block_hash: ExecutionBlockHash, - terminal_block_hash_activation_epoch: Epoch, shanghai_time: Option, eip4844_time: Option, jwt_key: Option, + spec: ChainSpec, builder_url: Option, ) -> Self { let handle = executor.handle().unwrap(); - let mut spec = T::default_spec(); - spec.terminal_total_difficulty = terminal_total_difficulty; - spec.terminal_block_hash = terminal_block_hash; - spec.terminal_block_hash_activation_epoch = terminal_block_hash_activation_epoch; - let jwt_key = jwt_key.unwrap_or_else(JwtKey::random); let server = MockServer::new( &handle, jwt_key, - terminal_total_difficulty, + spec.terminal_total_difficulty, terminal_block, - terminal_block_hash, + spec.terminal_block_hash, shanghai_time, eip4844_time, ); @@ -78,7 +73,8 @@ impl MockExecutionLayer { ..Default::default() }; let el = - ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); + ExecutionLayer::from_config(config, executor.clone(), executor.log().clone(), &spec) + .unwrap(); Self { server, diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 897f6b020c7..7ec2af9f9db 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -25,7 +25,4 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lru = "0.7.1" sloggers = { version = "2.1.1", features = ["json"] } directory = { path = "../../common/directory" } -strum = { version = "0.24.0", features = ["derive"] } - -[features] -withdrawals-processing = ["state_processing/withdrawals-processing"] \ No newline at end of file +strum = { version = "0.24.0", features = ["derive"] } \ No newline at end of file diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 0b79539877a..ccb41830be8 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -43,4 +43,3 @@ arbitrary-fuzz = [ "eth2_ssz_types/arbitrary", "tree_hash/arbitrary", ] -withdrawals-processing = [] diff --git 
a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 0192fe0cec2..8170b27e7cb 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -474,10 +474,6 @@ pub fn get_expected_withdrawals( let mut validator_index = state.next_withdrawal_validator_index()?; let mut withdrawals = vec![]; - if cfg!(not(feature = "withdrawals-processing")) { - return Ok(withdrawals.into()); - } - let bound = std::cmp::min( state.validators().len() as u64, spec.max_validators_per_withdrawals_sweep, @@ -525,9 +521,6 @@ pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload payload: Payload::Ref<'payload>, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - if cfg!(not(feature = "withdrawals-processing")) { - return Ok(()); - } match state { BeaconState::Merge(_) => Ok(()), BeaconState::Capella(_) | BeaconState::Eip4844(_) => { diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index eacb7617c78..48c524fd422 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -300,9 +300,6 @@ pub fn process_bls_to_execution_changes( verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - if cfg!(not(feature = "withdrawals-processing")) { - return Ok(()); - } for (i, signed_address_change) in bls_to_execution_changes.iter().enumerate() { verify_bls_to_execution_change(state, signed_address_change, verify_signatures, spec) .map_err(|e| e.into_with_index(i))?; diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index be022e311d3..1e66d7c0496 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -24,8 +24,6 @@ gnosis = [] slasher-mdbx = ["slasher/mdbx"] # Support slasher LMDB backend. slasher-lmdb = ["slasher/lmdb"] -# Support for withdrawals consensus processing logic. 
-withdrawals-processing = ["beacon_node/withdrawals-processing"] [dependencies] beacon_node = { "path" = "../beacon_node" } diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 4dab00689c9..2daacb0add9 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -110,6 +110,8 @@ impl TestRig { let (runtime_shutdown, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); + let mut spec = MainnetEthSpec::default_spec(); + spec.terminal_total_difficulty = Uint256::zero(); let fee_recipient = None; @@ -125,7 +127,7 @@ impl TestRig { ..Default::default() }; let execution_layer = - ExecutionLayer::from_config(config, executor.clone(), log.clone()).unwrap(); + ExecutionLayer::from_config(config, executor.clone(), log.clone(), &spec).unwrap(); ExecutionPair { execution_engine, execution_layer, @@ -144,16 +146,13 @@ impl TestRig { ..Default::default() }; let execution_layer = - ExecutionLayer::from_config(config, executor, log.clone()).unwrap(); + ExecutionLayer::from_config(config, executor, log.clone(), &spec).unwrap(); ExecutionPair { execution_engine, execution_layer, } }; - let mut spec = MainnetEthSpec::default_spec(); - spec.terminal_total_difficulty = Uint256::zero(); - Self { runtime, ee_a, From 56e6b3557a747ec251299285bd0ad4d93245d0d5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 12 Jan 2023 15:17:03 +1100 Subject: [PATCH 113/263] Fix Arbitrary implementations (#3867) * Fix Arbitrary implementations * Remove remaining vestiges of arbitrary-fuzz * Remove FIXME * Clippy --- Cargo.lock | 62 ++++++++++++++----- Cargo.toml | 1 + consensus/ssz_types/src/bitfield.rs | 4 +- consensus/ssz_types/src/fixed_vector.rs | 2 +- consensus/ssz_types/src/variable_list.rs | 2 +- consensus/types/Cargo.toml | 28 ++++----- consensus/types/src/aggregate_and_proof.rs | 15 ++++- consensus/types/src/attestation.rs | 13 +++- consensus/types/src/attestation_data.rs | 2 +- consensus/types/src/attestation_duty.rs | 3 +- consensus/types/src/attester_slashing.rs | 13 +++- consensus/types/src/beacon_block.rs | 18 ++++-- consensus/types/src/beacon_block_body.rs | 17 +++-- consensus/types/src/beacon_block_header.rs | 14 ++++- consensus/types/src/beacon_committee.rs | 3 +- consensus/types/src/beacon_state.rs | 10 +-- .../types/src/beacon_state/committee_cache.rs | 1 - .../types/src/beacon_state/exit_cache.rs | 1 - .../types/src/beacon_state/pubkey_cache.rs | 1 - .../types/src/beacon_state/tree_hash_cache.rs | 1 - consensus/types/src/blobs_sidecar.rs | 15 ++++- .../types/src/bls_to_execution_change.rs | 13 +++- consensus/types/src/chain_spec.rs | 3 +- consensus/types/src/checkpoint.rs | 2 +- consensus/types/src/contribution_and_proof.rs | 15 ++++- consensus/types/src/deposit.rs | 13 +++- consensus/types/src/deposit_data.rs | 13 +++- consensus/types/src/deposit_message.rs | 14 ++++- consensus/types/src/enr_fork_id.rs | 13 +++- consensus/types/src/eth1_data.rs | 2 +- consensus/types/src/eth_spec.rs | 13 ++-- consensus/types/src/execution_block_hash.rs | 14 ++++- consensus/types/src/execution_payload.rs | 9 ++- .../types/src/execution_payload_header.rs | 9 ++- consensus/types/src/fork.rs | 2 +- consensus/types/src/fork_data.rs | 13 +++- consensus/types/src/free_attestation.rs | 3 +- consensus/types/src/graffiti.rs | 2 +- 
consensus/types/src/historical_batch.rs | 15 ++++- consensus/types/src/indexed_attestation.rs | 13 +++- consensus/types/src/kzg_commitment.rs | 4 +- consensus/types/src/kzg_proof.rs | 13 +++- consensus/types/src/light_client_bootstrap.rs | 14 ++++- .../types/src/light_client_finality_update.rs | 14 ++++- .../src/light_client_optimistic_update.rs | 14 ++++- consensus/types/src/light_client_update.rs | 14 ++++- consensus/types/src/participation_flags.rs | 2 +- consensus/types/src/payload.rs | 23 +++++-- consensus/types/src/pending_attestation.rs | 26 ++++---- consensus/types/src/proposer_slashing.rs | 14 ++++- consensus/types/src/relative_epoch.rs | 6 +- consensus/types/src/selection_proof.rs | 3 +- .../types/src/signed_aggregate_and_proof.rs | 15 ++++- consensus/types/src/signed_beacon_block.rs | 16 ++--- .../types/src/signed_beacon_block_header.rs | 14 ++++- .../src/signed_bls_to_execution_change.rs | 13 +++- .../src/signed_contribution_and_proof.rs | 15 ++++- consensus/types/src/signed_voluntary_exit.rs | 13 +++- consensus/types/src/signing_data.rs | 14 ++++- consensus/types/src/slot_epoch.rs | 30 +++++++-- consensus/types/src/subnet_id.rs | 3 +- consensus/types/src/sync_aggregate.rs | 13 +++- .../src/sync_aggregator_selection_data.rs | 13 +++- consensus/types/src/sync_committee.rs | 15 ++++- .../types/src/sync_committee_contribution.rs | 15 ++++- consensus/types/src/sync_committee_message.rs | 14 ++++- consensus/types/src/sync_selection_proof.rs | 3 +- consensus/types/src/sync_subnet_id.rs | 3 +- consensus/types/src/validator.rs | 14 ++++- consensus/types/src/voluntary_exit.rs | 13 +++- consensus/types/src/withdrawal.rs | 14 ++++- 71 files changed, 610 insertions(+), 194 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c2a78cb524c..c26dd1cf44a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -156,9 +156,8 @@ checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" [[package]] name = "arbitrary" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29d47fbf90d5149a107494b15a7dc8d69b351be2db3bb9691740e88ec17fd880" +version = "1.2.2" +source = "git+https://github.com/michaelsproul/arbitrary?rev=a572fd8743012a4f1ada5ee5968b1b3619c427ba#a572fd8743012a4f1ada5ee5968b1b3619c427ba" dependencies = [ "derive_arbitrary", ] @@ -1231,8 +1230,18 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.13.4", + "darling_macro 0.13.4", +] + +[[package]] +name = "darling" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa" +dependencies = [ + "darling_core 0.14.2", + "darling_macro 0.14.2", ] [[package]] @@ -1249,13 +1258,38 @@ dependencies = [ "syn", ] +[[package]] +name = "darling_core" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn", +] + [[package]] name = "darling_macro" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core", + "darling_core 0.13.4", + "quote", + "syn", +] + 
+[[package]] +name = "darling_macro" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" +dependencies = [ + "darling_core 0.14.2", "quote", "syn", ] @@ -1357,10 +1391,10 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4903dff04948f22033ca30232ab8eca2c3fc4c913a8b6a34ee5199699814817f" +version = "1.2.2" +source = "git+https://github.com/michaelsproul/arbitrary?rev=a572fd8743012a4f1ada5ee5968b1b3619c427ba#a572fd8743012a4f1ada5ee5968b1b3619c427ba" dependencies = [ + "darling 0.14.2", "proc-macro2", "quote", "syn", @@ -1856,7 +1890,7 @@ dependencies = [ name = "eth2_ssz_derive" version = "0.3.1" dependencies = [ - "darling", + "darling 0.13.4", "eth2_ssz", "proc-macro2", "quote", @@ -5791,7 +5825,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling", + "darling 0.13.4", "proc-macro2", "quote", "syn", @@ -6353,7 +6387,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a99807a055ff4ff5d249bb84c80d9eabb55ca3c452187daae43fd5b51ef695" dependencies = [ - "darling", + "darling 0.13.4", "itertools", "proc-macro2", "quote", @@ -6367,7 +6401,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75b9e5728aa1a87141cefd4e7509903fc01fa0dcb108022b1e841a67c5159fc5" dependencies = [ - "darling", + "darling 0.13.4", "itertools", "proc-macro2", "quote", @@ -6991,7 +7025,7 @@ dependencies = [ name = "tree_hash_derive" version = "0.4.0" dependencies = [ - "darling", + "darling 0.13.4", "quote", "syn", ] diff --git a/Cargo.toml b/Cargo.toml index e254400e88b..c32dd7d69d8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -100,6 +100,7 @@ eth2_hashing = { path = "crypto/eth2_hashing" } tree_hash = { path = "consensus/tree_hash" } tree_hash_derive = { path = "consensus/tree_hash_derive" } eth2_serde_utils = { path = "consensus/serde_utils" } +arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="a572fd8743012a4f1ada5ee5968b1b3619c427ba" } [profile.maxperf] inherits = "release" diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs index 0539cc7d2c6..b7bde225786 100644 --- a/consensus/ssz_types/src/bitfield.rs +++ b/consensus/ssz_types/src/bitfield.rs @@ -660,7 +660,7 @@ impl arbitrary::Arbitrary<'_> for Bitfield> { let size = N::to_usize(); let mut vec = smallvec![0u8; size]; u.fill_buffer(&mut vec)?; - Ok(Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?) + Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat) } } @@ -672,7 +672,7 @@ impl arbitrary::Arbitrary<'_> for Bitfield> { let size = std::cmp::min(rand, max_size); let mut vec = smallvec![0u8; size]; u.fill_buffer(&mut vec)?; - Ok(Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?) 
+ Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat) } } diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs index 1ad82a38419..9625f27f3ab 100644 --- a/consensus/ssz_types/src/fixed_vector.rs +++ b/consensus/ssz_types/src/fixed_vector.rs @@ -291,7 +291,7 @@ impl<'a, T: arbitrary::Arbitrary<'a>, N: 'static + Unsigned> arbitrary::Arbitrar for _ in 0..size { vec.push(::arbitrary(u)?); } - Ok(Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?) + Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat) } } diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs index a342b361ed9..ef1f113bbde 100644 --- a/consensus/ssz_types/src/variable_list.rs +++ b/consensus/ssz_types/src/variable_list.rs @@ -273,7 +273,7 @@ impl<'a, T: arbitrary::Arbitrary<'a>, N: 'static + Unsigned> arbitrary::Arbitrar for _ in 0..size { vec.push(::arbitrary(u)?); } - Ok(Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?) + Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat) } } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 671cacfa2eb..7459d4eb3aa 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -11,11 +11,11 @@ harness = false [dependencies] serde-big-array = {version = "0.3.2", features = ["const-generics"]} merkle_proof = { path = "../../consensus/merkle_proof" } -bls = { path = "../../crypto/bls" } +bls = { path = "../../crypto/bls", features = ["arbitrary"] } compare_fields = { path = "../../common/compare_fields" } compare_fields_derive = { path = "../../common/compare_fields_derive" } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } -ethereum-types = "0.12.1" +ethereum-types = { version = "0.12.1", features = ["arbitrary"] } eth2_hashing = "0.3.0" hex = "0.4.2" int_to_bytes = { path = "../int_to_bytes" } @@ -26,12 +26,12 @@ safe_arith = { path = "../safe_arith" } serde = {version = "1.0.116" , features = ["rc"] } serde_derive = "1.0.116" slog = "2.5.2" -eth2_ssz = "0.4.1" +eth2_ssz = { version = "0.4.1", features = ["arbitrary"] } eth2_ssz_derive = "0.3.1" -eth2_ssz_types = "0.2.2" -swap_or_not_shuffle = { path = "../swap_or_not_shuffle" } +eth2_ssz_types = { version = "0.2.2", features = ["arbitrary"] } +swap_or_not_shuffle = { path = "../swap_or_not_shuffle", features = ["arbitrary"] } test_random_derive = { path = "../../common/test_random_derive" } -tree_hash = "0.4.1" +tree_hash = { version = "0.4.1", features = ["arbitrary"] } tree_hash_derive = "0.4.0" rand_xorshift = "0.3.0" cached_tree_hash = { path = "../cached_tree_hash" } @@ -39,7 +39,9 @@ serde_yaml = "0.8.13" tempfile = "3.1.0" derivative = "2.1.1" rusqlite = { version = "0.25.3", features = ["bundled"], optional = true } -arbitrary = { version = "1.0", features = ["derive"], optional = true } +# The arbitrary dependency is enabled by default since Capella to avoid complexity introduced by +# `AbstractExecPayload` +arbitrary = { version = "1.0", features = ["derive"] } eth2_serde_utils = "0.1.1" regex = "1.5.5" lazy_static = "1.4.0" @@ -63,12 +65,6 @@ default = ["sqlite", "legacy-arith"] # Allow saturating arithmetic on slots and epochs. Enabled by default, but deprecated. 
legacy-arith = [] sqlite = ["rusqlite"] -arbitrary-fuzz = [ - "arbitrary", - "ethereum-types/arbitrary", - "bls/arbitrary", - "eth2_ssz/arbitrary", - "eth2_ssz_types/arbitrary", - "swap_or_not_shuffle/arbitrary", - "tree_hash/arbitrary", -] +# The `arbitrary-fuzz` feature is a no-op provided for backwards compatibility. +# For simplicity `Arbitrary` is now derived regardless of the feature's presence. +arbitrary-fuzz = [] diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index 19c8f8a0a80..39a0a28c0ce 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -11,9 +11,20 @@ use tree_hash_derive::TreeHash; /// A Validators aggregate attestation and selection proof. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[derive( + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct AggregateAndProof { /// The index of the validator that created the attestation. #[serde(with = "eth2_serde_utils::quoted_u64")] diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 12586e28d5e..5c333e0d456 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -23,12 +23,21 @@ pub enum Error { /// Details an attestation that can be slashable. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, + arbitrary::Arbitrary, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, )] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct Attestation { pub aggregation_bits: BitList, pub data: AttestationData, diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index 8792a3c56d7..c6a661c85dd 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation_data.rs @@ -10,8 +10,8 @@ use tree_hash_derive::TreeHash; /// The data upon which an attestation is based. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( + arbitrary::Arbitrary, Debug, Clone, PartialEq, diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation_duty.rs index ecfa613ed44..87a9c932a45 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation_duty.rs @@ -1,8 +1,7 @@ use crate::*; use serde_derive::{Deserialize, Serialize}; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] +#[derive(arbitrary::Arbitrary, Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] pub struct AttestationDuty { /// The slot during which the attester must attest. 
pub slot: Slot, diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/attester_slashing.rs index b239f62e46c..c5634950745 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/attester_slashing.rs @@ -9,12 +9,21 @@ use tree_hash_derive::TreeHash; /// Two conflicting attestations. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Derivative, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + Derivative, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, )] #[derivative(PartialEq, Eq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct AttesterSlashing { pub attestation_1: IndexedAttestation, pub attestation_2: IndexedAttestation, diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index fd38e9faf26..f7b9790b4dc 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -29,10 +29,14 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, Derivative, + arbitrary::Arbitrary ), - derivative(PartialEq, Hash(bound = "T: EthSpec, Payload: ExecPayload")), - serde(bound = "T: EthSpec, Payload: ExecPayload", deny_unknown_fields), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), + derivative(PartialEq, Hash(bound = "T: EthSpec, Payload: AbstractExecPayload")), + serde( + bound = "T: EthSpec, Payload: AbstractExecPayload", + deny_unknown_fields + ), + arbitrary(bound = "T: EthSpec, Payload: AbstractExecPayload"), ), ref_attributes( derive(Debug, PartialEq, TreeHash), @@ -41,11 +45,13 @@ use tree_hash_derive::TreeHash; map_ref_into(BeaconBlockBodyRef, BeaconBlock), map_ref_mut_into(BeaconBlockBodyRefMut) )] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] +#[derive( + Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, +)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(untagged)] -#[serde(bound = "T: EthSpec, Payload: ExecPayload")] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload")] +#[arbitrary(bound = "T: EthSpec, Payload: AbstractExecPayload")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] pub struct BeaconBlock = FullPayload> { diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index dbdbcddb1b8..28c9213d1f4 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -26,19 +26,23 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, Derivative, + arbitrary::Arbitrary ), - derivative(PartialEq, Hash(bound = "T: EthSpec, Payload: ExecPayload")), - serde(bound = "T: EthSpec, Payload: ExecPayload", deny_unknown_fields), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) + derivative(PartialEq, Hash(bound = "T: EthSpec, Payload: AbstractExecPayload")), + serde( + bound = "T: EthSpec, Payload: AbstractExecPayload", + deny_unknown_fields + ), + arbitrary(bound = "T: EthSpec, Payload: AbstractExecPayload"), ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] -#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] +#[derive(Debug, Clone, Serialize, Deserialize, 
Derivative, arbitrary::Arbitrary)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(untagged)] -#[serde(bound = "T: EthSpec, Payload: ExecPayload")] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload")] +#[arbitrary(bound = "T: EthSpec, Payload: AbstractExecPayload")] pub struct BeaconBlockBody = FullPayload> { pub randao_reveal: Signature, pub eth1_data: Eth1Data, @@ -71,6 +75,7 @@ pub struct BeaconBlockBody = FullPay #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[serde(skip)] + #[arbitrary(default)] pub _phantom: PhantomData, } diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index cca8fef8416..c6d6678f31a 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -10,9 +10,19 @@ use tree_hash_derive::TreeHash; /// A header of a `BeaconBlock`. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct BeaconBlockHeader { pub slot: Slot, diff --git a/consensus/types/src/beacon_committee.rs b/consensus/types/src/beacon_committee.rs index 6483c009af7..ad293c3a3bb 100644 --- a/consensus/types/src/beacon_committee.rs +++ b/consensus/types/src/beacon_committee.rs @@ -17,8 +17,7 @@ impl<'a> BeaconCommittee<'a> { } } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Default, Clone, Debug, PartialEq)] +#[derive(arbitrary::Arbitrary, Default, Clone, Debug, PartialEq)] pub struct OwnedBeaconCommittee { pub slot: Slot, pub index: CommitteeIndex, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index f51a7bf9fb2..b44c14ded5a 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -147,8 +147,7 @@ impl AllowNextEpoch { } } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Eq, Hash, Clone, Copy)] +#[derive(PartialEq, Eq, Hash, Clone, Copy, arbitrary::Arbitrary)] pub struct BeaconStateHash(Hash256); impl fmt::Debug for BeaconStateHash { @@ -190,18 +189,19 @@ impl From for Hash256 { TreeHash, TestRandom, CompareFields, + arbitrary::Arbitrary ), serde(bound = "T: EthSpec", deny_unknown_fields), + arbitrary(bound = "T: EthSpec"), derivative(Clone), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] -#[derive(Debug, PartialEq, Serialize, Deserialize, Encode, TreeHash)] +#[derive(Debug, PartialEq, Serialize, Deserialize, Encode, TreeHash, arbitrary::Arbitrary)] #[serde(untagged)] #[serde(bound = "T: EthSpec")] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[arbitrary(bound = "T: EthSpec")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] pub struct BeaconState diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 03adaf3d443..8afef1183be 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ 
-336,7 +336,6 @@ pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> V active } -#[cfg(feature = "arbitrary-fuzz")] impl arbitrary::Arbitrary<'_> for CommitteeCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { Ok(Self::default()) diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/beacon_state/exit_cache.rs index 1c199c04755..b657d62ae62 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/beacon_state/exit_cache.rs @@ -61,7 +61,6 @@ impl ExitCache { } } -#[cfg(feature = "arbitrary-fuzz")] impl arbitrary::Arbitrary<'_> for ExitCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { Ok(Self::default()) diff --git a/consensus/types/src/beacon_state/pubkey_cache.rs b/consensus/types/src/beacon_state/pubkey_cache.rs index d81801e77d2..590ea30f999 100644 --- a/consensus/types/src/beacon_state/pubkey_cache.rs +++ b/consensus/types/src/beacon_state/pubkey_cache.rs @@ -42,7 +42,6 @@ impl PubkeyCache { } } -#[cfg(feature = "arbitrary-fuzz")] impl arbitrary::Arbitrary<'_> for PubkeyCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { Ok(Self::default()) diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index 5515fb753af..efc6573d2bc 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -600,7 +600,6 @@ impl OptionalTreeHashCacheInner { } } -#[cfg(feature = "arbitrary-fuzz")] impl arbitrary::Arbitrary<'_> for BeaconTreeHashCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { Ok(Self::default()) diff --git a/consensus/types/src/blobs_sidecar.rs b/consensus/types/src/blobs_sidecar.rs index d4e77960601..227be3e2f82 100644 --- a/consensus/types/src/blobs_sidecar.rs +++ b/consensus/types/src/blobs_sidecar.rs @@ -6,9 +6,20 @@ use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use tree_hash_derive::TreeHash; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq, Default)] +#[derive( + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + PartialEq, + Default, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct BlobsSidecar { pub beacon_block_root: Hash256, pub beacon_block_slot: Slot, diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index 497e9aa1405..f6064f65ab5 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -6,9 +6,18 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct BlsToExecutionChange { #[serde(with = "eth2_serde_utils::quoted_u64")] diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index bf9a7ed34db..1f947c9e7b2 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -29,8 +29,7 @@ pub enum Domain { /// 
Lighthouse's internal configuration struct. /// /// Contains a mixture of "preset" and "config" values w.r.t to the EF definitions. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Debug, Clone)] +#[derive(arbitrary::Arbitrary, PartialEq, Debug, Clone)] pub struct ChainSpec { /* * Config name diff --git a/consensus/types/src/checkpoint.rs b/consensus/types/src/checkpoint.rs index cad7fab754e..e84798f6f7d 100644 --- a/consensus/types/src/checkpoint.rs +++ b/consensus/types/src/checkpoint.rs @@ -8,8 +8,8 @@ use tree_hash_derive::TreeHash; /// Casper FFG checkpoint, used in attestations. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( + arbitrary::Arbitrary, Debug, Clone, Copy, diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/contribution_and_proof.rs index 855e36bc903..167b0857c5a 100644 --- a/consensus/types/src/contribution_and_proof.rs +++ b/consensus/types/src/contribution_and_proof.rs @@ -9,9 +9,20 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; /// A Validators aggregate sync committee contribution and selection proof. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct ContributionAndProof { /// The index of the validator that created the sync contribution. #[serde(with = "eth2_serde_utils::quoted_u64")] diff --git a/consensus/types/src/deposit.rs b/consensus/types/src/deposit.rs index a347cf675cf..bbc3bd9fb89 100644 --- a/consensus/types/src/deposit.rs +++ b/consensus/types/src/deposit.rs @@ -11,9 +11,18 @@ pub const DEPOSIT_TREE_DEPTH: usize = 32; /// A deposit to potentially become a beacon chain validator. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct Deposit { pub proof: FixedVector, diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index 6c5444e110f..1969311671f 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -10,9 +10,18 @@ use tree_hash_derive::TreeHash; /// The data supplied by the user to the deposit contract. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct DepositData { pub pubkey: PublicKeyBytes, diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit_message.rs index d1f245bc980..63073401c22 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit_message.rs @@ -10,8 +10,18 @@ use tree_hash_derive::TreeHash; /// The data supplied by the user to the deposit contract. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] pub struct DepositMessage { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs index 0fe929a1e93..3556e31a9fc 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/enr_fork_id.rs @@ -10,9 +10,18 @@ use tree_hash_derive::TreeHash; /// a nodes local ENR. /// /// Spec v0.11 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Default, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct EnrForkId { #[serde(with = "eth2_serde_utils::bytes_4_hex")] diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs index 4fd7d3373c5..6b2396e112c 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/eth1_data.rs @@ -9,8 +9,8 @@ use tree_hash_derive::TreeHash; /// Contains data obtained from the Eth1 chain. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( + arbitrary::Arbitrary, Debug, PartialEq, Clone, diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 5ed5307ffdf..e45f5b392ac 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -48,7 +48,9 @@ impl fmt::Display for EthSpecId { } } -pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + Eq { +pub trait EthSpec: + 'static + Default + Sync + Send + Clone + Debug + PartialEq + Eq + for<'a> arbitrary::Arbitrary<'a> +{ /* * Constants */ @@ -258,8 +260,7 @@ macro_rules! params_from_eth_spec { } /// Ethereum Foundation specifications. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize, arbitrary::Arbitrary)] pub struct MainnetEthSpec; impl EthSpec for MainnetEthSpec { @@ -305,8 +306,7 @@ impl EthSpec for MainnetEthSpec { } /// Ethereum Foundation minimal spec, as defined in the eth2.0-specs repo. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize, arbitrary::Arbitrary)] pub struct MinimalEthSpec; impl EthSpec for MinimalEthSpec { @@ -355,8 +355,7 @@ impl EthSpec for MinimalEthSpec { } /// Gnosis Beacon Chain specifications. 
-#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize, arbitrary::Arbitrary)] pub struct GnosisEthSpec; impl EthSpec for GnosisEthSpec { diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs index 988dcece5e8..363a35a86a1 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution_block_hash.rs @@ -6,8 +6,18 @@ use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash, Derivative)] +#[derive( + arbitrary::Arbitrary, + Default, + Clone, + Copy, + Serialize, + Deserialize, + Eq, + PartialEq, + Hash, + Derivative, +)] #[derivative(Debug = "transparent")] #[serde(transparent)] pub struct ExecutionBlockHash(Hash256); diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 45f52fb65a7..a57d4114128 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -28,20 +28,23 @@ pub type Withdrawals = VariableList::MaxWithdrawal TreeHash, TestRandom, Derivative, + arbitrary::Arbitrary ), derivative(PartialEq, Hash(bound = "T: EthSpec")), serde(bound = "T: EthSpec", deny_unknown_fields), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) + arbitrary(bound = "T: EthSpec") ), cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") )] -#[derive(Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative)] +#[derive( + Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary, +)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec", untagged)] +#[arbitrary(bound = "T: EthSpec")] #[ssz(enum_behaviour = "transparent")] #[tree_hash(enum_behaviour = "transparent")] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct ExecutionPayload { #[superstruct(getter(copy))] pub parent_hash: ExecutionBlockHash, diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index e2c23389a1f..42e44ed739c 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -22,21 +22,24 @@ use BeaconStateError; TreeHash, TestRandom, Derivative, + arbitrary::Arbitrary ), derivative(PartialEq, Hash(bound = "T: EthSpec")), serde(bound = "T: EthSpec", deny_unknown_fields), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) + arbitrary(bound = "T: EthSpec") ), ref_attributes(derive(PartialEq, TreeHash), tree_hash(enum_behaviour = "transparent")), cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") )] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] +#[derive( + Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, +)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec", untagged)] +#[arbitrary(bound = "T: EthSpec")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = 
"transparent")] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct ExecutionPayloadHeader { #[superstruct(getter(copy))] pub parent_hash: ExecutionBlockHash, diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs index 44b8a16637d..de332f0cada 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork.rs @@ -9,8 +9,8 @@ use tree_hash_derive::TreeHash; /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( + arbitrary::Arbitrary, Debug, Clone, Copy, diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs index be13f71e4d5..cc790393159 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork_data.rs @@ -9,9 +9,18 @@ use tree_hash_derive::TreeHash; /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Default, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct ForkData { #[serde(with = "eth2_serde_utils::bytes_4_hex")] diff --git a/consensus/types/src/free_attestation.rs b/consensus/types/src/free_attestation.rs index 81a778d8421..dd3782d3ce1 100644 --- a/consensus/types/src/free_attestation.rs +++ b/consensus/types/src/free_attestation.rs @@ -4,8 +4,7 @@ use super::{AttestationData, Signature}; use serde_derive::Serialize; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize)] +#[derive(arbitrary::Arbitrary, Debug, Clone, PartialEq, Serialize)] pub struct FreeAttestation { pub data: AttestationData, pub signature: Signature, diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index 2b0a645cd05..6288cdbe807 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -14,7 +14,7 @@ pub const GRAFFITI_BYTES_LEN: usize = 32; /// The 32-byte `graffiti` field on a beacon block. #[derive(Default, Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] #[serde(transparent)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(arbitrary::Arbitrary)] pub struct Graffiti(#[serde(with = "serde_graffiti")] pub [u8; GRAFFITI_BYTES_LEN]); impl Graffiti { diff --git a/consensus/types/src/historical_batch.rs b/consensus/types/src/historical_batch.rs index 325f5f85377..e75b64cae93 100644 --- a/consensus/types/src/historical_batch.rs +++ b/consensus/types/src/historical_batch.rs @@ -10,8 +10,19 @@ use tree_hash_derive::TreeHash; /// Historical block and state roots. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, +)] +#[arbitrary(bound = "T: EthSpec")] pub struct HistoricalBatch { pub block_roots: FixedVector, pub state_roots: FixedVector, diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index 32271cfa935..16ffb1ad8fa 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -12,12 +12,21 @@ use tree_hash_derive::TreeHash; /// To be included in an `AttesterSlashing`. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Derivative, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + Derivative, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, )] #[derivative(PartialEq, Eq)] // to satisfy Clippy's lint about `Hash` #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct IndexedAttestation { /// Lists validator registry indices, not committee indices. #[serde(with = "quoted_variable_list_u64")] diff --git a/consensus/types/src/kzg_commitment.rs b/consensus/types/src/kzg_commitment.rs index 9844df0282e..4612af5de15 100644 --- a/consensus/types/src/kzg_commitment.rs +++ b/consensus/types/src/kzg_commitment.rs @@ -7,7 +7,9 @@ use std::fmt; use std::fmt::{Display, Formatter}; use tree_hash::{PackedEncoding, TreeHash}; -#[derive(Derivative, Debug, Clone, Encode, Decode, Serialize, Deserialize)] +#[derive( + Derivative, Debug, Clone, Encode, Decode, Serialize, Deserialize, arbitrary::Arbitrary, +)] #[derivative(PartialEq, Eq, Hash)] #[ssz(struct_behaviour = "transparent")] pub struct KzgCommitment(#[serde(with = "BigArray")] pub [u8; 48]); diff --git a/consensus/types/src/kzg_proof.rs b/consensus/types/src/kzg_proof.rs index 1c8e49a443b..9c1136ce51d 100644 --- a/consensus/types/src/kzg_proof.rs +++ b/consensus/types/src/kzg_proof.rs @@ -7,7 +7,18 @@ use tree_hash::{PackedEncoding, TreeHash}; const KZG_PROOF_BYTES_LEN: usize = 48; -#[derive(Debug, PartialEq, Hash, Clone, Copy, Encode, Decode, Serialize, Deserialize)] +#[derive( + Debug, + PartialEq, + Hash, + Clone, + Copy, + Encode, + Decode, + Serialize, + Deserialize, + arbitrary::Arbitrary, +)] #[serde(transparent)] #[ssz(struct_behaviour = "transparent")] pub struct KzgProof(#[serde(with = "BigArray")] pub [u8; KZG_PROOF_BYTES_LEN]); diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index d2a46c04a43..1a5eed2205d 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -8,9 +8,19 @@ use tree_hash::TreeHash; /// A LightClientBootstrap is the initializer we send over to lightclient nodes /// that are trying to generate their basic storage when booting up. 
-#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct LightClientBootstrap { /// Requested beacon block header. pub header: BeaconBlockHeader, diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index cae6266f9e7..08069c93084 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -10,9 +10,19 @@ use tree_hash::TreeHash; /// A LightClientFinalityUpdate is the update lightclient request or received by a gossip that /// signal a new finalized beacon block header for the light client sync protocol. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct LightClientFinalityUpdate { /// The last `BeaconBlockHeader` from the last attested block by the sync committee. pub attested_header: BeaconBlockHeader, diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 8dda8cd5aed..7a39bd9ac1c 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -9,9 +9,19 @@ use tree_hash::TreeHash; /// A LightClientOptimisticUpdate is the update we send on each slot, /// it is based off the current unfinalized epoch is verified only against BLS signature. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct LightClientOptimisticUpdate { /// The last `BeaconBlockHeader` from the last attested block by the sync committee. pub attested_header: BeaconBlockHeader, diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 7d01f39bfc8..ca35f96802b 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -52,9 +52,19 @@ impl From for Error { /// A LightClientUpdate is the update we request solely to either complete the bootstraping process, /// or to sync up to the last committee period, we need to have one ready for each ALTAIR period /// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD]. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct LightClientUpdate { /// The last `BeaconBlockHeader` from the last attested block by the sync committee. 
pub attested_header: BeaconBlockHeader, diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/participation_flags.rs index a2dd4948641..bd98f8da078 100644 --- a/consensus/types/src/participation_flags.rs +++ b/consensus/types/src/participation_flags.rs @@ -7,7 +7,7 @@ use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; #[derive(Debug, Default, Clone, Copy, PartialEq, Deserialize, Serialize, TestRandom)] #[serde(transparent)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(arbitrary::Arbitrary)] pub struct ParticipationFlags { #[serde(with = "eth2_serde_utils::quoted_u8")] bits: u8, diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index f56b88fc927..2008f501cb2 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -48,7 +48,15 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + /// `ExecPayload` functionality the requires ownership. pub trait OwnedExecPayload: - ExecPayload + Default + Serialize + DeserializeOwned + Encode + Decode + TestRandom + 'static + ExecPayload + + Default + + Serialize + + DeserializeOwned + + Encode + + Decode + + TestRandom + + for<'a> arbitrary::Arbitrary<'a> + + 'static { } @@ -60,6 +68,7 @@ impl OwnedExecPayload for P where + Encode + Decode + TestRandom + + for<'a> arbitrary::Arbitrary<'a> + 'static { } @@ -108,10 +117,11 @@ pub trait AbstractExecPayload: TestRandom, TreeHash, Derivative, + arbitrary::Arbitrary, ), derivative(PartialEq, Hash(bound = "T: EthSpec")), serde(bound = "T: EthSpec", deny_unknown_fields), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), + arbitrary(bound = "T: EthSpec"), ssz(struct_behaviour = "transparent"), ), ref_attributes( @@ -123,9 +133,10 @@ pub trait AbstractExecPayload: cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") )] -#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative)] +#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] #[tree_hash(enum_behaviour = "transparent")] pub struct FullPayload { #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] @@ -418,10 +429,11 @@ impl TryFrom> for FullPayload { TestRandom, TreeHash, Derivative, + arbitrary::Arbitrary ), derivative(PartialEq, Hash(bound = "T: EthSpec")), serde(bound = "T: EthSpec", deny_unknown_fields), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), + arbitrary(bound = "T: EthSpec"), ssz(struct_behaviour = "transparent"), ), ref_attributes( @@ -433,9 +445,10 @@ impl TryFrom> for FullPayload { cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") )] -#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative)] +#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] #[tree_hash(enum_behaviour = "transparent")] pub struct BlindedPayload { #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/pending_attestation.rs 
index 2a65bff66f8..1b9903ebbe5 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/pending_attestation.rs @@ -9,7 +9,19 @@ use tree_hash_derive::TreeHash; /// An attestation that has been included in the state but not yet fully processed. /// /// Spec v0.12.1 -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, +)] +#[arbitrary(bound = "T: EthSpec")] pub struct PendingAttestation { pub aggregation_bits: BitList, pub data: AttestationData, @@ -19,18 +31,6 @@ pub struct PendingAttestation { pub proposer_index: u64, } -#[cfg(feature = "arbitrary-fuzz")] -impl arbitrary::Arbitrary<'_> for PendingAttestation { - fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { - Ok(Self { - aggregation_bits: >::arbitrary(u)?, - data: AttestationData::arbitrary(u)?, - inclusion_delay: u64::arbitrary(u)?, - proposer_index: u64::arbitrary(u)?, - }) - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/proposer_slashing.rs b/consensus/types/src/proposer_slashing.rs index ca048b149ac..1ac2464a47f 100644 --- a/consensus/types/src/proposer_slashing.rs +++ b/consensus/types/src/proposer_slashing.rs @@ -9,9 +9,19 @@ use tree_hash_derive::TreeHash; /// Two conflicting proposals from the same proposer (validator). /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct ProposerSlashing { pub signed_header_1: SignedBeaconBlockHeader, diff --git a/consensus/types/src/relative_epoch.rs b/consensus/types/src/relative_epoch.rs index e681ce15c20..77a46b56e86 100644 --- a/consensus/types/src/relative_epoch.rs +++ b/consensus/types/src/relative_epoch.rs @@ -14,15 +14,11 @@ impl From for Error { } } -#[cfg(feature = "arbitrary-fuzz")] -use arbitrary::Arbitrary; - /// Defines the epochs relative to some epoch. Most useful when referring to the committees prior /// to and following some epoch. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] -#[derive(Debug, PartialEq, Clone, Copy)] +#[derive(Debug, PartialEq, Clone, Copy, arbitrary::Arbitrary)] pub enum RelativeEpoch { /// The prior epoch. Previous, diff --git a/consensus/types/src/selection_proof.rs b/consensus/types/src/selection_proof.rs index 0a360b01554..f8bc8ba69fb 100644 --- a/consensus/types/src/selection_proof.rs +++ b/consensus/types/src/selection_proof.rs @@ -7,8 +7,7 @@ use ssz::Encode; use std::cmp; use std::convert::TryInto; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Debug, Clone)] +#[derive(arbitrary::Arbitrary, PartialEq, Debug, Clone)] pub struct SelectionProof(Signature); impl SelectionProof { diff --git a/consensus/types/src/signed_aggregate_and_proof.rs b/consensus/types/src/signed_aggregate_and_proof.rs index 0047bd3ccd4..6d86c056349 100644 --- a/consensus/types/src/signed_aggregate_and_proof.rs +++ b/consensus/types/src/signed_aggregate_and_proof.rs @@ -12,9 +12,20 @@ use tree_hash_derive::TreeHash; /// gossipsub topic. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct SignedAggregateAndProof { /// The `AggregateAndProof` that was signed. pub message: AggregateAndProof, diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 14f9358f611..cd6cd5cb9ec 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -8,8 +8,7 @@ use superstruct::superstruct; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Eq, Hash, Clone, Copy)] +#[derive(arbitrary::Arbitrary, PartialEq, Eq, Hash, Clone, Copy)] pub struct SignedBeaconBlockHash(Hash256); impl fmt::Debug for SignedBeaconBlockHash { @@ -49,20 +48,23 @@ impl From for Hash256 { Decode, TreeHash, Derivative, + arbitrary::Arbitrary ), derivative(PartialEq, Hash(bound = "E: EthSpec")), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), - serde(bound = "E: EthSpec, Payload: ExecPayload"), + serde(bound = "E: EthSpec, Payload: AbstractExecPayload"), + arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload"), ), map_into(BeaconBlock), map_ref_into(BeaconBlockRef), map_ref_mut_into(BeaconBlockRefMut) )] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] +#[derive( + Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, +)] #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(untagged)] -#[serde(bound = "E: EthSpec, Payload: ExecPayload")] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[serde(bound = "E: EthSpec, Payload: AbstractExecPayload")] +#[arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] pub struct SignedBeaconBlock = FullPayload> { diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/signed_beacon_block_header.rs index dc786beb6e9..c265eded1d5 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/signed_beacon_block_header.rs @@ -10,9 +10,19 @@ use tree_hash_derive::TreeHash; /// A signed header of a `BeaconBlock`. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Eq, + Hash, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct SignedBeaconBlockHeader { pub message: BeaconBlockHeader, diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/signed_bls_to_execution_change.rs index 8c8131c1e0e..92b79fad3f9 100644 --- a/consensus/types/src/signed_bls_to_execution_change.rs +++ b/consensus/types/src/signed_bls_to_execution_change.rs @@ -6,9 +6,18 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct SignedBlsToExecutionChange { pub message: BlsToExecutionChange, diff --git a/consensus/types/src/signed_contribution_and_proof.rs b/consensus/types/src/signed_contribution_and_proof.rs index 245d33ff485..4cb35884338 100644 --- a/consensus/types/src/signed_contribution_and_proof.rs +++ b/consensus/types/src/signed_contribution_and_proof.rs @@ -10,9 +10,20 @@ use tree_hash_derive::TreeHash; /// A Validators signed contribution proof to publish on the `sync_committee_contribution_and_proof` /// gossipsub topic. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct SignedContributionAndProof { /// The `ContributionAndProof` that was signed. pub message: ContributionAndProof, diff --git a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/signed_voluntary_exit.rs index 69f0e6e2c9f..3392826a62f 100644 --- a/consensus/types/src/signed_voluntary_exit.rs +++ b/consensus/types/src/signed_voluntary_exit.rs @@ -9,9 +9,18 @@ use tree_hash_derive::TreeHash; /// An exit voluntarily submitted a validator who wishes to withdraw. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct SignedVoluntaryExit { pub message: VoluntaryExit, diff --git a/consensus/types/src/signing_data.rs b/consensus/types/src/signing_data.rs index 61f7e839fa2..b80d4a40d5a 100644 --- a/consensus/types/src/signing_data.rs +++ b/consensus/types/src/signing_data.rs @@ -7,8 +7,18 @@ use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] pub struct SigningData { pub object_root: Hash256, pub domain: Hash256, diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index 277aa9deaee..2716367c7eb 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -24,13 +24,35 @@ use std::iter::Iterator; #[cfg(feature = "legacy-arith")] use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[derive( + arbitrary::Arbitrary, + Clone, + Copy, + Default, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, +)] #[serde(transparent)] pub struct Slot(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[derive( + arbitrary::Arbitrary, + Clone, + Copy, + Default, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, +)] #[serde(transparent)] pub struct Epoch(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index e1de2776150..fd06eb78a12 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -18,8 +18,7 @@ lazy_static! 
{ }; } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct SubnetId(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_aggregate.rs index 2292b021118..300c86fc0f8 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_aggregate.rs @@ -20,12 +20,21 @@ impl From for Error { } } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, + arbitrary::Arbitrary, )] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct SyncAggregate { pub sync_committee_bits: BitVector, pub sync_committee_signature: AggregateSignature, diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_aggregator_selection_data.rs index 963b9dc6040..9e72438be20 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_aggregator_selection_data.rs @@ -6,9 +6,18 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Clone, Serialize, Deserialize, Hash, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Hash, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct SyncAggregatorSelectionData { pub slot: Slot, diff --git a/consensus/types/src/sync_committee.rs b/consensus/types/src/sync_committee.rs index 598d5fc16fc..43ba23f121c 100644 --- a/consensus/types/src/sync_committee.rs +++ b/consensus/types/src/sync_committee.rs @@ -25,9 +25,20 @@ impl From for Error { } } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct SyncCommittee { pub pubkeys: FixedVector, pub aggregate_pubkey: PublicKeyBytes, diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index c79ceb92fbb..ef8b52becfc 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -15,9 +15,20 @@ pub enum Error { } /// An aggregation of `SyncCommitteeMessage`s, used in creating a `SignedContributionAndProof`. 
-#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct SyncCommitteeContribution { pub slot: Slot, pub beacon_block_root: Hash256, diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee_message.rs index 21dfd9c2882..5c2fb083743 100644 --- a/consensus/types/src/sync_committee_message.rs +++ b/consensus/types/src/sync_committee_message.rs @@ -8,8 +8,18 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; /// The data upon which a `SyncCommitteeContribution` is based. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] pub struct SyncCommitteeMessage { pub slot: Slot, pub beacon_block_root: Hash256, diff --git a/consensus/types/src/sync_selection_proof.rs b/consensus/types/src/sync_selection_proof.rs index 51395c0c135..570abace1eb 100644 --- a/consensus/types/src/sync_selection_proof.rs +++ b/consensus/types/src/sync_selection_proof.rs @@ -12,8 +12,7 @@ use ssz_types::typenum::Unsigned; use std::cmp; use std::convert::TryInto; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Debug, Clone)] +#[derive(arbitrary::Arbitrary, PartialEq, Debug, Clone)] pub struct SyncSelectionProof(Signature); impl SyncSelectionProof { diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs index 9babe323950..11bcf268941 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_subnet_id.rs @@ -19,8 +19,7 @@ lazy_static! { }; } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct SyncSubnetId(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index ebe3ca046cf..43b892cdf3d 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -10,8 +10,18 @@ use tree_hash_derive::TreeHash; /// Information about a `BeaconChain` validator. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[derive( + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, +)] pub struct Validator { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index cc10632d07c..20c84986c29 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -11,9 +11,18 @@ use tree_hash_derive::TreeHash; /// An exit voluntarily submitted a validator who wishes to withdraw. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct VoluntaryExit { /// Earliest epoch when voluntary exit can be processed. diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs index 6f14cf1c52e..5221ff63f09 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal.rs @@ -5,9 +5,19 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct Withdrawal { #[serde(with = "eth2_serde_utils::quoted_u64")] From aa896decc1245efefd340207d89400e93a878809 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 12 Jan 2023 19:13:01 +1100 Subject: [PATCH 114/263] Fix some beacon_chain tests --- beacon_node/beacon_chain/tests/capella.rs | 9 +++------ beacon_node/beacon_chain/tests/merge.rs | 9 +++------ 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs index 1e39d075d86..e910e8134f1 100644 --- a/beacon_node/beacon_chain/tests/capella.rs +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -16,12 +16,9 @@ fn verify_execution_payload_chain(chain: &[FullPayload]) { // Check against previous `ExecutionPayload`. if let Some(prev_ep) = prev_ep { - assert_eq!(prev_ep.block_hash(), ep.execution_payload().parent_hash()); - assert_eq!( - prev_ep.execution_payload().block_number() + 1, - ep.execution_payload().block_number() - ); - assert!(ep.execution_payload().timestamp() > prev_ep.execution_payload().timestamp()); + assert_eq!(prev_ep.block_hash(), ep.parent_hash()); + assert_eq!(prev_ep.block_number() + 1, ep.block_number()); + assert!(ep.timestamp() > prev_ep.timestamp()); } prev_ep = Some(ep.clone()); } diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index cd6e0e2ba3b..1e0112a4954 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -17,12 +17,9 @@ fn verify_execution_payload_chain(chain: &[FullPayload]) { // Check against previous `ExecutionPayload`. 
if let Some(prev_ep) = prev_ep { - assert_eq!(prev_ep.block_hash(), ep.execution_payload().parent_hash()); - assert_eq!( - prev_ep.execution_payload().block_number() + 1, - ep.execution_payload().block_number() - ); - assert!(ep.execution_payload().timestamp() > prev_ep.execution_payload().timestamp()); + assert_eq!(prev_ep.block_hash(), ep.parent_hash()); + assert_eq!(prev_ep.block_number() + 1, ep.block_number()); + assert!(ep.timestamp() > prev_ep.timestamp()); } prev_ep = Some(ep.clone()); } From 8e2931d73b3d6abddd88d49c4e3f209e3cdf9c7c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 13 Jan 2023 12:46:54 +1100 Subject: [PATCH 115/263] Verify blockHash with withdrawals --- beacon_node/beacon_chain/src/chain_config.rs | 5 ++- beacon_node/execution_layer/src/block_hash.rs | 36 +++++++++++++++++-- consensus/types/src/execution_block_header.rs | 7 +++- 3 files changed, 41 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 4767dfe5ee1..2051a62369e 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -91,9 +91,8 @@ impl Default for ChainConfig { count_unrealized_full: CountUnrealizedFull::default(), checkpoint_sync_url_timeout: 60, prepare_payload_lookahead: Duration::from_secs(4), - // TODO(capella): disabled until withdrawal verification is implemented - // See: https://github.com/sigp/lighthouse/issues/3870 - optimistic_finalized_sync: false, + // This value isn't actually read except in tests. + optimistic_finalized_sync: true, } } } diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs index 133c42bf2e1..e9b7dcc17f3 100644 --- a/beacon_node/execution_layer/src/block_hash.rs +++ b/beacon_node/execution_layer/src/block_hash.rs @@ -1,4 +1,5 @@ use crate::{ + json_structures::JsonWithdrawal, keccak::{keccak256, KeccakHasher}, metrics, Error, ExecutionLayer, }; @@ -6,8 +7,8 @@ use ethers_core::utils::rlp::RlpStream; use keccak_hash::KECCAK_EMPTY_LIST_RLP; use triehash::ordered_trie_root; use types::{ - map_execution_block_header_fields, Address, EthSpec, ExecutionBlockHash, ExecutionBlockHeader, - ExecutionPayloadRef, Hash256, Hash64, Uint256, + map_execution_block_header_fields_except_withdrawals, Address, EthSpec, ExecutionBlockHash, + ExecutionBlockHeader, ExecutionPayloadRef, Hash256, Hash64, Uint256, }; impl ExecutionLayer { @@ -24,11 +25,23 @@ impl ExecutionLayer { payload.transactions().iter().map(|txn_bytes| &**txn_bytes), ); + // Calculate withdrawals root (post-Capella). + let rlp_withdrawals_root = if let Ok(withdrawals) = payload.withdrawals() { + Some(ordered_trie_root::( + withdrawals.iter().map(|withdrawal| { + rlp_encode_withdrawal(&JsonWithdrawal::from(withdrawal.clone())) + }), + )) + } else { + None + }; + // Construct the block header. let exec_block_header = ExecutionBlockHeader::from_payload( payload, KECCAK_EMPTY_LIST_RLP.as_fixed_bytes().into(), rlp_transactions_root, + rlp_withdrawals_root, ); // Hash the RLP encoding of the block header. @@ -47,13 +60,27 @@ impl ExecutionLayer { } } +/// RLP encode a withdrawal. 
+pub fn rlp_encode_withdrawal(withdrawal: &JsonWithdrawal) -> Vec { + let mut rlp_stream = RlpStream::new(); + rlp_stream.begin_list(4); + rlp_stream.append(&withdrawal.index); + rlp_stream.append(&withdrawal.validator_index); + rlp_stream.append(&withdrawal.address); + rlp_stream.append(&withdrawal.amount); + rlp_stream.out().into() +} + /// RLP encode an execution block header. pub fn rlp_encode_block_header(header: &ExecutionBlockHeader) -> Vec { let mut rlp_header_stream = RlpStream::new(); rlp_header_stream.begin_unbounded_list(); - map_execution_block_header_fields!(&header, |_, field| { + map_execution_block_header_fields_except_withdrawals!(&header, |_, field| { rlp_header_stream.append(field); }); + if let Some(withdrawals_root) = &header.withdrawals_root { + rlp_header_stream.append(withdrawals_root); + } rlp_header_stream.finalize_unbounded_list(); rlp_header_stream.out().into() } @@ -99,6 +126,7 @@ mod test { mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(), nonce: Hash64::zero(), base_fee_per_gas: 0x036b_u64.into(), + withdrawals_root: None, }; let expected_rlp = "f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b"; let expected_hash = @@ -126,6 +154,7 @@ mod test { mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000020000").unwrap(), nonce: Hash64::zero(), base_fee_per_gas: 0x036b_u64.into(), + withdrawals_root: None, }; let expected_rlp = "f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b"; let expected_hash = @@ -154,6 +183,7 @@ mod test { mix_hash: 
Hash256::from_str("bf5289894b2ceab3549f92f063febbac896b280ddb18129a57cff13113c11b13").unwrap(), nonce: Hash64::zero(), base_fee_per_gas: 0x34187b238_u64.into(), + withdrawals_root: None, }; let expected_hash = Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351") diff --git a/consensus/types/src/execution_block_header.rs b/consensus/types/src/execution_block_header.rs index eb540894086..b19988ff7df 100644 --- a/consensus/types/src/execution_block_header.rs +++ b/consensus/types/src/execution_block_header.rs @@ -24,7 +24,9 @@ use metastruct::metastruct; /// /// Credit to Reth for the type definition. #[derive(Debug, Clone, PartialEq, Eq, Hash)] -#[metastruct(mappings(map_execution_block_header_fields()))] +#[metastruct(mappings(map_execution_block_header_fields_except_withdrawals(exclude( + withdrawals_root +))))] pub struct ExecutionBlockHeader { pub parent_hash: Hash256, pub ommers_hash: Hash256, @@ -42,6 +44,7 @@ pub struct ExecutionBlockHeader { pub mix_hash: Hash256, pub nonce: Hash64, pub base_fee_per_gas: Uint256, + pub withdrawals_root: Option, } impl ExecutionBlockHeader { @@ -49,6 +52,7 @@ impl ExecutionBlockHeader { payload: ExecutionPayloadRef, rlp_empty_list_root: Hash256, rlp_transactions_root: Hash256, + rlp_withdrawals_root: Option, ) -> Self { // Most of these field mappings are defined in EIP-3675 except for `mixHash`, which is // defined in EIP-4399. @@ -69,6 +73,7 @@ impl ExecutionBlockHeader { mix_hash: payload.prev_randao(), nonce: Hash64::zero(), base_fee_per_gas: payload.base_fee_per_gas(), + withdrawals_root: rlp_withdrawals_root, } } } From d9dd9b43eeff1bf6adf60ed44e7be71fe51b5994 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Fri, 13 Jan 2023 10:47:19 -0600 Subject: [PATCH 116/263] Sign BlsToExecutionChange w/ GENESIS_FORK_VERSION --- .../src/per_block_processing/signature_sets.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index fa37681c766..c05d3f057d7 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -161,10 +161,9 @@ pub fn bls_execution_change_signature_set<'a, T: EthSpec>( signed_address_change: &'a SignedBlsToExecutionChange, spec: &'a ChainSpec, ) -> Result> { - let domain = spec.get_domain( - state.current_epoch(), + let domain = spec.compute_domain( Domain::BlsToExecutionChange, - &state.fork(), + spec.genesis_fork_version, state.genesis_validators_root(), ); let message = signed_address_change.message.signing_root(domain); From 05c1291d8a8118df5cf1d7bedb385efa53177339 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Fri, 13 Jan 2023 12:53:25 -0600 Subject: [PATCH 117/263] Don't Penalize Early `bls_to_execution_change` --- .../src/beacon_processor/worker/gossip_methods.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 589d7e9b475..08f659e5865 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -1217,11 +1217,13 @@ impl Worker { ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); // We penalize the peer slightly to prevent overuse of invalids. 
- self.gossip_penalize_peer( - peer_id, - PeerAction::HighToleranceError, - "invalid_bls_to_execution_change", - ); + if !matches!(e, BeaconChainError::BlsToExecutionChangeBadFork(_)) { + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "invalid_bls_to_execution_change", + ); + } return; } }; From 13196837361d2b4aae404f4fb973720fddc35388 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 13 Jan 2023 14:59:03 -0500 Subject: [PATCH 118/263] Update gossip_methods.rs --- .../beacon_processor/worker/gossip_methods.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 08f659e5865..959a16eb962 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -1215,9 +1215,20 @@ impl Worker { "peer" => %peer_id, "error" => ?e ); - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - // We penalize the peer slightly to prevent overuse of invalids. - if !matches!(e, BeaconChainError::BlsToExecutionChangeBadFork(_)) { + // We ignore pre-capella messages without penalizing peers. + if matches!(e, BeaconChainError::BlsToExecutionChangeBadFork(_)) { + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } else { + // We penalize the peer slightly to prevent overuse of invalids. + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); self.gossip_penalize_peer( peer_id, PeerAction::HighToleranceError, From 35c9e2407b23d263eeac749d769008d8f71cd825 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 13 Jan 2023 15:11:46 -0500 Subject: [PATCH 119/263] bump ef-tests --- testing/ef_tests/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 354631e5c37..1feba41c86f 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.3.0-rc.0 +TESTS_TAG := v1.3.0-rc.1 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) From 302eaca3448e11ac9cf2c5d64d4b8954ed909c0a Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 13 Jan 2023 16:15:27 -0500 Subject: [PATCH 120/263] intentionally skip `LightClientHeader` ssz static tests --- testing/ef_tests/check_all_files_accessed.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index b0982413665..f8ddc0a9f23 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -39,6 +39,8 @@ "tests/.*/.*/ssz_static/LightClientOptimistic", # LightClientFinalityUpdate "tests/.*/.*/ssz_static/LightClientFinalityUpdate", + # LightClientHeader + "tests/.*/.*/ssz_static/LightClientHeader", # Eip4844 tests are disabled for now. 
"tests/.*/eip4844", # One of the EF researchers likes to pack the tarballs on a Mac From 51088725fbbe2d3cdb3f21eb968561ec5ffc1553 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Sun, 15 Jan 2023 19:03:42 -0600 Subject: [PATCH 121/263] CL-EL withdrawals harmonization using Gwei units (#3884) --- .../execution_layer/src/engine_api/json_structures.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 728150a2082..78a3cb475a6 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -333,8 +333,8 @@ pub struct JsonWithdrawal { #[serde(with = "eth2_serde_utils::u64_hex_be")] pub validator_index: u64, pub address: Address, - #[serde(with = "eth2_serde_utils::u256_hex_be")] - pub amount: Uint256, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub amount: u64, } impl From for JsonWithdrawal { @@ -343,21 +343,18 @@ impl From for JsonWithdrawal { index: withdrawal.index, validator_index: withdrawal.validator_index, address: withdrawal.address, - amount: Uint256::from((withdrawal.amount as u128) * 1000000000u128), + amount: withdrawal.amount, } } } impl From for Withdrawal { fn from(jw: JsonWithdrawal) -> Self { - // This comparison is done to avoid a scenario where the EE gives us too large a number and we - // panic when attempting to cast to a `u64`. - let amount = std::cmp::max(jw.amount / 1000000000, Uint256::from(u64::MAX)); Self { index: jw.index, validator_index: jw.validator_index, address: jw.address, - amount: amount.as_u64(), + amount: jw.amount, } } } From 4d9e137e6ad4e738a976164d393f2e0b4896a00f Mon Sep 17 00:00:00 2001 From: Madman600 <38760981+Madman600@users.noreply.github.com> Date: Mon, 16 Jan 2023 03:42:08 +0000 Subject: [PATCH 122/263] Update checkpoint-sync.md (#3831) Remove infura checkpoint sync instructions. Co-authored-by: Adam Patacchiola --- book/src/checkpoint-sync.md | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index 736aa08f1cf..893c545cb93 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -48,17 +48,6 @@ The Ethereum community provides various [public endpoints](https://eth-clients.g lighthouse bn --checkpoint-sync-url https://example.com/ ... ``` -### Use Infura as a remote beacon node provider - -You can use Infura as the remote beacon node provider to load the initial checkpoint state. - -1. Sign up for the free Infura ETH2 API using the `Create new project tab` on the [Infura dashboard](https://infura.io/dashboard). -2. Copy the HTTPS endpoint for the required network (Mainnet/Prater). -3. Use it as the url for the `--checkpoint-sync-url` flag. e.g. -``` -lighthouse bn --checkpoint-sync-url https://:@eth2-beacon-mainnet.infura.io ... 
-``` - ## Backfilling Blocks Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks From 912ea2a5cab0ba8c4a9ac9fd4f3717e50a332301 Mon Sep 17 00:00:00 2001 From: Santiago Medina Date: Mon, 16 Jan 2023 03:42:09 +0000 Subject: [PATCH 123/263] Return HTTP 404 rather than 405 (#3836) ## Issue Addressed Issue #3112 ## Proposed Changes Add `Filter::recover` to the GET chain to handle rejections specifically as 404 NOT FOUND ## Additional Info Making a request to `http://localhost:5052/not_real` now returns the following: ``` { "code": 404, "message": "NOT_FOUND", "stacktraces": [] } ``` Co-authored-by: Paul Hauner --- beacon_node/http_api/src/lib.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 6cfdaf5db6a..8cd0b856b51 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3383,7 +3383,8 @@ pub fn serve( .or(get_lighthouse_attestation_performance.boxed()) .or(get_lighthouse_block_packing_efficiency.boxed()) .or(get_lighthouse_merge_readiness.boxed()) - .or(get_events.boxed()), + .or(get_events.boxed()) + .recover(warp_utils::reject::handle_rejection), ) .boxed() .or(warp::post().and( @@ -3407,7 +3408,8 @@ pub fn serve( .or(post_lighthouse_database_reconstruct.boxed()) .or(post_lighthouse_database_historical_blocks.boxed()) .or(post_lighthouse_block_rewards.boxed()) - .or(post_lighthouse_ui_validator_metrics.boxed()), + .or(post_lighthouse_ui_validator_metrics.boxed()) + .recover(warp_utils::reject::handle_rejection), )) .recover(warp_utils::reject::handle_rejection) .with(slog_logging(log.clone())) From 6ac1c5b43951f26f18df8e0b7553fa93c30e0250 Mon Sep 17 00:00:00 2001 From: Mac L Date: Mon, 16 Jan 2023 03:42:10 +0000 Subject: [PATCH 124/263] Add CLI flag to specify the format of logs written to the logfile (#3839) ## Proposed Changes Decouple the stdout and logfile formats by adding the `--logfile-format` CLI flag. This behaves identically to the existing `--log-format` flag, but instead will only affect the logs written to the logfile. The `--log-format` flag will no longer have any effect on the contents of the logfile. ## Additional Info This avoids being a breaking change by causing `logfile-format` to default to the value of `--log-format` if it is not provided. This means that users who were previously relying on being able to use a JSON formatted logfile will be able to continue to use `--log-format JSON`. 
Users who want to use JSON on stdout and default logs in the logfile, will need to pass the following flags: `--log-format JSON --logfile-format DEFAULT` --- lcli/src/main.rs | 1 + lighthouse/environment/src/lib.rs | 4 +++- lighthouse/src/main.rs | 15 +++++++++++++++ lighthouse/tests/beacon_node.rs | 19 ++++++++++++++++++- testing/simulator/src/eth1_sim.rs | 1 + testing/simulator/src/no_eth1_sim.rs | 1 + testing/simulator/src/sync_sim.rs | 1 + 7 files changed, 40 insertions(+), 2 deletions(-) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index de6039f35a0..137a4534b46 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -792,6 +792,7 @@ fn run( debug_level: String::from("trace"), logfile_debug_level: String::from("trace"), log_format: None, + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index fad7edeb196..8ef67e82ddb 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -50,6 +50,7 @@ pub struct LoggerConfig { pub debug_level: String, pub logfile_debug_level: String, pub log_format: Option, + pub logfile_format: Option, pub log_color: bool, pub disable_log_timestamp: bool, pub max_log_size: u64, @@ -64,6 +65,7 @@ impl Default for LoggerConfig { debug_level: String::from("info"), logfile_debug_level: String::from("debug"), log_format: None, + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 200, @@ -252,7 +254,7 @@ impl EnvironmentBuilder { let file_logger = FileLoggerBuilder::new(&path) .level(logfile_level) .channel_size(LOG_CHANNEL_SIZE) - .format(match config.log_format.as_deref() { + .format(match config.logfile_format.as_deref() { Some("JSON") => Format::Json, _ => Format::default(), }) diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index da72204f967..64ee0432f8a 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -99,6 +99,15 @@ fn main() { .default_value("debug") .global(true), ) + .arg( + Arg::with_name("logfile-format") + .long("logfile-format") + .value_name("FORMAT") + .help("Specifies the log format used when emitting logs to the logfile.") + .possible_values(&["DEFAULT", "JSON"]) + .takes_value(true) + .global(true) + ) .arg( Arg::with_name("logfile-max-size") .long("logfile-max-size") @@ -402,6 +411,11 @@ fn run( .value_of("logfile-debug-level") .ok_or("Expected --logfile-debug-level flag")?; + let logfile_format = matches + .value_of("logfile-format") + // Ensure that `logfile-format` defaults to the value of `log-format`. + .or_else(|| matches.value_of("log-format")); + let logfile_max_size: u64 = matches .value_of("logfile-max-size") .ok_or("Expected --logfile-max-size flag")? 
@@ -452,6 +466,7 @@ fn run( debug_level: String::from(debug_level), logfile_debug_level: String::from(logfile_debug_level), log_format: log_format.map(String::from), + logfile_format: logfile_format.map(String::from), log_color, disable_log_timestamp, max_log_size: logfile_max_size * 1_024 * 1_024, diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 4a2e160e8bc..7e581ee6152 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1662,7 +1662,24 @@ fn logfile_no_restricted_perms_flag() { assert!(config.logger_config.is_restricted == false); }); } - +#[test] +fn logfile_format_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.logger_config.logfile_format, None)); +} +#[test] +fn logfile_format_flag() { + CommandLineTest::new() + .flag("logfile-format", Some("JSON")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.logger_config.logfile_format, + Some("JSON".to_string()) + ) + }); +} #[test] fn sync_eth1_chain_default() { CommandLineTest::new() diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 8284bff6096..42aefea7a53 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -62,6 +62,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { debug_level: String::from("debug"), logfile_debug_level: String::from("debug"), log_format: None, + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 53c4447da2c..1a026ded46d 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -47,6 +47,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { debug_level: String::from("debug"), logfile_debug_level: String::from("debug"), log_format: None, + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index 1c8b41f0573..9d759715eba 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -51,6 +51,7 @@ fn syncing_sim( debug_level: String::from(log_level), logfile_debug_level: String::from("debug"), log_format: log_format.map(String::from), + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, From 9a970ce3a2ff5ab64d19e48aac984ef12db1078c Mon Sep 17 00:00:00 2001 From: David Theodore Date: Tue, 17 Jan 2023 05:13:47 +0000 Subject: [PATCH 125/263] add better err reporting UnableToOpenVotingKeystore (#3781) ## Issue Addressed #3780 ## Proposed Changes Add error reporting that notifies the node operator that the `voting_keystore_path` in their `validator_definitions.yml` file may be incorrect. 
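For illustration, the startup failure now reads roughly as follows (wording taken from the diff below; `<keystore error>` is a placeholder for the underlying `UnableToOpenVotingKeystore` detail):
```
Unable to initialize validators: <keystore error>. If you have recently moved the location of your data directory make sure to update the location of voting_keystore_path in your validator_definitions.yml
```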
## Additional Info There is more info in issue #3780 Co-authored-by: Paul Hauner --- validator_client/src/lib.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 4db9804054a..00c3db7aa10 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -31,6 +31,7 @@ use crate::beacon_node_fallback::{ }; use crate::doppelganger_service::DoppelgangerService; use crate::graffiti_file::GraffitiFile; +use crate::initialized_validators::Error::UnableToOpenVotingKeystore; use account_utils::validator_definitions::ValidatorDefinitions; use attestation_service::{AttestationService, AttestationServiceBuilder}; use block_service::{BlockService, BlockServiceBuilder}; @@ -184,7 +185,16 @@ impl ProductionValidatorClient { log.clone(), ) .await - .map_err(|e| format!("Unable to initialize validators: {:?}", e))?; + .map_err(|e| { + match e { + UnableToOpenVotingKeystore(err) => { + format!("Unable to initialize validators: {:?}. If you have recently moved the location of your data directory \ + make sure to update the location of voting_keystore_path in your validator_definitions.yml", err) + }, + err => { + format!("Unable to initialize validators: {:?}", err)} + } + })?; let voting_pubkeys: Vec<_> = validators.iter_voting_pubkeys().collect(); From b4d9fc03ee54ecb8b916453189fbf422eb943285 Mon Sep 17 00:00:00 2001 From: GeemoCandama Date: Tue, 17 Jan 2023 05:13:48 +0000 Subject: [PATCH 126/263] add logging for starting request and receiving block (#3858) ## Issue Addressed #3853 ## Proposed Changes Added `INFO` level logs for requesting and receiving the unsigned block. ## Additional Info Logging for successfully publishing the signed block is already there. And seemingly there is a log for when "We realize we are going to produce a block" in the `start_update_service`: `info!(log, "Block production service started"); `. Is there anywhere else you'd like to see logging around this event? Co-authored-by: GeemoCandama <104614073+GeemoCandama@users.noreply.github.com> --- validator_client/src/block_service.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index d4acbe7563d..bef51a694a3 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -333,6 +333,11 @@ impl BlockService { let proposer_index = self.validator_store.validator_index(&validator_pubkey); let validator_pubkey_ref = &validator_pubkey; + info!( + log, + "Requesting unsigned block"; + "slot" => slot.as_u64(), + ); // Request block from first responsive beacon node. let block = self .beacon_nodes @@ -383,6 +388,11 @@ impl BlockService { } }; + info!( + log, + "Received unsigned block"; + "slot" => slot.as_u64(), + ); if proposer_index != Some(block.proposer_index()) { return Err(BlockError::Recoverable( "Proposer index does not match block proposer. Beacon chain re-orged" @@ -401,6 +411,11 @@ impl BlockService { .await .map_err(|e| BlockError::Recoverable(format!("Unable to sign block: {:?}", e)))?; + info!( + log, + "Publishing signed block"; + "slot" => slot.as_u64(), + ); // Publish block with first available beacon node. 
self.beacon_nodes .first_success( From 480309fb96a3939229460e8b812c755d3e5e0a77 Mon Sep 17 00:00:00 2001 From: aliask Date: Tue, 17 Jan 2023 05:13:49 +0000 Subject: [PATCH 127/263] Fix some dead links in markdown files (#3885) ## Issue Addressed No issue has been raised for these broken links. ## Proposed Changes Update links with the new URLs for the same document. ## Additional Info ~The link for the [Lighthouse Development Updates](https://eepurl.com/dh9Lvb/) mailing list is also broken, but I can't find the correct link.~ Co-authored-by: Paul Hauner --- README.md | 2 +- book/src/merge-migration.md | 4 ++-- book/src/run_a_node.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 859d5c4c63a..3565882d6e7 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ of the Lighthouse book. The best place for discussion is the [Lighthouse Discord server](https://discord.gg/cyAszAh). -Sign up to the [Lighthouse Development Updates](https://eepurl.com/dh9Lvb/) mailing list for email +Sign up to the [Lighthouse Development Updates](https://eepurl.com/dh9Lvb) mailing list for email notifications about releases, network status and other important information. Encrypt sensitive messages using our [PGP diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index 08f1b51e42a..ec9aeaaee86 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -58,7 +58,7 @@ supported. Each execution engine has its own flags for configuring the engine API and JWT. Please consult the relevant page for your execution engine for the required flags: -- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients) +- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/getting-started/consensus-clients) - [Nethermind: Running Nethermind Post Merge](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) - [Besu: Prepare For The Merge](https://besu.hyperledger.org/en/stable/HowTo/Upgrade/Prepare-for-The-Merge/) - [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) @@ -203,5 +203,5 @@ guidance for specific setups. - [Ethereum.org: The Merge](https://ethereum.org/en/upgrades/merge/) - [Ethereum Staking Launchpad: Merge Readiness](https://launchpad.ethereum.org/en/merge-readiness). - [CoinCashew: Ethereum Merge Upgrade Checklist](https://www.coincashew.com/coins/overview-eth/ethereum-merge-upgrade-checklist-for-home-stakers-and-validators) -- [EthDocker: Merge Preparation](https://eth-docker.net/docs/About/MergePrep/) +- [EthDocker: Merge Preparation](https://eth-docker.net/About/MergePrep/) - [Remy Roy: How to join the Goerli/Prater merge testnet](https://github.com/remyroy/ethstaker/blob/main/merge-goerli-prater.md) diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md index 5ce42aa6305..fb112c36753 100644 --- a/book/src/run_a_node.md +++ b/book/src/run_a_node.md @@ -26,7 +26,7 @@ has authority to control the execution engine. Each execution engine has its own flags for configuring the engine API and JWT. 
Please consult the relevant page of your execution engine for the required flags: -- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients) +- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/getting-started/consensus-clients) - [Nethermind: Running Nethermind & CL](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) - [Besu: Connect to Mainnet](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/) - [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) From 26787412cd5e5447f00123b4e4afe5d779765b0f Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Thu, 19 Jan 2023 05:42:17 -0600 Subject: [PATCH 128/263] Update engine_api to Latest spec (#3893) * Update engine_api to Latest spec * Small Test Fix * Fix Test Deserialization Issue --- .../tests/payload_invalidation.rs | 4 +- beacon_node/execution_layer/src/engine_api.rs | 73 +-- .../execution_layer/src/engine_api/http.rs | 78 ++-- .../src/engine_api/json_structures.rs | 433 +++++++++--------- .../test_utils/execution_block_generator.rs | 4 +- .../src/test_utils/handle_rpc.rs | 87 ++-- 6 files changed, 367 insertions(+), 312 deletions(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 2d8427e30e0..54d7734471c 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1007,9 +1007,7 @@ async fn payload_preparation() { .unwrap(), fee_recipient, None, - ) - .downgrade_to_v1() - .unwrap(); + ); assert_eq!(rig.previous_payload_attributes(), payload_attributes); } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 4970361a5a9..afc5cffe2fb 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -12,6 +12,7 @@ pub use types::{ Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, ForkName, Hash256, Uint256, VariableList, Withdrawal, }; +use types::{ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge}; pub mod auth; pub mod http; @@ -267,7 +268,7 @@ pub struct PayloadAttributes { #[superstruct(getter(copy))] pub suggested_fee_recipient: Address, #[superstruct(only(V2))] - pub withdrawals: Option>, + pub withdrawals: Vec, } impl PayloadAttributes { @@ -277,31 +278,18 @@ impl PayloadAttributes { suggested_fee_recipient: Address, withdrawals: Option>, ) -> Self { - // this should always return the highest version - PayloadAttributes::V2(PayloadAttributesV2 { - timestamp, - prev_randao, - suggested_fee_recipient, - withdrawals, - }) - } - - pub fn downgrade_to_v1(self) -> Result { - match self { - PayloadAttributes::V1(_) => Ok(self), - PayloadAttributes::V2(v2) => { - if v2.withdrawals.is_some() { - return Err(Error::BadConversion( - "Downgrading from PayloadAttributesV2 with non-null withdrawals" - .to_string(), - )); - } - Ok(PayloadAttributes::V1(PayloadAttributesV1 { - timestamp: v2.timestamp, - prev_randao: v2.prev_randao, - suggested_fee_recipient: v2.suggested_fee_recipient, - })) - } + match withdrawals { + Some(withdrawals) => PayloadAttributes::V2(PayloadAttributesV2 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + }), + None => 
PayloadAttributes::V1(PayloadAttributesV1 { + timestamp, + prev_randao, + suggested_fee_recipient, + }), } } } @@ -326,6 +314,39 @@ pub struct ProposeBlindedBlockResponse { pub validation_error: Option, } +#[superstruct( + variants(Merge, Capella, Eip4844), + variant_attributes(derive(Clone, Debug, PartialEq),), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Clone, Debug, PartialEq)] +pub struct GetPayloadResponse { + #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] + pub execution_payload: ExecutionPayloadMerge, + #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] + pub execution_payload: ExecutionPayloadCapella, + #[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))] + pub execution_payload: ExecutionPayloadEip4844, + pub block_value: Uint256, +} + +impl GetPayloadResponse { + pub fn execution_payload(self) -> ExecutionPayload { + match self { + GetPayloadResponse::Merge(response) => { + ExecutionPayload::Merge(response.execution_payload) + } + GetPayloadResponse::Capella(response) => { + ExecutionPayload::Capella(response.execution_payload) + } + GetPayloadResponse::Eip4844(response) => { + ExecutionPayload::Eip4844(response.execution_payload) + } + } + } +} + // This name is work in progress, it could // change when this method is actually proposed // but I'm writing this as it has been described diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 8ad3066f7f4..60725192b71 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -735,7 +735,7 @@ impl HttpJsonRpc { &self, execution_payload: ExecutionPayload, ) -> Result { - let params = json!([JsonExecutionPayloadV1::try_from(execution_payload)?]); + let params = json!([JsonExecutionPayload::from(execution_payload)]); let response: JsonPayloadStatusV1 = self .rpc_request( @@ -752,7 +752,7 @@ impl HttpJsonRpc { &self, execution_payload: ExecutionPayload, ) -> Result { - let params = json!([JsonExecutionPayloadV2::try_from(execution_payload)?]); + let params = json!([JsonExecutionPayload::from(execution_payload)]); let response: JsonPayloadStatusV1 = self .rpc_request( @@ -767,7 +767,6 @@ impl HttpJsonRpc { pub async fn get_payload_v1( &self, - fork_name: ForkName, payload_id: PayloadId, ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); @@ -780,25 +779,41 @@ impl HttpJsonRpc { ) .await?; - JsonExecutionPayload::V1(payload_v1).try_into_execution_payload(fork_name) + Ok(JsonExecutionPayload::V1(payload_v1).into()) } pub async fn get_payload_v2( &self, fork_name: ForkName, payload_id: PayloadId, - ) -> Result, Error> { + ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); - let response: JsonGetPayloadResponse = self - .rpc_request( - ENGINE_GET_PAYLOAD_V2, - params, - ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, - ) - .await?; - - JsonExecutionPayload::V2(response.execution_payload).try_into_execution_payload(fork_name) + match fork_name { + ForkName::Merge => { + let response: JsonGetPayloadResponseV1 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V2, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + Ok(JsonGetPayloadResponse::V1(response).into()) + } + ForkName::Capella => { + let 
response: JsonGetPayloadResponseV2 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V2, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + Ok(JsonGetPayloadResponse::V2(response).into()) + } + ForkName::Base | ForkName::Altair | ForkName::Eip4844 => Err( + Error::UnsupportedForkVariant(format!("called get_payload_v2 with {}", fork_name)), + ), + } } pub async fn get_blobs_bundle_v1( @@ -935,9 +950,15 @@ impl HttpJsonRpc { ) -> Result, Error> { let supported_apis = self.get_cached_supported_apis().await?; if supported_apis.get_payload_v2 { - self.get_payload_v2(fork_name, payload_id).await + // TODO: modify this method to return GetPayloadResponse instead + // of throwing away the `block_value` and returning only the + // ExecutionPayload + Ok(self + .get_payload_v2(fork_name, payload_id) + .await? + .execution_payload()) } else if supported_apis.new_payload_v1 { - self.get_payload_v1(fork_name, payload_id).await + self.get_payload_v1(payload_id).await } else { Err(Error::RequiredMethodUnsupported("engine_getPayload")) } @@ -955,13 +976,8 @@ impl HttpJsonRpc { self.forkchoice_updated_v2(forkchoice_state, payload_attributes) .await } else if supported_apis.forkchoice_updated_v1 { - self.forkchoice_updated_v1( - forkchoice_state, - payload_attributes - .map(|pa| pa.downgrade_to_v1()) - .transpose()?, - ) - .await + self.forkchoice_updated_v1(forkchoice_state, payload_attributes) + .await } else { Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated")) } @@ -976,9 +992,7 @@ mod test { use std::future::Future; use std::str::FromStr; use std::sync::Arc; - use types::{ - ExecutionPayloadMerge, ForkName, MainnetEthSpec, Transactions, Unsigned, VariableList, - }; + use types::{ExecutionPayloadMerge, MainnetEthSpec, Transactions, Unsigned, VariableList}; struct Tester { server: MockServer, @@ -1318,9 +1332,7 @@ mod test { Tester::new(true) .assert_request_equals( |client| async move { - let _ = client - .get_payload_v1::(ForkName::Merge, [42; 8]) - .await; + let _ = client.get_payload_v1::([42; 8]).await; }, json!({ "id": STATIC_ID, @@ -1333,9 +1345,7 @@ mod test { Tester::new(false) .assert_auth_failure(|client| async move { - client - .get_payload_v1::(ForkName::Merge, [42; 8]) - .await + client.get_payload_v1::([42; 8]).await }) .await; } @@ -1564,7 +1574,7 @@ mod test { // engine_getPayloadV1 REQUEST validation |client| async move { let _ = client - .get_payload_v1::(ForkName::Merge,str_to_payload_id("0xa247243752eb10b4")) + .get_payload_v1::(str_to_payload_id("0xa247243752eb10b4")) .await; }, json!({ @@ -1599,7 +1609,7 @@ mod test { })], |client| async move { let payload = client - .get_payload_v1::(ForkName::Merge,str_to_payload_id("0xa247243752eb10b4")) + .get_payload_v1::(str_to_payload_id("0xa247243752eb10b4")) .await .unwrap(); diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 78a3cb475a6..ace15ebd847 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -64,7 +64,7 @@ pub struct JsonPayloadIdResponse { } #[superstruct( - variants(V1, V2), + variants(V1, V2, V3), variant_attributes( derive(Debug, PartialEq, Default, Serialize, Deserialize,), serde(bound = "T: EthSpec", rename_all = "camelCase"), @@ -94,235 +94,234 @@ pub struct JsonExecutionPayload { pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, - 
#[superstruct(only(V2))] - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(default)] - #[serde(with = "eth2_serde_utils::u256_hex_be_opt")] - pub excess_data_gas: Option, + #[superstruct(only(V3))] + #[serde(with = "eth2_serde_utils::u256_hex_be")] + pub excess_data_gas: Uint256, pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: VariableList, T::MaxTransactionsPerPayload>, - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(default)] - #[superstruct(only(V2))] - pub withdrawals: Option>, -} - -impl JsonExecutionPayload { - pub fn try_into_execution_payload( - self, - fork_name: ForkName, - ) -> Result, Error> { - match self { - JsonExecutionPayload::V1(v1) => match fork_name { - ForkName::Merge => Ok(ExecutionPayload::Merge(ExecutionPayloadMerge { - parent_hash: v1.parent_hash, - fee_recipient: v1.fee_recipient, - state_root: v1.state_root, - receipts_root: v1.receipts_root, - logs_bloom: v1.logs_bloom, - prev_randao: v1.prev_randao, - block_number: v1.block_number, - gas_limit: v1.gas_limit, - gas_used: v1.gas_used, - timestamp: v1.timestamp, - extra_data: v1.extra_data, - base_fee_per_gas: v1.base_fee_per_gas, - block_hash: v1.block_hash, - transactions: v1.transactions, - })), - _ => Err(Error::UnsupportedForkVariant(format!("Unsupported conversion from JsonExecutionPayloadV1 for {}", fork_name))), - } - JsonExecutionPayload::V2(v2) => match fork_name { - ForkName::Merge => Ok(ExecutionPayload::Merge(ExecutionPayloadMerge { - parent_hash: v2.parent_hash, - fee_recipient: v2.fee_recipient, - state_root: v2.state_root, - receipts_root: v2.receipts_root, - logs_bloom: v2.logs_bloom, - prev_randao: v2.prev_randao, - block_number: v2.block_number, - gas_limit: v2.gas_limit, - gas_used: v2.gas_used, - timestamp: v2.timestamp, - extra_data: v2.extra_data, - base_fee_per_gas: v2.base_fee_per_gas, - block_hash: v2.block_hash, - transactions: v2.transactions, - })), - ForkName::Capella => Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { - parent_hash: v2.parent_hash, - fee_recipient: v2.fee_recipient, - state_root: v2.state_root, - receipts_root: v2.receipts_root, - logs_bloom: v2.logs_bloom, - prev_randao: v2.prev_randao, - block_number: v2.block_number, - gas_limit: v2.gas_limit, - gas_used: v2.gas_used, - timestamp: v2.timestamp, - extra_data: v2.extra_data, - base_fee_per_gas: v2.base_fee_per_gas, - block_hash: v2.block_hash, - transactions: v2.transactions, - withdrawals: v2 - .withdrawals - .map(|v| { - Into::>::into(v) - .into_iter() - .map(Into::into) - .collect::>() - .into() - }) - .ok_or_else(|| Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadCapella".to_string()))? 
- })), - ForkName::Eip4844 => Ok(ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { - parent_hash: v2.parent_hash, - fee_recipient: v2.fee_recipient, - state_root: v2.state_root, - receipts_root: v2.receipts_root, - logs_bloom: v2.logs_bloom, - prev_randao: v2.prev_randao, - block_number: v2.block_number, - gas_limit: v2.gas_limit, - gas_used: v2.gas_used, - timestamp: v2.timestamp, - extra_data: v2.extra_data, - base_fee_per_gas: v2.base_fee_per_gas, - excess_data_gas: v2.excess_data_gas.ok_or_else(|| Error::BadConversion("Null `excess_data_gas` field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))?, - block_hash: v2.block_hash, - transactions: v2.transactions, - withdrawals: v2 - .withdrawals - .map(|v| { - Into::>::into(v) - .into_iter() - .map(Into::into) - .collect::>() - .into() - }) - .ok_or_else(|| Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))? - })), - _ => Err(Error::UnsupportedForkVariant(format!("Unsupported conversion from JsonExecutionPayloadV2 for {}", fork_name))), - } + #[superstruct(only(V2, V3))] + pub withdrawals: VariableList, +} + +impl From> for JsonExecutionPayloadV1 { + fn from(payload: ExecutionPayloadMerge) -> Self { + JsonExecutionPayloadV1 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + } + } +} +impl From> for JsonExecutionPayloadV2 { + fn from(payload: ExecutionPayloadCapella) -> Self { + JsonExecutionPayloadV2 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .cloned() + .map(Into::into) + .collect::>() + .into(), + } + } +} +impl From> for JsonExecutionPayloadV3 { + fn from(payload: ExecutionPayloadEip4844) -> Self { + JsonExecutionPayloadV3 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + excess_data_gas: payload.excess_data_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .cloned() + .map(Into::into) + .collect::>() + .into(), } } } -impl TryFrom> for JsonExecutionPayloadV1 { - type Error = Error; - fn try_from(payload: ExecutionPayload) -> Result { - match payload { - ExecutionPayload::Merge(merge) => Ok(JsonExecutionPayloadV1 { - 
parent_hash: merge.parent_hash, - fee_recipient: merge.fee_recipient, - state_root: merge.state_root, - receipts_root: merge.receipts_root, - logs_bloom: merge.logs_bloom, - prev_randao: merge.prev_randao, - block_number: merge.block_number, - gas_limit: merge.gas_limit, - gas_used: merge.gas_used, - timestamp: merge.timestamp, - extra_data: merge.extra_data, - base_fee_per_gas: merge.base_fee_per_gas, - block_hash: merge.block_hash, - transactions: merge.transactions, - }), - ExecutionPayload::Capella(_) => Err(Error::UnsupportedForkVariant(format!( - "Unsupported conversion to JsonExecutionPayloadV1 for {}", - ForkName::Capella - ))), - ExecutionPayload::Eip4844(_) => Err(Error::UnsupportedForkVariant(format!( - "Unsupported conversion to JsonExecutionPayloadV1 for {}", - ForkName::Eip4844 - ))), +impl From> for JsonExecutionPayload { + fn from(execution_payload: ExecutionPayload) -> Self { + match execution_payload { + ExecutionPayload::Merge(payload) => JsonExecutionPayload::V1(payload.into()), + ExecutionPayload::Capella(payload) => JsonExecutionPayload::V2(payload.into()), + ExecutionPayload::Eip4844(payload) => JsonExecutionPayload::V3(payload.into()), } } } -impl TryFrom> for JsonExecutionPayloadV2 { - type Error = Error; - fn try_from(payload: ExecutionPayload) -> Result { - match payload { - ExecutionPayload::Merge(merge) => Ok(JsonExecutionPayloadV2 { - parent_hash: merge.parent_hash, - fee_recipient: merge.fee_recipient, - state_root: merge.state_root, - receipts_root: merge.receipts_root, - logs_bloom: merge.logs_bloom, - prev_randao: merge.prev_randao, - block_number: merge.block_number, - gas_limit: merge.gas_limit, - gas_used: merge.gas_used, - timestamp: merge.timestamp, - extra_data: merge.extra_data, - base_fee_per_gas: merge.base_fee_per_gas, - excess_data_gas: None, - block_hash: merge.block_hash, - transactions: merge.transactions, - withdrawals: None, - }), - ExecutionPayload::Capella(capella) => Ok(JsonExecutionPayloadV2 { - parent_hash: capella.parent_hash, - fee_recipient: capella.fee_recipient, - state_root: capella.state_root, - receipts_root: capella.receipts_root, - logs_bloom: capella.logs_bloom, - prev_randao: capella.prev_randao, - block_number: capella.block_number, - gas_limit: capella.gas_limit, - gas_used: capella.gas_used, - timestamp: capella.timestamp, - extra_data: capella.extra_data, - base_fee_per_gas: capella.base_fee_per_gas, - excess_data_gas: None, - block_hash: capella.block_hash, - transactions: capella.transactions, - withdrawals: Some( - Vec::from(capella.withdrawals) - .into_iter() - .map(Into::into) - .collect::>() - .into(), - ), - }), - ExecutionPayload::Eip4844(eip4844) => Ok(JsonExecutionPayloadV2 { - parent_hash: eip4844.parent_hash, - fee_recipient: eip4844.fee_recipient, - state_root: eip4844.state_root, - receipts_root: eip4844.receipts_root, - logs_bloom: eip4844.logs_bloom, - prev_randao: eip4844.prev_randao, - block_number: eip4844.block_number, - gas_limit: eip4844.gas_limit, - gas_used: eip4844.gas_used, - timestamp: eip4844.timestamp, - extra_data: eip4844.extra_data, - base_fee_per_gas: eip4844.base_fee_per_gas, - excess_data_gas: Some(eip4844.excess_data_gas), - block_hash: eip4844.block_hash, - transactions: eip4844.transactions, - withdrawals: Some( - Vec::from(eip4844.withdrawals) - .into_iter() - .map(Into::into) - .collect::>() - .into(), - ), - }), +impl From> for ExecutionPayloadMerge { + fn from(payload: JsonExecutionPayloadV1) -> Self { + ExecutionPayloadMerge { + parent_hash: payload.parent_hash, + 
fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + } + } +} +impl From> for ExecutionPayloadCapella { + fn from(payload: JsonExecutionPayloadV2) -> Self { + ExecutionPayloadCapella { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .cloned() + .map(Into::into) + .collect::>() + .into(), + } + } +} +impl From> for ExecutionPayloadEip4844 { + fn from(payload: JsonExecutionPayloadV3) -> Self { + ExecutionPayloadEip4844 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + excess_data_gas: payload.excess_data_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .cloned() + .map(Into::into) + .collect::>() + .into(), + } + } +} + +impl From> for ExecutionPayload { + fn from(json_execution_payload: JsonExecutionPayload) -> Self { + match json_execution_payload { + JsonExecutionPayload::V1(payload) => ExecutionPayload::Merge(payload.into()), + JsonExecutionPayload::V2(payload) => ExecutionPayload::Capella(payload.into()), + JsonExecutionPayload::V3(payload) => ExecutionPayload::Eip4844(payload.into()), } } } +#[superstruct( + variants(V1, V2, V3), + variant_attributes( + derive(Debug, PartialEq, Serialize, Deserialize), + serde(bound = "T: EthSpec", rename_all = "camelCase") + ), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] #[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(bound = "T: EthSpec", rename_all = "camelCase")] +#[serde(untagged)] pub struct JsonGetPayloadResponse { + #[superstruct(only(V1), partial_getter(rename = "execution_payload_v1"))] + pub execution_payload: JsonExecutionPayloadV1, + #[superstruct(only(V2), partial_getter(rename = "execution_payload_v2"))] pub execution_payload: JsonExecutionPayloadV2, - // uncomment this when geth fixes its serialization - //#[serde(with = "eth2_serde_utils::u256_hex_be")] - //pub block_value: Uint256, + #[superstruct(only(V3), partial_getter(rename = "execution_payload_v3"))] + pub execution_payload: JsonExecutionPayloadV3, + #[serde(with = "eth2_serde_utils::u256_hex_be")] + pub block_value: Uint256, +} + +impl From> for GetPayloadResponse { + fn 
from(json_get_payload_response: JsonGetPayloadResponse) -> Self { + match json_get_payload_response { + JsonGetPayloadResponse::V1(response) => { + GetPayloadResponse::Merge(GetPayloadResponseMerge { + execution_payload: response.execution_payload.into(), + block_value: response.block_value, + }) + } + JsonGetPayloadResponse::V2(response) => { + GetPayloadResponse::Capella(GetPayloadResponseCapella { + execution_payload: response.execution_payload.into(), + block_value: response.block_value, + }) + } + JsonGetPayloadResponse::V3(response) => { + GetPayloadResponse::Eip4844(GetPayloadResponseEip4844 { + execution_payload: response.execution_payload.into(), + block_value: response.block_value, + }) + } + } + } } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] @@ -376,9 +375,7 @@ pub struct JsonPayloadAttributes { pub prev_randao: Hash256, pub suggested_fee_recipient: Address, #[superstruct(only(V2))] - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(default)] - pub withdrawals: Option>, + pub withdrawals: Vec, } impl From for JsonPayloadAttributes { @@ -393,9 +390,7 @@ impl From for JsonPayloadAttributes { timestamp: pa.timestamp, prev_randao: pa.prev_randao, suggested_fee_recipient: pa.suggested_fee_recipient, - withdrawals: pa - .withdrawals - .map(|w| w.into_iter().map(Into::into).collect()), + withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), }), } } @@ -413,9 +408,7 @@ impl From for PayloadAttributes { timestamp: jpa.timestamp, prev_randao: jpa.prev_randao, suggested_fee_recipient: jpa.suggested_fee_recipient, - withdrawals: jpa - .withdrawals - .map(|jw| jw.into_iter().map(Into::into).collect()), + withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), }), } } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 7790dcbedd7..63893375db6 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -524,7 +524,7 @@ impl ExecutionBlockGenerator { base_fee_per_gas: Uint256::one(), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), - withdrawals: pa.withdrawals.as_ref().unwrap().clone().into(), + withdrawals: pa.withdrawals.clone().into(), }) } ForkName::Eip4844 => { @@ -545,7 +545,7 @@ impl ExecutionBlockGenerator { excess_data_gas: Uint256::one(), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), - withdrawals: pa.withdrawals.as_ref().unwrap().clone().into(), + withdrawals: pa.withdrawals.clone().into(), }) } _ => unreachable!(), diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index f01ae00e86c..1e096364975 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -79,9 +79,12 @@ pub async fn handle_rpc( ENGINE_NEW_PAYLOAD_V1 => { JsonExecutionPayload::V1(get_param::>(params, 0)?) } - ENGINE_NEW_PAYLOAD_V2 => { - JsonExecutionPayload::V2(get_param::>(params, 0)?) - } + ENGINE_NEW_PAYLOAD_V2 => get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V2(jep)) + .or_else(|_| { + get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V1(jep)) + })?, // TODO(4844) add that here.. 
_ => unreachable!(), }; @@ -93,9 +96,9 @@ pub async fn handle_rpc( // validate method called correctly according to shanghai fork time match fork { ForkName::Merge => { - if request.withdrawals().is_ok() && request.withdrawals().unwrap().is_some() { + if matches!(request, JsonExecutionPayload::V2(_)) { return Err(format!( - "{} called with `withdrawals` before capella fork!", + "{} called with `ExecutionPayloadV2` before capella fork!", method )); } @@ -104,12 +107,9 @@ pub async fn handle_rpc( if method == ENGINE_NEW_PAYLOAD_V1 { return Err(format!("{} called after capella fork!", method)); } - if request.withdrawals().is_err() - || (request.withdrawals().is_ok() - && request.withdrawals().unwrap().is_none()) - { + if matches!(request, JsonExecutionPayload::V1(_)) { return Err(format!( - "{} called without `withdrawals` after capella fork!", + "{} called with `ExecutionPayloadV1` after capella fork!", method )); } @@ -138,7 +138,7 @@ pub async fn handle_rpc( Some( ctx.execution_block_generator .write() - .new_payload(request.try_into_execution_payload(fork).unwrap()), + .new_payload(request.into()), ) } else { None @@ -171,14 +171,26 @@ pub async fn handle_rpc( // TODO(4844) add 4844 error checking here match method { - ENGINE_GET_PAYLOAD_V1 => Ok(serde_json::to_value( - JsonExecutionPayloadV1::try_from(response).unwrap(), - ) - .unwrap()), - ENGINE_GET_PAYLOAD_V2 => Ok(serde_json::to_value(JsonGetPayloadResponse { - execution_payload: JsonExecutionPayloadV2::try_from(response).unwrap(), - }) - .unwrap()), + ENGINE_GET_PAYLOAD_V1 => { + Ok(serde_json::to_value(JsonExecutionPayload::from(response)).unwrap()) + } + ENGINE_GET_PAYLOAD_V2 => Ok(match JsonExecutionPayload::from(response) { + JsonExecutionPayload::V1(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseV1 { + execution_payload, + block_value: 0.into(), + }) + .unwrap() + } + JsonExecutionPayload::V2(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseV2 { + execution_payload, + block_value: 0.into(), + }) + .unwrap() + } + _ => unreachable!(), + }), _ => unreachable!(), } } @@ -190,8 +202,31 @@ pub async fn handle_rpc( jpa1.map(JsonPayloadAttributes::V1) } ENGINE_FORKCHOICE_UPDATED_V2 => { - let jpa2: Option = get_param(params, 1)?; - jpa2.map(JsonPayloadAttributes::V2) + // we can't use `deny_unknown_fields` without breaking compatibility with some + // clients that haven't updated to the latest engine_api spec. So instead we'll + // need to deserialize based on timestamp + get_param::>(params, 1).and_then(|pa| { + pa.and_then(|pa| { + match ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(*pa.timestamp()) + { + ForkName::Merge => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V1)) + .transpose() + } + ForkName::Capella => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V2)) + .transpose() + } + _ => unreachable!(), + } + }) + .transpose() + })? 
} _ => unreachable!(), }; @@ -204,9 +239,9 @@ pub async fn handle_rpc( .get_fork_at_timestamp(*pa.timestamp()) { ForkName::Merge => { - if pa.withdrawals().is_ok() && pa.withdrawals().unwrap().is_some() { + if matches!(pa, JsonPayloadAttributes::V2(_)) { return Err(format!( - "{} called with `withdrawals` before capella fork!", + "{} called with `JsonPayloadAttributesV2` before capella fork!", method )); } @@ -215,11 +250,9 @@ pub async fn handle_rpc( if method == ENGINE_FORKCHOICE_UPDATED_V1 { return Err(format!("{} called after capella fork!", method)); } - if pa.withdrawals().is_err() - || (pa.withdrawals().is_ok() && pa.withdrawals().unwrap().is_none()) - { + if matches!(pa, JsonPayloadAttributes::V1(_)) { return Err(format!( - "{} called without `withdrawals` after capella fork!", + "{} called with `JsonPayloadAttributesV1` after capella fork!", method )); } From 208f531ae7dacff9549eb354eb2e2dd23f0d7490 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 20 Jan 2023 00:46:55 +0000 Subject: [PATCH 129/263] update antithesis dockerfile (#3883) Resolves https://github.com/sigp/lighthouse/issues/3879 Co-authored-by: realbigsean --- testing/antithesis/Dockerfile.libvoidstar | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar index 32e2d5648df..bae18073297 100644 --- a/testing/antithesis/Dockerfile.libvoidstar +++ b/testing/antithesis/Dockerfile.libvoidstar @@ -1,11 +1,9 @@ -FROM rust:1.62.1-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev +FROM rust:1.66.1-bullseye AS builder +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse # Build lighthouse directly with a cargo build command, bypassing the Makefile. -# We have to use nightly in order to disable the new LLVM pass manager. -RUN rustup default nightly-2022-07-26 && cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Znew-llvm-pass-manager=no -Cpasses=sancov -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse - +RUN cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Cpasses=sancov-module -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse # build lcli binary directly with cargo install command, bypassing the makefile RUN cargo install --path /lighthouse/lcli --force --locked From f8a3b3b95acb356104d6f5b990e6bf4e9ff801b0 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 20 Jan 2023 00:46:56 +0000 Subject: [PATCH 130/263] Improve block delay metrics (#3894) We recently ran a large-block experiment on the testnet and plan to do a further experiment on mainnet. Although the metrics recovered from lighthouse nodes were quite useful, I think we could do with greater resolution in the block delay metrics and get some specific values for each block (currently these can be lost to large exponential histogram buckets). 
This PR increases the resolution of the block delay histogram buckets, but also introduces a new metric which records the last block delay. Depending on the polling resolution of the metric server, we can lose some block delay information, however it will always give us a specific value and we will not lose exact data based on poor resolution histogram buckets. --- .../src/beacon_processor/worker/gossip_methods.rs | 4 ++++ beacon_node/network/src/metrics.rs | 10 +++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index ef23f6761f6..c142359f3e6 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -715,6 +715,10 @@ impl Worker { &metrics::BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME, block_delay, ); + metrics::set_gauge( + &metrics::BEACON_BLOCK_LAST_DELAY, + block_delay.as_millis() as i64, + ); let verification_result = self .chain diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index b4f3f29f934..baf00720b09 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -335,10 +335,18 @@ lazy_static! { pub static ref BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME: Result = try_create_histogram_with_buckets( "beacon_block_gossip_slot_start_delay_time", "Duration between when the block is received and the start of the slot it belongs to.", + // Create a custom bucket list for greater granularity in block delay + Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0]) + // NOTE: Previous values, which we may want to switch back to. // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] - decimal_buckets(-1,2) + //decimal_buckets(-1,2) ); + pub static ref BEACON_BLOCK_LAST_DELAY: Result = try_create_int_gauge( + "beacon_block_last_delay", + "Keeps track of the last block's delay from the start of the slot" + ); + pub static ref BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL: Result = try_create_int_counter( "beacon_block_gossip_arrived_late_total", "Count of times when a gossip block arrived from the network later than the attestation deadline.", From 4deab888c9c48b16b29cf8bfc4b731524d9e2d33 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 20 Jan 2023 04:19:29 +0000 Subject: [PATCH 131/263] Switch allocator to jemalloc (#3697) ## Proposed Changes Another `tree-states` motivated PR, this adds `jemalloc` as the default allocator, with an option to use the system allocator by compiling with `FEATURES="" make`. 
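As a quick sketch of the two build modes described above (only the `FEATURES` variable mentioned here is assumed; everything else about the build is unchanged):
```bash
# Default native build: the jemalloc feature is enabled on Linux and macOS.
make

# Opt out of jemalloc and fall back to the system allocator.
FEATURES="" make
```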
- [x] Metrics - [x] Test on Windows - [x] Test on macOS - [x] Test with `musl` - [x] Metrics dashboard on `lighthouse-metrics` (https://github.com/sigp/lighthouse-metrics/pull/37) Co-authored-by: Michael Sproul --- .cargo/config.toml | 4 +++ .github/workflows/test-suite.yml | 14 +------- Cargo.lock | 41 +++++++++++++++++++++++ Cargo.toml | 1 + Makefile | 14 +++++--- book/src/installation-source.md | 7 +++- bors.toml | 1 - common/malloc_utils/Cargo.toml | 12 +++++-- common/malloc_utils/src/jemalloc.rs | 52 +++++++++++++++++++++++++++++ common/malloc_utils/src/lib.rs | 44 ++++++++++++++++++------ lcli/Cargo.toml | 5 +++ lighthouse/Cargo.toml | 2 ++ lighthouse/src/main.rs | 10 ++++++ 13 files changed, 175 insertions(+), 32 deletions(-) create mode 100644 .cargo/config.toml create mode 100644 common/malloc_utils/src/jemalloc.rs diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 00000000000..dac01630032 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,4 @@ +[env] +# Set the number of arenas to 16 when using jemalloc. +JEMALLOC_SYS_WITH_MALLOC_CONF = "abort_conf:true,narenas:16" + diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 8d52f7fa7e2..57fee718300 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -306,16 +306,6 @@ jobs: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Typecheck benchmark code without running it run: make check-benches - check-consensus: - name: check-consensus - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Typecheck consensus code in strict mode - run: make check-consensus clippy: name: clippy runs-on: ubuntu-latest @@ -382,14 +372,12 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust (${{ env.PINNED_NIGHTLY }}) run: rustup toolchain install $PINNED_NIGHTLY - # NOTE: cargo-udeps version is pinned until this issue is resolved: - # https://github.com/est31/cargo-udeps/issues/135 - name: Install Protoc uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install cargo-udeps - run: cargo install cargo-udeps --locked --force --version 0.1.30 + run: cargo install cargo-udeps --locked --force - name: Create Cargo config dir run: mkdir -p .cargo - name: Install custom Cargo config diff --git a/Cargo.lock b/Cargo.lock index f1daf4dbdfb..56b3724016b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2695,6 +2695,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "fs_extra" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" + [[package]] name = "funty" version = "1.1.0" @@ -3595,6 +3601,38 @@ version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +[[package]] +name = "jemalloc-ctl" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1891c671f3db85d8ea8525dd43ab147f9977041911d24a03e5a36187a7bfde9" +dependencies = [ + "jemalloc-sys", + "libc", + "paste", +] + +[[package]] +name = "jemalloc-sys" +version = "0.5.2+5.3.0-patched" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "134163979b6eed9564c98637b710b40979939ba351f59952708234ea11b5f3f8" +dependencies = [ + "cc", + "fs_extra", 
+ "libc", +] + +[[package]] +name = "jemallocator" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16c2514137880c52b0b4822b563fadd38257c1f380858addb74a400889696ea6" +dependencies = [ + "jemalloc-sys", + "libc", +] + [[package]] name = "js-sys" version = "0.3.60" @@ -3703,6 +3741,7 @@ dependencies = [ "lighthouse_network", "lighthouse_version", "log", + "malloc_utils", "sensitive_url", "serde", "serde_json", @@ -4510,6 +4549,8 @@ dependencies = [ name = "malloc_utils" version = "0.1.0" dependencies = [ + "jemalloc-ctl", + "jemallocator", "lazy_static", "libc", "lighthouse_metrics", diff --git a/Cargo.toml b/Cargo.toml index e254400e88b..de01771eb9c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -88,6 +88,7 @@ members = [ "validator_client", "validator_client/slashing_protection", ] +resolver = "2" [patch] [patch.crates-io] diff --git a/Makefile b/Makefile index 33077a6c930..68ada1b4b94 100644 --- a/Makefile +++ b/Makefile @@ -14,8 +14,16 @@ BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release" PINNED_NIGHTLY ?= nightly CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19 +# List of features to use when building natively. Can be overriden via the environment. +# No jemalloc on Windows +ifeq ($(OS),Windows_NT) + FEATURES?= +else + FEATURES?=jemalloc +endif + # List of features to use when cross-compiling. Can be overridden via the environment. -CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx +CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,jemalloc # Cargo profile for Cross builds. Default is for local builds, CI uses an override. CROSS_PROFILE ?= release @@ -101,10 +109,6 @@ cargo-fmt: check-benches: cargo check --workspace --benches -# Typechecks consensus code *without* allowing deprecated legacy arithmetic or metrics. -check-consensus: - cargo check -p state_processing --no-default-features - # Runs only the ef-test vectors. run-ef-tests: rm -rf $(EF_TESTS)/.accessed_file_log.txt diff --git a/book/src/installation-source.md b/book/src/installation-source.md index b3d83ef9f9e..8e515a41bd5 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -64,6 +64,7 @@ choco install protoc These dependencies are for compiling Lighthouse natively on Windows. Lighthouse can also run successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using Ubuntu under WSL, you should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](#ubuntu) section. + [WSL]: https://docs.microsoft.com/en-us/windows/wsl/about ## Build Lighthouse @@ -128,8 +129,12 @@ Commonly used features include: * `gnosis`: support for the Gnosis Beacon Chain. * `portable`: support for legacy hardware. * `modern`: support for exclusively modern hardware. -* `slasher-mdbx`: support for the MDBX slasher backend (enabled by default). +* `slasher-mdbx`: support for the MDBX slasher backend. Enabled by default. * `slasher-lmdb`: support for the LMDB slasher backend. +* `jemalloc`: use [`jemalloc`][jemalloc] to allocate memory. Enabled by default on Linux and macOS. + Not supported on Windows. 
+ +[jemalloc]: https://jemalloc.net/ ## Compilation Profiles diff --git a/bors.toml b/bors.toml index 096ac3b29a2..9e633d63f57 100644 --- a/bors.toml +++ b/bors.toml @@ -10,7 +10,6 @@ status = [ "merge-transition-ubuntu", "no-eth1-simulator-ubuntu", "check-benchmarks", - "check-consensus", "clippy", "arbitrary-check", "cargo-audit", diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index 569eed6082b..c88ec0bd5af 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -4,13 +4,21 @@ version = "0.1.0" authors = ["Paul Hauner "] edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - [dependencies] lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" libc = "0.2.79" parking_lot = "0.12.0" +jemalloc-ctl = { version = "0.5.0", optional = true } + +# Jemalloc's background_threads feature requires Linux (pthreads). +[target.'cfg(target_os = "linux")'.dependencies] +jemallocator = { version = "0.5.0", optional = true, features = ["stats", "background_threads"] } + +[target.'cfg(not(target_os = "linux"))'.dependencies] +jemallocator = { version = "0.5.0", optional = true, features = ["stats"] } [features] mallinfo2 = [] +jemalloc = ["jemallocator", "jemalloc-ctl"] +jemalloc-profiling = ["jemallocator/profiling"] diff --git a/common/malloc_utils/src/jemalloc.rs b/common/malloc_utils/src/jemalloc.rs new file mode 100644 index 00000000000..c796ea39a19 --- /dev/null +++ b/common/malloc_utils/src/jemalloc.rs @@ -0,0 +1,52 @@ +//! Set the allocator to `jemalloc`. +//! +//! Due to `jemalloc` requiring configuration at compile time or immediately upon runtime +//! initialisation it is configured via a Cargo config file in `.cargo/config.toml`. +//! +//! The `jemalloc` tuning can be overriden by: +//! +//! A) `JEMALLOC_SYS_WITH_MALLOC_CONF` at compile-time. +//! B) `_RJEM_MALLOC_CONF` at runtime. +use jemalloc_ctl::{arenas, epoch, stats, Error}; +use lazy_static::lazy_static; +use lighthouse_metrics::{set_gauge, try_create_int_gauge, IntGauge}; + +#[global_allocator] +static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; + +// Metrics for jemalloc. +lazy_static! { + pub static ref NUM_ARENAS: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_num_arenas", "The number of arenas in use"); + pub static ref BYTES_ALLOCATED: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_allocated", "Equivalent to stats.allocated"); + pub static ref BYTES_ACTIVE: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_active", "Equivalent to stats.active"); + pub static ref BYTES_MAPPED: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_mapped", "Equivalent to stats.mapped"); + pub static ref BYTES_METADATA: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_metadata", "Equivalent to stats.metadata"); + pub static ref BYTES_RESIDENT: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_resident", "Equivalent to stats.resident"); + pub static ref BYTES_RETAINED: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_retained", "Equivalent to stats.retained"); +} + +pub fn scrape_jemalloc_metrics() { + scrape_jemalloc_metrics_fallible().unwrap() +} + +pub fn scrape_jemalloc_metrics_fallible() -> Result<(), Error> { + // Advance the epoch so that the underlying statistics are updated. + epoch::advance()?; + + set_gauge(&NUM_ARENAS, arenas::narenas::read()? 
as i64); + set_gauge(&BYTES_ALLOCATED, stats::allocated::read()? as i64); + set_gauge(&BYTES_ACTIVE, stats::active::read()? as i64); + set_gauge(&BYTES_MAPPED, stats::mapped::read()? as i64); + set_gauge(&BYTES_METADATA, stats::metadata::read()? as i64); + set_gauge(&BYTES_RESIDENT, stats::resident::read()? as i64); + set_gauge(&BYTES_RETAINED, stats::retained::read()? as i64); + + Ok(()) +} diff --git a/common/malloc_utils/src/lib.rs b/common/malloc_utils/src/lib.rs index b8aed948f8b..3bb242369f7 100644 --- a/common/malloc_utils/src/lib.rs +++ b/common/malloc_utils/src/lib.rs @@ -2,18 +2,18 @@ //! //! ## Conditional Compilation //! -//! Presently, only configuration for "The GNU Allocator" from `glibc` is supported. All other -//! allocators are ignored. +//! This crate can be compiled with different feature flags to support different allocators: //! -//! It is assumed that if the following two statements are correct then we should expect to -//! configure `glibc`: +//! - Jemalloc, via the `jemalloc` feature. +//! - GNU malloc, if no features are set and the system supports it. +//! - The system allocator, if no features are set and the allocator is not GNU malloc. +//! +//! It is assumed that if Jemalloc is not in use, and the following two statements are correct then +//! we should expect to configure `glibc`: //! //! - `target_os = linux` //! - `target_env != musl` //! -//! In all other cases this library will not attempt to do anything (i.e., all functions are -//! no-ops). -//! //! If the above conditions are fulfilled but `glibc` still isn't present at runtime then a panic //! may be triggered. It is understood that there's no way to be certain that a compatible `glibc` //! is present: https://github.com/rust-lang/rust/issues/33244. @@ -24,18 +24,42 @@ //! detecting `glibc` are best-effort. If this crate throws errors about undefined external //! functions, then try to compile with the `not_glibc_interface` module. 
-#[cfg(all(target_os = "linux", not(target_env = "musl")))] +#[cfg(all( + target_os = "linux", + not(target_env = "musl"), + not(feature = "jemalloc") +))] mod glibc; +#[cfg(feature = "jemalloc")] +mod jemalloc; + pub use interface::*; -#[cfg(all(target_os = "linux", not(target_env = "musl")))] +#[cfg(all( + target_os = "linux", + not(target_env = "musl"), + not(feature = "jemalloc") +))] mod interface { pub use crate::glibc::configure_glibc_malloc as configure_memory_allocator; pub use crate::glibc::scrape_mallinfo_metrics as scrape_allocator_metrics; } -#[cfg(any(not(target_os = "linux"), target_env = "musl"))] +#[cfg(feature = "jemalloc")] +mod interface { + #[allow(dead_code)] + pub fn configure_memory_allocator() -> Result<(), String> { + Ok(()) + } + + pub use crate::jemalloc::scrape_jemalloc_metrics as scrape_allocator_metrics; +} + +#[cfg(all( + any(not(target_os = "linux"), target_env = "musl"), + not(feature = "jemalloc") +))] mod interface { #[allow(dead_code, clippy::unnecessary_wraps)] pub fn configure_memory_allocator() -> Result<(), String> { diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 93e529755d7..8ebac0ca610 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -8,6 +8,7 @@ edition = "2021" [features] portable = ["bls/supranational-portable"] fake_crypto = ['bls/fake_crypto'] +jemalloc = ["malloc_utils/jemalloc"] [dependencies] bls = { path = "../crypto/bls" } @@ -40,3 +41,7 @@ eth2 = { path = "../common/eth2" } snap = "1.0.1" beacon_chain = { path = "../beacon_node/beacon_chain" } store = { path = "../beacon_node/store" } +malloc_utils = { path = "../common/malloc_utils" } + +[package.metadata.cargo-udeps.ignore] +normal = ["malloc_utils"] diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index f9597ade8d4..e88aa24857c 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -24,6 +24,8 @@ gnosis = [] slasher-mdbx = ["slasher/mdbx"] # Support slasher LMDB backend. slasher-lmdb = ["slasher/lmdb"] +# Use jemalloc. +jemalloc = ["malloc_utils/jemalloc"] [dependencies] beacon_node = { "path" = "../beacon_node" } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 64ee0432f8a..babe2f8dca7 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -31,6 +31,14 @@ fn bls_library_name() -> &'static str { } } +fn allocator_name() -> &'static str { + if cfg!(feature = "jemalloc") { + "jemalloc" + } else { + "system" + } +} + fn main() { // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. 
if std::env::var("RUST_BACKTRACE").is_err() { @@ -51,10 +59,12 @@ fn main() { "{}\n\ BLS library: {}\n\ SHA256 hardware acceleration: {}\n\ + Allocator: {}\n\ Specs: mainnet (true), minimal ({}), gnosis ({})", VERSION.replace("Lighthouse/", ""), bls_library_name(), have_sha_extensions(), + allocator_name(), cfg!(feature = "spec-minimal"), cfg!(feature = "gnosis"), ).as_str() From 3e67fa303805a9bb25f0c2b6b5e62dada0e28065 Mon Sep 17 00:00:00 2001 From: antondlr Date: Fri, 20 Jan 2023 20:26:32 +0000 Subject: [PATCH 132/263] fix multiarch docker builds (#3904) ## Issue Addressed #3902 Tested and confirmed working [here](https://github.com/antondlr/lighthouse/actions/runs/3970418322) ## Additional Info buildx v0.10.0 added provenance attestations to images but they are packed in a way that's incompatible with `docker manifest` https://github.com/docker/buildx/releases --- .github/workflows/docker.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 13b84116955..76e5d031aab 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -99,6 +99,7 @@ jobs: --platform=linux/${SHORT_ARCH} \ --file ./Dockerfile.cross . \ --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX} \ + --provenance=false \ --push build-docker-multiarch: name: build-docker-multiarch${{ matrix.modernity }} From d8abf2fc41506cb52e629b7ab9c513b854b8dfeb Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sat, 21 Jan 2023 10:39:59 +1100 Subject: [PATCH 133/263] Import BLS to execution changes before Capella (#3892) * Import BLS to execution changes before Capella * Test for BLS to execution change HTTP API * Pack BLS to execution changes in LIFO order * Remove unused var * Clippy --- beacon_node/beacon_chain/src/beacon_chain.rs | 60 +++++- beacon_node/beacon_chain/src/errors.rs | 3 +- beacon_node/beacon_chain/src/test_utils.rs | 58 ++++++ beacon_node/http_api/src/lib.rs | 46 +++-- beacon_node/http_api/tests/fork_tests.rs | 180 +++++++++++++++++- .../beacon_processor/worker/gossip_methods.rs | 2 +- .../src/bls_to_execution_changes.rs | 105 ++++++++++ beacon_node/operation_pool/src/lib.rs | 61 +++--- beacon_node/operation_pool/src/persistence.rs | 24 +-- common/eth2/src/lib.rs | 18 ++ .../state_processing/src/verify_operation.rs | 31 +-- .../types/src/bls_to_execution_change.rs | 20 ++ 12 files changed, 517 insertions(+), 91 deletions(-) create mode 100644 beacon_node/operation_pool/src/bls_to_execution_changes.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 798a9b80823..77de5eb14a5 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2227,32 +2227,74 @@ impl BeaconChain { } /// Verify a signed BLS to execution change before allowing it to propagate on the gossip network. - pub fn verify_bls_to_execution_change_for_gossip( + pub fn verify_bls_to_execution_change_for_http_api( &self, bls_to_execution_change: SignedBlsToExecutionChange, ) -> Result, Error> { - let current_fork = self.spec.fork_name_at_slot::(self.slot()?); - if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork { - // Disallow BLS to execution changes prior to the Capella fork. - return Err(Error::BlsToExecutionChangeBadFork(current_fork)); + // Before checking the gossip duplicate filter, check that no prior change is already + // in our op pool. 
Ignore these messages: do not gossip, do not try to override the pool. + match self + .op_pool + .bls_to_execution_change_in_pool_equals(&bls_to_execution_change) + { + Some(true) => return Ok(ObservationOutcome::AlreadyKnown), + Some(false) => return Err(Error::BlsToExecutionConflictsWithPool), + None => (), } - let wall_clock_state = self.wall_clock_state()?; + // Use the head state to save advancing to the wall-clock slot unnecessarily. The message is + // signed with respect to the genesis fork version, and the slot check for gossip is applied + // separately. This `Arc` clone of the head is nice and cheap. + let head_snapshot = self.head().snapshot; + let head_state = &head_snapshot.beacon_state; Ok(self .observed_bls_to_execution_changes .lock() - .verify_and_observe(bls_to_execution_change, &wall_clock_state, &self.spec)?) + .verify_and_observe(bls_to_execution_change, head_state, &self.spec)?) + } + + /// Verify a signed BLS to execution change before allowing it to propagate on the gossip network. + pub fn verify_bls_to_execution_change_for_gossip( + &self, + bls_to_execution_change: SignedBlsToExecutionChange, + ) -> Result, Error> { + // Ignore BLS to execution changes on gossip prior to Capella. + if !self.current_slot_is_post_capella()? { + return Err(Error::BlsToExecutionPriorToCapella); + } + self.verify_bls_to_execution_change_for_http_api(bls_to_execution_change) + .or_else(|e| { + // On gossip treat conflicts the same as duplicates [IGNORE]. + match e { + Error::BlsToExecutionConflictsWithPool => Ok(ObservationOutcome::AlreadyKnown), + e => Err(e), + } + }) + } + + /// Check if the current slot is greater than or equal to the Capella fork epoch. + pub fn current_slot_is_post_capella(&self) -> Result { + let current_fork = self.spec.fork_name_at_slot::(self.slot()?); + if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork { + Ok(false) + } else { + Ok(true) + } } /// Import a BLS to execution change to the op pool. + /// + /// Return `true` if the change was added to the pool. 
pub fn import_bls_to_execution_change( &self, bls_to_execution_change: SigVerifiedOp, - ) { + ) -> bool { if self.eth1_chain.is_some() { self.op_pool - .insert_bls_to_execution_change(bls_to_execution_change); + .insert_bls_to_execution_change(bls_to_execution_change) + } else { + false } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 47ebc23ba60..e4f675e70a7 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -206,7 +206,8 @@ pub enum BeaconChainError { MissingPersistedForkChoice, CommitteePromiseFailed(oneshot_broadcast::Error), MaxCommitteePromises(usize), - BlsToExecutionChangeBadFork(ForkName), + BlsToExecutionPriorToCapella, + BlsToExecutionConflictsWithPool, InconsistentFork(InconsistentFork), ProposerHeadForkChoiceError(fork_choice::Error), } diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 0373e9fc00a..e71c1a98707 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -148,6 +148,7 @@ pub struct Builder { eth_spec_instance: T::EthSpec, spec: Option, validator_keypairs: Option>, + withdrawal_keypairs: Vec>, chain_config: Option, store_config: Option, #[allow(clippy::type_complexity)] @@ -170,6 +171,17 @@ impl Builder> { .clone() .expect("cannot build without validator keypairs"); + // For the interop genesis state we know that the withdrawal credentials are set equal + // to the validator keypairs. Check for any manually initialised credentials. + assert!( + self.withdrawal_keypairs.is_empty(), + "withdrawal credentials are ignored by fresh_ephemeral_store" + ); + self.withdrawal_keypairs = validator_keypairs + .iter() + .map(|kp| Some(kp.clone())) + .collect(); + let store = Arc::new( HotColdDB::open_ephemeral( self.store_config.clone().unwrap_or_default(), @@ -282,6 +294,7 @@ where eth_spec_instance, spec: None, validator_keypairs: None, + withdrawal_keypairs: vec![], chain_config: None, store_config: None, store: None, @@ -539,6 +552,7 @@ where spec: chain.spec.clone(), chain: Arc::new(chain), validator_keypairs, + withdrawal_keypairs: self.withdrawal_keypairs, shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), runtime: self.runtime, mock_execution_layer: self.mock_execution_layer, @@ -554,6 +568,12 @@ where /// Used for testing. pub struct BeaconChainHarness { pub validator_keypairs: Vec, + /// Optional BLS withdrawal keys for each validator. + /// + /// If a validator index is missing from this vec or their entry is `None` then either + /// no BLS withdrawal key was set for them (they had an address from genesis) or the test + /// initializer neglected to set this field. 
+ pub withdrawal_keypairs: Vec>, pub chain: Arc>, pub spec: ChainSpec, @@ -1465,6 +1485,44 @@ where .sign(sk, &fork, genesis_validators_root, &self.chain.spec) } + pub fn make_bls_to_execution_change( + &self, + validator_index: u64, + address: Address, + ) -> SignedBlsToExecutionChange { + let keypair = self.get_withdrawal_keypair(validator_index); + self.make_bls_to_execution_change_with_keys( + validator_index, + address, + &keypair.pk, + &keypair.sk, + ) + } + + pub fn make_bls_to_execution_change_with_keys( + &self, + validator_index: u64, + address: Address, + pubkey: &PublicKey, + secret_key: &SecretKey, + ) -> SignedBlsToExecutionChange { + let genesis_validators_root = self.chain.genesis_validators_root; + BlsToExecutionChange { + validator_index, + from_bls_pubkey: pubkey.compress(), + to_execution_address: address, + } + .sign(secret_key, genesis_validators_root, &self.chain.spec) + } + + pub fn get_withdrawal_keypair(&self, validator_index: u64) -> &Keypair { + self.withdrawal_keypairs + .get(validator_index as usize) + .expect("BLS withdrawal key missing from harness") + .as_ref() + .expect("no withdrawal key for validator") + } + pub fn add_voluntary_exit( &self, block: &mut BeaconBlock, diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 0d36601cf71..3f30255730a 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1677,7 +1677,7 @@ pub fn serve( .and_then( |chain: Arc>, address_changes: Vec, - #[allow(unused)] network_tx: UnboundedSender>, + network_tx: UnboundedSender>, log: Logger| { blocking_json_task(move || { let mut failures = vec![]; @@ -1685,15 +1685,38 @@ pub fn serve( for (index, address_change) in address_changes.into_iter().enumerate() { let validator_index = address_change.message.validator_index; - match chain.verify_bls_to_execution_change_for_gossip(address_change) { + match chain.verify_bls_to_execution_change_for_http_api(address_change) { Ok(ObservationOutcome::New(verified_address_change)) => { - publish_pubsub_message( - &network_tx, - PubsubMessage::BlsToExecutionChange(Box::new( - verified_address_change.as_inner().clone(), - )), - )?; - chain.import_bls_to_execution_change(verified_address_change); + let validator_index = + verified_address_change.as_inner().message.validator_index; + let address = verified_address_change + .as_inner() + .message + .to_execution_address; + + // New to P2P *and* op pool, gossip immediately if post-Capella. + let publish = chain.current_slot_is_post_capella().unwrap_or(false); + if publish { + publish_pubsub_message( + &network_tx, + PubsubMessage::BlsToExecutionChange(Box::new( + verified_address_change.as_inner().clone(), + )), + )?; + } + + // Import to op pool (may return `false` if there's a race). 
+ let imported = + chain.import_bls_to_execution_change(verified_address_change); + + info!( + log, + "Processed BLS to execution change"; + "validator_index" => validator_index, + "address" => ?address, + "published" => publish, + "imported" => imported, + ); } Ok(ObservationOutcome::AlreadyKnown) => { debug!( @@ -1703,11 +1726,12 @@ pub fn serve( ); } Err(e) => { - error!( + warn!( log, "Invalid BLS to execution change"; "validator_index" => validator_index, - "source" => "HTTP API", + "reason" => ?e, + "source" => "HTTP", ); failures.push(api_types::Failure::new( index, diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 942a1167c2f..eaaa4e86463 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -1,8 +1,8 @@ //! Tests for API behaviour across fork boundaries. use crate::common::*; use beacon_chain::{test_utils::RelativeSyncCommittee, StateSkipConfig}; -use eth2::types::{StateId, SyncSubcommittee}; -use types::{ChainSpec, Epoch, EthSpec, MinimalEthSpec, Slot}; +use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; +use types::{Address, ChainSpec, Epoch, EthSpec, MinimalEthSpec, Slot}; type E = MinimalEthSpec; @@ -12,6 +12,14 @@ fn altair_spec(altair_fork_epoch: Epoch) -> ChainSpec { spec } +fn capella_spec(capella_fork_epoch: Epoch) -> ChainSpec { + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(capella_fork_epoch); + spec +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn sync_committee_duties_across_fork() { let validator_count = E::sync_committee_size(); @@ -307,3 +315,171 @@ async fn sync_committee_indices_across_fork() { ); } } + +/// Assert that an HTTP API error has the given status code and indexed errors for the given indices. +fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Vec) { + let eth2::Error::ServerIndexedMessage(IndexedErrorMessage { + code, + failures, + .. + }) = error else { + panic!("wrong error, expected ServerIndexedMessage, got: {error:?}") + }; + assert_eq!(code, status_code); + assert_eq!(failures.len(), indices.len()); + for (index, failure) in indices.into_iter().zip(failures) { + assert_eq!(failure.index, index as u64); + } +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn bls_to_execution_changes_update_all_around_capella_fork() { + let validator_count = 128; + let fork_epoch = Epoch::new(2); + let spec = capella_spec(fork_epoch); + let max_bls_to_execution_changes = E::max_bls_to_execution_changes(); + let tester = InteractiveTester::::new(Some(spec.clone()), validator_count).await; + let harness = &tester.harness; + let client = &tester.client; + + let all_validators = harness.get_all_validators(); + let all_validators_u64 = all_validators.iter().map(|x| *x as u64).collect::>(); + + // Create a bunch of valid address changes. + let valid_address_changes = all_validators_u64 + .iter() + .map(|&validator_index| { + harness.make_bls_to_execution_change( + validator_index, + Address::from_low_u64_be(validator_index), + ) + }) + .collect::>(); + + // Address changes which conflict with `valid_address_changes` on the address chosen. 
+ let conflicting_address_changes = all_validators_u64 + .iter() + .map(|&validator_index| { + harness.make_bls_to_execution_change( + validator_index, + Address::from_low_u64_be(validator_index + 1), + ) + }) + .collect::>(); + + // Address changes signed with the wrong key. + let wrong_key_address_changes = all_validators_u64 + .iter() + .map(|&validator_index| { + // Use the correct pubkey. + let pubkey = &harness.get_withdrawal_keypair(validator_index).pk; + // And the wrong secret key. + let secret_key = &harness + .get_withdrawal_keypair((validator_index + 1) % validator_count as u64) + .sk; + harness.make_bls_to_execution_change_with_keys( + validator_index, + Address::from_low_u64_be(validator_index), + pubkey, + secret_key, + ) + }) + .collect::>(); + + // Submit some changes before Capella. Just enough to fill two blocks. + let num_pre_capella = validator_count / 4; + let blocks_filled_pre_capella = 2; + assert_eq!( + num_pre_capella, + blocks_filled_pre_capella * max_bls_to_execution_changes + ); + + client + .post_beacon_pool_bls_to_execution_changes(&valid_address_changes[..num_pre_capella]) + .await + .unwrap(); + + // Conflicting changes for the same validators should all fail. + let error = client + .post_beacon_pool_bls_to_execution_changes(&conflicting_address_changes[..num_pre_capella]) + .await + .unwrap_err(); + assert_server_indexed_error(error, 400, (0..num_pre_capella).collect()); + + // Re-submitting the same changes should be accepted. + client + .post_beacon_pool_bls_to_execution_changes(&valid_address_changes[..num_pre_capella]) + .await + .unwrap(); + + // Invalid changes signed with the wrong keys should all be rejected without affecting the seen + // indices filters (apply ALL of them). + let error = client + .post_beacon_pool_bls_to_execution_changes(&wrong_key_address_changes) + .await + .unwrap_err(); + assert_server_indexed_error(error, 400, all_validators.clone()); + + // Advance to right before Capella. + let capella_slot = fork_epoch.start_slot(E::slots_per_epoch()); + harness.extend_to_slot(capella_slot - 1).await; + assert_eq!(harness.head_slot(), capella_slot - 1); + + // Add Capella blocks which should be full of BLS to execution changes. + for i in 0..validator_count / max_bls_to_execution_changes { + let head_block_root = harness.extend_slots(1).await; + let head_block = harness + .chain + .get_block(&head_block_root) + .await + .unwrap() + .unwrap(); + + let bls_to_execution_changes = head_block + .message() + .body() + .bls_to_execution_changes() + .unwrap(); + + // Block should be full. + assert_eq!( + bls_to_execution_changes.len(), + max_bls_to_execution_changes, + "block not full on iteration {i}" + ); + + // Included changes should be the ones from `valid_address_changes` in any order. + for address_change in bls_to_execution_changes.iter() { + assert!(valid_address_changes.contains(address_change)); + } + + // After the initial 2 blocks, add the rest of the changes using a large + // request containing all the valid, all the conflicting and all the invalid. + // Despite the invalid and duplicate messages, the new ones should still get picked up by + // the pool. 
+ if i == blocks_filled_pre_capella - 1 { + let all_address_changes: Vec<_> = [ + valid_address_changes.clone(), + conflicting_address_changes.clone(), + wrong_key_address_changes.clone(), + ] + .concat(); + + let error = client + .post_beacon_pool_bls_to_execution_changes(&all_address_changes) + .await + .unwrap_err(); + assert_server_indexed_error( + error, + 400, + (validator_count..3 * validator_count).collect(), + ); + } + } + + // Eventually all validators should have eth1 withdrawal credentials. + let head_state = harness.get_current_state(); + for validator in head_state.validators() { + assert!(validator.has_eth1_withdrawal_credential(&spec)); + } +} diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 00141312bd8..700bed8668f 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -1220,7 +1220,7 @@ impl Worker { "error" => ?e ); // We ignore pre-capella messages without penalizing peers. - if matches!(e, BeaconChainError::BlsToExecutionChangeBadFork(_)) { + if matches!(e, BeaconChainError::BlsToExecutionPriorToCapella) { self.propagate_validation_result( message_id, peer_id, diff --git a/beacon_node/operation_pool/src/bls_to_execution_changes.rs b/beacon_node/operation_pool/src/bls_to_execution_changes.rs new file mode 100644 index 00000000000..84513d466e9 --- /dev/null +++ b/beacon_node/operation_pool/src/bls_to_execution_changes.rs @@ -0,0 +1,105 @@ +use state_processing::SigVerifiedOp; +use std::collections::{hash_map::Entry, HashMap}; +use std::sync::Arc; +use types::{ + AbstractExecPayload, BeaconState, ChainSpec, EthSpec, SignedBeaconBlock, + SignedBlsToExecutionChange, +}; + +/// Pool of BLS to execution changes that maintains a LIFO queue and an index by validator. +/// +/// Using the LIFO queue for block production disincentivises spam on P2P at the Capella fork, +/// and is less-relevant after that. +#[derive(Debug, Default)] +pub struct BlsToExecutionChanges { + /// Map from validator index to BLS to execution change. + by_validator_index: HashMap>>, + /// Last-in-first-out (LIFO) queue of verified messages. + queue: Vec>>, +} + +impl BlsToExecutionChanges { + pub fn existing_change_equals( + &self, + address_change: &SignedBlsToExecutionChange, + ) -> Option { + self.by_validator_index + .get(&address_change.message.validator_index) + .map(|existing| existing.as_inner() == address_change) + } + + pub fn insert( + &mut self, + verified_change: SigVerifiedOp, + ) -> bool { + // Wrap in an `Arc` once on insert. + let verified_change = Arc::new(verified_change); + match self + .by_validator_index + .entry(verified_change.as_inner().message.validator_index) + { + Entry::Vacant(entry) => { + self.queue.push(verified_change.clone()); + entry.insert(verified_change); + true + } + Entry::Occupied(_) => false, + } + } + + /// FIFO ordering, used for persistence to disk. + pub fn iter_fifo( + &self, + ) -> impl Iterator>> { + self.queue.iter() + } + + /// LIFO ordering, used for block packing. + pub fn iter_lifo( + &self, + ) -> impl Iterator>> { + self.queue.iter().rev() + } + + /// Prune BLS to execution changes that have been applied to the state more than 1 block ago. + /// + /// The block check is necessary to avoid pruning too eagerly and losing the ability to include + /// address changes during re-orgs. 
This is isn't *perfect* so some address changes could + /// still get stuck if there are gnarly re-orgs and the changes can't be widely republished + /// due to the gossip duplicate rules. + pub fn prune>( + &mut self, + head_block: &SignedBeaconBlock, + head_state: &BeaconState, + spec: &ChainSpec, + ) { + let mut validator_indices_pruned = vec![]; + + self.queue.retain(|address_change| { + let validator_index = address_change.as_inner().message.validator_index; + head_state + .validators() + .get(validator_index as usize) + .map_or(true, |validator| { + let prune = validator.has_eth1_withdrawal_credential(spec) + && head_block + .message() + .body() + .bls_to_execution_changes() + .map_or(true, |recent_changes| { + !recent_changes + .iter() + .any(|c| c.message.validator_index == validator_index) + }); + if prune { + validator_indices_pruned.push(validator_index); + } + !prune + }) + }); + + for validator_index in validator_indices_pruned { + self.by_validator_index.remove(&validator_index); + } + } +} diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 70e0d56bc91..4643addad52 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -2,6 +2,7 @@ mod attestation; mod attestation_id; mod attestation_storage; mod attester_slashing; +mod bls_to_execution_changes; mod max_cover; mod metrics; mod persistence; @@ -18,6 +19,7 @@ pub use persistence::{ pub use reward_cache::RewardCache; use crate::attestation_storage::{AttestationMap, CheckpointKey}; +use crate::bls_to_execution_changes::BlsToExecutionChanges; use crate::sync_aggregate_id::SyncAggregateId; use attester_slashing::AttesterSlashingMaxCover; use max_cover::maximum_cover; @@ -51,8 +53,8 @@ pub struct OperationPool { proposer_slashings: RwLock>>, /// Map from exiting validator to their exit data. voluntary_exits: RwLock>>, - /// Map from credential changing validator to their execution change data. - bls_to_execution_changes: RwLock>>, + /// Map from credential changing validator to their position in the queue. + bls_to_execution_changes: RwLock>, /// Reward cache for accelerating attestation packing. reward_cache: RwLock, _phantom: PhantomData, @@ -513,15 +515,28 @@ impl OperationPool { ); } - /// Insert a BLS to execution change into the pool. + /// Check if an address change equal to `address_change` is already in the pool. + /// + /// Return `None` if no address change for the validator index exists in the pool. + pub fn bls_to_execution_change_in_pool_equals( + &self, + address_change: &SignedBlsToExecutionChange, + ) -> Option { + self.bls_to_execution_changes + .read() + .existing_change_equals(address_change) + } + + /// Insert a BLS to execution change into the pool, *only if* no prior change is known. + /// + /// Return `true` if the change was inserted. pub fn insert_bls_to_execution_change( &self, verified_change: SigVerifiedOp, - ) { - self.bls_to_execution_changes.write().insert( - verified_change.as_inner().message.validator_index, - verified_change, - ); + ) -> bool { + self.bls_to_execution_changes + .write() + .insert(verified_change) } /// Get a list of execution changes for inclusion in a block. 
@@ -533,7 +548,7 @@ impl OperationPool { spec: &ChainSpec, ) -> Vec { filter_limit_operations( - self.bls_to_execution_changes.read().values(), + self.bls_to_execution_changes.read().iter_lifo(), |address_change| { address_change.signature_is_still_valid(&state.fork()) && state @@ -548,33 +563,15 @@ impl OperationPool { } /// Prune BLS to execution changes that have been applied to the state more than 1 block ago. - /// - /// The block check is necessary to avoid pruning too eagerly and losing the ability to include - /// address changes during re-orgs. This is isn't *perfect* so some address changes could - /// still get stuck if there are gnarly re-orgs and the changes can't be widely republished - /// due to the gossip duplicate rules. pub fn prune_bls_to_execution_changes>( &self, head_block: &SignedBeaconBlock, head_state: &BeaconState, spec: &ChainSpec, ) { - prune_validator_hash_map( - &mut self.bls_to_execution_changes.write(), - |validator_index, validator| { - validator.has_eth1_withdrawal_credential(spec) - && head_block - .message() - .body() - .bls_to_execution_changes() - .map_or(true, |recent_changes| { - !recent_changes - .iter() - .any(|c| c.message.validator_index == validator_index) - }) - }, - head_state, - ); + self.bls_to_execution_changes + .write() + .prune(head_block, head_state, spec) } /// Prune all types of transactions given the latest head state and head fork. @@ -663,8 +660,8 @@ impl OperationPool { pub fn get_all_bls_to_execution_changes(&self) -> Vec { self.bls_to_execution_changes .read() - .iter() - .map(|(_, address_change)| address_change.as_inner().clone()) + .iter_fifo() + .map(|address_change| address_change.as_inner().clone()) .collect() } } diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 043e6fb7fd8..4948040ae10 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -1,5 +1,6 @@ use crate::attestation_id::AttestationId; use crate::attestation_storage::AttestationMap; +use crate::bls_to_execution_changes::BlsToExecutionChanges; use crate::sync_aggregate_id::SyncAggregateId; use crate::OpPoolError; use crate::OperationPool; @@ -105,8 +106,8 @@ impl PersistedOperationPool { let bls_to_execution_changes = operation_pool .bls_to_execution_changes .read() - .iter() - .map(|(_, bls_to_execution_change)| bls_to_execution_change.clone()) + .iter_fifo() + .map(|bls_to_execution_change| (**bls_to_execution_change).clone()) .collect(); PersistedOperationPool::V14(PersistedOperationPoolV14 { @@ -153,18 +154,13 @@ impl PersistedOperationPool { PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => { return Err(OpPoolError::IncorrectOpPoolVariant) } - PersistedOperationPool::V14(pool) => RwLock::new( - pool.bls_to_execution_changes - .iter() - .cloned() - .map(|bls_to_execution_change| { - ( - bls_to_execution_change.as_inner().message.validator_index, - bls_to_execution_change, - ) - }) - .collect(), - ), + PersistedOperationPool::V14(pool) => { + let mut bls_to_execution_changes = BlsToExecutionChanges::default(); + for bls_to_execution_change in pool.bls_to_execution_changes { + bls_to_execution_changes.insert(bls_to_execution_change); + } + RwLock::new(bls_to_execution_changes) + } }; let op_pool = OperationPool { attestations, diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 752e472e24d..1dc83d19b81 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1012,6 +1012,24 @@ impl 
BeaconNodeHttpClient { Ok(()) } + /// `POST beacon/pool/bls_to_execution_changes` + pub async fn post_beacon_pool_bls_to_execution_changes( + &self, + address_changes: &[SignedBlsToExecutionChange], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("bls_to_execution_changes"); + + self.post(path, &address_changes).await?; + + Ok(()) + } + /// `GET beacon/deposit_snapshot` pub async fn get_deposit_snapshot(&self) -> Result, Error> { use ssz::Decode; diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index efd356462da..50ac2ff3de5 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -67,7 +67,7 @@ where fn new(op: T, state: &BeaconState) -> Self { let verified_against = VerifiedAgainst { fork_versions: op - .verification_epochs(state.current_epoch()) + .verification_epochs() .into_iter() .map(|epoch| state.fork().get_fork_version(epoch)) .collect(), @@ -89,13 +89,9 @@ where } pub fn signature_is_still_valid(&self, current_fork: &Fork) -> bool { - // Pass the fork's epoch as the effective current epoch. If the message is a current-epoch - // style message like `SignedBlsToExecutionChange` then `get_fork_version` will return the - // current fork version and we'll check it matches the fork version the message was checked - // against. - let effective_current_epoch = current_fork.epoch; + // The .all() will return true if the iterator is empty. self.as_inner() - .verification_epochs(effective_current_epoch) + .verification_epochs() .into_iter() .zip(self.verified_against.fork_versions.iter()) .all(|(epoch, verified_fork_version)| { @@ -126,12 +122,8 @@ pub trait VerifyOperation: Encode + Decode + Sized { /// /// These need to map 1-to-1 to the `SigVerifiedOp::verified_against` for this type. /// - /// If the message contains no inherent epoch it should return the `current_epoch` that is - /// passed in, as that's the epoch at which it was verified. - fn verification_epochs( - &self, - current_epoch: Epoch, - ) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]>; + /// If the message is valid across all forks it should return an empty smallvec. + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]>; } impl VerifyOperation for SignedVoluntaryExit { @@ -147,7 +139,7 @@ impl VerifyOperation for SignedVoluntaryExit { } #[allow(clippy::integer_arithmetic)] - fn verification_epochs(&self, _: Epoch) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![self.message.epoch] } } @@ -165,7 +157,7 @@ impl VerifyOperation for AttesterSlashing { } #[allow(clippy::integer_arithmetic)] - fn verification_epochs(&self, _: Epoch) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![ self.attestation_1.data.target.epoch, self.attestation_2.data.target.epoch @@ -186,7 +178,7 @@ impl VerifyOperation for ProposerSlashing { } #[allow(clippy::integer_arithmetic)] - fn verification_epochs(&self, _: Epoch) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { // Only need a single epoch because the slots of the two headers must be equal. 
smallvec![self .signed_header_1 @@ -209,10 +201,7 @@ impl VerifyOperation for SignedBlsToExecutionChange { } #[allow(clippy::integer_arithmetic)] - fn verification_epochs( - &self, - current_epoch: Epoch, - ) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { - smallvec![current_epoch] + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + smallvec![] } } diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index f6064f65ab5..cb73e43f9ac 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -28,6 +28,26 @@ pub struct BlsToExecutionChange { impl SignedRoot for BlsToExecutionChange {} +impl BlsToExecutionChange { + pub fn sign( + self, + secret_key: &SecretKey, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> SignedBlsToExecutionChange { + let domain = spec.compute_domain( + Domain::BlsToExecutionChange, + spec.genesis_fork_version, + genesis_validators_root, + ); + let message = self.signing_root(domain); + SignedBlsToExecutionChange { + message: self, + signature: secret_key.sign(message), + } + } +} + #[cfg(test)] mod tests { use super::*; From 2802bc9a9c052654a1dac3e79a1f6b7cc2cdcf6c Mon Sep 17 00:00:00 2001 From: naviechan Date: Tue, 24 Jan 2023 02:06:42 +0000 Subject: [PATCH 134/263] Implement sync_committee_rewards API (per-validator reward) (#3903) ## Issue Addressed [#3661](https://github.com/sigp/lighthouse/issues/3661) ## Proposed Changes `/eth/v1/beacon/rewards/sync_committee/{block_id}` ``` { "execution_optimistic": false, "finalized": false, "data": [ { "validator_index": "0", "reward": "2000" } ] } ``` The issue contains the implementation of three per-validator reward APIs: * `sync_committee_rewards` * [`attestation_rewards`](https://github.com/sigp/lighthouse/pull/3822) * `block_rewards` This PR only implements the `sync_committe_rewards `. The endpoints can be viewed in the Ethereum Beacon nodes API browser: [https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Rewards](https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Rewards) ## Additional Info The implementation of [consensus client reward APIs](https://github.com/eth-protocol-fellows/cohort-three/blob/master/projects/project-ideas.md#consensus-client-reward-apis) is part of the [EPF](https://github.com/eth-protocol-fellows/cohort-three). 
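The endpoint is a `POST` whose JSON body lists the validators of interest as string-encoded indices or public keys; an empty array returns the full reward set for the block. Illustrative request body only (the indices are arbitrary examples):

```
["0", "1", "2"]
```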
Co-authored-by: navie Co-authored-by: kevinbogner --- beacon_node/beacon_chain/src/errors.rs | 1 + beacon_node/beacon_chain/src/lib.rs | 1 + .../src/sync_committee_rewards.rs | 87 +++++++++++++ beacon_node/beacon_chain/src/test_utils.rs | 25 ++++ beacon_node/beacon_chain/tests/main.rs | 1 + beacon_node/beacon_chain/tests/rewards.rs | 121 ++++++++++++++++++ beacon_node/http_api/src/lib.rs | 37 ++++++ .../http_api/src/sync_committee_rewards.rs | 77 +++++++++++ common/eth2/src/lib.rs | 18 +++ common/eth2/src/lighthouse.rs | 2 + .../src/lighthouse/sync_committee_rewards.rs | 12 ++ 11 files changed, 382 insertions(+) create mode 100644 beacon_node/beacon_chain/src/sync_committee_rewards.rs create mode 100644 beacon_node/beacon_chain/tests/rewards.rs create mode 100644 beacon_node/http_api/src/sync_committee_rewards.rs create mode 100644 common/eth2/src/lighthouse/sync_committee_rewards.rs diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 17f58b223f4..24ea07833d9 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -160,6 +160,7 @@ pub enum BeaconChainError { BlockRewardSlotError, BlockRewardAttestationError, BlockRewardSyncError, + SyncCommitteeRewardsSyncError, HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), HeadBlockMissingFromForkChoice(Hash256), diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index ae1c5e4b766..ae3e98f9131 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -40,6 +40,7 @@ pub mod schema_change; mod shuffling_cache; mod snapshot_cache; pub mod state_advance_timer; +pub mod sync_committee_rewards; pub mod sync_committee_verification; pub mod test_utils; mod timeout_rw_lock; diff --git a/beacon_node/beacon_chain/src/sync_committee_rewards.rs b/beacon_node/beacon_chain/src/sync_committee_rewards.rs new file mode 100644 index 00000000000..561fed1a86a --- /dev/null +++ b/beacon_node/beacon_chain/src/sync_committee_rewards.rs @@ -0,0 +1,87 @@ +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; + +use eth2::lighthouse::SyncCommitteeReward; +use safe_arith::SafeArith; +use slog::error; +use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; +use std::collections::HashMap; +use store::RelativeEpoch; +use types::{BeaconBlockRef, BeaconState, ExecPayload}; + +impl BeaconChain { + pub fn compute_sync_committee_rewards>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &mut BeaconState, + ) -> Result, BeaconChainError> { + if block.slot() != state.slot() { + return Err(BeaconChainError::BlockRewardSlotError); + } + + let spec = &self.spec; + + state.build_committee_cache(RelativeEpoch::Current, spec)?; + + let sync_aggregate = block.body().sync_aggregate()?; + + let sync_committee = state.current_sync_committee()?.clone(); + + let sync_committee_indices = state.get_sync_committee_indices(&sync_committee)?; + + let (participant_reward_value, proposer_reward_per_bit) = + compute_sync_aggregate_rewards(state, spec).map_err(|e| { + error!( + self.log, "Error calculating sync aggregate rewards"; + "error" => ?e + ); + BeaconChainError::SyncCommitteeRewardsSyncError + })?; + + let mut balances = HashMap::::new(); + + let mut total_proposer_rewards = 0; + let proposer_index = state.get_beacon_proposer_index(block.slot(), spec)?; + + // Apply rewards to participant balances. 
Keep track of proposer rewards + for (validator_index, participant_bit) in sync_committee_indices + .iter() + .zip(sync_aggregate.sync_committee_bits.iter()) + { + let participant_balance = balances + .entry(*validator_index) + .or_insert_with(|| state.balances()[*validator_index]); + + if participant_bit { + participant_balance.safe_add_assign(participant_reward_value)?; + + balances + .entry(proposer_index) + .or_insert_with(|| state.balances()[proposer_index]) + .safe_add_assign(proposer_reward_per_bit)?; + + total_proposer_rewards.safe_add_assign(proposer_reward_per_bit)?; + } else { + *participant_balance = participant_balance.saturating_sub(participant_reward_value); + } + } + + Ok(balances + .iter() + .filter_map(|(i, new_balance)| { + let reward = if *i != proposer_index { + *new_balance as i64 - state.balances()[*i] as i64 + } else if sync_committee_indices.contains(i) { + *new_balance as i64 + - state.balances()[*i] as i64 + - total_proposer_rewards as i64 + } else { + return None; + }; + Some(SyncCommitteeReward { + validator_index: *i as u64, + reward, + }) + }) + .collect()) + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 66de3f02d23..749487dc5aa 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2,6 +2,7 @@ pub use crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, + sync_committee_verification::Error as SyncCommitteeError, validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification, }; @@ -1980,6 +1981,30 @@ where (honest_head, faulty_head) } + + pub fn process_sync_contributions( + &self, + sync_contributions: HarnessSyncContributions, + ) -> Result<(), SyncCommitteeError> { + let mut verified_contributions = Vec::with_capacity(sync_contributions.len()); + + for (_, contribution_and_proof) in sync_contributions { + let signed_contribution_and_proof = contribution_and_proof.unwrap(); + + let verified_contribution = self + .chain + .verify_sync_contribution_for_gossip(signed_contribution_and_proof)?; + + verified_contributions.push(verified_contribution); + } + + for verified_contribution in verified_contributions { + self.chain + .add_contribution_to_block_inclusion_pool(verified_contribution)?; + } + + Ok(()) + } } // Junk `Debug` impl to satistfy certain trait bounds during testing. 
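As background for the rewards test added below: the `(participant_reward_value, proposer_reward_per_bit)` pair that `compute_sync_committee_rewards` obtains from `compute_sync_aggregate_rewards` follows the Altair `process_sync_aggregate` rules. A rough, illustrative sketch of that derivation, using mainnet spec constants and plain integer arithmetic rather than Lighthouse's `safe_arith` helpers (the inputs and constant names are spec-level quantities, not Lighthouse identifiers):

```
// Illustrative sketch of the Altair sync aggregate reward derivation (mainnet
// constants, plain integer arithmetic, no error handling). The inputs and
// constant names are spec-level quantities, not Lighthouse identifiers.
fn sync_aggregate_rewards(total_active_balance: u64, base_reward_per_increment: u64) -> (u64, u64) {
    const EFFECTIVE_BALANCE_INCREMENT: u64 = 1_000_000_000; // Gwei
    const SYNC_REWARD_WEIGHT: u64 = 2;
    const PROPOSER_WEIGHT: u64 = 8;
    const WEIGHT_DENOMINATOR: u64 = 64;
    const SLOTS_PER_EPOCH: u64 = 32;
    const SYNC_COMMITTEE_SIZE: u64 = 512;

    let total_active_increments = total_active_balance / EFFECTIVE_BALANCE_INCREMENT;
    let total_base_rewards = base_reward_per_increment * total_active_increments;
    let max_participant_rewards =
        total_base_rewards * SYNC_REWARD_WEIGHT / WEIGHT_DENOMINATOR / SLOTS_PER_EPOCH;
    // Paid to each sync committee member whose participation bit is set.
    let participant_reward = max_participant_rewards / SYNC_COMMITTEE_SIZE;
    // Paid to the block proposer for each participation bit that is set.
    let proposer_reward =
        participant_reward * PROPOSER_WEIGHT / (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT);
    (participant_reward, proposer_reward)
}
```

Non-participating committee members have `participant_reward` deducted from their balance, which is why the per-validator `reward` field is a signed integer and can be negative.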
diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs index 1c61e9927fc..eceb4f2e85a 100644 --- a/beacon_node/beacon_chain/tests/main.rs +++ b/beacon_node/beacon_chain/tests/main.rs @@ -4,6 +4,7 @@ mod block_verification; mod merge; mod op_verification; mod payload_invalidation; +mod rewards; mod store_tests; mod sync_committee_verification; mod tests; diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs new file mode 100644 index 00000000000..b61bea12429 --- /dev/null +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -0,0 +1,121 @@ +#![cfg(test)] + +use std::collections::HashMap; + +use beacon_chain::test_utils::{ + generate_deterministic_keypairs, BeaconChainHarness, EphemeralHarnessType, +}; +use beacon_chain::{ + test_utils::{AttestationStrategy, BlockStrategy, RelativeSyncCommittee}, + types::{Epoch, EthSpec, Keypair, MinimalEthSpec}, +}; +use lazy_static::lazy_static; + +pub const VALIDATOR_COUNT: usize = 64; + +lazy_static! { + static ref KEYPAIRS: Vec = generate_deterministic_keypairs(VALIDATOR_COUNT); +} + +fn get_harness() -> BeaconChainHarness> { + let mut spec = E::default_spec(); + + spec.altair_fork_epoch = Some(Epoch::new(0)); // We use altair for all tests + + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .keypairs(KEYPAIRS.to_vec()) + .fresh_ephemeral_store() + .build(); + + harness.advance_slot(); + + harness +} + +#[tokio::test] +async fn test_sync_committee_rewards() { + let num_block_produced = MinimalEthSpec::slots_per_epoch(); + let harness = get_harness::(); + + let latest_block_root = harness + .extend_chain( + num_block_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Create and add sync committee message to op_pool + let sync_contributions = harness.make_sync_contributions( + &harness.get_current_state(), + latest_block_root, + harness.get_current_slot(), + RelativeSyncCommittee::Current, + ); + + harness + .process_sync_contributions(sync_contributions) + .unwrap(); + + // Add block + let chain = &harness.chain; + let (head_state, head_state_root) = harness.get_current_state_and_root(); + let target_slot = harness.get_current_slot() + 1; + + let (block_root, mut state) = harness + .add_attested_block_at_slot(target_slot, head_state, head_state_root, &[]) + .await + .unwrap(); + + let block = harness.get_block(block_root).unwrap(); + let parent_block = chain + .get_blinded_block(&block.parent_root()) + .unwrap() + .unwrap(); + let parent_state = chain + .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .unwrap() + .unwrap(); + + let reward_payload = chain + .compute_sync_committee_rewards(block.message(), &mut state) + .unwrap(); + + let rewards = reward_payload + .iter() + .map(|reward| (reward.validator_index, reward.reward)) + .collect::>(); + + let proposer_index = state + .get_beacon_proposer_index(target_slot, &MinimalEthSpec::default_spec()) + .unwrap(); + + let mut mismatches = vec![]; + + for validator in state.validators() { + let validator_index = state + .clone() + .get_validator_index(&validator.pubkey) + .unwrap() + .unwrap(); + let pre_state_balance = parent_state.balances()[validator_index]; + let post_state_balance = state.balances()[validator_index]; + let sync_committee_reward = rewards.get(&(validator_index as u64)).unwrap_or(&0); + + if validator_index == proposer_index { + continue; // Ignore proposer + } + + if pre_state_balance as i64 + 
*sync_committee_reward != post_state_balance as i64 { + mismatches.push(validator_index.to_string()); + } + } + + assert_eq!( + mismatches.len(), + 0, + "Expect 0 mismatches, but these validators have mismatches on balance: {} ", + mismatches.join(",") + ); +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 8cd0b856b51..1399bb99a4f 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -16,6 +16,7 @@ mod metrics; mod proposer_duties; mod publish_blocks; mod state_id; +mod sync_committee_rewards; mod sync_committees; mod ui; mod validator_inclusion; @@ -1699,6 +1700,41 @@ pub fn serve( }, ); + /* + * beacon/rewards + */ + + let beacon_rewards_path = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("rewards")) + .and(chain_filter.clone()); + + // POST beacon/rewards/sync_committee/{block_id} + let post_beacon_rewards_sync_committee = beacon_rewards_path + .clone() + .and(warp::path("sync_committee")) + .and(block_id_or_err) + .and(warp::path::end()) + .and(warp::body::json()) + .and(log_filter.clone()) + .and_then( + |chain: Arc>, + block_id: BlockId, + validators: Vec, + log: Logger| { + blocking_json_task(move || { + let (rewards, execution_optimistic) = + sync_committee_rewards::compute_sync_committee_rewards( + chain, block_id, validators, log, + )?; + + Ok(rewards) + .map(api_types::GenericResponse::from) + .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + }) + }, + ); + /* * config */ @@ -3396,6 +3432,7 @@ pub fn serve( .or(post_beacon_pool_proposer_slashings.boxed()) .or(post_beacon_pool_voluntary_exits.boxed()) .or(post_beacon_pool_sync_committees.boxed()) + .or(post_beacon_rewards_sync_committee.boxed()) .or(post_validator_duties_attester.boxed()) .or(post_validator_duties_sync.boxed()) .or(post_validator_aggregate_and_proofs.boxed()) diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs new file mode 100644 index 00000000000..ae369115d5c --- /dev/null +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -0,0 +1,77 @@ +use crate::{BlockId, ExecutionOptimistic}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::SyncCommitteeReward; +use eth2::types::ValidatorId; +use slog::{debug, Logger}; +use state_processing::BlockReplayer; +use std::sync::Arc; +use types::{BeaconState, SignedBlindedBeaconBlock}; +use warp_utils::reject::{beacon_chain_error, custom_not_found}; + +pub fn compute_sync_committee_rewards( + chain: Arc>, + block_id: BlockId, + validators: Vec, + log: Logger, +) -> Result<(Option>, ExecutionOptimistic), warp::Rejection> { + let (block, execution_optimistic) = block_id.blinded_block(&chain)?; + + let mut state = get_state_before_applying_block(chain.clone(), &block)?; + + let reward_payload = chain + .compute_sync_committee_rewards(block.message(), &mut state) + .map_err(beacon_chain_error)?; + + let data = if reward_payload.is_empty() { + debug!(log, "compute_sync_committee_rewards returned empty"); + None + } else if validators.is_empty() { + Some(reward_payload) + } else { + Some( + reward_payload + .into_iter() + .filter(|reward| { + validators.iter().any(|validator| match validator { + ValidatorId::Index(i) => reward.validator_index == *i, + ValidatorId::PublicKey(pubkey) => match state.get_validator_index(pubkey) { + Ok(Some(i)) => reward.validator_index == i as u64, + _ => false, + }, + }) + }) + .collect::>(), + ) + }; + + Ok((data, execution_optimistic)) +} + +fn 
get_state_before_applying_block( + chain: Arc>, + block: &SignedBlindedBeaconBlock, +) -> Result, warp::reject::Rejection> { + let parent_block: SignedBlindedBeaconBlock = chain + .get_blinded_block(&block.parent_root()) + .and_then(|maybe_block| { + maybe_block.ok_or_else(|| BeaconChainError::MissingBeaconBlock(block.parent_root())) + }) + .map_err(|e| custom_not_found(format!("Parent block is not available! {:?}", e)))?; + + let parent_state = chain + .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .and_then(|maybe_state| { + maybe_state + .ok_or_else(|| BeaconChainError::MissingBeaconState(parent_block.state_root())) + }) + .map_err(|e| custom_not_found(format!("Parent state is not available! {:?}", e)))?; + + let replayer = BlockReplayer::new(parent_state, &chain.spec) + .no_signature_verification() + .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) + .minimal_block_root_verification() + .apply_blocks(vec![], Some(block.slot())) + .map_err(beacon_chain_error)?; + + Ok(replayer.into_state()) +} diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 58b4c88b3c7..00b664446d3 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1026,6 +1026,24 @@ impl BeaconNodeHttpClient { .transpose() } + /// `POST beacon/rewards/sync_committee` + pub async fn post_beacon_rewards_sync_committee( + &self, + rewards: &[Option>], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("rewards") + .push("sync_committee"); + + self.post(path, &rewards).await?; + + Ok(()) + } + /// `POST validator/contribution_and_proofs` pub async fn post_validator_contribution_and_proofs( &self, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 2dced1c449a..068abd693a2 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -3,6 +3,7 @@ mod attestation_performance; mod block_packing_efficiency; mod block_rewards; +mod sync_committee_rewards; use crate::{ ok_or_error, @@ -27,6 +28,7 @@ pub use block_packing_efficiency::{ }; pub use block_rewards::{AttestationRewards, BlockReward, BlockRewardMeta, BlockRewardsQuery}; pub use lighthouse_network::{types::SyncState, PeerInfo}; +pub use sync_committee_rewards::SyncCommitteeReward; // Define "legacy" implementations of `Option` which use four bytes for encoding the union // selector. 
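Alongside the `eth2` client method above, the endpoint can also be queried directly over HTTP. A minimal, hypothetical sketch using `reqwest` (with its `json` feature) and `serde_json`, assuming a beacon node listening on the default HTTP API port (5052 here); the URL, port and helper name are illustrative only:

```
use serde_json::Value;

/// Hypothetical helper: fetch per-validator sync committee rewards for `block_id`
/// ("head", "finalized", a slot or a block root). An empty request body asks for
/// the full reward set; string-encoded indices or pubkeys restrict the response.
async fn fetch_sync_committee_rewards(block_id: &str) -> Result<Value, reqwest::Error> {
    let url = format!("http://localhost:5052/eth/v1/beacon/rewards/sync_committee/{block_id}");
    let validators: Vec<String> = vec![]; // e.g. vec!["0".into(), "1".into()]
    reqwest::Client::new()
        .post(url)
        .json(&validators)
        .send()
        .await?
        .json::<Value>()
        .await
}
```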
diff --git a/common/eth2/src/lighthouse/sync_committee_rewards.rs b/common/eth2/src/lighthouse/sync_committee_rewards.rs new file mode 100644 index 00000000000..cdd6850650c --- /dev/null +++ b/common/eth2/src/lighthouse/sync_committee_rewards.rs @@ -0,0 +1,12 @@ +use serde::{Deserialize, Serialize}; + +// Details about the rewards paid to sync committee members for attesting headers +// All rewards in GWei + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct SyncCommitteeReward { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub validator_index: u64, + // sync committee reward in gwei for the validator + pub reward: i64, +} From 3d4dd6af7511874c532ca01d2312a73f22179d5c Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Tue, 24 Jan 2023 16:22:51 +0100 Subject: [PATCH 135/263] Use eth1_withdrawal_credentials in Test States (#3898) * Use eth1_withdrawal_credential in Some Test States * Update beacon_node/genesis/src/interop.rs Co-authored-by: Michael Sproul * Update beacon_node/genesis/src/interop.rs Co-authored-by: Michael Sproul * Increase validator sizes * Pick next sync committee message Co-authored-by: Michael Sproul Co-authored-by: Paul Hauner --- beacon_node/beacon_chain/src/test_utils.rs | 6 +- beacon_node/beacon_chain/tests/store_tests.rs | 24 +-- .../tests/sync_committee_verification.rs | 7 +- beacon_node/beacon_chain/tests/tests.rs | 2 +- beacon_node/genesis/src/interop.rs | 155 +++++++++++++++++- beacon_node/genesis/src/lib.rs | 4 +- consensus/types/src/beacon_state/tests.rs | 4 +- 7 files changed, 174 insertions(+), 28 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index e71c1a98707..2477640803f 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -22,7 +22,7 @@ use execution_layer::{ }; use fork_choice::CountUnrealized; use futures::channel::mpsc::Receiver; -pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; +pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; use merkle_proof::MerkleTree; use parking_lot::Mutex; @@ -191,7 +191,7 @@ impl Builder> { .unwrap(), ); let mutator = move |builder: BeaconChainBuilder<_>| { - let genesis_state = interop_genesis_state::( + let genesis_state = interop_genesis_state_with_eth1::( &validator_keypairs, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), @@ -252,7 +252,7 @@ impl Builder> { .expect("cannot build without validator keypairs"); let mutator = move |builder: BeaconChainBuilder<_>| { - let genesis_state = interop_genesis_state::( + let genesis_state = interop_genesis_state_with_eth1::( &validator_keypairs, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 8a6ea9cfe1a..622ea7aecd1 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1013,8 +1013,8 @@ fn check_shuffling_compatible( // Ensure blocks from abandoned forks are pruned from the Hot DB #[tokio::test] async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = 
HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1123,8 +1123,8 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { #[tokio::test] async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1255,8 +1255,8 @@ async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { #[tokio::test] async fn pruning_does_not_touch_blocks_prior_to_finalization() { - const HONEST_VALIDATOR_COUNT: usize = 16; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1350,8 +1350,8 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { #[tokio::test] async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1495,8 +1495,8 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { // This is to check if state outside of normal block processing are pruned correctly. #[tokio::test] async fn prunes_skipped_slots_states() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1624,8 +1624,8 @@ async fn prunes_skipped_slots_states() { // This is to check if state outside of normal block processing are pruned correctly. 
#[tokio::test] async fn finalizes_non_epoch_start_slot() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 1e51b0ffb9b..239f55e7d38 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -45,6 +45,7 @@ fn get_valid_sync_committee_message( harness: &BeaconChainHarness>, slot: Slot, relative_sync_committee: RelativeSyncCommittee, + message_index: usize, ) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) { let head_state = harness.chain.head_beacon_state_cloned(); let head_block_root = harness.chain.head_snapshot().beacon_block_root; @@ -52,7 +53,7 @@ fn get_valid_sync_committee_message( .make_sync_committee_messages(&head_state, head_block_root, slot, relative_sync_committee) .get(0) .expect("sync messages should exist") - .get(0) + .get(message_index) .expect("first sync message should exist") .clone(); @@ -494,7 +495,7 @@ async fn unaggregated_gossip_verification() { let current_slot = harness.chain.slot().expect("should get slot"); let (valid_sync_committee_message, expected_validator_index, validator_sk, subnet_id) = - get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current); + get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current, 0); macro_rules! assert_invalid { ($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat_param) |+ $( if $guard: expr )?) => { @@ -644,7 +645,7 @@ async fn unaggregated_gossip_verification() { // **Incorrectly** create a sync message using the current sync committee let (next_valid_sync_committee_message, _, _, next_subnet_id) = - get_valid_sync_committee_message(&harness, target_slot, RelativeSyncCommittee::Current); + get_valid_sync_committee_message(&harness, target_slot, RelativeSyncCommittee::Current, 1); assert_invalid!( "sync message on incorrect subnet", diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index d80db132ef9..384fcbe5db6 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -19,7 +19,7 @@ use types::{ }; // Should ideally be divisible by 3. -pub const VALIDATOR_COUNT: usize = 24; +pub const VALIDATOR_COUNT: usize = 48; lazy_static! { /// A cached set of keys. 
diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index d8c25baec80..f24e94d1baa 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -10,6 +10,20 @@ use types::{ pub const DEFAULT_ETH1_BLOCK_HASH: &[u8] = &[0x42; 32]; +fn bls_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 { + let mut credentials = hash(&pubkey.as_ssz_bytes()); + credentials[0] = spec.bls_withdrawal_prefix_byte; + Hash256::from_slice(&credentials) +} + +fn eth1_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 { + let fake_execution_address = &hash(&pubkey.as_ssz_bytes())[0..20]; + let mut credentials = [0u8; 32]; + credentials[0] = spec.eth1_address_withdrawal_prefix_byte; + credentials[12..].copy_from_slice(fake_execution_address); + Hash256::from_slice(&credentials) +} + /// Builds a genesis state as defined by the Eth2 interop procedure (see below). /// /// Reference: @@ -24,17 +38,67 @@ pub fn interop_genesis_state( let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; - let withdrawal_credentials = |pubkey: &PublicKey| { - let mut credentials = hash(&pubkey.as_ssz_bytes()); - credentials[0] = spec.bls_withdrawal_prefix_byte; - Hash256::from_slice(&credentials) + let datas = keypairs + .into_par_iter() + .map(|keypair| { + let mut data = DepositData { + withdrawal_credentials: bls_withdrawal_credentials(&keypair.pk, spec), + pubkey: keypair.pk.clone().into(), + amount, + signature: Signature::empty().into(), + }; + + data.signature = data.create_signature(&keypair.sk, spec); + + data + }) + .collect::>(); + + let mut state = initialize_beacon_state_from_eth1( + eth1_block_hash, + eth1_timestamp, + genesis_deposits(datas, spec)?, + execution_payload_header, + spec, + ) + .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; + + *state.genesis_time_mut() = genesis_time; + + // Invalidate all the caches after all the manual state surgery. 
+ state + .drop_all_caches() + .map_err(|e| format!("Unable to drop caches: {:?}", e))?; + + Ok(state) +} + +// returns an interop genesis state except every other +// validator has eth1 withdrawal credentials +pub fn interop_genesis_state_with_eth1( + keypairs: &[Keypair], + genesis_time: u64, + eth1_block_hash: Hash256, + execution_payload_header: Option>, + spec: &ChainSpec, +) -> Result, String> { + let eth1_timestamp = 2_u64.pow(40); + let amount = spec.max_effective_balance; + + let withdrawal_credentials = |index: usize, pubkey: &PublicKey| { + if index % 2 == 0 { + bls_withdrawal_credentials(pubkey, spec) + } else { + eth1_withdrawal_credentials(pubkey, spec) + } }; let datas = keypairs .into_par_iter() - .map(|keypair| { + .enumerate() + .map(|(index, keypair)| { let mut data = DepositData { - withdrawal_credentials: withdrawal_credentials(&keypair.pk), + withdrawal_credentials: withdrawal_credentials(index, &keypair.pk), pubkey: keypair.pk.clone().into(), amount, signature: Signature::empty().into(), @@ -133,4 +197,83 @@ mod test { "validator count should be correct" ); } + + #[test] + fn interop_state_with_eth1() { + let validator_count = 16; + let genesis_time = 42; + let spec = &TestEthSpec::default_spec(); + + let keypairs = generate_deterministic_keypairs(validator_count); + + let state = interop_genesis_state_with_eth1::( + &keypairs, + genesis_time, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + spec, + ) + .expect("should build state"); + + assert_eq!( + state.eth1_data().block_hash, + Hash256::from_slice(&[0x42; 32]), + "eth1 block hash should be co-ordinated junk" + ); + + assert_eq!( + state.genesis_time(), + genesis_time, + "genesis time should be as specified" + ); + + for b in state.balances() { + assert_eq!( + *b, spec.max_effective_balance, + "validator balances should be max effective balance" + ); + } + + for (index, v) in state.validators().iter().enumerate() { + let creds = v.withdrawal_credentials.as_bytes(); + if index % 2 == 0 { + assert_eq!( + creds[0], spec.bls_withdrawal_prefix_byte, + "first byte of withdrawal creds should be bls prefix" + ); + assert_eq!( + &creds[1..], + &hash(&v.pubkey.as_ssz_bytes())[1..], + "rest of withdrawal creds should be pubkey hash" + ); + } else { + assert_eq!( + creds[0], spec.eth1_address_withdrawal_prefix_byte, + "first byte of withdrawal creds should be eth1 prefix" + ); + assert_eq!( + creds[1..12], + [0u8; 11], + "bytes [1:12] of withdrawal creds must be zero" + ); + assert_eq!( + &creds[12..], + &hash(&v.pubkey.as_ssz_bytes())[0..20], + "rest of withdrawal creds should be first 20 bytes of pubkey hash" + ) + } + } + + assert_eq!( + state.balances().len(), + validator_count, + "validator balances len should be correct" + ); + + assert_eq!( + state.validators().len(), + validator_count, + "validator count should be correct" + ); + } } diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs index 1233d99fd31..4d5439ac1b3 100644 --- a/beacon_node/genesis/src/lib.rs +++ b/beacon_node/genesis/src/lib.rs @@ -5,5 +5,7 @@ mod interop; pub use eth1::Config as Eth1Config; pub use eth1::Eth1Endpoint; pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; -pub use interop::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; +pub use interop::{ + interop_genesis_state, interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH, +}; pub use types::test_utils::generate_deterministic_keypairs; diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index 
abca10e3726..d63eaafc4b9 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -2,7 +2,7 @@ use crate::test_utils::*; use crate::test_utils::{SeedableRng, XorShiftRng}; use beacon_chain::test_utils::{ - interop_genesis_state, test_spec, BeaconChainHarness, EphemeralHarnessType, + interop_genesis_state_with_eth1, test_spec, BeaconChainHarness, EphemeralHarnessType, DEFAULT_ETH1_BLOCK_HASH, }; use beacon_chain::types::{ @@ -551,7 +551,7 @@ fn tree_hash_cache_linear_history_long_skip() { let spec = &test_spec::(); // This state has a cache that advances normally each slot. - let mut state: BeaconState = interop_genesis_state( + let mut state: BeaconState = interop_genesis_state_with_eth1( &keypairs, 0, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), From a7351c00c0354f54981a929cc9bb1f4c31f43322 Mon Sep 17 00:00:00 2001 From: GeemoCandama Date: Tue, 24 Jan 2023 22:17:50 +0000 Subject: [PATCH 136/263] light client optimistic update reprocessing (#3799) ## Issue Addressed Currently there is a race between receiving blocks and receiving light client optimistic updates (in unstable), which results in processing errors. This is a continuation of PR #3693 and seeks to progress on issue #3651 ## Proposed Changes Add the parent_root to ReprocessQueueMessage::BlockImported so we can remove blocks from queue when a block arrives that has the same parent root. We use the parent root as opposed to the block_root because the LightClientOptimisticUpdate does not contain the block_root. If light_client_optimistic_update.attested_header.canonical_root() != head_block.message().parent_root() then we queue the update. Otherwise we process immediately. ## Additional Info michaelsproul came up with this idea. The code was heavily based off of the attestation reprocessing. I have not properly tested this to see if it works as intended. --- ...t_client_optimistic_update_verification.rs | 15 ++ .../network/src/beacon_processor/mod.rs | 50 ++++- .../work_reprocessing_queue.rs | 200 +++++++++++++++++- .../beacon_processor/worker/gossip_methods.rs | 120 ++++++++--- .../beacon_processor/worker/sync_methods.rs | 6 +- beacon_node/network/src/metrics.rs | 15 ++ 6 files changed, 370 insertions(+), 36 deletions(-) diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index ec9c90e7355..20d7181808a 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -2,6 +2,7 @@ use crate::{ beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; +use eth2::types::Hash256; use slot_clock::SlotClock; use std::time::Duration; use strum::AsRefStr; @@ -36,6 +37,8 @@ pub enum Error { SigSlotStartIsNone, /// Failed to construct a LightClientOptimisticUpdate from state. FailedConstructingUpdate, + /// Unknown block with parent root. + UnknownBlockParentRoot(Hash256), /// Beacon chain error occured. 
BeaconChainError(BeaconChainError), LightClientUpdateError(LightClientUpdateError), @@ -58,6 +61,7 @@ impl From for Error { #[derivative(Clone(bound = "T: BeaconChainTypes"))] pub struct VerifiedLightClientOptimisticUpdate { light_client_optimistic_update: LightClientOptimisticUpdate, + pub parent_root: Hash256, seen_timestamp: Duration, } @@ -107,6 +111,16 @@ impl VerifiedLightClientOptimisticUpdate { None => return Err(Error::SigSlotStartIsNone), } + // check if we can process the optimistic update immediately + // otherwise queue + let canonical_root = light_client_optimistic_update + .attested_header + .canonical_root(); + + if canonical_root != head_block.message().parent_root() { + return Err(Error::UnknownBlockParentRoot(canonical_root)); + } + let optimistic_update = LightClientOptimisticUpdate::new(&chain.spec, head_block, &attested_state)?; @@ -119,6 +133,7 @@ impl VerifiedLightClientOptimisticUpdate { Ok(Self { light_client_optimistic_update, + parent_root: canonical_root, seen_timestamp, }) } diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 743a97a29c2..8118443a65b 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -67,7 +67,8 @@ use types::{ SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; use work_reprocessing_queue::{ - spawn_reprocess_scheduler, QueuedAggregate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, + spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock, + QueuedUnaggregate, ReadyWork, }; use worker::{Toolbox, Worker}; @@ -137,6 +138,10 @@ const MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN: usize = 1_024; /// before we start dropping them. const MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN: usize = 1_024; +/// The maximum number of queued `LightClientOptimisticUpdate` objects received on gossip that will be stored +/// for reprocessing before we start dropping them. +const MAX_GOSSIP_OPTIMISTIC_UPDATE_REPROCESS_QUEUE_LEN: usize = 128; + /// The maximum number of queued `SyncCommitteeMessage` objects that will be stored before we start dropping /// them. const MAX_SYNC_MESSAGE_QUEUE_LEN: usize = 2048; @@ -213,6 +218,7 @@ pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; +pub const UNKNOWN_LIGHT_CLIENT_UPDATE: &str = "unknown_light_client_update"; /// A simple first-in-first-out queue with a maximum length. struct FifoQueue { @@ -694,6 +700,21 @@ impl std::convert::From> for WorkEvent { seen_timestamp, }, }, + ReadyWork::LightClientUpdate(QueuedLightClientUpdate { + peer_id, + message_id, + light_client_optimistic_update, + seen_timestamp, + .. + }) => Self { + drop_during_sync: true, + work: Work::UnknownLightClientOptimisticUpdate { + message_id, + peer_id, + light_client_optimistic_update, + seen_timestamp, + }, + }, } } } @@ -733,6 +754,12 @@ pub enum Work { aggregate: Box>, seen_timestamp: Duration, }, + UnknownLightClientOptimisticUpdate { + message_id: MessageId, + peer_id: PeerId, + light_client_optimistic_update: Box>, + seen_timestamp: Duration, + }, GossipAggregateBatch { packages: Vec>, }, @@ -845,6 +872,7 @@ impl Work { Work::LightClientBootstrapRequest { .. } => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::UnknownBlockAttestation { .. 
} => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, + Work::UnknownLightClientOptimisticUpdate { .. } => UNKNOWN_LIGHT_CLIENT_UPDATE, } } } @@ -979,6 +1007,8 @@ impl BeaconProcessor { // Using a FIFO queue for light client updates to maintain sequence order. let mut finality_update_queue = FifoQueue::new(MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN); let mut optimistic_update_queue = FifoQueue::new(MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN); + let mut unknown_light_client_update_queue = + FifoQueue::new(MAX_GOSSIP_OPTIMISTIC_UPDATE_REPROCESS_QUEUE_LEN); // Using a FIFO queue since blocks need to be imported sequentially. let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN); @@ -1346,6 +1376,9 @@ impl BeaconProcessor { Work::UnknownBlockAggregate { .. } => { unknown_block_aggregate_queue.push(work) } + Work::UnknownLightClientOptimisticUpdate { .. } => { + unknown_light_client_update_queue.push(work, work_id, &self.log) + } } } } @@ -1665,6 +1698,7 @@ impl BeaconProcessor { message_id, peer_id, *light_client_optimistic_update, + Some(work_reprocessing_tx), seen_timestamp, ) }), @@ -1787,6 +1821,20 @@ impl BeaconProcessor { seen_timestamp, ) }), + Work::UnknownLightClientOptimisticUpdate { + message_id, + peer_id, + light_client_optimistic_update, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_optimistic_update( + message_id, + peer_id, + *light_client_optimistic_update, + None, + seen_timestamp, + ) + }), }; } } diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index 2aeec11c325..8c568a7eefd 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -19,7 +19,7 @@ use futures::task::Poll; use futures::{Stream, StreamExt}; use lighthouse_network::{MessageId, PeerId}; use logging::TimeLatch; -use slog::{crit, debug, error, warn, Logger}; +use slog::{crit, debug, error, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::pin::Pin; @@ -30,12 +30,16 @@ use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::error::Error as TimeError; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; -use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, SubnetId}; +use types::{ + Attestation, EthSpec, Hash256, LightClientOptimisticUpdate, SignedAggregateAndProof, + SignedBeaconBlock, SubnetId, +}; const TASK_NAME: &str = "beacon_processor_reprocess_queue"; const GOSSIP_BLOCKS: &str = "gossip_blocks"; const RPC_BLOCKS: &str = "rpc_blocks"; const ATTESTATIONS: &str = "attestations"; +const LIGHT_CLIENT_UPDATES: &str = "lc_updates"; /// Queue blocks for re-processing with an `ADDITIONAL_QUEUED_BLOCK_DELAY` after the slot starts. /// This is to account for any slight drift in the system clock. @@ -44,6 +48,9 @@ const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); /// For how long to queue aggregated and unaggregated attestations for re-processing. pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); +/// For how long to queue light client updates for re-processing. +pub const QUEUED_LIGHT_CLIENT_UPDATE_DELAY: Duration = Duration::from_secs(12); + /// For how long to queue rpc blocks before sending them back for reprocessing. 
pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(3); @@ -55,6 +62,9 @@ const MAXIMUM_QUEUED_BLOCKS: usize = 16; /// How many attestations we keep before new ones get dropped. const MAXIMUM_QUEUED_ATTESTATIONS: usize = 16_384; +/// How many light client updates we keep before new ones get dropped. +const MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES: usize = 128; + /// Messages that the scheduler can receive. pub enum ReprocessQueueMessage { /// A block that has been received early and we should queue for later processing. @@ -62,13 +72,18 @@ pub enum ReprocessQueueMessage { /// A gossip block for hash `X` is being imported, we should queue the rpc block for the same /// hash until the gossip block is imported. RpcBlock(QueuedRpcBlock), - /// A block that was successfully processed. We use this to handle attestations for unknown - /// blocks. - BlockImported(Hash256), + /// A block that was successfully processed. We use this to handle attestations and light client updates + /// for unknown blocks. + BlockImported { + block_root: Hash256, + parent_root: Hash256, + }, /// An unaggregated attestation that references an unknown block. UnknownBlockUnaggregate(QueuedUnaggregate), /// An aggregated attestation that references an unknown block. UnknownBlockAggregate(QueuedAggregate), + /// A light client optimistic update that references a parent root that has not been seen as a parent. + UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate), } /// Events sent by the scheduler once they are ready for re-processing. @@ -77,6 +92,7 @@ pub enum ReadyWork { RpcBlock(QueuedRpcBlock), Unaggregate(QueuedUnaggregate), Aggregate(QueuedAggregate), + LightClientUpdate(QueuedLightClientUpdate), } /// An Attestation for which the corresponding block was not seen while processing, queued for @@ -99,6 +115,16 @@ pub struct QueuedAggregate { pub seen_timestamp: Duration, } +/// A light client update for which the corresponding parent block was not seen while processing, +/// queued for later. +pub struct QueuedLightClientUpdate { + pub peer_id: PeerId, + pub message_id: MessageId, + pub light_client_optimistic_update: Box>, + pub parent_root: Hash256, + pub seen_timestamp: Duration, +} + /// A block that arrived early and has been queued for later import. pub struct QueuedGossipBlock { pub peer_id: PeerId, @@ -127,6 +153,8 @@ enum InboundEvent { ReadyRpcBlock(QueuedRpcBlock), /// An aggregated or unaggregated attestation is ready for re-processing. ReadyAttestation(QueuedAttestationId), + /// A light client update that is ready for re-processing. + ReadyLightClientUpdate(QueuedLightClientUpdateId), /// A `DelayQueue` returned an error. DelayQueueError(TimeError, &'static str), /// A message sent to the `ReprocessQueue` @@ -147,6 +175,8 @@ struct ReprocessQueue { rpc_block_delay_queue: DelayQueue>, /// Queue to manage scheduled attestations. attestations_delay_queue: DelayQueue, + /// Queue to manage scheduled light client updates. + lc_updates_delay_queue: DelayQueue, /* Queued items */ /// Queued blocks. @@ -157,15 +187,23 @@ struct ReprocessQueue { queued_unaggregates: FnvHashMap, DelayKey)>, /// Attestations (aggregated and unaggregated) per root. awaiting_attestations_per_root: HashMap>, + /// Queued Light Client Updates. + queued_lc_updates: FnvHashMap, DelayKey)>, + /// Light Client Updates per parent_root. 
+ awaiting_lc_updates_per_parent_root: HashMap>, /* Aux */ /// Next attestation id, used for both aggregated and unaggregated attestations next_attestation: usize, + next_lc_update: usize, early_block_debounce: TimeLatch, rpc_block_debounce: TimeLatch, attestation_delay_debounce: TimeLatch, + lc_update_delay_debounce: TimeLatch, } +pub type QueuedLightClientUpdateId = usize; + #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum QueuedAttestationId { Aggregate(usize), @@ -235,6 +273,20 @@ impl Stream for ReprocessQueue { Poll::Ready(None) | Poll::Pending => (), } + match self.lc_updates_delay_queue.poll_expired(cx) { + Poll::Ready(Some(Ok(lc_id))) => { + return Poll::Ready(Some(InboundEvent::ReadyLightClientUpdate( + lc_id.into_inner(), + ))); + } + Poll::Ready(Some(Err(e))) => { + return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "lc_updates_queue"))); + } + // `Poll::Ready(None)` means that there are no more entries in the delay queue and we + // will continue to get this result until something else is added into the queue. + Poll::Ready(None) | Poll::Pending => (), + } + // Last empty the messages channel. match self.work_reprocessing_rx.poll_recv(cx) { Poll::Ready(Some(message)) => return Poll::Ready(Some(InboundEvent::Msg(message))), @@ -264,14 +316,19 @@ pub fn spawn_reprocess_scheduler( gossip_block_delay_queue: DelayQueue::new(), rpc_block_delay_queue: DelayQueue::new(), attestations_delay_queue: DelayQueue::new(), + lc_updates_delay_queue: DelayQueue::new(), queued_gossip_block_roots: HashSet::new(), + queued_lc_updates: FnvHashMap::default(), queued_aggregates: FnvHashMap::default(), queued_unaggregates: FnvHashMap::default(), awaiting_attestations_per_root: HashMap::new(), + awaiting_lc_updates_per_parent_root: HashMap::new(), next_attestation: 0, + next_lc_update: 0, early_block_debounce: TimeLatch::default(), rpc_block_debounce: TimeLatch::default(), attestation_delay_debounce: TimeLatch::default(), + lc_update_delay_debounce: TimeLatch::default(), }; executor.spawn( @@ -473,9 +530,49 @@ impl ReprocessQueue { self.next_attestation += 1; } - InboundEvent::Msg(BlockImported(root)) => { + InboundEvent::Msg(UnknownLightClientOptimisticUpdate( + queued_light_client_optimistic_update, + )) => { + if self.lc_updates_delay_queue.len() >= MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES { + if self.lc_update_delay_debounce.elapsed() { + error!( + log, + "Light client updates delay queue is full"; + "queue_size" => MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES, + "msg" => "check system clock" + ); + } + // Drop the light client update. + return; + } + + let lc_id: QueuedLightClientUpdateId = self.next_lc_update; + + // Register the delay. + let delay_key = self + .lc_updates_delay_queue + .insert(lc_id, QUEUED_LIGHT_CLIENT_UPDATE_DELAY); + + // Register the light client update for the corresponding root. + self.awaiting_lc_updates_per_parent_root + .entry(queued_light_client_optimistic_update.parent_root) + .or_default() + .push(lc_id); + + // Store the light client update and its info. + self.queued_lc_updates.insert( + self.next_lc_update, + (queued_light_client_optimistic_update, delay_key), + ); + + self.next_lc_update += 1; + } + InboundEvent::Msg(BlockImported { + block_root, + parent_root, + }) => { // Unqueue the attestations we have for this root, if any. 
- if let Some(queued_ids) = self.awaiting_attestations_per_root.remove(&root) { + if let Some(queued_ids) = self.awaiting_attestations_per_root.remove(&block_root) { for id in queued_ids { metrics::inc_counter( &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS, @@ -511,12 +608,62 @@ impl ReprocessQueue { error!( log, "Unknown queued attestation for block root"; - "block_root" => ?root, + "block_root" => ?block_root, "att_id" => ?id, ); } } } + // Unqueue the light client optimistic updates we have for this root, if any. + if let Some(queued_lc_id) = self + .awaiting_lc_updates_per_parent_root + .remove(&parent_root) + { + debug!( + log, + "Dequeuing light client optimistic updates"; + "parent_root" => %parent_root, + "count" => queued_lc_id.len(), + ); + + for lc_id in queued_lc_id { + metrics::inc_counter( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES, + ); + if let Some((work, delay_key)) = self.queued_lc_updates.remove(&lc_id).map( + |(light_client_optimistic_update, delay_key)| { + ( + ReadyWork::LightClientUpdate(light_client_optimistic_update), + delay_key, + ) + }, + ) { + // Remove the delay + self.lc_updates_delay_queue.remove(&delay_key); + + // Send the work + match self.ready_work_tx.try_send(work) { + Ok(_) => trace!( + log, + "reprocessing light client update sent"; + ), + Err(_) => error!( + log, + "Failed to send scheduled light client update"; + ), + } + } else { + // There is a mismatch between the light client update ids registered for this + // root and the queued light client updates. This should never happen. + error!( + log, + "Unknown queued light client update for parent root"; + "parent_root" => ?parent_root, + "lc_id" => ?lc_id, + ); + } + } + } } // A block that was queued for later processing is now ready to be processed. 
InboundEvent::ReadyGossipBlock(ready_block) => { @@ -591,6 +738,38 @@ impl ReprocessQueue { } } } + InboundEvent::ReadyLightClientUpdate(queued_id) => { + metrics::inc_counter( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES, + ); + + if let Some((parent_root, work)) = self.queued_lc_updates.remove(&queued_id).map( + |(queued_lc_update, _delay_key)| { + ( + queued_lc_update.parent_root, + ReadyWork::LightClientUpdate(queued_lc_update), + ) + }, + ) { + if self.ready_work_tx.try_send(work).is_err() { + error!( + log, + "Failed to send scheduled light client optimistic update"; + ); + } + + if let Some(queued_lc_updates) = self + .awaiting_lc_updates_per_parent_root + .get_mut(&parent_root) + { + if let Some(index) = + queued_lc_updates.iter().position(|&id| id == queued_id) + { + queued_lc_updates.swap_remove(index); + } + } + } + } } metrics::set_gauge_vec( @@ -608,5 +787,10 @@ impl ReprocessQueue { &[ATTESTATIONS], self.attestations_delay_queue.len() as i64, ); + metrics::set_gauge_vec( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, + &[LIGHT_CLIENT_UPDATES], + self.lc_updates_delay_queue.len() as i64, + ); } } diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index c142359f3e6..3601ccb195e 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -28,7 +28,8 @@ use types::{ use super::{ super::work_reprocessing_queue::{ - QueuedAggregate, QueuedGossipBlock, QueuedUnaggregate, ReprocessQueueMessage, + QueuedAggregate, QueuedGossipBlock, QueuedLightClientUpdate, QueuedUnaggregate, + ReprocessQueueMessage, }, Worker, }; @@ -953,7 +954,10 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); if reprocess_tx - .try_send(ReprocessQueueMessage::BlockImported(block_root)) + .try_send(ReprocessQueueMessage::BlockImported { + block_root, + parent_root: block.message().parent_root(), + }) .is_err() { error!( @@ -1330,7 +1334,7 @@ impl Worker { LightClientFinalityUpdateError::InvalidLightClientFinalityUpdate => { debug!( self.log, - "LC invalid finality update"; + "Light client invalid finality update"; "peer" => %peer_id, "error" => ?e, ); @@ -1344,7 +1348,7 @@ impl Worker { LightClientFinalityUpdateError::TooEarly => { debug!( self.log, - "LC finality update too early"; + "Light client finality update too early"; "peer" => %peer_id, "error" => ?e, ); @@ -1357,7 +1361,7 @@ impl Worker { } LightClientFinalityUpdateError::FinalityUpdateAlreadySeen => debug!( self.log, - "LC finality update already seen"; + "Light client finality update already seen"; "peer" => %peer_id, "error" => ?e, ), @@ -1366,7 +1370,7 @@ impl Worker { | LightClientFinalityUpdateError::SigSlotStartIsNone | LightClientFinalityUpdateError::FailedConstructingUpdate => debug!( self.log, - "LC error constructing finality update"; + "Light client error constructing finality update"; "peer" => %peer_id, "error" => ?e, ), @@ -1381,22 +1385,77 @@ impl Worker { message_id: MessageId, peer_id: PeerId, light_client_optimistic_update: LightClientOptimisticUpdate, + reprocess_tx: Option>>, seen_timestamp: Duration, ) { - match self - .chain - .verify_optimistic_update_for_gossip(light_client_optimistic_update, seen_timestamp) - { - Ok(_verified_light_client_optimistic_update) => { + match self.chain.verify_optimistic_update_for_gossip( + light_client_optimistic_update.clone(), + 
seen_timestamp, + ) { + Ok(verified_light_client_optimistic_update) => { + debug!( + self.log, + "Light client successful optimistic update"; + "peer" => %peer_id, + "parent_root" => %verified_light_client_optimistic_update.parent_root, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); } Err(e) => { - metrics::register_optimistic_update_error(&e); match e { + LightClientOptimisticUpdateError::UnknownBlockParentRoot(parent_root) => { + metrics::inc_counter( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES, + ); + debug!( + self.log, + "Optimistic update for unknown block"; + "peer_id" => %peer_id, + "parent_root" => ?parent_root + ); + + if let Some(sender) = reprocess_tx { + let msg = ReprocessQueueMessage::UnknownLightClientOptimisticUpdate( + QueuedLightClientUpdate { + peer_id, + message_id, + light_client_optimistic_update: Box::new( + light_client_optimistic_update, + ), + parent_root, + seen_timestamp, + }, + ); + + if sender.try_send(msg).is_err() { + error!( + self.log, + "Failed to send optimistic update for re-processing"; + ) + } + } else { + debug!( + self.log, + "Not sending light client update because it had been reprocessed"; + "peer_id" => %peer_id, + "parent_root" => ?parent_root + ); + + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + return; + } LightClientOptimisticUpdateError::InvalidLightClientOptimisticUpdate => { + metrics::register_optimistic_update_error(&e); + debug!( self.log, - "LC invalid optimistic update"; + "Light client invalid optimistic update"; "peer" => %peer_id, "error" => ?e, ); @@ -1408,9 +1467,10 @@ impl Worker { ) } LightClientOptimisticUpdateError::TooEarly => { + metrics::register_optimistic_update_error(&e); debug!( self.log, - "LC optimistic update too early"; + "Light client optimistic update too early"; "peer" => %peer_id, "error" => ?e, ); @@ -1421,21 +1481,29 @@ impl Worker { "light_client_gossip_error", ); } - LightClientOptimisticUpdateError::OptimisticUpdateAlreadySeen => debug!( - self.log, - "LC optimistic update already seen"; - "peer" => %peer_id, - "error" => ?e, - ), + LightClientOptimisticUpdateError::OptimisticUpdateAlreadySeen => { + metrics::register_optimistic_update_error(&e); + + debug!( + self.log, + "Light client optimistic update already seen"; + "peer" => %peer_id, + "error" => ?e, + ) + } LightClientOptimisticUpdateError::BeaconChainError(_) | LightClientOptimisticUpdateError::LightClientUpdateError(_) | LightClientOptimisticUpdateError::SigSlotStartIsNone - | LightClientOptimisticUpdateError::FailedConstructingUpdate => debug!( - self.log, - "LC error constructing optimistic update"; - "peer" => %peer_id, - "error" => ?e, - ), + | LightClientOptimisticUpdateError::FailedConstructingUpdate => { + metrics::register_optimistic_update_error(&e); + + debug!( + self.log, + "Light client error constructing optimistic update"; + "peer" => %peer_id, + "error" => ?e, + ) + } } self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 1ec045e97eb..6e6e6815504 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -84,6 +84,7 @@ impl Worker { } }; let slot = block.slot(); + let parent_root = block.message().parent_root(); let result = self .chain 
.process_block( @@ -101,7 +102,10 @@ impl Worker { info!(self.log, "New RPC block received"; "slot" => slot, "hash" => %hash); // Trigger processing for work referencing this block. - let reprocess_msg = ReprocessQueueMessage::BlockImported(hash); + let reprocess_msg = ReprocessQueueMessage::BlockImported { + block_root: hash, + parent_root, + }; if reprocess_tx.try_send(reprocess_msg).is_err() { error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %hash) }; diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index baf00720b09..8dc76877a1b 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -370,6 +370,21 @@ lazy_static! { "Number of queued attestations where as matching block has been imported." ); + /* + * Light client update reprocessing queue metrics. + */ + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES: Result = try_create_int_counter( + "beacon_processor_reprocessing_queue_expired_optimistic_updates", + "Number of queued light client optimistic updates which have expired before a matching block has been found." + ); + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: Result = try_create_int_counter( + "beacon_processor_reprocessing_queue_matched_optimistic_updates", + "Number of queued light client optimistic updates where as matching block has been imported." + ); + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES: Result = try_create_int_counter( + "beacon_processor_reprocessing_queue_sent_optimistic_updates", + "Number of queued light client optimistic updates where as matching block has been imported." + ); } pub fn update_bandwidth_metrics(bandwidth: Arc) { From e8d1dd4e7c33de5a5a24e87a50811c5dcd43c6bc Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Jan 2023 02:17:10 +0000 Subject: [PATCH 137/263] Fix docs for `oldest_block_slot` (#3911) ## Proposed Changes Update the docs to correct the description of `oldest_block_slot`. Credit to `laern` on Discord for noticing this. --- book/src/checkpoint-sync.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index 893c545cb93..47dc03b20c4 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -97,7 +97,7 @@ You can opt-in to reconstructing all of the historic states by providing the The database keeps track of three markers to determine the availability of historic blocks and states: -* `oldest_block_slot`: All blocks with slots less than or equal to this value are available in the +* `oldest_block_slot`: All blocks with slots greater than or equal to this value are available in the database. Additionally, the genesis block is always available. * `state_lower_limit`: All states with slots _less than or equal to_ this value are available in the database. The minimum value is 0, indicating that the genesis state is always available. 
From 79a20e8a5fae598e0f832c8bc35756b1849cf21a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Jan 2023 15:46:47 +1100 Subject: [PATCH 138/263] Update sync rewards API for abstract exec payload --- beacon_node/beacon_chain/src/sync_committee_rewards.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/sync_committee_rewards.rs b/beacon_node/beacon_chain/src/sync_committee_rewards.rs index 561fed1a86a..2221aa1d5eb 100644 --- a/beacon_node/beacon_chain/src/sync_committee_rewards.rs +++ b/beacon_node/beacon_chain/src/sync_committee_rewards.rs @@ -6,10 +6,10 @@ use slog::error; use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; use std::collections::HashMap; use store::RelativeEpoch; -use types::{BeaconBlockRef, BeaconState, ExecPayload}; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState}; impl BeaconChain { - pub fn compute_sync_committee_rewards>( + pub fn compute_sync_committee_rewards>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, state: &mut BeaconState, From e48487db01d128f50c3acf5444565b5b777aecd5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Jan 2023 15:47:07 +1100 Subject: [PATCH 139/263] Fix the new BLS to execution change test --- Cargo.lock | 1 + beacon_node/beacon_chain/src/test_utils.rs | 16 +--- beacon_node/genesis/src/interop.rs | 91 ++++++++++--------- beacon_node/genesis/src/lib.rs | 3 +- beacon_node/http_api/Cargo.toml | 1 + beacon_node/http_api/tests/common.rs | 30 ++++-- beacon_node/http_api/tests/fork_tests.rs | 45 ++++++++- .../http_api/tests/interactive_tests.rs | 3 +- 8 files changed, 124 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2d3849cf66a..18426b9e5d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3215,6 +3215,7 @@ dependencies = [ "eth2_ssz", "execution_layer", "futures", + "genesis", "hex", "lazy_static", "lighthouse_metrics", diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 51a5b64159c..c5da518994c 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -172,17 +172,6 @@ impl Builder> { .clone() .expect("cannot build without validator keypairs"); - // For the interop genesis state we know that the withdrawal credentials are set equal - // to the validator keypairs. Check for any manually initialised credentials. 
- assert!( - self.withdrawal_keypairs.is_empty(), - "withdrawal credentials are ignored by fresh_ephemeral_store" - ); - self.withdrawal_keypairs = validator_keypairs - .iter() - .map(|kp| Some(kp.clone())) - .collect(); - let store = Arc::new( HotColdDB::open_ephemeral( self.store_config.clone().unwrap_or_default(), @@ -321,6 +310,11 @@ where self } + pub fn withdrawal_keypairs(mut self, withdrawal_keypairs: Vec>) -> Self { + self.withdrawal_keypairs = withdrawal_keypairs; + self + } + pub fn default_spec(self) -> Self { self.spec_or_default(None) } diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index f24e94d1baa..122ca8eda6b 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -10,7 +10,7 @@ use types::{ pub const DEFAULT_ETH1_BLOCK_HASH: &[u8] = &[0x42; 32]; -fn bls_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 { +pub fn bls_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 { let mut credentials = hash(&pubkey.as_ssz_bytes()); credentials[0] = spec.bls_withdrawal_prefix_byte; Hash256::from_slice(&credentials) @@ -35,42 +35,18 @@ pub fn interop_genesis_state( execution_payload_header: Option>, spec: &ChainSpec, ) -> Result, String> { - let eth1_timestamp = 2_u64.pow(40); - let amount = spec.max_effective_balance; - - let datas = keypairs - .into_par_iter() - .map(|keypair| { - let mut data = DepositData { - withdrawal_credentials: bls_withdrawal_credentials(&keypair.pk, spec), - pubkey: keypair.pk.clone().into(), - amount, - signature: Signature::empty().into(), - }; - - data.signature = data.create_signature(&keypair.sk, spec); - - data - }) + let withdrawal_credentials = keypairs + .iter() + .map(|keypair| bls_withdrawal_credentials(&keypair.pk, spec)) .collect::>(); - - let mut state = initialize_beacon_state_from_eth1( + interop_genesis_state_with_withdrawal_credentials::( + keypairs, + &withdrawal_credentials, + genesis_time, eth1_block_hash, - eth1_timestamp, - genesis_deposits(datas, spec)?, execution_payload_header, spec, ) - .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; - - *state.genesis_time_mut() = genesis_time; - - // Invalidate all the caches after all the manual state surgery. 
- state - .drop_all_caches() - .map_err(|e| format!("Unable to drop caches: {:?}", e))?; - - Ok(state) } // returns an interop genesis state except every other @@ -82,23 +58,52 @@ pub fn interop_genesis_state_with_eth1( execution_payload_header: Option>, spec: &ChainSpec, ) -> Result, String> { + let withdrawal_credentials = keypairs + .iter() + .enumerate() + .map(|(index, keypair)| { + if index % 2 == 0 { + bls_withdrawal_credentials(&keypair.pk, spec) + } else { + eth1_withdrawal_credentials(&keypair.pk, spec) + } + }) + .collect::>(); + interop_genesis_state_with_withdrawal_credentials::( + keypairs, + &withdrawal_credentials, + genesis_time, + eth1_block_hash, + execution_payload_header, + spec, + ) +} + +pub fn interop_genesis_state_with_withdrawal_credentials( + keypairs: &[Keypair], + withdrawal_credentials: &[Hash256], + genesis_time: u64, + eth1_block_hash: Hash256, + execution_payload_header: Option>, + spec: &ChainSpec, +) -> Result, String> { + if keypairs.len() != withdrawal_credentials.len() { + return Err(format!( + "wrong number of withdrawal credentials, expected: {}, got: {}", + keypairs.len(), + withdrawal_credentials.len() + )); + } + let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; - let withdrawal_credentials = |index: usize, pubkey: &PublicKey| { - if index % 2 == 0 { - bls_withdrawal_credentials(pubkey, spec) - } else { - eth1_withdrawal_credentials(pubkey, spec) - } - }; - let datas = keypairs .into_par_iter() - .enumerate() - .map(|(index, keypair)| { + .zip(withdrawal_credentials.into_par_iter()) + .map(|(keypair, &withdrawal_credentials)| { let mut data = DepositData { - withdrawal_credentials: withdrawal_credentials(index, &keypair.pk), + withdrawal_credentials, pubkey: keypair.pk.clone().into(), amount, signature: Signature::empty().into(), diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs index 4d5439ac1b3..3fb053bf880 100644 --- a/beacon_node/genesis/src/lib.rs +++ b/beacon_node/genesis/src/lib.rs @@ -6,6 +6,7 @@ pub use eth1::Config as Eth1Config; pub use eth1::Eth1Endpoint; pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; pub use interop::{ - interop_genesis_state, interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH, + bls_withdrawal_credentials, interop_genesis_state, interop_genesis_state_with_eth1, + interop_genesis_state_with_withdrawal_credentials, DEFAULT_ETH1_BLOCK_HASH, }; pub use types::test_utils::generate_deterministic_keypairs; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 077e3aa7cda..0dc918f425e 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -45,6 +45,7 @@ logging = { path = "../../common/logging" } serde_json = "1.0.58" proto_array = { path = "../../consensus/proto_array" } unused_port = {path = "../../common/unused_port"} +genesis = { path = "../genesis" } [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index 7c228d9803f..ee027357977 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -1,5 +1,7 @@ use beacon_chain::{ - test_utils::{BeaconChainHarness, BoxedMutator, EphemeralHarnessType}, + test_utils::{ + BeaconChainHarness, BoxedMutator, Builder as HarnessBuilder, EphemeralHarnessType, + }, BeaconChain, BeaconChainTypes, }; use directory::DEFAULT_ROOT_DIR; @@ -55,25 +57,39 @@ pub struct ApiServer> { pub external_peer_id: PeerId, } +type Initializer = Box< + dyn 
FnOnce(HarnessBuilder>) -> HarnessBuilder>, +>; type Mutator = BoxedMutator, MemoryStore>; impl InteractiveTester { pub async fn new(spec: Option, validator_count: usize) -> Self { - Self::new_with_mutator(spec, validator_count, None).await + Self::new_with_initializer_and_mutator(spec, validator_count, None, None).await } - pub async fn new_with_mutator( + pub async fn new_with_initializer_and_mutator( spec: Option, validator_count: usize, + initializer: Option>, mutator: Option>, ) -> Self { let mut harness_builder = BeaconChainHarness::builder(E::default()) .spec_or_default(spec) - .deterministic_keypairs(validator_count) .logger(test_logger()) - .mock_execution_layer() - .fresh_ephemeral_store(); - + .mock_execution_layer(); + + harness_builder = if let Some(initializer) = initializer { + // Apply custom initialization provided by the caller. + initializer(harness_builder) + } else { + // Apply default initial configuration. + harness_builder + .deterministic_keypairs(validator_count) + .fresh_ephemeral_store() + }; + + // Add a mutator for the beacon chain builder which will be called in + // `HarnessBuilder::build`. if let Some(mutator) = mutator { harness_builder = harness_builder.initial_mutator(mutator); } diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index eaaa4e86463..e61470fe959 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -1,8 +1,15 @@ //! Tests for API behaviour across fork boundaries. use crate::common::*; -use beacon_chain::{test_utils::RelativeSyncCommittee, StateSkipConfig}; +use beacon_chain::{ + test_utils::{RelativeSyncCommittee, DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME}, + StateSkipConfig, +}; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; -use types::{Address, ChainSpec, Epoch, EthSpec, MinimalEthSpec, Slot}; +use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials}; +use types::{ + test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, + Address, ChainSpec, Epoch, EthSpec, Hash256, MinimalEthSpec, Slot, +}; type E = MinimalEthSpec; @@ -338,7 +345,39 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { let fork_epoch = Epoch::new(2); let spec = capella_spec(fork_epoch); let max_bls_to_execution_changes = E::max_bls_to_execution_changes(); - let tester = InteractiveTester::::new(Some(spec.clone()), validator_count).await; + + // Use a genesis state with entirely BLS withdrawal credentials. + // Offset keypairs by `validator_count` to create keys distinct from the signing keys. 
+ let validator_keypairs = generate_deterministic_keypairs(validator_count); + let withdrawal_keypairs = (0..validator_count) + .map(|i| Some(generate_deterministic_keypair(i + validator_count))) + .collect::>(); + let withdrawal_credentials = withdrawal_keypairs + .iter() + .map(|keypair| bls_withdrawal_credentials(&keypair.as_ref().unwrap().pk, &spec)) + .collect::>(); + let genesis_state = interop_genesis_state_with_withdrawal_credentials( + &validator_keypairs, + &withdrawal_credentials, + HARNESS_GENESIS_TIME, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + &spec, + ) + .unwrap(); + + let tester = InteractiveTester::::new_with_initializer_and_mutator( + Some(spec.clone()), + validator_count, + Some(Box::new(|harness_builder| { + harness_builder + .keypairs(validator_keypairs) + .withdrawal_keypairs(withdrawal_keypairs) + .genesis_state_ephemeral_store(genesis_state) + })), + None, + ) + .await; let harness = &tester.harness; let client = &tester.client; diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 04d527d531c..fdcc0d5fded 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -278,9 +278,10 @@ pub async fn proposer_boost_re_org_test( let num_empty_votes = Some(attesters_per_slot * percent_empty_votes / 100); let num_head_votes = Some(attesters_per_slot * percent_head_votes / 100); - let tester = InteractiveTester::::new_with_mutator( + let tester = InteractiveTester::::new_with_initializer_and_mutator( Some(spec), validator_count, + None, Some(Box::new(move |builder| { builder .proposer_re_org_threshold(Some(ReOrgThreshold(re_org_threshold))) From 16bdb2771b7ca16a84f90d4fdc4ca6b37288312a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Jan 2023 16:18:00 +1100 Subject: [PATCH 140/263] Update another test broken by the shuffling change --- beacon_node/http_api/tests/interactive_tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index fdcc0d5fded..7096fac4255 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -545,7 +545,7 @@ pub async fn proposer_boost_re_org_test( pub async fn fork_choice_before_proposal() { // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. - let validator_count = 32; + let validator_count = 64; let all_validators = (0..validator_count).collect::>(); let num_initial: u64 = 31; From 0866b739d0cb4b7974e70cbd5b67388f36cd1361 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 27 Jan 2023 09:48:42 +0000 Subject: [PATCH 141/263] Clippy 1.67 (#3916) ## Proposed Changes Clippy 1.67.0 put us on blast for the size of some of our errors, most of them written by me ( :eyes: ). This PR shrinks the size of `BeaconChainError` by dropping some extraneous info and boxing an inner error which should only occur infrequently anyway. For the `AttestationSlashInfo` and `BlockSlashInfo` I opted to ignore the lint as they are always used in a `Result` where `A` is a similar size. This means they don't bloat the size of the `Result`, so it's a bit annoying for Clippy to report this as an issue. I also chose to ignore `clippy::uninlined-format-args` because I think the benefit-to-churn ratio is too low. E.g. 
sometimes we have long identifiers in `format!` args and IMO the non-inlined form is easier to read: ```rust // I prefer this... format!( "{} did {} to {}", REALLY_LONG_CONSTANT_NAME, ANOTHER_REALLY_LONG_CONSTANT_NAME, regular_long_identifier_name ); // To this format!("{REALLY_LONG_CONSTANT_NAME} did {ANOTHER_REALLY_LONG_CONSTANT_NAME} to {regular_long_identifier_name}"); ``` I tried generating an automatic diff with `cargo clippy --fix` but it came out at: ``` 250 files changed, 1209 insertions(+), 1469 deletions(-) ``` Which seems like a bad idea when we'd have to back-merge it to `capella` and `eip4844` :scream: --- Makefile | 3 ++- .../beacon_chain/src/attestation_verification.rs | 5 +++++ beacon_node/beacon_chain/src/beacon_chain.rs | 6 +++--- beacon_node/beacon_chain/src/block_verification.rs | 5 +++++ beacon_node/beacon_chain/src/errors.rs | 4 +--- beacon_node/beacon_chain/src/fork_choice_signal.rs | 4 ++-- common/compare_fields/src/lib.rs | 10 +++------- common/compare_fields_derive/src/lib.rs | 2 +- .../src/per_block_processing/process_operations.rs | 8 ++++---- 9 files changed, 26 insertions(+), 21 deletions(-) diff --git a/Makefile b/Makefile index 68ada1b4b94..ebad9b63f8d 100644 --- a/Makefile +++ b/Makefile @@ -164,7 +164,8 @@ lint: -A clippy::from-over-into \ -A clippy::upper-case-acronyms \ -A clippy::vec-init-then-push \ - -A clippy::question-mark + -A clippy::question-mark \ + -A clippy::uninlined-format-args nightly-lint: cp .github/custom/clippy.toml . diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index b60ce7efe5c..04f601fad97 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -27,6 +27,11 @@ //! ▼ //! impl VerifiedAttestation //! ``` + +// Ignore this lint for `AttestationSlashInfo` which is of comparable size to the non-error types it +// is returned alongside. +#![allow(clippy::result_large_err)] + mod batch; use crate::{ diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 55d6ae29efb..3366e1364cf 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -975,7 +975,9 @@ impl BeaconChain { .ok_or(Error::ExecutionLayerMissing)? .get_payload_by_block_hash(exec_block_hash) .await - .map_err(|e| Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, e))? + .map_err(|e| { + Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, Box::new(e)) + })? .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; // Verify payload integrity. @@ -992,8 +994,6 @@ impl BeaconChain { return Err(Error::InconsistentPayloadReconstructed { slot: blinded_block.slot(), exec_block_hash, - canonical_payload_root: execution_payload_header.tree_hash_root(), - reconstructed_payload_root: header_from_payload.tree_hash_root(), canonical_transactions_root: execution_payload_header.transactions_root, reconstructed_transactions_root: header_from_payload.transactions_root, }); diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index ab317e96b96..ad08bd9f4f3 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -42,6 +42,11 @@ //! END //! //! 
``` + +// Ignore this lint for `BlockSlashInfo` which is of comparable size to the non-error types it is +// returned alongside. +#![allow(clippy::result_large_err)] + use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 24ea07833d9..788369e55e7 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -141,13 +141,11 @@ pub enum BeaconChainError { BuilderMissing, ExecutionLayerMissing, BlockVariantLacksExecutionPayload(Hash256), - ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, execution_layer::Error), + ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, Box), BlockHashMissingFromExecutionLayer(ExecutionBlockHash), InconsistentPayloadReconstructed { slot: Slot, exec_block_hash: ExecutionBlockHash, - canonical_payload_root: Hash256, - reconstructed_payload_root: Hash256, canonical_transactions_root: Hash256, reconstructed_transactions_root: Hash256, }, diff --git a/beacon_node/beacon_chain/src/fork_choice_signal.rs b/beacon_node/beacon_chain/src/fork_choice_signal.rs index fd92de661da..f5424d417eb 100644 --- a/beacon_node/beacon_chain/src/fork_choice_signal.rs +++ b/beacon_node/beacon_chain/src/fork_choice_signal.rs @@ -43,7 +43,7 @@ impl ForkChoiceSignalTx { /// /// Return an error if the provided `slot` is strictly less than any previously provided slot. pub fn notify_fork_choice_complete(&self, slot: Slot) -> Result<(), BeaconChainError> { - let &(ref lock, ref condvar) = &*self.pair; + let (lock, condvar) = &*self.pair; let mut current_slot = lock.lock(); @@ -72,7 +72,7 @@ impl Default for ForkChoiceSignalTx { impl ForkChoiceSignalRx { pub fn wait_for_fork_choice(&self, slot: Slot, timeout: Duration) -> ForkChoiceWaitResult { - let &(ref lock, ref condvar) = &*self.pair; + let (lock, condvar) = &*self.pair; let mut current_slot = lock.lock(); diff --git a/common/compare_fields/src/lib.rs b/common/compare_fields/src/lib.rs index a0166eb500a..bc2f5446ad2 100644 --- a/common/compare_fields/src/lib.rs +++ b/common/compare_fields/src/lib.rs @@ -115,11 +115,7 @@ impl Comparison { let mut children = vec![]; for i in 0..std::cmp::max(a.len(), b.len()) { - children.push(FieldComparison::new( - format!("{:}", i), - &a.get(i), - &b.get(i), - )); + children.push(FieldComparison::new(format!("{i}"), &a.get(i), &b.get(i))); } Self::parent(field_name, a == b, children) @@ -164,8 +160,8 @@ impl FieldComparison { Self { field_name, equal: a == b, - a: format!("{:?}", a), - b: format!("{:?}", b), + a: format!("{a:?}"), + b: format!("{b:?}"), } } diff --git a/common/compare_fields_derive/src/lib.rs b/common/compare_fields_derive/src/lib.rs index beabc6ca9ba..752c09ee056 100644 --- a/common/compare_fields_derive/src/lib.rs +++ b/common/compare_fields_derive/src/lib.rs @@ -32,7 +32,7 @@ pub fn compare_fields_derive(input: TokenStream) -> TokenStream { _ => panic!("compare_fields_derive only supports named struct fields."), }; - let field_name = format!("{:}", ident_a); + let field_name = ident_a.to_string(); let ident_b = ident_a.clone(); let quote = if is_slice(field) { diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 9f27c4c9a1e..9aa1e6d376c 100644 --- 
a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -9,9 +9,9 @@ use crate::VerifySignatures; use safe_arith::SafeArith; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; -pub fn process_operations<'a, T: EthSpec, Payload: ExecPayload>( +pub fn process_operations>( state: &mut BeaconState, - block_body: BeaconBlockBodyRef<'a, T, Payload>, + block_body: BeaconBlockBodyRef, verify_signatures: VerifySignatures, ctxt: &mut ConsensusContext, spec: &ChainSpec, @@ -232,9 +232,9 @@ pub fn process_attester_slashings( } /// Wrapper function to handle calling the correct version of `process_attestations` based on /// the fork. -pub fn process_attestations<'a, T: EthSpec, Payload: ExecPayload>( +pub fn process_attestations>( state: &mut BeaconState, - block_body: BeaconBlockBodyRef<'a, T, Payload>, + block_body: BeaconBlockBodyRef, verify_signatures: VerifySignatures, ctxt: &mut ConsensusContext, spec: &ChainSpec, From 7b7595347d110ebf2e6b6b04bece048ef1424fe5 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Tue, 31 Jan 2023 11:26:23 -0600 Subject: [PATCH 142/263] exchangeCapabilities & Capella Readiness Logging (#3918) * Undo Passing Spec to Engine API * Utilize engine_exchangeCapabilities * Add Logging to Indicate Capella Readiness * Add exchangeCapabilities to mock_execution_layer * Send Nested Array for engine_exchangeCapabilities * Use Mutex Instead of RwLock for EngineCapabilities * Improve Locking to Avoid Deadlock * Prettier logic for get_engine_capabilities * Improve Comments * Update beacon_node/beacon_chain/src/capella_readiness.rs Co-authored-by: Michael Sproul * Update beacon_node/beacon_chain/src/capella_readiness.rs Co-authored-by: Michael Sproul * Update beacon_node/beacon_chain/src/capella_readiness.rs Co-authored-by: Michael Sproul * Update beacon_node/beacon_chain/src/capella_readiness.rs Co-authored-by: Michael Sproul * Update beacon_node/beacon_chain/src/capella_readiness.rs Co-authored-by: Michael Sproul * Update beacon_node/client/src/notifier.rs Co-authored-by: Michael Sproul * Update beacon_node/execution_layer/src/engine_api/http.rs Co-authored-by: Michael Sproul * Addressed Michael's Comments --------- Co-authored-by: Michael Sproul --- .../beacon_chain/src/capella_readiness.rs | 135 +++++++++++ beacon_node/beacon_chain/src/lib.rs | 1 + .../beacon_chain/src/merge_readiness.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 2 - beacon_node/client/src/builder.rs | 1 - beacon_node/client/src/notifier.rs | 71 +++++- beacon_node/eth1/src/inner.rs | 2 +- beacon_node/eth1/src/service.rs | 17 +- beacon_node/eth1/tests/test.rs | 14 +- beacon_node/execution_layer/src/engine_api.rs | 39 +++- .../execution_layer/src/engine_api/http.rs | 215 ++++++++++++------ beacon_node/execution_layer/src/engines.rs | 61 ++++- beacon_node/execution_layer/src/lib.rs | 33 ++- .../src/test_utils/handle_rpc.rs | 168 +++++++++----- .../src/test_utils/mock_builder.rs | 3 +- .../src/test_utils/mock_execution_layer.rs | 5 +- .../execution_layer/src/test_utils/mod.rs | 20 +- .../src/test_rig.rs | 4 +- 18 files changed, 601 insertions(+), 192 deletions(-) create mode 100644 beacon_node/beacon_chain/src/capella_readiness.rs diff --git a/beacon_node/beacon_chain/src/capella_readiness.rs b/beacon_node/beacon_chain/src/capella_readiness.rs new file mode 100644 index 00000000000..b1563210585 --- /dev/null +++ 
b/beacon_node/beacon_chain/src/capella_readiness.rs @@ -0,0 +1,135 @@ +//! Provides tools for checking if a node is ready for the Capella upgrade and following merge +//! transition. + +use crate::{BeaconChain, BeaconChainTypes}; +use execution_layer::http::{ + ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V2, +}; +use serde::{Deserialize, Serialize}; +use std::fmt; +use std::time::Duration; +use types::*; + +/// The time before the Capella fork when we will start issuing warnings about preparation. +use super::merge_readiness::SECONDS_IN_A_WEEK; +pub const CAPELLA_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; +pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300; + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +#[serde(tag = "type")] +pub enum CapellaReadiness { + /// The execution engine is capella-enabled (as far as we can tell) + Ready, + /// The EL can be reached and has the correct configuration, however it's not yet synced. + NotSynced, + /// We are connected to an execution engine which doesn't support the V2 engine api methods + V2MethodsNotSupported { error: String }, + /// The transition configuration with the EL failed, there might be a problem with + /// connectivity, authentication or a difference in configuration. + ExchangeCapabilitiesFailed { error: String }, + /// The user has not configured an execution endpoint + NoExecutionEndpoint, +} + +impl fmt::Display for CapellaReadiness { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + CapellaReadiness::Ready => { + write!(f, "This node appears ready for Capella.") + } + CapellaReadiness::ExchangeCapabilitiesFailed { error } => write!( + f, + "Could not exchange capabilities with the \ + execution endpoint: {}", + error + ), + CapellaReadiness::NotSynced => write!( + f, + "The execution endpoint is connected and configured, \ + however it is not yet synced" + ), + CapellaReadiness::NoExecutionEndpoint => write!( + f, + "The --execution-endpoint flag is not specified, this is a \ + requirement post-merge" + ), + CapellaReadiness::V2MethodsNotSupported { error } => write!( + f, + "The execution endpoint does not appear to support \ + the required engine api methods for Capella: {}", + error + ), + } + } +} + +impl BeaconChain { + /// Returns `true` if capella epoch is set and Capella fork has occurred or will + /// occur within `CAPELLA_READINESS_PREPARATION_SECONDS` + pub fn is_time_to_prepare_for_capella(&self, current_slot: Slot) -> bool { + if let Some(capella_epoch) = self.spec.capella_fork_epoch { + let capella_slot = capella_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let capella_readiness_preparation_slots = + CAPELLA_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot; + // Return `true` if Capella has happened or is within the preparation time. + current_slot + capella_readiness_preparation_slots > capella_slot + } else { + // The Capella fork epoch has not been defined yet, no need to prepare. + false + } + } + + /// Attempts to connect to the EL and confirm that it is ready for capella. 
+ pub async fn check_capella_readiness(&self) -> CapellaReadiness { + if let Some(el) = self.execution_layer.as_ref() { + match el + .get_engine_capabilities(Some(Duration::from_secs( + ENGINE_CAPABILITIES_REFRESH_INTERVAL, + ))) + .await + { + Err(e) => { + // The EL was either unreachable or responded with an error + CapellaReadiness::ExchangeCapabilitiesFailed { + error: format!("{:?}", e), + } + } + Ok(capabilities) => { + let mut missing_methods = String::from("Required Methods Unsupported:"); + let mut all_good = true; + if !capabilities.get_payload_v2 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_GET_PAYLOAD_V2); + all_good = false; + } + if !capabilities.forkchoice_updated_v2 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_FORKCHOICE_UPDATED_V2); + all_good = false; + } + if !capabilities.new_payload_v2 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_NEW_PAYLOAD_V2); + all_good = false; + } + + if all_good { + if !el.is_synced_for_notifier().await { + // The EL is not synced. + CapellaReadiness::NotSynced + } else { + CapellaReadiness::Ready + } + } else { + CapellaReadiness::V2MethodsNotSupported { + error: missing_methods, + } + } + } + } + } else { + CapellaReadiness::NoExecutionEndpoint + } + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index c17b48517cd..2444c144f3d 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -11,6 +11,7 @@ mod block_times_cache; mod block_verification; pub mod builder; pub mod canonical_head; +pub mod capella_readiness; pub mod chain_config; mod early_attester_cache; mod errors; diff --git a/beacon_node/beacon_chain/src/merge_readiness.rs b/beacon_node/beacon_chain/src/merge_readiness.rs index 4ef2102fd51..c66df39eedf 100644 --- a/beacon_node/beacon_chain/src/merge_readiness.rs +++ b/beacon_node/beacon_chain/src/merge_readiness.rs @@ -8,7 +8,7 @@ use std::fmt::Write; use types::*; /// The time before the Bellatrix fork when we will start issuing warnings about preparation. 
-const SECONDS_IN_A_WEEK: u64 = 604800; +pub const SECONDS_IN_A_WEEK: u64 = 604800; pub const MERGE_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; #[derive(Default, Debug, Serialize, Deserialize)] diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index c5da518994c..875ff845aff 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -375,7 +375,6 @@ where .collect::>() .unwrap(); - let spec = MainnetEthSpec::default_spec(); let config = execution_layer::Config { execution_endpoints: urls, secret_files: vec![], @@ -386,7 +385,6 @@ where config, self.runtime.task_executor.clone(), self.log.clone(), - &spec, ) .unwrap(); diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index b19b636c7d9..3b016ebda9c 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -154,7 +154,6 @@ where config, context.executor.clone(), context.log().clone(), - &spec, ) .map_err(|e| format!("unable to start execution layer endpoints: {:?}", e))?; Some(execution_layer) diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1da7a79707d..c1d830bc089 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,5 +1,6 @@ use crate::metrics; use beacon_chain::{ + capella_readiness::CapellaReadiness, merge_readiness::{MergeConfig, MergeReadiness}, BeaconChain, BeaconChainTypes, ExecutionStatus, }; @@ -313,6 +314,7 @@ pub fn spawn_notifier( eth1_logging(&beacon_chain, &log); merge_readiness_logging(current_slot, &beacon_chain, &log).await; + capella_readiness_logging(current_slot, &beacon_chain, &log).await; } }; @@ -350,12 +352,15 @@ async fn merge_readiness_logging( } if merge_completed && !has_execution_layer { - error!( - log, - "Execution endpoint required"; - "info" => "you need an execution engine to validate blocks, see: \ - https://lighthouse-book.sigmaprime.io/merge-migration.html" - ); + if !beacon_chain.is_time_to_prepare_for_capella(current_slot) { + // logging of the EE being offline is handled in `capella_readiness_logging()` + error!( + log, + "Execution endpoint required"; + "info" => "you need an execution engine to validate blocks, see: \ + https://lighthouse-book.sigmaprime.io/merge-migration.html" + ); + } return; } @@ -419,6 +424,60 @@ async fn merge_readiness_logging( } } +/// Provides some helpful logging to users to indicate if their node is ready for Capella +async fn capella_readiness_logging( + current_slot: Slot, + beacon_chain: &BeaconChain, + log: &Logger, +) { + let capella_completed = beacon_chain + .canonical_head + .cached_head() + .snapshot + .beacon_block + .message() + .body() + .execution_payload() + .map_or(false, |payload| payload.withdrawals_root().is_ok()); + + let has_execution_layer = beacon_chain.execution_layer.is_some(); + + if capella_completed && has_execution_layer + || !beacon_chain.is_time_to_prepare_for_capella(current_slot) + { + return; + } + + if capella_completed && !has_execution_layer { + error!( + log, + "Execution endpoint required"; + "info" => "you need a Capella enabled execution engine to validate blocks, see: \ + https://lighthouse-book.sigmaprime.io/merge-migration.html" + ); + return; + } + + match beacon_chain.check_capella_readiness().await { + CapellaReadiness::Ready => { + info!(log, "Ready for Capella") + } + readiness @ CapellaReadiness::ExchangeCapabilitiesFailed { error: _ } => { + error!( + log, + "Not 
ready for Capella"; + "info" => %readiness, + "hint" => "try updating Lighthouse and/or the execution layer", + ) + } + readiness => warn!( + log, + "Not ready for Capella"; + "info" => %readiness, + ), + } +} + fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger) { let current_slot_opt = beacon_chain.slot().ok(); diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs index a44b31050fe..0468a02d2e3 100644 --- a/beacon_node/eth1/src/inner.rs +++ b/beacon_node/eth1/src/inner.rs @@ -122,7 +122,7 @@ impl SszEth1Cache { cache: self.deposit_cache.to_deposit_cache()?, last_processed_block: self.last_processed_block, }), - endpoint: endpoint_from_config(&config, &spec) + endpoint: endpoint_from_config(&config) .map_err(|e| format!("Failed to create endpoint: {:?}", e))?, to_finalize: RwLock::new(None), // Set the remote head_block zero when creating a new instance. We only care about diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 56c2411ba18..31082394baf 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -363,7 +363,7 @@ impl Default for Config { } } -pub fn endpoint_from_config(config: &Config, spec: &ChainSpec) -> Result { +pub fn endpoint_from_config(config: &Config) -> Result { match config.endpoint.clone() { Eth1Endpoint::Auth { endpoint, @@ -373,16 +373,11 @@ pub fn endpoint_from_config(config: &Config, spec: &ChainSpec) -> Result { let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version) .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?; - HttpJsonRpc::new_with_auth( - endpoint, - auth, - Some(config.execution_timeout_multiplier), - spec, - ) - .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) + HttpJsonRpc::new_with_auth(endpoint, auth, Some(config.execution_timeout_multiplier)) + .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) } Eth1Endpoint::NoAuth(endpoint) => { - HttpJsonRpc::new(endpoint, Some(config.execution_timeout_multiplier), spec) + HttpJsonRpc::new(endpoint, Some(config.execution_timeout_multiplier)) .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) } } @@ -409,7 +404,7 @@ impl Service { deposit_cache: RwLock::new(DepositUpdater::new( config.deposit_contract_deploy_block, )), - endpoint: endpoint_from_config(&config, &spec)?, + endpoint: endpoint_from_config(&config)?, to_finalize: RwLock::new(None), remote_head_block: RwLock::new(None), config: RwLock::new(config), @@ -438,7 +433,7 @@ impl Service { inner: Arc::new(Inner { block_cache: <_>::default(), deposit_cache: RwLock::new(deposit_cache), - endpoint: endpoint_from_config(&config, &spec) + endpoint: endpoint_from_config(&config) .map_err(Error::FailedToInitializeFromSnapshot)?, to_finalize: RwLock::new(None), remote_head_block: RwLock::new(None), diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index eb0d2371cb0..cd680478cc5 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -494,8 +494,7 @@ mod deposit_tree { let mut deposit_counts = vec![]; let client = - HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None, spec) - .unwrap(); + HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); // Perform deposits to the smart contract, recording it's state along the way. 
for deposit in &deposits { @@ -599,12 +598,8 @@ mod http { .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); - let client = HttpJsonRpc::new( - SensitiveUrl::parse(ð1.endpoint()).unwrap(), - None, - &MainnetEthSpec::default_spec(), - ) - .unwrap(); + let client = + HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); let block_number = get_block_number(&web3).await; let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; @@ -720,8 +715,7 @@ mod fast { ) .unwrap(); let client = - HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None, &spec) - .unwrap(); + HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); let n = 10; let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); for deposit in &deposits { diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index afc5cffe2fb..da5e991b090 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,4 +1,9 @@ use crate::engines::ForkchoiceState; +use crate::http::{ + ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ENGINE_FORKCHOICE_UPDATED_V1, + ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, + ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, +}; pub use ethers_core::types::Transaction; use ethers_core::utils::rlp::{self, Decodable, Rlp}; use http::deposit_methods::RpcError; @@ -347,11 +352,8 @@ impl GetPayloadResponse { } } -// This name is work in progress, it could -// change when this method is actually proposed -// but I'm writing this as it has been described #[derive(Clone, Copy, Debug)] -pub struct SupportedApis { +pub struct EngineCapabilities { pub new_payload_v1: bool, pub new_payload_v2: bool, pub forkchoice_updated_v1: bool, @@ -360,3 +362,32 @@ pub struct SupportedApis { pub get_payload_v2: bool, pub exchange_transition_configuration_v1: bool, } + +impl EngineCapabilities { + pub fn to_response(&self) -> Vec<&str> { + let mut response = Vec::new(); + if self.new_payload_v1 { + response.push(ENGINE_NEW_PAYLOAD_V1); + } + if self.new_payload_v2 { + response.push(ENGINE_NEW_PAYLOAD_V2); + } + if self.forkchoice_updated_v1 { + response.push(ENGINE_FORKCHOICE_UPDATED_V1); + } + if self.forkchoice_updated_v2 { + response.push(ENGINE_FORKCHOICE_UPDATED_V2); + } + if self.get_payload_v1 { + response.push(ENGINE_GET_PAYLOAD_V1); + } + if self.get_payload_v2 { + response.push(ENGINE_GET_PAYLOAD_V2); + } + if self.exchange_transition_configuration_v1 { + response.push(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1); + } + + response + } +} diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 60725192b71..d1faab42c97 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -7,10 +7,11 @@ use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; -use tokio::sync::RwLock; +use std::collections::HashSet; +use tokio::sync::Mutex; -use std::time::Duration; -use types::{ChainSpec, EthSpec}; +use std::time::{Duration, SystemTime}; +use types::EthSpec; pub use deposit_log::{DepositLog, Log}; pub use reqwest::Client; @@ -48,8 +49,37 @@ pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = "engine_exchangeTransitionConfigurationV1"; pub const 
ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_secs(1); +pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; +pub const ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT: Duration = Duration::from_secs(1); + /// This error is returned during a `chainId` call by Geth. pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; +/// This code is returned by all clients when a method is not supported +/// (verified geth, nethermind, erigon, besu) +pub const METHOD_NOT_FOUND_CODE: i64 = -32601; + +pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ + ENGINE_NEW_PAYLOAD_V1, + ENGINE_NEW_PAYLOAD_V2, + ENGINE_GET_PAYLOAD_V1, + ENGINE_GET_PAYLOAD_V2, + ENGINE_FORKCHOICE_UPDATED_V1, + ENGINE_FORKCHOICE_UPDATED_V2, + ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, +]; + +/// This is necessary because a user might run a capella-enabled version of +/// lighthouse before they update to a capella-enabled execution engine. +// TODO (mark): rip this out once we are post-capella on mainnet +pub static PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { + new_payload_v1: true, + new_payload_v2: false, + forkchoice_updated_v1: true, + forkchoice_updated_v2: false, + get_payload_v1: true, + get_payload_v2: false, + exchange_transition_configuration_v1: true, +}; /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. pub mod deposit_log { @@ -526,11 +556,47 @@ pub mod deposit_methods { } } +#[derive(Clone, Debug)] +pub struct CapabilitiesCacheEntry { + engine_capabilities: EngineCapabilities, + fetch_time: SystemTime, +} + +impl CapabilitiesCacheEntry { + pub fn new(engine_capabilities: EngineCapabilities) -> Self { + Self { + engine_capabilities, + fetch_time: SystemTime::now(), + } + } + + pub fn engine_capabilities(&self) -> &EngineCapabilities { + &self.engine_capabilities + } + + pub fn age(&self) -> Duration { + // duration_since() may fail because measurements taken earlier + // are not guaranteed to always be before later measurements + // due to anomalies such as the system clock being adjusted + // either forwards or backwards + // + // In such cases, we'll just say the age is zero + SystemTime::now() + .duration_since(self.fetch_time) + .unwrap_or(Duration::ZERO) + } + + /// returns `true` if the entry's age is >= age_limit + pub fn older_than(&self, age_limit: Option) -> bool { + age_limit.map_or(false, |limit| self.age() >= limit) + } +} + pub struct HttpJsonRpc { pub client: Client, pub url: SensitiveUrl, pub execution_timeout_multiplier: u32, - pub cached_supported_apis: RwLock>, + pub engine_capabilities_cache: Mutex>, auth: Option, } @@ -538,27 +604,12 @@ impl HttpJsonRpc { pub fn new( url: SensitiveUrl, execution_timeout_multiplier: Option, - spec: &ChainSpec, ) -> Result { - // FIXME: remove this `cached_supported_apis` spec hack once the `engine_getCapabilities` - // method is implemented in all execution clients: - // https://github.com/ethereum/execution-apis/issues/321 - let cached_supported_apis = RwLock::new(Some(SupportedApis { - new_payload_v1: true, - new_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(), - forkchoice_updated_v1: true, - forkchoice_updated_v2: spec.capella_fork_epoch.is_some() - || spec.eip4844_fork_epoch.is_some(), - get_payload_v1: true, - get_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(), - exchange_transition_configuration_v1: true, - })); - Ok(Self { client: 
Client::builder().build()?, url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), - cached_supported_apis, + engine_capabilities_cache: Mutex::new(None), auth: None, }) } @@ -567,27 +618,12 @@ impl HttpJsonRpc { url: SensitiveUrl, auth: Auth, execution_timeout_multiplier: Option, - spec: &ChainSpec, ) -> Result { - // FIXME: remove this `cached_supported_apis` spec hack once the `engine_getCapabilities` - // method is implemented in all execution clients: - // https://github.com/ethereum/execution-apis/issues/321 - let cached_supported_apis = RwLock::new(Some(SupportedApis { - new_payload_v1: true, - new_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(), - forkchoice_updated_v1: true, - forkchoice_updated_v2: spec.capella_fork_epoch.is_some() - || spec.eip4844_fork_epoch.is_some(), - get_payload_v1: true, - get_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(), - exchange_transition_configuration_v1: true, - })); - Ok(Self { client: Client::builder().build()?, url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), - cached_supported_apis, + engine_capabilities_cache: Mutex::new(None), auth: Some(auth), }) } @@ -893,35 +929,67 @@ impl HttpJsonRpc { Ok(response) } - // TODO: This is currently a stub for the `engine_getCapabilities` - // method. This stub is unused because we set cached_supported_apis - // in the constructor based on the `spec` - // Implement this once the execution clients support it - // https://github.com/ethereum/execution-apis/issues/321 - pub async fn get_capabilities(&self) -> Result { - Ok(SupportedApis { - new_payload_v1: true, - new_payload_v2: true, - forkchoice_updated_v1: true, - forkchoice_updated_v2: true, - get_payload_v1: true, - get_payload_v2: true, - exchange_transition_configuration_v1: true, - }) + pub async fn exchange_capabilities(&self) -> Result { + let params = json!([LIGHTHOUSE_CAPABILITIES]); + + let response: Result, _> = self + .rpc_request( + ENGINE_EXCHANGE_CAPABILITIES, + params, + ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT * self.execution_timeout_multiplier, + ) + .await; + + match response { + // TODO (mark): rip this out once we are post capella on mainnet + Err(error) => match error { + Error::ServerMessage { code, message: _ } if code == METHOD_NOT_FOUND_CODE => { + Ok(PRE_CAPELLA_ENGINE_CAPABILITIES) + } + _ => Err(error), + }, + Ok(capabilities) => Ok(EngineCapabilities { + new_payload_v1: capabilities.contains(ENGINE_NEW_PAYLOAD_V1), + new_payload_v2: capabilities.contains(ENGINE_NEW_PAYLOAD_V2), + forkchoice_updated_v1: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V1), + forkchoice_updated_v2: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V2), + get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), + get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), + exchange_transition_configuration_v1: capabilities + .contains(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1), + }), + } } - pub async fn set_cached_supported_apis(&self, supported_apis: Option) { - *self.cached_supported_apis.write().await = supported_apis; + pub async fn clear_exchange_capabilties_cache(&self) { + *self.engine_capabilities_cache.lock().await = None; } - pub async fn get_cached_supported_apis(&self) -> Result { - let cached_opt = *self.cached_supported_apis.read().await; - if let Some(supported_apis) = cached_opt { - Ok(supported_apis) + /// Returns the execution engine capabilities resulting from a call to + /// 
engine_exchangeCapabilities. If the capabilities cache is not populated, + /// or if it is populated with a cached result of age >= `age_limit`, this + /// method will fetch the result from the execution engine and populate the + /// cache before returning it. Otherwise it will return a cached result from + /// a previous call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_capabilities( + &self, + age_limit: Option, + ) -> Result { + let mut lock = self.engine_capabilities_cache.lock().await; + + if lock + .as_ref() + .map_or(true, |entry| entry.older_than(age_limit)) + { + let engine_capabilities = self.exchange_capabilities().await?; + *lock = Some(CapabilitiesCacheEntry::new(engine_capabilities)); + Ok(engine_capabilities) } else { - let supported_apis = self.get_capabilities().await?; - self.set_cached_supported_apis(Some(supported_apis)).await; - Ok(supported_apis) + // here entry is guaranteed to exist so unwrap() is safe + Ok(*lock.as_ref().unwrap().engine_capabilities()) } } @@ -931,10 +999,10 @@ impl HttpJsonRpc { &self, execution_payload: ExecutionPayload, ) -> Result { - let supported_apis = self.get_cached_supported_apis().await?; - if supported_apis.new_payload_v2 { + let engine_capabilities = self.get_engine_capabilities(None).await?; + if engine_capabilities.new_payload_v2 { self.new_payload_v2(execution_payload).await - } else if supported_apis.new_payload_v1 { + } else if engine_capabilities.new_payload_v1 { self.new_payload_v1(execution_payload).await } else { Err(Error::RequiredMethodUnsupported("engine_newPayload")) @@ -948,8 +1016,8 @@ impl HttpJsonRpc { fork_name: ForkName, payload_id: PayloadId, ) -> Result, Error> { - let supported_apis = self.get_cached_supported_apis().await?; - if supported_apis.get_payload_v2 { + let engine_capabilities = self.get_engine_capabilities(None).await?; + if engine_capabilities.get_payload_v2 { // TODO: modify this method to return GetPayloadResponse instead // of throwing away the `block_value` and returning only the // ExecutionPayload @@ -957,7 +1025,7 @@ impl HttpJsonRpc { .get_payload_v2(fork_name, payload_id) .await? 
.execution_payload()) - } else if supported_apis.new_payload_v1 { + } else if engine_capabilities.new_payload_v1 { self.get_payload_v1(payload_id).await } else { Err(Error::RequiredMethodUnsupported("engine_getPayload")) @@ -971,11 +1039,11 @@ impl HttpJsonRpc { forkchoice_state: ForkchoiceState, payload_attributes: Option, ) -> Result { - let supported_apis = self.get_cached_supported_apis().await?; - if supported_apis.forkchoice_updated_v2 { + let engine_capabilities = self.get_engine_capabilities(None).await?; + if engine_capabilities.forkchoice_updated_v2 { self.forkchoice_updated_v2(forkchoice_state, payload_attributes) .await - } else if supported_apis.forkchoice_updated_v1 { + } else if engine_capabilities.forkchoice_updated_v1 { self.forkchoice_updated_v1(forkchoice_state, payload_attributes) .await } else { @@ -1003,7 +1071,6 @@ mod test { impl Tester { pub fn new(with_auth: bool) -> Self { let server = MockServer::unit_testing(); - let spec = MainnetEthSpec::default_spec(); let rpc_url = SensitiveUrl::parse(&server.url()).unwrap(); let echo_url = SensitiveUrl::parse(&format!("{}/echo", server.url())).unwrap(); @@ -1014,13 +1081,13 @@ mod test { let echo_auth = Auth::new(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), None, None); ( - Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth, None, &spec).unwrap()), - Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth, None, &spec).unwrap()), + Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth, None).unwrap()), + Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth, None).unwrap()), ) } else { ( - Arc::new(HttpJsonRpc::new(rpc_url, None, &spec).unwrap()), - Arc::new(HttpJsonRpc::new(echo_url, None, &spec).unwrap()), + Arc::new(HttpJsonRpc::new(rpc_url, None).unwrap()), + Arc::new(HttpJsonRpc::new(echo_url, None).unwrap()), ) }; diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 271cca26cba..5532fbb3454 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -1,13 +1,15 @@ //! Provides generic behaviour for multiple execution engines, specifically fallback behaviour. use crate::engine_api::{ - Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, + EngineCapabilities, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, + PayloadId, }; use crate::HttpJsonRpc; use lru::LruCache; -use slog::{debug, error, info, Logger}; +use slog::{debug, error, info, warn, Logger}; use std::future::Future; use std::sync::Arc; +use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::{watch, Mutex, RwLock}; use tokio_stream::wrappers::WatchStream; @@ -18,6 +20,7 @@ use types::ExecutionBlockHash; /// Since the size of each value is small (~100 bytes) a large number is used for safety. /// FIXME: check this assumption now that the key includes entire payload attributes which now includes withdrawals const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; +const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes /// Stores the remembered state of a engine. #[derive(Copy, Clone, PartialEq, Debug, Eq, Default)] @@ -29,6 +32,14 @@ enum EngineStateInternal { AuthFailed, } +#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)] +enum CapabilitiesCacheAction { + #[default] + None, + Update, + Clear, +} + /// A subset of the engine state to inform other services if the engine is online or offline. 
#[derive(Debug, Clone, PartialEq, Eq, Copy)] pub enum EngineState { @@ -231,7 +242,7 @@ impl Engine { /// Run the `EngineApi::upcheck` function if the node's last known state is not synced. This /// might be used to recover the node if offline. pub async fn upcheck(&self) { - let state: EngineStateInternal = match self.api.upcheck().await { + let (state, cache_action) = match self.api.upcheck().await { Ok(()) => { let mut state = self.state.write().await; if **state != EngineStateInternal::Synced { @@ -249,12 +260,12 @@ impl Engine { ); } state.update(EngineStateInternal::Synced); - **state + (**state, CapabilitiesCacheAction::Update) } Err(EngineApiError::IsSyncing) => { let mut state = self.state.write().await; state.update(EngineStateInternal::Syncing); - **state + (**state, CapabilitiesCacheAction::Update) } Err(EngineApiError::Auth(err)) => { error!( @@ -265,7 +276,7 @@ impl Engine { let mut state = self.state.write().await; state.update(EngineStateInternal::AuthFailed); - **state + (**state, CapabilitiesCacheAction::None) } Err(e) => { error!( @@ -276,10 +287,30 @@ impl Engine { let mut state = self.state.write().await; state.update(EngineStateInternal::Offline); - **state + // need to clear the engine capabilities cache if we detect the + // execution engine is offline as it is likely the engine is being + // updated to a newer version with new capabilities + (**state, CapabilitiesCacheAction::Clear) } }; + // do this after dropping state lock guard to avoid holding two locks at once + match cache_action { + CapabilitiesCacheAction::None => {} + CapabilitiesCacheAction::Update => { + if let Err(e) = self + .get_engine_capabilities(Some(CACHED_ENGINE_CAPABILITIES_AGE_LIMIT)) + .await + { + warn!(self.log, + "Error during exchange capabilities"; + "error" => ?e, + ) + } + } + CapabilitiesCacheAction::Clear => self.api.clear_exchange_capabilties_cache().await, + } + debug!( self.log, "Execution engine upcheck complete"; @@ -287,6 +318,22 @@ impl Engine { ); } + /// Returns the execution engine capabilities resulting from a call to + /// engine_exchangeCapabilities. If the capabilities cache is not populated, + /// or if it is populated with a cached result of age >= `age_limit`, this + /// method will fetch the result from the execution engine and populate the + /// cache before returning it. Otherwise it will return a cached result from + /// a previous call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_capabilities( + &self, + age_limit: Option, + ) -> Result { + self.api.get_engine_capabilities(age_limit).await + } + /// Run `func` on the node regardless of the node's current state. /// /// ## Note diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 0a1a1eef390..ad72453f126 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -7,6 +7,7 @@ use crate::payload_cache::PayloadCache; use auth::{strip_prefix, Auth, JwtKey}; use builder_client::BuilderHttpClient; +pub use engine_api::EngineCapabilities; use engine_api::Error as ApiError; pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; @@ -265,12 +266,7 @@ pub struct ExecutionLayer { impl ExecutionLayer { /// Instantiate `Self` with an Execution engine specified in `Config`, using JSON-RPC via HTTP. 
- pub fn from_config( - config: Config, - executor: TaskExecutor, - log: Logger, - spec: &ChainSpec, - ) -> Result { + pub fn from_config(config: Config, executor: TaskExecutor, log: Logger) -> Result { let Config { execution_endpoints: urls, builder_url, @@ -325,9 +321,8 @@ impl ExecutionLayer { let engine: Engine = { let auth = Auth::new(jwt_key, jwt_id, jwt_version); debug!(log, "Loaded execution endpoint"; "endpoint" => %execution_url, "jwt_path" => ?secret_file.as_path()); - let api = - HttpJsonRpc::new_with_auth(execution_url, auth, execution_timeout_multiplier, spec) - .map_err(Error::ApiError)?; + let api = HttpJsonRpc::new_with_auth(execution_url, auth, execution_timeout_multiplier) + .map_err(Error::ApiError)?; Engine::new(api, executor.clone(), &log) }; @@ -1367,6 +1362,26 @@ impl ExecutionLayer { } } + /// Returns the execution engine capabilities resulting from a call to + /// engine_exchangeCapabilities. If the capabilities cache is not populated, + /// or if it is populated with a cached result of age >= `age_limit`, this + /// method will fetch the result from the execution engine and populate the + /// cache before returning it. Otherwise it will return a cached result from + /// a previous call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_capabilities( + &self, + age_limit: Option, + ) -> Result { + self.engine() + .request(|engine| engine.get_engine_capabilities(age_limit)) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } + /// Used during block production to determine if the merge has been triggered. /// /// ## Specification diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 1e096364975..31a8a5da19d 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -6,20 +6,27 @@ use serde_json::Value as JsonValue; use std::sync::Arc; use types::{EthSpec, ForkName}; +pub const GENERIC_ERROR_CODE: i64 = -1234; +pub const BAD_PARAMS_ERROR_CODE: i64 = -32602; +pub const UNKNOWN_PAYLOAD_ERROR_CODE: i64 = -38001; +pub const FORK_REQUEST_MISMATCH_ERROR_CODE: i64 = -32000; + pub async fn handle_rpc( body: JsonValue, ctx: Arc>, -) -> Result { +) -> Result { *ctx.previous_request.lock() = Some(body.clone()); let method = body .get("method") .and_then(JsonValue::as_str) - .ok_or_else(|| "missing/invalid method field".to_string())?; + .ok_or_else(|| "missing/invalid method field".to_string()) + .map_err(|s| (s, GENERIC_ERROR_CODE))?; let params = body .get("params") - .ok_or_else(|| "missing/invalid params field".to_string())?; + .ok_or_else(|| "missing/invalid params field".to_string()) + .map_err(|s| (s, GENERIC_ERROR_CODE))?; match method { ETH_SYNCING => Ok(JsonValue::Bool(false)), @@ -27,7 +34,8 @@ pub async fn handle_rpc( let tag = params .get(0) .and_then(JsonValue::as_str) - .ok_or_else(|| "missing/invalid params[0] value".to_string())?; + .ok_or_else(|| "missing/invalid params[0] value".to_string()) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; match tag { "latest" => Ok(serde_json::to_value( @@ -36,7 +44,10 @@ pub async fn handle_rpc( .latest_execution_block(), ) .unwrap()), - other => Err(format!("The tag {} is not supported", other)), + other => Err(( + format!("The tag {} is not supported", other), + BAD_PARAMS_ERROR_CODE, + )), } } ETH_GET_BLOCK_BY_HASH => { @@ -47,7 +58,8 @@ 
pub async fn handle_rpc( .and_then(|s| { s.parse() .map_err(|e| format!("unable to parse hash: {:?}", e)) - })?; + }) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; // If we have a static response set, just return that. if let Some(response) = *ctx.static_get_block_by_hash_response.lock() { @@ -57,7 +69,8 @@ pub async fn handle_rpc( let full_tx = params .get(1) .and_then(JsonValue::as_bool) - .ok_or_else(|| "missing/invalid params[1] value".to_string())?; + .ok_or_else(|| "missing/invalid params[1] value".to_string()) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; if full_tx { Ok(serde_json::to_value( ctx.execution_block_generator @@ -76,15 +89,17 @@ pub async fn handle_rpc( } ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 => { let request = match method { - ENGINE_NEW_PAYLOAD_V1 => { - JsonExecutionPayload::V1(get_param::>(params, 0)?) - } + ENGINE_NEW_PAYLOAD_V1 => JsonExecutionPayload::V1( + get_param::>(params, 0) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, + ), ENGINE_NEW_PAYLOAD_V2 => get_param::>(params, 0) .map(|jep| JsonExecutionPayload::V2(jep)) .or_else(|_| { get_param::>(params, 0) .map(|jep| JsonExecutionPayload::V1(jep)) - })?, + }) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, // TODO(4844) add that here.. _ => unreachable!(), }; @@ -97,20 +112,29 @@ pub async fn handle_rpc( match fork { ForkName::Merge => { if matches!(request, JsonExecutionPayload::V2(_)) { - return Err(format!( - "{} called with `ExecutionPayloadV2` before capella fork!", - method + return Err(( + format!( + "{} called with `ExecutionPayloadV2` before Capella fork!", + method + ), + GENERIC_ERROR_CODE, )); } } ForkName::Capella => { if method == ENGINE_NEW_PAYLOAD_V1 { - return Err(format!("{} called after capella fork!", method)); + return Err(( + format!("{} called after Capella fork!", method), + GENERIC_ERROR_CODE, + )); } if matches!(request, JsonExecutionPayload::V1(_)) { - return Err(format!( - "{} called with `ExecutionPayloadV1` after capella fork!", - method + return Err(( + format!( + "{} called with `ExecutionPayloadV1` after Capella fork!", + method + ), + GENERIC_ERROR_CODE, )); } } @@ -149,14 +173,20 @@ pub async fn handle_rpc( Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap()) } ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 => { - let request: JsonPayloadIdRequest = get_param(params, 0)?; + let request: JsonPayloadIdRequest = + get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let id = request.into(); let response = ctx .execution_block_generator .write() .get_payload(&id) - .ok_or_else(|| format!("no payload for id {:?}", id))?; + .ok_or_else(|| { + ( + format!("no payload for id {:?}", id), + UNKNOWN_PAYLOAD_ERROR_CODE, + ) + })?; // validate method called correctly according to shanghai fork time if ctx @@ -166,7 +196,10 @@ pub async fn handle_rpc( == ForkName::Capella && method == ENGINE_GET_PAYLOAD_V1 { - return Err(format!("{} called after capella fork!", method)); + return Err(( + format!("{} called after Capella fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); } // TODO(4844) add 4844 error checking here @@ -195,38 +228,42 @@ pub async fn handle_rpc( } } ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 => { - let forkchoice_state: JsonForkchoiceStateV1 = get_param(params, 0)?; + let forkchoice_state: JsonForkchoiceStateV1 = + get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let payload_attributes = match method { ENGINE_FORKCHOICE_UPDATED_V1 => { - let jpa1: Option = get_param(params, 1)?; + let jpa1: Option = + 
get_param(params, 1).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; jpa1.map(JsonPayloadAttributes::V1) } ENGINE_FORKCHOICE_UPDATED_V2 => { // we can't use `deny_unknown_fields` without breaking compatibility with some // clients that haven't updated to the latest engine_api spec. So instead we'll // need to deserialize based on timestamp - get_param::>(params, 1).and_then(|pa| { - pa.and_then(|pa| { - match ctx - .execution_block_generator - .read() - .get_fork_at_timestamp(*pa.timestamp()) - { - ForkName::Merge => { - get_param::>(params, 1) - .map(|opt| opt.map(JsonPayloadAttributes::V1)) - .transpose() + get_param::>(params, 1) + .and_then(|pa| { + pa.and_then(|pa| { + match ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(*pa.timestamp()) + { + ForkName::Merge => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V1)) + .transpose() + } + ForkName::Capella => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V2)) + .transpose() + } + _ => unreachable!(), } - ForkName::Capella => { - get_param::>(params, 1) - .map(|opt| opt.map(JsonPayloadAttributes::V2)) - .transpose() - } - _ => unreachable!(), - } + }) + .transpose() }) - .transpose() - })? + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? } _ => unreachable!(), }; @@ -240,20 +277,29 @@ pub async fn handle_rpc( { ForkName::Merge => { if matches!(pa, JsonPayloadAttributes::V2(_)) { - return Err(format!( - "{} called with `JsonPayloadAttributesV2` before capella fork!", - method + return Err(( + format!( + "{} called with `JsonPayloadAttributesV2` before Capella fork!", + method + ), + GENERIC_ERROR_CODE, )); } } ForkName::Capella => { if method == ENGINE_FORKCHOICE_UPDATED_V1 { - return Err(format!("{} called after capella fork!", method)); + return Err(( + format!("{} called after Capella fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); } if matches!(pa, JsonPayloadAttributes::V1(_)) { - return Err(format!( - "{} called with `JsonPayloadAttributesV1` after capella fork!", - method + return Err(( + format!( + "{} called with `JsonPayloadAttributesV1` after Capella fork!", + method + ), + FORK_REQUEST_MISMATCH_ERROR_CODE, )); } } @@ -281,10 +327,14 @@ pub async fn handle_rpc( return Ok(serde_json::to_value(response).unwrap()); } - let mut response = ctx.execution_block_generator.write().forkchoice_updated( - forkchoice_state.into(), - payload_attributes.map(|json| json.into()), - )?; + let mut response = ctx + .execution_block_generator + .write() + .forkchoice_updated( + forkchoice_state.into(), + payload_attributes.map(|json| json.into()), + ) + .map_err(|s| (s, GENERIC_ERROR_CODE))?; if let Some(mut status) = ctx.static_forkchoice_updated_response.lock().clone() { if status.status == PayloadStatusV1Status::Valid { @@ -305,9 +355,13 @@ pub async fn handle_rpc( }; Ok(serde_json::to_value(transition_config).unwrap()) } - other => Err(format!( - "The method {} does not exist/is not available", - other + ENGINE_EXCHANGE_CAPABILITIES => { + let engine_capabilities = ctx.engine_capabilities.read(); + Ok(serde_json::to_value(engine_capabilities.to_response()).unwrap()) + } + other => Err(( + format!("The method {} does not exist/is not available", other), + METHOD_NOT_FOUND_CODE, )), } } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 8ce4a65564a..06b5e81eb31 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ 
b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -84,8 +84,7 @@ impl TestingBuilder { }; let el = - ExecutionLayer::from_config(config, executor.clone(), executor.log().clone(), &spec) - .unwrap(); + ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); // This should probably be done for all fields, we only update ones we are testing with so far. let mut context = Context::for_mainnet(); diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index d061f13a6b5..ad73b2b4e79 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -73,8 +73,7 @@ impl MockExecutionLayer { ..Default::default() }; let el = - ExecutionLayer::from_config(config, executor.clone(), executor.log().clone(), &spec) - .unwrap(); + ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); Self { server, @@ -106,7 +105,7 @@ impl MockExecutionLayer { prev_randao, Address::repeat_byte(42), // FIXME: think about how to handle different forks / withdrawals here.. - Some(vec![]), + None, ); // Insert a proposer to ensure the fork choice updated command works. diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index bad02e36980..adf9358f06d 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -22,6 +22,7 @@ use tokio::{runtime, sync::oneshot}; use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; +use crate::EngineCapabilities; pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; pub use hook::Hook; pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder}; @@ -31,6 +32,15 @@ pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32]; pub const DEFAULT_BUILDER_THRESHOLD_WEI: u128 = 1_000_000_000_000_000_000; +pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { + new_payload_v1: true, + new_payload_v2: true, + forkchoice_updated_v1: true, + forkchoice_updated_v2: true, + get_payload_v1: true, + get_payload_v2: true, + exchange_transition_configuration_v1: true, +}; mod execution_block_generator; mod handle_rpc; @@ -117,6 +127,7 @@ impl MockServer { hook: <_>::default(), new_payload_statuses: <_>::default(), fcu_payload_statuses: <_>::default(), + engine_capabilities: Arc::new(RwLock::new(DEFAULT_ENGINE_CAPABILITIES)), _phantom: PhantomData, }); @@ -147,6 +158,10 @@ impl MockServer { } } + pub fn set_engine_capabilities(&self, engine_capabilities: EngineCapabilities) { + *self.ctx.engine_capabilities.write() = engine_capabilities; + } + pub fn new( handle: &runtime::Handle, jwt_key: JwtKey, @@ -469,6 +484,7 @@ pub struct Context { pub new_payload_statuses: Arc>>, pub fcu_payload_statuses: Arc>>, + pub engine_capabilities: Arc>, pub _phantom: PhantomData, } @@ -620,11 +636,11 @@ pub fn serve( "jsonrpc": JSONRPC_VERSION, "result": result }), - Err(message) => json!({ + Err((message, code)) => json!({ "id": id, "jsonrpc": JSONRPC_VERSION, "error": { - "code": -1234, // Junk error code. 
+ "code": code, "message": message } }), diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 2daacb0add9..fe7e51e9235 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -127,7 +127,7 @@ impl TestRig { ..Default::default() }; let execution_layer = - ExecutionLayer::from_config(config, executor.clone(), log.clone(), &spec).unwrap(); + ExecutionLayer::from_config(config, executor.clone(), log.clone()).unwrap(); ExecutionPair { execution_engine, execution_layer, @@ -146,7 +146,7 @@ impl TestRig { ..Default::default() }; let execution_layer = - ExecutionLayer::from_config(config, executor, log.clone(), &spec).unwrap(); + ExecutionLayer::from_config(config, executor, log.clone()).unwrap(); ExecutionPair { execution_engine, execution_layer, From 90b6ae62e630c1b39e7d7f0595b0ec8027c0291c Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Wed, 1 Feb 2023 19:37:46 -0600 Subject: [PATCH 143/263] Use Local Payload if More Profitable than Builder (#3934) * Use Local Payload if More Profitable than Builder * Rename clone -> clone_from_ref * Minimize Clones of GetPayloadResponse * Cleanup & Fix Tests * Added Tests for Payload Choice by Profit * Fix Outdated Comments --- beacon_node/beacon_chain/src/test_utils.rs | 8 +- beacon_node/execution_layer/src/engine_api.rs | 55 +++-- .../execution_layer/src/engine_api/http.rs | 23 +-- beacon_node/execution_layer/src/lib.rs | 112 ++++++++--- .../src/test_utils/handle_rpc.rs | 5 +- .../src/test_utils/mock_builder.rs | 4 +- .../src/test_utils/mock_execution_layer.rs | 4 +- .../execution_layer/src/test_utils/mod.rs | 2 + beacon_node/http_api/tests/tests.rs | 190 ++++++++++++++++-- consensus/types/src/execution_payload.rs | 10 + 10 files changed, 344 insertions(+), 69 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 875ff845aff..daba7115e05 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -426,6 +426,7 @@ where DEFAULT_TERMINAL_BLOCK, shanghai_time, eip4844_time, + None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec, None, @@ -435,7 +436,11 @@ where self } - pub fn mock_execution_layer_with_builder(mut self, beacon_url: SensitiveUrl) -> Self { + pub fn mock_execution_layer_with_builder( + mut self, + beacon_url: SensitiveUrl, + builder_threshold: Option, + ) -> Self { // Get a random unused port let port = unused_port::unused_tcp_port().unwrap(); let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); @@ -452,6 +457,7 @@ where DEFAULT_TERMINAL_BLOCK, shanghai_time, eip4844_time, + builder_threshold, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec.clone(), Some(builder_url.clone()), diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index da5e991b090..9918b679c35 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -14,8 +14,8 @@ use std::convert::TryFrom; use strum::IntoStaticStr; use superstruct::superstruct; pub use types::{ - Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, - ForkName, Hash256, Uint256, VariableList, Withdrawal, + Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, + 
ExecutionPayloadRef, FixedVector, ForkName, Hash256, Uint256, VariableList, Withdrawal, }; use types::{ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge}; @@ -322,6 +322,8 @@ pub struct ProposeBlindedBlockResponse { #[superstruct( variants(Merge, Capella, Eip4844), variant_attributes(derive(Clone, Debug, PartialEq),), + map_into(ExecutionPayload), + map_ref_into(ExecutionPayloadRef), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] @@ -336,22 +338,47 @@ pub struct GetPayloadResponse { pub block_value: Uint256, } -impl GetPayloadResponse { - pub fn execution_payload(self) -> ExecutionPayload { - match self { - GetPayloadResponse::Merge(response) => { - ExecutionPayload::Merge(response.execution_payload) - } - GetPayloadResponse::Capella(response) => { - ExecutionPayload::Capella(response.execution_payload) - } - GetPayloadResponse::Eip4844(response) => { - ExecutionPayload::Eip4844(response.execution_payload) - } +impl<'a, T: EthSpec> From> for ExecutionPayloadRef<'a, T> { + fn from(response: GetPayloadResponseRef<'a, T>) -> Self { + map_get_payload_response_ref_into_execution_payload_ref!(&'a _, response, |inner, cons| { + cons(&inner.execution_payload) + }) + } +} + +impl From> for ExecutionPayload { + fn from(response: GetPayloadResponse) -> Self { + map_get_payload_response_into_execution_payload!(response, |inner, cons| { + cons(inner.execution_payload) + }) + } +} + +impl From> for (ExecutionPayload, Uint256) { + fn from(response: GetPayloadResponse) -> Self { + match response { + GetPayloadResponse::Merge(inner) => ( + ExecutionPayload::Merge(inner.execution_payload), + inner.block_value, + ), + GetPayloadResponse::Capella(inner) => ( + ExecutionPayload::Capella(inner.execution_payload), + inner.block_value, + ), + GetPayloadResponse::Eip4844(inner) => ( + ExecutionPayload::Eip4844(inner.execution_payload), + inner.block_value, + ), } } } +impl GetPayloadResponse { + pub fn execution_payload_ref(&self) -> ExecutionPayloadRef { + self.to_ref().into() + } +} + #[derive(Clone, Copy, Debug)] pub struct EngineCapabilities { pub new_payload_v1: bool, diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index d1faab42c97..3871ca27afd 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -804,7 +804,7 @@ impl HttpJsonRpc { pub async fn get_payload_v1( &self, payload_id: PayloadId, - ) -> Result, Error> { + ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); let payload_v1: JsonExecutionPayloadV1 = self @@ -815,7 +815,11 @@ impl HttpJsonRpc { ) .await?; - Ok(JsonExecutionPayload::V1(payload_v1).into()) + Ok(GetPayloadResponse::Merge(GetPayloadResponseMerge { + execution_payload: payload_v1.into(), + // Have to guess zero here as we don't know the value + block_value: Uint256::zero(), + })) } pub async fn get_payload_v2( @@ -1015,16 +1019,10 @@ impl HttpJsonRpc { &self, fork_name: ForkName, payload_id: PayloadId, - ) -> Result, Error> { + ) -> Result, Error> { let engine_capabilities = self.get_engine_capabilities(None).await?; if engine_capabilities.get_payload_v2 { - // TODO: modify this method to return GetPayloadResponse instead - // of throwing away the `block_value` and returning only the - // ExecutionPayload - Ok(self - .get_payload_v2(fork_name, payload_id) - .await? 
- .execution_payload()) + self.get_payload_v2(fork_name, payload_id).await } else if engine_capabilities.new_payload_v1 { self.get_payload_v1(payload_id).await } else { @@ -1675,10 +1673,11 @@ mod test { } })], |client| async move { - let payload = client + let payload: ExecutionPayload<_> = client .get_payload_v1::(str_to_payload_id("0xa247243752eb10b4")) .await - .unwrap(); + .unwrap() + .into(); let expected = ExecutionPayload::Merge(ExecutionPayloadMerge { parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index ad72453f126..8f206886e4b 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -119,9 +119,13 @@ impl From for Error { } pub enum BlockProposalContents> { - Payload(Payload), + Payload { + payload: Payload, + block_value: Uint256, + }, PayloadAndBlobs { payload: Payload, + block_value: Uint256, kzg_commitments: Vec, blobs: Vec>, }, @@ -130,9 +134,13 @@ pub enum BlockProposalContents> { impl> BlockProposalContents { pub fn payload(&self) -> &Payload { match self { - Self::Payload(payload) => payload, + Self::Payload { + payload, + block_value: _, + } => payload, Self::PayloadAndBlobs { payload, + block_value: _, kzg_commitments: _, blobs: _, } => payload, @@ -140,9 +148,13 @@ impl> BlockProposalContents Payload { match self { - Self::Payload(payload) => payload, + Self::Payload { + payload, + block_value: _, + } => payload, Self::PayloadAndBlobs { payload, + block_value: _, kzg_commitments: _, blobs: _, } => payload, @@ -150,9 +162,13 @@ impl> BlockProposalContents Option<&[KzgCommitment]> { match self { - Self::Payload(_) => None, + Self::Payload { + payload: _, + block_value: _, + } => None, Self::PayloadAndBlobs { payload: _, + block_value: _, kzg_commitments, blobs: _, } => Some(kzg_commitments), @@ -160,21 +176,43 @@ impl> BlockProposalContents Option<&[Blob]> { match self { - Self::Payload(_) => None, + Self::Payload { + payload: _, + block_value: _, + } => None, Self::PayloadAndBlobs { payload: _, + block_value: _, kzg_commitments: _, blobs, } => Some(blobs), } } + pub fn block_value(&self) -> &Uint256 { + match self { + Self::Payload { + payload: _, + block_value, + } => block_value, + Self::PayloadAndBlobs { + payload: _, + block_value, + kzg_commitments: _, + blobs: _, + } => block_value, + } + } pub fn default_at_fork(fork_name: ForkName) -> Result { Ok(match fork_name { ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - BlockProposalContents::Payload(Payload::default_at_fork(fork_name)?) + BlockProposalContents::Payload { + payload: Payload::default_at_fork(fork_name)?, + block_value: Uint256::zero(), + } } ForkName::Eip4844 => BlockProposalContents::PayloadAndBlobs { payload: Payload::default_at_fork(fork_name)?, + block_value: Uint256::zero(), blobs: vec![], kzg_commitments: vec![], }, @@ -366,12 +404,12 @@ impl ExecutionLayer { &self.inner.builder } - /// Cache a full payload, keyed on the `tree_hash_root` of its `transactions` field. 
- fn cache_payload(&self, payload: &ExecutionPayload) -> Option> { - self.inner.payload_cache.put(payload.clone()) + /// Cache a full payload, keyed on the `tree_hash_root` of the payload + fn cache_payload(&self, payload: ExecutionPayloadRef) -> Option> { + self.inner.payload_cache.put(payload.clone_from_ref()) } - /// Attempt to retrieve a full payload from the payload cache by the `transactions_root`. + /// Attempt to retrieve a full payload from the payload cache by the payload root pub fn get_payload_by_root(&self, root: &Hash256) -> Option> { self.inner.payload_cache.pop(root) } @@ -808,6 +846,18 @@ impl ExecutionLayer { "parent_hash" => ?parent_hash, ); + let relay_value = relay.data.message.value; + let local_value = *local.block_value(); + if local_value >= relay_value { + info!( + self.log(), + "Local block is more profitable than relay block"; + "local_block_value" => %local_value, + "relay_value" => %relay_value + ); + return Ok(ProvenancedPayload::Local(local)); + } + match verify_builder_bid( &relay, parent_hash, @@ -818,7 +868,10 @@ impl ExecutionLayer { spec, ) { Ok(()) => Ok(ProvenancedPayload::Builder( - BlockProposalContents::Payload(relay.data.message.header), + BlockProposalContents::Payload { + payload: relay.data.message.header, + block_value: relay.data.message.value, + }, )), Err(reason) if !reason.payload_invalid() => { info!( @@ -869,12 +922,18 @@ impl ExecutionLayer { spec, ) { Ok(()) => Ok(ProvenancedPayload::Builder( - BlockProposalContents::Payload(relay.data.message.header), + BlockProposalContents::Payload { + payload: relay.data.message.header, + block_value: relay.data.message.value, + }, )), // If the payload is valid then use it. The local EE failed // to produce a payload so we have no alternative. Err(e) if !e.payload_invalid() => Ok(ProvenancedPayload::Builder( - BlockProposalContents::Payload(relay.data.message.header), + BlockProposalContents::Payload { + payload: relay.data.message.header, + block_value: relay.data.message.value, + }, )), Err(reason) => { metrics::inc_counter_vec( @@ -988,7 +1047,7 @@ impl ExecutionLayer { payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, current_fork: ForkName, - f: fn(&ExecutionLayer, &ExecutionPayload) -> Option>, + f: fn(&ExecutionLayer, ExecutionPayloadRef) -> Option>, ) -> Result, Error> { self.engine() .request(move |engine| async move { @@ -1071,9 +1130,9 @@ impl ExecutionLayer { ); engine.api.get_payload::(current_fork, payload_id).await }; - let (blob, payload) = tokio::join!(blob_fut, payload_fut); - let payload = payload.map(|full_payload| { - if full_payload.fee_recipient() != payload_attributes.suggested_fee_recipient() { + let (blob, payload_response) = tokio::join!(blob_fut, payload_fut); + let (execution_payload, block_value) = payload_response.map(|payload_response| { + if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() { error!( self.log(), "Inconsistent fee recipient"; @@ -1082,28 +1141,32 @@ impl ExecutionLayer { indicate that fees are being diverted to another address. 
Please \ ensure that the value of suggested_fee_recipient is set correctly and \ that the Execution Engine is trusted.", - "fee_recipient" => ?full_payload.fee_recipient(), + "fee_recipient" => ?payload_response.execution_payload_ref().fee_recipient(), "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), ); } - if f(self, &full_payload).is_some() { + if f(self, payload_response.execution_payload_ref()).is_some() { warn!( self.log(), "Duplicate payload cached, this might indicate redundant proposal \ attempts." ); } - full_payload.into() + payload_response.into() })?; if let Some(blob) = blob.transpose()? { // FIXME(sean) cache blobs Ok(BlockProposalContents::PayloadAndBlobs { - payload, + payload: execution_payload.into(), + block_value, blobs: blob.blobs, kzg_commitments: blob.kzgs, }) } else { - Ok(BlockProposalContents::Payload(payload)) + Ok(BlockProposalContents::Payload { + payload: execution_payload.into(), + block_value, + }) } }) .await @@ -2089,7 +2152,10 @@ mod test { } } -fn noop(_: &ExecutionLayer, _: &ExecutionPayload) -> Option> { +fn noop( + _: &ExecutionLayer, + _: ExecutionPayloadRef, +) -> Option> { None } diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 31a8a5da19d..138c8f6bcb6 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -1,6 +1,7 @@ use super::Context; use crate::engine_api::{http::*, *}; use crate::json_structures::*; +use crate::test_utils::DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI; use serde::de::DeserializeOwned; use serde_json::Value as JsonValue; use std::sync::Arc; @@ -211,14 +212,14 @@ pub async fn handle_rpc( JsonExecutionPayload::V1(execution_payload) => { serde_json::to_value(JsonGetPayloadResponseV1 { execution_payload, - block_value: 0.into(), + block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), }) .unwrap() } JsonExecutionPayload::V2(execution_payload) => { serde_json::to_value(JsonGetPayloadResponseV2 { execution_payload, - block_value: 0.into(), + block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), }) .unwrap() } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 06b5e81eb31..40a0c41afab 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,4 +1,4 @@ -use crate::test_utils::DEFAULT_JWT_SECRET; +use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; use crate::{Config, ExecutionLayer, PayloadAttributes}; use async_trait::async_trait; use eth2::types::{BlockId, StateId, ValidatorId}; @@ -328,7 +328,7 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { let mut message = BuilderBid { header, - value: ssz_rs::U256::default(), + value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, public_key: self.builder_sk.public_key(), }; diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index ad73b2b4e79..1a5d1fd1983 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -29,6 +29,7 @@ impl MockExecutionLayer { DEFAULT_TERMINAL_BLOCK, None, None, + None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec, None, @@ -41,6 +42,7 @@ impl 
MockExecutionLayer { terminal_block: u64, shanghai_time: Option, eip4844_time: Option, + builder_threshold: Option, jwt_key: Option, spec: ChainSpec, builder_url: Option, @@ -69,7 +71,7 @@ impl MockExecutionLayer { builder_url, secret_files: vec![path], suggested_fee_recipient: Some(Address::repeat_byte(42)), - builder_profit_threshold: DEFAULT_BUILDER_THRESHOLD_WEI, + builder_profit_threshold: builder_threshold.unwrap_or(DEFAULT_BUILDER_THRESHOLD_WEI), ..Default::default() }; let el = diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index adf9358f06d..077d29575ee 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -32,6 +32,8 @@ pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32]; pub const DEFAULT_BUILDER_THRESHOLD_WEI: u128 = 1_000_000_000_000_000_000; +pub const DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI: u128 = 10_000_000_000_000_000; +pub const DEFAULT_BUILDER_PAYLOAD_VALUE_WEI: u128 = 20_000_000_000_000_000; pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { new_payload_v1: true, new_payload_v2: true, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 86733cf63ad..43099c7a916 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -11,9 +11,11 @@ use eth2::{ types::{BlockId as CoreBlockId, StateId as CoreStateId, *}, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; -use execution_layer::test_utils::Operation; use execution_layer::test_utils::TestingBuilder; use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI; +use execution_layer::test_utils::{ + Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, +}; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; use http_api::{BlockId, StateId}; @@ -72,38 +74,53 @@ struct ApiTester { mock_builder: Option>>, } +struct ApiTesterConfig { + spec: ChainSpec, + builder_threshold: Option, +} + +impl Default for ApiTesterConfig { + fn default() -> Self { + let mut spec = E::default_spec(); + spec.shard_committee_period = 2; + Self { + spec, + builder_threshold: None, + } + } +} + impl ApiTester { pub async fn new() -> Self { // This allows for testing voluntary exits without building out a massive chain. - let mut spec = E::default_spec(); - spec.shard_committee_period = 2; - Self::new_from_spec(spec).await + Self::new_from_config(ApiTesterConfig::default()).await } pub async fn new_with_hard_forks(altair: bool, bellatrix: bool) -> Self { - let mut spec = E::default_spec(); - spec.shard_committee_period = 2; + let mut config = ApiTesterConfig::default(); // Set whether the chain has undergone each hard fork. 
if altair { - spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); } if bellatrix { - spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); } - Self::new_from_spec(spec).await + Self::new_from_config(config).await } - pub async fn new_from_spec(spec: ChainSpec) -> Self { + pub async fn new_from_config(config: ApiTesterConfig) -> Self { // Get a random unused port + let spec = config.spec; let port = unused_port::unused_tcp_port().unwrap(); let beacon_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); let harness = Arc::new( BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) + .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() - .mock_execution_layer_with_builder(beacon_url.clone()) + .mock_execution_layer_with_builder(beacon_url.clone(), config.builder_threshold) .build(), ); @@ -358,6 +375,28 @@ impl ApiTester { tester } + pub async fn new_mev_tester_no_builder_threshold() -> Self { + let mut config = ApiTesterConfig { + builder_threshold: Some(0), + spec: E::default_spec(), + }; + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + let tester = Self::new_from_config(config) + .await + .test_post_validator_register_validator() + .await; + tester + .mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, + ))); + tester + } + fn skip_slots(self, count: u64) -> Self { for _ in 0..count { self.chain @@ -3278,6 +3317,117 @@ impl ApiTester { self } + pub async fn test_builder_payload_chosen_when_more_profitable(self) -> Self { + // Mutate value. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The builder's payload should've been chosen, so this cache should not be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_local_payload_chosen_when_equally_profitable(self) -> Self { + // Mutate value. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The local payload should've been chosen, so this cache should be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_local_payload_chosen_when_more_profitable(self) -> Self { + // Mutate value. 
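+ // The builder's bid is set one wei *below* the mock EL's payload value
+ // (`DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI`), so the locally built payload should
+ // win the profit comparison and therefore be present in the payload cache
+ // checked at the end of this test.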
+ self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI - 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The local payload should've been chosen, so this cache should be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + #[cfg(target_os = "linux")] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap(); @@ -3747,9 +3897,9 @@ async fn get_events() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_events_altair() { - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(Epoch::new(0)); - ApiTester::new_from_spec(spec) + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) .await .test_get_events_altair() .await; @@ -4262,6 +4412,18 @@ async fn builder_inadequate_builder_threshold() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_payload_chosen_by_profit() { + ApiTester::new_mev_tester_no_builder_threshold() + .await + .test_builder_payload_chosen_when_more_profitable() + .await + .test_local_payload_chosen_when_equally_profitable() + .await + .test_local_payload_chosen_when_more_profitable() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index a57d4114128..1721960f8b4 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -87,6 +87,16 @@ pub struct ExecutionPayload { pub withdrawals: Withdrawals, } +impl<'a, T: EthSpec> ExecutionPayloadRef<'a, T> { + // this emulates clone on a normal reference type + pub fn clone_from_ref(&self) -> ExecutionPayload { + map_execution_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.clone().into() + }) + } +} + impl ExecutionPayload { pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { match fork_name { From 5b398b19905565c4d16da8c4ed02ace5f1284bf7 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Sun, 5 Feb 2023 20:09:13 -0600 Subject: [PATCH 144/263] Don't Reject all Builder Bids After Capella (#3940) * Fix bug in Builder API Post-Capella * Fix Clippy Complaints --- beacon_node/execution_layer/src/lib.rs | 41 ++++++++++++++++---------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 8f206886e4b..752fc8f6815 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -861,10 +861,10 @@ impl ExecutionLayer { match verify_builder_bid( &relay, parent_hash, - payload_attributes.prev_randao(), - payload_attributes.timestamp(), + payload_attributes, Some(local.payload().block_number()), self.inner.builder_profit_threshold, + current_fork, spec, ) { Ok(()) => Ok(ProvenancedPayload::Builder( @@ -915,10 
+915,10 @@ impl ExecutionLayer { match verify_builder_bid( &relay, parent_hash, - payload_attributes.prev_randao(), - payload_attributes.timestamp(), + payload_attributes, None, self.inner.builder_profit_threshold, + current_fork, spec, ) { Ok(()) => Ok(ProvenancedPayload::Builder( @@ -1875,6 +1875,11 @@ enum InvalidBuilderPayload { signature: Signature, pubkey: PublicKeyBytes, }, + #[allow(dead_code)] + WithdrawalsRoot { + payload: Hash256, + expected: Hash256, + }, } impl InvalidBuilderPayload { @@ -1889,6 +1894,7 @@ impl InvalidBuilderPayload { InvalidBuilderPayload::BlockNumber { .. } => true, InvalidBuilderPayload::Fork { .. } => true, InvalidBuilderPayload::Signature { .. } => true, + InvalidBuilderPayload::WithdrawalsRoot { .. } => true, } } } @@ -1924,6 +1930,13 @@ impl fmt::Display for InvalidBuilderPayload { "invalid payload signature {} for pubkey {}", signature, pubkey ), + InvalidBuilderPayload::WithdrawalsRoot { payload, expected } => { + write!( + f, + "payload withdrawals root was {} not {}", + payload, expected + ) + } } } } @@ -1932,10 +1945,10 @@ impl fmt::Display for InvalidBuilderPayload { fn verify_builder_bid>( bid: &ForkVersionedResponse>, parent_hash: ExecutionBlockHash, - prev_randao: Hash256, - timestamp: u64, + payload_attributes: &PayloadAttributes, block_number: Option, profit_threshold: Uint256, + current_fork: ForkName, spec: &ChainSpec, ) -> Result<(), Box> { let is_signature_valid = bid.data.verify_signature(spec); @@ -1962,29 +1975,25 @@ fn verify_builder_bid>( payload: header.parent_hash(), expected: parent_hash, })) - } else if header.prev_randao() != prev_randao { + } else if header.prev_randao() != payload_attributes.prev_randao() { Err(Box::new(InvalidBuilderPayload::PrevRandao { payload: header.prev_randao(), - expected: prev_randao, + expected: payload_attributes.prev_randao(), })) - } else if header.timestamp() != timestamp { + } else if header.timestamp() != payload_attributes.timestamp() { Err(Box::new(InvalidBuilderPayload::Timestamp { payload: header.timestamp(), - expected: timestamp, + expected: payload_attributes.timestamp(), })) } else if block_number.map_or(false, |n| n != header.block_number()) { Err(Box::new(InvalidBuilderPayload::BlockNumber { payload: header.block_number(), expected: block_number, })) - } else if !matches!(bid.version, Some(ForkName::Merge)) { - // Once fork information is added to the payload, we will need to - // check that the local and relay payloads match. At this point, if - // we are requesting a payload at all, we have to assume this is - // the Bellatrix fork. + } else if bid.version != Some(current_fork) { Err(Box::new(InvalidBuilderPayload::Fork { payload: bid.version, - expected: ForkName::Merge, + expected: current_fork, })) } else if !is_signature_valid { Err(Box::new(InvalidBuilderPayload::Signature { From c56706efaeba346a4def8a658a1ac4a08f09a589 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 6 Feb 2023 04:18:03 +0000 Subject: [PATCH 145/263] Unpin fixed-hash (#3917) ## Proposed Changes Remove the `[patch]` for `fixed-hash`. We pinned it years ago in #2710 to fix `arbitrary` support. Nowadays the 0.7 version of `fixed-hash` is only used by the `web3` crate and doesn't need `arbitrary`. 
~~Blocked on #3916 but could be merged in the same Bors batch.~~ --- Cargo.lock | 3 ++- Cargo.toml | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 56b3724016b..05e543049c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2602,7 +2602,8 @@ dependencies = [ [[package]] name = "fixed-hash" version = "0.7.0" -source = "git+https://github.com/paritytech/parity-common?rev=df638ab0885293d21d656dc300d39236b69ce57d#df638ab0885293d21d656dc300d39236b69ce57d" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ "byteorder", "rand 0.8.5", diff --git a/Cargo.toml b/Cargo.toml index de01771eb9c..251da36cbf8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -92,7 +92,6 @@ resolver = "2" [patch] [patch.crates-io] -fixed-hash = { git = "https://github.com/paritytech/parity-common", rev="df638ab0885293d21d656dc300d39236b69ce57d" } warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" } eth2_ssz = { path = "consensus/ssz" } eth2_ssz_derive = { path = "consensus/ssz_derive" } From 4d07e4050102e9e2802d9e961006ca0b8e162b52 Mon Sep 17 00:00:00 2001 From: kevinbogner Date: Tue, 7 Feb 2023 00:00:19 +0000 Subject: [PATCH 146/263] Implement `attestation_rewards` API (per-validator reward) (#3822) ## Issue Addressed #3661 ## Proposed Changes `/eth/v1/beacon/rewards/attestations/{epoch}` ```json { "execution_optimistic": false, "finalized": false, "data": [ { "ideal_rewards": [ { "effective_balance": "1000000000", "head": "2500", "target": "5000", "source": "5000" } ], "total_rewards": [ { "validator_index": "0", "head": "2000", "target": "2000", "source": "4000", "inclusion_delay": "2000" } ] } ] } ``` The issue contains the implementation of three per-validator reward APIs: - [`sync_committee_rewards`](https://github.com/sigp/lighthouse/pull/3790) - `attestation_rewards` - `block_rewards`. This PR *only* implements the `attestation_rewards`. The endpoints can be viewed in the Ethereum Beacon nodes API browser: https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Rewards ## Additional Info The implementation of [consensus client reward APIs](https://github.com/eth-protocol-fellows/cohort-three/blob/master/projects/project-ideas.md#consensus-client-reward-apis) is part of the [EPF](https://github.com/eth-protocol-fellows/cohort-three). 
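As a usage illustration (a hedged sketch, not code from this PR's diff; the wrapper function name below is invented), the `common/eth2` helper added here can drive the new route. Note that, as implemented in this PR, the helper simply posts the validator list and returns `Ok(())`, without yet taking the epoch as a parameter or decoding the response body:

```rust
use eth2::{types::ValidatorId, BeaconNodeHttpClient, Error};

// Hypothetical caller: request attestation rewards for two validators via the
// client helper introduced in this PR.
async fn request_attestation_rewards(client: &BeaconNodeHttpClient) -> Result<(), Error> {
    let validators = vec![ValidatorId::Index(0), ValidatorId::Index(1)];
    client.post_beacon_rewards_attestations(&validators).await
}
```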
--- - [x] `get_state` - [x] Calculate *ideal rewards* with some logic from `get_flag_index_deltas` - [x] Calculate *actual rewards* with some logic from `get_flag_index_deltas` - [x] Code cleanup - [x] Testing --- .../beacon_chain/src/attestation_rewards.rs | 196 ++++++++++++++++++ beacon_node/beacon_chain/src/errors.rs | 2 +- beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/http_api/src/lib.rs | 53 +++++ common/eth2/src/lib.rs | 18 ++ common/eth2/src/lighthouse.rs | 2 + .../src/lighthouse/attestation_rewards.rs | 42 ++++ common/eth2/src/types.rs | 9 + 8 files changed, 322 insertions(+), 1 deletion(-) create mode 100644 beacon_node/beacon_chain/src/attestation_rewards.rs create mode 100644 common/eth2/src/lighthouse/attestation_rewards.rs diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs new file mode 100644 index 00000000000..3f39946978f --- /dev/null +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -0,0 +1,196 @@ +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::attestation_rewards::{IdealAttestationRewards, TotalAttestationRewards}; +use eth2::lighthouse::StandardAttestationRewards; +use participation_cache::ParticipationCache; +use safe_arith::SafeArith; +use slog::{debug, Logger}; +use state_processing::{ + common::altair::BaseRewardPerIncrement, + per_epoch_processing::altair::{participation_cache, rewards_and_penalties::get_flag_weight}, +}; +use std::collections::HashMap; +use store::consts::altair::{ + PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, + TIMELY_TARGET_FLAG_INDEX, +}; +use types::consts::altair::WEIGHT_DENOMINATOR; + +use types::{Epoch, EthSpec}; + +use eth2::types::ValidatorId; + +impl BeaconChain { + pub fn compute_attestation_rewards( + &self, + epoch: Epoch, + validators: Vec, + log: Logger, + ) -> Result { + debug!(log, "computing attestation rewards"; "epoch" => epoch, "validator_count" => validators.len()); + + // Get state + let spec = &self.spec; + + let state_slot = (epoch + 1).end_slot(T::EthSpec::slots_per_epoch()); + + let state_root = self + .state_root_at_slot(state_slot)? + .ok_or(BeaconChainError::NoStateForSlot(state_slot))?; + + let mut state = self + .get_state(&state_root, Some(state_slot))? 
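+ // What follows mirrors the logic of `get_flag_index_deltas`: for each
+ // participation flag and each effective-balance tier (0 to 32 ETH) the ideal
+ // reward is
+ //   base_reward * flag_weight * participating_increments
+ //     / active_increments / WEIGHT_DENOMINATOR,
+ // and the reward is zeroed (while the penalty is kept) whenever the chain is
+ // in an inactivity leak.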
+ .ok_or(BeaconChainError::MissingBeaconState(state_root))?; + + // Calculate ideal_rewards + let participation_cache = ParticipationCache::new(&state, spec)?; + + let previous_epoch = state.previous_epoch(); + + let mut ideal_rewards_hashmap = HashMap::new(); + + for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() { + let weight = get_flag_weight(flag_index) + .map_err(|_| BeaconChainError::AttestationRewardsError)?; + + let unslashed_participating_indices = participation_cache + .get_unslashed_participating_indices(flag_index, previous_epoch)?; + + let unslashed_participating_balance = + unslashed_participating_indices + .total_balance() + .map_err(|_| BeaconChainError::AttestationRewardsError)?; + + let unslashed_participating_increments = + unslashed_participating_balance.safe_div(spec.effective_balance_increment)?; + + let total_active_balance = participation_cache.current_epoch_total_active_balance(); + + let active_increments = + total_active_balance.safe_div(spec.effective_balance_increment)?; + + let base_reward_per_increment = + BaseRewardPerIncrement::new(total_active_balance, spec)?; + + for effective_balance_eth in 0..=32 { + let base_reward = + effective_balance_eth.safe_mul(base_reward_per_increment.as_u64())?; + + let penalty = -(base_reward.safe_mul(weight)?.safe_div(WEIGHT_DENOMINATOR)? as i64); + + let reward_numerator = base_reward + .safe_mul(weight)? + .safe_mul(unslashed_participating_increments)?; + + let ideal_reward = reward_numerator + .safe_div(active_increments)? + .safe_div(WEIGHT_DENOMINATOR)?; + if !state.is_in_inactivity_leak(previous_epoch, spec) { + ideal_rewards_hashmap + .insert((flag_index, effective_balance_eth), (ideal_reward, penalty)); + } else { + ideal_rewards_hashmap.insert((flag_index, effective_balance_eth), (0, penalty)); + } + } + } + + // Calculate total_rewards + let mut total_rewards: Vec = Vec::new(); + + let validators = if validators.is_empty() { + participation_cache.eligible_validator_indices().to_vec() + } else { + validators + .into_iter() + .map(|validator| match validator { + ValidatorId::Index(i) => Ok(i as usize), + ValidatorId::PublicKey(pubkey) => state + .get_validator_index(&pubkey)? + .ok_or(BeaconChainError::ValidatorPubkeyUnknown(pubkey)), + }) + .collect::, _>>()? + }; + + for validator_index in &validators { + let eligible = state.is_eligible_validator(previous_epoch, *validator_index)?; + let mut head_reward = 0u64; + let mut target_reward = 0i64; + let mut source_reward = 0i64; + + if eligible { + let effective_balance = state.get_effective_balance(*validator_index)?; + + let effective_balance_eth = + effective_balance.safe_div(spec.effective_balance_increment)?; + + for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() { + let (ideal_reward, penalty) = ideal_rewards_hashmap + .get(&(flag_index, effective_balance_eth)) + .ok_or(BeaconChainError::AttestationRewardsError)?; + let voted_correctly = participation_cache + .get_unslashed_participating_indices(flag_index, previous_epoch) + .map_err(|_| BeaconChainError::AttestationRewardsError)? 
+ .contains(*validator_index) + .map_err(|_| BeaconChainError::AttestationRewardsError)?; + if voted_correctly { + if flag_index == TIMELY_HEAD_FLAG_INDEX { + head_reward += ideal_reward; + } else if flag_index == TIMELY_TARGET_FLAG_INDEX { + target_reward += *ideal_reward as i64; + } else if flag_index == TIMELY_SOURCE_FLAG_INDEX { + source_reward += *ideal_reward as i64; + } + } else if flag_index == TIMELY_HEAD_FLAG_INDEX { + head_reward = 0; + } else if flag_index == TIMELY_TARGET_FLAG_INDEX { + target_reward = *penalty; + } else if flag_index == TIMELY_SOURCE_FLAG_INDEX { + source_reward = *penalty; + } + } + } + total_rewards.push(TotalAttestationRewards { + validator_index: *validator_index as u64, + head: head_reward, + target: target_reward, + source: source_reward, + }); + } + + // Convert hashmap to vector + let mut ideal_rewards: Vec = ideal_rewards_hashmap + .iter() + .map( + |((flag_index, effective_balance_eth), (ideal_reward, _penalty))| { + (flag_index, effective_balance_eth, ideal_reward) + }, + ) + .fold( + HashMap::new(), + |mut acc, (flag_index, effective_balance_eth, ideal_reward)| { + let entry = acc.entry(*effective_balance_eth as u32).or_insert( + IdealAttestationRewards { + effective_balance: *effective_balance_eth, + head: 0, + target: 0, + source: 0, + }, + ); + match *flag_index { + TIMELY_SOURCE_FLAG_INDEX => entry.source += ideal_reward, + TIMELY_TARGET_FLAG_INDEX => entry.target += ideal_reward, + TIMELY_HEAD_FLAG_INDEX => entry.head += ideal_reward, + _ => {} + } + acc + }, + ) + .into_values() + .collect::>(); + ideal_rewards.sort_by(|a, b| a.effective_balance.cmp(&b.effective_balance)); + + Ok(StandardAttestationRewards { + ideal_rewards, + total_rewards, + }) + } +} diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 788369e55e7..420af2ea1b8 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -50,7 +50,6 @@ pub enum BeaconChainError { }, SlotClockDidNotStart, NoStateForSlot(Slot), - UnableToFindTargetRoot(Slot), BeaconStateError(BeaconStateError), DBInconsistent(String), DBError(store::Error), @@ -159,6 +158,7 @@ pub enum BeaconChainError { BlockRewardAttestationError, BlockRewardSyncError, SyncCommitteeRewardsSyncError, + AttestationRewardsError, HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), HeadBlockMissingFromForkChoice(Hash256), diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index ae3e98f9131..e3b5f1e0af7 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,4 +1,5 @@ #![recursion_limit = "128"] // For lazy-static +pub mod attestation_rewards; pub mod attestation_verification; mod attester_cache; mod beacon_chain; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 1399bb99a4f..8bb7db077d4 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1709,6 +1709,58 @@ pub fn serve( .and(warp::path("rewards")) .and(chain_filter.clone()); + // POST beacon/rewards/attestations/{epoch} + let post_beacon_rewards_attestations = beacon_rewards_path + .clone() + .and(warp::path("attestations")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(warp::body::json()) + .and(log_filter.clone()) + .and_then( + |chain: Arc>, + epoch: Epoch, + validators: Vec, + log: Logger| { + blocking_json_task(move || { + let attestation_rewards = chain + .compute_attestation_rewards(epoch, 
validators, log) + .map_err(|e| match e { + BeaconChainError::MissingBeaconState(root) => { + warp_utils::reject::custom_not_found(format!( + "missing state {root:?}", + )) + } + BeaconChainError::NoStateForSlot(slot) => { + warp_utils::reject::custom_not_found(format!( + "missing state at slot {slot}" + )) + } + BeaconChainError::BeaconStateError( + BeaconStateError::UnknownValidator(validator_index), + ) => warp_utils::reject::custom_bad_request(format!( + "validator is unknown: {validator_index}" + )), + BeaconChainError::ValidatorPubkeyUnknown(pubkey) => { + warp_utils::reject::custom_bad_request(format!( + "validator pubkey is unknown: {pubkey:?}" + )) + } + e => warp_utils::reject::custom_server_error(format!( + "unexpected error: {:?}", + e + )), + })?; + let execution_optimistic = + chain.is_optimistic_or_invalid_head().unwrap_or_default(); + + Ok(attestation_rewards) + .map(api_types::GenericResponse::from) + .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + }) + }, + ); + // POST beacon/rewards/sync_committee/{block_id} let post_beacon_rewards_sync_committee = beacon_rewards_path .clone() @@ -3432,6 +3484,7 @@ pub fn serve( .or(post_beacon_pool_proposer_slashings.boxed()) .or(post_beacon_pool_voluntary_exits.boxed()) .or(post_beacon_pool_sync_committees.boxed()) + .or(post_beacon_rewards_attestations.boxed()) .or(post_beacon_rewards_sync_committee.boxed()) .or(post_validator_duties_attester.boxed()) .or(post_validator_duties_sync.boxed()) diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 00b664446d3..b9acc696204 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1044,6 +1044,24 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST beacon/rewards/attestations` + pub async fn post_beacon_rewards_attestations( + &self, + attestations: &[ValidatorId], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("rewards") + .push("attestations"); + + self.post(path, &attestations).await?; + + Ok(()) + } + /// `POST validator/contribution_and_proofs` pub async fn post_validator_contribution_and_proofs( &self, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 068abd693a2..06801a3927d 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -1,6 +1,7 @@ //! This module contains endpoints that are non-standard and only available on Lighthouse servers. 
mod attestation_performance; +pub mod attestation_rewards; mod block_packing_efficiency; mod block_rewards; mod sync_committee_rewards; @@ -23,6 +24,7 @@ use store::{AnchorInfo, Split, StoreConfig}; pub use attestation_performance::{ AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, }; +pub use attestation_rewards::StandardAttestationRewards; pub use block_packing_efficiency::{ BlockPackingEfficiency, BlockPackingEfficiencyQuery, ProposerInfo, UniqueAttestation, }; diff --git a/common/eth2/src/lighthouse/attestation_rewards.rs b/common/eth2/src/lighthouse/attestation_rewards.rs new file mode 100644 index 00000000000..3fd59782c82 --- /dev/null +++ b/common/eth2/src/lighthouse/attestation_rewards.rs @@ -0,0 +1,42 @@ +use serde::{Deserialize, Serialize}; + +// Details about the rewards paid for attestations +// All rewards in GWei + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct IdealAttestationRewards { + // Validator's effective balance in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub effective_balance: u64, + // Ideal attester's reward for head vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub head: u64, + // Ideal attester's reward for target vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub target: u64, + // Ideal attester's reward for source vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub source: u64, +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct TotalAttestationRewards { + // one entry for every validator based on their attestations in the epoch + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub validator_index: u64, + // attester's reward for head vote in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub head: u64, + // attester's reward for target vote in gwei + pub target: i64, + // attester's reward for source vote in gwei + pub source: i64, + // TBD attester's inclusion_delay reward in gwei (phase0 only) + // pub inclusion_delay: u64, +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct StandardAttestationRewards { + pub ideal_rewards: Vec, + pub total_rewards: Vec, +} diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 70129724600..53cca49120a 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -270,11 +270,20 @@ pub struct FinalityCheckpointsData { } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(try_from = "&str")] pub enum ValidatorId { PublicKey(PublicKeyBytes), Index(u64), } +impl TryFrom<&str> for ValidatorId { + type Error = String; + + fn try_from(s: &str) -> Result { + Self::from_str(s) + } +} + impl FromStr for ValidatorId { type Err = String; From 2073518f0ff03a86a6fc948bf1ff4deda24cf9c5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 7 Feb 2023 11:23:36 +1100 Subject: [PATCH 147/263] Remove unused `u256_hex_be_opt` (#3942) --- consensus/serde_utils/src/lib.rs | 1 - consensus/serde_utils/src/u256_hex_be_opt.rs | 169 ------------------- 2 files changed, 170 deletions(-) delete mode 100644 consensus/serde_utils/src/u256_hex_be_opt.rs diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs index 75fd6009b75..92b5966c9a0 100644 --- a/consensus/serde_utils/src/lib.rs +++ b/consensus/serde_utils/src/lib.rs @@ -7,7 +7,6 @@ pub mod json_str; pub mod list_of_bytes_lists; pub mod quoted_u64_vec; pub mod u256_hex_be; -pub mod u256_hex_be_opt; pub mod u32_hex; pub mod 
u64_hex_be; pub mod u8_hex; diff --git a/consensus/serde_utils/src/u256_hex_be_opt.rs b/consensus/serde_utils/src/u256_hex_be_opt.rs deleted file mode 100644 index 8eadbf0243f..00000000000 --- a/consensus/serde_utils/src/u256_hex_be_opt.rs +++ /dev/null @@ -1,169 +0,0 @@ -use ethereum_types::U256; - -use serde::de::Visitor; -use serde::{de, Deserializer, Serialize, Serializer}; -use std::fmt; -use std::str::FromStr; - -pub fn serialize(num: &Option, serializer: S) -> Result -where - S: Serializer, -{ - num.serialize(serializer) -} - -pub struct U256Visitor; - -impl<'de> Visitor<'de> for U256Visitor { - type Value = String; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a well formatted hex string") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - if !value.starts_with("0x") { - return Err(de::Error::custom("must start with 0x")); - } - let stripped = &value[2..]; - if stripped.is_empty() { - Err(de::Error::custom(format!( - "quantity cannot be {:?}", - stripped - ))) - } else if stripped == "0" { - Ok(value.to_string()) - } else if stripped.starts_with('0') { - Err(de::Error::custom("cannot have leading zero")) - } else { - Ok(value.to_string()) - } - } -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, -{ - let decoded = deserializer.deserialize_string(U256Visitor)?; - - Some( - U256::from_str(&decoded) - .map_err(|e| de::Error::custom(format!("Invalid U256 string: {}", e))), - ) - .transpose() -} - -#[cfg(test)] -mod test { - use ethereum_types::U256; - use serde::{Deserialize, Serialize}; - use serde_json; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct Wrapper { - #[serde(with = "super")] - val: Option, - } - - #[test] - fn encoding() { - assert_eq!( - &serde_json::to_string(&Wrapper { - val: Some(0.into()) - }) - .unwrap(), - "\"0x0\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: Some(1.into()) - }) - .unwrap(), - "\"0x1\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: Some(256.into()) - }) - .unwrap(), - "\"0x100\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: Some(65.into()) - }) - .unwrap(), - "\"0x41\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: Some(1024.into()) - }) - .unwrap(), - "\"0x400\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: Some(U256::max_value() - 1) - }) - .unwrap(), - "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: Some(U256::max_value()) - }) - .unwrap(), - "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" - ); - } - - #[test] - fn decoding() { - assert_eq!( - serde_json::from_str::("\"0x0\"").unwrap(), - Wrapper { - val: Some(0.into()) - }, - ); - assert_eq!( - serde_json::from_str::("\"0x41\"").unwrap(), - Wrapper { - val: Some(65.into()) - }, - ); - assert_eq!( - serde_json::from_str::("\"0x400\"").unwrap(), - Wrapper { - val: Some(1024.into()) - }, - ); - assert_eq!( - serde_json::from_str::( - "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" - ) - .unwrap(), - Wrapper { - val: Some(U256::max_value() - 1) - }, - ); - assert_eq!( - serde_json::from_str::( - "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" - ) - .unwrap(), - Wrapper { - val: Some(U256::max_value()) - }, - ); - serde_json::from_str::("\"0x\"").unwrap_err(); - 
serde_json::from_str::("\"0x0400\"").unwrap_err(); - serde_json::from_str::("\"400\"").unwrap_err(); - serde_json::from_str::("\"ff\"").unwrap_err(); - } -} From e062a7cf768d2c492e2efa13febc14fe9ec66081 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 7 Feb 2023 17:13:49 +1100 Subject: [PATCH 148/263] Broadcast address changes at Capella (#3919) * Add first efforts at broadcast * Tidy * Move broadcast code to client * Progress with broadcast impl * Rename to address change * Fix compile errors * Use `while` loop * Tidy * Flip broadcast condition * Switch to forgetting individual indices * Always broadcast when the node starts * Refactor into two functions * Add testing * Add another test * Tidy, add more testing * Tidy * Add test, rename enum * Rename enum again * Tidy * Break loop early * Add V15 schema migration * Bump schema version * Progress with migration * Update beacon_node/client/src/address_change_broadcast.rs Co-authored-by: Michael Sproul * Fix typo in function name --------- Co-authored-by: Michael Sproul --- Cargo.lock | 6 + beacon_node/beacon_chain/src/beacon_chain.rs | 5 +- beacon_node/beacon_chain/src/schema_change.rs | 9 + .../src/schema_change/migration_schema_v15.rs | 78 +++++ beacon_node/beacon_chain/src/test_utils.rs | 15 + beacon_node/client/Cargo.toml | 4 + .../client/src/address_change_broadcast.rs | 322 ++++++++++++++++++ beacon_node/client/src/builder.rs | 20 ++ beacon_node/client/src/lib.rs | 1 + beacon_node/http_api/Cargo.toml | 1 + beacon_node/http_api/src/lib.rs | 13 +- beacon_node/http_api/tests/fork_tests.rs | 17 + beacon_node/network/Cargo.toml | 1 + .../beacon_processor/worker/gossip_methods.rs | 8 +- beacon_node/operation_pool/Cargo.toml | 1 + .../src/bls_to_execution_changes.rs | 52 ++- beacon_node/operation_pool/src/lib.rs | 44 ++- beacon_node/operation_pool/src/persistence.rs | 88 +++-- beacon_node/store/src/metadata.rs | 2 +- .../types/src/bls_to_execution_change.rs | 1 + .../src/signed_bls_to_execution_change.rs | 1 + 21 files changed, 649 insertions(+), 40 deletions(-) create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs create mode 100644 beacon_node/client/src/address_change_broadcast.rs diff --git a/Cargo.lock b/Cargo.lock index 18426b9e5d4..5aa7a392313 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1062,8 +1062,10 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "lighthouse_network", + "logging", "monitoring_api", "network", + "operation_pool", "parking_lot 0.12.1", "sensitive_url", "serde", @@ -1073,6 +1075,7 @@ dependencies = [ "slasher_service", "slog", "slot_clock", + "state_processing", "store", "task_executor", "time 0.3.17", @@ -3224,6 +3227,7 @@ dependencies = [ "logging", "lru 0.7.8", "network", + "operation_pool", "parking_lot 0.12.1", "proto_array", "safe_arith", @@ -5007,6 +5011,7 @@ dependencies = [ "lru_cache", "matches", "num_cpus", + "operation_pool", "rand 0.8.5", "rlp", "slog", @@ -5342,6 +5347,7 @@ dependencies = [ "lighthouse_metrics", "maplit", "parking_lot 0.12.1", + "rand 0.8.5", "rayon", "serde", "serde_derive", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index bf48c32a6d0..2aae03b7242 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -70,7 +70,7 @@ use fork_choice::{ use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; -use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool}; +use 
operation_pool::{AttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella}; use parking_lot::{Mutex, RwLock}; use proto_array::{CountUnrealizedFull, DoNotReOrg, ProposerHeadError}; use safe_arith::SafeArith; @@ -2289,10 +2289,11 @@ impl BeaconChain { pub fn import_bls_to_execution_change( &self, bls_to_execution_change: SigVerifiedOp, + received_pre_capella: ReceivedPreCapella, ) -> bool { if self.eth1_chain.is_some() { self.op_pool - .insert_bls_to_execution_change(bls_to_execution_change) + .insert_bls_to_execution_change(bls_to_execution_change, received_pre_capella) } else { false } diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 8684bafe2d0..35202a3c5d3 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -2,6 +2,7 @@ mod migration_schema_v12; mod migration_schema_v13; mod migration_schema_v14; +mod migration_schema_v15; use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; use crate::eth1_chain::SszEth1; @@ -123,6 +124,14 @@ pub fn migrate_schema( let ops = migration_schema_v14::downgrade_from_v14::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(14), SchemaVersion(15)) => { + let ops = migration_schema_v15::upgrade_to_v15::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(15), SchemaVersion(14)) => { + let ops = migration_schema_v15::downgrade_from_v15::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs new file mode 100644 index 00000000000..f4adc2cf4db --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs @@ -0,0 +1,78 @@ +use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; +use operation_pool::{ + PersistedOperationPool, PersistedOperationPoolV14, PersistedOperationPoolV15, +}; +use slog::{debug, info, Logger}; +use std::sync::Arc; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; + +pub fn upgrade_to_v15( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V14 op pool and transform it to V15. + let PersistedOperationPoolV14:: { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + } = if let Some(op_pool_v14) = db.get_item(&OP_POOL_DB_KEY)? { + op_pool_v14 + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + let v15 = PersistedOperationPool::V15(PersistedOperationPoolV15 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + // Initialize with empty set + capella_bls_change_broadcast_indices: <_>::default(), + }); + Ok(vec![v15.as_kv_store_op(OP_POOL_DB_KEY)]) +} + +pub fn downgrade_from_v15( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V15 op pool and transform it to V14. + let PersistedOperationPoolV15 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + capella_bls_change_broadcast_indices, + } = if let Some(PersistedOperationPool::::V15(op_pool)) = + db.get_item(&OP_POOL_DB_KEY)? 
+ { + op_pool + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + info!( + log, + "Forgetting address changes for Capella broadcast"; + "count" => capella_bls_change_broadcast_indices.len(), + ); + + let v14 = PersistedOperationPoolV14 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + }; + Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)]) +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index daba7115e05..f1b9bc83c5f 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -310,6 +310,21 @@ where self } + /// Initializes the BLS withdrawal keypairs for `num_keypairs` validators to + /// the "determistic" values, regardless of wether or not the validator has + /// a BLS or execution address in the genesis deposits. + /// + /// This aligns with the withdrawal commitments used in the "interop" + /// genesis states. + pub fn deterministic_withdrawal_keypairs(self, num_keypairs: usize) -> Self { + self.withdrawal_keypairs( + types::test_utils::generate_deterministic_keypairs(num_keypairs) + .into_iter() + .map(Option::Some) + .collect(), + ) + } + pub fn withdrawal_keypairs(mut self, withdrawal_keypairs: Vec>) -> Self { self.withdrawal_keypairs = withdrawal_keypairs; self diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index d01f2505cce..9a49843a9f3 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -6,6 +6,10 @@ edition = "2021" [dev-dependencies] serde_yaml = "0.8.13" +logging = { path = "../../common/logging" } +state_processing = { path = "../../consensus/state_processing" } +operation_pool = { path = "../operation_pool" } +tokio = "1.14.0" [dependencies] beacon_chain = { path = "../beacon_chain" } diff --git a/beacon_node/client/src/address_change_broadcast.rs b/beacon_node/client/src/address_change_broadcast.rs new file mode 100644 index 00000000000..272ee908fba --- /dev/null +++ b/beacon_node/client/src/address_change_broadcast.rs @@ -0,0 +1,322 @@ +use crate::*; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use slog::{debug, info, warn, Logger}; +use slot_clock::SlotClock; +use std::cmp; +use std::collections::HashSet; +use std::mem; +use std::time::Duration; +use tokio::sync::mpsc::UnboundedSender; +use tokio::time::sleep; +use types::EthSpec; + +/// The size of each chunk of addresses changes to be broadcast at the Capella +/// fork. +const BROADCAST_CHUNK_SIZE: usize = 128; +/// The delay between broadcasting each chunk. +const BROADCAST_CHUNK_DELAY: Duration = Duration::from_millis(500); + +/// If the Capella fork has already been reached, `broadcast_address_changes` is +/// called immediately. +/// +/// If the Capella fork has not been reached, waits until the start of the fork +/// epoch and then calls `broadcast_address_changes`. +pub async fn broadcast_address_changes_at_capella( + chain: &BeaconChain, + network_send: UnboundedSender>, + log: &Logger, +) { + let spec = &chain.spec; + let slot_clock = &chain.slot_clock; + + let capella_fork_slot = if let Some(epoch) = spec.capella_fork_epoch { + epoch.start_slot(T::EthSpec::slots_per_epoch()) + } else { + // Exit now if Capella is not defined. + return; + }; + + // Wait until the Capella fork epoch. 
+ while chain.slot().map_or(true, |slot| slot < capella_fork_slot) { + match slot_clock.duration_to_slot(capella_fork_slot) { + Some(duration) => { + // Sleep until the Capella fork. + sleep(duration).await; + break; + } + None => { + // We were unable to read the slot clock wait another slot + // and then try again. + sleep(slot_clock.slot_duration()).await; + } + } + } + + // The following function will be called in two scenarios: + // + // 1. The node has been running for some time and the Capella fork has just + // been reached. + // 2. The node has just started and it is *after* the Capella fork. + broadcast_address_changes(chain, network_send, log).await +} + +/// Broadcasts any address changes that are flagged for broadcasting at the +/// Capella fork epoch. +/// +/// Address changes are published in chunks, with a delay between each chunk. +/// This helps reduce the load on the P2P network and also helps prevent us from +/// clogging our `network_send` channel and being late to publish +/// blocks, attestations, etc. +pub async fn broadcast_address_changes( + chain: &BeaconChain, + network_send: UnboundedSender>, + log: &Logger, +) { + let head = chain.head_snapshot(); + let mut changes = chain + .op_pool + .get_bls_to_execution_changes_received_pre_capella(&head.beacon_state, &chain.spec); + + while !changes.is_empty() { + // This `split_off` approach is to allow us to have owned chunks of the + // `changes` vec. The `std::slice::Chunks` method uses references and + // the `itertools` iterator that achives this isn't `Send` so it doesn't + // work well with the `sleep` at the end of the loop. + let tail = changes.split_off(cmp::min(BROADCAST_CHUNK_SIZE, changes.len())); + let chunk = mem::replace(&mut changes, tail); + + let mut published_indices = HashSet::with_capacity(BROADCAST_CHUNK_SIZE); + let mut num_ok = 0; + let mut num_err = 0; + + // Publish each individual address change. + for address_change in chunk { + let validator_index = address_change.message.validator_index; + + let pubsub_message = PubsubMessage::BlsToExecutionChange(Box::new(address_change)); + let message = NetworkMessage::Publish { + messages: vec![pubsub_message], + }; + // It seems highly unlikely that this unbounded send will fail, but + // we handle the result nontheless. + if let Err(e) = network_send.send(message) { + debug!( + log, + "Failed to publish change message"; + "error" => ?e, + "validator_index" => validator_index + ); + num_err += 1; + } else { + debug!( + log, + "Published address change message"; + "validator_index" => validator_index + ); + num_ok += 1; + published_indices.insert(validator_index); + } + } + + // Remove any published indices from the list of indices that need to be + // published. + chain + .op_pool + .register_indices_broadcasted_at_capella(&published_indices); + + info!( + log, + "Published address change messages"; + "num_published" => num_ok, + ); + + if num_err > 0 { + warn!( + log, + "Failed to publish address changes"; + "info" => "failed messages will be retried", + "num_unable_to_publish" => num_err, + ); + } + + sleep(BROADCAST_CHUNK_DELAY).await; + } + + debug!( + log, + "Address change routine complete"; + ); +} + +#[cfg(not(debug_assertions))] // Tests run too slow in debug. 
+#[cfg(test)] +mod tests { + use super::*; + use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; + use operation_pool::ReceivedPreCapella; + use state_processing::{SigVerifiedOp, VerifyOperation}; + use std::collections::HashSet; + use tokio::sync::mpsc; + use types::*; + + type E = MainnetEthSpec; + + pub const VALIDATOR_COUNT: usize = BROADCAST_CHUNK_SIZE * 3; + pub const EXECUTION_ADDRESS: Address = Address::repeat_byte(42); + + struct Tester { + harness: BeaconChainHarness>, + /// Changes which should be broadcast at the Capella fork. + received_pre_capella_changes: Vec>, + /// Changes which should *not* be broadcast at the Capella fork. + not_received_pre_capella_changes: Vec>, + } + + impl Tester { + fn new() -> Self { + let altair_fork_epoch = Epoch::new(0); + let bellatrix_fork_epoch = Epoch::new(0); + let capella_fork_epoch = Epoch::new(2); + + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(altair_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); + spec.capella_fork_epoch = Some(capella_fork_epoch); + + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .logger(logging::test_logger()) + .deterministic_keypairs(VALIDATOR_COUNT) + .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + Self { + harness, + received_pre_capella_changes: <_>::default(), + not_received_pre_capella_changes: <_>::default(), + } + } + + fn produce_verified_address_change( + &self, + validator_index: u64, + ) -> SigVerifiedOp { + let change = self + .harness + .make_bls_to_execution_change(validator_index, EXECUTION_ADDRESS); + let head = self.harness.chain.head_snapshot(); + + change + .validate(&head.beacon_state, &self.harness.spec) + .unwrap() + } + + fn produce_received_pre_capella_changes(mut self, indices: Vec) -> Self { + for validator_index in indices { + self.received_pre_capella_changes + .push(self.produce_verified_address_change(validator_index)); + } + self + } + + fn produce_not_received_pre_capella_changes(mut self, indices: Vec) -> Self { + for validator_index in indices { + self.not_received_pre_capella_changes + .push(self.produce_verified_address_change(validator_index)); + } + self + } + + async fn run(self) { + let harness = self.harness; + let chain = harness.chain.clone(); + + let mut broadcast_indices = HashSet::new(); + for change in self.received_pre_capella_changes { + broadcast_indices.insert(change.as_inner().message.validator_index); + chain + .op_pool + .insert_bls_to_execution_change(change, ReceivedPreCapella::Yes); + } + + let mut non_broadcast_indices = HashSet::new(); + for change in self.not_received_pre_capella_changes { + non_broadcast_indices.insert(change.as_inner().message.validator_index); + chain + .op_pool + .insert_bls_to_execution_change(change, ReceivedPreCapella::No); + } + + harness.set_current_slot( + chain + .spec + .capella_fork_epoch + .unwrap() + .start_slot(E::slots_per_epoch()), + ); + + let (sender, mut receiver) = mpsc::unbounded_channel(); + + broadcast_address_changes_at_capella(&chain, sender, &logging::test_logger()).await; + + let mut broadcasted_changes = vec![]; + while let Some(NetworkMessage::Publish { mut messages }) = receiver.recv().await { + match messages.pop().unwrap() { + PubsubMessage::BlsToExecutionChange(change) => broadcasted_changes.push(change), + _ => panic!("unexpected message"), + } + } + + assert_eq!( + broadcasted_changes.len(), + broadcast_indices.len(), + "all expected 
changes should have been broadcast" + ); + + for broadcasted in &broadcasted_changes { + assert!( + !non_broadcast_indices.contains(&broadcasted.message.validator_index), + "messages not flagged for broadcast should not have been broadcast" + ); + } + + let head = chain.head_snapshot(); + assert!( + chain + .op_pool + .get_bls_to_execution_changes_received_pre_capella( + &head.beacon_state, + &chain.spec, + ) + .is_empty(), + "there shouldn't be any capella broadcast changes left in the op pool" + ); + } + } + + // Useful for generating even-numbered indices. Required since only even + // numbered genesis validators have BLS credentials. + fn even_indices(start: u64, count: usize) -> Vec { + (start..).filter(|i| i % 2 == 0).take(count).collect() + } + + #[tokio::test] + async fn one_chunk() { + Tester::new() + .produce_received_pre_capella_changes(even_indices(0, 4)) + .produce_not_received_pre_capella_changes(even_indices(10, 4)) + .run() + .await; + } + + #[tokio::test] + async fn multiple_chunks() { + Tester::new() + .produce_received_pre_capella_changes(even_indices(0, BROADCAST_CHUNK_SIZE * 3 / 2)) + .run() + .await; + } +} diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 3b016ebda9c..5fa2fddc3e0 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -1,3 +1,4 @@ +use crate::address_change_broadcast::broadcast_address_changes_at_capella; use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; use crate::Client; @@ -802,6 +803,25 @@ where // Spawns a routine that polls the `exchange_transition_configuration` endpoint. execution_layer.spawn_transition_configuration_poll(beacon_chain.spec.clone()); } + + // Spawn a service to publish BLS to execution changes at the Capella fork. 
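+ // The task returns immediately if no Capella fork epoch is scheduled; otherwise it
+ // waits until the fork slot before publishing any queued changes.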
+ if let Some(network_senders) = self.network_senders { + let inner_chain = beacon_chain.clone(); + let broadcast_context = + runtime_context.service_context("addr_bcast".to_string()); + let log = broadcast_context.log().clone(); + broadcast_context.executor.spawn( + async move { + broadcast_address_changes_at_capella( + &inner_chain, + network_senders.network_send(), + &log, + ) + .await + }, + "addr_broadcast", + ); + } } start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 24df8740863..b0184dc0ffc 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,5 +1,6 @@ extern crate slog; +mod address_change_broadcast; pub mod config; mod metrics; mod notifier; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 0dc918f425e..5110a73ed79 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -36,6 +36,7 @@ tree_hash = "0.4.1" sysinfo = "0.26.5" system_health = { path = "../../common/system_health" } directory = { path = "../../common/directory" } +operation_pool = { path = "../operation_pool" } [dev-dependencies] store = { path = "../store" } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index e0f8bcf2a33..0edbaf8f7c6 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -35,6 +35,7 @@ use eth2::types::{ use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; +use operation_pool::ReceivedPreCapella; use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; @@ -1696,8 +1697,12 @@ pub fn serve( .to_execution_address; // New to P2P *and* op pool, gossip immediately if post-Capella. - let publish = chain.current_slot_is_post_capella().unwrap_or(false); - if publish { + let received_pre_capella = if chain.current_slot_is_post_capella().unwrap_or(false) { + ReceivedPreCapella::No + } else { + ReceivedPreCapella::Yes + }; + if matches!(received_pre_capella, ReceivedPreCapella::No) { publish_pubsub_message( &network_tx, PubsubMessage::BlsToExecutionChange(Box::new( @@ -1708,14 +1713,14 @@ pub fn serve( // Import to op pool (may return `false` if there's a race). 
let imported = - chain.import_bls_to_execution_change(verified_address_change); + chain.import_bls_to_execution_change(verified_address_change, received_pre_capella); info!( log, "Processed BLS to execution change"; "validator_index" => validator_index, "address" => ?address, - "published" => publish, + "published" => matches!(received_pre_capella, ReceivedPreCapella::No), "imported" => imported, ); } diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index e61470fe959..6144123565e 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -6,6 +6,7 @@ use beacon_chain::{ }; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials}; +use std::collections::HashSet; use types::{ test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, Address, ChainSpec, Epoch, EthSpec, Hash256, MinimalEthSpec, Slot, @@ -438,6 +439,8 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { .await .unwrap(); + let expected_received_pre_capella_messages = valid_address_changes[..num_pre_capella].to_vec(); + // Conflicting changes for the same validators should all fail. let error = client .post_beacon_pool_bls_to_execution_changes(&conflicting_address_changes[..num_pre_capella]) @@ -464,6 +467,20 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { harness.extend_to_slot(capella_slot - 1).await; assert_eq!(harness.head_slot(), capella_slot - 1); + assert_eq!( + harness + .chain + .op_pool + .get_bls_to_execution_changes_received_pre_capella( + &harness.chain.head_snapshot().beacon_state, + &spec, + ) + .into_iter() + .collect::>(), + HashSet::from_iter(expected_received_pre_capella_messages.into_iter()), + "all pre-capella messages should be queued for capella broadcast" + ); + // Add Capella blocks which should be full of BLS to execution changes. for i in 0..validator_count / max_bls_to_execution_changes { let head_block_root = harness.extend_slots(1).await; diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index b1d928eecb9..5ce33116933 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -45,6 +45,7 @@ tokio-util = { version = "0.6.3", features = ["time"] } derivative = "2.2.0" delay_map = "0.1.1" ethereum-types = { version = "0.14.1", optional = true } +operation_pool = { path = "../operation_pool" } [features] deterministic_long_lived_attnets = [ "ethereum-types" ] diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 44d61118923..f2b1b3a26ba 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -12,6 +12,7 @@ use beacon_chain::{ GossipVerifiedBlock, NotifyExecutionLayer, }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; +use operation_pool::ReceivedPreCapella; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use ssz::Encode; @@ -1251,7 +1252,12 @@ impl Worker { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); - self.chain.import_bls_to_execution_change(change); + // Address change messages from gossip are only processed *after* the + // Capella fork epoch. 
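+ // They are therefore tagged `ReceivedPreCapella::No` and are never queued for
+ // re-broadcast at the fork boundary.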
+ let received_pre_capella = ReceivedPreCapella::No; + + self.chain + .import_bls_to_execution_change(change, received_pre_capella); debug!( self.log, diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 8483233589f..cc4eacde898 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -19,6 +19,7 @@ serde = "1.0.116" serde_derive = "1.0.116" store = { path = "../store" } bitvec = "1" +rand = "0.8.5" [dev-dependencies] beacon_chain = { path = "../beacon_chain" } diff --git a/beacon_node/operation_pool/src/bls_to_execution_changes.rs b/beacon_node/operation_pool/src/bls_to_execution_changes.rs index 84513d466e9..c73666e1458 100644 --- a/beacon_node/operation_pool/src/bls_to_execution_changes.rs +++ b/beacon_node/operation_pool/src/bls_to_execution_changes.rs @@ -1,11 +1,20 @@ use state_processing::SigVerifiedOp; -use std::collections::{hash_map::Entry, HashMap}; +use std::collections::{hash_map::Entry, HashMap, HashSet}; use std::sync::Arc; use types::{ AbstractExecPayload, BeaconState, ChainSpec, EthSpec, SignedBeaconBlock, SignedBlsToExecutionChange, }; +/// Indicates if a `BlsToExecutionChange` was received before or after the +/// Capella fork. This is used to know which messages we should broadcast at the +/// Capella fork epoch. +#[derive(Copy, Clone)] +pub enum ReceivedPreCapella { + Yes, + No, +} + /// Pool of BLS to execution changes that maintains a LIFO queue and an index by validator. /// /// Using the LIFO queue for block production disincentivises spam on P2P at the Capella fork, @@ -16,6 +25,9 @@ pub struct BlsToExecutionChanges { by_validator_index: HashMap>>, /// Last-in-first-out (LIFO) queue of verified messages. queue: Vec>>, + /// Contains a set of validator indices which need to have their changes + /// broadcast at the capella epoch. + received_pre_capella_indices: HashSet, } impl BlsToExecutionChanges { @@ -31,16 +43,18 @@ impl BlsToExecutionChanges { pub fn insert( &mut self, verified_change: SigVerifiedOp, + received_pre_capella: ReceivedPreCapella, ) -> bool { + let validator_index = verified_change.as_inner().message.validator_index; // Wrap in an `Arc` once on insert. let verified_change = Arc::new(verified_change); - match self - .by_validator_index - .entry(verified_change.as_inner().message.validator_index) - { + match self.by_validator_index.entry(validator_index) { Entry::Vacant(entry) => { self.queue.push(verified_change.clone()); entry.insert(verified_change); + if matches!(received_pre_capella, ReceivedPreCapella::Yes) { + self.received_pre_capella_indices.insert(validator_index); + } true } Entry::Occupied(_) => false, @@ -61,6 +75,24 @@ impl BlsToExecutionChanges { self.queue.iter().rev() } + /// Returns only those which are flagged for broadcasting at the Capella + /// fork. Uses FIFO ordering, although we expect this list to be shuffled by + /// the caller. + pub fn iter_received_pre_capella( + &self, + ) -> impl Iterator>> { + self.queue.iter().filter(|address_change| { + self.received_pre_capella_indices + .contains(&address_change.as_inner().message.validator_index) + }) + } + + /// Returns the set of indicies which should have their address changes + /// broadcast at the Capella fork. + pub fn iter_pre_capella_indices(&self) -> impl Iterator { + self.received_pre_capella_indices.iter() + } + /// Prune BLS to execution changes that have been applied to the state more than 1 block ago. 
/// /// The block check is necessary to avoid pruning too eagerly and losing the ability to include @@ -102,4 +134,14 @@ impl BlsToExecutionChanges { self.by_validator_index.remove(&validator_index); } } + + /// Removes `broadcasted` validators from the set of validators that should + /// have their BLS changes broadcast at the Capella fork boundary. + pub fn register_indices_broadcasted_at_capella(&mut self, broadcasted: &HashSet) { + self.received_pre_capella_indices = self + .received_pre_capella_indices + .difference(broadcasted) + .copied() + .collect(); + } } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 4643addad52..d401deb8968 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -9,12 +9,13 @@ mod persistence; mod reward_cache; mod sync_aggregate_id; +pub use crate::bls_to_execution_changes::ReceivedPreCapella; pub use attestation::AttMaxCover; pub use attestation_storage::{AttestationRef, SplitAttestation}; pub use max_cover::MaxCover; pub use persistence::{ PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, - PersistedOperationPoolV5, + PersistedOperationPoolV15, PersistedOperationPoolV5, }; pub use reward_cache::RewardCache; @@ -24,6 +25,8 @@ use crate::sync_aggregate_id::SyncAggregateId; use attester_slashing::AttesterSlashingMaxCover; use max_cover::maximum_cover; use parking_lot::{RwLock, RwLockWriteGuard}; +use rand::seq::SliceRandom; +use rand::thread_rng; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::per_block_processing::{ get_slashable_indices_modular, verify_exit, VerifySignatures, @@ -533,10 +536,11 @@ impl OperationPool { pub fn insert_bls_to_execution_change( &self, verified_change: SigVerifiedOp, + received_pre_capella: ReceivedPreCapella, ) -> bool { self.bls_to_execution_changes .write() - .insert(verified_change) + .insert(verified_change, received_pre_capella) } /// Get a list of execution changes for inclusion in a block. @@ -562,6 +566,42 @@ impl OperationPool { ) } + /// Get a list of execution changes to be broadcast at the Capella fork. + /// + /// The list that is returned will be shuffled to help provide a fair + /// broadcast of messages. + pub fn get_bls_to_execution_changes_received_pre_capella( + &self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Vec { + let mut changes = filter_limit_operations( + self.bls_to_execution_changes + .read() + .iter_received_pre_capella(), + |address_change| { + address_change.signature_is_still_valid(&state.fork()) + && state + .get_validator(address_change.as_inner().message.validator_index as usize) + .map_or(false, |validator| { + !validator.has_eth1_withdrawal_credential(spec) + }) + }, + |address_change| address_change.as_inner().clone(), + usize::max_value(), + ); + changes.shuffle(&mut thread_rng()); + changes + } + + /// Removes `broadcasted` validators from the set of validators that should + /// have their BLS changes broadcast at the Capella fork boundary. + pub fn register_indices_broadcasted_at_capella(&self, broadcasted: &HashSet) { + self.bls_to_execution_changes + .write() + .register_indices_broadcasted_at_capella(broadcasted); + } + /// Prune BLS to execution changes that have been applied to the state more than 1 block ago. 
pub fn prune_bls_to_execution_changes>( &self, diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 4948040ae10..65354e01ac9 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -1,6 +1,6 @@ use crate::attestation_id::AttestationId; use crate::attestation_storage::AttestationMap; -use crate::bls_to_execution_changes::BlsToExecutionChanges; +use crate::bls_to_execution_changes::{BlsToExecutionChanges, ReceivedPreCapella}; use crate::sync_aggregate_id::SyncAggregateId; use crate::OpPoolError; use crate::OperationPool; @@ -9,6 +9,8 @@ use parking_lot::RwLock; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::SigVerifiedOp; +use std::collections::HashSet; +use std::mem; use store::{DBColumn, Error as StoreError, StoreItem}; use types::*; @@ -19,7 +21,7 @@ type PersistedSyncContributions = Vec<(SyncAggregateId, Vec { #[superstruct(only(V5))] pub attestations_v5: Vec<(AttestationId, Vec>)>, /// Attestations and their attesting indices. - #[superstruct(only(V12, V14))] + #[superstruct(only(V12, V14, V15))] pub attestations: Vec<(Attestation, Vec)>, /// Mapping from sync contribution ID to sync contributions and aggregate. pub sync_contributions: PersistedSyncContributions, @@ -41,23 +43,27 @@ pub struct PersistedOperationPool { #[superstruct(only(V5))] pub attester_slashings_v5: Vec<(AttesterSlashing, ForkVersion)>, /// Attester slashings. - #[superstruct(only(V12, V14))] + #[superstruct(only(V12, V14, V15))] pub attester_slashings: Vec, T>>, /// [DEPRECATED] Proposer slashings. #[superstruct(only(V5))] pub proposer_slashings_v5: Vec, /// Proposer slashings with fork information. - #[superstruct(only(V12, V14))] + #[superstruct(only(V12, V14, V15))] pub proposer_slashings: Vec>, /// [DEPRECATED] Voluntary exits. #[superstruct(only(V5))] pub voluntary_exits_v5: Vec, /// Voluntary exits with fork information. - #[superstruct(only(V12, V14))] + #[superstruct(only(V12, V14, V15))] pub voluntary_exits: Vec>, /// BLS to Execution Changes - #[superstruct(only(V14))] + #[superstruct(only(V14, V15))] pub bls_to_execution_changes: Vec>, + /// Validator indices with BLS to Execution Changes to be broadcast at the + /// Capella fork. + #[superstruct(only(V15))] + pub capella_bls_change_broadcast_indices: Vec, } impl PersistedOperationPool { @@ -110,18 +116,26 @@ impl PersistedOperationPool { .map(|bls_to_execution_change| (**bls_to_execution_change).clone()) .collect(); - PersistedOperationPool::V14(PersistedOperationPoolV14 { + let capella_bls_change_broadcast_indices = operation_pool + .bls_to_execution_changes + .read() + .iter_pre_capella_indices() + .copied() + .collect(); + + PersistedOperationPool::V15(PersistedOperationPoolV15 { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, bls_to_execution_changes, + capella_bls_change_broadcast_indices, }) } /// Reconstruct an `OperationPool`. - pub fn into_operation_pool(self) -> Result, OpPoolError> { + pub fn into_operation_pool(mut self) -> Result, OpPoolError> { let attester_slashings = RwLock::new(self.attester_slashings()?.iter().cloned().collect()); let proposer_slashings = RwLock::new( self.proposer_slashings()? 
@@ -142,33 +156,43 @@ impl PersistedOperationPool { PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => { return Err(OpPoolError::IncorrectOpPoolVariant) } - PersistedOperationPool::V14(ref pool) => { + PersistedOperationPool::V14(_) | PersistedOperationPool::V15(_) => { let mut map = AttestationMap::default(); - for (att, attesting_indices) in pool.attestations.clone() { + for (att, attesting_indices) in self.attestations()?.clone() { map.insert(att, attesting_indices); } RwLock::new(map) } }; - let bls_to_execution_changes = match self { - PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => { - return Err(OpPoolError::IncorrectOpPoolVariant) - } - PersistedOperationPool::V14(pool) => { - let mut bls_to_execution_changes = BlsToExecutionChanges::default(); - for bls_to_execution_change in pool.bls_to_execution_changes { - bls_to_execution_changes.insert(bls_to_execution_change); - } - RwLock::new(bls_to_execution_changes) + let mut bls_to_execution_changes = BlsToExecutionChanges::default(); + if let Ok(persisted_changes) = self.bls_to_execution_changes_mut() { + let persisted_changes = mem::take(persisted_changes); + + let broadcast_indices = + if let Ok(indices) = self.capella_bls_change_broadcast_indices_mut() { + mem::take(indices).into_iter().collect() + } else { + HashSet::new() + }; + + for bls_to_execution_change in persisted_changes { + let received_pre_capella = if broadcast_indices + .contains(&bls_to_execution_change.as_inner().message.validator_index) + { + ReceivedPreCapella::Yes + } else { + ReceivedPreCapella::No + }; + bls_to_execution_changes.insert(bls_to_execution_change, received_pre_capella); } - }; + } let op_pool = OperationPool { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, - bls_to_execution_changes, + bls_to_execution_changes: RwLock::new(bls_to_execution_changes), reward_cache: Default::default(), _phantom: Default::default(), }; @@ -204,6 +228,20 @@ impl StoreItem for PersistedOperationPoolV12 { } } +impl StoreItem for PersistedOperationPoolV14 { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + PersistedOperationPoolV14::from_ssz_bytes(bytes).map_err(Into::into) + } +} + /// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`. impl StoreItem for PersistedOperationPool { fn db_column() -> DBColumn { @@ -216,8 +254,8 @@ impl StoreItem for PersistedOperationPool { fn from_store_bytes(bytes: &[u8]) -> Result { // Default deserialization to the latest variant. - PersistedOperationPoolV14::from_ssz_bytes(bytes) - .map(Self::V14) + PersistedOperationPoolV15::from_ssz_bytes(bytes) + .map(Self::V15) .map_err(Into::into) } } diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index fb5769635d2..729b36ff2e6 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(14); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(15); // All the keys that get stored under the `BeaconMeta` column. 
// diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index cb73e43f9ac..b279515bd1f 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -10,6 +10,7 @@ use tree_hash_derive::TreeHash; arbitrary::Arbitrary, Debug, PartialEq, + Eq, Hash, Clone, Serialize, diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/signed_bls_to_execution_change.rs index 92b79fad3f9..2b17095ae7d 100644 --- a/consensus/types/src/signed_bls_to_execution_change.rs +++ b/consensus/types/src/signed_bls_to_execution_change.rs @@ -10,6 +10,7 @@ use tree_hash_derive::TreeHash; arbitrary::Arbitrary, Debug, PartialEq, + Eq, Hash, Clone, Serialize, From 9547ac069c5fb50ffee2a5748b5bb4860684276e Mon Sep 17 00:00:00 2001 From: naviechan Date: Tue, 7 Feb 2023 08:33:23 +0000 Subject: [PATCH 149/263] Implement block_rewards API (per-validator reward) (#3907) ## Issue Addressed [#3661](https://github.com/sigp/lighthouse/issues/3661) ## Proposed Changes `/eth/v1/beacon/rewards/blocks/{block_id}` ``` { "execution_optimistic": false, "finalized": false, "data": { "proposer_index": "123", "total": "123", "attestations": "123", "sync_aggregate": "123", "proposer_slashings": "123", "attester_slashings": "123" } } ``` The issue contains the implementation of three per-validator reward APIs: * `sync_committee_rewards` * [`attestation_rewards`](https://github.com/sigp/lighthouse/pull/3822) * `block_rewards` This PR only implements the `block_rewards`. The endpoints can be viewed in the Ethereum Beacon nodes API browser: [https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Rewards](https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Rewards) ## Additional Info The implementation of [consensus client reward APIs](https://github.com/eth-protocol-fellows/cohort-three/blob/master/projects/project-ideas.md#consensus-client-reward-apis) is part of the [EPF](https://github.com/eth-protocol-fellows/cohort-three). 
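
As a quick way to try the endpoint against a running node, a request might look something like the one below (assuming Lighthouse's default HTTP API port of 5052; `head` can be replaced by any valid `block_id` such as a slot or a block root):

```
curl http://localhost:5052/eth/v1/beacon/rewards/blocks/head
```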
Co-authored-by: kevinbogner Co-authored-by: navie --- .../beacon_chain/src/beacon_block_reward.rs | 237 ++++++++++++++++++ beacon_node/beacon_chain/src/errors.rs | 1 + beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/http_api/src/lib.rs | 23 ++ .../http_api/src/standard_block_rewards.rs | 27 ++ .../http_api/src/sync_committee_rewards.rs | 2 +- beacon_node/operation_pool/src/lib.rs | 2 +- common/eth2/src/lib.rs | 16 ++ common/eth2/src/lighthouse.rs | 2 + .../src/lighthouse/standard_block_rewards.rs | 26 ++ 10 files changed, 335 insertions(+), 2 deletions(-) create mode 100644 beacon_node/beacon_chain/src/beacon_block_reward.rs create mode 100644 beacon_node/http_api/src/standard_block_rewards.rs create mode 100644 common/eth2/src/lighthouse/standard_block_rewards.rs diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs new file mode 100644 index 00000000000..3f186c37c19 --- /dev/null +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -0,0 +1,237 @@ +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::StandardBlockReward; +use operation_pool::RewardCache; +use safe_arith::SafeArith; +use slog::error; +use state_processing::{ + common::{ + altair, get_attestation_participation_flag_indices, get_attesting_indices_from_state, + }, + per_block_processing::{ + altair::sync_committee::compute_sync_aggregate_rewards, get_slashable_indices, + }, +}; +use store::{ + consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, + RelativeEpoch, +}; +use types::{BeaconBlockRef, BeaconState, BeaconStateError, ExecPayload, Hash256}; + +type BeaconBlockSubRewardValue = u64; + +impl BeaconChain { + pub fn compute_beacon_block_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + block_root: Hash256, + state: &mut BeaconState, + ) -> Result { + if block.slot() != state.slot() { + return Err(BeaconChainError::BlockRewardSlotError); + } + + state.build_committee_cache(RelativeEpoch::Previous, &self.spec)?; + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + + let proposer_index = block.proposer_index(); + + let sync_aggregate_reward = + self.compute_beacon_block_sync_aggregate_reward(block, state)?; + + let proposer_slashing_reward = self + .compute_beacon_block_proposer_slashing_reward(block, state) + .map_err(|e| { + error!( + self.log, + "Error calculating proposer slashing reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardError + })?; + + let attester_slashing_reward = self + .compute_beacon_block_attester_slashing_reward(block, state) + .map_err(|e| { + error!( + self.log, + "Error calculating attester slashing reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardError + })?; + + let block_attestation_reward = if let BeaconState::Base(_) = state { + self.compute_beacon_block_attestation_reward_base(block, block_root, state) + .map_err(|e| { + error!( + self.log, + "Error calculating base block attestation reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardAttestationError + })? + } else { + self.compute_beacon_block_attestation_reward_altair(block, state) + .map_err(|e| { + error!( + self.log, + "Error calculating altair block attestation reward"; + "error" => ?e + ); + BeaconChainError::BlockRewardAttestationError + })? + }; + + let total_reward = sync_aggregate_reward + .safe_add(proposer_slashing_reward)? + .safe_add(attester_slashing_reward)? 
+ .safe_add(block_attestation_reward)?; + + Ok(StandardBlockReward { + proposer_index, + total: total_reward, + attestations: block_attestation_reward, + sync_aggregate: sync_aggregate_reward, + proposer_slashings: proposer_slashing_reward, + attester_slashings: attester_slashing_reward, + }) + } + + fn compute_beacon_block_sync_aggregate_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &BeaconState, + ) -> Result { + if let Ok(sync_aggregate) = block.body().sync_aggregate() { + let (_, proposer_reward_per_bit) = compute_sync_aggregate_rewards(state, &self.spec) + .map_err(|_| BeaconChainError::BlockRewardSyncError)?; + Ok(sync_aggregate.sync_committee_bits.num_set_bits() as u64 * proposer_reward_per_bit) + } else { + Ok(0) + } + } + + fn compute_beacon_block_proposer_slashing_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &BeaconState, + ) -> Result { + let mut proposer_slashing_reward = 0; + + let proposer_slashings = block.body().proposer_slashings(); + + for proposer_slashing in proposer_slashings { + proposer_slashing_reward.safe_add_assign( + state + .get_validator(proposer_slashing.proposer_index() as usize)? + .effective_balance + .safe_div(self.spec.whistleblower_reward_quotient)?, + )?; + } + + Ok(proposer_slashing_reward) + } + + fn compute_beacon_block_attester_slashing_reward>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &BeaconState, + ) -> Result { + let mut attester_slashing_reward = 0; + + let attester_slashings = block.body().attester_slashings(); + + for attester_slashing in attester_slashings { + for attester_index in get_slashable_indices(state, attester_slashing)? { + attester_slashing_reward.safe_add_assign( + state + .get_validator(attester_index as usize)? + .effective_balance + .safe_div(self.spec.whistleblower_reward_quotient)?, + )?; + } + } + + Ok(attester_slashing_reward) + } + + fn compute_beacon_block_attestation_reward_base>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + block_root: Hash256, + state: &BeaconState, + ) -> Result { + // Call compute_block_reward in the base case + // Since base does not have sync aggregate, we only grab attesation portion of the returned + // value + let mut reward_cache = RewardCache::default(); + let block_attestation_reward = self + .compute_block_reward(block, block_root, state, &mut reward_cache, true)? + .attestation_rewards + .total; + + Ok(block_attestation_reward) + } + + fn compute_beacon_block_attestation_reward_altair>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &mut BeaconState, + ) -> Result { + let total_active_balance = state.get_total_active_balance()?; + let base_reward_per_increment = + altair::BaseRewardPerIncrement::new(total_active_balance, &self.spec)?; + + let mut total_proposer_reward = 0; + + let proposer_reward_denominator = WEIGHT_DENOMINATOR + .safe_sub(PROPOSER_WEIGHT)? + .safe_mul(WEIGHT_DENOMINATOR)? 
+ .safe_div(PROPOSER_WEIGHT)?; + + for attestation in block.body().attestations() { + let data = &attestation.data; + let inclusion_delay = state.slot().safe_sub(data.slot)?.as_u64(); + let participation_flag_indices = get_attestation_participation_flag_indices( + state, + data, + inclusion_delay, + &self.spec, + )?; + + let attesting_indices = get_attesting_indices_from_state(state, attestation)?; + + let mut proposer_reward_numerator = 0; + for index in attesting_indices { + let index = index as usize; + for (flag_index, &weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { + let epoch_participation = + state.get_epoch_participation_mut(data.target.epoch)?; + let validator_participation = epoch_participation + .get_mut(index) + .ok_or(BeaconStateError::ParticipationOutOfBounds(index))?; + + if participation_flag_indices.contains(&flag_index) + && !validator_participation.has_flag(flag_index)? + { + validator_participation.add_flag(flag_index)?; + proposer_reward_numerator.safe_add_assign( + altair::get_base_reward( + state, + index, + base_reward_per_increment, + &self.spec, + )? + .safe_mul(weight)?, + )?; + } + } + } + total_proposer_reward.safe_add_assign( + proposer_reward_numerator.safe_div(proposer_reward_denominator)?, + )?; + } + + Ok(total_proposer_reward) + } +} diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 420af2ea1b8..e6f44f6654b 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -154,6 +154,7 @@ pub enum BeaconChainError { ExecutionForkChoiceUpdateInvalid { status: PayloadStatus, }, + BlockRewardError, BlockRewardSlotError, BlockRewardAttestationError, BlockRewardSyncError, diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index e3b5f1e0af7..5e75c2a632e 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -2,6 +2,7 @@ pub mod attestation_rewards; pub mod attestation_verification; mod attester_cache; +pub mod beacon_block_reward; mod beacon_chain; mod beacon_fork_choice_store; pub mod beacon_proposer_cache; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 8bb7db077d4..973be2d49b4 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -15,6 +15,7 @@ mod database; mod metrics; mod proposer_duties; mod publish_blocks; +mod standard_block_rewards; mod state_id; mod sync_committee_rewards; mod sync_committees; @@ -1700,6 +1701,27 @@ pub fn serve( }, ); + let beacon_rewards_path = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("rewards")) + .and(chain_filter.clone()); + + // GET beacon/rewards/blocks/{block_id} + let get_beacon_rewards_blocks = beacon_rewards_path + .clone() + .and(warp::path("blocks")) + .and(block_id_or_err) + .and(warp::path::end()) + .and_then(|chain: Arc>, block_id: BlockId| { + blocking_json_task(move || { + let (rewards, execution_optimistic) = + standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?; + Ok(rewards) + .map(api_types::GenericResponse::from) + .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + }) + }); + /* * beacon/rewards */ @@ -3433,6 +3455,7 @@ pub fn serve( .or(get_beacon_pool_proposer_slashings.boxed()) .or(get_beacon_pool_voluntary_exits.boxed()) .or(get_beacon_deposit_snapshot.boxed()) + .or(get_beacon_rewards_blocks.boxed()) .or(get_config_fork_schedule.boxed()) .or(get_config_spec.boxed()) .or(get_config_deposit_contract.boxed()) diff --git 
a/beacon_node/http_api/src/standard_block_rewards.rs b/beacon_node/http_api/src/standard_block_rewards.rs
new file mode 100644
index 00000000000..b3c90d08a4d
--- /dev/null
+++ b/beacon_node/http_api/src/standard_block_rewards.rs
@@ -0,0 +1,27 @@
+use crate::sync_committee_rewards::get_state_before_applying_block;
+use crate::BlockId;
+use crate::ExecutionOptimistic;
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+use eth2::lighthouse::StandardBlockReward;
+use std::sync::Arc;
+use warp_utils::reject::beacon_chain_error;
+// The difference between block_rewards and beacon_block_rewards is that the latter returns a
+// block reward format that satisfies the beacon-api specs.
+pub fn compute_beacon_block_rewards<T: BeaconChainTypes>(
+    chain: Arc<BeaconChain<T>>,
+    block_id: BlockId,
+) -> Result<(StandardBlockReward, ExecutionOptimistic), warp::Rejection> {
+    let (block, execution_optimistic) = block_id.blinded_block(&chain)?;
+
+    let block_ref = block.message();
+
+    let block_root = block.canonical_root();
+
+    let mut state = get_state_before_applying_block(chain.clone(), &block)?;
+
+    let rewards = chain
+        .compute_beacon_block_reward(block_ref, block_root, &mut state)
+        .map_err(beacon_chain_error)?;
+
+    Ok((rewards, execution_optimistic))
+}
diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs
index ae369115d5c..cefa98db415 100644
--- a/beacon_node/http_api/src/sync_committee_rewards.rs
+++ b/beacon_node/http_api/src/sync_committee_rewards.rs
@@ -47,7 +47,7 @@ pub fn compute_sync_committee_rewards<T: BeaconChainTypes>(
     Ok((data, execution_optimistic))
 }
 
-fn get_state_before_applying_block<T: BeaconChainTypes>(
+pub fn get_state_before_applying_block<T: BeaconChainTypes>(
     chain: Arc<BeaconChain<T>>,
     block: &SignedBlindedBeaconBlock<T::EthSpec>,
 ) -> Result<BeaconState<T::EthSpec>, warp::reject::Rejection> {
diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs
index 4fe5a725458..bb370ed5b20 100644
--- a/beacon_node/operation_pool/src/lib.rs
+++ b/beacon_node/operation_pool/src/lib.rs
@@ -8,7 +8,7 @@ mod persistence;
 mod reward_cache;
 mod sync_aggregate_id;
 
-pub use attestation::AttMaxCover;
+pub use attestation::{earliest_attestation_validators, AttMaxCover};
 pub use attestation_storage::{AttestationRef, SplitAttestation};
 pub use max_cover::MaxCover;
 pub use persistence::{
diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs
index b9acc696204..653c6c0bcc7 100644
--- a/common/eth2/src/lib.rs
+++ b/common/eth2/src/lib.rs
@@ -1044,6 +1044,22 @@ impl BeaconNodeHttpClient {
         Ok(())
     }
 
+    /// `GET beacon/rewards/blocks`
+    pub async fn get_beacon_rewards_blocks(&self, epoch: Epoch) -> Result<(), Error> {
+        let mut path = self.eth_path(V1)?;
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+ .push("beacon") + .push("rewards") + .push("blocks"); + + path.query_pairs_mut() + .append_pair("epoch", &epoch.to_string()); + + self.get(path).await + } + /// `POST beacon/rewards/attestations` pub async fn post_beacon_rewards_attestations( &self, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 06801a3927d..e50d9f4dc09 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -4,6 +4,7 @@ mod attestation_performance; pub mod attestation_rewards; mod block_packing_efficiency; mod block_rewards; +mod standard_block_rewards; mod sync_committee_rewards; use crate::{ @@ -30,6 +31,7 @@ pub use block_packing_efficiency::{ }; pub use block_rewards::{AttestationRewards, BlockReward, BlockRewardMeta, BlockRewardsQuery}; pub use lighthouse_network::{types::SyncState, PeerInfo}; +pub use standard_block_rewards::StandardBlockReward; pub use sync_committee_rewards::SyncCommitteeReward; // Define "legacy" implementations of `Option` which use four bytes for encoding the union diff --git a/common/eth2/src/lighthouse/standard_block_rewards.rs b/common/eth2/src/lighthouse/standard_block_rewards.rs new file mode 100644 index 00000000000..502577500d9 --- /dev/null +++ b/common/eth2/src/lighthouse/standard_block_rewards.rs @@ -0,0 +1,26 @@ +use serde::{Deserialize, Serialize}; + +// Details about the rewards for a single block +// All rewards in GWei +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct StandardBlockReward { + // proposer of the block, the proposer index who receives these rewards + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub proposer_index: u64, + // total block reward in gwei, + // equal to attestations + sync_aggregate + proposer_slashings + attester_slashings + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub total: u64, + // block reward component due to included attestations in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub attestations: u64, + // block reward component due to included sync_aggregate in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub sync_aggregate: u64, + // block reward component due to included proposer_slashings in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub proposer_slashings: u64, + // block reward component due to included attester_slashings in gwei + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub attester_slashings: u64, +} From 7934485aef6ce986359c4d2b887bd7cab5d56854 Mon Sep 17 00:00:00 2001 From: Nazar Hussain Date: Wed, 8 Feb 2023 02:18:51 +0000 Subject: [PATCH 150/263] Update the docker build to include features based images (#3875) ## Proposed Changes There are some features that are enabled/disabled with the `FEATURES` env variable. This PR would introduce a pattern to introduce docker images based on those features. This can be useful later on to have specific images for some experimental features in the future. ## Additional Info We at Lodesart need to have `minimal` spec support for some cross-client network testing. To make it efficient on the CI, we tend to use minimal preset. 
--- .github/workflows/docker.yml | 13 ++++++++++--- book/src/docker.md | 8 +++++++- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 76e5d031aab..49288c594cb 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -43,7 +43,7 @@ jobs: VERSION: ${{ env.VERSION }} VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }} build-docker-single-arch: - name: build-docker-${{ matrix.binary }} + name: build-docker-${{ matrix.binary }}${{ matrix.features.version_suffix }} runs-on: ubuntu-22.04 strategy: matrix: @@ -51,6 +51,10 @@ jobs: aarch64-portable, x86_64, x86_64-portable] + features: [ + {version_suffix: "", env: ""}, + {version_suffix: "-dev", env: "spec-minimal"} + ] include: - profile: maxperf @@ -60,6 +64,8 @@ jobs: DOCKER_CLI_EXPERIMENTAL: enabled VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} + FEATURE_SUFFIX: ${{ matrix.features.version_suffix }} + FEATURES: ${{ matrix.features.env }} steps: - uses: actions/checkout@v3 - name: Update Rust @@ -98,8 +104,9 @@ jobs: docker buildx build \ --platform=linux/${SHORT_ARCH} \ --file ./Dockerfile.cross . \ - --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX} \ - --provenance=false \ + --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX}${FEATURE_SUFFIX} \ + --build-arg FEATURES=${FEATURES} \ + --provenance=false \ --push build-docker-multiarch: name: build-docker-multiarch${{ matrix.modernity }} diff --git a/book/src/docker.md b/book/src/docker.md index f22b8a20082..7484f9f525b 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -57,7 +57,7 @@ $ docker pull sigp/lighthouse:latest-modern Image tags follow this format: ``` -${version}${arch}${stability}${modernity} +${version}${arch}${stability}${modernity}${features} ``` The `version` is: @@ -81,6 +81,12 @@ The `modernity` is: * `-modern` for optimized builds * empty for a `portable` unoptimized build +The `features` is: + +* `-dev` for a development build with `minimal-spec` preset enabled. +* empty for a standard build with no custom feature enabled. + + Examples: * `latest-unstable-modern`: most recent `unstable` build for all modern CPUs (x86_64 or ARM) From ceb986549dd7925e96dc8cb31df33a3bca8d6d19 Mon Sep 17 00:00:00 2001 From: Divma Date: Wed, 8 Feb 2023 02:18:53 +0000 Subject: [PATCH 151/263] Self rate limiting dev flag (#3928) ## Issue Addressed Adds self rate limiting options, mainly with the idea to comply with peer's rate limits in small testnets ## Proposed Changes Add a hidden flag `self-limiter` this can take no value, or customs values to configure quotas per protocol ## Additional Info ### How to use `--self-limiter` will turn on the self rate limiter applying the same params we apply to inbound requests (requests from other peers) `--self-limiter "beacon_blocks_by_range:64/1"` will turn on the self rate limiter for ALL protocols, but change the quota for bbrange to 64 requested blocks per 1 second. `--self-limiter "beacon_blocks_by_range:64/1;ping:1/10"` same as previous one, changing the quota for ping as well. ### Caveats - The rate limiter is either on or off for all protocols. I added the custom values to be able to change the quotas per protocol so that some protocols can be given extremely loose or tight quotas. I think this should satisfy every need even if we can't technically turn off rate limits per protocol. 
- This reuses the rate limiter struct for the inbound requests so there is this ugly part of the code in which we need to deal with the inbound only protocols (light client stuff) if this becomes too ugly as we add lc protocols, we might want to split the rate limiters. I've checked this and looks doable with const generics to avoid so much code duplication ### Knowing if this is on ``` Feb 06 21:12:05.493 DEBG Using self rate limiting params config: OutboundRateLimiterConfig { ping: 2/10s, metadata: 1/15s, status: 5/15s, goodbye: 1/10s, blocks_by_range: 1024/10s, blocks_by_root: 128/10s }, service: libp2p_rpc, service: libp2p ``` --- beacon_node/lighthouse_network/src/config.rs | 5 + .../lighthouse_network/src/rpc/config.rs | 173 +++++++++++++++ beacon_node/lighthouse_network/src/rpc/mod.rs | 58 ++++- .../lighthouse_network/src/rpc/protocol.rs | 24 +-- .../src/rpc/rate_limiter.rs | 69 ++++-- .../src/rpc/self_limiter.rs | 202 ++++++++++++++++++ .../lighthouse_network/src/service/mod.rs | 1 + beacon_node/src/cli.rs | 15 ++ beacon_node/src/config.rs | 7 + lighthouse/tests/beacon_node.rs | 13 ++ 10 files changed, 528 insertions(+), 39 deletions(-) create mode 100644 beacon_node/lighthouse_network/src/rpc/config.rs create mode 100644 beacon_node/lighthouse_network/src/rpc/self_limiter.rs diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 0ae3d9a23b6..009aab8e3ca 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -1,3 +1,4 @@ +use crate::rpc::config::OutboundRateLimiterConfig; use crate::types::GossipKind; use crate::{Enr, PeerIdSerialized}; use directory::{ @@ -133,6 +134,9 @@ pub struct Config { /// Whether light client protocols should be enabled. pub enable_light_client_server: bool, + + /// Configuration for the outbound rate limiter (requests made by this node). + pub outbound_rate_limiter_config: Option, } impl Default for Config { @@ -211,6 +215,7 @@ impl Default for Config { topics: Vec::new(), metrics_enabled: false, enable_light_client_server: false, + outbound_rate_limiter_config: None, } } } diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs new file mode 100644 index 00000000000..bea0929fb0b --- /dev/null +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -0,0 +1,173 @@ +use std::{ + fmt::{Debug, Display}, + str::FromStr, + time::Duration, +}; + +use super::{methods, rate_limiter::Quota, Protocol}; + +use serde_derive::{Deserialize, Serialize}; + +/// Auxiliary struct to aid on configuration parsing. +/// +/// A protocol's quota is specified as `protocol_name:tokens/time_in_seconds`. +#[derive(Debug, PartialEq, Eq)] +struct ProtocolQuota { + protocol: Protocol, + quota: Quota, +} + +impl Display for ProtocolQuota { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}:{}/{}", + self.protocol.as_ref(), + self.quota.max_tokens, + self.quota.replenish_all_every.as_secs() + ) + } +} + +impl FromStr for ProtocolQuota { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + let (protocol_str, quota_str) = s + .split_once(':') + .ok_or("Missing ':' from quota definition.")?; + let protocol = protocol_str + .parse() + .map_err(|_parse_err| "Wrong protocol representation in quota")?; + let (tokens_str, time_str) = quota_str + .split_once('/') + .ok_or("Quota should be defined as \"n/t\" (t in seconds). 
Missing '/' from quota.")?; + let tokens = tokens_str + .parse() + .map_err(|_| "Failed to parse tokens from quota.")?; + let seconds = time_str + .parse::() + .map_err(|_| "Failed to parse time in seconds from quota.")?; + Ok(ProtocolQuota { + protocol, + quota: Quota { + replenish_all_every: Duration::from_secs(seconds), + max_tokens: tokens, + }, + }) + } +} + +/// Configurations for the rate limiter applied to outbound requests (made by the node itself). +#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct OutboundRateLimiterConfig { + pub(super) ping_quota: Quota, + pub(super) meta_data_quota: Quota, + pub(super) status_quota: Quota, + pub(super) goodbye_quota: Quota, + pub(super) blocks_by_range_quota: Quota, + pub(super) blocks_by_root_quota: Quota, +} + +impl OutboundRateLimiterConfig { + pub const DEFAULT_PING_QUOTA: Quota = Quota::n_every(2, 10); + pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(2, 5); + pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(5, 15); + pub const DEFAULT_GOODBYE_QUOTA: Quota = Quota::one_every(10); + pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = + Quota::n_every(methods::MAX_REQUEST_BLOCKS, 10); + pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); +} + +impl Default for OutboundRateLimiterConfig { + fn default() -> Self { + OutboundRateLimiterConfig { + ping_quota: Self::DEFAULT_PING_QUOTA, + meta_data_quota: Self::DEFAULT_META_DATA_QUOTA, + status_quota: Self::DEFAULT_STATUS_QUOTA, + goodbye_quota: Self::DEFAULT_GOODBYE_QUOTA, + blocks_by_range_quota: Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA, + blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA, + } + } +} + +impl Debug for OutboundRateLimiterConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + macro_rules! fmt_q { + ($quota:expr) => { + &format_args!( + "{}/{}s", + $quota.max_tokens, + $quota.replenish_all_every.as_secs() + ) + }; + } + + f.debug_struct("OutboundRateLimiterConfig") + .field("ping", fmt_q!(&self.ping_quota)) + .field("metadata", fmt_q!(&self.meta_data_quota)) + .field("status", fmt_q!(&self.status_quota)) + .field("goodbye", fmt_q!(&self.goodbye_quota)) + .field("blocks_by_range", fmt_q!(&self.blocks_by_range_quota)) + .field("blocks_by_root", fmt_q!(&self.blocks_by_root_quota)) + .finish() + } +} + +/// Parse configurations for the outbound rate limiter. Protocols that are not specified use +/// the default values. Protocol specified more than once use only the first given Quota. +/// +/// The expected format is a ';' separated list of [`ProtocolQuota`]. 
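+///
+/// For example, `"ping:1/10;goodbye:1/10"` limits ping and goodbye to one request every ten
+/// seconds each, while every other protocol keeps its default quota.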
+impl FromStr for OutboundRateLimiterConfig { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + let mut ping_quota = None; + let mut meta_data_quota = None; + let mut status_quota = None; + let mut goodbye_quota = None; + let mut blocks_by_range_quota = None; + let mut blocks_by_root_quota = None; + for proto_def in s.split(';') { + let ProtocolQuota { protocol, quota } = proto_def.parse()?; + let quota = Some(quota); + match protocol { + Protocol::Status => status_quota = status_quota.or(quota), + Protocol::Goodbye => goodbye_quota = goodbye_quota.or(quota), + Protocol::BlocksByRange => blocks_by_range_quota = blocks_by_range_quota.or(quota), + Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota), + Protocol::Ping => ping_quota = ping_quota.or(quota), + Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), + Protocol::LightClientBootstrap => return Err("Lighthouse does not send LightClientBootstrap requests. Quota should not be set."), + } + } + Ok(OutboundRateLimiterConfig { + ping_quota: ping_quota.unwrap_or(Self::DEFAULT_PING_QUOTA), + meta_data_quota: meta_data_quota.unwrap_or(Self::DEFAULT_META_DATA_QUOTA), + status_quota: status_quota.unwrap_or(Self::DEFAULT_STATUS_QUOTA), + goodbye_quota: goodbye_quota.unwrap_or(Self::DEFAULT_GOODBYE_QUOTA), + blocks_by_range_quota: blocks_by_range_quota + .unwrap_or(Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA), + blocks_by_root_quota: blocks_by_root_quota + .unwrap_or(Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_quota_inverse() { + let quota = ProtocolQuota { + protocol: Protocol::Goodbye, + quota: Quota { + replenish_all_every: Duration::from_secs(10), + max_tokens: 8, + }, + }; + assert_eq!(quota.to_string().parse(), Ok(quota)) + } +} diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 203a642a8be..31569b820b1 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -12,7 +12,7 @@ use libp2p::swarm::{ PollParameters, SubstreamProtocol, }; use libp2p::PeerId; -use rate_limiter::{RPCRateLimiter as RateLimiter, RPCRateLimiterBuilder, RateLimitedErr}; +use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}; use slog::{crit, debug, o}; use std::marker::PhantomData; use std::sync::Arc; @@ -32,12 +32,17 @@ pub use methods::{ pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, RPCError}; +use self::config::OutboundRateLimiterConfig; +use self::self_limiter::SelfRateLimiter; + pub(crate) mod codec; +pub mod config; mod handler; pub mod methods; mod outbound; mod protocol; mod rate_limiter; +mod self_limiter; /// Composite trait for a request id. pub trait ReqId: Send + 'static + std::fmt::Debug + Copy + Clone {} @@ -100,13 +105,18 @@ pub struct RPCMessage { pub event: HandlerEvent, } +type BehaviourAction = + NetworkBehaviourAction, RPCHandler>; + /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. pub struct RPC { /// Rate limiter limiter: RateLimiter, + /// Rate limiter for our own requests. + self_limiter: Option>, /// Queue of events to be processed. - events: Vec, RPCHandler>>, + events: Vec>, fork_context: Arc, enable_light_client_server: bool, /// Slog logger for RPC behaviour. 
@@ -117,10 +127,12 @@ impl RPC { pub fn new( fork_context: Arc, enable_light_client_server: bool, + outbound_rate_limiter_config: Option, log: slog::Logger, ) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); - let limiter = RPCRateLimiterBuilder::new() + + let limiter = RateLimiter::builder() .n_every(Protocol::MetaData, 2, Duration::from_secs(5)) .n_every(Protocol::Ping, 2, Duration::from_secs(10)) .n_every(Protocol::Status, 5, Duration::from_secs(15)) @@ -134,8 +146,14 @@ impl RPC { .n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10)) .build() .expect("Configuration parameters are valid"); + + let self_limiter = outbound_rate_limiter_config.map(|config| { + SelfRateLimiter::new(config, log.clone()).expect("Configuration parameters are valid") + }); + RPC { limiter, + self_limiter, events: Vec::new(), fork_context, enable_light_client_server, @@ -162,12 +180,24 @@ impl RPC { /// Submits an RPC request. /// /// The peer must be connected for this to succeed. - pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, event: OutboundRequest) { - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler: NotifyHandler::Any, - event: RPCSend::Request(request_id, event), - }); + pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: OutboundRequest) { + let event = if let Some(self_limiter) = self.self_limiter.as_mut() { + match self_limiter.allows(peer_id, request_id, req) { + Ok(event) => event, + Err(_e) => { + // Request is logged and queued internally in the self rate limiter. + return; + } + } + } else { + NetworkBehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: RPCSend::Request(request_id, req), + } + }; + + self.events.push(event); } /// Lighthouse wishes to disconnect from this peer by sending a Goodbye message. This @@ -272,11 +302,19 @@ where cx: &mut Context, _: &mut impl PollParameters, ) -> Poll> { - // let the rate limiter prune + // let the rate limiter prune. let _ = self.limiter.poll_unpin(cx); + + if let Some(self_limiter) = self.self_limiter.as_mut() { + if let Poll::Ready(event) = self_limiter.poll_ready(cx) { + self.events.push(event) + } + } + if !self.events.is_empty() { return Poll::Ready(self.events.remove(0)); } + Poll::Pending } } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 1f40f81971c..e5d784d8004 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -14,7 +14,7 @@ use std::io; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; -use strum::IntoStaticStr; +use strum::{AsRefStr, Display, EnumString, IntoStaticStr}; use tokio_io_timeout::TimeoutStream; use tokio_util::{ codec::Framed, @@ -139,21 +139,26 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { } /// Protocol names to be used. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumString, AsRefStr, Display)] +#[strum(serialize_all = "snake_case")] pub enum Protocol { /// The Status protocol name. Status, /// The Goodbye protocol name. Goodbye, /// The `BlocksByRange` protocol name. + #[strum(serialize = "beacon_blocks_by_range")] BlocksByRange, /// The `BlocksByRoot` protocol name. + #[strum(serialize = "beacon_blocks_by_root")] BlocksByRoot, /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. 
+ #[strum(serialize = "metadata")] MetaData, /// The `LightClientBootstrap` protocol name. + #[strum(serialize = "light_client_bootstrap")] LightClientBootstrap, } @@ -172,21 +177,6 @@ pub enum Encoding { SSZSnappy, } -impl std::fmt::Display for Protocol { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let repr = match self { - Protocol::Status => "status", - Protocol::Goodbye => "goodbye", - Protocol::BlocksByRange => "beacon_blocks_by_range", - Protocol::BlocksByRoot => "beacon_blocks_by_root", - Protocol::Ping => "ping", - Protocol::MetaData => "metadata", - Protocol::LightClientBootstrap => "light_client_bootstrap", - }; - f.write_str(repr) - } -} - impl std::fmt::Display for Encoding { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let repr = match self { diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 6ba9f6e9419..a1f7b89a2f2 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -1,6 +1,7 @@ -use crate::rpc::{InboundRequest, Protocol}; +use crate::rpc::Protocol; use fnv::FnvHashMap; use libp2p::PeerId; +use serde_derive::{Deserialize, Serialize}; use std::convert::TryInto; use std::future::Future; use std::hash::Hash; @@ -47,12 +48,31 @@ type Nanosecs = u64; /// n*`replenish_all_every`/`max_tokens` units of time since their last request. /// /// To produce hard limits, set `max_tokens` to 1. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct Quota { /// How often are `max_tokens` fully replenished. - replenish_all_every: Duration, + pub(super) replenish_all_every: Duration, /// Token limit. This translates on how large can an instantaneous batch of /// tokens be. - max_tokens: u64, + pub(super) max_tokens: u64, +} + +impl Quota { + /// A hard limit of one token every `seconds`. + pub const fn one_every(seconds: u64) -> Self { + Quota { + replenish_all_every: Duration::from_secs(seconds), + max_tokens: 1, + } + } + + /// Allow `n` tokens to be use used every `seconds`. + pub const fn n_every(n: u64, seconds: u64) -> Self { + Quota { + replenish_all_every: Duration::from_secs(seconds), + max_tokens: n, + } + } } /// Manages rate limiting of requests per peer, with differentiated rates per protocol. @@ -78,6 +98,7 @@ pub struct RPCRateLimiter { } /// Error type for non conformant requests +#[derive(Debug)] pub enum RateLimitedErr { /// Required tokens for this request exceed the maximum TooLarge, @@ -86,7 +107,7 @@ pub enum RateLimitedErr { } /// User-friendly builder of a `RPCRateLimiter` -#[derive(Default)] +#[derive(Default, Clone)] pub struct RPCRateLimiterBuilder { /// Quota for the Goodbye protocol. goodbye_quota: Option, @@ -105,13 +126,8 @@ pub struct RPCRateLimiterBuilder { } impl RPCRateLimiterBuilder { - /// Get an empty `RPCRateLimiterBuilder`. - pub fn new() -> Self { - Default::default() - } - /// Set a quota for a protocol. 
- fn set_quota(mut self, protocol: Protocol, quota: Quota) -> Self { + pub fn set_quota(mut self, protocol: Protocol, quota: Quota) -> Self { let q = Some(quota); match protocol { Protocol::Ping => self.ping_quota = q, @@ -191,11 +207,40 @@ impl RPCRateLimiterBuilder { } } +pub trait RateLimiterItem { + fn protocol(&self) -> Protocol; + fn expected_responses(&self) -> u64; +} + +impl RateLimiterItem for super::InboundRequest { + fn protocol(&self) -> Protocol { + self.protocol() + } + + fn expected_responses(&self) -> u64 { + self.expected_responses() + } +} + +impl RateLimiterItem for super::OutboundRequest { + fn protocol(&self) -> Protocol { + self.protocol() + } + + fn expected_responses(&self) -> u64 { + self.expected_responses() + } +} impl RPCRateLimiter { - pub fn allows( + /// Get a builder instance. + pub fn builder() -> RPCRateLimiterBuilder { + RPCRateLimiterBuilder::default() + } + + pub fn allows( &mut self, peer_id: &PeerId, - request: &InboundRequest, + request: &Item, ) -> Result<(), RateLimitedErr> { let time_since_start = self.init_time.elapsed(); let tokens = request.expected_responses().max(1); diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs new file mode 100644 index 00000000000..451c6206f37 --- /dev/null +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -0,0 +1,202 @@ +use std::{ + collections::{hash_map::Entry, HashMap, VecDeque}, + task::{Context, Poll}, + time::Duration, +}; + +use futures::FutureExt; +use libp2p::{swarm::NotifyHandler, PeerId}; +use slog::{crit, debug, Logger}; +use smallvec::SmallVec; +use tokio_util::time::DelayQueue; +use types::EthSpec; + +use super::{ + config::OutboundRateLimiterConfig, + rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}, + BehaviourAction, OutboundRequest, Protocol, RPCSend, ReqId, +}; + +/// A request that was rate limited or waiting on rate limited requests for the same peer and +/// protocol. +struct QueuedRequest { + req: OutboundRequest, + request_id: Id, +} + +pub(crate) struct SelfRateLimiter { + /// Requests queued for sending per peer. This requests are stored when the self rate + /// limiter rejects them. Rate limiting is based on a Peer and Protocol basis, therefore + /// are stored in the same way. + delayed_requests: HashMap<(PeerId, Protocol), VecDeque>>, + /// The delay required to allow a peer's outbound request per protocol. + next_peer_request: DelayQueue<(PeerId, Protocol)>, + /// Rate limiter for our own requests. + limiter: RateLimiter, + /// Requests that are ready to be sent. + ready_requests: SmallVec<[BehaviourAction; 3]>, + /// Slog logger. + log: Logger, +} + +/// Error returned when the rate limiter does not accept a request. +// NOTE: this is currently not used, but might be useful for debugging. +pub enum Error { + /// There are queued requests for this same peer and protocol. + PendingRequests, + /// Request was tried but rate limited. + RateLimited, +} + +impl SelfRateLimiter { + /// Creates a new [`SelfRateLimiter`] based on configration values. + pub fn new(config: OutboundRateLimiterConfig, log: Logger) -> Result { + debug!(log, "Using self rate limiting params"; "config" => ?config); + // Destructure to make sure every configuration value is used. 
+ let OutboundRateLimiterConfig { + ping_quota, + meta_data_quota, + status_quota, + goodbye_quota, + blocks_by_range_quota, + blocks_by_root_quota, + } = config; + + let limiter = RateLimiter::builder() + .set_quota(Protocol::Ping, ping_quota) + .set_quota(Protocol::MetaData, meta_data_quota) + .set_quota(Protocol::Status, status_quota) + .set_quota(Protocol::Goodbye, goodbye_quota) + .set_quota(Protocol::BlocksByRange, blocks_by_range_quota) + .set_quota(Protocol::BlocksByRoot, blocks_by_root_quota) + // Manually set the LightClientBootstrap quota, since we use the same rate limiter for + // inbound and outbound requests, and the LightClientBootstrap is an only inbound + // protocol. + .one_every(Protocol::LightClientBootstrap, Duration::from_secs(10)) + .build()?; + + Ok(SelfRateLimiter { + delayed_requests: Default::default(), + next_peer_request: Default::default(), + limiter, + ready_requests: Default::default(), + log, + }) + } + + /// Checks if the rate limiter allows the request. If it's allowed, returns the + /// [`NetworkBehaviourAction`] that should be emitted. When not allowed, the request is delayed + /// until it can be sent. + pub fn allows( + &mut self, + peer_id: PeerId, + request_id: Id, + req: OutboundRequest, + ) -> Result, Error> { + let protocol = req.protocol(); + // First check that there are not already other requests waiting to be sent. + if let Some(queued_requests) = self.delayed_requests.get_mut(&(peer_id, protocol)) { + queued_requests.push_back(QueuedRequest { req, request_id }); + + return Err(Error::PendingRequests); + } + match Self::try_send_request(&mut self.limiter, peer_id, request_id, req, &self.log) { + Err((rate_limited_req, wait_time)) => { + let key = (peer_id, protocol); + self.next_peer_request.insert(key, wait_time); + self.delayed_requests + .entry(key) + .or_default() + .push_back(rate_limited_req); + + Err(Error::RateLimited) + } + Ok(event) => Ok(event), + } + } + + /// Auxiliary function to deal with self rate limiting outcomes. If the rate limiter allows the + /// request, the [`NetworkBehaviourAction`] that should be emitted is returned. If the request + /// should be delayed, it's returned with the duration to wait. + fn try_send_request( + limiter: &mut RateLimiter, + peer_id: PeerId, + request_id: Id, + req: OutboundRequest, + log: &Logger, + ) -> Result, (QueuedRequest, Duration)> { + match limiter.allows(&peer_id, &req) { + Ok(()) => Ok(BehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: RPCSend::Request(request_id, req), + }), + Err(e) => { + let protocol = req.protocol(); + match e { + RateLimitedErr::TooLarge => { + // this should never happen with default parameters. Let's just send the request. + // Log a crit since this is a config issue. + crit!( + log, + "Self rate limiting error for a batch that will never fit. Sending request anyway. Check configuration parameters."; + "protocol" => %req.protocol() + ); + Ok(BehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: RPCSend::Request(request_id, req), + }) + } + RateLimitedErr::TooSoon(wait_time) => { + debug!(log, "Self rate limiting"; "protocol" => %protocol, "wait_time_ms" => wait_time.as_millis(), "peer_id" => %peer_id); + Err((QueuedRequest { req, request_id }, wait_time)) + } + } + } + } + } + + /// When a peer and protocol are allowed to send a next request, this function checks the + /// queued requests and attempts marking as ready as many as the limiter allows. 
+ fn next_peer_request_ready(&mut self, peer_id: PeerId, protocol: Protocol) { + if let Entry::Occupied(mut entry) = self.delayed_requests.entry((peer_id, protocol)) { + let queued_requests = entry.get_mut(); + while let Some(QueuedRequest { req, request_id }) = queued_requests.pop_front() { + match Self::try_send_request(&mut self.limiter, peer_id, request_id, req, &self.log) + { + Err((rate_limited_req, wait_time)) => { + let key = (peer_id, protocol); + self.next_peer_request.insert(key, wait_time); + queued_requests.push_back(rate_limited_req); + // If one fails just wait for the next window that allows sending requests. + return; + } + Ok(event) => self.ready_requests.push(event), + } + } + if queued_requests.is_empty() { + entry.remove(); + } + } + } + + pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + // First check the requests that were self rate limited, since those might add events to + // the queue. Also do this this before rate limiter prunning to avoid removing and + // immediately adding rate limiting keys. + if let Poll::Ready(Some(Ok(expired))) = self.next_peer_request.poll_expired(cx) { + let (peer_id, protocol) = expired.into_inner(); + self.next_peer_request_ready(peer_id, protocol); + } + // Prune the rate limiter. + let _ = self.limiter.poll_unpin(cx); + + // Finally return any queued events. + if !self.ready_requests.is_empty() { + return Poll::Ready(self.ready_requests.remove(0)); + } + + Poll::Pending + } +} diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 5b3598216b5..832f025c432 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -262,6 +262,7 @@ impl Network { let eth2_rpc = RPC::new( ctx.fork_context.clone(), config.enable_light_client_server, + config.outbound_rate_limiter_config.clone(), log.clone(), ); diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 38d81512e4b..b4da83315c8 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -194,6 +194,21 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Lighthouse by default does not discover private IP addresses. Set this flag to enable connection attempts to local addresses.") .takes_value(false), ) + .arg( + Arg::with_name("self-limiter") + .long("self-limiter") + .help( + "Enables the outbound rate limiter (requests made by this node).\ + \ + Rate limit quotas per protocol can be set in the form of \ + :/. To set quotas for multiple protocols, \ + separate them by ';'. If the self rate limiter is enabled and a protocol is not \ + present in the configuration, the quotas used for the inbound rate limiter will be \ + used." + ) + .min_values(0) + .hidden(true) + ) /* REST API related arguments */ .arg( Arg::with_name("http") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 294568cca9f..726f8368ea4 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -967,6 +967,13 @@ pub fn set_network_config( // Light client server config. config.enable_light_client_server = cli_args.is_present("light-client-server"); + // This flag can be used both with or without a value. Try to parse it first with a value, if + // no value is defined but the flag is present, use the default params. 
+ config.outbound_rate_limiter_config = clap_utils::parse_optional(cli_args, "self-limiter")?; + if cli_args.is_present("self-limiter") && config.outbound_rate_limiter_config.is_none() { + config.outbound_rate_limiter_config = Some(Default::default()); + } + Ok(()) } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 7e581ee6152..053a04f879a 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1079,6 +1079,19 @@ fn http_port_flag() { .with_config(|config| assert_eq!(config.http_api.listen_port, port1)); } #[test] +fn empty_self_limiter_flag() { + // Test that empty rate limiter is accepted using the default rate limiting configurations. + CommandLineTest::new() + .flag("self-limiter", None) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.network.outbound_rate_limiter_config, + Some(lighthouse_network::rpc::config::OutboundRateLimiterConfig::default()) + ) + }); +} +#[test] fn http_allow_origin_flag() { CommandLineTest::new() .flag("http-allow-origin", Some("127.0.0.99")) From 1cae98856c6f7835a7080f5a5e13efae73c63062 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 8 Feb 2023 02:18:54 +0000 Subject: [PATCH 152/263] Update dependencies (#3946) ## Issue Addressed Resolves the cargo-audit failure caused by https://rustsec.org/advisories/RUSTSEC-2023-0010. I also removed the ignore for `RUSTSEC-2020-0159` as we are no longer using a vulnerable version of `chrono`. We still need the other ignore for `time 0.1` because we depend on it via `sloggers -> chrono -> time 0.1`. --- Cargo.lock | 534 ++++++++++++++++++++++++++++------------------------- Makefile | 2 +- 2 files changed, 287 insertions(+), 249 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 05e543049c0..69651fb7ee8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -205,15 +205,15 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61" +checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" [[package]] name = "arbitrary" -version = "1.2.2" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0224938f92e7aef515fac2ff2d18bd1115c1394ddf4a092e0c87e8be9499ee5" +checksum = "3e90af4de65aa7b293ef2d09daff88501eb254f58edde2e1ac02c82d873eadad" dependencies = [ "derive_arbitrary", ] @@ -245,7 +245,7 @@ dependencies = [ "asn1-rs-derive 0.1.0", "asn1-rs-impl", "displaydoc", - "nom 7.1.2", + "nom 7.1.3", "num-traits", "rusticata-macros", "thiserror", @@ -261,7 +261,7 @@ dependencies = [ "asn1-rs-derive 0.4.0", "asn1-rs-impl", "displaydoc", - "nom 7.1.2", + "nom 7.1.3", "num-traits", "rusticata-macros", "thiserror", @@ -326,7 +326,7 @@ dependencies = [ "slab", "socket2", "waker-fn", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -362,9 +362,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.61" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705339e0e4a9690e2908d2b3d049d85682cf19fbd5782494498fbf7003a6a282" +checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" dependencies = [ "proc-macro2", "quote", @@ -397,9 +397,9 @@ dependencies = [ [[package]] name = "atomic-waker" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" +checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599" [[package]] name = "attohttpc" @@ -530,6 +530,12 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +[[package]] +name = "base64" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" + [[package]] name = "base64ct" version = "1.5.3" @@ -845,9 +851,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" [[package]] name = "byte-slice-cast" @@ -863,9 +869,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" dependencies = [ "serde", ] @@ -914,9 +920,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "ccm" @@ -935,7 +941,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom 7.1.2", + "nom 7.1.3", ] [[package]] @@ -1118,9 +1124,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7bef69dc86e3c610e4e7aed41035e2a7ed12e72dd7530f61327a6579a4390b" +checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" dependencies = [ "crossbeam-utils", ] @@ -1189,18 +1195,18 @@ checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" [[package]] name = "crc" -version = "3.0.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53757d12b596c16c78b83458d732a5d1a17ab3f53f2f7412f6fb57cc8a140ab3" +checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" dependencies = [ "crc-catalog", ] [[package]] name = "crc-catalog" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d0165d2900ae6778e36e80bbc4da3b5eefccee9ba939761f9c2882a5d9af3ff" +checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" [[package]] name = "crc32fast" @@ -1390,12 +1396,12 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.4" +version = "3.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1631ca6e3c59112501a9d87fd86f21591ff77acd31331e8a73f8d80a65bbdd71" +checksum = "bbcf33c2a618cbe41ee43ae6e9f2e48368cd9f9db2896f10167d8d762679f639" dependencies = [ - "nix 
0.26.1", - "windows-sys", + "nix 0.26.2", + "windows-sys 0.45.0", ] [[package]] @@ -1413,9 +1419,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-pre.5" +version = "4.0.0-rc.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67bc65846be335cb20f4e52d49a437b773a2c1fdb42b19fc84e79e6f6771536f" +checksum = "8da00a7a9a4eb92a0a0f8e75660926d48f0d0f3c537e455c457bcdaa1e16b1ac" dependencies = [ "cfg-if", "fiat-crypto", @@ -1427,9 +1433,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d1075c37807dcf850c379432f0df05ba52cc30f279c5cfc43cc221ce7f8579" +checksum = "bc831ee6a32dd495436e317595e639a587aa9907bef96fe6e6abc290ab6204e9" dependencies = [ "cc", "cxxbridge-flags", @@ -1439,9 +1445,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5044281f61b27bc598f2f6647d480aed48d2bf52d6eb0b627d84c0361b17aa70" +checksum = "94331d54f1b1a8895cd81049f7eaaaef9d05a7dcb4d1fd08bf3ff0806246789d" dependencies = [ "cc", "codespan-reporting", @@ -1454,15 +1460,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b50bc93ba22c27b0d31128d2d130a0a6b3d267ae27ef7e4fae2167dfe8781c" +checksum = "48dcd35ba14ca9b40d6e4b4b39961f23d835dbb8eed74565ded361d93e1feb8a" [[package]] name = "cxxbridge-macro" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e61fda7e62115119469c7b3591fd913ecca96fb766cfd3f2e2502ab7bc87a5" +checksum = "81bbeb29798b407ccd82a3324ade1a7286e0d29851475990b612670f6f5124d2" dependencies = [ "proc-macro2", "quote", @@ -1481,12 +1487,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa" +checksum = "c0808e1bd8671fb44a113a14e13497557533369847788fa2ae912b6ebfce9fa8" dependencies = [ - "darling_core 0.14.2", - "darling_macro 0.14.2", + "darling_core 0.14.3", + "darling_macro 0.14.3", ] [[package]] @@ -1505,9 +1511,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" +checksum = "001d80444f28e193f30c2f293455da62dcf9a6b29918a4253152ae2b1de592cb" dependencies = [ "fnv", "ident_case", @@ -1530,11 +1536,11 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" +checksum = "b36230598a2d5de7ec1c6f51f72d8a99a9208daff41de2084d06e3fd3ea56685" dependencies = [ - "darling_core 0.14.2", + "darling_core 0.14.3", "quote", "syn", ] @@ -1652,7 +1658,7 @@ checksum = "fe398ac75057914d7d07307bf67dc7f3f574a26783b4fc7805a20ffa9f506e82" dependencies = [ "asn1-rs 0.3.1", "displaydoc", - "nom 7.1.2", + "nom 7.1.3", "num-bigint", "num-traits", "rusticata-macros", @@ -1666,7 +1672,7 @@ checksum = "42d4bc9b0db0a0df9ae64634ac5bdefb7afcb534e182275ca0beadbe486701c1" dependencies = [ "asn1-rs 
0.5.1", "displaydoc", - "nom 7.1.2", + "nom 7.1.3", "num-bigint", "num-traits", "rusticata-macros", @@ -1685,9 +1691,9 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.2.2" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf460bbff5f571bfc762da5102729f59f338be7db17a21fade44c5c4f5005350" +checksum = "8beee4701e2e229e8098bbdecdca12449bc3e322f137d269182fa1291e20bd00" dependencies = [ "proc-macro2", "quote", @@ -1709,7 +1715,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" dependencies = [ - "darling 0.14.2", + "darling 0.14.3", "proc-macro2", "quote", "syn", @@ -1873,9 +1879,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" +checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" dependencies = [ "signature", ] @@ -1927,9 +1933,9 @@ dependencies = [ [[package]] name = "either" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "elliptic-curve" @@ -1955,9 +1961,9 @@ dependencies = [ [[package]] name = "encoding_rs" -version = "0.8.31" +version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ "cfg-if", ] @@ -1968,7 +1974,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26fa0a0be8915790626d5759eb51fe47435a8eac92c2f212bd2da9aa7f30ea56" dependencies = [ - "base64", + "base64 0.13.1", "bs58", "bytes", "ed25519-dalek", @@ -2145,7 +2151,7 @@ dependencies = [ name = "eth2_interop_keypairs" version = "0.2.0" dependencies = [ - "base64", + "base64 0.13.1", "bls", "eth2_hashing", "hex", @@ -2422,7 +2428,7 @@ checksum = "a1a9e0597aa6b2fdc810ff58bc95e4eeaa2c219b3e615ed025106ecb027407d8" dependencies = [ "async-trait", "auto_impl", - "base64", + "base64 0.13.1", "ethers-core", "futures-channel", "futures-core", @@ -2696,12 +2702,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "fs_extra" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" - [[package]] name = "funty" version = "1.1.0" @@ -2716,9 +2716,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" dependencies = [ "futures-channel", "futures-core", @@ -2731,9 +2731,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +checksum = 
"2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" dependencies = [ "futures-core", "futures-sink", @@ -2741,15 +2741,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" +checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" [[package]] name = "futures-executor" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" dependencies = [ "futures-core", "futures-task", @@ -2759,9 +2759,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" [[package]] name = "futures-lite" @@ -2780,9 +2780,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" dependencies = [ "proc-macro2", "quote", @@ -2796,21 +2796,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" dependencies = [ "futures-io", - "rustls 0.20.7", + "rustls 0.20.8", "webpki 0.22.0", ] [[package]] name = "futures-sink" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" +checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" [[package]] name = "futures-task" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" +checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" [[package]] name = "futures-timer" @@ -2820,9 +2820,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" dependencies = [ "futures-channel", "futures-core", @@ -2924,9 +2924,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec7af912d60cdbd3677c1af9352ebae6fb8394d165568a2234df0fa00f87793" +checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec" [[package]] name = "git-version" @@ -3049,7 +3049,7 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ - "base64", + "base64 0.13.1", "bitflags", "bytes", "headers-core", 
@@ -3070,9 +3070,9 @@ dependencies = [ [[package]] name = "heck" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" @@ -3286,9 +3286,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.23" +version = "0.14.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" +checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" dependencies = [ "bytes", "futures-channel", @@ -3316,7 +3316,7 @@ checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", - "rustls 0.20.7", + "rustls 0.20.8", "tokio", "tokio-rustls 0.23.4", ] @@ -3463,7 +3463,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.2.1", + "parity-scale-codec 3.3.0", ] [[package]] @@ -3615,12 +3615,11 @@ dependencies = [ [[package]] name = "jemalloc-sys" -version = "0.5.2+5.3.0-patched" +version = "0.5.3+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134163979b6eed9564c98637b710b40979939ba351f59952708234ea11b5f3f8" +checksum = "f9bd5d616ea7ed58b571b2e209a65759664d7fb021a0819d7a790afc67e47ca1" dependencies = [ "cc", - "fs_extra", "libc", ] @@ -3636,9 +3635,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.60" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" dependencies = [ "wasm-bindgen", ] @@ -3664,7 +3663,7 @@ version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" dependencies = [ - "base64", + "base64 0.13.1", "pem", "ring", "serde", @@ -3966,7 +3965,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a173171c71c29bb156f98886c7c4824596de3903dadf01e2e79d2ccdcf38cd9f" dependencies = [ "asynchronous-codec", - "base64", + "base64 0.13.1", "byteorder", "bytes", "fnv", @@ -4117,7 +4116,7 @@ dependencies = [ "parking_lot 0.12.1", "quinn-proto", "rand 0.8.5", - "rustls 0.20.7", + "rustls 0.20.8", "thiserror", "tokio", ] @@ -4182,7 +4181,7 @@ dependencies = [ "libp2p-core 0.38.0", "rcgen 0.10.0", "ring", - "rustls 0.20.7", + "rustls 0.20.8", "thiserror", "webpki 0.22.0", "x509-parser 0.14.0", @@ -4260,7 +4259,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" dependencies = [ "arrayref", - "base64", + "base64 0.13.1", "digest 0.9.0", "hmac-drbg", "libsecp256k1-core", @@ -4581,9 +4580,9 @@ dependencies = [ [[package]] name = "matches" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name 
= "matchit" @@ -4738,7 +4737,7 @@ dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -4939,9 +4938,9 @@ dependencies = [ [[package]] name = "netlink-packet-utils" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25af9cf0dc55498b7bd94a1508af7a78706aa0ab715a73c5169273e03c84845e" +checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34" dependencies = [ "anyhow", "byteorder", @@ -4966,9 +4965,9 @@ dependencies = [ [[package]] name = "netlink-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92b654097027250401127914afb37cb1f311df6610a9891ff07a757e94199027" +checksum = "260e21fbb6f3d253a14df90eb0000a6066780a15dd901a7519ce02d77a94985b" dependencies = [ "bytes", "futures", @@ -5048,9 +5047,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46a58d1d356c6597d08cde02c2f09d785b09e28711837b1ed667dc652c08a694" +checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ "bitflags", "cfg-if", @@ -5087,9 +5086,9 @@ checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" [[package]] name = "nom" -version = "7.1.2" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5507769c4919c998e69e49c839d9dc6e693ede4cc4290d6ad8b41d4f09c548c" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", @@ -5195,9 +5194,9 @@ dependencies = [ [[package]] name = "object" -version = "0.30.1" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d864c91689fdc196779b98dba0aceac6118594c2df6ee5d943eb6a8df4d107a" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" dependencies = [ "memchr", ] @@ -5304,9 +5303,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.24.0+1.1.1s" +version = "111.25.0+1.1.1t" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3498f259dab01178c6228c6b00dcef0ed2a2d5e20d648c017861227773ea4abd" +checksum = "3173cd3626c43e3854b1b727422a276e568d9ec5fe8cec197822cf52cfb743d6" dependencies = [ "cc", ] @@ -5402,15 +5401,15 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.2.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "366e44391a8af4cfd6002ef6ba072bae071a96aafca98d7d448a34c5dca38b6a" +checksum = "c3840933452adf7b3b9145e27086a5a3376c619dca1a21b1e5a5af0d54979bed" dependencies = [ "arrayvec", "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.1.3", + "parity-scale-codec-derive 3.1.4", "serde", ] @@ -5428,9 +5427,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.1.3" +version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" +checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5462,7 +5461,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.5", + "parking_lot_core 0.9.7", ] [[package]] @@ -5481,15 +5480,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.5" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-sys", + "windows-sys 0.45.0", ] [[package]] @@ -5524,11 +5523,11 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" dependencies = [ - "base64", + "base64 0.13.1", ] [[package]] @@ -5548,9 +5547,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.2" +version = "2.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f6e86fb9e7026527a0d46bc308b841d73170ef8f443e1807f6ef88526a816d4" +checksum = "028accff104c4e513bad663bbcd2ad7cfd5304144404c31ed0a77ac103d00660" dependencies = [ "thiserror", "ucd-trie", @@ -5558,9 +5557,9 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" +checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ "fixedbitset", "indexmap", @@ -5681,7 +5680,7 @@ dependencies = [ "libc", "log", "wepoll-ffi", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -5803,9 +5802,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.49" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" +checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" dependencies = [ "unicode-ident", ] @@ -5862,9 +5861,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c01db6702aa05baa3f57dec92b8eeeeb4cb19e894e73996b32a4093289e54592" +checksum = "21dc42e00223fc37204bd4aa177e69420c604ca4a183209a8f9de30c6d934698" dependencies = [ "bytes", "prost-derive", @@ -5872,9 +5871,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb5320c680de74ba083512704acb90fe00f28f79207286a848e730c45dd73ed6" +checksum = "a3f8ad728fb08fe212df3c05169e940fbb6d9d16a877ddde14644a983ba2012e" dependencies = [ "bytes", "heck", @@ -5907,9 +5906,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8842bad1a5419bca14eac663ba798f6bc19c413c2fdceb5f3ba3b0932d96720" +checksum = "8bda8c0881ea9f722eb9629376db3d0b903b462477c1aafcb0566610ac28ac5d" 
dependencies = [ "anyhow", "itertools", @@ -5920,9 +5919,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "017f79637768cde62820bc2d4fe0e45daaa027755c323ad077767c6c5f173091" +checksum = "a5e0526209433e96d83d750dd81a99118edbc55739e7e61a46764fd2ad537788" dependencies = [ "bytes", "prost", @@ -6016,7 +6015,7 @@ dependencies = [ "rand 0.8.5", "ring", "rustc-hash", - "rustls 0.20.7", + "rustls 0.20.8", "slab", "thiserror", "tinyvec", @@ -6158,9 +6157,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.1" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3" +checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -6250,11 +6249,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.13" +version = "0.11.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" +checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" dependencies = [ - "base64", + "base64 0.21.0", "bytes", "encoding_rs", "futures-core", @@ -6273,7 +6272,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite 0.2.9", - "rustls 0.20.7", + "rustls 0.20.8", "rustls-pemfile", "serde", "serde_json", @@ -6286,6 +6285,7 @@ dependencies = [ "url", "wasm-bindgen", "wasm-bindgen-futures", + "wasm-streams", "web-sys", "webpki-roots", "winreg", @@ -6470,7 +6470,7 @@ version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" dependencies = [ - "nom 7.1.2", + "nom 7.1.3", ] [[package]] @@ -6479,7 +6479,7 @@ version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ - "base64", + "base64 0.13.1", "log", "ring", "sct 0.6.1", @@ -6488,9 +6488,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.7" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", @@ -6500,11 +6500,11 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64", + "base64 0.21.0", ] [[package]] @@ -6566,7 +6566,7 @@ checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" dependencies = [ "cfg-if", "derive_more", - "parity-scale-codec 3.2.1", + "parity-scale-codec 3.3.0", "scale-info-derive", ] @@ -6588,7 +6588,7 @@ version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -6696,9 +6696,9 @@ dependencies = [ [[package]] name = "security-framework" -version 
= "2.7.0" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" +checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ "bitflags", "core-foundation", @@ -6709,9 +6709,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.6.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" dependencies = [ "core-foundation-sys", "libc", @@ -6758,9 +6758,9 @@ dependencies = [ [[package]] name = "send_wrapper" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "930c0acf610d3fdb5e2ab6213019aaa04e227ebe9547b0649ba599b16d788bd7" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "sensitive_url" @@ -6812,9 +6812,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" +checksum = "7434af0dc1cbd59268aa98b4c22c131c0584d2232f6fb166efb993e2832e896a" dependencies = [ "itoa 1.0.5", "ryu", @@ -7224,14 +7224,14 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774d05a3edae07ce6d68ea6984f3c05e9bba8927e3dd591e3b479e5b03213d0d" +checksum = "12ba5f4d4ff12bdb6a169ed51b7c48c0e0ac4b0b4b31012b2571e97d78d3201d" dependencies = [ "aes-gcm 0.9.4", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-pre.5", + "curve25519-dalek 4.0.0-rc.0", "rand_core 0.6.4", "ring", "rustc_version 0.4.0", @@ -7255,7 +7255,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ - "base64", + "base64 0.13.1", "bytes", "flate2", "futures", @@ -7416,7 +7416,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7e94b1ec00bad60e6410e058b52f1c66de3dc5fe4d62d09b3e52bb7d3b73e25" dependencies = [ - "base64", + "base64 0.13.1", "crc", "lazy_static", "md-5", @@ -7480,9 +7480,9 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "synstructure" @@ -7498,9 +7498,9 @@ dependencies = [ [[package]] name = "sysinfo" -version = "0.26.8" +version = "0.26.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29ddf41e393a9133c81d5f0974195366bd57082deac6e0eb02ed39b8341c2bb6" +checksum = "5c18a6156d1f27a9592ee18c1a846ca8dd5c258b7179fc193ae87c74ebb666f5" dependencies = [ "cfg-if", "core-foundation-sys", @@ -7610,9 +7610,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] @@ -7780,15 +7780,15 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.24.1" +version = "1.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d9f76183f91ecfb55e1d7d5602bd1d979e38a3a522fe900241cf195624d67ae" +checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" dependencies = [ "autocfg 1.1.0", "bytes", @@ -7801,7 +7801,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -7827,9 +7827,9 @@ dependencies = [ [[package]] name = "tokio-native-tls" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", "tokio", @@ -7852,7 +7852,7 @@ version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "rustls 0.20.7", + "rustls 0.20.8", "tokio", "webpki 0.22.0", ] @@ -7890,7 +7890,7 @@ checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", "log", - "rustls 0.20.7", + "rustls 0.20.8", "tokio", "tokio-rustls 0.23.4", "tungstenite 0.17.3", @@ -7931,9 +7931,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] @@ -8169,7 +8169,7 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0b2d8558abd2e276b0a8df5c05a2ec762609344191e5fd23e292c910e9165b5" dependencies = [ - "base64", + "base64 0.13.1", "byteorder", "bytes", "http", @@ -8188,14 +8188,14 @@ version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ - "base64", + "base64 0.13.1", "byteorder", "bytes", "http", "httparse", "log", "rand 0.8.5", - "rustls 0.20.7", + "rustls 0.20.8", "sha-1 0.10.1", "thiserror", "url", @@ -8210,7 +8210,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4712ee30d123ec7ae26d1e1b218395a16c87cdbaf4b3925d170d684af62ea5e8" dependencies = [ "async-trait", - "base64", + "base64 0.13.1", "futures", "log", "md-5", @@ -8324,9 +8324,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" [[package]] name = "unicode-ident" @@ -8424,9 +8424,9 @@ 
dependencies = [ [[package]] name = "uuid" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" +checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" dependencies = [ "getrandom 0.2.8", ] @@ -8637,9 +8637,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -8647,9 +8647,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" dependencies = [ "bumpalo", "log", @@ -8662,9 +8662,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.33" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" +checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" dependencies = [ "cfg-if", "js-sys", @@ -8674,9 +8674,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8684,9 +8684,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", @@ -8697,15 +8697,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" +checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" [[package]] name = "wasm-bindgen-test" -version = "0.3.33" +version = "0.3.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d2fff962180c3fadf677438054b1db62bee4aa32af26a45388af07d1287e1d" +checksum = "6db36fc0f9fb209e88fb3642590ae0205bb5a56216dabd963ba15879fe53a30b" dependencies = [ "console_error_panic_hook", "js-sys", @@ -8717,14 +8717,27 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.33" +version = "0.3.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4683da3dfc016f704c9f82cf401520c4f1cb3ee440f7f52b3d6ac29506a49ca7" +checksum = "0734759ae6b3b1717d661fe4f016efcfb9828f5edb4520c18eaee05af3b43be9" dependencies = [ "proc-macro2", "quote", ] +[[package]] +name = "wasm-streams" +version = "0.2.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bbae3363c08332cadccd13b67db371814cd214c2524020932f0804b8cf7c078" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wasm-timer" version = "0.2.5" @@ -8742,9 +8755,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.60" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" dependencies = [ "js-sys", "wasm-bindgen", @@ -8757,7 +8770,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44f258e254752d210b84fe117b31f1e3cc9cbf04c0d747eb7f8cf7cf5e370f6d" dependencies = [ "arrayvec", - "base64", + "base64 0.13.1", "bytes", "derive_more", "ethabi 16.0.0", @@ -8966,7 +8979,7 @@ dependencies = [ "tokio", "turn", "url", - "uuid 1.2.2", + "uuid 1.3.0", "waitgroup", "webrtc-mdns", "webrtc-util", @@ -9074,9 +9087,9 @@ dependencies = [ [[package]] name = "which" -version = "4.3.0" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" +checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" dependencies = [ "either", "libc", @@ -9158,19 +9171,43 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - "windows_x86_64_gnu 0.42.0", + "windows_aarch64_msvc 0.42.1", + "windows_i686_gnu 0.42.1", + "windows_i686_msvc 0.42.1", + "windows_x86_64_gnu 0.42.1", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.1", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.1", + "windows_i686_gnu 0.42.1", + "windows_i686_msvc 0.42.1", + "windows_x86_64_gnu 0.42.1", "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", + "windows_x86_64_msvc 0.42.1", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" [[package]] name = "windows_aarch64_msvc" @@ -9180,9 +9217,9 @@ checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" [[package]] name = "windows_aarch64_msvc" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" [[package]] name = "windows_i686_gnu" @@ -9192,9 +9229,9 @@ checksum = 
"2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" [[package]] name = "windows_i686_gnu" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" [[package]] name = "windows_i686_msvc" @@ -9204,9 +9241,9 @@ checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" [[package]] name = "windows_i686_msvc" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" [[package]] name = "windows_x86_64_gnu" @@ -9216,15 +9253,15 @@ checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" [[package]] name = "windows_x86_64_gnu" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" +checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" [[package]] name = "windows_x86_64_msvc" @@ -9234,9 +9271,9 @@ checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" [[package]] name = "windows_x86_64_msvc" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" [[package]] name = "winreg" @@ -9249,13 +9286,14 @@ dependencies = [ [[package]] name = "ws_stream_wasm" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47ca1ab42f5afed7fc332b22b6e932ca5414b209465412c8cdf0ad23bc0de645" +checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" dependencies = [ "async_io_stream", "futures", "js-sys", + "log", "pharos", "rustc_version 0.4.0", "send_wrapper", @@ -9309,11 +9347,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fb9bace5b5589ffead1afb76e43e34cff39cd0f3ce7e170ae0c29e53b88eb1c" dependencies = [ "asn1-rs 0.3.1", - "base64", + "base64 0.13.1", "data-encoding", "der-parser 7.0.0", "lazy_static", - "nom 7.1.2", + "nom 7.1.3", "oid-registry 0.4.0", "ring", "rusticata-macros", @@ -9328,11 +9366,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" dependencies = [ "asn1-rs 0.5.1", - "base64", + "base64 0.13.1", "data-encoding", "der-parser 8.1.0", "lazy_static", - "nom 7.1.2", + "nom 7.1.3", "oid-registry 0.6.1", "rusticata-macros", "thiserror", diff --git a/Makefile b/Makefile index ebad9b63f8d..85872f016de 100644 --- a/Makefile +++ b/Makefile @@ -190,7 +190,7 @@ arbitrary-fuzz: # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: cargo install --force 
cargo-audit - cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159 + cargo audit --ignore RUSTSEC-2020-0071 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. vendor: From c33eb29ee336cab0fdc1554afe670533d53cd2eb Mon Sep 17 00:00:00 2001 From: Nazar Hussain Date: Wed, 8 Feb 2023 20:23:21 +0000 Subject: [PATCH 153/263] Fix the whitespace in docker workflow (#3952) ## Issue Addressed Fix a whitespace issue that was causing failure in the docker build. ## Additional Info https://github.com/sigp/lighthouse/pull/3948 --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 49288c594cb..2940ba769eb 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -106,7 +106,7 @@ jobs: --file ./Dockerfile.cross . \ --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX}${FEATURE_SUFFIX} \ --build-arg FEATURES=${FEATURES} \ - --provenance=false \ + --provenance=false \ --push build-docker-multiarch: name: build-docker-multiarch${{ matrix.modernity }} From aa5b7ef7839e15d55c3a252230ecb11c4abc0a52 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 9 Feb 2023 04:31:22 +0000 Subject: [PATCH 154/263] Remove participation rate from API docs (#3955) ## Issue Addressed NA ## Proposed Changes Removes the "Participation Rate" since it references an undefined variable: `previous_epoch_attesting_gwei`. I didn't replace it with anything since I think "Justification/Finalization Rate" already expresses what it was trying to express. ## Additional Info NA --- book/src/validator-inclusion.md | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index e6fbc0f16f8..0793af20db5 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -59,14 +59,7 @@ The following fields are returned: - `previous_epoch_head_attesting_gwei`: the total staked gwei that attested to a head beacon block that is in the canonical chain. -From this data you can calculate some interesting figures: - -#### Participation Rate - -`previous_epoch_attesting_gwei / previous_epoch_active_gwei` - -Expresses the ratio of validators that managed to have an attestation -voting upon the previous epoch included in a block. +From this data you can calculate: #### Justification/Finalization Rate From 2b735a9e8b24d1ab46a37f4f9423dab7c3e588c0 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Thu, 9 Feb 2023 23:51:17 +0000 Subject: [PATCH 155/263] Add attestation duty slot metric (#2704) ## Issue Addressed Resolves #2521 ## Proposed Changes Add a metric that indicates the next attestation duty slot for all managed validators in the validator client. 
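For illustration, the sketch below captures the replacement rule the new per-validator gauge follows: a duty slot that has already passed is replaced, while a freshly fetched next-epoch duty never clobbers a still-pending current-epoch duty. The function and its arguments are hypothetical simplifications of the logic added to `poll_beacon_attesters_for_epoch` in the diff below, not the actual code.

```rust
/// Sketch of the duty-slot replacement rule, assuming plain `u64` slots and a
/// fixed number of slots per epoch (the real code uses `Slot`/`Epoch` types).
fn should_replace_duty_slot(
    existing_slot: u64,
    duty_slot: u64,
    current_slot: u64,
    slots_per_epoch: u64,
) -> bool {
    let existing_epoch = existing_slot / slots_per_epoch;
    let duty_epoch = duty_slot / slots_per_epoch;

    // Replace once the recorded duty has passed, or when a fresher duty for
    // the same (or an earlier) epoch still lies ahead of the current slot.
    existing_slot < current_slot
        || (duty_epoch <= existing_epoch && duty_slot > current_slot && duty_slot != existing_slot)
}

fn main() {
    let spe = 32;
    // The recorded duty (slot 40) has passed at slot 41, so the next-epoch
    // duty at slot 70 takes its place.
    assert!(should_replace_duty_slot(40, 70, 41, spe));
    // A next-epoch duty must not clobber a still-pending current-epoch duty.
    assert!(!should_replace_duty_slot(45, 70, 41, spe));
}
```

Exposed as the `vc_attestation_duty_slot` gauge with a `validator` label, this lets operators alert on imminent or missed attestation duties for each managed validator.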
--- validator_client/src/cli.rs | 9 ++++ validator_client/src/config.rs | 10 ++++ validator_client/src/duties_service.rs | 55 ++++++++++++++++++-- validator_client/src/http_metrics/metrics.rs | 6 +++ validator_client/src/lib.rs | 1 + 5 files changed, 78 insertions(+), 3 deletions(-) diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index c82a1a9d362..9142a0c7ec4 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -231,6 +231,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { address of this server (e.g., http://localhost:5064).") .takes_value(true), ) + .arg( + Arg::with_name("enable-high-validator-count-metrics") + .long("enable-high-validator-count-metrics") + .help("Enable per validator metrics for > 64 validators. \ + Note: This flag is automatically enabled for <= 64 validators. \ + Enabling this metric for higher validator counts will lead to higher volume \ + of prometheus metrics being collected.") + .takes_value(false), + ) /* * Explorer metrics */ diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 22741dabbd7..0f24e81d54f 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -53,6 +53,11 @@ pub struct Config { /// If true, enable functionality that monitors the network for attestations or proposals from /// any of the validators managed by this client before starting up. pub enable_doppelganger_protection: bool, + /// If true, then we publish validator specific metrics (e.g next attestation duty slot) + /// for all our managed validators. + /// Note: We publish validator specific metrics for low validator counts without this flag + /// (<= 64 validators) + pub enable_high_validator_count_metrics: bool, /// Enable use of the blinded block endpoints during proposals. pub builder_proposals: bool, /// Overrides the timestamp field in builder api ValidatorRegistrationV1 @@ -99,6 +104,7 @@ impl Default for Config { http_metrics: <_>::default(), monitoring_api: None, enable_doppelganger_protection: false, + enable_high_validator_count_metrics: false, beacon_nodes_tls_certs: None, block_delay: None, builder_proposals: false, @@ -273,6 +279,10 @@ impl Config { config.http_metrics.enabled = true; } + if cli_args.is_present("enable-high-validator-count-metrics") { + config.enable_high_validator_count_metrics = true; + } + if let Some(address) = cli_args.value_of("metrics-address") { config.http_metrics.listen_addr = address .parse::() diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 86b8ca870e2..6ba2a2d1fdc 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -9,6 +9,7 @@ mod sync; use crate::beacon_node_fallback::{BeaconNodeFallback, OfflineOnFailure, RequireSynced}; +use crate::http_metrics::metrics::{get_int_gauge, set_int_gauge, ATTESTATION_DUTY}; use crate::{ block_service::BlockServiceNotification, http_metrics::metrics, @@ -39,6 +40,11 @@ const SUBSCRIPTION_BUFFER_SLOTS: u64 = 2; /// Only retain `HISTORICAL_DUTIES_EPOCHS` duties prior to the current epoch. const HISTORICAL_DUTIES_EPOCHS: u64 = 2; +/// Minimum number of validators for which we auto-enable per-validator metrics. +/// For validators greater than this value, we need to manually set the `enable-per-validator-metrics` +/// flag in the cli to enable collection of per validator metrics. 
+const VALIDATOR_METRICS_MIN_COUNT: usize = 64; + #[derive(Debug)] pub enum Error { UnableToReadSlotClock, @@ -121,6 +127,7 @@ pub struct DutiesService { /// This functionality is a little redundant since most BNs will likely reject duties when they /// aren't synced, but we keep it around for an emergency. pub require_synced: RequireSynced, + pub enable_high_validator_count_metrics: bool, pub context: RuntimeContext, pub spec: ChainSpec, } @@ -220,6 +227,12 @@ impl DutiesService { .cloned() .collect() } + + /// Returns `true` if we should collect per validator metrics and `false` otherwise. + pub fn per_validator_metrics(&self) -> bool { + self.enable_high_validator_count_metrics + || self.total_validator_count() <= VALIDATOR_METRICS_MIN_COUNT + } } /// Start the service that periodically polls the beacon node for validator duties. This will start @@ -501,6 +514,7 @@ async fn poll_beacon_attesters( current_epoch, &local_indices, &local_pubkeys, + current_slot, ) .await { @@ -520,9 +534,14 @@ async fn poll_beacon_attesters( ); // Download the duties and update the duties for the next epoch. - if let Err(e) = - poll_beacon_attesters_for_epoch(duties_service, next_epoch, &local_indices, &local_pubkeys) - .await + if let Err(e) = poll_beacon_attesters_for_epoch( + duties_service, + next_epoch, + &local_indices, + &local_pubkeys, + current_slot, + ) + .await { error!( log, @@ -619,6 +638,7 @@ async fn poll_beacon_attesters_for_epoch( epoch: Epoch, local_indices: &[u64], local_pubkeys: &HashSet, + current_slot: Slot, ) -> Result<(), Error> { let log = duties_service.context.log(); @@ -671,6 +691,35 @@ async fn poll_beacon_attesters_for_epoch( .data .into_iter() .filter(|duty| { + if duties_service.per_validator_metrics() { + let validator_index = duty.validator_index; + let duty_slot = duty.slot; + if let Some(existing_slot_gauge) = + get_int_gauge(&ATTESTATION_DUTY, &[&validator_index.to_string()]) + { + let existing_slot = Slot::new(existing_slot_gauge.get() as u64); + let existing_epoch = existing_slot.epoch(E::slots_per_epoch()); + + // First condition ensures that we switch to the next epoch duty slot + // once the current epoch duty slot passes. + // Second condition is to ensure that next epoch duties don't override + // current epoch duties. + if existing_slot < current_slot + || (duty_slot.epoch(E::slots_per_epoch()) <= existing_epoch + && duty_slot > current_slot + && duty_slot != existing_slot) + { + existing_slot_gauge.set(duty_slot.as_u64() as i64); + } + } else { + set_int_gauge( + &ATTESTATION_DUTY, + &[&validator_index.to_string()], + duty_slot.as_u64() as i64, + ); + } + } + local_pubkeys.contains(&duty.pubkey) && { // Only update the duties if either is true: // diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 146d008a575..0cb3417fc72 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -172,6 +172,12 @@ lazy_static::lazy_static! 
{ "Duration to obtain a signature", &["type"] ); + + pub static ref ATTESTATION_DUTY: Result = try_create_int_gauge_vec( + "vc_attestation_duty_slot", + "Attestation duty slot for all managed validators", + &["validator"] + ); } pub fn gather_prometheus_metrics( diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 00c3db7aa10..f2d64749019 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -422,6 +422,7 @@ impl ProductionValidatorClient { }, spec: context.eth2_config.spec.clone(), context: duties_context, + enable_high_validator_count_metrics: config.enable_high_validator_count_metrics, }); // Update the metrics server. From 5276dd0cb067f70af5e4b22551169e98fb6fd1d1 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 9 Feb 2023 23:51:18 +0000 Subject: [PATCH 156/263] Fix edge-case when finding the finalized descendant (#3924) ## Issue Addressed NA ## Description We were missing an edge case when checking to see if a block is a descendant of the finalized checkpoint. This edge case is described for one of the tests in this PR: https://github.com/sigp/lighthouse/blob/a119edc739e9dcefe1cb800a2ce9eb4baab55f20/consensus/proto_array/src/proto_array_fork_choice.rs#L1018-L1047 This bug presented itself in the following mainnet log: ``` Jan 26 15:12:42.841 ERRO Unable to validate attestation error: MissingBeaconState(0x7c30cb80ec3d4ec624133abfa70e4c6cfecfca456bfbbbff3393e14e5b20bf25), peer_id: 16Uiu2HAm8RPRciXJYtYc5c3qtCRdrZwkHn2BXN3XP1nSi1gxHYit, type: "unaggregated", slot: Slot(5660161), beacon_block_root: 0x4a45e59da7cb9487f4836c83bdd1b741b4f31c67010c7ae343fa6771b3330489 ``` Here the BN is rejecting an attestation because of a "missing beacon state". Whilst it was correct to reject the attestation, it should have rejected it because it attests to a block that conflicts with finality rather than claiming that the database is inconsistent. The block that this attestation points to (`0x4a45`) is block `C` in the above diagram. It is a non-canonical block in the first slot of an epoch that conflicts with the finalized checkpoint. Due to our lazy pruning of proto array, `0x4a45` was still present in proto-array. Our missed edge-case in [`ForkChoice::is_descendant_of_finalized`](https://github.com/sigp/lighthouse/blob/38514c07f222ff7783834c48cf5c0a6ee7f346d0/consensus/fork_choice/src/fork_choice.rs#L1375-L1379) would have indicated to us that the block is a descendant of the finalized block. Therefore, we would have accepted the attestation thinking that it attests to a descendant of the finalized *checkpoint*. Since we didn't have the shuffling for this erroneously processed block, we attempted to read its state from the database. This failed because we prune states from the database by keeping track of the tips of the chain and iterating back until we find a finalized block. This would have deleted `C` from the database, hence the `MissingBeaconState` error. 
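For context, the block tree referred to here (blocks `A`, `B` and `C`) is drawn out in the doc-comment of the new `finalized_descendant_edge_case` test further down. The following sketch, using hypothetical node and root types rather than the actual proto-array structures, shows the shape of the corrected check: walk the ancestors until reaching the first slot of the finalized epoch, and accept only if the block at that point is the finalized block itself. The real implementation also short-circuits using the justified/finalized checkpoints stored on each node.

```rust
/// Illustrative sketch only: a toy block tree and a simplified version of the
/// `is_finalized_checkpoint_or_descendant` walk.
#[derive(Clone, Copy)]
struct Node {
    slot: u64,
    root: u64,
    parent: Option<usize>,
}

fn is_finalized_checkpoint_or_descendant(
    nodes: &[Node],
    mut idx: usize,
    finalized_root: u64,
    finalized_slot: u64, // first slot of the finalized epoch
) -> bool {
    loop {
        let node = nodes[idx];
        // At or below the checkpoint slot, only the finalized block qualifies.
        if node.slot <= finalized_slot {
            return node.root == finalized_root;
        }
        match node.parent {
            Some(parent) => idx = parent,
            // A missing parent means it was pruned, i.e. it conflicts with finality.
            None => return false,
        }
    }
}

fn main() {
    // A (slot 31) <-- skipped slot 32 <-- B (slot 33, canonical)
    //            \-- C (slot 32, conflicts with the finalized checkpoint)
    let a = Node { slot: 31, root: 0xA, parent: None };
    let b = Node { slot: 33, root: 0xB, parent: Some(0) };
    let c = Node { slot: 32, root: 0xC, parent: Some(0) };
    let nodes = [a, b, c];

    // Finalized checkpoint: epoch 1, i.e. slot 32, with root `A`.
    let (finalized_root, finalized_slot) = (0xA, 32);

    // B descends from the finalized checkpoint...
    assert!(is_finalized_checkpoint_or_descendant(&nodes, 1, finalized_root, finalized_slot));
    // ...but C, despite descending from the finalized *block* A, does not.
    assert!(!is_finalized_checkpoint_or_descendant(&nodes, 2, finalized_root, finalized_slot));
}
```

Under a check of this shape, block `C` is rejected even though it descends from the finalized block `A`, which is how the attestation in the log above should have been handled.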
--- beacon_node/beacon_chain/src/beacon_chain.rs | 4 +- .../beacon_chain/src/block_verification.rs | 6 +- consensus/fork_choice/src/fork_choice.rs | 17 +- .../src/fork_choice_test_definition.rs | 2 +- consensus/proto_array/src/proto_array.rs | 74 ++++++- .../src/proto_array_fork_choice.rs | 193 +++++++++++++++++- 6 files changed, 276 insertions(+), 20 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3366e1364cf..6a67ae71ed2 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -8,7 +8,7 @@ use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::beacon_proposer_cache::BeaconProposerCache; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::{ - check_block_is_finalized_descendant, check_block_relevancy, get_block_root, + check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy, get_block_root, signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER, }; @@ -2736,7 +2736,7 @@ impl BeaconChain { let mut fork_choice = self.canonical_head.fork_choice_write_lock(); // Do not import a block that doesn't descend from the finalized root. - check_block_is_finalized_descendant(self, &fork_choice, &signed_block)?; + check_block_is_finalized_checkpoint_or_descendant(self, &fork_choice, &signed_block)?; // Register the new block with the fork choice service. { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index ad08bd9f4f3..4f65a05c56b 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -744,7 +744,7 @@ impl GossipVerifiedBlock { // Do not process a block that doesn't descend from the finalized root. // // We check this *before* we load the parent so that we can return a more detailed error. - check_block_is_finalized_descendant( + check_block_is_finalized_checkpoint_or_descendant( chain, &chain.canonical_head.fork_choice_write_lock(), &block, @@ -1564,12 +1564,12 @@ fn check_block_against_finalized_slot( /// ## Warning /// /// Taking a lock on the `chain.canonical_head.fork_choice` might cause a deadlock here. 
-pub fn check_block_is_finalized_descendant( +pub fn check_block_is_finalized_checkpoint_or_descendant( chain: &BeaconChain, fork_choice: &BeaconForkChoice, block: &Arc>, ) -> Result<(), BlockError> { - if fork_choice.is_descendant_of_finalized(block.parent_root()) { + if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root()) { Ok(()) } else { // If fork choice does *not* consider the parent to be a descendant of the finalized block, diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 290cef78ab5..afae7f058b4 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -721,7 +721,7 @@ where op: &InvalidationOperation, ) -> Result<(), Error> { self.proto_array - .process_execution_payload_invalidation(op) + .process_execution_payload_invalidation::(op) .map_err(Error::FailedToProcessInvalidExecutionPayload) } @@ -1282,7 +1282,7 @@ where if store.best_justified_checkpoint().epoch > store.justified_checkpoint().epoch { let store = &self.fc_store; - if self.is_descendant_of_finalized(store.best_justified_checkpoint().root) { + if self.is_finalized_checkpoint_or_descendant(store.best_justified_checkpoint().root) { let store = &mut self.fc_store; store .set_justified_checkpoint(*store.best_justified_checkpoint()) @@ -1323,12 +1323,13 @@ where /// Returns `true` if the block is known **and** a descendant of the finalized root. pub fn contains_block(&self, block_root: &Hash256) -> bool { - self.proto_array.contains_block(block_root) && self.is_descendant_of_finalized(*block_root) + self.proto_array.contains_block(block_root) + && self.is_finalized_checkpoint_or_descendant(*block_root) } /// Returns a `ProtoBlock` if the block is known **and** a descendant of the finalized root. pub fn get_block(&self, block_root: &Hash256) -> Option { - if self.is_descendant_of_finalized(*block_root) { + if self.is_finalized_checkpoint_or_descendant(*block_root) { self.proto_array.get_block(block_root) } else { None @@ -1337,7 +1338,7 @@ where /// Returns an `ExecutionStatus` if the block is known **and** a descendant of the finalized root. pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { - if self.is_descendant_of_finalized(*block_root) { + if self.is_finalized_checkpoint_or_descendant(*block_root) { self.proto_array.get_block_execution_status(block_root) } else { None @@ -1372,10 +1373,10 @@ where }) } - /// Return `true` if `block_root` is equal to the finalized root, or a known descendant of it. - pub fn is_descendant_of_finalized(&self, block_root: Hash256) -> bool { + /// Return `true` if `block_root` is equal to the finalized checkpoint, or a known descendant of it. + pub fn is_finalized_checkpoint_or_descendant(&self, block_root: Hash256) -> bool { self.proto_array - .is_descendant(self.fc_store.finalized_checkpoint().root, block_root) + .is_finalized_checkpoint_or_descendant::(block_root) } /// Returns `Ok(true)` if `block_root` has been imported optimistically or deemed invalid. 
diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 035fb799eea..68b3fb71981 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -273,7 +273,7 @@ impl ForkChoiceTestDefinition { } }; fork_choice - .process_execution_payload_invalidation(&op) + .process_execution_payload_invalidation::(&op) .unwrap() } Operation::AssertWeight { block_root, weight } => assert_eq!( diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index add84f54787..bf50c080261 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -451,7 +451,7 @@ impl ProtoArray { /// Invalidate zero or more blocks, as specified by the `InvalidationOperation`. /// /// See the documentation of `InvalidationOperation` for usage. - pub fn propagate_execution_payload_invalidation( + pub fn propagate_execution_payload_invalidation( &mut self, op: &InvalidationOperation, ) -> Result<(), Error> { @@ -482,7 +482,7 @@ impl ProtoArray { let latest_valid_ancestor_is_descendant = latest_valid_ancestor_root.map_or(false, |ancestor_root| { self.is_descendant(ancestor_root, head_block_root) - && self.is_descendant(self.finalized_checkpoint.root, ancestor_root) + && self.is_finalized_checkpoint_or_descendant::(ancestor_root) }); // Collect all *ancestors* which were declared invalid since they reside between the @@ -977,6 +977,12 @@ impl ProtoArray { /// ## Notes /// /// Still returns `true` if `ancestor_root` is known and `ancestor_root == descendant_root`. + /// + /// ## Warning + /// + /// Do not use this function to check if a block is a descendant of the + /// finalized checkpoint. Use `Self::is_finalized_checkpoint_or_descendant` + /// instead. pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool { self.indices .get(&ancestor_root) @@ -990,6 +996,70 @@ impl ProtoArray { .unwrap_or(false) } + /// Returns `true` if `root` is equal to or a descendant of + /// `self.finalized_checkpoint`. + /// + /// Notably, this function is checking ancestory of the finalized + /// *checkpoint* not the finalized *block*. + pub fn is_finalized_checkpoint_or_descendant(&self, root: Hash256) -> bool { + let finalized_root = self.finalized_checkpoint.root; + let finalized_slot = self + .finalized_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + + let mut node = if let Some(node) = self + .indices + .get(&root) + .and_then(|index| self.nodes.get(*index)) + { + node + } else { + // An unknown root is not a finalized descendant. This line can only + // be reached if the user supplies a root that is not known to fork + // choice. + return false; + }; + + // The finalized and justified checkpoints represent a list of known + // ancestors of `node` that are likely to coincide with the store's + // finalized checkpoint. + // + // Run this check once, outside of the loop rather than inside the loop. + // If the conditions don't match for this node then they're unlikely to + // start matching for its ancestors. + for checkpoint in &[ + node.finalized_checkpoint, + node.justified_checkpoint, + node.unrealized_finalized_checkpoint, + node.unrealized_justified_checkpoint, + ] { + if checkpoint.map_or(false, |cp| cp == self.finalized_checkpoint) { + return true; + } + } + + loop { + // If `node` is less than or equal to the finalized slot then `node` + // must be the finalized block. 
+ if node.slot <= finalized_slot { + return node.root == finalized_root; + } + + // Since `node` is from a higher slot that the finalized checkpoint, + // replace `node` with the parent of `node`. + if let Some(parent) = node.parent.and_then(|index| self.nodes.get(index)) { + node = parent + } else { + // If `node` is not the finalized block and its parent does not + // exist in fork choice, then the parent must have been pruned. + // Proto-array only prunes blocks prior to the finalized block, + // so this means the parent conflicts with finality. + return false; + }; + } + } + /// Returns the first *beacon block root* which contains an execution payload with the given /// `block_hash`, if any. pub fn execution_block_hash_to_beacon_block_root( diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index cbd369ae6ec..0e0d806e76e 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -358,12 +358,12 @@ impl ProtoArrayForkChoice { } /// See `ProtoArray::propagate_execution_payload_invalidation` for documentation. - pub fn process_execution_payload_invalidation( + pub fn process_execution_payload_invalidation( &mut self, op: &InvalidationOperation, ) -> Result<(), String> { self.proto_array - .propagate_execution_payload_invalidation(op) + .propagate_execution_payload_invalidation::(op) .map_err(|e| format!("Failed to process invalid payload: {:?}", e)) } @@ -748,6 +748,15 @@ impl ProtoArrayForkChoice { .is_descendant(ancestor_root, descendant_root) } + /// See `ProtoArray` documentation. + pub fn is_finalized_checkpoint_or_descendant( + &self, + descendant_root: Hash256, + ) -> bool { + self.proto_array + .is_finalized_checkpoint_or_descendant::(descendant_root) + } + pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> { if validator_index < self.votes.0.len() { let vote = &self.votes.0[validator_index]; @@ -928,6 +937,10 @@ mod test_compute_deltas { epoch: genesis_epoch, root: finalized_root, }; + let junk_checkpoint = Checkpoint { + epoch: Epoch::new(42), + root: Hash256::repeat_byte(42), + }; let mut fc = ProtoArrayForkChoice::new::( genesis_slot, @@ -973,8 +986,10 @@ mod test_compute_deltas { target_root: finalized_root, current_epoch_shuffling_id: junk_shuffling_id.clone(), next_epoch_shuffling_id: junk_shuffling_id, - justified_checkpoint: genesis_checkpoint, - finalized_checkpoint: genesis_checkpoint, + // Use the junk checkpoint for the next to values to prevent + // the loop-shortcutting mechanism from triggering. 
+ justified_checkpoint: junk_checkpoint, + finalized_checkpoint: junk_checkpoint, execution_status, unrealized_justified_checkpoint: None, unrealized_finalized_checkpoint: None, @@ -993,6 +1008,11 @@ mod test_compute_deltas { assert!(!fc.is_descendant(finalized_root, not_finalized_desc)); assert!(!fc.is_descendant(finalized_root, unknown)); + assert!(fc.is_finalized_checkpoint_or_descendant::(finalized_root)); + assert!(fc.is_finalized_checkpoint_or_descendant::(finalized_desc)); + assert!(!fc.is_finalized_checkpoint_or_descendant::(not_finalized_desc)); + assert!(!fc.is_finalized_checkpoint_or_descendant::(unknown)); + assert!(!fc.is_descendant(finalized_desc, not_finalized_desc)); assert!(fc.is_descendant(finalized_desc, finalized_desc)); assert!(!fc.is_descendant(finalized_desc, finalized_root)); @@ -1004,6 +1024,171 @@ mod test_compute_deltas { assert!(!fc.is_descendant(not_finalized_desc, unknown)); } + /// This test covers an interesting case where a block can be a descendant + /// of the finalized *block*, but not a descenant of the finalized + /// *checkpoint*. + /// + /// ## Example + /// + /// Consider this block tree which has three blocks (`A`, `B` and `C`): + /// + /// ```ignore + /// [A] <--- [-] <--- [B] + /// | + /// |--[C] + /// ``` + /// + /// - `A` (slot 31) is the common descendant. + /// - `B` (slot 33) descends from `A`, but there is a single skip slot + /// between it and `A`. + /// - `C` (slot 32) descends from `A` and conflicts with `B`. + /// + /// Imagine that the `B` chain is finalized at epoch 1. This means that the + /// finalized checkpoint points to the skipped slot at 32. The root of the + /// finalized checkpoint is `A`. + /// + /// In this scenario, the block `C` has the finalized root (`A`) as an + /// ancestor whilst simultaneously conflicting with the finalized + /// checkpoint. + /// + /// This means that to ensure a block does not conflict with finality we + /// must check to ensure that it's an ancestor of the finalized + /// *checkpoint*, not just the finalized *block*. 
+ #[test] + fn finalized_descendant_edge_case() { + let get_block_root = Hash256::from_low_u64_be; + let genesis_slot = Slot::new(0); + let junk_state_root = Hash256::zero(); + let junk_shuffling_id = + AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); + let execution_status = ExecutionStatus::irrelevant(); + + let genesis_checkpoint = Checkpoint { + epoch: Epoch::new(0), + root: get_block_root(0), + }; + + let mut fc = ProtoArrayForkChoice::new::( + genesis_slot, + junk_state_root, + genesis_checkpoint, + genesis_checkpoint, + junk_shuffling_id.clone(), + junk_shuffling_id.clone(), + execution_status, + CountUnrealizedFull::default(), + ) + .unwrap(); + + struct TestBlock { + slot: u64, + root: u64, + parent_root: u64, + } + + let insert_block = |fc: &mut ProtoArrayForkChoice, block: TestBlock| { + fc.proto_array + .on_block::( + Block { + slot: Slot::from(block.slot), + root: get_block_root(block.root), + parent_root: Some(get_block_root(block.parent_root)), + state_root: Hash256::zero(), + target_root: Hash256::zero(), + current_epoch_shuffling_id: junk_shuffling_id.clone(), + next_epoch_shuffling_id: junk_shuffling_id.clone(), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(0), + root: get_block_root(0), + }, + finalized_checkpoint: genesis_checkpoint, + execution_status, + unrealized_justified_checkpoint: Some(genesis_checkpoint), + unrealized_finalized_checkpoint: Some(genesis_checkpoint), + }, + Slot::from(block.slot), + ) + .unwrap(); + }; + + /* + * Start of interesting part of tests. + */ + + // Produce the 0th epoch of blocks. They should all form a chain from + // the genesis block. + for i in 1..MainnetEthSpec::slots_per_epoch() { + insert_block( + &mut fc, + TestBlock { + slot: i, + root: i, + parent_root: i - 1, + }, + ) + } + + let last_slot_of_epoch_0 = MainnetEthSpec::slots_per_epoch() - 1; + + // Produce a block that descends from the last block of epoch -. + // + // This block will be non-canonical. + let non_canonical_slot = last_slot_of_epoch_0 + 1; + insert_block( + &mut fc, + TestBlock { + slot: non_canonical_slot, + root: non_canonical_slot, + parent_root: non_canonical_slot - 1, + }, + ); + + // Produce a block that descends from the last block of the 0th epoch, + // that skips the 1st slot of the 1st epoch. + // + // This block will be canonical. + let canonical_slot = last_slot_of_epoch_0 + 2; + insert_block( + &mut fc, + TestBlock { + slot: canonical_slot, + root: canonical_slot, + parent_root: non_canonical_slot - 1, + }, + ); + + let finalized_root = get_block_root(last_slot_of_epoch_0); + + // Set the finalized checkpoint to finalize the first slot of epoch 1 on + // the canonical chain. 
+ fc.proto_array.finalized_checkpoint = Checkpoint { + root: finalized_root, + epoch: Epoch::new(1), + }; + + assert!( + fc.proto_array + .is_finalized_checkpoint_or_descendant::(finalized_root), + "the finalized checkpoint is the finalized checkpoint" + ); + + assert!( + fc.proto_array + .is_finalized_checkpoint_or_descendant::(get_block_root( + canonical_slot + )), + "the canonical block is a descendant of the finalized checkpoint" + ); + assert!( + !fc.proto_array + .is_finalized_checkpoint_or_descendant::(get_block_root( + non_canonical_slot + )), + "although the non-canonical block is a descendant of the finalized block, \ + it's not a descendant of the finalized checkpoint" + ); + } + #[test] fn zero_hash() { let validator_count: usize = 16; From c9354a9d25906d6ddbe6f8d376b65d6651003993 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 10 Feb 2023 06:19:42 +0000 Subject: [PATCH 157/263] Tweaks to reward APIs (#3957) ## Proposed Changes * Return the effective balance in gwei to align with the spec ([ideal attestation rewards](https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Rewards/getAttestationsRewards)). * Use quoted `i64`s for attestation and sync committee rewards. --- .../beacon_chain/src/attestation_rewards.rs | 27 +++++++------- .../src/lighthouse/attestation_rewards.rs | 2 ++ .../src/lighthouse/sync_committee_rewards.rs | 1 + consensus/serde_utils/src/lib.rs | 2 +- consensus/serde_utils/src/quoted_int.rs | 36 ++++++++++++++++--- 5 files changed, 49 insertions(+), 19 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 3f39946978f..a4a661197f7 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -72,6 +72,8 @@ impl BeaconChain { BaseRewardPerIncrement::new(total_active_balance, spec)?; for effective_balance_eth in 0..=32 { + let effective_balance = + effective_balance_eth.safe_mul(spec.effective_balance_increment)?; let base_reward = effective_balance_eth.safe_mul(base_reward_per_increment.as_u64())?; @@ -86,9 +88,9 @@ impl BeaconChain { .safe_div(WEIGHT_DENOMINATOR)?; if !state.is_in_inactivity_leak(previous_epoch, spec) { ideal_rewards_hashmap - .insert((flag_index, effective_balance_eth), (ideal_reward, penalty)); + .insert((flag_index, effective_balance), (ideal_reward, penalty)); } else { - ideal_rewards_hashmap.insert((flag_index, effective_balance_eth), (0, penalty)); + ideal_rewards_hashmap.insert((flag_index, effective_balance), (0, penalty)); } } } @@ -119,12 +121,9 @@ impl BeaconChain { if eligible { let effective_balance = state.get_effective_balance(*validator_index)?; - let effective_balance_eth = - effective_balance.safe_div(spec.effective_balance_increment)?; - for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() { let (ideal_reward, penalty) = ideal_rewards_hashmap - .get(&(flag_index, effective_balance_eth)) + .get(&(flag_index, effective_balance)) .ok_or(BeaconChainError::AttestationRewardsError)?; let voted_correctly = participation_cache .get_unslashed_participating_indices(flag_index, previous_epoch) @@ -160,21 +159,21 @@ impl BeaconChain { let mut ideal_rewards: Vec = ideal_rewards_hashmap .iter() .map( - |((flag_index, effective_balance_eth), (ideal_reward, _penalty))| { - (flag_index, effective_balance_eth, ideal_reward) + |((flag_index, effective_balance), (ideal_reward, _penalty))| { + (flag_index, effective_balance, ideal_reward) }, ) .fold( HashMap::new(), - |mut acc, 
(flag_index, effective_balance_eth, ideal_reward)| { - let entry = acc.entry(*effective_balance_eth as u32).or_insert( - IdealAttestationRewards { - effective_balance: *effective_balance_eth, + |mut acc, (flag_index, &effective_balance, ideal_reward)| { + let entry = acc + .entry(effective_balance) + .or_insert(IdealAttestationRewards { + effective_balance, head: 0, target: 0, source: 0, - }, - ); + }); match *flag_index { TIMELY_SOURCE_FLAG_INDEX => entry.source += ideal_reward, TIMELY_TARGET_FLAG_INDEX => entry.target += ideal_reward, diff --git a/common/eth2/src/lighthouse/attestation_rewards.rs b/common/eth2/src/lighthouse/attestation_rewards.rs index 3fd59782c82..314ffb85121 100644 --- a/common/eth2/src/lighthouse/attestation_rewards.rs +++ b/common/eth2/src/lighthouse/attestation_rewards.rs @@ -28,8 +28,10 @@ pub struct TotalAttestationRewards { #[serde(with = "eth2_serde_utils::quoted_u64")] pub head: u64, // attester's reward for target vote in gwei + #[serde(with = "eth2_serde_utils::quoted_i64")] pub target: i64, // attester's reward for source vote in gwei + #[serde(with = "eth2_serde_utils::quoted_i64")] pub source: i64, // TBD attester's inclusion_delay reward in gwei (phase0 only) // pub inclusion_delay: u64, diff --git a/common/eth2/src/lighthouse/sync_committee_rewards.rs b/common/eth2/src/lighthouse/sync_committee_rewards.rs index cdd6850650c..e215d8e3e0b 100644 --- a/common/eth2/src/lighthouse/sync_committee_rewards.rs +++ b/common/eth2/src/lighthouse/sync_committee_rewards.rs @@ -8,5 +8,6 @@ pub struct SyncCommitteeReward { #[serde(with = "eth2_serde_utils::quoted_u64")] pub validator_index: u64, // sync committee reward in gwei for the validator + #[serde(with = "eth2_serde_utils::quoted_i64")] pub reward: i64, } diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs index 92b5966c9a0..5c5dafc6656 100644 --- a/consensus/serde_utils/src/lib.rs +++ b/consensus/serde_utils/src/lib.rs @@ -12,4 +12,4 @@ pub mod u64_hex_be; pub mod u8_hex; pub use fixed_bytes_hex::{bytes_4_hex, bytes_8_hex}; -pub use quoted_int::{quoted_u256, quoted_u32, quoted_u64, quoted_u8}; +pub use quoted_int::{quoted_i64, quoted_u256, quoted_u32, quoted_u64, quoted_u8}; diff --git a/consensus/serde_utils/src/quoted_int.rs b/consensus/serde_utils/src/quoted_int.rs index 822acb5ee82..0cc35aa318c 100644 --- a/consensus/serde_utils/src/quoted_int.rs +++ b/consensus/serde_utils/src/quoted_int.rs @@ -11,7 +11,7 @@ use std::convert::TryFrom; use std::marker::PhantomData; macro_rules! define_mod { - ($int: ty, $visit_fn: ident) => { + ($int: ty) => { /// Serde support for deserializing quoted integers. /// /// Configurable so that quotes are either required or optional. @@ -140,19 +140,25 @@ macro_rules! 
define_mod { pub mod quoted_u8 { use super::*; - define_mod!(u8, visit_u8); + define_mod!(u8); } pub mod quoted_u32 { use super::*; - define_mod!(u32, visit_u32); + define_mod!(u32); } pub mod quoted_u64 { use super::*; - define_mod!(u64, visit_u64); + define_mod!(u64); +} + +pub mod quoted_i64 { + use super::*; + + define_mod!(i64); } pub mod quoted_u256 { @@ -216,4 +222,26 @@ mod test { fn u256_without_quotes() { serde_json::from_str::("1").unwrap_err(); } + + #[derive(Debug, PartialEq, Serialize, Deserialize)] + #[serde(transparent)] + struct WrappedI64(#[serde(with = "quoted_i64")] i64); + + #[test] + fn negative_i64_with_quotes() { + assert_eq!( + serde_json::from_str::("\"-200\"").unwrap().0, + -200 + ); + assert_eq!( + serde_json::to_string(&WrappedI64(-12_500)).unwrap(), + "\"-12500\"" + ); + } + + // It would be OK if this worked, but we don't need it to (i64s should always be quoted). + #[test] + fn negative_i64_without_quotes() { + serde_json::from_str::("-200").unwrap_err(); + } } From 39f8327f734e62fbe4d49c574515a17998d88852 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Fri, 10 Feb 2023 08:49:25 -0600 Subject: [PATCH 158/263] Properly Deserialize ForkVersionedResponses (#3944) * Move ForkVersionedResponse to consensus/types * Properly Deserialize ForkVersionedResponses * Elide Types in from_value Calls * Added Tests for ForkVersionedResponse Deserialize * Address Sean's Comments & Make Less Restrictive * Utilize `map_fork_name!` --- beacon_node/execution_layer/src/lib.rs | 11 +- beacon_node/http_api/src/version.rs | 8 +- common/eth2/src/lib.rs | 63 +------- common/eth2/src/types.rs | 39 +++-- consensus/types/src/beacon_block.rs | 18 +++ consensus/types/src/beacon_state.rs | 16 ++ consensus/types/src/builder_bid.rs | 58 +++++++- consensus/types/src/execution_payload.rs | 23 +++ .../types/src/execution_payload_header.rs | 26 ++++ .../types/src/fork_versioned_response.rs | 138 ++++++++++++++++++ consensus/types/src/lib.rs | 4 + consensus/types/src/signed_beacon_block.rs | 18 +++ 12 files changed, 335 insertions(+), 87 deletions(-) create mode 100644 consensus/types/src/fork_versioned_response.rs diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 752fc8f6815..2337776c4f6 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -13,7 +13,7 @@ pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; pub use engines::{EngineState, ForkchoiceState}; -use eth2::types::{builder_bid::SignedBuilderBid, ForkVersionedResponse}; +use eth2::types::builder_bid::SignedBuilderBid; use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; use payload_status::process_payload_status; @@ -38,11 +38,10 @@ use tokio::{ use tokio_stream::wrappers::WatchStream; use types::{AbstractExecPayload, BeaconStateError, Blob, ExecPayload, KzgCommitment}; use types::{ - BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName, - ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256, -}; -use types::{ - ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, + BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ExecutionPayload, + ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, ForkName, + ForkVersionedResponse, ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, 
+ Slot, Uint256, }; mod block_hash; diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index 87ba3a4663f..30f475e689c 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,9 +1,9 @@ -use crate::api_types::{ - EndpointVersion, ExecutionOptimisticForkVersionedResponse, ForkVersionedResponse, -}; +use crate::api_types::EndpointVersion; use eth2::CONSENSUS_VERSION_HEADER; use serde::Serialize; -use types::{ForkName, InconsistentFork}; +use types::{ + ExecutionOptimisticForkVersionedResponse, ForkName, ForkVersionedResponse, InconsistentFork, +}; use warp::reply::{self, Reply, WithHeader}; pub const V1: EndpointVersion = EndpointVersion(1); diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 9c4248d4d98..4a3114e4327 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -14,9 +14,8 @@ pub mod lighthouse_vc; pub mod mixin; pub mod types; -use self::mixin::{RequestAccept, ResponseForkName, ResponseOptional}; +use self::mixin::{RequestAccept, ResponseOptional}; use self::types::{Error as ResponseError, *}; -use ::types::map_fork_name_with; use futures::Stream; use futures_util::StreamExt; use lighthouse_network::PeerId; @@ -683,35 +682,7 @@ impl BeaconNodeHttpClient { None => return Ok(None), }; - // If present, use the fork provided in the headers to decode the block. Gracefully handle - // missing and malformed fork names by falling back to regular deserialisation. - let (block, version, execution_optimistic) = match response.fork_name_from_header() { - Ok(Some(fork_name)) => { - let (data, (version, execution_optimistic)) = - map_fork_name_with!(fork_name, SignedBeaconBlock, { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, (version, execution_optimistic)) - }); - (data, version, execution_optimistic) - } - Ok(None) | Err(_) => { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, version, execution_optimistic) - } - }; - Ok(Some(ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data: block, - })) + Ok(Some(response.json().await?)) } /// `GET v1/beacon/blinded_blocks/{block_id}` @@ -728,35 +699,7 @@ impl BeaconNodeHttpClient { None => return Ok(None), }; - // If present, use the fork provided in the headers to decode the block. Gracefully handle - // missing and malformed fork names by falling back to regular deserialisation. 
- let (block, version, execution_optimistic) = match response.fork_name_from_header() { - Ok(Some(fork_name)) => { - let (data, (version, execution_optimistic)) = - map_fork_name_with!(fork_name, SignedBlindedBeaconBlock, { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, (version, execution_optimistic)) - }); - (data, version, execution_optimistic) - } - Ok(None) | Err(_) => { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, version, execution_optimistic) - } - }; - Ok(Some(ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data: block, - })) + Ok(Some(response.json().await?)) } /// `GET v1/beacon/blocks` (LEGACY) diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 4b7ae5539a3..36a690911b5 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -236,21 +236,6 @@ impl<'a, T: Serialize> From<&'a T> for GenericResponseRef<'a, T> { } } -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct ExecutionOptimisticForkVersionedResponse { - #[serde(skip_serializing_if = "Option::is_none")] - pub version: Option, - pub execution_optimistic: Option, - pub data: T, -} - -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct ForkVersionedResponse { - #[serde(skip_serializing_if = "Option::is_none")] - pub version: Option, - pub data: T, -} - #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] pub struct RootData { pub root: Hash256, @@ -1128,6 +1113,30 @@ pub struct BlocksAndBlobs> { pub kzg_aggregate_proof: KzgProof, } +impl> ForkVersionDeserialize + for BlocksAndBlobs +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + #[derive(Deserialize)] + #[serde(bound = "T: EthSpec")] + struct Helper { + block: serde_json::Value, + blobs: Vec>, + kzg_aggregate_proof: KzgProof, + } + let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; + + Ok(Self { + block: BeaconBlock::deserialize_by_fork::<'de, D>(helper.block, fork_name)?, + blobs: helper.blobs, + kzg_aggregate_proof: helper.kzg_aggregate_proof, + }) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index f7b9790b4dc..f960b21178f 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -685,6 +685,24 @@ impl From>> } } +impl> ForkVersionDeserialize + for BeaconBlock +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + Ok(map_fork_name!( + fork_name, + Self, + serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!( + "BeaconBlock failed to deserialize: {:?}", + e + )))? 
+ )) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index b44c14ded5a..0b07ce49580 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1853,3 +1853,19 @@ impl CompareFields for BeaconState { } } } + +impl ForkVersionDeserialize for BeaconState { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + Ok(map_fork_name!( + fork_name, + Self, + serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!( + "BeaconState failed to deserialize: {:?}", + e + )))? + )) + } +} diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 818ec52b813..e922e81c706 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,6 +1,6 @@ use crate::{ - AbstractExecPayload, ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, SignedRoot, - Uint256, + AbstractExecPayload, ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, ForkName, + ForkVersionDeserialize, SignedRoot, Uint256, }; use bls::PublicKeyBytes; use bls::Signature; @@ -34,6 +34,60 @@ pub struct SignedBuilderBid> { pub signature: Signature, } +impl> ForkVersionDeserialize + for BuilderBid +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + let convert_err = |_| { + serde::de::Error::custom( + "BuilderBid failed to deserialize: unable to convert payload header to payload", + ) + }; + + #[derive(Deserialize)] + struct Helper { + header: serde_json::Value, + #[serde(with = "eth2_serde_utils::quoted_u256")] + value: Uint256, + pubkey: PublicKeyBytes, + } + let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; + let payload_header = + ExecutionPayloadHeader::deserialize_by_fork::<'de, D>(helper.header, fork_name)?; + + Ok(Self { + header: Payload::try_from(payload_header).map_err(convert_err)?, + value: helper.value, + pubkey: helper.pubkey, + _phantom_data: Default::default(), + }) + } +} + +impl> ForkVersionDeserialize + for SignedBuilderBid +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + #[derive(Deserialize)] + struct Helper { + pub message: serde_json::Value, + pub signature: Signature, + } + let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; + + Ok(Self { + message: BuilderBid::deserialize_by_fork::<'de, D>(helper.message, fork_name)?, + signature: helper.signature, + }) + } +} + struct BlindedPayloadAsHeader(PhantomData); impl> SerializeAs for BlindedPayloadAsHeader { diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 1721960f8b4..16b27783555 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -146,3 +146,26 @@ impl ExecutionPayload { + (T::max_withdrawals_per_payload() * ::ssz_fixed_len()) } } + +impl ForkVersionDeserialize for ExecutionPayload { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + let convert_err = |e| { + serde::de::Error::custom(format!("ExecutionPayload failed to deserialize: {:?}", e)) + }; + + Ok(match fork_name { + ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), + 
ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Eip4844 => Self::Eip4844(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Base | ForkName::Altair => { + return Err(serde::de::Error::custom(format!( + "ExecutionPayload failed to deserialize: unsupported fork '{}'", + fork_name + ))); + } + }) + } +} diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 42e44ed739c..695c0cfdf4f 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -282,3 +282,29 @@ impl TryFrom> for ExecutionPayloadHeaderEi } } } + +impl ForkVersionDeserialize for ExecutionPayloadHeader { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + let convert_err = |e| { + serde::de::Error::custom(format!( + "ExecutionPayloadHeader failed to deserialize: {:?}", + e + )) + }; + + Ok(match fork_name { + ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Eip4844 => Self::Eip4844(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Base | ForkName::Altair => { + return Err(serde::de::Error::custom(format!( + "ExecutionPayloadHeader failed to deserialize: unsupported fork '{}'", + fork_name + ))); + } + }) + } +} diff --git a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs new file mode 100644 index 00000000000..07ff40b27ef --- /dev/null +++ b/consensus/types/src/fork_versioned_response.rs @@ -0,0 +1,138 @@ +use crate::ForkName; +use serde::de::DeserializeOwned; +use serde::{Deserialize, Deserializer, Serialize}; +use serde_json::value::Value; +use std::sync::Arc; + +// Deserialize is only implemented for types that implement ForkVersionDeserialize +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct ExecutionOptimisticForkVersionedResponse { + #[serde(skip_serializing_if = "Option::is_none")] + pub version: Option, + pub execution_optimistic: Option, + pub data: T, +} + +impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticForkVersionedResponse +where + F: ForkVersionDeserialize, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + struct Helper { + version: Option, + execution_optimistic: Option, + data: serde_json::Value, + } + + let helper = Helper::deserialize(deserializer)?; + let data = match helper.version { + Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?, + None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, + }; + + Ok(ExecutionOptimisticForkVersionedResponse { + version: helper.version, + execution_optimistic: helper.execution_optimistic, + data, + }) + } +} + +pub trait ForkVersionDeserialize: Sized + DeserializeOwned { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: Value, + fork_name: ForkName, + ) -> Result; +} + +// Deserialize is only implemented for types that implement ForkVersionDeserialize +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct ForkVersionedResponse { + #[serde(skip_serializing_if = "Option::is_none")] + pub version: Option, + pub data: T, +} + +impl<'de, F> serde::Deserialize<'de> for ForkVersionedResponse +where + F: ForkVersionDeserialize, +{ + fn deserialize(deserializer: D) -> 
Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + struct Helper { + version: Option, + data: serde_json::Value, + } + + let helper = Helper::deserialize(deserializer)?; + let data = match helper.version { + Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?, + None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, + }; + + Ok(ForkVersionedResponse { + version: helper.version, + data, + }) + } +} + +impl ForkVersionDeserialize for Arc { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: Value, + fork_name: ForkName, + ) -> Result { + Ok(Arc::new(F::deserialize_by_fork::<'de, D>( + value, fork_name, + )?)) + } +} + +#[cfg(test)] +mod fork_version_response_tests { + use crate::{ + ExecutionPayload, ExecutionPayloadMerge, ForkName, ForkVersionedResponse, MainnetEthSpec, + }; + use serde_json::json; + + #[test] + fn fork_versioned_response_deserialize_correct_fork() { + type E = MainnetEthSpec; + + let response_json = + serde_json::to_string(&json!(ForkVersionedResponse::> { + version: Some(ForkName::Merge), + data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()), + })) + .unwrap(); + + let result: Result>, _> = + serde_json::from_str(&response_json); + + assert!(result.is_ok()); + } + + #[test] + fn fork_versioned_response_deserialize_incorrect_fork() { + type E = MainnetEthSpec; + + let response_json = + serde_json::to_string(&json!(ForkVersionedResponse::> { + version: Some(ForkName::Capella), + data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()), + })) + .unwrap(); + + let result: Result>, _> = + serde_json::from_str(&response_json); + + assert!(result.is_err()); + } +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 7bb0045e24d..8d9156ff5d3 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -46,6 +46,7 @@ pub mod execution_payload_header; pub mod fork; pub mod fork_data; pub mod fork_name; +pub mod fork_versioned_response; pub mod free_attestation; pub mod graffiti; pub mod historical_batch; @@ -150,6 +151,9 @@ pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; pub use crate::fork_name::{ForkName, InconsistentFork}; +pub use crate::fork_versioned_response::{ + ExecutionOptimisticForkVersionedResponse, ForkVersionDeserialize, ForkVersionedResponse, +}; pub use crate::free_attestation::FreeAttestation; pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index cd6cd5cb9ec..70fb28fbe7a 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -487,6 +487,24 @@ impl SignedBeaconBlock { } } +impl> ForkVersionDeserialize + for SignedBeaconBlock +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + Ok(map_fork_name!( + fork_name, + Self, + serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!( + "SignedBeaconBlock failed to deserialize: {:?}", + e + )))? 
+ )) + } +} + #[cfg(test)] mod test { use super::*; From e743d75c9b408b3fad9fd81cce342b5c70209e83 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Fri, 10 Feb 2023 13:30:14 -0600 Subject: [PATCH 159/263] Update Mock Builder for Post-Capella Tests (#3958) * Update Mock Builder for Post-Capella Tests * Add _mut Suffix to BidStuff Functions * Fix Setting Gas Limit --- Cargo.lock | 19 +- Cargo.toml | 7 + beacon_node/execution_layer/Cargo.toml | 6 +- beacon_node/execution_layer/src/lib.rs | 28 +- .../src/test_utils/mock_builder.rs | 246 +++++++++++++----- beacon_node/http_api/tests/tests.rs | 113 ++++++++ 6 files changed, 344 insertions(+), 75 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5aa7a392313..014e03d25fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -538,7 +538,7 @@ checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" [[package]] name = "beacon-api-client" version = "0.1.0" -source = "git+https://github.com/ralexstokes/beacon-api-client?rev=7d5d8dad1648f771573f42585ad8080a45b05689#7d5d8dad1648f771573f42585ad8080a45b05689" +source = "git+https://github.com/ralexstokes/beacon-api-client#53690a711e33614d59d4d44fb09762b4699e2a4e" dependencies = [ "ethereum-consensus", "http", @@ -2342,7 +2342,7 @@ dependencies = [ [[package]] name = "ethereum-consensus" version = "0.1.1" -source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=a8110af76d97bf2bf27fb987a671808fcbdf1834#a8110af76d97bf2bf27fb987a671808fcbdf1834" +source = "git+https://github.com/ralexstokes//ethereum-consensus?rev=9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d#9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" dependencies = [ "async-stream", "blst", @@ -2351,6 +2351,7 @@ dependencies = [ "hex", "integer-sqrt", "multiaddr 0.14.0", + "multihash", "rand 0.8.5", "serde", "serde_json", @@ -2508,7 +2509,7 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "lru 0.7.8", - "mev-build-rs", + "mev-rs", "parking_lot 0.12.1", "rand 0.8.5", "reqwest", @@ -4678,18 +4679,19 @@ dependencies = [ ] [[package]] -name = "mev-build-rs" +name = "mev-rs" version = "0.2.1" -source = "git+https://github.com/ralexstokes/mev-rs?rev=6c99b0fbdc0427b1625469d2e575303ce08de5b8#6c99b0fbdc0427b1625469d2e575303ce08de5b8" +source = "git+https://github.com/ralexstokes//mev-rs?rev=7813d4a4a564e0754e9aaab2d95520ba437c3889#7813d4a4a564e0754e9aaab2d95520ba437c3889" dependencies = [ "async-trait", "axum", "beacon-api-client", "ethereum-consensus", + "hyper", "serde", - "serde_json", "ssz-rs", "thiserror", + "tokio", "tracing", ] @@ -7303,11 +7305,10 @@ dependencies = [ [[package]] name = "ssz-rs" version = "0.8.0" -source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737" +source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" dependencies = [ "bitvec 1.0.1", "hex", - "lazy_static", "num-bigint", "serde", "sha2 0.9.9", @@ -7318,7 +7319,7 @@ dependencies = [ [[package]] name = "ssz-rs-derive" version = "0.8.0" -source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737" +source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 88badcc5f1b..c80a734ae08 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -103,6 +103,13 @@ 
tree_hash_derive = { path = "consensus/tree_hash_derive" } eth2_serde_utils = { path = "consensus/serde_utils" } arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="a572fd8743012a4f1ada5ee5968b1b3619c427ba" } +[patch."https://github.com/ralexstokes/mev-rs"] +mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" } +[patch."https://github.com/ralexstokes/ethereum-consensus"] +ethereum-consensus = { git = "https://github.com/ralexstokes//ethereum-consensus", rev = "9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" } +[patch."https://github.com/ralexstokes/ssz-rs"] +ssz-rs = { git = "https://github.com/ralexstokes//ssz-rs", rev = "adf1a0b14cef90b9536f28ef89da1fab316465e1" } + [profile.maxperf] inherits = "release" lto = "fat" diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 5b016b46490..1b687a8b60e 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -41,9 +41,9 @@ lazy_static = "1.4.0" ethers-core = "1.0.2" builder_client = { path = "../builder_client" } fork_choice = { path = "../../consensus/fork_choice" } -mev-build-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "6c99b0fbdc0427b1625469d2e575303ce08de5b8" } -ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "a8110af76d97bf2bf27fb987a671808fcbdf1834" } -ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f1" } +mev-rs = { git = "https://github.com/ralexstokes/mev-rs" } +ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus" } +ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" } tokio-stream = { version = "0.1.9", features = [ "sync" ] } strum = "0.24.0" keccak-hash = "0.10.0" diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 2337776c4f6..3b354d62eb0 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -36,7 +36,8 @@ use tokio::{ time::sleep, }; use tokio_stream::wrappers::WatchStream; -use types::{AbstractExecPayload, BeaconStateError, Blob, ExecPayload, KzgCommitment}; +use tree_hash::TreeHash; +use types::{AbstractExecPayload, BeaconStateError, Blob, ExecPayload, KzgCommitment, Withdrawals}; use types::{ BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, ForkName, @@ -1874,10 +1875,9 @@ enum InvalidBuilderPayload { signature: Signature, pubkey: PublicKeyBytes, }, - #[allow(dead_code)] WithdrawalsRoot { - payload: Hash256, - expected: Hash256, + payload: Option, + expected: Option, }, } @@ -1930,10 +1930,16 @@ impl fmt::Display for InvalidBuilderPayload { signature, pubkey ), InvalidBuilderPayload::WithdrawalsRoot { payload, expected } => { + let opt_string = |opt_hash: &Option| { + opt_hash + .map(|hash| hash.to_string()) + .unwrap_or_else(|| "None".to_string()) + }; write!( f, "payload withdrawals root was {} not {}", - payload, expected + opt_string(payload), + opt_string(expected) ) } } @@ -1964,6 +1970,13 @@ fn verify_builder_bid>( ); } + let expected_withdrawals_root = payload_attributes + .withdrawals() + .ok() + .cloned() + .map(|withdrawals| Withdrawals::::from(withdrawals).tree_hash_root()); + let payload_withdrawals_root = header.withdrawals_root().ok(); + if payload_value < profit_threshold { 
Err(Box::new(InvalidBuilderPayload::LowValue { profit_threshold, @@ -1999,6 +2012,11 @@ fn verify_builder_bid>( signature: bid.data.signature.clone(), pubkey: bid.data.message.pubkey, })) + } else if payload_withdrawals_root != expected_withdrawals_root { + Err(Box::new(InvalidBuilderPayload::WithdrawalsRoot { + payload: payload_withdrawals_root, + expected: expected_withdrawals_root, + })) } else { Ok(()) } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 40a0c41afab..19972650139 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -3,15 +3,19 @@ use crate::{Config, ExecutionLayer, PayloadAttributes}; use async_trait::async_trait; use eth2::types::{BlockId, StateId, ValidatorId}; use eth2::{BeaconNodeHttpClient, Timeouts}; -use ethereum_consensus::crypto::{SecretKey, Signature}; -use ethereum_consensus::primitives::BlsPublicKey; pub use ethereum_consensus::state_transition::Context; +use ethereum_consensus::{ + crypto::{SecretKey, Signature}, + primitives::{BlsPublicKey, BlsSignature, ExecutionAddress, Hash32, Root, U256}, + state_transition::Error, +}; use fork_choice::ForkchoiceUpdateParameters; -use mev_build_rs::{ +use mev_rs::{ + bellatrix::{BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix}, + capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError, BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload, - ExecutionPayloadHeader as ServerPayloadHeader, SignedBlindedBeaconBlock, SignedBuilderBid, - SignedValidatorRegistration, + SignedBlindedBeaconBlock, SignedBuilderBid, SignedValidatorRegistration, }; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; @@ -39,25 +43,129 @@ pub enum Operation { PrevRandao(Hash256), BlockNumber(usize), Timestamp(usize), + WithdrawalsRoot(Hash256), } impl Operation { - fn apply(self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + fn apply(self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { match self { Operation::FeeRecipient(fee_recipient) => { - bid.header.fee_recipient = to_ssz_rs(&fee_recipient)? + *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)? } - Operation::GasLimit(gas_limit) => bid.header.gas_limit = gas_limit as u64, - Operation::Value(value) => bid.value = to_ssz_rs(&value)?, - Operation::ParentHash(parent_hash) => bid.header.parent_hash = to_ssz_rs(&parent_hash)?, - Operation::PrevRandao(prev_randao) => bid.header.prev_randao = to_ssz_rs(&prev_randao)?, - Operation::BlockNumber(block_number) => bid.header.block_number = block_number as u64, - Operation::Timestamp(timestamp) => bid.header.timestamp = timestamp as u64, + Operation::GasLimit(gas_limit) => *bid.gas_limit_mut() = gas_limit as u64, + Operation::Value(value) => *bid.value_mut() = to_ssz_rs(&value)?, + Operation::ParentHash(parent_hash) => *bid.parent_hash_mut() = to_ssz_rs(&parent_hash)?, + Operation::PrevRandao(prev_randao) => *bid.prev_randao_mut() = to_ssz_rs(&prev_randao)?, + Operation::BlockNumber(block_number) => *bid.block_number_mut() = block_number as u64, + Operation::Timestamp(timestamp) => *bid.timestamp_mut() = timestamp as u64, + Operation::WithdrawalsRoot(root) => *bid.withdrawals_root_mut()? = to_ssz_rs(&root)?, } Ok(()) } } +// contains functions we need for BuilderBids.. 
not sure what to call this +pub trait BidStuff { + fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress; + fn gas_limit_mut(&mut self) -> &mut u64; + fn value_mut(&mut self) -> &mut U256; + fn parent_hash_mut(&mut self) -> &mut Hash32; + fn prev_randao_mut(&mut self) -> &mut Hash32; + fn block_number_mut(&mut self) -> &mut u64; + fn timestamp_mut(&mut self) -> &mut u64; + fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError>; + + fn sign_builder_message( + &mut self, + signing_key: &SecretKey, + context: &Context, + ) -> Result; + + fn to_signed_bid(self, signature: BlsSignature) -> SignedBuilderBid; +} + +impl BidStuff for BuilderBid { + fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress { + match self { + Self::Bellatrix(bid) => &mut bid.header.fee_recipient, + Self::Capella(bid) => &mut bid.header.fee_recipient, + } + } + + fn gas_limit_mut(&mut self) -> &mut u64 { + match self { + Self::Bellatrix(bid) => &mut bid.header.gas_limit, + Self::Capella(bid) => &mut bid.header.gas_limit, + } + } + + fn value_mut(&mut self) -> &mut U256 { + match self { + Self::Bellatrix(bid) => &mut bid.value, + Self::Capella(bid) => &mut bid.value, + } + } + + fn parent_hash_mut(&mut self) -> &mut Hash32 { + match self { + Self::Bellatrix(bid) => &mut bid.header.parent_hash, + Self::Capella(bid) => &mut bid.header.parent_hash, + } + } + + fn prev_randao_mut(&mut self) -> &mut Hash32 { + match self { + Self::Bellatrix(bid) => &mut bid.header.prev_randao, + Self::Capella(bid) => &mut bid.header.prev_randao, + } + } + + fn block_number_mut(&mut self) -> &mut u64 { + match self { + Self::Bellatrix(bid) => &mut bid.header.block_number, + Self::Capella(bid) => &mut bid.header.block_number, + } + } + + fn timestamp_mut(&mut self) -> &mut u64 { + match self { + Self::Bellatrix(bid) => &mut bid.header.timestamp, + Self::Capella(bid) => &mut bid.header.timestamp, + } + } + + fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError> { + match self { + Self::Bellatrix(_) => Err(BlindedBlockProviderError::Custom( + "withdrawals_root called on bellatrix bid".to_string(), + )), + Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root), + } + } + + fn sign_builder_message( + &mut self, + signing_key: &SecretKey, + context: &Context, + ) -> Result { + match self { + Self::Bellatrix(message) => sign_builder_message(message, signing_key, context), + Self::Capella(message) => sign_builder_message(message, signing_key, context), + } + } + + fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid { + match self { + Self::Bellatrix(message) => { + SignedBuilderBid::Bellatrix(SignedBuilderBidBellatrix { message, signature }) + } + Self::Capella(message) => { + SignedBuilderBid::Capella(SignedBuilderBidCapella { message, signature }) + } + } + } +} + pub struct TestingBuilder { server: BlindedBlockProviderServer>, pub builder: MockBuilder, @@ -112,7 +220,10 @@ impl TestingBuilder { } pub async fn run(&self) { - self.server.run().await + let server = self.server.serve(); + if let Err(err) = server.await { + println!("error while listening for incoming: {err}") + } } } @@ -163,7 +274,7 @@ impl MockBuilder { *self.invalidate_signatures.write() = false; } - fn apply_operations(&self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + fn apply_operations(&self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { let mut guard = self.operations.write(); while let Some(op) = guard.pop() { op.apply(bid)?; @@ -173,7 +284,7 @@ impl MockBuilder { } 
#[async_trait] -impl mev_build_rs::BlindedBlockProvider for MockBuilder { +impl mev_rs::BlindedBlockProvider for MockBuilder { async fn register_validators( &self, registrations: &mut [SignedValidatorRegistration], @@ -201,6 +312,7 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { bid_request: &BidRequest, ) -> Result { let slot = Slot::new(bid_request.slot); + let fork = self.spec.fork_name_at_slot::(slot); let signed_cached_data = self .val_registration_cache .read() @@ -216,9 +328,13 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)? .ok_or_else(|| convert_err("missing head block"))?; - let block = head.data.message_merge().map_err(convert_err)?; + let block = head.data.message(); let head_block_root = block.tree_hash_root(); - let head_execution_hash = block.body.execution_payload.execution_payload.block_hash; + let head_execution_hash = block + .body() + .execution_payload() + .map_err(convert_err)? + .block_hash(); if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { return Err(BlindedBlockProviderError::Custom(format!( "head mismatch: {} {}", @@ -233,12 +349,11 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)? .ok_or_else(|| convert_err("missing finalized block"))? .data - .message_merge() + .message() + .body() + .execution_payload() .map_err(convert_err)? - .body - .execution_payload - .execution_payload - .block_hash; + .block_hash(); let justified_execution_hash = self .beacon_client @@ -247,12 +362,11 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)? .ok_or_else(|| convert_err("missing finalized block"))? .data - .message_merge() + .message() + .body() + .execution_payload() .map_err(convert_err)? - .body - .execution_payload - .execution_payload - .block_hash; + .block_hash(); let val_index = self .beacon_client @@ -288,12 +402,22 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .get_randao_mix(head_state.current_epoch()) .map_err(convert_err)?; - // FIXME: think about proper fork here - let payload_attributes = - PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None); + let payload_attributes = match fork { + ForkName::Merge => PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None), + // the withdrawals root is filled in by operations + ForkName::Capella | ForkName::Eip4844 => { + PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![])) + } + ForkName::Base | ForkName::Altair => { + return Err(BlindedBlockProviderError::Custom(format!( + "Unsupported fork: {}", + fork + ))); + } + }; self.el - .insert_proposer(slot, head_block_root, val_index, payload_attributes) + .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) .await; let forkchoice_update_params = ForkchoiceUpdateParameters { @@ -303,17 +427,13 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { finalized_hash: Some(finalized_execution_hash), }; - let payload_attributes = - PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None); - let payload = self .el .get_full_payload_caching::>( head_execution_hash, &payload_attributes, forkchoice_update_params, - // TODO: do we need to write a test for this if this is Capella fork? - ForkName::Merge, + fork, ) .await .map_err(convert_err)? 
@@ -321,44 +441,54 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .to_execution_payload_header(); let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; - let mut header: ServerPayloadHeader = - serde_json::from_str(json_payload.as_str()).map_err(convert_err)?; - - header.gas_limit = cached_data.gas_limit; - - let mut message = BuilderBid { - header, - value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, - public_key: self.builder_sk.public_key(), + let mut message = match fork { + ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { + header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?, + value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, + public_key: self.builder_sk.public_key(), + }), + ForkName::Merge => BuilderBid::Bellatrix(BuilderBidBellatrix { + header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?, + value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, + public_key: self.builder_sk.public_key(), + }), + ForkName::Base | ForkName::Altair | ForkName::Eip4844 => { + return Err(BlindedBlockProviderError::Custom(format!( + "Unsupported fork: {}", + fork + ))) + } }; + *message.gas_limit_mut() = cached_data.gas_limit; self.apply_operations(&mut message)?; - let mut signature = - sign_builder_message(&mut message, &self.builder_sk, self.context.as_ref())?; + message.sign_builder_message(&self.builder_sk, self.context.as_ref())?; if *self.invalidate_signatures.read() { signature = Signature::default(); } - let signed_bid = SignedBuilderBid { message, signature }; - Ok(signed_bid) + Ok(message.to_signed_bid(signature)) } async fn open_bid( &self, signed_block: &mut SignedBlindedBeaconBlock, ) -> Result { + let node = match signed_block { + SignedBlindedBeaconBlock::Bellatrix(block) => { + block.message.body.execution_payload_header.hash_tree_root() + } + SignedBlindedBeaconBlock::Capella(block) => { + block.message.body.execution_payload_header.hash_tree_root() + } + } + .map_err(convert_err)?; + let payload = self .el - .get_payload_by_root(&from_ssz_rs( - &signed_block - .message - .body - .execution_payload_header - .hash_tree_root() - .map_err(convert_err)?, - )?) + .get_payload_by_root(&from_ssz_rs(&node)?) 
.ok_or_else(|| convert_err("missing payload for tx root"))?; let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 43099c7a916..6424d73eb5d 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -24,6 +24,7 @@ use network::NetworkReceivers; use proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; +use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_slot_processing; use std::convert::TryInto; use std::sync::Arc; @@ -3428,6 +3429,98 @@ impl ApiTester { self } + pub async fn test_builder_works_post_capella(self) -> Self { + // Ensure builder payload is chosen + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + + let slot = self.chain.slot().unwrap(); + let propose_state = self + .harness + .chain + .state_at_slot(slot, StateSkipConfig::WithoutStateRoots) + .unwrap(); + let withdrawals = get_expected_withdrawals(&propose_state, &self.chain.spec).unwrap(); + let withdrawals_root = withdrawals.tree_hash_root(); + // Set withdrawals root for builder + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::WithdrawalsRoot(withdrawals_root)); + + let epoch = self.chain.epoch().unwrap(); + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The builder's payload should've been chosen, so this cache should not be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_lighthouse_rejects_invalid_withdrawals_root(self) -> Self { + // Ensure builder payload *would be* chosen + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + // Set withdrawals root to something invalid + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::WithdrawalsRoot(Hash256::repeat_byte(0x42))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The local payload should've been chosen because the builder's was invalid + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + #[cfg(target_os = "linux")] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap(); @@ -4424,6 +4517,26 @@ async fn builder_payload_chosen_by_profit() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_works_post_capella() { + let mut config = ApiTesterConfig { + builder_threshold: Some(0), + spec: E::default_spec(), + }; + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = 
Some(Epoch::new(0)); + + ApiTester::new_from_config(config) + .await + .test_post_validator_register_validator() + .await + .test_builder_works_post_capella() + .await + .test_lighthouse_rejects_invalid_withdrawals_root() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() From 3b4c677727fcfc12735fb9b49f10dc34bae9cc03 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sun, 12 Feb 2023 23:14:07 +0000 Subject: [PATCH 160/263] Use release profile for Windows binaries (#3965) ## Proposed Changes Disable `maxperf` profile on Windows due to #3964. This is required for the v3.5.0 release CI to succeed without crashing. --- .github/workflows/release.yml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8ca6ab0f923..2e63b4d6c24 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -134,11 +134,17 @@ jobs: - name: Build Lighthouse for Windows portable if: matrix.arch == 'x86_64-windows-portable' - run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }} + # NOTE: profile set to release until this rustc issue is fixed: + # + # https://github.com/rust-lang/rust/issues/107781 + # + # tracked at: https://github.com/sigp/lighthouse/issues/3964 + run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile release - name: Build Lighthouse for Windows modern if: matrix.arch == 'x86_64-windows' - run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }} + # NOTE: profile set to release (see above) + run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile release - name: Configure GPG and create artifacts if: startsWith(matrix.arch, 'x86_64-windows') != true From 84843d67d790e91721a66a1afb2466a885ec46a7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 12 Feb 2023 23:14:08 +0000 Subject: [PATCH 161/263] Reduce some EE and builder related ERRO logs to WARN (#3966) ## Issue Addressed NA ## Proposed Changes Our `ERRO` stream has been rather noisy since the merge due to some unexpected behaviours of builders and EEs. Now that we've been running post-merge for a while, I think we can drop some of these `ERRO` to `WARN` so we're not "crying wolf". The modified logs are: #### `ERRO Execution engine call failed` I'm seeing this quite frequently on Geth nodes. They seem to time out when they're busy and it rarely indicates a serious issue. We also have logging across block import, fork choice updating and payload production that raises `ERRO` or `CRIT` when the EE times out, so I think we're not at risk of silencing actual issues. #### `ERRO "Builder failed to reveal payload"` In #3775 we reduced this log from `CRIT` to `ERRO` since it's common for builders to fail to reveal the block to the producer directly whilst still broadcasting it to the network. I think it's worth dropping this to `WARN` since it's rarely interesting. I elected to stay with `WARN` since I really do wish builders would fulfill their API promises by returning the block to us. Perhaps I'm just being pedantic here, I could be convinced otherwise. #### `ERRO "Relay error when registering validator(s)"` It seems like builders and/or mev-boost struggle to handle heavy loads of validator registrations.
I haven't observed issues with validators not actually being registered, but I see timeouts on these endpoints many times a day. It doesn't seem like this `ERRO` is worth it. #### `ERRO Error fetching block for peer ExecutionLayerErrorPayloadReconstruction` This means we failed to respond to a peer on the P2P network with a block they requested because of an error in the `execution_layer`. It's very common to see timeouts or incomplete responses on this endpoint whilst the EE is busy and I don't think it's important enough for an `ERRO`. As long as the peer count stays high, I don't think the user needs to be actively concerned about how we're responding to peers. ## Additional Info NA --- Cargo.lock | 1 + beacon_node/execution_layer/src/engines.rs | 4 +-- beacon_node/execution_layer/src/lib.rs | 4 +-- beacon_node/http_api/src/lib.rs | 2 +- beacon_node/network/Cargo.toml | 1 + .../beacon_processor/worker/rpc_methods.rs | 28 ++++++++++++++----- 6 files changed, 28 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 69651fb7ee8..6159778b7a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4988,6 +4988,7 @@ dependencies = [ "eth2_ssz", "eth2_ssz_types", "ethereum-types 0.14.1", + "execution_layer", "exit-future", "fnv", "futures", diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 339006c1ba6..eaaa271c512 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -5,7 +5,7 @@ use crate::engine_api::{ }; use crate::HttpJsonRpc; use lru::LruCache; -use slog::{debug, error, info, Logger}; +use slog::{debug, error, info, warn, Logger}; use std::future::Future; use std::sync::Arc; use task_executor::TaskExecutor; @@ -325,7 +325,7 @@ impl Engine { Ok(result) } Err(error) => { - error!( + warn!( self.log, "Execution engine call failed"; "error" => ?error, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index a4d15abb364..5b0fecbf205 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1575,10 +1575,10 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME, &[metrics::FAILURE], ); - error!( + warn!( self.log(), "Builder failed to reveal payload"; - "info" => "this relay failure may cause a missed proposal", + "info" => "this is common behaviour for some builders and may not indicate an issue", "error" => ?e, "relay_response_ms" => duration.as_millis(), "block_root" => ?block_root, diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 973be2d49b4..60e5d2adf43 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2855,7 +2855,7 @@ pub fn serve( .await .map(|resp| warp::reply::json(&resp)) .map_err(|e| { - error!( + warn!( log, "Relay error when registering validator(s)"; "num_registrations" => filtered_registration_data.len(), diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index b1d928eecb9..1b036b32c6a 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -45,6 +45,7 @@ tokio-util = { version = "0.6.3", features = ["time"] } derivative = "2.2.0" delay_map = "0.1.1" ethereum-types = { version = "0.14.1", optional = true } +execution_layer = { path = "../execution_layer" } [features] deterministic_long_lived_attnets = [ "ethereum-types" ] diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs 
b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index bfa0ea516fa..afcc15280d3 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -7,7 +7,7 @@ use itertools::process_results; use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; -use slog::{debug, error}; +use slog::{debug, error, warn}; use slot_clock::SlotClock; use std::sync::Arc; use task_executor::TaskExecutor; @@ -392,12 +392,26 @@ impl Worker { break; } Err(e) => { - error!( - self.log, - "Error fetching block for peer"; - "block_root" => ?root, - "error" => ?e - ); + if matches!( + e, + BeaconChainError::ExecutionLayerErrorPayloadReconstruction(_block_hash, ref boxed_error) + if matches!(**boxed_error, execution_layer::Error::EngineError(_)) + ) { + warn!( + self.log, + "Error rebuilding payload for peer"; + "info" => "this may occur occasionally when the EE is busy", + "block_root" => ?root, + "error" => ?e, + ); + } else { + error!( + self.log, + "Error fetching block for peer"; + "block_root" => ?root, + "error" => ?e + ); + } // send the stream terminator self.send_error_response( From 2f456ff9eb15d9767564c812382d701c6cf9f5d6 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 13 Feb 2023 03:32:01 +0000 Subject: [PATCH 162/263] Fix regression in DB write atomicity (#3931) ## Issue Addressed Fix a bug introduced by #3696. The bug is not expected to occur frequently, so releasing this PR is non-urgent. ## Proposed Changes * Add a variant to `StoreOp` that allows a raw KV operation to be passed around. * Return to using `self.store.do_atomically` rather than `self.store.hot_db.do_atomically`. This streamlines the write back into a single call and makes our auto-revert work again. * Prevent `import_block_update_shuffling_cache` from failing block import. This is an outstanding bug from before v3.4.0 which may have contributed to some random unexplained database corruption. ## Additional Info In #3696 I split the database write into two calls, one to convert the `StoreOp`s to `KeyValueStoreOp`s and one to write them. This had the unfortunate side-effect of damaging our atomicity guarantees in case of a write error. If the first call failed, we would be left with the block in fork choice but not on-disk (or the snapshot cache), which would prevent us from processing any descendant blocks. On `unstable` the first call is very unlikely to fail unless the disk is full, but on `tree-states` the conversion is more involved and a user reported database corruption after it failed in a way that should have been recoverable. Additionally, as @emhane observed, #3696 also inadvertently removed the import of the new block into the block cache. Although this seems like it could have negatively impacted performance, there are several mitigating factors: - For regular block processing we should almost always load the parent block (and state) from the snapshot cache. - We often load blinded blocks, which bypass the block cache anyway. - Metrics show no noticeable increase in the block cache miss rate with v3.4.0. However, I expect the block cache _will_ be useful again in `tree-states`, so it is restored to use by this PR. 
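To make the atomicity concern concrete, here is a minimal, self-contained sketch of the pattern this PR restores: every write for a block goes into a single atomic batch, and fork choice is rolled back if that batch fails. The `Store`, `ForkChoice` and `StoreOp` types below are simplified stand-ins invented for illustration, not Lighthouse's real APIs.

```rust
// Illustrative stand-ins only: these are not Lighthouse's real `Store`, `ForkChoice`
// or `StoreOp` types, just enough structure to show atomic-commit-with-revert.
#[derive(Debug)]
enum StoreOp {
    PutBlock(u64),
    PutState(u64),
    KeyValueOp(String),
}

#[derive(Default)]
struct Store {
    committed: Vec<StoreOp>,
    fail_next_write: bool,
}

impl Store {
    /// All-or-nothing commit: either every op in the batch lands, or none do.
    fn do_atomically(&mut self, ops: Vec<StoreOp>) -> Result<(), String> {
        if self.fail_next_write {
            return Err("disk write failed".to_string());
        }
        self.committed.extend(ops);
        Ok(())
    }
}

#[derive(Default, Clone)]
struct ForkChoice {
    blocks: Vec<u64>,
}

fn import_block(
    store: &mut Store,
    fork_choice: &mut ForkChoice,
    block_root: u64,
    pubkey_cache_ops: Vec<StoreOp>,
) -> Result<(), String> {
    // Snapshot fork choice so a failed write can be rolled back.
    let pre_import = fork_choice.clone();
    fork_choice.blocks.push(block_root);

    // Everything for this block goes into ONE batch; the raw key-value ops from the
    // pubkey cache ride along via the `KeyValueOp` variant.
    let mut ops = pubkey_cache_ops;
    ops.push(StoreOp::PutBlock(block_root));
    ops.push(StoreOp::PutState(block_root));

    if let Err(e) = store.do_atomically(ops) {
        // Revert fork choice so it never references a block that is missing from disk.
        *fork_choice = pre_import;
        return Err(e);
    }
    Ok(())
}

fn main() {
    let mut store = Store::default();
    let mut fork_choice = ForkChoice::default();
    store.fail_next_write = true;

    let result = import_block(
        &mut store,
        &mut fork_choice,
        42,
        vec![StoreOp::KeyValueOp("pubkey:7".to_string())],
    );

    // The failed write left neither fork choice nor the store with partial state.
    assert!(result.is_err());
    assert!(fork_choice.blocks.is_empty());
    assert!(store.committed.is_empty());
    println!("write failed atomically; no partial state remains");
}
```

With the two-call version, a failure between the conversion step and the write could leave fork choice referencing a block that never reached disk; folding everything into one `do_atomically` call removes that window.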
--- beacon_node/beacon_chain/src/beacon_chain.rs | 40 ++++++++++++++----- .../src/validator_pubkey_cache.rs | 17 +++++--- beacon_node/store/src/hot_cold_store.rs | 6 +++ beacon_node/store/src/lib.rs | 1 + 4 files changed, 47 insertions(+), 17 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6a67ae71ed2..4ec13f8f53e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2714,7 +2714,7 @@ impl BeaconChain { // is so we don't have to think about lock ordering with respect to the fork choice lock. // There are a bunch of places where we lock both fork choice and the pubkey cache and it // would be difficult to check that they all lock fork choice first. - let mut kv_store_ops = self + let mut ops = self .validator_pubkey_cache .try_write_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(Error::ValidatorPubkeyCacheLockTimeout)? @@ -2816,9 +2816,14 @@ impl BeaconChain { // ---------------------------- BLOCK PROBABLY ATTESTABLE ---------------------------------- // Most blocks are now capable of being attested to thanks to the `early_attester_cache` // cache above. Resume non-essential processing. + // + // It is important NOT to return errors here before the database commit, because the block + // has already been added to fork choice and the database would be left in an inconsistent + // state if we returned early without committing. In other words, an error here would + // corrupt the node's database permanently. // ----------------------------------------------------------------------------------------- - self.import_block_update_shuffling_cache(block_root, &mut state)?; + self.import_block_update_shuffling_cache(block_root, &mut state); self.import_block_observe_attestations( block, &state, @@ -2841,17 +2846,16 @@ impl BeaconChain { // If the write fails, revert fork choice to the version from disk, else we can // end up with blocks in fork choice that are missing from disk. // See https://github.com/sigp/lighthouse/issues/2028 - let mut ops: Vec<_> = confirmed_state_roots - .into_iter() - .map(StoreOp::DeleteStateTemporaryFlag) - .collect(); + ops.extend( + confirmed_state_roots + .into_iter() + .map(StoreOp::DeleteStateTemporaryFlag), + ); ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); let txn_lock = self.store.hot_db.begin_rw_transaction(); - kv_store_ops.extend(self.store.convert_to_kv_batch(ops)?); - - if let Err(e) = self.store.hot_db.do_atomically(kv_store_ops) { + if let Err(e) = self.store.do_atomically(ops) { error!( self.log, "Database write failed!"; @@ -3280,13 +3284,27 @@ impl BeaconChain { } } + // For the current and next epoch of this state, ensure we have the shuffling from this + // block in our cache. fn import_block_update_shuffling_cache( &self, block_root: Hash256, state: &mut BeaconState, + ) { + if let Err(e) = self.import_block_update_shuffling_cache_fallible(block_root, state) { + warn!( + self.log, + "Failed to prime shuffling cache"; + "error" => ?e + ); + } + } + + fn import_block_update_shuffling_cache_fallible( + &self, + block_root: Hash256, + state: &mut BeaconState, ) -> Result<(), BlockError> { - // For the current and next epoch of this state, ensure we have the shuffling from this - // block in our cache. 
for relative_epoch in [RelativeEpoch::Current, RelativeEpoch::Next] { let shuffling_id = AttestationShufflingId::new(block_root, state, relative_epoch)?; diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 26aea2d2722..79910df2923 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use std::collections::HashMap; use std::convert::TryInto; use std::marker::PhantomData; -use store::{DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp, StoreItem}; +use store::{DBColumn, Error as StoreError, StoreItem, StoreOp}; use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes}; /// Provides a mapping of `validator_index -> validator_publickey`. @@ -38,7 +38,7 @@ impl ValidatorPubkeyCache { }; let store_ops = cache.import_new_pubkeys(state)?; - store.hot_db.do_atomically(store_ops)?; + store.do_atomically(store_ops)?; Ok(cache) } @@ -79,7 +79,7 @@ impl ValidatorPubkeyCache { pub fn import_new_pubkeys( &mut self, state: &BeaconState, - ) -> Result, BeaconChainError> { + ) -> Result>, BeaconChainError> { if state.validators().len() > self.pubkeys.len() { self.import( state.validators()[self.pubkeys.len()..] @@ -92,7 +92,10 @@ impl ValidatorPubkeyCache { } /// Adds zero or more validators to `self`. - fn import(&mut self, validator_keys: I) -> Result, BeaconChainError> + fn import( + &mut self, + validator_keys: I, + ) -> Result>, BeaconChainError> where I: Iterator + ExactSizeIterator, { @@ -112,7 +115,9 @@ impl ValidatorPubkeyCache { // It will be committed atomically when the block that introduced it is written to disk. // Notably it is NOT written while the write lock on the cache is held. // See: https://github.com/sigp/lighthouse/issues/2327 - store_ops.push(DatabasePubkey(pubkey).as_kv_store_op(DatabasePubkey::key_for_index(i))); + store_ops.push(StoreOp::KeyValueOp( + DatabasePubkey(pubkey).as_kv_store_op(DatabasePubkey::key_for_index(i)), + )); self.pubkeys.push( (&pubkey) @@ -294,7 +299,7 @@ mod test { let ops = cache .import_new_pubkeys(&state) .expect("should import pubkeys"); - store.hot_db.do_atomically(ops).unwrap(); + store.do_atomically(ops).unwrap(); check_cache_get(&cache, &keypairs[..]); drop(cache); diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 4f63f4e7f97..6028d0ddcfa 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -727,6 +727,10 @@ impl, Cold: ItemStore> HotColdDB let key = get_key_for_col(DBColumn::ExecPayload.into(), block_root.as_bytes()); key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); } + + StoreOp::KeyValueOp(kv_op) => { + key_value_batch.push(kv_op); + } } } Ok(key_value_batch) @@ -758,6 +762,8 @@ impl, Cold: ItemStore> HotColdDB StoreOp::DeleteState(_, _) => (), StoreOp::DeleteExecutionPayload(_) => (), + + StoreOp::KeyValueOp(_) => (), } } diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 75aeca058b5..9d15dd40432 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -161,6 +161,7 @@ pub enum StoreOp<'a, E: EthSpec> { DeleteBlock(Hash256), DeleteState(Hash256, Option), DeleteExecutionPayload(Hash256), + KeyValueOp(KeyValueStoreOp), } /// A unique column identifier. 
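A closing note on the new `KeyValueOp` variant above: it is the escape hatch that lets callers such as the validator pubkey cache hand raw database writes to the store, so they are flattened into the same atomic batch as the high-level ops. A minimal sketch of that flattening, using simplified stand-in types rather than the real `StoreOp`/`KeyValueStoreOp` definitions:

```rust
// Simplified stand-ins, not the real Lighthouse definitions.
#[derive(Debug)]
enum KeyValueStoreOp {
    PutKeyValue(String, Vec<u8>),
    DeleteKey(String),
}

#[derive(Debug)]
enum StoreOp {
    PutBlock(u64, Vec<u8>),
    DeleteState(u64),
    // Raw escape hatch: arbitrary key-value writes join the same atomic batch.
    KeyValueOp(KeyValueStoreOp),
}

// Flatten high-level ops into the raw ops the key-value database understands;
// the raw variant is passed through untouched.
fn convert_to_kv_batch(ops: Vec<StoreOp>) -> Vec<KeyValueStoreOp> {
    ops.into_iter()
        .map(|op| match op {
            StoreOp::PutBlock(root, bytes) => {
                KeyValueStoreOp::PutKeyValue(format!("block:{root}"), bytes)
            }
            StoreOp::DeleteState(root) => KeyValueStoreOp::DeleteKey(format!("state:{root}")),
            StoreOp::KeyValueOp(raw) => raw,
        })
        .collect()
}

fn main() {
    let batch = convert_to_kv_batch(vec![
        StoreOp::PutBlock(1, vec![0xaa]),
        StoreOp::KeyValueOp(KeyValueStoreOp::PutKeyValue("pubkey:0".into(), vec![0xbb])),
        StoreOp::DeleteState(1),
    ]);
    // One heterogeneous batch, ready for a single atomic write.
    assert_eq!(batch.len(), 3);
    println!("{batch:?}");
}
```

Because the pubkey cache now returns its ops instead of writing them itself, the caller decides when, and together with what else, they are committed.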
From fa1d4c7054987413c29a713fdc8735916eb394fe Mon Sep 17 00:00:00 2001 From: Nazar Hussain Date: Mon, 13 Feb 2023 03:32:03 +0000 Subject: [PATCH 163/263] Invalid cross build feature flag (#3959) ## Issue Addressed The documentation on building from source does not match what the GitHub workflow uses. https://github.com/sigp/lighthouse/blob/aa5b7ef7839e15d55c3a252230ecb11c4abc0a52/book/src/installation-source.md?plain=1#L118-L120 ## Proposed Changes Because the GitHub workflow uses `cross` to build from source, and that build reads a different env variable, `CROSS_FEATURES`, the features need to be passed at compile time. ## Additional Info Verified that existing `-dev` builds do not contain the `minimal` spec enabled. ```bash > docker run --rm --name node-5-cl-lighthouse sigp/lighthouse:latest-amd64-unstable-dev lighthouse --version Lighthouse v3.4.0-aa5b7ef BLS library: blst-portable SHA256 hardware acceleration: true Allocator: jemalloc Specs: mainnet (true), minimal (false), gnosis (true) ``` --- .github/workflows/docker.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 2940ba769eb..46896073add 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -52,8 +52,8 @@ jobs: x86_64, x86_64-portable] features: [ - {version_suffix: "", env: ""}, - {version_suffix: "-dev", env: "spec-minimal"} + {version_suffix: "", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc"}, + {version_suffix: "-dev", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc,spec-minimal"} ] include: - profile: maxperf @@ -66,6 +66,7 @@ VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} FEATURE_SUFFIX: ${{ matrix.features.version_suffix }} FEATURES: ${{ matrix.features.env }} + CROSS_FEATURES: ${{ matrix.features.env }} steps: - uses: actions/checkout@v3 - name: Update Rust @@ -76,7 +77,7 @@ - name: Cross build Lighthouse binary run: | cargo install cross - env CROSS_PROFILE=${{ matrix.profile }} make build-${{ matrix.binary }} + env CROSS_PROFILE=${{ matrix.profile }} CROSS_FEATURES=${{ matrix.features.env }} make build-${{ matrix.binary }} - name: Move cross-built binary into Docker scope (if ARM) if: startsWith(matrix.binary, 'aarch64') run: | From d53ccf8fc79f72d5ea0ded3ee85c409429ca6691 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 14 Feb 2023 12:08:14 +1100 Subject: [PATCH 164/263] Placeholder for BlobsByRange outbound rate limit --- beacon_node/lighthouse_network/src/rpc/config.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index bea0929fb0b..871fa644eb1 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -129,6 +129,8 @@ impl FromStr for OutboundRateLimiterConfig { let mut goodbye_quota = None; let mut blocks_by_range_quota = None; let mut blocks_by_root_quota = None; + // TODO(eip4844): use this blob quota + let mut blobs_by_range_quota = None; for proto_def in s.split(';') { let ProtocolQuota { protocol, quota } = proto_def.parse()?; let quota = Some(quota); @@ -139,6 +141,7 @@ Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota), Protocol::Ping => ping_quota = ping_quota.or(quota), Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), + Protocol::BlobsByRange => blobs_by_range_quota = 
blobs_by_range_quota.or(quota), Protocol::LightClientBootstrap => return Err("Lighthouse does not send LightClientBootstrap requests. Quota should not be set."), } } From f7bd4bf06ed7e54d9ba55d4c14711cbae367d478 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 14 Feb 2023 12:09:40 +1100 Subject: [PATCH 165/263] Update block rewards API for Capella --- .../beacon_chain/src/beacon_block_reward.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs index 3f186c37c19..786402c9978 100644 --- a/beacon_node/beacon_chain/src/beacon_block_reward.rs +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -15,12 +15,12 @@ use store::{ consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, RelativeEpoch, }; -use types::{BeaconBlockRef, BeaconState, BeaconStateError, ExecPayload, Hash256}; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Hash256}; type BeaconBlockSubRewardValue = u64; impl BeaconChain { - pub fn compute_beacon_block_reward>( + pub fn compute_beacon_block_reward>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, block_root: Hash256, @@ -97,7 +97,7 @@ impl BeaconChain { }) } - fn compute_beacon_block_sync_aggregate_reward>( + fn compute_beacon_block_sync_aggregate_reward>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, state: &BeaconState, @@ -111,7 +111,7 @@ impl BeaconChain { } } - fn compute_beacon_block_proposer_slashing_reward>( + fn compute_beacon_block_proposer_slashing_reward>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, state: &BeaconState, @@ -132,7 +132,7 @@ impl BeaconChain { Ok(proposer_slashing_reward) } - fn compute_beacon_block_attester_slashing_reward>( + fn compute_beacon_block_attester_slashing_reward>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, state: &BeaconState, @@ -155,7 +155,7 @@ impl BeaconChain { Ok(attester_slashing_reward) } - fn compute_beacon_block_attestation_reward_base>( + fn compute_beacon_block_attestation_reward_base>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, block_root: Hash256, @@ -173,7 +173,7 @@ impl BeaconChain { Ok(block_attestation_reward) } - fn compute_beacon_block_attestation_reward_altair>( + fn compute_beacon_block_attestation_reward_altair>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, state: &mut BeaconState, From 8dd92491772fccf2b99210a9cb7a14217739a7a9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 14 Feb 2023 03:25:42 +0000 Subject: [PATCH 166/263] Enforce a timeout on peer disconnect (#3757) On heavily crowded networks, we are seeing many attempted connections to our node every second. Often these connections come from peers that have just been disconnected. This can be for a number of reasons including: - We have deemed them to be not as useful as other peers - They have performed poorly - They have dropped the connection with us - The connection was spontaneously lost - They were randomly removed because we have too many peers In all of these cases, if we have reached or exceeded our target peer limit, there is no desire to accept new connections immediately after the disconnect from these peers. In fact, it often costs us resources to handle the established connections and defeats some of the logic of dropping them in the first place. This PR adds a timeout, that prevents recently disconnected peers from reconnecting to us. 
Technically we implement a ban at the swarm layer to prevent immediate re connections for at least 10 minutes. I decided to keep this light, and use a time-based LRUCache which only gets updated during the peer manager heartbeat to prevent added stress of polling a delay map for what could be a large number of peers. This cache is bounded in time. An extra space bound could be added should people consider this a risk. Co-authored-by: Diva M --- Cargo.lock | 54 +++++++------- beacon_node/lighthouse_network/Cargo.toml | 1 + .../src/peer_manager/mod.rs | 44 ++++++++++++ .../src/peer_manager/network_behaviour.rs | 2 +- .../src/peer_manager/peerdb.rs | 11 ++- common/lru_cache/src/time.rs | 71 +++++++++++++++++++ 6 files changed, 154 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6159778b7a1..d568ade047f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1433,9 +1433,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.89" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc831ee6a32dd495436e317595e639a587aa9907bef96fe6e6abc290ab6204e9" +checksum = "90d59d9acd2a682b4e40605a242f6670eaa58c5957471cbf85e8aa6a0b97a5e8" dependencies = [ "cc", "cxxbridge-flags", @@ -1445,9 +1445,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.89" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94331d54f1b1a8895cd81049f7eaaaef9d05a7dcb4d1fd08bf3ff0806246789d" +checksum = "ebfa40bda659dd5c864e65f4c9a2b0aff19bea56b017b9b77c73d3766a453a38" dependencies = [ "cc", "codespan-reporting", @@ -1460,15 +1460,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.89" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48dcd35ba14ca9b40d6e4b4b39961f23d835dbb8eed74565ded361d93e1feb8a" +checksum = "457ce6757c5c70dc6ecdbda6925b958aae7f959bda7d8fb9bde889e34a09dc03" [[package]] name = "cxxbridge-macro" -version = "1.0.89" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bbeb29798b407ccd82a3324ade1a7286e0d29851475990b612670f6f5124d2" +checksum = "ebf883b7aacd7b2aeb2a7b338648ee19f57c140d4ee8e52c68979c6b2f7f2263" dependencies = [ "proc-macro2", "quote", @@ -2982,7 +2982,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.7", "tracing", ] @@ -3463,7 +3463,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.3.0", + "parity-scale-codec 3.4.0", ] [[package]] @@ -4215,7 +4215,7 @@ dependencies = [ "thiserror", "tinytemplate", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.7", "webrtc", ] @@ -4391,6 +4391,7 @@ dependencies = [ "lighthouse_metrics", "lighthouse_version", "lru 0.7.8", + "lru_cache", "parking_lot 0.12.1", "prometheus-client", "quickcheck", @@ -5402,9 +5403,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3840933452adf7b3b9145e27086a5a3376c619dca1a21b1e5a5af0d54979bed" +checksum = "637935964ff85a605d114591d4d2c13c5d1ba2806dae97cea6bf180238a749ac" dependencies = [ "arrayvec", "bitvec 1.0.1", @@ -6281,7 +6282,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls 0.23.4", - "tokio-util 0.7.4", + "tokio-util 0.7.7", 
"tower-service", "url", "wasm-bindgen", @@ -6567,7 +6568,7 @@ checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" dependencies = [ "cfg-if", "derive_more", - "parity-scale-codec 3.3.0", + "parity-scale-codec 3.4.0", "scale-info-derive", ] @@ -6813,9 +6814,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7434af0dc1cbd59268aa98b4c22c131c0584d2232f6fb166efb993e2832e896a" +checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" dependencies = [ "itoa 1.0.5", "ryu", @@ -6977,9 +6978,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] @@ -7665,10 +7666,11 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if", "once_cell", ] @@ -7867,7 +7869,7 @@ dependencies = [ "futures-core", "pin-project-lite 0.2.9", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.7", ] [[package]] @@ -7917,9 +7919,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.4" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" +checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" dependencies = [ "bytes", "futures-core", @@ -8964,9 +8966,9 @@ dependencies = [ [[package]] name = "webrtc-ice" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494483fbb2f5492620871fdc78b084aed8807377f6e3fe88b2e49f0a9c9c41d7" +checksum = "465a03cc11e9a7d7b4f9f99870558fe37a102b65b93f8045392fef7c67b39e80" dependencies = [ "arc-swap", "async-trait", diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 474ebebb507..9b00c39d2b5 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -25,6 +25,7 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } smallvec = "1.6.1" tokio-io-timeout = "1.1.1" lru = "0.7.1" +lru_cache = { path = "../../common/lru_cache" } parking_lot = "0.12.0" sha2 = "0.10" snap = "1.0.1" diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 89670a2eb3c..03f6a746ac6 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -8,6 +8,7 @@ use crate::{Subnet, SubnetDiscovery}; use delay_map::HashSetDelay; use discv5::Enr; use libp2p::identify::Info as IdentifyInfo; +use lru_cache::LRUTimeCache; use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; use rand::seq::SliceRandom; use slog::{debug, error, trace, warn}; @@ -39,6 +40,9 @@ mod network_behaviour; /// requests. This defines the interval in seconds. 
const HEARTBEAT_INTERVAL: u64 = 30; +/// The minimum amount of time we allow peers to reconnect to us after a disconnect when we are +/// saturated with peers. This effectively looks like a swarm BAN for this amount of time. +pub const PEER_RECONNECTION_TIMEOUT: Duration = Duration::from_secs(600); /// This is used in the pruning logic. We avoid pruning peers on sync-committees if doing so would /// lower our peer count below this number. Instead we favour a non-uniform distribution of subnet /// peers. @@ -74,6 +78,20 @@ pub struct PeerManager { target_peers: usize, /// Peers queued to be dialed. peers_to_dial: VecDeque<(PeerId, Option)>, + /// The number of temporarily banned peers. This is used to prevent instantaneous + /// reconnection. + // NOTE: This just prevents re-connections. The state of the peer is otherwise unaffected. A + // peer can be in a disconnected state and new connections will be refused and logged as if the + // peer is banned without it being reflected in the peer's state. + // Also the banned state can out-last the peer's reference in the peer db. So peers that are + // unknown to us can still be temporarily banned. This is fundamentally a relationship with + // the swarm. Regardless of our knowledge of the peer in the db, it will be temporarily banned + // at the swarm layer. + // NOTE: An LRUTimeCache is used compared to a structure that needs to be polled to avoid very + // frequent polling to unban peers. Instead, this cache piggy-backs the PeerManager heartbeat + // to update and clear the cache. Therefore the PEER_RECONNECTION_TIMEOUT only has a resolution + // of the HEARTBEAT_INTERVAL. + temporary_banned_peers: LRUTimeCache, /// A collection of sync committee subnets that we need to stay subscribed to. /// Sync committee subnets are longer term (256 epochs). Hence, we need to re-run /// discovery queries for subnet peers if we disconnect from existing sync @@ -143,6 +161,7 @@ impl PeerManager { outbound_ping_peers: HashSetDelay::new(Duration::from_secs(ping_interval_outbound)), status_peers: HashSetDelay::new(Duration::from_secs(status_interval)), target_peers: target_peer_count, + temporary_banned_peers: LRUTimeCache::new(PEER_RECONNECTION_TIMEOUT), sync_committee_subnets: Default::default(), heartbeat, discovery_enabled, @@ -243,6 +262,15 @@ impl PeerManager { reason: Option, ) { match ban_operation { + BanOperation::TemporaryBan => { + // The peer could be temporarily banned. We only do this in the case that + // we have currently reached our peer target limit. + if self.network_globals.connected_peers() >= self.target_peers { + // We have enough peers, prevent this reconnection. + self.temporary_banned_peers.raw_insert(*peer_id); + self.events.push(PeerManagerEvent::Banned(*peer_id, vec![])); + } + } BanOperation::DisconnectThePeer => { // The peer was currently connected, so we start a disconnection. // Once the peer has disconnected, its connection state will transition to a @@ -259,6 +287,11 @@ impl PeerManager { BanOperation::ReadyToBan(banned_ips) => { // The peer is not currently connected, we can safely ban it at the swarm // level. + + // If a peer is being banned, this trumps any temporary ban the peer might be + // under. We no longer track it in the temporary ban list. + self.temporary_banned_peers.raw_remove(peer_id); + // Inform the Swarm to ban the peer self.events .push(PeerManagerEvent::Banned(*peer_id, banned_ips)); @@ -1109,6 +1142,14 @@ impl PeerManager { } } + /// Unbans any temporarily banned peers that have served their timeout. 
+ fn unban_temporary_banned_peers(&mut self) { + for peer_id in self.temporary_banned_peers.remove_expired() { + self.events + .push(PeerManagerEvent::UnBanned(peer_id, Vec::new())); + } + } + /// The Peer manager's heartbeat maintains the peer count and maintains peer reputations. /// /// It will request discovery queries if the peer count has not reached the desired number of @@ -1141,6 +1182,9 @@ impl PeerManager { // Prune any excess peers back to our target in such a way that incentivises good scores and // a uniform distribution of subnets. self.prune_excess_peers(); + + // Unban any peers that have served their temporary ban timeout + self.unban_temporary_banned_peers(); } // Update metrics related to peer scoring. diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 42eb270c40e..21288473ec9 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -170,7 +170,7 @@ impl PeerManager { BanResult::NotBanned => {} } - // Count dialing peers in the limit if the peer dialied us. + // Count dialing peers in the limit if the peer dialed us. let count_dialing = endpoint.is_listener(); // Check the connection limits if self.peer_limit_reached(count_dialing) diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 1f44488a569..61cf8de1cb2 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -844,8 +844,12 @@ impl PeerDB { .collect::>(); return Some(BanOperation::ReadyToBan(banned_ips)); } - PeerConnectionStatus::Disconnecting { .. } - | PeerConnectionStatus::Unknown + PeerConnectionStatus::Disconnecting { .. } => { + // The peer has been disconnected but not banned. Inform the peer manager + // that this peer could be eligible for a temporary ban. + return Some(BanOperation::TemporaryBan); + } + PeerConnectionStatus::Unknown | PeerConnectionStatus::Connected { .. } | PeerConnectionStatus::Dialing { .. } => { self.disconnected_peers += 1; @@ -1177,6 +1181,9 @@ impl From> for ScoreUpdateResult { /// When attempting to ban a peer provides the peer manager with the operation that must be taken. pub enum BanOperation { + /// Optionally temporarily ban this peer to prevent instantaneous reconnection. + /// The peer manager will decide if temporary banning is required. + TemporaryBan, // The peer is currently connected. Perform a graceful disconnect before banning at the swarm // level. DisconnectThePeer, diff --git a/common/lru_cache/src/time.rs b/common/lru_cache/src/time.rs index 5c0e4c1ca14..1253ef1ecc6 100644 --- a/common/lru_cache/src/time.rs +++ b/common/lru_cache/src/time.rs @@ -31,6 +31,77 @@ where } } + /// Inserts a key without removal of potentially expired elements. + /// Returns true if the key does not already exist. + pub fn raw_insert(&mut self, key: Key) -> bool { + // check the cache before removing elements + let is_new = self.map.insert(key.clone()); + + // add the new key to the list, if it doesn't already exist. 
+ if is_new { + self.list.push_back(Element { + key, + inserted: Instant::now(), + }); + } else { + let position = self + .list + .iter() + .position(|e| e.key == key) + .expect("Key is not new"); + let mut element = self + .list + .remove(position) + .expect("Position is not occupied"); + element.inserted = Instant::now(); + self.list.push_back(element); + } + #[cfg(test)] + self.check_invariant(); + is_new + } + + /// Removes a key from the cache without purging expired elements. Returns true if the key + /// existed. + pub fn raw_remove(&mut self, key: &Key) -> bool { + if self.map.remove(key) { + let position = self + .list + .iter() + .position(|e| &e.key == key) + .expect("Key must exist"); + self.list + .remove(position) + .expect("Position is not occupied"); + true + } else { + false + } + } + + /// Removes all expired elements and returns them + pub fn remove_expired(&mut self) -> Vec { + if self.list.is_empty() { + return Vec::new(); + } + + let mut removed_elements = Vec::new(); + let now = Instant::now(); + // remove any expired results + while let Some(element) = self.list.pop_front() { + if element.inserted + self.ttl > now { + self.list.push_front(element); + break; + } + self.map.remove(&element.key); + removed_elements.push(element.key); + } + #[cfg(test)] + self.check_invariant(); + + removed_elements + } + // Inserts a new key. It first purges expired elements to do so. // // If the key was not present this returns `true`. If the value was already present this From 10d32ee04c416200205a051724daafb76ae2bc50 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 14 Feb 2023 14:41:28 +1100 Subject: [PATCH 167/263] Quote Capella BeaconState fields (#3967) --- consensus/types/src/beacon_state.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 0b07ce49580..e70b8842758 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -301,8 +301,10 @@ where // Capella #[superstruct(only(Capella, Eip4844), partial_getter(copy))] + #[serde(with = "eth2_serde_utils::quoted_u64")] pub next_withdrawal_index: u64, #[superstruct(only(Capella, Eip4844), partial_getter(copy))] + #[serde(with = "eth2_serde_utils::quoted_u64")] pub next_withdrawal_validator_index: u64, // Deep history valid from Capella onwards. #[superstruct(only(Capella, Eip4844))] From 918b688f728cbf43d184c004b6827345fd100e52 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 15 Feb 2023 14:17:56 +1100 Subject: [PATCH 168/263] Simplify payload traits and reduce cloning (#3976) * Simplify payload traits and reduce cloning * Fix self limiter --- beacon_node/beacon_chain/src/beacon_chain.rs | 5 +- .../lighthouse_network/src/rpc/config.rs | 8 +- .../src/rpc/self_limiter.rs | 2 + .../block_signature_verifier.rs | 3 +- .../verify_bls_to_execution_change.rs | 5 +- consensus/types/src/beacon_block_body.rs | 12 +- consensus/types/src/execution_payload.rs | 4 +- .../types/src/execution_payload_header.rs | 59 +++++----- consensus/types/src/payload.rs | 111 +++++++++--------- 9 files changed, 108 insertions(+), 101 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 31d52deae03..9bcf8a0d6e8 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -985,11 +985,8 @@ impl BeaconChain { })? 
.ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; - //FIXME(sean) avoid the clone by comparing refs to headers (`as_execution_payload_header` method ?) - let full_payload: FullPayload = execution_payload.clone().into(); - // Verify payload integrity. - let header_from_payload = full_payload.to_execution_payload_header(); + let header_from_payload = ExecutionPayloadHeader::from(execution_payload.to_ref()); if header_from_payload != execution_payload_header { for txn in execution_payload.transactions() { debug!( diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index 871fa644eb1..e89d4585039 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -67,6 +67,7 @@ pub struct OutboundRateLimiterConfig { pub(super) goodbye_quota: Quota, pub(super) blocks_by_range_quota: Quota, pub(super) blocks_by_root_quota: Quota, + pub(super) blobs_by_range_quota: Quota, } impl OutboundRateLimiterConfig { @@ -77,6 +78,8 @@ impl OutboundRateLimiterConfig { pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(methods::MAX_REQUEST_BLOCKS, 10); pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); + pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = + Quota::n_every(methods::MAX_REQUEST_BLOBS_SIDECARS, 10); } impl Default for OutboundRateLimiterConfig { @@ -88,6 +91,7 @@ impl Default for OutboundRateLimiterConfig { goodbye_quota: Self::DEFAULT_GOODBYE_QUOTA, blocks_by_range_quota: Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA, blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA, + blobs_by_range_quota: Self::DEFAULT_BLOBS_BY_RANGE_QUOTA, } } } @@ -111,6 +115,7 @@ impl Debug for OutboundRateLimiterConfig { .field("goodbye", fmt_q!(&self.goodbye_quota)) .field("blocks_by_range", fmt_q!(&self.blocks_by_range_quota)) .field("blocks_by_root", fmt_q!(&self.blocks_by_root_quota)) + .field("blobs_by_range", fmt_q!(&self.blobs_by_range_quota)) .finish() } } @@ -129,7 +134,6 @@ impl FromStr for OutboundRateLimiterConfig { let mut goodbye_quota = None; let mut blocks_by_range_quota = None; let mut blocks_by_root_quota = None; - // TODO(eip4844): use this blob quota let mut blobs_by_range_quota = None; for proto_def in s.split(';') { let ProtocolQuota { protocol, quota } = proto_def.parse()?; @@ -154,6 +158,8 @@ impl FromStr for OutboundRateLimiterConfig { .unwrap_or(Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA), blocks_by_root_quota: blocks_by_root_quota .unwrap_or(Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA), + blobs_by_range_quota: blobs_by_range_quota + .unwrap_or(Self::DEFAULT_BLOBS_BY_RANGE_QUOTA), }) } } diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index 451c6206f37..61e9b46a90d 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -60,6 +60,7 @@ impl SelfRateLimiter { goodbye_quota, blocks_by_range_quota, blocks_by_root_quota, + blobs_by_range_quota, } = config; let limiter = RateLimiter::builder() @@ -69,6 +70,7 @@ impl SelfRateLimiter { .set_quota(Protocol::Goodbye, goodbye_quota) .set_quota(Protocol::BlocksByRange, blocks_by_range_quota) .set_quota(Protocol::BlocksByRoot, blocks_by_root_quota) + .set_quota(Protocol::BlobsByRange, blobs_by_range_quota) // Manually set the LightClientBootstrap quota, since we use the same rate limiter for // inbound and outbound requests, and the LightClientBootstrap is an only 
inbound // protocol. diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index bbf2c1caa51..709302eec17 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -348,8 +348,7 @@ where &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { - // FIXME(capella): to improve performance we might want to decompress the withdrawal pubkeys - // in parallel. + // To improve performance we might want to decompress the withdrawal pubkeys in parallel. if let Ok(bls_to_execution_changes) = block.message().body().bls_to_execution_changes() { for bls_to_execution_change in bls_to_execution_changes { self.sets.push(bls_execution_change_signature_set( diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs index 34700a33e4e..15a856c40c9 100644 --- a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -37,10 +37,9 @@ pub fn verify_bls_to_execution_change( Invalid::NonBlsWithdrawalCredentials ); + // Re-hashing the pubkey isn't necessary during block replay, so we may want to skip that in + // future. let pubkey_hash = hash(address_change.from_bls_pubkey.as_serialized()); - - // FIXME: Should this check be put inside the verify_signatures.is_true() condition? - // I believe that's used for fuzzing so this is a Mehdi question.. verify!( validator.withdrawal_credentials.as_bytes().get(1..) 
== pubkey_hash.get(1..), Invalid::WithdrawalCredentialsMismatch diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 28c9213d1f4..07c8f898b33 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -279,7 +279,7 @@ impl From>> voluntary_exits, sync_aggregate, execution_payload: BlindedPayloadMerge { - execution_payload_header: From::from(execution_payload.clone()), + execution_payload_header: From::from(&execution_payload), }, }, Some(execution_payload), @@ -320,7 +320,7 @@ impl From>> voluntary_exits, sync_aggregate, execution_payload: BlindedPayloadCapella { - execution_payload_header: From::from(execution_payload.clone()), + execution_payload_header: From::from(&execution_payload), }, bls_to_execution_changes, }, @@ -363,7 +363,7 @@ impl From>> voluntary_exits, sync_aggregate, execution_payload: BlindedPayloadEip4844 { - execution_payload_header: From::from(execution_payload.clone()), + execution_payload_header: From::from(&execution_payload), }, bls_to_execution_changes, blob_kzg_commitments, @@ -414,7 +414,7 @@ impl BeaconBlockBodyMerge> { voluntary_exits: voluntary_exits.clone(), sync_aggregate: sync_aggregate.clone(), execution_payload: BlindedPayloadMerge { - execution_payload_header: From::from(execution_payload.clone()), + execution_payload_header: execution_payload.into(), }, } } @@ -447,7 +447,7 @@ impl BeaconBlockBodyCapella> { voluntary_exits: voluntary_exits.clone(), sync_aggregate: sync_aggregate.clone(), execution_payload: BlindedPayloadCapella { - execution_payload_header: From::from(execution_payload.clone()), + execution_payload_header: execution_payload.into(), }, bls_to_execution_changes: bls_to_execution_changes.clone(), } @@ -482,7 +482,7 @@ impl BeaconBlockBodyEip4844> { voluntary_exits: voluntary_exits.clone(), sync_aggregate: sync_aggregate.clone(), execution_payload: BlindedPayloadEip4844 { - execution_payload_header: From::from(execution_payload.clone()), + execution_payload_header: execution_payload.into(), }, bls_to_execution_changes: bls_to_execution_changes.clone(), blob_kzg_commitments: blob_kzg_commitments.clone(), diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 16b27783555..6e055d0a79a 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -35,7 +35,9 @@ pub type Withdrawals = VariableList::MaxWithdrawal arbitrary(bound = "T: EthSpec") ), cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + map_into(FullPayload, BlindedPayload), + map_ref_into(ExecutionPayloadHeader) )] #[derive( Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary, diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 695c0cfdf4f..4dc79ddc999 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -159,40 +159,40 @@ impl ExecutionPayloadHeaderCapella { } } -impl From> for ExecutionPayloadHeaderMerge { - fn from(payload: ExecutionPayloadMerge) -> Self { +impl<'a, T: EthSpec> From<&'a ExecutionPayloadMerge> for ExecutionPayloadHeaderMerge { + fn from(payload: &'a ExecutionPayloadMerge) -> Self { Self { parent_hash: payload.parent_hash, 
fee_recipient: payload.fee_recipient, state_root: payload.state_root, receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom, + logs_bloom: payload.logs_bloom.clone(), prev_randao: payload.prev_randao, block_number: payload.block_number, gas_limit: payload.gas_limit, gas_used: payload.gas_used, timestamp: payload.timestamp, - extra_data: payload.extra_data, + extra_data: payload.extra_data.clone(), base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions_root: payload.transactions.tree_hash_root(), } } } -impl From> for ExecutionPayloadHeaderCapella { - fn from(payload: ExecutionPayloadCapella) -> Self { +impl<'a, T: EthSpec> From<&'a ExecutionPayloadCapella> for ExecutionPayloadHeaderCapella { + fn from(payload: &'a ExecutionPayloadCapella) -> Self { Self { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom, + logs_bloom: payload.logs_bloom.clone(), prev_randao: payload.prev_randao, block_number: payload.block_number, gas_limit: payload.gas_limit, gas_used: payload.gas_used, timestamp: payload.timestamp, - extra_data: payload.extra_data, + extra_data: payload.extra_data.clone(), base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions_root: payload.transactions.tree_hash_root(), @@ -200,20 +200,21 @@ impl From> for ExecutionPayloadHeaderCape } } } -impl From> for ExecutionPayloadHeaderEip4844 { - fn from(payload: ExecutionPayloadEip4844) -> Self { + +impl<'a, T: EthSpec> From<&'a ExecutionPayloadEip4844> for ExecutionPayloadHeaderEip4844 { + fn from(payload: &'a ExecutionPayloadEip4844) -> Self { Self { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom, + logs_bloom: payload.logs_bloom.clone(), prev_randao: payload.prev_randao, block_number: payload.block_number, gas_limit: payload.gas_limit, gas_used: payload.gas_used, timestamp: payload.timestamp, - extra_data: payload.extra_data, + extra_data: payload.extra_data.clone(), base_fee_per_gas: payload.base_fee_per_gas, excess_data_gas: payload.excess_data_gas, block_hash: payload.block_hash, @@ -223,31 +224,33 @@ impl From> for ExecutionPayloadHeaderEip4 } } -impl From> for ExecutionPayloadHeader { - fn from(payload: ExecutionPayloadMerge) -> Self { - Self::Merge(ExecutionPayloadHeaderMerge::from(payload)) +// These impls are required to work around an inelegance in `to_execution_payload_header`. +// They only clone headers so they should be relatively cheap. 
+impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderMerge { + fn from(payload: &'a Self) -> Self { + payload.clone() } } -impl From> for ExecutionPayloadHeader { - fn from(payload: ExecutionPayloadCapella) -> Self { - Self::Capella(ExecutionPayloadHeaderCapella::from(payload)) +impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderCapella { + fn from(payload: &'a Self) -> Self { + payload.clone() } } -impl From> for ExecutionPayloadHeader { - fn from(payload: ExecutionPayloadEip4844) -> Self { - Self::Eip4844(ExecutionPayloadHeaderEip4844::from(payload)) +impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderEip4844 { + fn from(payload: &'a Self) -> Self { + payload.clone() } } -impl From> for ExecutionPayloadHeader { - fn from(payload: ExecutionPayload) -> Self { - match payload { - ExecutionPayload::Merge(payload) => Self::from(payload), - ExecutionPayload::Capella(payload) => Self::from(payload), - ExecutionPayload::Eip4844(payload) => Self::from(payload), - } +impl<'a, T: EthSpec> From> for ExecutionPayloadHeader { + fn from(payload: ExecutionPayloadRef<'a, T>) -> Self { + map_execution_payload_ref_into_execution_payload_header!( + &'a _, + payload, + |inner, cons| cons(inner.into()) + ) } } diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 9b7d3417fa1..cc22bc3ab81 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -4,6 +4,7 @@ use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use std::borrow::Cow; use std::convert::TryFrom; use std::fmt::Debug; use std::hash::Hash; @@ -90,15 +91,15 @@ pub trait AbstractExecPayload: type Merge: OwnedExecPayload + Into - + From> + + for<'a> From>> + TryFrom>; type Capella: OwnedExecPayload + Into - + From> + + for<'a> From>> + TryFrom>; type Eip4844: OwnedExecPayload + Into - + From> + + for<'a> From>> + TryFrom>; fn default_at_fork(fork_name: ForkName) -> Result; @@ -150,31 +151,21 @@ pub struct FullPayload { impl From> for ExecutionPayload { fn from(full_payload: FullPayload) -> Self { - match full_payload { - FullPayload::Merge(payload) => ExecutionPayload::Merge(payload.execution_payload), - FullPayload::Capella(payload) => ExecutionPayload::Capella(payload.execution_payload), - FullPayload::Eip4844(payload) => ExecutionPayload::Eip4844(payload.execution_payload), - } + map_full_payload_into_execution_payload!(full_payload, move |payload, cons| { + cons(payload.execution_payload) + }) } } impl<'a, T: EthSpec> From> for ExecutionPayload { fn from(full_payload_ref: FullPayloadRef<'a, T>) -> Self { - match full_payload_ref { - FullPayloadRef::Merge(payload) => { - ExecutionPayload::Merge(payload.execution_payload.clone()) - } - FullPayloadRef::Capella(payload) => { - ExecutionPayload::Capella(payload.execution_payload.clone()) - } - FullPayloadRef::Eip4844(payload) => { - ExecutionPayload::Eip4844(payload.execution_payload.clone()) - } - } + map_full_payload_ref!(&'a _, full_payload_ref, move |payload, cons| { + cons(payload); + payload.execution_payload.clone().into() + }) } } -// FIXME: can this be implemented as Deref or Clone somehow? 
impl<'a, T: EthSpec> From> for FullPayload { fn from(full_payload_ref: FullPayloadRef<'a, T>) -> Self { map_full_payload_ref!(&'a _, full_payload_ref, move |payload, cons| { @@ -189,11 +180,12 @@ impl ExecPayload for FullPayload { BlockType::Full } - fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - let payload = map_full_payload_into_execution_payload!(self.clone(), |inner, cons| { - cons(inner.execution_payload) - }); - ExecutionPayloadHeader::from(payload) + fn to_execution_payload_header<'a>(&'a self) -> ExecutionPayloadHeader { + map_full_payload_ref!(&'a _, self.to_ref(), move |inner, cons| { + cons(inner); + let exec_payload_ref: ExecutionPayloadRef<'a, T> = From::from(&inner.execution_payload); + ExecutionPayloadHeader::from(exec_payload_ref) + }) } fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { @@ -404,17 +396,9 @@ impl AbstractExecPayload for FullPayload { impl From> for FullPayload { fn from(execution_payload: ExecutionPayload) -> Self { - match execution_payload { - ExecutionPayload::Merge(execution_payload) => { - Self::Merge(FullPayloadMerge { execution_payload }) - } - ExecutionPayload::Capella(execution_payload) => { - Self::Capella(FullPayloadCapella { execution_payload }) - } - ExecutionPayload::Eip4844(execution_payload) => { - Self::Eip4844(FullPayloadEip4844 { execution_payload }) - } - } + map_execution_payload_into_full_payload!(execution_payload, |inner, cons| { + cons(inner.into()) + }) } } @@ -666,6 +650,7 @@ macro_rules! impl_exec_payload_common { $wrapped_field:ident, // execution_payload_header | execution_payload $fork_variant:ident, // Merge | Merge $block_type_variant:ident, // Blinded | Full + $is_default_with_empty_roots:block, $f:block, $g:block) => { impl ExecPayload for $wrapper_type { @@ -675,7 +660,7 @@ macro_rules! impl_exec_payload_common { fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { ExecutionPayloadHeader::$fork_variant($wrapped_type_header::from( - self.$wrapped_field.clone(), + &self.$wrapped_field, )) } @@ -712,15 +697,8 @@ macro_rules! impl_exec_payload_common { } fn is_default_with_empty_roots(&self) -> bool { - // FIXME: is there a better way than ignoring this lint? - // This is necessary because the first invocation of this macro might expand to: - // self.execution_payload_header == ExecutionPayloadHeaderMerge::from(ExecutionPayloadMerge::default()) - // but the second invocation might expand to: - // self.execution_payload == ExecutionPayloadMerge::from(ExecutionPayloadMerge::default()) - #[allow(clippy::cmp_owned)] - { - self.$wrapped_field == $wrapped_type::from($wrapped_type_full::default()) - } + let f = $is_default_with_empty_roots; + f(self) } fn transactions(&self) -> Option<&Transactions> { @@ -755,6 +733,12 @@ macro_rules! impl_exec_payload_for_fork { execution_payload_header, $fork_variant, // Merge Blinded, + { + |wrapper: &$wrapper_type_header| { + wrapper.execution_payload_header + == $wrapped_type_header::from(&$wrapped_type_full::default()) + } + }, { |_| { None } }, { let c: for<'a> fn(&'a $wrapper_type_header) -> Result = @@ -788,7 +772,7 @@ macro_rules! impl_exec_payload_for_fork { fn default() -> Self { Self { execution_payload_header: $wrapped_type_header::from( - $wrapped_type_full::default(), + &$wrapped_type_full::default(), ), } } @@ -806,11 +790,11 @@ macro_rules! 
impl_exec_payload_for_fork { } } - // FIXME(sproul): consider adding references to these From impls - impl From<$wrapped_type_full> for $wrapper_type_header { - fn from(execution_payload: $wrapped_type_full) -> Self { + // BlindedPayload* from CoW reference to ExecutionPayload* (hopefully just a reference). + impl<'a, T: EthSpec> From>> for $wrapper_type_header { + fn from(execution_payload: Cow<'a, $wrapped_type_full>) -> Self { Self { - execution_payload_header: $wrapped_type_header::from(execution_payload), + execution_payload_header: $wrapped_type_header::from(&*execution_payload), } } } @@ -825,6 +809,11 @@ macro_rules! impl_exec_payload_for_fork { execution_payload, $fork_variant, // Merge Full, + { + |wrapper: &$wrapper_type_full| { + wrapper.execution_payload == $wrapped_type_full::default() + } + }, { let c: for<'a> fn(&'a $wrapper_type_full) -> Option<&'a Transactions> = |payload: &$wrapper_type_full| Some(&payload.execution_payload.transactions); @@ -848,6 +837,15 @@ macro_rules! impl_exec_payload_for_fork { } } + // FullPayload * from CoW reference to ExecutionPayload* (hopefully already owned). + impl<'a, T: EthSpec> From>> for $wrapper_type_full { + fn from(execution_payload: Cow<'a, $wrapped_type_full>) -> Self { + Self { + execution_payload: $wrapped_type_full::from(execution_payload.into_owned()), + } + } + } + impl TryFrom> for $wrapper_type_full { type Error = Error; fn try_from(_: ExecutionPayloadHeader) -> Result { @@ -915,11 +913,12 @@ impl AbstractExecPayload for BlindedPayload { impl From> for BlindedPayload { fn from(payload: ExecutionPayload) -> Self { - match payload { - ExecutionPayload::Merge(payload) => BlindedPayload::Merge(payload.into()), - ExecutionPayload::Capella(payload) => BlindedPayload::Capella(payload.into()), - ExecutionPayload::Eip4844(payload) => BlindedPayload::Eip4844(payload.into()), - } + // This implementation is a bit wasteful in that it discards the payload body. + // Required by the top-level constraint on AbstractExecPayload but could maybe be loosened + // in future. + map_execution_payload_into_blinded_payload!(payload, |inner, cons| cons(From::from( + Cow::Owned(inner) + ))) } } From 2fcfdf1a01218f78f3241b85bb8c6f526de17c35 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 15 Feb 2023 11:51:46 +0000 Subject: [PATCH 169/263] Fix docker and deps (#3978) ## Proposed Changes - Fix this cargo-audit failure for `sqlite3-sys`: https://github.com/sigp/lighthouse/actions/runs/4179008889/jobs/7238473962 - Prevent the Docker builds from running out of RAM on CI by removing `gnosis` and LMDB support from the `-dev` images (see: https://github.com/sigp/lighthouse/pull/3959#issuecomment-1430531155, successful run on my fork: https://github.com/michaelsproul/lighthouse/actions/runs/4179162480/jobs/7239537947). 
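Illustrative sketch (not part of any patch in this series): the `From<&'a ExecutionPayload*>` and `Cow`-based impls introduced by #3976 above build headers and blinded payloads from a *reference*, cloning only the small fields (`extra_data`, `logs_bloom`) and reducing `transactions` to a hash root, so callers no longer clone an entire payload just to derive its header. A rough, self-contained sketch of that pattern, using simplified hypothetical types rather than the real Lighthouse API:

```rust
// Hypothetical, simplified stand-ins for ExecutionPayload / ExecutionPayloadHeader.
#[derive(Clone, Default)]
struct Payload {
    extra_data: Vec<u8>,        // small field: cheap to clone into the header
    transactions: Vec<Vec<u8>>, // large field: only a digest is kept in the header
}

#[derive(Clone, Debug, PartialEq)]
struct PayloadHeader {
    extra_data: Vec<u8>,
    transactions_root: u64,
}

// Toy stand-in for `transactions.tree_hash_root()`.
fn transactions_root(txs: &[Vec<u8>]) -> u64 {
    txs.iter().map(|tx| tx.len() as u64).sum()
}

// Convert by reference: no up-front clone of the whole payload is required.
impl From<&Payload> for PayloadHeader {
    fn from(payload: &Payload) -> Self {
        Self {
            extra_data: payload.extra_data.clone(),
            transactions_root: transactions_root(&payload.transactions),
        }
    }
}

fn main() {
    let payload = Payload {
        extra_data: vec![42],
        transactions: vec![vec![0u8; 1024]; 16],
    };
    // Derive the header while keeping ownership of `payload`.
    let header = PayloadHeader::from(&payload);
    assert_eq!(header.extra_data, vec![42]);
    assert_eq!(payload.transactions.len(), 16); // payload is still usable afterwards
    println!("transactions_root = {}", header.transactions_root);
}
```

Roughly speaking, the `Cow` wrappers in the macros serve the same purpose one level up: a borrowed `Cow` lets a blinded payload be built from a reference, while an owned `Cow` is consumed without an extra copy.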
--- .github/workflows/docker.yml | 5 +- Cargo.lock | 79 ++++++++----------- consensus/types/Cargo.toml | 2 +- .../slashing_protection/Cargo.toml | 4 +- .../src/slashing_database.rs | 4 +- 5 files changed, 40 insertions(+), 54 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 46896073add..c3119db3780 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -53,7 +53,7 @@ jobs: x86_64-portable] features: [ {version_suffix: "", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc"}, - {version_suffix: "-dev", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc,spec-minimal"} + {version_suffix: "-dev", env: "jemalloc,spec-minimal"} ] include: - profile: maxperf @@ -65,8 +65,6 @@ jobs: VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} FEATURE_SUFFIX: ${{ matrix.features.version_suffix }} - FEATURES: ${{ matrix.features.env }} - CROSS_FEATURES: ${{ matrix.features.env }} steps: - uses: actions/checkout@v3 - name: Update Rust @@ -106,7 +104,6 @@ jobs: --platform=linux/${SHORT_ARCH} \ --file ./Dockerfile.cross . \ --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX}${FEATURE_SUFFIX} \ - --build-arg FEATURES=${FEATURES} \ --provenance=false \ --push build-docker-multiarch: diff --git a/Cargo.lock b/Cargo.lock index d568ade047f..37c6fe667c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -464,7 +464,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa 1.0.5", + "itoa", "matchit", "memchr", "mime", @@ -816,18 +816,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" -[[package]] -name = "bstr" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" -dependencies = [ - "lazy_static", - "memchr", - "regex-automata", - "serde", -] - [[package]] name = "buf_redux" version = "0.8.4" @@ -1356,13 +1344,12 @@ dependencies = [ [[package]] name = "csv" -version = "1.1.6" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" +checksum = "af91f40b7355f82b0a891f50e70399475945bb0b0da4f1700ce60761c9d3e359" dependencies = [ - "bstr", "csv-core", - "itoa 0.4.8", + "itoa", "ryu", "serde", ] @@ -1827,7 +1814,7 @@ dependencies = [ "enr", "fnv", "futures", - "hashlink", + "hashlink 0.7.0", "hex", "hkdf", "lazy_static", @@ -2558,9 +2545,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -3043,6 +3030,15 @@ dependencies = [ "hashbrown 0.11.2", ] +[[package]] +name = "hashlink" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" +dependencies = [ + "hashbrown 0.12.3", +] + [[package]] name = "headers" version = "0.3.8" @@ -3182,7 +3178,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 1.0.5", 
+ "itoa", ] [[package]] @@ -3299,7 +3295,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.5", + "itoa", "pin-project-lite 0.2.9", "socket2", "tokio", @@ -3590,12 +3586,6 @@ dependencies = [ "either", ] -[[package]] -name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - [[package]] name = "itoa" version = "1.0.5" @@ -4302,9 +4292,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.22.2" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290b64917f8b0cb885d9de0f9959fe1f775d7fa12f1da2db9001c1c8ab60f89d" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" dependencies = [ "cc", "pkg-config", @@ -4731,14 +4721,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -5223,9 +5213,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "oneshot_broadcast" @@ -5845,7 +5835,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c" dependencies = [ "dtoa", - "itoa 1.0.5", + "itoa", "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] @@ -6047,9 +6037,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.18.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d24607049214c5e42d3df53ac1d8a23c34cc6a5eefe3122acb2c72174719959" +checksum = "b4f5d0337e99cd5cacd91ffc326c6cc9d8078def459df560c4f9bf9ba4a51034" dependencies = [ "r2d2", "rusqlite", @@ -6408,16 +6398,15 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.25.4" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c4b1eaf239b47034fb450ee9cdedd7d0226571689d8823030c4b6c2cb407152" +checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" dependencies = [ "bitflags", "fallible-iterator", "fallible-streaming-iterator", - "hashlink", + "hashlink 0.8.1", "libsqlite3-sys", - "memchr", "smallvec", ] @@ -6818,7 +6807,7 @@ version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" dependencies = [ - "itoa 1.0.5", + "itoa", "ryu", "serde", ] @@ -6841,7 +6830,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.5", + "itoa", "ryu", "serde", ] @@ -7700,7 +7689,7 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ - "itoa 
1.0.5", + "itoa", "libc", "num_threads", "serde", diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 6ae185f7fff..7fd730a5143 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -37,7 +37,7 @@ cached_tree_hash = { path = "../cached_tree_hash" } serde_yaml = "0.8.13" tempfile = "3.1.0" derivative = "2.1.1" -rusqlite = { version = "0.25.3", features = ["bundled"], optional = true } +rusqlite = { version = "0.28.0", features = ["bundled"], optional = true } arbitrary = { version = "1.0", features = ["derive"], optional = true } eth2_serde_utils = "0.1.1" regex = "1.5.5" diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 55e7f3f7155..631e54dc4eb 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -12,9 +12,9 @@ path = "tests/main.rs" [dependencies] tempfile = "3.1.0" types = { path = "../../consensus/types" } -rusqlite = { version = "0.25.3", features = ["bundled"] } +rusqlite = { version = "0.28.0", features = ["bundled"] } r2d2 = "0.8.9" -r2d2_sqlite = "0.18.0" +r2d2_sqlite = "0.21.0" serde = "1.0.116" serde_derive = "1.0.116" serde_json = "1.0.58" diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index bd5f97f4d81..c8be851472e 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -162,8 +162,8 @@ impl SlashingDatabase { /// The exclusive locking mode also has the benefit of applying to other processes, so multiple /// Lighthouse processes trying to access the same database will also be blocked. fn apply_pragmas(conn: &mut rusqlite::Connection) -> Result<(), rusqlite::Error> { - conn.pragma_update(None, "foreign_keys", &true)?; - conn.pragma_update(None, "locking_mode", &"EXCLUSIVE")?; + conn.pragma_update(None, "foreign_keys", true)?; + conn.pragma_update(None, "locking_mode", "EXCLUSIVE")?; Ok(()) } From 461bda6e8569fbd11e8386b58ea9849b98748cc2 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 16 Feb 2023 16:54:05 +1100 Subject: [PATCH 170/263] Execution engine suggestions from code review Co-authored-by: Paul Hauner --- .../execution_layer/src/engine_api/http.rs | 18 ++++++++---------- beacon_node/execution_layer/src/engines.rs | 5 ++--- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 3871ca27afd..8f88de79a2b 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -570,8 +570,8 @@ impl CapabilitiesCacheEntry { } } - pub fn engine_capabilities(&self) -> &EngineCapabilities { - &self.engine_capabilities + pub fn engine_capabilities(&self) -> EngineCapabilities { + self.engine_capabilities } pub fn age(&self) -> Duration { @@ -817,7 +817,9 @@ impl HttpJsonRpc { Ok(GetPayloadResponse::Merge(GetPayloadResponseMerge { execution_payload: payload_v1.into(), - // Have to guess zero here as we don't know the value + // Set the V1 payload values from the EE to be zero. This simulates + // the pre-block-value functionality of always choosing the builder + // block. 
block_value: Uint256::zero(), })) } @@ -984,16 +986,12 @@ impl HttpJsonRpc { ) -> Result { let mut lock = self.engine_capabilities_cache.lock().await; - if lock - .as_ref() - .map_or(true, |entry| entry.older_than(age_limit)) - { + if let Some(lock) = lock.as_ref().filter(|entry| !entry.older_than(age_limit)) { + Ok(lock.engine_capabilities()) + } else { let engine_capabilities = self.exchange_capabilities().await?; *lock = Some(CapabilitiesCacheEntry::new(engine_capabilities)); Ok(engine_capabilities) - } else { - // here entry is guaranteed to exist so unwrap() is safe - Ok(*lock.as_ref().unwrap().engine_capabilities()) } } diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index fe4058af009..1ee355e4777 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -17,8 +17,7 @@ use types::ExecutionBlockHash; /// The number of payload IDs that will be stored for each `Engine`. /// -/// Since the size of each value is small (~100 bytes) a large number is used for safety. -/// FIXME: check this assumption now that the key includes entire payload attributes which now includes withdrawals +/// Since the size of each value is small (~800 bytes) a large number is used for safety. const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes @@ -276,7 +275,7 @@ impl Engine { let mut state = self.state.write().await; state.update(EngineStateInternal::AuthFailed); - (**state, CapabilitiesCacheAction::None) + (**state, CapabilitiesCacheAction::Clear) } Err(e) => { error!( From ffeb8b6e05d4023e122138c10fbef6048c801025 Mon Sep 17 00:00:00 2001 From: Divma Date: Thu, 16 Feb 2023 23:34:30 +0000 Subject: [PATCH 171/263] blacklist tests in windows (#3961) ## Issue Addressed Windows tests for subscription and unsubscriptions fail in CI sporadically. We usually ignore this failures, so this PR aims to help reduce the failure noise. Associated issue is https://github.com/sigp/lighthouse/issues/3960 --- beacon_node/network/src/subnet_service/tests/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 9e1c9f51bcc..a407fe1bcf8 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -182,6 +182,7 @@ mod attestation_service { #[cfg(feature = "deterministic_long_lived_attnets")] use std::collections::HashSet; + #[cfg(not(windows))] use crate::subnet_service::attestation_subnets::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD; use super::*; @@ -290,6 +291,7 @@ mod attestation_service { } /// Test to verify that we are not unsubscribing to a subnet before a required subscription. 
+ #[cfg(not(windows))] #[tokio::test] async fn test_same_subnet_unsubscription() { // subscription config @@ -513,6 +515,7 @@ mod attestation_service { assert_eq!(unexpected_msg_count, 0); } + #[cfg(not(windows))] #[tokio::test] async fn test_subscribe_same_subnet_several_slots_apart() { // subscription config From 245e922c7b4c48a3573859f06439c41ae52a579b Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 16 Feb 2023 23:34:32 +0000 Subject: [PATCH 172/263] Improve testing slot clock to allow manipulation of time in tests (#3974) ## Issue Addressed I discovered this issue while implementing [this test](https://github.com/jimmygchen/lighthouse/blob/test-example/beacon_node/network/src/beacon_processor/tests.rs#L895), where I tried to manipulate the slot clock with: `rig.chain.slot_clock.set_current_time(duration);` however the change doesn't get reflected in the `slot_clock` in `ReprocessQueue`, and I realised `slot_clock` was cloned a few times in the code, and therefore changing the time in `rig.chain.slot_clock` doesn't have any effect in `ReprocessQueue`. I've incorporated the suggestion from the @paulhauner and @michaelsproul - wrapping the `ManualSlotClock.current_time` (`RwLock)` in an `Arc`, and the above test now passes. Let's see if this breaks any existing tests :) --- common/slot_clock/src/manual_slot_clock.rs | 7 +++-- lighthouse/tests/beacon_node.rs | 34 ++++++++++++---------- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/common/slot_clock/src/manual_slot_clock.rs b/common/slot_clock/src/manual_slot_clock.rs index 296247fe93b..61299f74ac4 100644 --- a/common/slot_clock/src/manual_slot_clock.rs +++ b/common/slot_clock/src/manual_slot_clock.rs @@ -1,6 +1,7 @@ use super::SlotClock; use parking_lot::RwLock; use std::convert::TryInto; +use std::sync::Arc; use std::time::Duration; use types::Slot; @@ -10,7 +11,7 @@ pub struct ManualSlotClock { /// Duration from UNIX epoch to genesis. genesis_duration: Duration, /// Duration from UNIX epoch to right now. - current_time: RwLock, + current_time: Arc>, /// The length of each slot. 
slot_duration: Duration, } @@ -20,7 +21,7 @@ impl Clone for ManualSlotClock { ManualSlotClock { genesis_slot: self.genesis_slot, genesis_duration: self.genesis_duration, - current_time: RwLock::new(*self.current_time.read()), + current_time: Arc::clone(&self.current_time), slot_duration: self.slot_duration, } } @@ -90,7 +91,7 @@ impl SlotClock for ManualSlotClock { Self { genesis_slot, - current_time: RwLock::new(genesis_duration), + current_time: Arc::new(RwLock::new(genesis_duration)), genesis_duration, slot_duration, } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 053a04f879a..a07502c58ab 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1418,7 +1418,7 @@ fn slasher_slot_offset_flag() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-slot-offset", Some("11.25")) - .run() + .run_with_zero_port() .with_config(|config| { let slasher_config = config.slasher.as_ref().unwrap(); assert_eq!(slasher_config.slot_offset, 11.25); @@ -1430,7 +1430,7 @@ fn slasher_slot_offset_nan_flag() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-slot-offset", Some("NaN")) - .run(); + .run_with_zero_port(); } #[test] fn slasher_history_length_flag() { @@ -1465,7 +1465,7 @@ fn slasher_attestation_cache_size_flag() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-att-cache-size", Some("10000")) - .run() + .run_with_zero_port() .with_config(|config| { let slasher_config = config .slasher @@ -1569,23 +1569,25 @@ fn ensure_panic_on_failed_launch() { #[test] fn enable_proposer_re_orgs_default() { - CommandLineTest::new().run().with_config(|config| { - assert_eq!( - config.chain.re_org_threshold, - Some(DEFAULT_RE_ORG_THRESHOLD) - ); - assert_eq!( - config.chain.re_org_max_epochs_since_finalization, - DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, - ); - }); + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.re_org_threshold, + Some(DEFAULT_RE_ORG_THRESHOLD) + ); + assert_eq!( + config.chain.re_org_max_epochs_since_finalization, + DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, + ); + }); } #[test] fn disable_proposer_re_orgs() { CommandLineTest::new() .flag("disable-proposer-reorgs", None) - .run() + .run_with_zero_port() .with_config(|config| assert_eq!(config.chain.re_org_threshold, None)); } @@ -1593,7 +1595,7 @@ fn disable_proposer_re_orgs() { fn proposer_re_org_threshold() { CommandLineTest::new() .flag("proposer-reorg-threshold", Some("90")) - .run() + .run_with_zero_port() .with_config(|config| assert_eq!(config.chain.re_org_threshold.unwrap().0, 90)); } @@ -1601,7 +1603,7 @@ fn proposer_re_org_threshold() { fn proposer_re_org_max_epochs_since_finalization() { CommandLineTest::new() .flag("proposer-reorg-epochs-since-finalization", Some("8")) - .run() + .run_with_zero_port() .with_config(|config| { assert_eq!( config.chain.re_org_max_epochs_since_finalization.as_u64(), From ebf2fec5d025273ff6f334d9effc7bcfc767278f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 16 Feb 2023 23:34:33 +0000 Subject: [PATCH 173/263] Fix exec integration tests for Geth v1.11.0 (#3982) ## Proposed Changes * Bump Go from 1.17 to 1.20. The latest Geth release v1.11.0 requires 1.18 minimum. * Prevent a cache miss during payload building by using the right fee recipient. This prevents Geth v1.11.0 from building a block with 0 transactions. 
The payload building mechanism is overhauled in the new Geth to improve the payload every 2s, and the tests were failing because we were falling back on a `getPayload` call with no lookahead due to `get_payload_id` cache miss caused by the mismatched fee recipient. Alternatively we could hack the tests to send `proposer_preparation_data`, but I think the static fee recipient is simpler for now. * Add support for optionally enabling Lighthouse logs in the integration tests. Enable using `cargo run --release --features logging/test_logger`. This was very useful for debugging. --- .github/workflows/test-suite.yml | 2 +- Cargo.lock | 1 + testing/execution_engine_integration/Cargo.toml | 1 + testing/execution_engine_integration/src/test_rig.rs | 12 ++++++++---- 4 files changed, 11 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 57fee718300..5ecd5efe36b 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -280,7 +280,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.20' - uses: actions/setup-dotnet@v3 with: dotnet-version: '6.0.201' diff --git a/Cargo.lock b/Cargo.lock index 37c6fe667c1..5d5d3215713 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2463,6 +2463,7 @@ dependencies = [ "fork_choice", "futures", "hex", + "logging", "reqwest", "sensitive_url", "serde_json", diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 26b5f596f22..de3085d2227 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -21,3 +21,4 @@ deposit_contract = { path = "../../common/deposit_contract" } reqwest = { version = "0.11.0", features = ["json"] } hex = "0.4.2" fork_choice = { path = "../../consensus/fork_choice" } +logging = { path = "../../common/logging" } diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 5455b48bce8..ee20129f870 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -100,7 +100,7 @@ async fn import_and_unlock(http_url: SensitiveUrl, priv_keys: &[&str], password: impl TestRig { pub fn new(generic_engine: E) -> Self { - let log = environment::null_logger().unwrap(); + let log = logging::test_logger(); let runtime = Arc::new( tokio::runtime::Builder::new_multi_thread() .enable_all() @@ -281,7 +281,9 @@ impl TestRig { PayloadAttributes { timestamp, prev_randao, - suggested_fee_recipient: Address::zero(), + // To save sending proposer preparation data, just set the fee recipient + // to the fee recipient configured for EE A. 
+ suggested_fee_recipient: Address::repeat_byte(42), }, ) .await; @@ -330,6 +332,7 @@ impl TestRig { .await .unwrap() .execution_payload; + assert_eq!(valid_payload.transactions.len(), pending_txs.len()); /* * Execution Engine A: @@ -394,7 +397,6 @@ impl TestRig { .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); - assert_eq!(valid_payload.transactions.len(), pending_txs.len()); // Verify that all submitted txs were successful for pending_tx in pending_txs { @@ -479,7 +481,9 @@ impl TestRig { let payload_attributes = PayloadAttributes { timestamp: second_payload.timestamp + 1, prev_randao: Hash256::zero(), - suggested_fee_recipient: Address::zero(), + // To save sending proposer preparation data, just set the fee recipient + // to the fee recipient configured for EE A. + suggested_fee_recipient: Address::repeat_byte(42), }; let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(100); From 4aa8a2ab128d63cf124bf5d32c1bbfe0e7086add Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 17 Feb 2023 11:58:33 +1100 Subject: [PATCH 174/263] Suggestions for Capella `execution_layer` (#3983) * Restrict Engine::request to FnOnce * Use `Into::into` * Impl IntoIterator for VariableList * Use Instant rather than SystemTime --- .../execution_layer/src/engine_api/http.rs | 16 ++++------------ .../src/engine_api/json_structures.rs | 4 ---- beacon_node/execution_layer/src/engines.rs | 2 +- beacon_node/execution_layer/src/lib.rs | 11 +++-------- consensus/ssz_types/src/variable_list.rs | 9 +++++++++ 5 files changed, 17 insertions(+), 25 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 8f88de79a2b..4416d6a37e7 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -10,7 +10,7 @@ use serde_json::json; use std::collections::HashSet; use tokio::sync::Mutex; -use std::time::{Duration, SystemTime}; +use std::time::{Duration, Instant}; use types::EthSpec; pub use deposit_log::{DepositLog, Log}; @@ -559,14 +559,14 @@ pub mod deposit_methods { #[derive(Clone, Debug)] pub struct CapabilitiesCacheEntry { engine_capabilities: EngineCapabilities, - fetch_time: SystemTime, + fetch_time: Instant, } impl CapabilitiesCacheEntry { pub fn new(engine_capabilities: EngineCapabilities) -> Self { Self { engine_capabilities, - fetch_time: SystemTime::now(), + fetch_time: Instant::now(), } } @@ -575,15 +575,7 @@ impl CapabilitiesCacheEntry { } pub fn age(&self) -> Duration { - // duration_since() may fail because measurements taken earlier - // are not guaranteed to always be before later measurements - // due to anomalies such as the system clock being adjusted - // either forwards or backwards - // - // In such cases, we'll just say the age is zero - SystemTime::now() - .duration_since(self.fetch_time) - .unwrap_or(Duration::ZERO) + Instant::now().duration_since(self.fetch_time) } /// returns `true` if the entry's age is >= age_limit diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index ace15ebd847..a6ebc195275 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -145,7 +145,6 @@ impl From> for JsonExecutionPayloadV2 withdrawals: payload .withdrawals .into_iter() - .cloned() .map(Into::into) .collect::>() .into(), @@ -173,7 +172,6 @@ impl From> for JsonExecutionPayloadV3 
withdrawals: payload .withdrawals .into_iter() - .cloned() .map(Into::into) .collect::>() .into(), @@ -231,7 +229,6 @@ impl From> for ExecutionPayloadCapella withdrawals: payload .withdrawals .into_iter() - .cloned() .map(Into::into) .collect::>() .into(), @@ -259,7 +256,6 @@ impl From> for ExecutionPayloadEip4844 withdrawals: payload .withdrawals .into_iter() - .cloned() .map(Into::into) .collect::>() .into(), diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 1ee355e4777..ce413cb1139 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -341,7 +341,7 @@ impl Engine { /// deadlock. pub async fn request<'a, F, G, H>(self: &'a Arc, func: F) -> Result where - F: Fn(&'a Engine) -> G, + F: FnOnce(&'a Engine) -> G, G: Future>, { match func(self).await { diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 6798d49bce1..af5e4915569 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1348,16 +1348,11 @@ impl ExecutionLayer { .set_latest_forkchoice_state(forkchoice_state) .await; - let payload_attributes_ref = &payload_attributes; let result = self .engine() .request(|engine| async move { engine - .notify_forkchoice_updated( - forkchoice_state, - payload_attributes_ref.clone(), - self.log(), - ) + .notify_forkchoice_updated(forkchoice_state, payload_attributes, self.log()) .await }) .await; @@ -1723,7 +1718,7 @@ impl ExecutionLayer { capella_block .withdrawals .into_iter() - .map(|w| w.into()) + .map(Into::into) .collect(), ) .map_err(ApiError::DeserializeWithdrawals)?; @@ -1750,7 +1745,7 @@ impl ExecutionLayer { eip4844_block .withdrawals .into_iter() - .map(|w| w.into()) + .map(Into::into) .collect(), ) .map_err(ApiError::DeserializeWithdrawals)?; diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs index ef1f113bbde..3361f750908 100644 --- a/consensus/ssz_types/src/variable_list.rs +++ b/consensus/ssz_types/src/variable_list.rs @@ -176,6 +176,15 @@ impl<'a, T, N: Unsigned> IntoIterator for &'a VariableList { } } +impl IntoIterator for VariableList { + type Item = T; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.vec.into_iter() + } +} + impl tree_hash::TreeHash for VariableList where T: tree_hash::TreeHash, From 9a41f65b892e4c679d353ddc00e87bd93b521026 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 17 Feb 2023 16:25:20 +1100 Subject: [PATCH 175/263] Add capella fork epoch (#3997) --- .../built_in_network_configs/sepolia/config.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 7b2d9c13325..4ba006ec945 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -28,6 +28,10 @@ TERMINAL_TOTAL_DIFFICULTY: 17000000000000000 TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 +# Capella +CAPELLA_FORK_VERSION: 0x90000072 +CAPELLA_FORK_EPOCH: 56832 + # Eip4844 EIP4844_FORK_VERSION: 0x03001020 EIP4844_FORK_EPOCH: 18446744073709551615 From 0b6850221edca109318b227bacb5b2ebb0d89ad0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 20 Feb 2023 17:50:42 
+1100 Subject: [PATCH 176/263] Fix Capella schema downgrades (#4004) --- .../src/schema_change/migration_schema_v14.rs | 60 +++++++++++-- .../src/schema_change/migration_schema_v15.rs | 6 +- beacon_node/beacon_chain/tests/store_tests.rs | 88 +++++++++++++++++++ beacon_node/operation_pool/src/persistence.rs | 14 +++ beacon_node/store/src/errors.rs | 3 +- beacon_node/store/src/hot_cold_store.rs | 5 ++ book/src/database-migrations.md | 6 ++ 7 files changed, 171 insertions(+), 11 deletions(-) diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs index 02422a403b5..be913d8cc5f 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs @@ -2,9 +2,41 @@ use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; use operation_pool::{ PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, }; -use slog::{debug, info, Logger}; +use slog::{debug, error, info, Logger}; +use slot_clock::SlotClock; use std::sync::Arc; +use std::time::Duration; use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; +use types::{EthSpec, Hash256, Slot}; + +/// The slot clock isn't usually available before the database is initialized, so we construct a +/// temporary slot clock by reading the genesis state. It should always exist if the database is +/// initialized at a prior schema version, however we still handle the lack of genesis state +/// gracefully. +fn get_slot_clock( + db: &HotColdDB, + log: &Logger, +) -> Result, Error> { + let spec = db.get_chain_spec(); + let genesis_block = if let Some(block) = db.get_blinded_block(&Hash256::zero())? { + block + } else { + error!(log, "Missing genesis block"); + return Ok(None); + }; + let genesis_state = + if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? { + state + } else { + error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root()); + return Ok(None); + }; + Ok(Some(T::SlotClock::new( + spec.genesis_slot, + Duration::from_secs(genesis_state.genesis_time()), + Duration::from_secs(spec.seconds_per_slot), + ))) +} pub fn upgrade_to_v14( db: Arc>, @@ -41,17 +73,35 @@ pub fn downgrade_from_v14( db: Arc>, log: Logger, ) -> Result, Error> { + // We cannot downgrade from V14 once the Capella fork has been reached because there will + // be HistoricalSummaries stored in the database instead of HistoricalRoots and prior versions + // of Lighthouse can't handle that. + if let Some(capella_fork_epoch) = db.get_chain_spec().capella_fork_epoch { + let current_epoch = get_slot_clock::(&db, &log)? + .and_then(|clock| clock.now()) + .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) + .ok_or(Error::SlotClockUnavailableForMigration)?; + + if current_epoch >= capella_fork_epoch { + error!( + log, + "Capella already active: v14+ is mandatory"; + "current_epoch" => current_epoch, + "capella_fork_epoch" => capella_fork_epoch, + ); + return Err(Error::UnableToDowngrade); + } + } + // Load a V14 op pool and transform it to V12. - let PersistedOperationPoolV14 { + let PersistedOperationPoolV14:: { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, bls_to_execution_changes, - } = if let Some(PersistedOperationPool::::V14(op_pool)) = - db.get_item(&OP_POOL_DB_KEY)? - { + } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? 
{ op_pool } else { debug!(log, "Nothing to do, no operation pool stored"); diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs index f4adc2cf4db..07c86bd931f 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs @@ -43,7 +43,7 @@ pub fn downgrade_from_v15( log: Logger, ) -> Result, Error> { // Load a V15 op pool and transform it to V14. - let PersistedOperationPoolV15 { + let PersistedOperationPoolV15:: { attestations, sync_contributions, attester_slashings, @@ -51,9 +51,7 @@ pub fn downgrade_from_v15( voluntary_exits, bls_to_execution_changes, capella_bls_change_broadcast_indices, - } = if let Some(PersistedOperationPool::::V15(op_pool)) = - db.get_item(&OP_POOL_DB_KEY)? - { + } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? { op_pool } else { debug!(log, "Nothing to do, no operation pool stored"); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 622ea7aecd1..2f40443b996 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2,6 +2,7 @@ use beacon_chain::attestation_verification::Error as AttnError; use beacon_chain::builder::BeaconChainBuilder; +use beacon_chain::schema_change::migrate_schema; use beacon_chain::test_utils::{ test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, }; @@ -22,6 +23,7 @@ use std::collections::HashSet; use std::convert::TryInto; use std::sync::Arc; use std::time::Duration; +use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; use store::{ iter::{BlockRootsIterator, StateRootsIterator}, HotColdDB, LevelDB, StoreConfig, @@ -68,6 +70,7 @@ fn get_harness( let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) + .logger(store.logger().clone()) .fresh_disk_store(store) .mock_execution_layer() .build(); @@ -2529,6 +2532,91 @@ async fn revert_minority_fork_on_resume() { assert_eq!(heads.len(), 1); } +// This test checks whether the schema downgrade from the latest version to some minimum supported +// version is correct. This is the easiest schema test to write without historic versions of +// Lighthouse on-hand, but has the disadvantage that the min version needs to be adjusted manually +// as old downgrades are deprecated. +#[tokio::test] +async fn schema_downgrade_to_min_version() { + let num_blocks_produced = E::slots_per_epoch() * 4; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let spec = &harness.chain.spec.clone(); + + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let min_version = if harness.spec.capella_fork_epoch.is_some() { + // Can't downgrade beyond V14 once Capella is reached, for simplicity don't test that + // at all if Capella is enabled. + SchemaVersion(14) + } else { + SchemaVersion(11) + }; + + // Close the database to ensure everything is written to disk. + drop(store); + drop(harness); + + // Re-open the store. + let store = get_store(&db_path); + + // Downgrade. 
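+ // `migrate_schema` takes the source and target schema versions explicitly, so the same call is repeated below with the versions swapped to upgrade back.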
+ let deposit_contract_deploy_block = 0; + migrate_schema::>( + store.clone(), + deposit_contract_deploy_block, + CURRENT_SCHEMA_VERSION, + min_version, + store.logger().clone(), + spec, + ) + .expect("schema downgrade to minimum version should work"); + + // Upgrade back. + migrate_schema::>( + store.clone(), + deposit_contract_deploy_block, + min_version, + CURRENT_SCHEMA_VERSION, + store.logger().clone(), + spec, + ) + .expect("schema upgrade from minimum version should work"); + + // Rescreate the harness. + let harness = BeaconChainHarness::builder(MinimalEthSpec) + .default_spec() + .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec()) + .logger(store.logger().clone()) + .resumed_disk_store(store.clone()) + .mock_execution_layer() + .build(); + + check_finalization(&harness, num_blocks_produced); + check_split_slot(&harness, store.clone()); + check_chain_dump(&harness, num_blocks_produced + 1); + check_iterators(&harness); + + // Check that downgrading beyond the minimum version fails (bound is *tight*). + let min_version_sub_1 = SchemaVersion(min_version.as_u64().checked_sub(1).unwrap()); + migrate_schema::>( + store.clone(), + deposit_contract_deploy_block, + CURRENT_SCHEMA_VERSION, + min_version_sub_1, + harness.logger().clone(), + spec, + ) + .expect_err("should not downgrade below minimum version"); +} + /// Checks that two chains are the same, for the purpose of these tests. /// /// Several fields that are hard/impossible to check are ignored (e.g., the store). diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 65354e01ac9..35d2b4ce7ee 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -242,6 +242,20 @@ impl StoreItem for PersistedOperationPoolV14 { } } +impl StoreItem for PersistedOperationPoolV15 { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + PersistedOperationPoolV15::from_ssz_bytes(bytes).map_err(Into::into) + } +} + /// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`. impl StoreItem for PersistedOperationPool { fn db_column() -> DBColumn { diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 3e1c6d012d4..fcc40706b30 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -42,9 +42,8 @@ pub enum Error { }, BlockReplayError(BlockReplayError), AddPayloadLogicError, - ResyncRequiredForExecutionPayloadSeparation, SlotClockUnavailableForMigration, - V9MigrationFailure(Hash256), + UnableToDowngrade, InconsistentFork(InconsistentFork), } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 83315808256..965bbb3bd48 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1176,6 +1176,11 @@ impl, Cold: ItemStore> HotColdDB &self.spec } + /// Get a reference to the `Logger` used by the database. + pub fn logger(&self) -> &Logger { + &self.log + } + /// Fetch a copy of the current split slot from memory. pub fn get_split_slot(&self) -> Slot { self.split.read_recursive().slot diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index 0982e10ab90..7219a0f6b69 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -26,10 +26,16 @@ validator client or the slasher**. 
| v3.1.0 | Sep 2022 | v12 | yes | | v3.2.0 | Oct 2022 | v12 | yes | | v3.3.0 | Nov 2022 | v13 | yes | +| v3.4.0 | Jan 2023 | v13 | yes | +| v3.5.0 | Feb 2023 | v15 | yes before Capella | > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release > (e.g. v2.3.0). +> **Note**: Support for old schemas is gradually removed from newer versions of Lighthouse. We +usually do this after a major version has been out for a while and everyone has upgraded. In this +case the above table will continue to record the deprecated schema changes for reference. + ## How to apply a database downgrade To apply a downgrade you need to use the `lighthouse db migrate` command with the correct parameters. From c3c181aa036786573f7cdd08fcec04c3bf492be5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 Feb 2023 11:01:22 +1100 Subject: [PATCH 177/263] Remove "eip4844" network (#4008) --- common/eth2_config/src/lib.rs | 3 +- .../eip4844/boot_enr.yaml | 3 - .../eip4844/config.yaml | 85 ------------------ .../eip4844/deploy_block.txt | 1 - .../eip4844/genesis.ssz.zip | Bin 3546 -> 0 bytes 5 files changed, 1 insertion(+), 91 deletions(-) delete mode 100644 common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml delete mode 100644 common/eth2_network_config/built_in_network_configs/eip4844/config.yaml delete mode 100644 common/eth2_network_config/built_in_network_configs/eip4844/deploy_block.txt delete mode 100644 common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index 45fc709cba6..7e3c025a83b 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -307,6 +307,5 @@ define_hardcoded_nets!( // Set to `true` if the genesis state can be found in the `built_in_network_configs` // directory. 
GENESIS_STATE_IS_KNOWN - ), - (eip4844, "eip4844", GENESIS_STATE_IS_KNOWN) + ) ); diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml deleted file mode 100644 index 4d52cc59752..00000000000 --- a/common/eth2_network_config/built_in_network_configs/eip4844/boot_enr.yaml +++ /dev/null @@ -1,3 +0,0 @@ -- enr:-MK4QLij8YaVQ6fIi09rDuD9fufxBlCZRXwfM1q6SbNJfy5ZZdAvtlnsfqhIeI0IqeOZdaPExVCfZfR4JJTIuKXFR76GAYJGrqHnh2F0dG5ldHOIAAAAAAAAAACEZXRoMpBCynldgwAP_QMAAAAAAAAAgmlkgnY0gmlwhCJ7uEyJc2VjcDI1NmsxoQJpeftU6RbmIhcFllICznlAMJXL3EwHEGhn73_Gk0wrCYhzeW5jbmV0cwCDdGNwgjLIg3VkcIIu4A -- enr:-JG4QK27MZvV3QbwdLt055Yhei27SjAsDXMFGCdl-Q7SDiCgR_qbiW3BmcOClehFVJgMa6IfjHeJBdbC0jvrr2NycOqGAYJLWb5kgmlkgnY0gmlwhCJE_eeJc2VjcDI1NmsxoQIecO7Y9C7J2Bs7RNxXaUkU6BfmPKIhEsDScKAoxENaRYN0Y3CCdl-DdWRwgnZf -- enr:-JG4QExcHW3vzBcE0f_r-93nSA4iBy4qNLthSyTw7p0tlPwjMl1JVTAgLSNHLLZJzOGtelJO4sw37LliuHyJ55zN5J6GAYJLWTvzgmlkgnY0gmlwhCKq1cmJc2VjcDI1NmsxoQJT2d4jtKQbHNw3tZPLhoMlR73o5LNdi-bk_bYq6siwuIN0Y3CCdl-DdWRwgnZf \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/config.yaml b/common/eth2_network_config/built_in_network_configs/eip4844/config.yaml deleted file mode 100644 index d6e6aef57a5..00000000000 --- a/common/eth2_network_config/built_in_network_configs/eip4844/config.yaml +++ /dev/null @@ -1,85 +0,0 @@ -# Prater config - -# Extends the mainnet preset -CONFIG_NAME: 'eip4844' -PRESET_BASE: 'mainnet' - -# Transition -# --------------------------------------------------------------- -TERMINAL_TOTAL_DIFFICULTY: 40 -# By default, don't use these params -TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 -TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 - -# Genesis -# --------------------------------------------------------------- -# `2**14` (= 16,384) -MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 2 -# Mar-01-2021 08:53:32 AM +UTC -MIN_GENESIS_TIME: 1653318000 -# Prater area code (Vienna) -GENESIS_FORK_VERSION: 0x00000ffd -# Customized for Prater: 1919188 seconds (Mar-23-2021 02:00:00 PM +UTC) -GENESIS_DELAY: 0 - - -# Forking -# --------------------------------------------------------------- -# Some forks are disabled for now: -# - These may be re-assigned to another fork-version later -# - Temporarily set to max uint64 value: 2**64 - 1 - -# Altair -ALTAIR_FORK_VERSION: 0x01000ffd -ALTAIR_FORK_EPOCH: 1 -# Merge -BELLATRIX_FORK_VERSION: 0x02000ffd -BELLATRIX_FORK_EPOCH: 2 -# Sharding -EIP4844_FORK_VERSION: 0x83000ffd -EIP4844_FORK_EPOCH: 3 - -# TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. 
-TRANSITION_TOTAL_DIFFICULTY: 40 - - -# Time parameters -# --------------------------------------------------------------- -# 12 seconds -SECONDS_PER_SLOT: 12 -# 14 (estimate from Eth1 mainnet) -SECONDS_PER_ETH1_BLOCK: 14 -# 2**8 (= 256) epochs ~27 hours -MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -# 2**8 (= 256) epochs ~27 hours -SHARD_COMMITTEE_PERIOD: 256 -# 2**11 (= 2,048) Eth1 blocks ~8 hours -ETH1_FOLLOW_DISTANCE: 15 - - -# Validator cycle -# --------------------------------------------------------------- -# 2**2 (= 4) -INACTIVITY_SCORE_BIAS: 4 -# 2**4 (= 16) -INACTIVITY_SCORE_RECOVERY_RATE: 16 -# 2**4 * 10**9 (= 16,000,000,000) Gwei -EJECTION_BALANCE: 16000000000 -# 2**2 (= 4) -MIN_PER_EPOCH_CHURN_LIMIT: 4 -# 2**16 (= 65,536) -CHURN_LIMIT_QUOTIENT: 65536 - - -# Fork choice -# --------------------------------------------------------------- -# 40% -PROPOSER_SCORE_BOOST: 40 - -# Deposit contract -# --------------------------------------------------------------- -# Ethereum Goerli testnet -DEPOSIT_CHAIN_ID: 1331 -DEPOSIT_NETWORK_ID: 69 -# Prater test deposit contract on Goerli Testnet -DEPOSIT_CONTRACT_ADDRESS: 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/eip4844/deploy_block.txt deleted file mode 100644 index 573541ac970..00000000000 --- a/common/eth2_network_config/built_in_network_configs/eip4844/deploy_block.txt +++ /dev/null @@ -1 +0,0 @@ -0 diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/eip4844/genesis.ssz.zip deleted file mode 100644 index 88b405071058d5163f5757e775c7cea3dd4b1183..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3546 zcmWIWW@Zs#-~dAQe7jHvC=g@OWZ-5{U`S8ROD)bU)+;Wq3Ju|9U_Wndo?P2!1IDEl z+zgB?FPIq^z{K0LnHgd%5(hqpFTYu{*W2;tls(gqI~Y&j)RWnMZi-TB+}Y%~8C{DE zqusJ9rW_9nG{0@5)qQH=K0jY+WBx@3;m=N}1U(CyK0A7s+0kTO>j@nTY6TVR>?f_Cfb@`kC4Vby-KgCKNu{b}Y31 z+}y$v|KA&fcZtNr2+ZDK|Mt|S>-*%keNA|KXs4*O{oCD}cIDn&X?>Ly=G{?x#D%~= z`>?C8L{_}GQTq9b{vvs;nIh&puRW{$YGuDk+uCGJUGs}vQSI*U^Jc|P5w%dH) zs>1IpzVSZ`6En{d{`xNbdD&Ipcb{(lICXaEaj(LY?;cw!n$8Cl7HVi z#y9il>sLp3&xf$xi;t+PiMX+K`TN|Txj|)@JB@>miL*|#7mNDy;Yf>YxbyR8^^>F| z<8AaRkDfiBt!^eM>~%tNkGg<)QrX^m?$G~-uUT$9_(7EU8r$}m{*L!I7nQ%;b<6#n zdg{}qXS0O=e)}%J$Ke03g&oh&O}Rh+|DRh+uVy-XF)eI+d;QDH;2R4qLhS6)o?UV6 z{Of0~bwgZQYF~!?sXc<)*)nyXZ#^vJUK|zg@%!1A&OFUZ_p~& Date: Tue, 21 Feb 2023 11:05:36 +1100 Subject: [PATCH 178/263] Suggestions for Capella `beacon_chain` (#3999) * Remove CapellaReadiness::NotSynced Some EEs have a habit of flipping between synced/not-synced, which causes some spurious "Not read for the merge" messages back before the merge. For the merge, if the EE wasn't synced the CE simple wouldn't go through the transition (due to optimistic sync stuff). However, we don't have that hard requirement for Capella; the CE will go through the fork and just wait for the EE to catch up. I think that removing `NotSynced` here will avoid false-positives on the "Not ready logs..". We'll be creating other WARN/ERRO logs if the EE isn't synced, anyway. * Change some Capella readiness logging There's two changes here: 1. Shorten the log messages, for readability. 2. Change the hints. 
Connecting a Capella-ready LH to a non-Capella-ready EE gives this log: ``` WARN Not ready for Capella info: The execution endpoint does not appear to support the required engine api methods for Capella: Required Methods Unsupported: engine_getPayloadV2 engine_forkchoiceUpdatedV2 engine_newPayloadV2, service: slot_notifier ``` This variant of error doesn't get a "try updating" style hint, when it's the one that needs it. This is because we detect the method-not-found reponse from the EE and return default capabilities, rather than indicating that the request fails. I think it's fair to say that an EE upgrade is required whenever it doesn't provide the required methods. I changed the `ExchangeCapabilitiesFailed` message since that can only happen when the EE fails to respond with anything other than success or not-found. --- .../beacon_chain/src/capella_readiness.rs | 17 ++--------------- beacon_node/client/src/notifier.rs | 3 ++- 2 files changed, 4 insertions(+), 16 deletions(-) diff --git a/beacon_node/beacon_chain/src/capella_readiness.rs b/beacon_node/beacon_chain/src/capella_readiness.rs index b1563210585..bb729d89997 100644 --- a/beacon_node/beacon_chain/src/capella_readiness.rs +++ b/beacon_node/beacon_chain/src/capella_readiness.rs @@ -21,8 +21,6 @@ pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300; pub enum CapellaReadiness { /// The execution engine is capella-enabled (as far as we can tell) Ready, - /// The EL can be reached and has the correct configuration, however it's not yet synced. - NotSynced, /// We are connected to an execution engine which doesn't support the V2 engine api methods V2MethodsNotSupported { error: String }, /// The transition configuration with the EL failed, there might be a problem with @@ -44,11 +42,6 @@ impl fmt::Display for CapellaReadiness { execution endpoint: {}", error ), - CapellaReadiness::NotSynced => write!( - f, - "The execution endpoint is connected and configured, \ - however it is not yet synced" - ), CapellaReadiness::NoExecutionEndpoint => write!( f, "The --execution-endpoint flag is not specified, this is a \ @@ -56,8 +49,7 @@ impl fmt::Display for CapellaReadiness { ), CapellaReadiness::V2MethodsNotSupported { error } => write!( f, - "The execution endpoint does not appear to support \ - the required engine api methods for Capella: {}", + "Execution endpoint does not support Capella methods: {}", error ), } @@ -115,12 +107,7 @@ impl BeaconChain { } if all_good { - if !el.is_synced_for_notifier().await { - // The EL is not synced. 
- CapellaReadiness::NotSynced - } else { - CapellaReadiness::Ready - } + CapellaReadiness::Ready } else { CapellaReadiness::V2MethodsNotSupported { error: missing_methods, diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index c1d830bc089..fb8a9b6349b 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -466,13 +466,14 @@ async fn capella_readiness_logging( error!( log, "Not ready for Capella"; + "hint" => "the execution endpoint may be offline", "info" => %readiness, - "hint" => "try updating Lighthouse and/or the execution layer", ) } readiness => warn!( log, "Not ready for Capella"; + "hint" => "try updating the execution endpoint", "info" => %readiness, ), } From b72f273e47c04d14d0fdb32ca7f451312ca852c6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 Feb 2023 15:33:27 +1100 Subject: [PATCH 179/263] Capella consensus review (#4012) * Add extra encoding/decoding tests * Remove TODO The method LGTM * Remove `FreeAttestation` This is an ancient relic, I'm surprised it still existed! * Add paranoid check for eip4844 code This is not technically necessary, but I think it's nice to be explicit about EIP4844 consensus code for the time being. * Reduce big-O complexity of address change pruning I'm not sure this is *actually* useful, but it might come in handy if we see a ton of address changes at the fork boundary. I know the devops team have been testing with ~100k changes, so maybe this will help in that case. * Revert "Reduce big-O complexity of address change pruning" This reverts commit e7d93e6cc7cf1b92dd5a9e1966ce47d4078121eb. --- .../src/per_block_processing.rs | 6 +- consensus/types/src/beacon_block.rs | 98 ++++++++++++++++++- consensus/types/src/beacon_state.rs | 1 - consensus/types/src/free_attestation.rs | 13 --- consensus/types/src/lib.rs | 2 - 5 files changed, 99 insertions(+), 21 deletions(-) delete mode 100644 consensus/types/src/free_attestation.rs diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index e12fb59565e..4f686200b01 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -180,7 +180,11 @@ pub fn per_block_processing>( )?; } - process_blob_kzg_commitments(block.body())?; + // Eip4844 specifications are not yet released so additional care is taken + // to ensure the code does not run in production. 
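+ // `matches!` evaluates to a plain bool without binding the variant's contents, so `process_blob_kzg_commitments` below is reached only for Eip4844 blocks.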
+ if matches!(block, BeaconBlockRef::Eip4844(_)) { + process_blob_kzg_commitments(block.body())?; + } Ok(()) } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index f960b21178f..60dd781a67f 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -752,19 +752,65 @@ mod tests { }); } + #[test] + fn roundtrip_capella_block() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + let spec = &ForkName::Capella.make_genesis_spec(MainnetEthSpec::default_spec()); + + let inner_block = BeaconBlockCapella { + slot: Slot::random_for_test(rng), + proposer_index: u64::random_for_test(rng), + parent_root: Hash256::random_for_test(rng), + state_root: Hash256::random_for_test(rng), + body: BeaconBlockBodyCapella::random_for_test(rng), + }; + let block = BeaconBlock::Capella(inner_block.clone()); + + test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| { + BeaconBlock::from_ssz_bytes(bytes, spec) + }); + } + + #[test] + fn roundtrip_4844_block() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + let spec = &ForkName::Eip4844.make_genesis_spec(MainnetEthSpec::default_spec()); + + let inner_block = BeaconBlockEip4844 { + slot: Slot::random_for_test(rng), + proposer_index: u64::random_for_test(rng), + parent_root: Hash256::random_for_test(rng), + state_root: Hash256::random_for_test(rng), + body: BeaconBlockBodyEip4844::random_for_test(rng), + }; + let block = BeaconBlock::Eip4844(inner_block.clone()); + + test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| { + BeaconBlock::from_ssz_bytes(bytes, spec) + }); + } + #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; - let spec = E::default_spec(); + let mut spec = E::default_spec(); let rng = &mut XorShiftRng::from_seed([42; 16]); - let fork_epoch = spec.altair_fork_epoch.unwrap(); + let altair_fork_epoch = spec.altair_fork_epoch.unwrap(); - let base_epoch = fork_epoch.saturating_sub(1_u64); + let base_epoch = altair_fork_epoch.saturating_sub(1_u64); let base_slot = base_epoch.end_slot(E::slots_per_epoch()); - let altair_epoch = fork_epoch; + let altair_epoch = altair_fork_epoch; let altair_slot = altair_epoch.start_slot(E::slots_per_epoch()); + let capella_epoch = altair_fork_epoch + 1; + let capella_slot = capella_epoch.start_slot(E::slots_per_epoch()); + let eip4844_epoch = capella_epoch + 1; + let eip4844_slot = eip4844_epoch.start_slot(E::slots_per_epoch()); + + spec.altair_fork_epoch = Some(altair_epoch); + spec.capella_fork_epoch = Some(capella_epoch); + spec.eip4844_fork_epoch = Some(eip4844_epoch); // BeaconBlockBase { @@ -809,5 +855,49 @@ mod tests { BeaconBlock::from_ssz_bytes(&bad_altair_block.as_ssz_bytes(), &spec) .expect_err("bad altair block cannot be decoded"); } + + // BeaconBlockCapella + { + let good_block = BeaconBlock::Capella(BeaconBlockCapella { + slot: capella_slot, + ..<_>::random_for_test(rng) + }); + // It's invalid to have an Capella block with a epoch lower than the fork epoch. 
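+ // The bad block below reuses `altair_slot`, whose epoch falls before the `capella_fork_epoch` configured earlier in this test.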
+ let bad_block = { + let mut bad = good_block.clone(); + *bad.slot_mut() = altair_slot; + bad + }; + + assert_eq!( + BeaconBlock::from_ssz_bytes(&good_block.as_ssz_bytes(), &spec) + .expect("good capella block can be decoded"), + good_block + ); + BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) + .expect_err("bad capella block cannot be decoded"); + } + + // BeaconBlockEip4844 + { + let good_block = BeaconBlock::Eip4844(BeaconBlockEip4844 { + slot: eip4844_slot, + ..<_>::random_for_test(rng) + }); + // It's invalid to have an Capella block with a epoch lower than the fork epoch. + let bad_block = { + let mut bad = good_block.clone(); + *bad.slot_mut() = capella_slot; + bad + }; + + assert_eq!( + BeaconBlock::from_ssz_bytes(&good_block.as_ssz_bytes(), &spec) + .expect("good eip4844 block can be decoded"), + good_block + ); + BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) + .expect_err("bad eip4844 block cannot be decoded"); + } } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index e70b8842758..c98df48d14e 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -710,7 +710,6 @@ impl BeaconState { .ok_or(Error::ShuffleIndexOutOfBounds(index)) } - // TODO: check this implementation /// Convenience accessor for the `execution_payload_header` as an `ExecutionPayloadHeaderRef`. pub fn latest_execution_payload_header(&self) -> Result, Error> { match self { diff --git a/consensus/types/src/free_attestation.rs b/consensus/types/src/free_attestation.rs deleted file mode 100644 index dd3782d3ce1..00000000000 --- a/consensus/types/src/free_attestation.rs +++ /dev/null @@ -1,13 +0,0 @@ -/// Note: this object does not actually exist in the spec. -/// -/// We use it for managing attestations that have not been aggregated. 
-use super::{AttestationData, Signature}; -use serde_derive::Serialize; - -#[derive(arbitrary::Arbitrary, Debug, Clone, PartialEq, Serialize)] -pub struct FreeAttestation { - pub data: AttestationData, - pub signature: Signature, - #[serde(with = "eth2_serde_utils::quoted_u64")] - pub validator_index: u64, -} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 8d9156ff5d3..2926a434b10 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -47,7 +47,6 @@ pub mod fork; pub mod fork_data; pub mod fork_name; pub mod fork_versioned_response; -pub mod free_attestation; pub mod graffiti; pub mod historical_batch; pub mod historical_summary; @@ -154,7 +153,6 @@ pub use crate::fork_name::{ForkName, InconsistentFork}; pub use crate::fork_versioned_response::{ ExecutionOptimisticForkVersionedResponse, ForkVersionDeserialize, ForkVersionedResponse, }; -pub use crate::free_attestation::FreeAttestation; pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; From 729c178020a6d4921c7f0cea8802a953f58f9d2e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 Feb 2023 17:18:28 +1100 Subject: [PATCH 180/263] Revert Sepolia genesis change (#4013) --- .../sepolia/genesis.ssz.zip | Bin 273912 -> 136489 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/sepolia/genesis.ssz.zip index 64aa1a5be9f701031b05e53e9c8533720e00439f..1321634cea6faa40295d0ead46ee9a0479e6b529 100644 GIT binary patch delta 88 zcmexyTVUlX4$%N_W)=|!4j^P%doF|l0vbhIMH#n>GEEl|XXp6qeNiw`go$CZgbqW1 fHzSh>Gf@3>R$Z1L5mq*kdPX3O1k$^JiWnFGRDutU delta 132634 zcmZ5`S6tIe7wva!97RP%K&dJUDnbOLm#8R+2#E9=5drBSCA8#!5D}3kQl&-dJ<@B4 z^iHIN03iuI^w2^e^?L8)eVK>ZFZ;J=X0NsOtm2Y?5AhfHUg{k`aruA$``^F+J5fGt z!qOYLXn*-w7+(b6Kec?vQZEDlH+t-E^k0|p{}&w*IQGBlEBF4V^FQJLIe2+^czJ>x z{octzJRSX@Uhmx?4ql#4F3zAg{x0r*5Esw?Z{HW>>*(#}?qUyecJy@gb@7$)^$j*P zIsL!m{}umwW4fX0;2+5Szkg1w9RJ_{{@(&@Fin4%I#2#uHJBk>x43o!3Wc5%ov{sX zZr1y6c8mXmmNyiNo4wv}K!a}D*1DOSQ<*$X^u?yqiIJ1*cP2932B9quX9W*zamw3Z zTlgiA3;T{6|IH~QKlYxs{b=^x2Y1@Hiul0DhRVC`d*6=-pJ0Ra|2<*FLLz^2v_x)^ zlB(=W{pu0t2F({JE)?x^s;0h6*4bE1ql@#_hx6CCj7 zKM6Y!fL(ZQXN-e@aF7b`77K!7^+I7SHA{V?sRv>dwR`OUDCgL=Vl1l+xGosb*j}6{ zrDoMrnN~F}el(4%mGy*cLLtsIWRQ9$PvTRIJ4=F+tb@B%S$RAop{9q!UH*hgzcy4okLQ+J<$ z>6@m#UXt0g0hKDZw6Ic&MkBy?H-vJP{6Un7D-o&Yf19qJ_iR*_@aD4DgZXFO&t%^h z#NlP9Caxn3G!NlE4-3T|ZYcC^`+kmghw(QICULbYc16T~bH5=6&|yah-tWct_kA_G)xo!VVFAGn^hTvFjhQ>EJX!OtAcCAW(R0U>gU$W3 z^-lbJH$3V3_EF?0PdxAGbu(J$fZN}9t{EX75GUra+~pipf-^|>bt*mPMmj%OcgLea>E|2$(>Gh| z4~Bkx#U+jl8Z%sX&rC%psruY=o1|ao&MPyzJreU$DqOUs!V5q=j?#k`8SS+%(ob4l z9?sllP;pd#ir1k(HQ}FBkmu0*Y9IFV+%xy!({%K=(`P*29az66^-2h(Jje(kPi#Fm z+Vv=I+Mq3-9zBbvuN=_uVb7vL?92PJE=G3FcWj-!NPIp#=Uwjm?~!)8KbHKdHh8^L zPyst=?SsHK(tsGg_QH@9di&&_lDOB#gF2(xcXV~ydCX}()=`RXNVc#_Qtk5@o71Dx zXBlHjG`ynpp~?R}(cSFK#E{);BJ7isLFLbb)-y|HgVxD!+E-qAh#j1U26(dQ;_HT! 
z(5=nG>r-Y}?hv=P5x2w03AyoK1f{GC>VyEhBxQqKpt?ti{M=+CUX80?U}&^)yYu`b z9y?xz;TGLSsrW5LhEMBY?I=aRaV~NIYhd0q5%TbmI~BRu5|I0HDvm@_v^jt#?p7}N zS0vp2<+x->0+kO01rA|^*wd;(!^I?o!8E@ro#i1rQ9e+=K34sM%h1J?ZdZ0igj+$w zd~Oyf0&A(8x+b%P+>rgHKJrF_YgvQ%VD~|`4j=;J|>FrcZe z4EMv;p&I*>`)X_KI|M~=8Y8lY_TY)#$zDu?}qj&50d?{8KLN<~77My_ql ztCpsgy9=vw+J$pQ<@*ci`GTkVYeIROjy^gn$+X_KlGv}V)K6S2Vc+wMyXasBgtD{M z9yccbEjmnjqr3|N&U8QM=@%qT!=oQm?!agd`fx1LT4`F{{_Vl% z<9ZTt@9Xwrl+oR3fa3ewk8Z(cn8}|OM}%VSB^Skpnam@;XqJxR8Kd14ZrL0;+5kEEvNFctI z_P}v-Swlulmw=jzJ{@8Q6%)$0$K0zw9$DwEieIFXYs9}1;_#H+t?04Qlu(8>)r|&- zDXk&yOCe8a1TGZ=77mh1mg%H-^VFW(clBRU*P_MU0QLK)iF@ z`LWiwr8!buKhKghDl{nMu?`Ws5)5vS1KP)p5zBGWY{ucLzw~%HQ2M%(n`tB%n zqF~?pd+Ymy!Q);VE2Hy9JwoP>BiIV>85xobCI*wN_Ix{`52ETmPD!~DQk>MrXFC~c z8B1!GIr!N%XR!45R{w*|Iz6-VERVK7o{o>jSaQWaar+&}ye+YUCqL-loXEqWdZO^f zmR|04b_TGsgs|rFLI}eNS)POf7J=2CH!Bb#%PSM5_|ttg@{X5IDL-pGEdiWmi*wof zh%!3f!_VU~WA7cN6G@II)V{SN;g%#6ODf%<*On57hAK8hRp-}uil;@zcmu!B&x9?B z-QY0B=2Jq%S(#;yf5LUL)A00>7*bOuqMWnRJGM=}|%^G0bK(U+-0)zCRz4$7Qt_8!oH{w(o$nC+_U&bl9Ct6=PYHx?3IUMU*kCRJlWFhkaSkc`hmWplvmL`tY@n7xaMwy)RFeVmD-Z6@6CJ|j94PD z=k(L|-0|;c#AXB*>`StnC7uY#qq9RTOcg*33dWlFSz zpk|*B+CA5dn;{}(PBls2hNuz6>VVxxF;|_VPjPa$@KSBdw*0EDA;;q`_1a2VlNIza*dPnZ_RaKFt7VnL zYdZuV#6fwWf`rbL!V>VeD}Jc6R9loH_`e>aIZ7DnrLAB)TcDEC!B%y^TdaVoz`c09 zxZ|Ef55ipSGW6Y^QGIG_OuG83*PY8Jk-?63N5K}!pRB73o*R3ZBs4{zuXsMT!~RoT zyzC?DZ%ui zRdwA)Uu$aw348!zL$p7`X_EujwVtCZ#upJnj=WtjI2CQt#y)Zn8osM;c7eDMm(y#N z^x{3lTszNR^;~07gN`SDkv0%Q4ox7FDFRF>W0Z}2GoWcvSNpC<0BX?6v?*ZZupjYc z|7qwXSZ7phF`%nMJR@?#K}Sv9rTJqi9z&#C9Ye8dUYUsCGIzQR%r1F@pRv3@ruSY( z=4zm|8*DwMc>#61!Szu>7&yC(qetAbf1Zl>|K;3nVK`4@3Nqt^9ogxo^d*dD1u46w zL&p#}Pi4hpHL~?>5w$CubC@qJt}hiI4NpL=V77G!me_}r!9qRGXWXOiOVW;tleOaS z%}@&)2Kr4r!MB{}Ok7Hta<@kyL3^$A1N6?SiR0mPtJ^o7=)}X?PVxCs8ga z%=0!UsX?!&r`3HJ!jP0}R<(u#p83I{An5XGK4XD(^U6^}^R@lr$y+OJ$K5y$sydN! 
z7jwXx*&r*D`jiq;g!iX$&Rx@qvw(qoOHdETvu>RVmi%KLjQbLJNLU<1!wp2=$=zI< z`pMg2V_l1OZyQ!%0EUSUK56$z?SoTCj&pGxhBBwn;$PPWcp@uIVrpBv3E&Wx71XGO zId>*h4XB>$j}Wq!mY%AA8C`kYVc1Ua-PCnj2MbNgh#R=mWIcM`7=Ki@8mud@*TX|v=FvvWV| zgFIjw$(M2gz3SlAy;=Z}302WxBf5nQ3Nn(bJSTUg@hQWsI!ZTM6iM>GJ(S>+=w(tC z^3HGX@Rm$+n&|ay*%v6cv1l2ge`tNRipb%V0YLWJK|}$Uoy2+DV4&?c z;T+1@ON@~d&>LvVxstjYpL^sPI?TU}4?l^;OBT(@cSZl;ivRpz!3cSPFy6G2^WW~5 zhI(?06lO|ZY6y`6IeK%Nr4w=xn^ z!oVGso&*`4Q}X5pWb`ZdbNRN(dvga_lmM=gwR(VJYk_~nved3 zlgVks79C>z&r6cl*ay%gg?Rk=qPkFz1%`93ju8AW_KoT2K{KYe_r$Or0kFUux`~fC zfa94O>#J{!Bb1XNHk7Y(q>-n?pJK*5ey^_?4$N>TR6JZb@X_Pa2plioSFrN3J6Na% z>6VyD1>7)7JQFq=?!;=Y(c3*)UQm8IgKZNLvV#W691Q&WmI@zu`O|>%o7t_$rJzB) z_KMu*MPHUy`7yJzv18^p4MQ65iOO#!1Fe^s$g#Y6k?JK;&BEE1U-}g!VKMQ?s1RB= zoY_c8tLZV;c%BkUiFZ3gQNASaAKzGiqt>bx6j0n227cZX6sWSl|E8BPxJ5(nBhXWJ51mx_yvTN(P*!r0pQxP3PXV|9qIwR`1w9EC=7M>5ZL~iYq=t183 z*K>LQ;wE#@fj zAN1D9>-0m8l@m3Jc$+)A$iPs2Aheip}wI?s&uO!mEzS9$P zb=Q+q{is#R+M)J2g#DNl0Y)w?<0G_yh3QPjJup zZaghTC~0-GiZ7Y3PA>#@%s@o*ky&bw(#`!|}Q;<;ufTiuE7Q_>FU&We-{RSnDjyGzey0fjLoQb%Ls?Ol?cdN8UY%zOaH=_W9Wj zQoC0qpm9N*v-_~0prtviuCjw|>lyQ;6a@xN5Wix8j=z(;>wf!YplL%UTymvtZ&y;6 zKn{$!#9D{a$-(uy#;0&HM&i1Wm&gq}6 zg-%t%@`w;aTS1%BCnN8oG@)mMw`1ON$-%(^0X|V*jTuOFzaMjk2_1&IHK6Ib9A|zV z&RqYtFyOvnvy>X5-wmDoRlJ^cb_5VQrI|@<%pk*&cmD#$CQU>!tWvryv`38>Fnny1K$o1JqDzF*UPw>Yt z3eB$yJWK^)F_SX)(W4(*J1C;AY31WQ3azypmj0})5SM{C`cgbl#~)7x3dsM>f^Qy7 zkeXD?zpSo`o)i9~G5?KuF1T=#@x#4aFCH$!-}q#(8h!HA1CEl@R7do#5;NuI=8E~z z@vh+^J))#XdU_Px(`aR6NhY3*0QuPeX%QY3B&-jy}`onCXTxSIn1pxAk$Pq2hzw+;ONAH1#u_E58zmz<-e7zo1i$k8?c*U_t9&*^~?(ph+^A13=~lW&%7mv3!&<+u}>Tb~`T$&xZ6E zidob#lBlV>``j6eBcJ3HZcts{2QcZrIIF*GF|?2egBx#EZglJ$5X!xmmcBt4boX^j zu@)_~4Lg;90j&g846SMEAp$DA;o}1=9cO}vuf~M~?Y6Xd15=`Z@SEw4RaL)Xs=Wm; z#qC&XE^K?QD@c)nvPszHw$VZ~2p6U2nRlTDOkH zJG;P#{BGa(INJX;q{LDvIQ-~){EaES+9~Pll2~2Z5j5Lb$a>k8qwN1DyvK9OueodS zrnqtFq;Gy3_62vgrutPcyZoGyoO(#L=ghv$!h0n=eRhC(-S}WREi2at0sKyRc+`Sa z^`BB?(L$9jNv|8PE>x97U7B&-lkV^Q^<7B1!jD0j#l(wyW>ZPcTd3zPq8UY7*y_k^ zKf$r>rD*i|!q&9EcRbayAStkcxlpt;@}Bu;0ZDQ*D{hh8hK&twayfJ}GuJ~}#%i^M z?`{WomcuomVpkhc_PH3~qJZ`pb}wJ0O?rB)Sm%{?y;H02 zdcW(Pzo`L8c6QHaA()Bs-Gu_C81T^LK>xDWso$;|XsDWpz((c?xN}6RUKzsEKuAE- z<)cJ1^i#0_^UmPc(_H*pX=BXJp9#i9vAb6`$w{R1ptCIVjV*aVw9{aRQLaoSQ4A+G zf|~b#N+9}JjgP0Fh4pn9W3ND81Jo5Jk#<)ROzWzTpUo;>x`KSyJlAErXYCx{ z-M2Bko(AZDAoY%#KBc2ukvf07Q*DJmBPd$*LBp?7U3x&4mn1r^q(?V2M>YOdBe!{V`lYSkb3jIn zt{e+tBamumMqy=mkFsHs+Xe(|-qqK1C8SV+ zxP&^Z*QP@!kprD@OARgbGc6vRRIRMZH%&lXEoJ2!Z0tW#NVmqnY@AE?@M9fgo&JzY zAnWYuGc$3?O_(*E&8AMc{JX(#hTsU;;US__=vZGcN^mOVfc|X`*Lfr?_fpMn@eG%f zXi@s%$Z%F=1%k&`#eQxDats_BG681iu?E69Uo8;Gt(1Vs`cNU!>PPK&Nd=IB#=rBo z{7*l%E{hTr2F1F>o<6TxUs{sk*{ykQb$g__p0Dn#rf^aE4@2rlnwC3!7q`JpB6c@_ z4WiM1HY(199tG_8TVTdcqT3Y9CYZruLYw7Abhp3y{y8WFtpWX{D+?7XGk3t1xy72f zPpXvJAEmIA&OPUI)ak6oMQ)tA2wb6)|BFFLlry{?*N)1>#B101WX$GNeb^PyUF%)@ zL<#gc_GPCR(hgHw$DM!mG1&Wbk=lCcXI_ZtL*8d zRUzE78N?*b9q)2*8nkt?ue~fgwy4q)7!VP@iWFsxdX6@6MTzU2XF0C`!)*xZi6BMy z)wvs}ZnhF=`1e)I5KM;2bqGmn3A7oc@^+E_RRs5d@T!*7Xi2tW83jLY0v$iVa+k5U z4~M?;N)<3+M~;VO=tah4$nd9o*W+5;#7bVrakQcRd`!6if`q+Uw$#fin}bueUVQxz zWVJFIZAZJTvfkLF7~ud{5fG#Ch+tDrA}WtcOmBu};7~B?UGG`e$RB}kJwkuH+xiAB zE@;SnryGb{w?mubP z4O?yv-+8>Vq}zd%g->76X8>~rd-P=ac9`mtn*cYWYOgGV1^5zd4+nZAqYS9QO4pu_ zKYp5Xe#W%Yn)vti^R4M4{og$K=DhBy-T-CHhGoR=L4u~<4R1?-AD$=&N;{cB;Bquk znuFmY+B`d`)o%Rec-)yG!ckkwD?BclZ!OzMZC2rUh8zVLFp)j;D9!O;%s2g>aE_4} z+>bmO{ecXmdku!p_SSz^Ig@`H8l;!kP^hK9aZzO-MXMLe#+dm}|96Q{eZDlU+&lYK z@-caKkm4H-OSky_Q%K5vZEULA!_K4$R+$Pk%jE%~Ong7*)d}_$xe&_&2dq{_ z6s~Hs>J699vM`)7-2T~`%g;LXr*jQgV>ntQm&l_le&8i94^(W3xhmL>c$2Z*vJU13 znsxZ47$tPUnWQ}}mPEfRO)bAj 
zHRn0EvuF|a3W#J)OH!7=rzXxPS@Q<<6vm>v<3{hbB_wQ~ItLc;$A7z_;bfx!@<(6E z*!u2Y+A{FpnrWeuSjdXs*imk(W09p(_b$*P9nCky=i1fa8galR{_@SvKz*55yX^8V z_HnU}riu^U_q7%Zjpngv-hSG9S+X_uUMQSdkrE}ac}FdGII_uD@NAHo%wZR?{un~a zxp*{Wp!PPJ_(HCJvf;X#owGh%{wrvF>ZDip3paJ=*`MHmMb}#2fi`eSUOd?M%@3fj z@K=mF*6V8@GQD`bhv9mJ(E;&IF@OpR0o^hao(mm8no0mv4|-i74`1K)yEie zsoXf{!7?^anBbMq=9lSVagS6p16mJ3$MIyt)H=>iUQeR!>9cF&C1XFsk3&JvGirCh zNFAKMaJBh-MEIMTfswbttU)ROoP|3hhok;L*mpUzll^c|MrxEXKqGkxMz(PLCg(<$ z5K7Iw3$QT(mePI(ewX+k|Ib_vK#ctKhjv~7f}Xt^%9)HhS1(YY5p=^O9NoU?mMo=* z_A#n=ik7``$k*F*1Aw^+75IF!8@hDPOOw3R^=wloSp;|UO|oHx9TTw0qh%IK?uCA~ zaHfKFnhgi*g10|I!?03s-0{Ygi73FA9ZY>xU+J5TC`PEyWDeTrAmSBUoMZgk5Ndq7 z+w%?y?x(eF-7Gz4@xU<=jED%t_r+1&$#z0n)1#DP?6+^?6X-&50-WDqa_!T`rjX1C{ zs>IX%>#IHvTwZFMyrhEmH1tQ~w}c(TNdx!%8c7T>p4KBk*8f8YX07!~aM@}Cs2p#I zNek&1DLJimfNb_Kpw8dX!>nQV4R!|pxvOdoQwIk+sHhFLuWC`>_v2Ac&L%LI6{t3_j6;`ya5 z)h6a45IVGVD=pZ&Yn?v~5N|EI?_-BMK8Nlx2=$~a3Ij1>QwnhW$(@ky)7DPbwX?K` z+8qC2Z|01Llz4BJNsgatvml`uJAxZr0DzNVFR1tWT3jc*k4yOKoZgQD-DU{ zC$lw81E<20!{;T>vt8GCWKYGa1+Ug$QHj4*E`QB%x=N{Ak$u0+KR!=MRNwl-{A%-F zIA>7P6-(l7mngdgf8SaGJ6;O>9YtJFVOK{PEuX`*8ezi!HZfSxB%~8;$55y@F{CQd7nM zIEI+B$c)2}Ki7G(Ax9Po#^=Bqp|TZZhFjmKnsaPdSI$eH0l&{PsNv>gSiy)U>>c~Y ze|-)%1ueIGZiO>m03uC{wsj@j_Rc2MCUf>Xp>;-xm>c*STEs*=R}g~2f+!$+&VW4A zt$tEWOU5vpZLl~4n8TnFhid(WZ>1Q1c9Y6d>7BqX>v!`pt#q?Px4r+!<#sl@BAeQY zYaX5`;S{(j*{M9$Q!~0^@^~rul9h9`F6UOIT(@ z{dLMN1K%m2p}-Q?6uA+**b>x{cCxXHGwfdlV6K)D-XkEBaNDuMekifb>r&<1>O=N( zrS+ax;?Gy>%CzX=IG@J%4Idpu&sk=!Ee-A|(X;TEnCDp~!>clUL#8gTl7AY~&EAC; zKhHaxJ54FUE6g`1hEwe9?mMluH;+Z?XfTbZ05B%gEv<*XBppG*VJSEZ)As;HBDmfQ<$hubSf_12!1iyGI?-9r2g1sND@=Yi7|jy$Hnw z)bE6KQcjG8mE+5dbFfbYZuM*dC*zMj;?A3l%Y^~0Tqw4s*k6sCu!vII7eyVcx11-L zLD#C@2h6K5a5`%~js~gYD#3N^TUrc1Y6Wgny~vF;fEIJrWc86B(zJodT+cqaAN_0( zEgp3Xv;@3~Hv&y;l?CrXxX?JTPZ4yRVUx@`4QuS5AE@W0oec`orMPIR`Ij~4z{}mf z`!0RHaNy89gm>sE34jhKKV=a$(=IaT!>llZ%YhQn<`(<+*F<=1s76q~C{o)aL#}ES z*Dvb^Q}$QZL(X~u@hyJ$sX1`I!n4D4av>Cx@zzDzeym=I&q-f3TXQ-2!=4~^HAuob z?Wab)%#mI22$)wViGBH*g6eAB4KxljVgIuG!&r1#!^_3p1w${M;c4DpMJl}WIPC-J zC&YUdQzItosf*oPEk8Pm79Gf@$FJQg)pW_Yy){rS=GF>uCiczZ;V3OzO`kxQiE{7@ zcVCGM9)2aB3aaH|@}54$j%wOZLa6Tov0e)E6(R6|&_i)DXyRXZkWl$p6Zn0cs9Bj= ztB2_+;(2qKxg#erXc7lWieD}G7{`|3PGAhFdo?vAwB zM80}@s7kJ%rWQmFQRN7~#k8Pb(y8>$NfgA9BroOt4XT7^ci@#hkdgddIPm@zrX$uV3}G6Ce^j~+n#=W&y|zEJ2W(gVVWM8 zp}&rRNzae)Opgd1To%08w2~_wpUjfIbdc@AvPt$CA|2h}(IW|@`fEJ|!Jng#5> zWOX^;?0tXjPu*O*5e(@#2;Q)tK=86cVTWDt0dbP68q9yZLP0G_LssHY92e*0P>Ndh zR(?@GtU%J-61t%`XwFrQ{dW(ODCRGNu-IGx^{2fj6GE+J+WO%)>jbVX%MKcRc=u1mZzM#yblg~Qy*mu-v2nl`=#DyRC4A6 z;HGXvVtAxrxteHq1eEc>QQL-cso}yb2YS-8uJk!4w!Qk zYQ3Z8b*iv#7tqYEIZNkzm{G^a;Soy?I=$ob{VA?gH5lF3HA-ZPz7)61vcNt@>mQPz zyzayeoGwV6`6@pj+np@$Y>_QLfP<)YrhzFhBjkjJ1>j8&4gm-M;%_Rci|`uz;Ml)& zfY8C}`v>SkZS}JI+g3Ge9;r`?cJJ+sJV~VJzaz9v?|t7>&BMHB>;C8~(><5&cjQOa zufHi|x3K{ZeEhjhEq5^HuOGW<0=m@|BE)=y>{{kFzj3a#$+2yHloPR~S_4~ahhbnu zz}a%=(tsVL91sH&DbP5aej?PdgN)apB!(Di@ zTCmU^y^S;LyPaRW(F1j(VR6xP=GVp@ybucL4MhSy)$#e~`8KHdn8xFm@}z5)rvC7@ zc_kStr&jtiTm0GTq{?n0Br!#~alv9CRRkEk@(!NYKJhaOErwzD7+z0tW-mQugV#t| z`ox!fwCL2e}_OR|5&CygO9@jrD#M{%^} zMbBCpyNz0GTs_-GXFVH_82s*CMC{LV0E9Gd9_}q{1_^4SNku(gjgb7t;7#h6?MMlc z3c9NM{#kC(O7n>MY?>@qVMrDrVNnYj6YJ+y*L;(0`^^w#Xue`B#U@d zINxomdKK#t7FR3p6xUG5tbmDSw?uhwugv~ijMyqmx^9ZQ=ZMr)JMi(}M(#uz7=oUh zv6OYA8m)V|BU@-a{Zo<}fbx6t#t+h^BqGY^c{wI^y`rSjgHZP>`aDtH#=|W9;rH`i z*7{BUMNas=i}eP2I91EBE4evlgoc{X)t66wd9sC=xsKv&b>*fNQ28fq1B>Ffho*al zHs$ySb3^66a#T{fEF&d}+8XJ>!4KoJIG%@WYOAmFnHl`$hU7h<@ZT|G#nvk>V=2NB zrvQ*Ca~@Q#sV!#i+e6;mqxokFGtb=IVVUr00TiWoe+qfPPv|zy7S#o&Z1yd?-YY%9 
zgpz&Lk)2JAf)nsk;@)gL#px`)Y6%e@7UomGG5pPCa>S=&m=mh(f~+nriP2T^YadPV z=r=rM>5hTg0N~joYNQamP1ZH2DLs1KL2uES(8Q0)ZOojSo2HiGnwCGLaJxC*whUgZ zgh1}y0ic_o417mV$p!S%@f0#zcq4_rQ>lZgPYRKpt)CyV7CG|5P3kws70!q-(aKL4 z_8I1WLxH)bh_8JM0eAm2>F1U>(33-bi#KCXi4F=t3EFvZDlZ>7C}Zr^kxahWs_!B& zx!XZ#l26PdVaqzgDip*vO0P1yK2+TjqkzEl9yu-p5v$98iSD1Ea8F_uk&;@uQ%F$V ze$zddQC*>vJ^h-L*1oi`RXXO<6SYkcH;l_x53VJhk_l8*!~J;3bOk1cgjL#>rMW;G z!hqlFz^?AORERnXw18c=6{WylR=?QBh|wlz@_%<0??7vcOlJGSBv0O`aNBnE`98lx zgr_&apO<+E$kfrBW;Z(qX@c39wAL4!TWGHKC&BO=6NM{A9ooGkste24K}}?9LNx>7 zzYu#Ran7iR*hb3ywfr0GV<*r%O?IgOv{w=JGBtY^DJ5?03Ko-+ZW|LQvj{eQXX$p4&<=2;jctMM|2 zPGiJBqDIksrO0!hoL}-A%dN5JhtJ6?^=%(@@hhEX)>UB#E)oLr$`N;c`s@JKab){P zL7(g1Dmp_=ya7W|2eVoff9JQ+aWpZOH>^^UEwfd!R|y9yz-1wPGj z;7@}SRoH2tAWnwKeSh35f%mE0H6@4S9XrS=&o5N`m+M1ZDSd=P)5IsVVQp|@U3U^M zS+D0``1N1w6r0lB7k5)CmlB$czVO)+?*x^|dmR@y9!$3`_E>zoTH;uGrPV$n$-lr> zOsVzhBvgKs%ZX+~1H7>WC&~Ve_^-D(IQ~sBwC2|bdQE@1buA+O-B6K38|Z+(wOVz< zdH# z=g7n1$HeTqMC+)Yq+@ey{{1AMQL5n`y`;2|1_Ml3F?z7{(z}ex$6?J~k^1k%>gT}X zUd&XN?Sq=1TG~xQxwDMb{8Zt1VPaAMUJrF-B@OjV zD2Cu}JI(-CP!z*y(}1bN+H&NCvT23O!tXfQVjN4|$2!|r!?}hM5IR(+y3-6R`Bhwa-@zb{W$RH-5kb||Q{{(El0EksVPS%I z&wK<=q$t{2#iu@F?3l`VQF!8Y;!>V$B61*0#TLl8Yp4k>vr{$heOPFiZ+=jGaQBda z&qal3;Moolf5M%~E$HiA>Dou7vFYDbeN*iAWNyanbU=*gwHd_w{n=f*@FBVcZoJcj zaY9B|EzP^9cUXK)KuE;Rzja%{Ay37RQJ*QUCM@{%CM!H=>I3+{JWTe3mmY!5<3VRk zj(}C~#;B9cDb^uWv-&AabFxrKj?Ic6HQ?stzQpcA;jL`6mt6J31>Z11$MZVwtyLRa zzOnUXQIpiEBb)DR&-(w7Z^L8mnZ(N!tt| zbT+b!yfv2)J2Vn3m>ICOFeM)#qDh?sj@+yj4gbBFeu*|Px-@VDicf-il-Rg{>!ku4 zeM@FgB_z*p-cKVnIe*PoGvjr7?H@&|G$+0hs@*2ou%iUaW~hSdVdGXNA5U; z?i_MS2zW{Xv#!&c-vb5*eH52l$(4_jgPBZQQYoo@k(qD0+{HSBQf^RxI`#x$Ufi+% zv^GG49NxLmI5n`O2B05Ewy)SHdSYH}>eY6C6AMKMmKlx&vrr7_L z{xH8*4?+rR0FjCFDrR=2KropyCCLe-yPKz`aO-)k+IYCeneLdMICBmzr+(o!ti}{~ zJGwuXA!)S(9U0D~Sd-slOy;!6=Z~Z(823(o0B#59mXt?!n+7Kr`BkgOri|ksB>c6R^suo6&yGJS$58Hzp4tiU2*nJx0RT#ex_9pEPGdN+SC>T!d3kolj_mu zgOh>HGfD579GSJXDrmCqzWK*z4#$qxJ{h$7JBB^o<1T!zh933e4!OB$yy94B4HmHu zYB46m^xU++fSvbTrHq{yfOrJM&hl&?(@^`EY8H zHGy@TaI>cTabT-Wt{^4iyG^60ZdrXSReiT@#o+E~eV`f|$1leyBYhbUsdIeBbTB#= zS?3rZ;`Szad;PuFX`^-R+Et(yzS}@px>{=vOa^HL49(VGE{qX8GMsy!P$0t2p0ldk zRjcZhd@d6`-uVEyfy_Bawa2QqonIoD^Du~n4d6YxRO(h$pSvUYHNx$j6E0biA` zdooZ}59HK|LA{=lcX!rm4PZHIi4Gkf(Ec)P23y`tf0a=Qsegpj6!02XfM;F5bE>TT z@U~&pJsQ~Qe9&#%gf0~QW_nWntA6UmSE3b{dr7XN4Vyuu_6@n2QiL;*D0u^Je=sNrH{0Bi& z6Qb3!n?)UbW~kfu_e>jNd#rj8=(WC8zA2)uHJ#BXVU&@FsB`k&zb$>-TB+Q^w{~@s z>$-fV`ZV%c(aMqdL}S3t)17nj*?|5k+qhD6(wAKUF#@R7hvXF=s zSw%DaxHIE();b8OM_cNz(83&}Nwo~xZMJ`DT5r#m8C%~VIef9bb9tTgIH6$cZbAjA zQF!MA#Mc({a?QA;8>lBLA7~FhQd>8N32Az!r$9uVJ@8bAf^#AK+Wz~|sHOrn-8F{#4W?^#TGmN{vN<-dTpP2|9!*QhHkUAXRbi~UEl?sPAl z7p@gLcX*_KuIDDEzHAg@-1FFJ?=Jxxsw zmb2E`!f+)tUzOSd|kp<0##GSpxgH{&>Gdfn3ZurGlmKwM4=Q?Ey zDa!3GmRkc_XE875Rw@H}43=P*;M<803yn?!70hxoiuD~&G@j+Wdle{9774chVhT{c zYc8!!AlOPNnp~b0&(0+D-REogmUKzK0qJ{Ih6#{OfB#bAbaLiUO95 z{|^8;K*qnu9lGOUryBQHM%`9)YyT!QN?j-yZ*BS!pN}^<@LTOex5^#*aBJ$Xzf@XZ zIbnyZfebqiwR|-4OO0Pf-)!4@<*7_rhMtbl|J%)c7Z)yW(Dwem;9ukH3y+lgyL`VE zp_|Mu9$0^~_xRg?*G35OSHd*+a<1Lbr|Pb_!@EqL^&n~1^JUtcOqlgy%)p0EiQBdP zdO1O=H<^2l|L5$`T;B#RJ<;*opr#FG6w0>uXz)+te(za0L*+2%n*`cNY&G%8s(rEA zmQGQ+@5JKW&Dgvdo7-$`wb{09+qP}HHrHmm zHrsaZ`w`~(1oJyHXYTvD{sc&CR+}CgpEO?iF64mj=Bqu=@-HG88wnRNTm=*j&$SF5 z5e4N{HkQ`YexScxWkDFhA%6m8(Gpb7*kl{6ie$B)6|*xuM8_A)&53dTU9JU%rNT|` zwF-ZCmohahwgaoJy|DN4lpi~&_xMx8{hWu+bk$Dd=>pZh#z^Pppj*qDjaT>ROI822 ztU#Rb6jA2rW!hC*^RGIZ?R2!m4j*mPne(uHem4j(;l}eMS<^-aLbbA6ATt~|(dM-a z^B;5tMSgfpZVoRa+CeWrDiedA`O*O0-CKW9EXbq?f7W_H>$EY%^#?3L!m{-bVqj%B zK3J68F>ct121RqxnO+s}?^>LG$5BB44S$AYXKI~b4bKoQB8!cVAnGr~Y)dKV 
z_Cc7?09?fS{u!BwK8zLG3u~I}_yMxn6fxapNmKhWQ7yD)s^_k7YZrH~=6kq?Tp)i! zSIQ?zdn0SKn()$H@0Ay8V z;rzC@%9iTCRLs_E>3@F)A+507_%m4Ef89iq2P_YfN=T}pw9`{+)0d6f7A-h5QF^U) z42*cj_dm8l7a29$h@<}gCoQ95t>u4wJr5;|NtFiO%vl@sHY@yjVj(7oapa zunyp|Dz|X!O`pQJP{2R;?E!xsXUK?O)G|Bk*qj>VO#YjA{Mu_*hS6;w9OJt zcU6D!k_}pA60p^U9OcYb_i+cjFG5+baT*MQPI&Q|NiJapK9a}EZuevi@T@w|?C}g(*`4cEJi(5%kt#@N3cU~z1gD`*0Jwp%ck}Wv- zkz?TqHN*z`(zcz1-%*zdUu7Ntt8ZYvnt)#ZH$1Z+m)OSf9iiaP1|-YGKW~EyUS{O= z!f-3rR{%^mA+k*$Y6Q!BEcOfabPduMc^=97$;L>PM7*k;kvq&~EHrZA| z;Nu%a-zqM9Y)u0s$w`?%=2#p|qB6nk|<*jsCKQ*OE+c8yL+%i2(eccL$q zi0bfo;{WE08bb5!-h0aNoZ16%-cNrIszSO}4p^SL-~oKD>>5pz@zw(Uo7LLKANOnc z##2f@Q;Cnm@$DQ-ivJ-K)#8sx4i`zwG)FwQKAwhJj_&MF7m@E|rLjoSg@|C5H_wft zmq0kTH4T5F&GqCIkS}!zj;OtFk52RNBA3d_Wc*VaUBH-66Y8$qh)Wrl}S( z1!weRrW3-HC4clZbi1aiqqBAboFHlYmVZ3X1|j7QFP@^`R(@CXM=}86dW@M>iKB9} zj;>3CY+v@hL#Bb}vKiXOX{5Hkx*YWJw!p;F#FKyOuPOH8DSY+bkOe*zp0wv|Tlm99 z2oF1tRq^2)^-`%*@}v_{;JdaRAyAps9yy!t@`7^Ji&pT0@!!PibD$iYD zG_rr2NsNFOoWC2eo^_&fs8$%rtC~{8NAH&YM?Y!?`Xy1nyV)Q0d(d4l60!Y|?Dnrl ziT1B4%HxjuV$P_)q|oWRGv%+1^c|x4%MHqqI{AR>IZ$&`jjamBL(@jk2MC+v?Q7xyt&$XehPYem=k> zF%B0&8i%LuCs4!PPhxhS@I67~gE$L~XpmwX2a-#8Pt)>2J^Y#wbSyKK^}$**(UQww zV3f9NOSI(3*B(s*bzw#m!ab>((7!c8lUVgd@&}2c*9GV1{wfXt7W}#RertwphY^1g z>LgT3m>^M~A+1Vm_6V+LM6RC$`pZ>{Hs^A_QOOe+W7HYb5snov?=_URtEf-##QL*! z&mfpsC11_b(p;0hzA!XqbszZa`_kvhoaElAHdloTNd8V_*>95VC8CUQeZP4nO#&S# zS6-_oq=ui<>Xp4<9GWZ?81wo*Bo%)M)5?3l<+)dC;ug@Tq}qe#-7`(Z)U}9<2|!4F zna_drxg+Z?ONhKrwx`?3%qz8)Da6XttJp7t1s&OQ|1o-R{Wu~212x9Ya}&RMN{pen zv!vcGx<~~R-JHUHKC#E0%o%$4PzXM`Oyme)T5V*Aq}?=?$2;5+qs9-X|aXti(4>XQ*u3&x3*XC$cXi^ zzVGnTT*ScWok~|-sb0Q&cLp_^ull=-!Z{D(tAFE&zOS!rF6f&?GW~a3tAnjDQRA{) zvLCvFcwLdL(AKgX54-bdv0`-_-nfiZl;9*Qa&!I0O zo{vp>G}l*5kwA>MSz7zg;wSpQl~q*6ACMJsZE zH{SmWppOR0O-sysa{7C)C?7u?xBw9O;0ruTFn(DX;Y(sb;A+`np)! 
ze;+(N{5GC^MqT`n+U#Y&LSa7b;MyKL6CD|xf97&FeEkDR&zIwRG@7o7}8D= z80>U>T+riQiadYlgaG9TAw&EwIMHOqHAC8 zPkC)0m2*@0NL#!@fM_<@KSaR~b{TM-q(qg(LAYk@Df z1gL4t8hn4>OFyqHdIN8asXWX|v9=0$AACs5bYl&@ClyH z7Plnq%|vcU(0U2Dv!aC@nv8ziRTLeFst>cFl|6?xSgJQVhnWA;6P64*vbVL-zHKcW z=T8yXv&!{()-mJ#Y^f=M+mkSKDvQN193;)|#I}?C&Z}?KcaDbA0dT^74gN(PI*;6R zNMwHs2LV=+1Hr{F6$2i|G~yJ?D+>BoJzQ4S_}zZi-y8Q!iRzR4-b8@$T~g-l(LH*b z@g#?&YC7fqovc)UqFG#nzt52O{>6WIOiA`V!^V6)o8s-)beRnlB<%e`xZKGM zu>C6Amp3i8CM=f@4`J&;43cpJ&)Tk$^;O7uot`lOee|K7+KEwyi}G=L{!;jrj4~uv z8)L{oq(ephT5HW&PE%oblJ7V47+?PeE(tRa5>O-9`S>24 z^Eua%kzzfkr~<`4ty67%U3MofEPN9X7cqG9=vyz!4VS@BzhRuOKu>e7PG0woWHL8# za|yt!B2>tJHdvrn&{)UZBCjzGi)A1)z~xFrzPeW>~(M z%b1Hmm$xL(b`Z^8r@Va$J3TI+zG zBM>q=KsFudy{qISQmIjK&%IHnT;i|n${@($GM zLYHvRTYm9*jg4>WYjbQG2}~isY5h`24{d@02YPZspt>YnB9XX>wyHyrqI0CPr^-7TVQ6AoHpT&MQlw?BP3DgpivLvA6T!McnZ-jl;!!D3L z;KgqOk+Q2-9@9@iq>z-(ySl0R+5sNK*9JptKmK^ASnSMHH2_XgpjUl+IpV&_S4Q$Z zL;}`Jv2de)B%Hz^Sy{apbpAL+lUeWTmQGEKPct-tw zinf&d{OCvv3hH)>xP`~l{4)gpPoG@O8}y9wxc2rFoT(T><&S^b)*N_FF7wA@QuFAq+DvH^6Fw$7{W*Ar@xd?>e>R?Deug?( z2TG;_0KuPzCKrE^DYlwy#L2bz;;*L^;j47YzHr8iw_v=W+m4D`q-`x&=0=ZOss@MU zY5vA|JWFCMxXu1?X-2R9t;||wbHLQjzK-!nSiz8;P5T=#WppB|OOV>15X+N{rxejO z5_2I6kmmL6pV5yfFM}?fOMGcUCh|9d>nzkNR*}3Y->QFa-nQ?DN$pq;wnxTUe?izA zun-!NP*{8i-7>tX+}<(zCpa11-9MH~3WbXRRF` zfg@O(P9%S&;fuY)T%0~Aj*N`@Z5PG?`WY1DR@;&AKqg&uwDQQh zLyn2IQA)^v^UPD&5It*<%xZqgOzXfVLfP95n383qhu6`+do}LrsTV!jjJHBGoRIxP zc?HmCE@J6SQ-^kiwWo{}ZM($?BlwO;mw!jfs8fH0Q&D)7at)u2kB}mW{nnUE&@}_m zdBCUVF=T!QQKLbtzG=6Hl=c>aePUD|eX5hu##1Kre~7JgT0-E?anN}!Afr0fo;(K7 zupW-BL~O)#WoOpwQ|u(+&@SOvaM!b5yW-NCX!n5<^%P05LsN+Ta*2y@?bayMYAO!t zV)TEoY>%tgO^O%L&2-H6rS?$+nGQ+?{8iSvO*b5C*r$Oe`!vfK@b3wKG`T8`#=iuz z@4J#JqcVb3+yh2)A0j%I0D&s4=^gl|73%0HmSH-poILgt_;?ALG3erbBX;ggDF-~* zcXkxNXWmrzId(CnkPWJiRl3pm+NM?6f6RYoNrlz%Tv9X}?s@-Y0NCAVDD0z;ui+A3 z7#*)Tes3~5ZA3Sun+My)ZU1x!-Q3=IcaAsfSgL@N^0m}AJcdL%AoxglG$W5muSE-T zSd^u1xkPmmfo#0<2xe*kP+@Z22$etv`mZz`0J3^?bctX%YNiP#y*Ft{> zI&r;tbvEgB_A@71AA5pA)oh@8)poEE>ib0CtVbq}2U;pTKz}daq3{^0_ z>r^mGRJHyZIOw`eRg#=d?IiMj@;VDe=1a;~eY36TNQAjQ_>M46SHt?jj^FpqTYNF_ z$Ow@8kF*%T>O~cD{bn2nqJ-^6|Ky!;xlLmMWw7`G3?AkA5(hNs_wjYf4+MWX^{IeH zj-vf+9jaIr)g=+&5Z^)!4N>{bi6Jkx8ATR@Lc zBI7l5bw2r^6zxUC6IjW($d90)JfR;mDL6&KvZ?C4*p>N6q$xbR%|`nDyf|A7s0-aj z8;!l|!ug7L-smfH!#vCxuKm_<9fyAqqL@rsi3dyMjLY$bG?E*Y7{@)b%#! zeaN}<=Co-v=chS+l;M1nV+djET$}NeInFYlMeG*tf1;}bF54-*c!?}XTALhtyjQk^ zZiF)hT1@fW%PwU0t^1Hb4+i_r_pA{mXmo{TjFv14NSVpc9Zi2aHjbi|bH~0miom2R z$GQG<`A+%Gpio9Uo;C&u@qLKMeJHFdM)tTLCGMgQ5a`26)3<=r;?t~2QE3JG-f z)e;I2M6tGYOXKwZ-q5GI*2pMoGGFhl5_QM4u$(&hzD}wr{G#VRBR5A1{R5yNbxCw} zjt`IB5+IW*s6>B(uQKY=qK=rbr=! 
zVfPSxB*$ZercJ}k<*ukH;FQASJ-gRE3B16yRP6+frb=_U3o+8-n36Mq(_2@x3ypgI zsj3!^lQ#!=GXXr`zG)VG-=@Hcv7S#k*?5A#&7J%EMy_L z$W+#cU&(*gSk-Lgq!XZzK8^PQ+wf&IZjGmlVq2dd`1W9E_TD-#15Es6CO;&ICNH)V z6y;B#|7n;x|6%0(=BayBNTh!#8QOvRbj~7!MuNEqV4F^T8=7Pe z3#qi;esh|RG;Ng~sP?d--GePHiR*L<`X<55fiq)}5P;DL6GnTqIguV{fY&mn$u{I{ zfqKsb6KR&>wjAr~*`@ttT2JURDhqt@>YNE*u0lHvmB03&;+%TW1bjqh;PoTk0ELMt`#uqSdXAL<8WLaP#TWdg?JZU`hv$!vaNH+E{aKw z22g3i*Xs_s`SY)nYAbUdIBJ#DdVS9KKCP3dFzFY9j%BvY%$yG%en)Lm&n*v{D<9`r zi`PVAto0ur@ZuE7h`Bj@HY-sY!KkF+^Avw94ONK;y66&kGIUYj-nUx8fo2-6x#erU63*=rz|R`f1ASX;b%#hnELVT{ z0#M{KBU+kzEg+JX6lIR0Q>rTT4@bM)_WQMyy~WR2P<*!z5`G-n0AkniSPmlPXQ`i5 zi7OLE)vN=Kd(1HZK-GVDAP1!Wfldv?2NjgHnU=xt2C_<)J^PvRDn%uE>Os9=X_dra zPdw?SU57Xq{UygQ*H7XQcwYdbuF`)rO>^+#putF+P;K}mJfT4tf)mITkf{M35%8nqJ?G-dR=t=) z&(CNaN3X@Bfxjgc%?xrf=JWJQw{qoEnA2>K(`MTCq%b8io&q{q?Zl*UBB{R%WTA?bTWVCx`@v(T44TU zkds?ZdVUY4p77xU3t#oR3xue%i+lZlP@5oq5+|CNC{4;Z6i0iKb9Q2Du!%SYeUoUM z_L6`7Yapv5c^WF=2pwJACTp^%U=MTuJb?R6LWtGNY;KtAQ6}oRf?aLe(#jmrDO#fT zC+nqNDc;PmOqeCeZ1aDj&tmzkcSgjMZD0aj!IqYJX3n%yf8z`0wV{UYi{YPBAYGw) z(626!amFqcX!lLvF#i#IiUH|pL4=ST?i=7h8bH!VjozvUPl?q-Daro5^<)pfXiREJ zw4O7(s2TJRX4l!Th%W7N4nM@fwJ6MRoBer@Hp!R)ew8Jvb3%WqpZfW`sclR{K)(#E zAy!xS8_tJI!FnH-Qs_^bRLNVeXfqTyrEF@+gBurUA|dG5g2pmsw>$hY^@+!JC?hw(&#dg zv#jYXS9A1L_Wp-JObrmHoG0mhK}5gtj2;iVd~la?VB4I!NuG=Uz??akMg#q=cD3G# zT~a(c2g45vc6JNL5+wo2e_Oc^H1_$B4JMEGo1a4% zi29oUVGw^vBs!-Q2xB!L@c#~dQ6(~Hk%@LRI(hn-&6qrC4ND}*L#);9%K4ALlD}JD zF9DF;t&XXPqe0JYeBP}l{Ny+xp#!D8jmc)T z=^pI8r!T1-NwDVwy45>r2;sNhKF=G)B5`-@lR(<%;zn{vZ#S2ct@zmVor$tbrqRukLDH@t<7Ei78`by&!g`MqTdPsldMKlJ`124;y!3VFe+~+ zScRQG{GHE0>-G>zqZ2`74Tma)@C9^){=Tg1qKo6>nfnIb@z+H4dm`63+X2HbV2w%oakzg*NVISI%U#x2sRKmIVp(ao!BJB3{xL{Q z&@SbvP4_ff2nqaZ_2ME|W=xWK9jOKKIMlGfR zDi~lXxeMXH#fsm3Px85c9MLe8b04Kl#T@?;`v-{Ij{q{6?aC^>Lk0HGoLnop=8cGsp=>u2`63$RvncKJ>kCYcek^~U+IC`y zWwczT_Brhe#oxbHD zbN`k0>PbRfFR_8&P~F_!&dl5;T2QpbGIhv-QF=vE;w&!ms63%Ezy30Ki*o;O|2*N&%pmgkjC3ZJPZRNZ6*i$XF zA5NYoTqp&7%B%=|A{h38%Wo!jJ#4>g9+u}MFUfW3U|eF5s{ZsRhsb|aWcfzVtnDvn z7e7D4+>{J_Kr^p?O1}A%sC_r>}u9^4oo*7RDsC^iG;MdHN2$Q4Z0xn57BmlBf!8U4JURQUy%MYefaK< z=Ef&~Qi<94-O7w;kmG+#FX%nyesA{TqD`(uKiJ(gnd`AdBI?uZYuLwy12$4C;!Kfv zGkQY>*C_PFy|{vekpAvc04U}?;vGLv#e?rMkHbajit@*3#BqtW8&inBm}-u(f-c9% zEZ)~)&FG-(-=kLea$kRho$}4rO^&U%e54nr!QD*yZN#a0iIjhw^+%e&7K0`yaK;GN zQWZ74%D+W;6L5u4Dz8!{fJ*l!3kxN5icv)fy6Bg8cN4|p0K;#*<;vaNU`htd5e!+B zEc7+J=>+yp_}rb+4T{(sfh+$Cq3v=sRSx(*D+tr-!o$T~zSRRXHHx|L7WY41ow~6G zE?l^}qktY7bq9ZSc3h`JknT9Axk-#3SYX0rjps~+3&+f5i%5 z<=-p70!Z@E*+NVlY+ms@O=)>^H!Z}@TaR4|-OW(U8Wtn-pg%D(IBmEx`)?Ro*ln_= zXEWuFs}EBAA?m@84NAje3o$jU$+l3FXST*dd$fJ7<6D1V;PNUhQO+^ZJdhVWchE+x zdbD0Z+oKVNs3gvQp*HD1IIhvVzQX{xGLD*yD-w8p17+AwXd>xO>oi}=!c38{c@H=q zZ9=_v8M!Dd2saab0ao`F*N%r!RlUx2AC$!If^FT+V%T=<-*ovOTaXo?%Og|0R$LZa zXFY!!HRFHrz(oI@qV%`O|z5}VOxMHX0RmN`9^?_`c&fHxO%gbqn$-Y;u+5va8r^ z;Sd7dx5(5%-AoLdc2wG@f@KI?K8FS+uIBAJfpNkUkYs>m&$KYEkd!_we(3*uY_;AE z0Vdc8nDO~1{|rpjb6n=uAxNPyK>pQk1{){yAa@8~1^t65Cy>Z_YE%=3vw; zt-*h?E~2$IA^NxdMG=%;9y(46s6h0eA6=jqwua3qtMT<@1w<|l=Z@SSYh42v ziCXMs)a09WUl1UY04f6uUG!BY9dZo?d@7 zOCDn7gVo>&q>jabWV^lk{9`3x;?EhXc@fkW^JhI|KAh=ymU~Kh_Cgy}rdPhz)mYG7 zd?kI@vM030^(FWta5~kT17rn?!wcZZebc|uv5WP=->&?JP%}zQoqm&j-lfEK$O557 zM42M1sFrJVk!>A{(4#Xt3ApRlE31D@;yWS*NTA7Q@2Cen4hQS4XOHemzAC$0p;Hm(8dQxawR3kpE-nZu<|6x(BILT zAP8W>6%83d?+gj*!`j6-^;m8P--`c>j7~ey{z@{G`?{rCyCG;l=6oH{%42^L@WzZC z46XARne?mxcu8E^rb6;1vJ-!|W1(WG6OX|d z*o?wtBxy;Ej|DNmEbYl?J1Y;0qR(T_eo)!c%8d2x@lnCYB@h?BJ%)e1-tIgA9L|Wj zg@t~;qU%xvWfKb$c{CO!f@eW#Z8~bppAw*Fz9bkasU&+5>G!KhOD!vh{-x`>QNGnr z#d+#m(%GgN9am}oa5bQz{8d6^ln6cRy0f+B 
zBYcZ|706!T#qQ|b|LUf}nUkZs8#i=xEvp(3J-hTTKHIMf^g+OvvQxh)CkIoHk7Lzw ze3+>J@p= zdf~Nv^RKzE{{`)}E%F2O^vD`s-p}sefpo9J*wpIQGo^GCMQ&dc6C0p}6)+)Hf}Aq; z##Gir5WQAEJ5g|hB;$cT&r)oGSY{y|dLat+j$2t|bO^zk+Vs#}Wqc^EML6lb7NW#7{E~xOu06OQ;9V=$$5FXy8%XF-JUW~ z`V?9nek{L{A8450virzWtg8%{+Q+=24?!32|3G1kiYCwPAZX$VO5LX*)6|aJ)7AdT zU{2mkVd|B&E!v{VS%7a=;oRPY!Hf(AAav}S44M3?yi|XVW#-3SU06phK_<4Il~=az zNi*QK1O0<(*@)0%lb_Eu%0DP;-nZGXf}B;M=2bZeIq4DJevK{HsKGVltwK%DIEUC$ zoiYQLCfNz*^fOhZ?8^2X<+CQ?O$7HoG2BW-R zd_^A2yX%&gb3y>BT<4DeTBul>p`WxJmJ$+JnVV?u4qaAifuBq9z;OWq{xH{7g8nW} zOJ%*Op($^Anufv42+ey3Ua|5f1YMwO_TIfIavc-oPuG_Q=a8k6)1AAD7)O{W82(1E zcPD?)$bc)NJR~jDqns>fA0oHxa|4divbSO==OO}44m57L+d`|nHowN$bUXPht26RuDYk=eB=;saaxFgFb;QS%$6_w1@FZO_ObLZr_I} zYcV;)jc2VN^3(CiSmjh+5Wf}R2Qf3Cyk9Gryq*EN9Elwbq5kt&d6RWNH>!6E#6#zd zHL(WpV>|h{EkO?=3E9e7yEl=FH&1MrNki7|g0IJ!`!##q>8kFvLwio18en8srJsL~ z`F$YIE@Dc>vID5i97azK6&X^4p#F6d>Xk>Cn?^Kj^UsVX&4ifGmloQ)pnEHDNUgOz?oY(IDY015ZYba`RDEZw6>o< z&3F+~LS^aFcw{gS8;=MkzRU$Zzq)_aMVmrIZ!OTNCs-UJdpjZ0pZs&|lVPdwnyjX` zRs7ZH1OuTFp>gzLuIT3%6!Cqa3nvn{sAYgzn@!}haQq>xP6;jMbU<(9{vZEXavJD4 z^tFy+!$*#4Ogdw${H->Zl+OVUb>FPN@0#qGzlS_5rWSI@6Ro3 zp!qW4E;?{QpScn7k9?V%7hm8tw}6^0#bh8VY8+>@lx>QA!_6 zdnT%5Z9cC8k|k+V5bgeGwogm@;Y`CU5VOk4U5Jbxz}<>3q$h*!{>l5)8O_og@FtcsXgOwc`KY{APdEP9)&oW_HZ#iu`00 zBdkU5fBql3aaU&;T(^sqZm_uCOK{US@DHi$6zf8^X!V=-YX`$)pJLJp+MPovN9`ILNz@WXoL#4R*+f(hA zJ!{2djmAQ%pyPiVtQ`5;a;_J+31&UFi_M--ID7C=y31H3z%J7gb;!e6|Jf}Y93%yG zxZA>JC99TWA{-X2wNe8E`U=Bemt;=ETKaQS?y9M7Yvc{c= zVHzsDFAb|~kU$)n;u3&Z8^*<5^7HnR5cv?Tdty>sDU5#yyH<%mKvyfJVd*~@CYSyO znG}?DdGmxOykfA7UgF8%sQ>fZTVr%_&na+#yS|A;IWOa z`;l%MV{tSsH5q-d&$D}f@Zw;lMWsmlkUDrDXx}7EV+}5ix%e-Z8aT2UDaevWa+SvLdi71N( zA)JbXj)7!uYI+7$Q{y|1B-KaJd+a%D0eW-kvqL1*)wW|VE}0t8JG_#pp{SJBkuCILe1T zfj3Frno6?FB`|`wCd=y{MLqYwl4(j+GC^OrvmQD3J1PAv4Exr!Sqk_SGmT6VqrXYE zPi8TY&7{KjB=i+mL@iKZ$UzlLOa0%YWJ~FxLNCjq%4X?==pk z8(&v7@{e|sZR50@Nh%BUM2isA5Ws*0K^Ktg4kbmUl*l#)Qys!d#RA*9^E_x`us=`; zIyDf|7uw8QKORtg3S2KsagX2rYgq`>)i#5A9f(_qXKDXb>eUl6BU0K z0BbI)FI6dZd;61C$(8L1?U~CZIcC#e6L9~vf=L5(xTy^&l1HBw^wuh_$3k7K<}-{@ zdQ2RH%b=sZXx}PR5pz#m7toF#Jcy2^o|K>G5CR^)^Cnn}xJiU1)bRG!N|vy9J>qY^ zO!`v@^S~mSZ-Sm9;M9%t`_}++>v?}yJucF%-w0UqaiYoFeJI`17;BuF?xa|y!}qVm zRm?bdDb2BycfcC?U$7xJ8FYy6LSGj~uf%_!loL#psG8Ptp>weq7l00;>ep@&ZkNhugWUVB|`$aH$vB>R!iaOgClpr2>ZSj9>rZ+)MyH%C-tRDn=~qQ;I{!sSDw$k*C|mpi5w*EkgZLz>wQFNURDpQm|G_O|%jk z`ni(xa#ltK_2n#t5E!m8C%$OdISA-#t$zCh>@Nh~(S1g){r>iKdkudYtBm3N|Ig!v0J6}}3e?KIsw*lTREwrQz7TXcS#cL@Gb@rCb`F?<^IBQs7b=H{Q9wnzD( z5JyOV_E1L~6}}cLbYXv9J`j^UbYomqCH`$_z>qpmb=O844F&LBy{nA=>yb%&Rrp8p z_JHwW6!o|tjH)8h9A0Y;40Op&DJH>SS(>^rjM}bwj{iCLL6SjIu=Vd!gDU*_^pK9A8^#|z0S!072vN1x)MBz67wpL-k06-8MP{sw=@JLPt1BBI~{p!Y4N zQp7o%D`njqvR6)jIIp^*(}O!<-q<AyL6nj$zTgM!hdAp9PDxLwJKqAXk&p_{%c7a^H{iQGVP%~x1t6@V2 zNhu~R1L$XUF``WU4eUgIV154P1Muy3In?1RZsN564X9@>Wh4*wSI)7m@~t4ygp35*sQkKTWPZdYh9jKRV5V~~7*ci8(t zWE_B4$(v5hL=w>zIMDy8MJ7ZvR_)Bls^Q2u&*og=6FLTTawB$kUkwys(Sd;@(yuKrky-emMG;W8sR)9~|LIWj3i>3Qs|78}uZ^C@qI6a3;# z;98HBcN%{hpkWm9f&w6V1FBDyd|?$vj4AZ2yGgB?L!Wm`CAlOe_kZd%K_BQ#QgeS0 zMh>hCjxVLL)~=~@sa=P|Z{^U}*{c~+y)|4xpdAJ*zLSjg>Oo-N$-<5S!)`q}3w6k- zTjEU_&WG^9B^p)Z6E9k!sa|(%soy|%)we=~8<~F%WvZ7THL=3R6hIbAI-m58ot`~* z=6ZP;n?Q3T7Fdxgho};~7Q3L)@aO<)&3%i<@bXP9cSk^nk=^f5)(}8c zLmKo0#4Uwy=XX|a2D7Jr-8h)bO;DhCngIEnPkyv<+AXBpU(gGgl<@pO?7{) z(mco)Da#0Ix=ATL{Oe#R^NWk-uro_CZX4sg@D8Q{Tu$paSBT3*=yxiv6{bz+{mp1p>LAF!tDs|< z!EPLBmv)Zu^=G66K#(nw8yq2b|DM z8V>2~mKg2GP1+m|ffAbU0BY^$9^0P?>pwZbKIeCHJ(3j32!TowLD@#?+gj}F6|oWt;=jp-6b|32t$3~+?!e>SW)})t}SCR8O}H~pjNJ?{Q8<7 zqqvfFyNUr=uYAwjd@RbGlOn`G-69=R81c9fw76cQ!~9{@dv-aq+i^gaNqMp5gM 
[GIT binary patch data: base85-encoded contents of a binary file added by this patch series; not human-readable and omitted here]
z@*353RzY@!t#P5!13CEqe}~KZiZ>)gz8VDb z$PfaD3;2v{f5o=(e@uf_oXZ_-o_!?De`A-UfQgm5U3(%l*JS9y5S9l-kS|b z;uKL&Rgbg;4v-;IsHDPX0mnkEKr<-VfUHvnOd{H<~idba06y zF&%!y?fnUn@;tCGywf)KQN0yXg=LEuXtZehmXQ9le^G&LSoq))g*bn!I`K);?F5P+ zM|b9D^nZwFxt1Vo8aos-jwavrzL!cIlMox~(I}^~Gx8-sgA{K;;v9Hif>48Nqmg+E z3wa6fiX;5gJ^T%E+Bm_VbRC5-I$V4~3ZX$%(Twpc`xJaqY>^`Rn&eVd)dYpiaGcFZ z0Aapyf8_2zp@kOL!d*N`X`TH3sYPD$?uG8msVls=r~n|FYGnWciY>%?t4dBQRp)v^ zcAZ9nQ+RV#dHT8mBN6;;5xr=vfZXVeHvDhFP*Pg~ilY>Uiq7-?Qr(U%>b_xobNA5) zX~Enx3k(OQx-a{I053w3-1d0#?KdVh?Ww4;e<<5Mzh-(vI&9Tl;b#?7@MGNgw0eYB z+U7sY9JSZNVF8=r(-pcnXcg}k$A816?7)sRrw!;>(-{A-O^9%wlAreisv zf8;0jJij!5f|Xvv$p>b(-@lOWIX(hXN|dKtglZ~eyj1MQ7 zK8@h3OgbW*-TA)PZ39+31?4J5e<{jOjlEAZ#WZS~RnkiM+iL{|@!^6M8O}CO6rVrK z12MMdjz}Ys2+OYTJ+-s#-ltsS9l}`Yf9GtCOBvUab%K{;q*v>D51kN;IR5o1{u2sk z+pj`#U1rE3ZbMV=J_{u=#xH*p$adL+^zNPFZw!bI0CWF`%P-)ur>~Htp0rVx*HwP9F>lYtD*A;u?XfKnynvGd zq%EnJdf=ZfSB!9lH0eZh`_C)wHvM_e`0(=-yS`jhSu+~CzDEN-x%7CVf7J%r#s(Q+ z?#e?@ZCLT_5y=y%D>{#yTZaF08pdYa3m|CgAu|LYS<_Vu@7W%pj$lNW5$<9YUw zA}3VugLh$kzy)8*xVg%CYlbN39r zqG}_l!{ruvVHo_So?2svFzQ=EQ#s$s9&i9IPkS$CNBb zhnEwPev+6zN^#DUbKg+3ZlKyCPnsE@>qEB5zg5oK;9u#Qy-*@iG1;UxoY1?f;5}c8 z?7w}d)#)a}?;TXcf0e#-`QV{6%Le?y{{dyfFK~TncA@fqw>iYBsqoj*C-;SEdZ6h_ z4(P6+UpqzBgJv9t-$;lf(Gr|?7!eO5%FmYtudl8lCHFZthHmgT6NmaUBgBd-b>4>B z53?u2l~!kf{PO@)VZxu?+QKUP2wTqqCy4++hZ+-&E6`BPf8=!Lr9BFS`cWXkX#cRP z;!xi{Mmq)GIibMVfLkzTm!f3UGNI&mM{rn)@17z)c@5%zI?9lk%MS)Kolil+ZvT9b z1CncsyJ3OnmeM}MF3z=Xrc~@79IVS;C<6y*D2gAk>Su;%FTo!mF_29Hw*o6gI<%JD zgP4?gqltA#f3&ypu5K`Kr&+3kc4Qf(r|1`~6t6CB%zpZ3E`S2bw}^cma)~P29gP4w z{a32UmRw2>9E;ny!lzfQYw%yLDJ%;6`s`@s7DE&&6#l*e#Pzk=xivXvGPjTYzYTTN z))xm(geWP_WiSqd^%sHw<%7-K+6g3cvlY+Viccn=e|9N|w^h@2*4v3_mVRe~zc$Vl zkQ>KW?pMnUI}O|5CM}U!pl+~6Q~QIAL~In#SI?!q8`Yp8k?Z|^S@EYKOV%%zOjTg_#-O|@H(+2QU7~lNFD;HV)rTQA?DT9h<$v_LSg25y> zG-?Co~}^- zf4>=}F<)XoRe0Az>n*ea^+Y)fWZr?|YQN(Qpl?4#vXcL;(N8?H@~|M5kAwt2)GRDS zb=($)De-6A9{52RBZJG{lk3gRMW?ybd15h!0lD-v{jfK?IxKpkx#gyAq6TbT4GbwA zPX>pLs4&N(sTp)j>|-yihoPho3Ocv2e}ebDO&YuyS{9}>wi>O0ZTg|?$N=LlS78uQ zGQG$U^rK%{llY>Sk14bZ^Nao3C8j7f126)UxF0`*qo0J3E2&oZ1LXtk&XesGIToD3 z=~Q@875M&`nr8gNA&D?FdFeBmfmF6^wMCmYT+7EFxH*~Y^RE(3t;%;qh(*|Ke>3hv zX-;;qz|x=#u}r@7D`{nyI=|;Zs7?{ZOJs9BFD8)(tllj6&$xga${b>yu5^|H3CNd2 zU|v8g$xt)O{I6Zwd{tT zAzO4q{1v(G)u+W$3VtazlX_s3e`Qs$rZ8kabbp?o#ARKn4{Nez7orXu`9*|D)cFzG zJLi6RL$H%3QR0a{1^5z}ghm|>FQIr!=6HsrD;0^r>?W#bYj%LmC*QK=0saydo%X1kiac!I zrWU-fPjLGw5_Q9S?~JT}e;BdS!x3!O*~Z)c;ibQWrNsVut5$c%a%E)2oCluM7_-|I z1tr+ z8l~5MI}50F&lfN@I#;}uz#2lWlza>(!WD0iyfARW0nY5~A4-4o; zS*gO7lMvR%*i$|@>-98P;MH=g+EVJsK4Gofr_whkJIMW;&baMAGOU13F(qm| zdM~h*F5T*r8EIVwjA!d~44oZX?|x;Hph4?D$u#}}bB6f2xwi3u@n>udc$u?eNV4;n z;F%WlZyG3pLctQ!e=m(bGUY;5(u-B^;VlEANL6-NALVV%uq8Np49LX*vsw1ILn&%y zf(l}k^82F4?vy9Z@s~e37Wjq|7f@c{YcBcmk)WZg*Onm&jJ!=edF~m9F}U!loxBUK zN%oGhN3onO%9i6+)c$h?Hcp#6Lcpg~&3Tl3dOfiF=ru_! zPYL+hA{+69-^`eGbf6BqdCxZYv$JqL(b1lEu}ucF(D+2*6BI|$<+nss6Q|{m1V!nF z0EZCi1BB_#Xe9wcVKwLQokP9^WSSL^Y=r8@2a@Y7U}jzp8R<#%j%Un|S3Rv=&? zpxaM)32?Vbf9^9apwID=WKf!_O&srgS7e0Yi}rE=R`qdUd1ueKwElc4O=et`3v(;? 
zWre4yCIAn0o(8;v%_r^#qeCAno!ysz5ViB!(c!k#@T9y2gLbl&n1HF~aY+Zw6aDes zE7o5+czt3KPvAyTq*UiG!L3NZL+blv&U(z6YLmILe@FXyHs@7bDRl5vrnObLdr5v* z6{lik9lZPES&ALIevKyEMLwF{Ri;~%J>K`JX^GC0pQHx`0x=z>0QI8CGL8MbYn*wi zmA@euk^Z~M54pFI@~`F2JC1B=;QM1_Rs2gW1zPQz=jG_a5c6-{zRwx(B;z9@OlwZj z!S4_(f0L>>9R}Itntr4BB2LxXzjjo2XeQAX8B0Ic8_xi&#&hjK61ow0*Muc ze;+_u32HPWV8p>U%}vGbF;;>wTSH~vG!o2++v6ipj63gL05J*Pa5n?m1FAnc?vuZe zUWB<@R$tL<$pTNhzavr40AE33ul!!+0k^qxYS?r%HDcn=Wp_Z%{x>zBhk$^Ph%Jx5 zq3+|lp}x|45*($kdE2U3PvDp7>f}?+e+Xkn$BF-@<+A>Tt60{f4w#uXL-yXQc{oRp zO8&lup{^;XaOy3ns7_iOx(aBX){t^`me#7M;>#MT7bp?)uB&CTk)C&>lPPl|gEy(D z@a-OX{QljciKC8~Ns)?lNQwwAaZ|Gb_0ek(bHkrGro+rcS-?qgDqmV(MR?f(f2h$T zy3@ww>&iKToivJOx=~C)9{z54hlEu+#V6qT1N;F(+TVc7dVRm_@Lmgfm`xTa*5xu` z%&A^r2?;dgKCxa+=bqcFN<-b};X@7j^AQEWs2IWzH)H?gC6BvVN64DVUMy7aL-<~a zkAu$m!;3cfyG-xHgx_~7RUgwaf7(C7ZxPltTb1GPrwKoKo4Fd)dQY}|`W+YfcYT`` zNx`63@3I|`$MvELt+D>CBiUJZ+a>tI+cX$yV#v5sxY_HCe7zQ3uDLEh6M+$M!@XkxAX&}e^FlMCryWo z>0b9CoYLL2#|WQt`=cY~Q8n<_#(o*e%g}D9JL2;_dj7z;x2_m=1dMh_|A0c5lB6hI zdt4P`G2A?7tc&VX^^t2npi>{dyPd0y^DkH`{C-9V|KPJ4cQ8{qx$4wOx;r8GKZ%e% zl!IfwZsofy&5sT(2-h^^f1*=bj`^tKg`0?3X>6~`1oDXMH!@5I#N@(T9+ zZ<2C!1aE&-_Of`?RlMBDe=CNj?hotbBzWLu&OzV5`5HzPx|jIQ!T$4}CudHC60L-v z@P5n*UckzLmz|o=fQcF(G%1?wac1|Q)&WBJ_Jzba5Q_@Kxn5Xje+S}*fv+PJ_MrWy$ynzCW z4+!XA9rg>=Ph#2_dwIc+e-gOrNhl_DH%65m=6rwa1vKkb^XWRcmhkns^ZZ70pA}liDxKXi93^%Ev+1Rr+tVH{V-#td!<4_ z45M_X$Z$N1e?=L+YF(y?UCfvQnd`O$bmD3g`xvMny5M8bZ@Wqt#TVQBbZ@`wuIy_q9;8o@ckyQ-MGZrYG5LtD3~ES{Tae?gJf52OzB``&`?~*C4tqKPXz=gj* z=~3z*_H)GhuBM*3?BbF-uo0IKE$UQ6a&)@g$|*Zh21s)Uj|$DEoWXV(@51y!kNoNm( z?-hm%qJ27$ftpE^l&QK328c+b&nk6J6pU59-Ki+`JexrQDO5Z>Fo2k#u!06XWv^rh zqFm$g@%-rH0H=PV-&ngM?f+VJ@DI*#I6(8Of6L5f#yZuXQIKWd^?`8?2sFq$CJr4D zjl_D_)`l`BwIS|O#EORO6N+35RQhO(S&0*sIr+ut{7rShv;2x~)z`Fc86KkNew7Yh z&lM%WXhpBA-y?AAB3tjbP_)RTVze&4#HbWEbU#S_Ty@QL^Ry+w$EP#fZ}OWc@e*)= ze?(HXIPtds!Wl*9HFk5qGET69_){%Lwz`p(;7Shs!0T_~Eutshk7ESYR>@@Dr4BFi zk=Yc3AGFD(Kb6H_4~UO-E2Xaf1~7QLT~P0T^#uSHLA!-q2K2N(O&AnTc7>-?JUCkF z9fP#=`)y7)@I%c8gh-$|Cl>8Tyq(n*qaqyUpUUv@L}w4 zx`Zkkel3&$@NpbEi4w^iax8rH&(A<^35Y>s>YH3?Dpy~{*wch?@F!O~bt!z&#V_v{ zV)PBPQpsKjEVvhDz=2DErG7*PL7uRyJIh= zq?}_*UW)h93EPDxKA-FIiGfSV<^}u{-@gWej2LRz-00q?t8)P^HQJ|IH7wk@;IEA{ znCCTDsT3z>8Na?To*bt0r^{c~f5G2Uv;y%Z!>8PzJX?jVdqSmx3C@+t1V65w08zgS zxS|I~6D4b1u6XbHuJK~B;)*A+e!O0UI4Vq$g4YPt9t|<{%l(l#@Qn_13dDsMC{~H> zL9Y~c-CngZ61DK4W{+PfYJ+9A+rA(R+cf|TxEt2R1AnEyT+t`uqe5$}fB)iv5e2S! z;(mz3P$z?rXQ?f#D{NrRmGQ>=C?QKpsAc2)sYmah3t92k%jK^DxY8d96uWWH?0+tD zp=X(~$o2z>S$yl}+%jTem6xS0zExL7qm@=lvhyvzsnj(vyx@l+#U=1pt4`+h3bMaK z2ZgGk>OPHAKrA8p>eppCf2Mw~(Csy)QO13J$=X^u|MAp9`v}}42mO?vDEjR2RVGW! 
z$EfY215EYJX-#oRb_z^G8o+6)6v(dA+r_GnORu3Llq8I+HE~5`S~k{zJ?iSf6f{TG&XHFf2p|ezNY; zejr1E4#TC>nw@#57mp-s8vLA1fDm@k5hn*e)jN6&#qih$Prj^P;djWP)ui)*ys7Q7 zJel9$UAt4b2!)*)f85qe$SIDA9P=S3cuyAq2qtj5qu2fyQmK-5<6D#nd%ejCDXFJw z>8!4+uUrQHcbv2st!H9FfRH{uL-?Xk`(jte8}CR3HKy^5hp3;ykXNG9nS1>$K%|1t z6OePj0gQM?>Jcv`);CF+6sTUXmKKY=@2x(5GnQahsV0LHe*phryZf%mhZ}*$RmHDe zdPaFhztRx*8%6H%P`jlF7G}c=QEsTiTM64w;$Br13C5}b>I^l7+>L+e+^o`J7rWo{ z#y(N(brP1YWqp~9t!W3p{Zr5OTCUVz?=;y>L6_C0ZNAYPRXQ?5TBVf~G0T8y()im` zb|u_!M5vm}e>?sv;`<`7hh8C<%F6;ze*mx3KbLi4q8F}E%o2e2?c?nfSrK@xPeU-D zi&(jcOWO1A<@WZ|FI-hJDd>JXI2{!6n)7EXno1|3t_tw4cu>YK!rCbq|EsBtaVOGmxiw;j!~K~!H@df?DCR1af-1eqbctF4ACUJ|V`Iqg0u{qnaZ za)NB}82DF=$BEyI^6~LPvvMYT#Bw!tNrK64dp$*H5^q>MBL9K*+t2jDZ83LWSnWm6 z=(=+Nf4AX!?jzHKO4syMfaEb>`YTU%2O6$#-p{DYLAY>1@KYGbhSwHZhtSyH+_!9}U$@~X0hVe!KP{`Z;AiyRT4@`0|Y?N3}9*Y;m5!$SUjV#1B@FV`cUqB8xNO~HNf{ybPuNs@z=*Mu$b*wEOmN{?*q|0+Uexv;7KGkHj0D-e$9Id zeGy2KcO`)D8+fh+PM+$Jq#t(c7s2q5un#C}k9v2lUFy)Ce82+4(c zA}B`MO-YfT^x-y{5@LlrM3wxuQ~pJK`8$>*p{G#{mPhW;`n@z z#MQ#5^)EnE==^Z!F7L;2-(`#093bm5;zB^Ckwi`!bcZEPst#=n?Nd5Ml;cdEM8!O-A6${j%qiN5zg4ulwhA!JP%BQ(7fIM{!%fY_2IT%q6Rs5i znmi0cu=o_O)900};y)qf=W7N(nve>ah0IZ~`%{2VS19#&`cq4*yYCP>$&`(cefz!^ z!cU~!Ow7=VITC#}T$iE|*T-phn#bqPEsetNK|xE}BWT0d%jQ-Gzsh*} z;bH#W#$cXTFVD{~1htR`hdhPdFc=4qouY|N7n(M$@w zDk8#16RMv-nFW%70@v*eab#4k3E~Oxz_nZ38DKb)C*~OG;Ap7-AlL)Z& zb752&HA|h|8*&Kc${Zi=dE@kiH=q3pOF@BPClmb11*|6ffB%cvgZLaT--I<==O(H4 zQ5WV_&mm>;eg$`daS<8+#X>8ZO!0-ZrN7|T7GN$-J+fc+K%R|+AciA?&WTbb3*DTE zlwrLvo1Edj2Y+%s^Et0f(fo1&c!*%QpkT?H43HJ&EU}85%^nIuTa23pO=HIq&QO-d zpLWm!aoGT3e_c}@@rS7@5?v*LQJkaIt#dpQt1t0#QKt@Rg%G?1HfDIP=Z?&Nt@TEk z0b9k&p5D3k3fL6azO=;j3aTY;38LertqQ5A=F42@^JJbD0(!=C`j<_fMGq3+t@H^< z`0^vj7oj~L%8{?YUE!E7fFE3w3+7}UuHX)vWu;1|HgIjdg|AS6!w`|Bdz|B_Gc3)O|P{3%Je&1vS$v0msJgN zSX}wJ)ju&PW#h*7lLWkDs#zziGq%pQXWN&%Wr~BJ zw1|)3qdV%KGH#nGt)O}+rs(hQ!tm;s3=qeuf8&EUrs<|^Tw9gM#r|Nl-0ZNpiTg5q zo0O^nk{oOksT_N_g+5hn|HB}W{r=4J$>i$bY;gj`$@eoi_%`)Um4^WsG}Yx7`5>Eg z+WxataC|gDi9U{~?)$tP+KE)UC7QZVQ%Fadrn~C=DH~u1xd6c9&eqMVo9zT-G;C(I ze;Mg2iidQlz$&a`e*_;xT$mg6JRWisC@Q3VQi`b56twpJT081>`A@IG2vTQ^XJtH; zOjBH{+0uP^Y}3TmA0WX)m?m_J=2en71d+*^YDLWmon`7Qz6Tn2xi9g5CmZO`*mS`{ zHPV5FMY_E+w1-N3#geP$O{`_~1-*V7qpx8Rw*^VAf429+ zB2Rtm0DolC?2Kp}Is?NJwB(w?5Bc86*7FjGx3Smss8E-S)3Ocu4g6=G+7{BigpJl= zo&&(TwMtaPM~ZbTy#AV|0jhuW$HY7K_AJ8mL9Sn8JHSt2>VmdyZ5m$Wli5hieTbE0 zYsrsTa%dIotx6>5u-CRF(3o^yK z#G_T}T^6?(M)1x7gS0fu-^liQ21RLzoZIPuWQO-M4#^Q|;f8AZ%c$T-e|%X#`_-g3 z=ayn%mD$_gnb~A>Yzv!2szW5hCKr)kW60%uK86|{$}>F zR?jp(F10Xc-cJNry(bKSf0$V+V*@7a1sfN?w+ZN!G40|P$ejgpK*X0r<-8n7^gPUw zsDS@+;b;39M$u5OeuU_Svzb}(EUo3EuR3R=ylW|`wM+|1C@OLF{+q^&MqVRB7g_QO zq`lO|8JRSwUot}pyf5vfm)Im;BXKc|9zr${>XfYcPoBbMleFb*A!W~)=mJ^dxq(TLA6AsO7Nvm(HD3P`)Juz>sJ59aV<_jbB~M z_QGb9^9=V)b27urL!oOa=_&^bcoRnzq4+KKcL_u-HbowCf8(ayb4icH{K5&Hg*Ma& zsUC7eQ>T|#s z0znWOM`Dqa%dfVl3n-C2QuYsb&=uKYJcD0OTh%=*+FJ=;cxPxsdh+tK@NAZP!ho5U z{p8rheZfR#$2Rk-1wEriI}iGJ@YJh z`gLb+Qj}BhapF~Y65Uzvy>lg~E_D184|tzSihsB$e=D|D22J2jqGNnjkGW!nKz%Oe z``UseR0+2XAfAkMOXtw=FjHZO<16Akq)Uh5I%8i}xHGO9QNMz>yDK6m@%2~XDci{L zMMYR2{xc@~uwL)a>$YwY7QHYzTO;O_81O-u(_@PSUhpcJSpZo}AdZa{ABhGfn3U{tGoOe`IjE z_35e`f7+jxJ?G(5PFIlGsi7I+u34kAYSoSz$@5=mF8!g?!CP;pGjX-jt;4#Dnl5je z=PiA)&0R8JpZvqLeznHh1t-2-P`k!=`JqrUa{F-*F^H-wAZ$pJ_#;p!(*&EZY{$l*T z@8z@XFX@M*a$f7z?M%=6E@!+li2a{_in@c8a>(;T5qEcUD9uXuzouK|RJ-YCm!+zW zk(u@@T=k>fzuC8tK9Tz?FEQtG>_qMF)*cm;mOMXk$-`cB06|${!Fu2}X zmvL)a;BLuE&ZmqFapxGS^peXIKDlaN zZY+3fxN=sn>94TQjf%4ByW396d}02u`6yHU&a`Q-?d}y{D{M=SVqEerf=RIdXeUR- zxm=OcohJ``%xL_(wVb=ue(yY$%A0#q>=@tWm}N7c>e%@y=J%?}&-^DWYV)qGQ~#=^ z!=$pktM|cX-~hrsnf?Ne2fg_ Date: Tue, 
21 Feb 2023 18:03:10 +1100 Subject: [PATCH 181/263] Allow for withdrawals in max block size (#4011) * Allow for withdrawals in max block size * Ensure payload size is counted --- .../lighthouse_network/src/rpc/protocol.rs | 16 +++++-- consensus/types/src/beacon_block.rs | 46 +++++++++++++++++++ 2 files changed, 58 insertions(+), 4 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 52971320796..b6651021d8f 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -22,8 +22,9 @@ use tokio_util::{ }; use types::BlobsSidecar; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Blob, EmptyBlock, EthSpec, - ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge, Blob, + EmptyBlock, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, + SignedBeaconBlock, }; lazy_static! { @@ -62,6 +63,13 @@ lazy_static! { .as_ssz_bytes() .len(); + pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD: usize = SignedBeaconBlock::::from_block( + BeaconBlock::Capella(BeaconBlockCapella::full(&MainnetEthSpec::default_spec())), + Signature::empty(), + ) + .as_ssz_bytes() + .len(); + /// The `BeaconBlockMerge` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. /// We calculate the value from its fields instead of constructing the block and checking the length. /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network @@ -72,11 +80,11 @@ lazy_static! { + types::ExecutionPayload::::max_execution_payload_merge_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field - pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX: usize = *SIGNED_BEACON_BLOCK_ALTAIR_MAX + pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_capella_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field - pub static ref SIGNED_BEACON_BLOCK_EIP4844_MAX: usize = *SIGNED_BEACON_BLOCK_ALTAIR_MAX + pub static ref SIGNED_BEACON_BLOCK_EIP4844_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_eip4844_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload` + (::ssz_fixed_len() * ::max_blobs_per_block()) diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 60dd781a67f..0f26cd0e5e7 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -485,6 +485,52 @@ impl> EmptyBlock for BeaconBlockMerg } } +impl> BeaconBlockCapella { + /// Return a Capella block where the block has maximum size. 
+ pub fn full(spec: &ChainSpec) -> Self { + let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); + let bls_to_execution_changes = vec![ + SignedBlsToExecutionChange { + message: BlsToExecutionChange { + validator_index: 0, + from_bls_pubkey: PublicKeyBytes::empty(), + to_execution_address: Address::zero(), + }, + signature: Signature::empty() + }; + T::max_bls_to_execution_changes() + ] + .into(); + let sync_aggregate = SyncAggregate { + sync_committee_signature: AggregateSignature::empty(), + sync_committee_bits: BitVector::default(), + }; + BeaconBlockCapella { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyCapella { + proposer_slashings: base_block.body.proposer_slashings, + attester_slashings: base_block.body.attester_slashings, + attestations: base_block.body.attestations, + deposits: base_block.body.deposits, + voluntary_exits: base_block.body.voluntary_exits, + bls_to_execution_changes, + sync_aggregate, + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + execution_payload: Payload::Capella::default(), + }, + } + } +} + impl> EmptyBlock for BeaconBlockCapella { /// Returns an empty Capella block to be used during genesis. fn empty(spec: &ChainSpec) -> Self { From 1bce7a02c8f85b712f41ad7cf563d53407450976 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 Feb 2023 18:03:24 +1100 Subject: [PATCH 182/263] Fix post-Bellatrix checkpoint sync (#4014) * Recognise execution in post-merge blocks * Remove `.body()` * Fix typo * Use `is_default_with_empty_roots`. --- consensus/fork_choice/src/fork_choice.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 9ca9ef0cefc..85e43945cdc 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -413,18 +413,18 @@ where AttestationShufflingId::new(anchor_block_root, anchor_state, RelativeEpoch::Next) .map_err(Error::BeaconStateError)?; - // Default any non-merge execution block hashes to 0x000..000. - let execution_status = anchor_block.message_merge().map_or_else( - |()| ExecutionStatus::irrelevant(), - |message| { - let execution_payload = &message.body.execution_payload; - if execution_payload == &<_>::default() { + let execution_status = anchor_block.message().execution_payload().map_or_else( + // If the block doesn't have an execution payload then it can't have + // execution enabled. + |_| ExecutionStatus::irrelevant(), + |execution_payload| { + if execution_payload.is_default_with_empty_roots() { // A default payload does not have execution enabled. ExecutionStatus::irrelevant() } else { // Assume that this payload is valid, since the anchor should be a trusted block and // state. - ExecutionStatus::Valid(message.body.execution_payload.block_hash()) + ExecutionStatus::Valid(execution_payload.block_hash()) } }, ); From 40669da486ff070449b703d696c965e05c2860f1 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 Feb 2023 18:03:42 +1100 Subject: [PATCH 183/263] Modify some Capella comments (#4015) * Modify comment to only include 4844 Capella only modifies per epoch processing by adding `process_historical_summaries_update`, which does not change the realization of justification or finality. 
Whilst 4844 does not currently modify realization, the spec is not yet final enough to say that it never will. * Clarify address change verification comment The verification of the address change doesn't really have anything to do with the current epoch. I think this was just a copy-paste from a function like `verify_exit`. --- consensus/fork_choice/src/fork_choice.rs | 4 ++-- .../per_block_processing/verify_bls_to_execution_change.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 85e43945cdc..590e151a853 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -857,8 +857,8 @@ where (parent_justified, parent_finalized) } else { let justification_and_finalization_state = match block { - // FIXME: verify this is correct for Capella/Eip4844 because - // epoch processing changes in Capella.. + // TODO(eip4844): Ensure that the final specification + // does not substantially modify per epoch processing. BeaconBlockRef::Eip4844(_) | BeaconBlockRef::Capella(_) | BeaconBlockRef::Merge(_) diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs index 15a856c40c9..bb26799250d 100644 --- a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -10,8 +10,8 @@ fn error(reason: Invalid) -> BlockOperationError { BlockOperationError::invalid(reason) } -/// Indicates if a `BlsToExecutionChange` is valid to be included in a block in the current epoch of the given -/// state. +/// Indicates if a `BlsToExecutionChange` is valid to be included in a block, +/// where the block is being applied to the given `state`. /// /// Returns `Ok(())` if the `SignedBlsToExecutionChange` is valid, otherwise indicates the reason for invalidity. pub fn verify_bls_to_execution_change( From 3642efe76ac4cd480d2822a10fb8f089b1771f45 Mon Sep 17 00:00:00 2001 From: Mac L Date: Tue, 21 Feb 2023 20:54:55 +0000 Subject: [PATCH 184/263] Cache validator balances and allow them to be served over the HTTP API (#3863) ## Issue Addressed #3804 ## Proposed Changes - Add `total_balance` to the validator monitor and adjust the number of historical epochs which are cached. - Allow certain values in the cache to be served out via the HTTP API without requiring a state read. ## Usage ``` curl -X POST "http://localhost:5052/lighthouse/ui/validator_info" -d '{"indices": [0]}' -H "Content-Type: application/json" | jq ``` ``` { "data": { "validators": { "0": { "info": [ { "epoch": 172981, "total_balance": 36566388519 }, ... { "epoch": 172990, "total_balance": 36566496513 } ] }, "1": { "info": [ { "epoch": 172981, "total_balance": 36355797968 }, ... { "epoch": 172990, "total_balance": 36355905962 } ] } } } } ``` ## Additional Info This requires no historical states to operate which mean it will still function on the freshly checkpoint synced node, however because of this, the values will populate each epoch (up to a maximum of 10 entries). Another benefit of this method, is that we can easily cache any other values which would normally require a state read and serve them via the same endpoint. However, we would need be cautious about not overly increasing block processing time by caching values from complex computations. 
This also caches some of the validator metrics directly, rather than pulling them from the Prometheus metrics when the API is called. This means when the validator count exceeds the individual monitor threshold, the cached values will still be available. Co-authored-by: Paul Hauner --- Cargo.lock | 1 + .../beacon_chain/src/validator_monitor.rs | 106 ++++++++- beacon_node/http_api/Cargo.toml | 1 + beacon_node/http_api/src/lib.rs | 17 ++ beacon_node/http_api/src/ui.rs | 202 +++++++++++------- 5 files changed, 251 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5d5d3215713..12b70f58eef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3209,6 +3209,7 @@ dependencies = [ "environment", "eth1", "eth2", + "eth2_serde_utils", "eth2_ssz", "execution_layer", "futures", diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index dad5e1517ad..de26810126a 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -29,7 +29,7 @@ const TOTAL_LABEL: &str = "total"; /// The validator monitor collects per-epoch data about each monitored validator. Historical data /// will be kept around for `HISTORIC_EPOCHS` before it is pruned. -pub const HISTORIC_EPOCHS: usize = 4; +pub const HISTORIC_EPOCHS: usize = 10; /// Once the validator monitor reaches this number of validators it will stop /// tracking their metrics/logging individually in an effort to reduce @@ -45,7 +45,7 @@ pub enum Error { /// Contains data pertaining to one validator for one epoch. #[derive(Default)] -struct EpochSummary { +pub struct EpochSummary { /* * Attestations with a target in the current epoch. */ @@ -103,6 +103,12 @@ struct EpochSummary { pub proposer_slashings: usize, /// The number of attester slashings observed. pub attester_slashings: usize, + + /* + * Other validator info helpful for the UI. + */ + /// The total balance of the validator. + pub total_balance: Option, } impl EpochSummary { @@ -176,18 +182,60 @@ impl EpochSummary { pub fn register_attester_slashing(&mut self) { self.attester_slashings += 1; } + + pub fn register_validator_total_balance(&mut self, total_balance: u64) { + self.total_balance = Some(total_balance) + } } type SummaryMap = HashMap; +#[derive(Default)] +pub struct ValidatorMetrics { + pub attestation_hits: u64, + pub attestation_misses: u64, + pub attestation_head_hits: u64, + pub attestation_head_misses: u64, + pub attestation_target_hits: u64, + pub attestation_target_misses: u64, +} + +impl ValidatorMetrics { + pub fn increment_hits(&mut self) { + self.attestation_hits += 1; + } + + pub fn increment_misses(&mut self) { + self.attestation_misses += 1; + } + + pub fn increment_target_hits(&mut self) { + self.attestation_target_hits += 1; + } + + pub fn increment_target_misses(&mut self) { + self.attestation_target_misses += 1; + } + + pub fn increment_head_hits(&mut self) { + self.attestation_head_hits += 1; + } + + pub fn increment_head_misses(&mut self) { + self.attestation_head_misses += 1; + } +} + /// A validator that is being monitored by the `ValidatorMonitor`. -struct MonitoredValidator { +pub struct MonitoredValidator { /// A human-readable identifier for the validator. pub id: String, /// The validator index in the state. pub index: Option, /// A history of the validator over time. pub summaries: RwLock, + /// Validator metrics to be exposed over the HTTP API. 
+ pub metrics: RwLock, } impl MonitoredValidator { @@ -198,6 +246,7 @@ impl MonitoredValidator { .unwrap_or_else(|| pubkey.to_string()), index, summaries: <_>::default(), + metrics: <_>::default(), } } @@ -252,6 +301,20 @@ impl MonitoredValidator { fn touch_epoch_summary(&self, epoch: Epoch) { self.with_epoch_summary(epoch, |_| {}); } + + fn get_from_epoch_summary(&self, epoch: Epoch, func: F) -> Option + where + F: Fn(Option<&EpochSummary>) -> Option, + { + let summaries = self.summaries.read(); + func(summaries.get(&epoch)) + } + + pub fn get_total_balance(&self, epoch: Epoch) -> Option { + self.get_from_epoch_summary(epoch, |summary_opt| { + summary_opt.and_then(|summary| summary.total_balance) + }) + } } /// Holds a collection of `MonitoredValidator` and is notified about a variety of events on the P2P @@ -347,12 +410,20 @@ impl ValidatorMonitor { if let Some(i) = monitored_validator.index { monitored_validator.touch_epoch_summary(current_epoch); + let i = i as usize; + + // Cache relevant validator info. + if let Some(balance) = state.balances().get(i) { + monitored_validator.with_epoch_summary(current_epoch, |summary| { + summary.register_validator_total_balance(*balance) + }); + } + // Only log the per-validator metrics if it's enabled. if !self.individual_tracking() { continue; } - let i = i as usize; let id = &monitored_validator.id; if let Some(balance) = state.balances().get(i) { @@ -479,6 +550,25 @@ impl ValidatorMonitor { continue; } + // Store some metrics directly to be re-exposed on the HTTP API. + let mut validator_metrics = monitored_validator.metrics.write(); + if previous_epoch_matched_any { + validator_metrics.increment_hits(); + if previous_epoch_matched_target { + validator_metrics.increment_target_hits() + } else { + validator_metrics.increment_target_misses() + } + if previous_epoch_matched_head { + validator_metrics.increment_head_hits() + } else { + validator_metrics.increment_head_misses() + } + } else { + validator_metrics.increment_misses() + } + drop(validator_metrics); + // Indicates if any attestation made it on-chain. // // For Base states, this will be *any* attestation whatsoever. For Altair states, @@ -717,6 +807,14 @@ impl ValidatorMonitor { self.validators.values().map(|val| val.id.clone()).collect() } + pub fn get_monitored_validator(&self, index: u64) -> Option<&MonitoredValidator> { + if let Some(pubkey) = self.indices.get(&index) { + self.validators.get(pubkey) + } else { + None + } + } + /// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`. /// Otherwise, do nothing. 
pub fn auto_register_local_validator(&mut self, validator_index: u64) { diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 077e3aa7cda..d7a3a680bd5 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -36,6 +36,7 @@ tree_hash = "0.4.1" sysinfo = "0.26.5" system_health = { path = "../../common/system_health" } directory = { path = "../../common/directory" } +eth2_serde_utils = "0.1.1" [dev-dependencies] store = { path = "../store" } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 60e5d2adf43..009775701a7 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3025,6 +3025,22 @@ pub fn serve( }, ); + // POST lighthouse/ui/validator_info + let post_lighthouse_ui_validator_info = warp::path("lighthouse") + .and(warp::path("ui")) + .and(warp::path("validator_info")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and_then( + |request_data: ui::ValidatorInfoRequestData, chain: Arc>| { + blocking_json_task(move || { + ui::get_validator_info(request_data, chain) + .map(api_types::GenericResponse::from) + }) + }, + ); + // GET lighthouse/syncing let get_lighthouse_syncing = warp::path("lighthouse") .and(warp::path("syncing")) @@ -3522,6 +3538,7 @@ pub fn serve( .or(post_lighthouse_database_historical_blocks.boxed()) .or(post_lighthouse_block_rewards.boxed()) .or(post_lighthouse_ui_validator_metrics.boxed()) + .or(post_lighthouse_ui_validator_info.boxed()) .recover(warp_utils::reject::handle_rejection), )) .recover(warp_utils::reject::handle_rejection) diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs index a5b3a8b2f2e..e8280a796a3 100644 --- a/beacon_node/http_api/src/ui.rs +++ b/beacon_node/http_api/src/ui.rs @@ -1,5 +1,7 @@ -use beacon_chain::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes}; -use eth2::types::ValidatorStatus; +use beacon_chain::{ + validator_monitor::HISTORIC_EPOCHS, BeaconChain, BeaconChainError, BeaconChainTypes, +}; +use eth2::types::{Epoch, ValidatorStatus}; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use std::sync::Arc; @@ -71,6 +73,82 @@ pub fn get_validator_count( }) } +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorInfoRequestData { + #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + indices: Vec, +} + +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorInfoValues { + #[serde(with = "eth2_serde_utils::quoted_u64")] + epoch: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + total_balance: u64, +} + +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorInfo { + info: Vec, +} + +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorInfoResponse { + validators: HashMap, +} + +pub fn get_validator_info( + request_data: ValidatorInfoRequestData, + chain: Arc>, +) -> Result { + let current_epoch = chain.epoch().map_err(beacon_chain_error)?; + + let epochs = current_epoch.saturating_sub(HISTORIC_EPOCHS).as_u64()..=current_epoch.as_u64(); + + let validator_ids = chain + .validator_monitor + .read() + .get_all_monitored_validators() + .iter() + .cloned() + .collect::>(); + + let indices = request_data + .indices + .iter() + .map(|index| index.to_string()) + .collect::>(); + + let ids = validator_ids + .intersection(&indices) + .collect::>(); + + let mut validators = HashMap::new(); + + for id in ids { + if let Ok(index) = id.parse::() { + if let Some(validator) = chain + 
.validator_monitor + .read() + .get_monitored_validator(index) + { + let mut info = vec![]; + for epoch in epochs.clone() { + if let Some(total_balance) = validator.get_total_balance(Epoch::new(epoch)) { + info.push(ValidatorInfoValues { + epoch, + total_balance, + }); + } + } + validators.insert(id.clone(), ValidatorInfo { info }); + } + } + } + + Ok(ValidatorInfoResponse { validators }) +} + #[derive(PartialEq, Serialize, Deserialize)] pub struct ValidatorMetricsRequestData { indices: Vec, @@ -119,76 +197,56 @@ pub fn post_validator_monitor_metrics( let mut validators = HashMap::new(); for id in ids { - let attestation_hits = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let attestation_misses = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let attestations = attestation_hits + attestation_misses; - let attestation_hit_percentage: f64 = if attestations == 0 { - 0.0 - } else { - (100 * attestation_hits / attestations) as f64 - }; - - let attestation_head_hits = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let attestation_head_misses = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let head_attestations = attestation_head_hits + attestation_head_misses; - let attestation_head_hit_percentage: f64 = if head_attestations == 0 { - 0.0 - } else { - (100 * attestation_head_hits / head_attestations) as f64 - }; - - let attestation_target_hits = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let attestation_target_misses = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let target_attestations = attestation_target_hits + attestation_target_misses; - let attestation_target_hit_percentage: f64 = if target_attestations == 0 { - 0.0 - } else { - (100 * attestation_target_hits / target_attestations) as f64 - }; - - let metrics = ValidatorMetrics { - attestation_hits, - attestation_misses, - attestation_hit_percentage, - attestation_head_hits, - attestation_head_misses, - attestation_head_hit_percentage, - attestation_target_hits, - attestation_target_misses, - attestation_target_hit_percentage, - }; - - validators.insert(id.clone(), metrics); + if let Ok(index) = id.parse::() { + if let Some(validator) = chain + .validator_monitor + .read() + .get_monitored_validator(index) + { + let val_metrics = validator.metrics.read(); + let attestation_hits = val_metrics.attestation_hits; + let attestation_misses = val_metrics.attestation_misses; + let attestation_head_hits = val_metrics.attestation_head_hits; + let attestation_head_misses = val_metrics.attestation_head_misses; + let attestation_target_hits = val_metrics.attestation_target_hits; + let attestation_target_misses = val_metrics.attestation_target_misses; + drop(val_metrics); + + let attestations = attestation_hits + attestation_misses; + let attestation_hit_percentage: f64 = if attestations == 0 { + 0.0 + } else { + (100 * attestation_hits / attestations) as f64 + }; + let head_attestations = 
attestation_head_hits + attestation_head_misses; + let attestation_head_hit_percentage: f64 = if head_attestations == 0 { + 0.0 + } else { + (100 * attestation_head_hits / head_attestations) as f64 + }; + + let target_attestations = attestation_target_hits + attestation_target_misses; + let attestation_target_hit_percentage: f64 = if target_attestations == 0 { + 0.0 + } else { + (100 * attestation_target_hits / target_attestations) as f64 + }; + + let metrics = ValidatorMetrics { + attestation_hits, + attestation_misses, + attestation_hit_percentage, + attestation_head_hits, + attestation_head_misses, + attestation_head_hit_percentage, + attestation_target_hits, + attestation_target_misses, + attestation_target_hit_percentage, + }; + + validators.insert(id.clone(), metrics); + } + } } Ok(ValidatorMetricsResponse { validators }) From b7d7addd4ae7c43ebd32604b0956de68de6d175f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 21 Feb 2023 20:54:57 +0000 Subject: [PATCH 185/263] Disable debug info on CI (#4018) ## Issue Addressed Closes #4005 Alternative to #4017 ## Proposed Changes Disable debug info on CI to save RAM and disk space. --- .github/workflows/test-suite.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 5ecd5efe36b..445f71fa096 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -10,7 +10,8 @@ on: pull_request: env: # Deny warnings in CI - RUSTFLAGS: "-D warnings" + # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005) + RUSTFLAGS: "-D warnings -C debuginfo=0" # The Nightly version used for cargo-udeps, might need updating from time to time. PINNED_NIGHTLY: nightly-2022-12-15 # Prevent Github API rate limiting. 
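To reproduce the CI build settings above on a local machine (an assumed usage pattern, not something this patch adds), the same flags can be exported for a one-off run:

```bash
# Sketch only: mirrors the RUSTFLAGS set in the workflow above to deny
# warnings and strip debug info, reducing RAM and disk usage while compiling.
RUSTFLAGS="-D warnings -C debuginfo=0" cargo test --release --workspace
```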
From bb5285ac6d696ff4c48b6a2cb800d739cfc5cd4f Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 22 Feb 2023 04:15:38 +0530 Subject: [PATCH 186/263] Remove BeaconBlockAndBlobsSidecar from core topics (#4016) --- beacon_node/lighthouse_network/src/types/topics.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index ab7fb722bf8..b83b03d6b21 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -23,9 +23,8 @@ pub const BLS_TO_EXECUTION_CHANGE_TOPIC: &str = "bls_to_execution_change"; pub const LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update"; pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; -pub const CORE_TOPICS: [GossipKind; 8] = [ +pub const CORE_TOPICS: [GossipKind; 7] = [ GossipKind::BeaconBlock, - GossipKind::BeaconBlocksAndBlobsSidecar, GossipKind::BeaconAggregateAndProof, GossipKind::VoluntaryExit, GossipKind::ProposerSlashing, From 9c81be8ac43f47e797acd35438a493749c9d9567 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 22 Feb 2023 09:46:45 +1100 Subject: [PATCH 187/263] Fix metric (#4020) --- beacon_node/network/src/beacon_processor/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index f9de5373138..018e6f7e341 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -1517,7 +1517,7 @@ impl BeaconProcessor { gossip_block_queue.len() as i64, ); metrics::set_gauge( - &metrics::BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL, + &metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL, rpc_block_queue.len() as i64, ); metrics::set_gauge( From 3721f3a83c5d8544e6d812ac1196594cddb780f5 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 21 Feb 2023 23:45:43 +0000 Subject: [PATCH 188/263] Fix doppelganger script (#3988) ## Issue Addressed N/A ## Proposed Changes The doppelganger tests were failing silently since the `PROPOSER_BOOST` config was not set. Sets the config and script returns an error if any subprocess fails. --- scripts/local_testnet/vars.env | 2 +- scripts/tests/doppelganger_protection.sh | 39 ++++++++++++++++-------- scripts/tests/vars.env | 3 ++ 3 files changed, 31 insertions(+), 13 deletions(-) diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index 2506e9e1cdf..1ade1732867 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -45,7 +45,7 @@ SECONDS_PER_SLOT=3 SECONDS_PER_ETH1_BLOCK=1 # Proposer score boost percentage -PROPOSER_SCORE_BOOST=70 +PROPOSER_SCORE_BOOST=40 # Command line arguments for validator client VC_ARGS="" diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index b0f9ce82658..95dfff56962 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -2,6 +2,7 @@ # Requires `lighthouse`, ``lcli`, `ganache`, `curl`, `jq` + BEHAVIOR=$1 if [[ "$BEHAVIOR" != "success" ]] && [[ "$BEHAVIOR" != "failure" ]]; then @@ -9,13 +10,22 @@ if [[ "$BEHAVIOR" != "success" ]] && [[ "$BEHAVIOR" != "failure" ]]; then exit 1 fi +exit_if_fails() { + echo $@ + $@ + EXIT_CODE=$? 
+ if [[ $EXIT_CODE -eq 1 ]]; then + exit 111 + fi +} + source ./vars.env -../local_testnet/clean.sh +exit_if_fails ../local_testnet/clean.sh echo "Starting ganache" -../local_testnet/ganache_test_node.sh &> /dev/null & +exit_if_fails ../local_testnet/ganache_test_node.sh &> /dev/null & GANACHE_PID=$! # Wait for ganache to start @@ -23,14 +33,14 @@ sleep 5 echo "Setting up local testnet" -../local_testnet/setup.sh +exit_if_fails ../local_testnet/setup.sh # Duplicate this directory so slashing protection doesn't keep us from re-using validator keys -cp -R $HOME/.lighthouse/local-testnet/node_1 $HOME/.lighthouse/local-testnet/node_1_doppelganger +exit_if_fails cp -R $HOME/.lighthouse/local-testnet/node_1 $HOME/.lighthouse/local-testnet/node_1_doppelganger echo "Starting bootnode" -../local_testnet/bootnode.sh &> /dev/null & +exit_if_fails ../local_testnet/bootnode.sh &> /dev/null & BOOT_PID=$! # wait for the bootnode to start @@ -38,20 +48,20 @@ sleep 10 echo "Starting local beacon nodes" -../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 &> /dev/null & +exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 &> /dev/null & BEACON_PID=$! -../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9100 8100 &> /dev/null & +exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9100 8100 &> /dev/null & BEACON_PID2=$! -../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 9200 8200 &> /dev/null & +exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 9200 8200 &> /dev/null & BEACON_PID3=$! echo "Starting local validator clients" -../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8000 &> /dev/null & +exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8000 &> /dev/null & VALIDATOR_1_PID=$! -../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:8100 &> /dev/null & +exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:8100 &> /dev/null & VALIDATOR_2_PID=$! -../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_3 http://localhost:8200 &> /dev/null & +exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_3 http://localhost:8200 &> /dev/null & VALIDATOR_3_PID=$! echo "Waiting an epoch before starting the next validator client" @@ -73,9 +83,14 @@ if [[ "$BEHAVIOR" == "failure" ]]; then echo "Done" - if [[ $DOPPELGANGER_EXIT -eq 124 ]]; then + # We expect to find a doppelganger, exit with success error code if doppelganger was found + # and failure if no doppelganger was found. + if [[ $DOPPELGANGER_EXIT -eq 1 ]]; then + exit 0 + else exit 1 fi + fi if [[ "$BEHAVIOR" == "success" ]]; then diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env index 376fe3d8c55..778a0afca59 100644 --- a/scripts/tests/vars.env +++ b/scripts/tests/vars.env @@ -44,5 +44,8 @@ SECONDS_PER_SLOT=3 # Seconds per Eth1 block SECONDS_PER_ETH1_BLOCK=1 +# Proposer score boost percentage +PROPOSER_SCORE_BOOST=40 + # Enable doppelganger detection VC_ARGS=" --enable-doppelganger-protection " From 5c63d8758e35894660538ad205d9a1cacb9c7218 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 21 Feb 2023 23:45:44 +0000 Subject: [PATCH 189/263] Register disconnected peers when temporarily banned (#4001) This is a correction to #3757. 
The correction registers a peer that is being disconnected in the local peer manager db to ensure we are tracking the correct state. --- beacon_node/lighthouse_network/src/peer_manager/peerdb.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 61cf8de1cb2..70d3399d6ad 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -847,6 +847,10 @@ impl PeerDB { PeerConnectionStatus::Disconnecting { .. } => { // The peer has been disconnected but not banned. Inform the peer manager // that this peer could be eligible for a temporary ban. + self.disconnected_peers += 1; + info.set_connection_status(PeerConnectionStatus::Disconnected { + since: Instant::now(), + }); return Some(BanOperation::TemporaryBan); } PeerConnectionStatus::Unknown From 0fb58a680d6f0c9f0dc8beecf142186debff9a8d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 22 Feb 2023 06:00:49 +0000 Subject: [PATCH 190/263] v3.5.0 (#3996) ## Issue Addressed NA ## Proposed Changes - Bump versions ## Sepolia Capella Upgrade This release will enable the Capella fork on Sepolia. We are planning to publish this release on the 23rd of Feb 2023. Users who can build from source and wish to do pre-release testing can use this branch. ## Additional Info - [ ] Requires further testing --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1ce987816cf..33b0070fbbe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -617,7 +617,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "3.4.0" +version = "3.5.0" dependencies = [ "beacon_chain", "clap", @@ -785,7 +785,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "3.4.0" +version = "3.5.0" dependencies = [ "beacon_node", "clap", @@ -3719,7 +3719,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "3.4.0" +version = "3.5.0" dependencies = [ "account_utils", "beacon_chain", @@ -4323,7 +4323,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "3.4.0" +version = "3.5.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index cca8cc969ef..a2acd60552e 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "3.4.0" +version = "3.5.0" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 3c136b18b92..8ad4aa86f31 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v3.4.0-", - fallback = "Lighthouse/v3.4.0" + prefix = "Lighthouse/v3.5.0-", + fallback = "Lighthouse/v3.5.0" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 8ebac0ca610..93fe17506bf 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "3.4.0" +version = "3.5.0" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index a9e38a89272..2c0f1ec1cba 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "3.4.0" +version = "3.5.0" authors = ["Sigma Prime "] edition = "2021" autotests = false From be29394a9de90db562adf1fde9d68e7a6754cc11 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 27 Feb 2023 21:26:16 +0000 Subject: [PATCH 191/263] Execution Integration Tests Correction (#4034) The execution integration tests are currently failing. This is a quick modification to pin the execution client version to correct the tests. --- testing/execution_engine_integration/src/geth.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 1b96fa9f3f9..5a1a5d4f53c 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -7,7 +7,7 @@ use std::{env, fs::File}; use tempfile::TempDir; use unused_port::unused_tcp_port; -const GETH_BRANCH: &str = "master"; +// const GETH_BRANCH: &str = "master"; const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum"; pub fn build_result(repo_dir: &Path) -> Output { @@ -27,7 +27,9 @@ pub fn build(execution_clients_dir: &Path) { } // Get the latest tag on the branch - let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); + // TODO: Update when version is corrected + // let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); + let last_release = "v1.11.1"; build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap(); // Build geth From 47b22d5256487c814b15030882faf59a4e9e9f06 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 28 Feb 2023 02:20:49 +0000 Subject: [PATCH 192/263] Allow compilation with no slasher backend (#3888) ## Proposed Changes Allowing compiling without MDBX by running: ```bash CARGO_INSTALL_EXTRA_FLAGS="--no-default-features" make ``` The reasons to do this are several: - Save compilation time if the slasher won't be used - Work around compilation errors in slasher backend dependencies (our pinned version of MDBX is currently not compiling on FreeBSD with certain compiler versions). ## Additional Info When I opened this PR we were using resolver v1 which [doesn't disable default features in dependencies](https://doc.rust-lang.org/cargo/reference/features.html#resolver-version-2-command-line-flags), and `mdbx` is default for the `slasher` crate. Even after the resolver got changed to v2 in #3697 compiling with `--no-default-features` _still_ wasn't turning off the slasher crate's default features, so I added `default-features = false` in all the places we depend on it. 
Co-authored-by: Michael Sproul --- Makefile | 15 ++++++++++++--- beacon_node/Cargo.toml | 2 +- beacon_node/beacon_chain/Cargo.toml | 2 +- beacon_node/client/Cargo.toml | 2 +- book/src/installation-source.md | 9 +++++++++ lighthouse/Cargo.toml | 2 +- slasher/service/Cargo.toml | 2 +- 7 files changed, 26 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 05c6c74d51e..89362d12d82 100644 --- a/Makefile +++ b/Makefile @@ -38,15 +38,24 @@ PROFILE ?= release # they run for different forks. FORKS=phase0 altair merge capella +# Extra flags for Cargo +CARGO_INSTALL_EXTRA_FLAGS?= + # Builds the Lighthouse binary in release (optimized). # # Binaries will most likely be found in `./target/release` install: - cargo install --path lighthouse --force --locked --features "$(FEATURES)" --profile "$(PROFILE)" + cargo install --path lighthouse --force --locked \ + --features "$(FEATURES)" \ + --profile "$(PROFILE)" \ + $(CARGO_INSTALL_EXTRA_FLAGS) # Builds the lcli binary in release (optimized). install-lcli: - cargo install --path lcli --force --locked --features "$(FEATURES)" --profile "$(PROFILE)" + cargo install --path lcli --force --locked \ + --features "$(FEATURES)" \ + --profile "$(PROFILE)" \ + $(CARGO_INSTALL_EXTRA_FLAGS) # The following commands use `cross` to build a cross-compile. # @@ -124,7 +133,7 @@ run-ef-tests: test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS)) test-beacon-chain-%: - env FORK_NAME=$* cargo test --release --features fork_from_env -p beacon_chain + env FORK_NAME=$* cargo test --release --features fork_from_env,slasher/lmdb -p beacon_chain # Run the tests in the `operation_pool` crate for all known forks. test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS)) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index a2acd60552e..3c37f41de68 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -36,7 +36,7 @@ clap_utils = { path = "../common/clap_utils" } hyper = "0.14.4" lighthouse_version = { path = "../common/lighthouse_version" } hex = "0.4.2" -slasher = { path = "../slasher" } +slasher = { path = "../slasher", default-features = false } monitoring_api = { path = "../common/monitoring_api" } sensitive_url = { path = "../common/sensitive_url" } http_api = { path = "http_api" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 5b85833048b..5599e6f97d8 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -53,7 +53,7 @@ fork_choice = { path = "../../consensus/fork_choice" } task_executor = { path = "../../common/task_executor" } derivative = "2.1.1" itertools = "0.10.0" -slasher = { path = "../../slasher" } +slasher = { path = "../../slasher", default-features = false } eth2 = { path = "../../common/eth2" } strum = { version = "0.24.0", features = ["derive"] } logging = { path = "../../common/logging" } diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 9a49843a9f3..876458eea52 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -39,7 +39,7 @@ time = "0.3.5" directory = {path = "../../common/directory"} http_api = { path = "../http_api" } http_metrics = { path = "../http_metrics" } -slasher = { path = "../../slasher" } +slasher = { path = "../../slasher", default-features = false } slasher_service = { path = "../../slasher/service" } monitoring_api = {path = "../../common/monitoring_api"} execution_layer = { path = "../execution_layer" } diff --git a/book/src/installation-source.md 
b/book/src/installation-source.md index 8e515a41bd5..c89dd1add4f 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -133,6 +133,15 @@ Commonly used features include: * `slasher-lmdb`: support for the LMDB slasher backend. * `jemalloc`: use [`jemalloc`][jemalloc] to allocate memory. Enabled by default on Linux and macOS. Not supported on Windows. +* `spec-minimal`: support for the minimal preset (useful for testing). + +Default features (e.g. `slasher-mdbx`) may be opted out of using the `--no-default-features` +argument for `cargo`, which can plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable. +E.g. + +``` +CARGO_INSTALL_EXTRA_FLAGS="--no-default-features" make +``` [jemalloc]: https://jemalloc.net/ diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 2c0f1ec1cba..ecac53fb147 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -55,7 +55,7 @@ malloc_utils = { path = "../common/malloc_utils" } directory = { path = "../common/directory" } unused_port = { path = "../common/unused_port" } database_manager = { path = "../database_manager" } -slasher = { path = "../slasher" } +slasher = { path = "../slasher", default-features = false } [dev-dependencies] tempfile = "3.1.0" diff --git a/slasher/service/Cargo.toml b/slasher/service/Cargo.toml index 63cf1e4649e..0a787defa21 100644 --- a/slasher/service/Cargo.toml +++ b/slasher/service/Cargo.toml @@ -9,7 +9,7 @@ beacon_chain = { path = "../../beacon_node/beacon_chain" } directory = { path = "../../common/directory" } lighthouse_network = { path = "../../beacon_node/lighthouse_network" } network = { path = "../../beacon_node/network" } -slasher = { path = ".." } +slasher = { path = "..", default-features = false } slog = "2.5.2" slot_clock = { path = "../../common/slot_clock" } state_processing = { path = "../../consensus/state_processing" } From cc4fc422b2c08ba387cc32d577116c0fae6bf5f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alan=20H=C3=B6ng?= Date: Tue, 28 Feb 2023 02:20:50 +0000 Subject: [PATCH 193/263] Add content-type header to metrics server response (#3970) This fixes issues with certain metrics scrapers, which might error if the content-type is not correctly set. ## Issue Addressed Fixes https://github.com/sigp/lighthouse/issues/3437 ## Proposed Changes Simply set header: `Content-Type: text/plain` on metrics server response. Seems like the errored branch does this correctly already. ## Additional Info This is needed also to enable influx-db metric scraping which work very nicely with Geth. 
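Before the diff, here is a self-contained sketch of the pattern the change applies — explicitly attaching `Content-Type: text/plain` to a warp response. It assumes the `warp` and `tokio` crates; the port and the metric line are placeholders, not Lighthouse's actual values:

```rust
// Minimal sketch (not the Lighthouse handler itself): serve a plain-text
// metrics body with an explicit Content-Type header so strict scrapers
// accept the response.
use warp::http::Response;
use warp::Filter;

#[tokio::main]
async fn main() {
    let metrics = warp::path("metrics").map(|| {
        let body = String::from("example_metric 1\n"); // placeholder metric
        Response::builder()
            .status(200)
            .header("Content-Type", "text/plain")
            .body(body)
            .expect("a valid status and header cannot fail to build")
    });

    // Placeholder listen address/port; Lighthouse uses its own config.
    warp::serve(metrics).run(([127, 0, 0, 1], 5054)).await;
}
```

The actual change below does the same thing inside the existing `gather_prometheus_metrics` success branch, for both the beacon node and validator client metrics servers.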
--- beacon_node/http_metrics/src/lib.rs | 8 +++++++- beacon_node/http_metrics/tests/tests.rs | 9 ++++++++- validator_client/src/http_metrics/mod.rs | 8 +++++++- 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/beacon_node/http_metrics/src/lib.rs b/beacon_node/http_metrics/src/lib.rs index dfdb8f7ff1b..2895506c3b3 100644 --- a/beacon_node/http_metrics/src/lib.rs +++ b/beacon_node/http_metrics/src/lib.rs @@ -116,7 +116,13 @@ pub fn serve( .and_then(|ctx: Arc>| async move { Ok::<_, warp::Rejection>( metrics::gather_prometheus_metrics(&ctx) - .map(|body| Response::builder().status(200).body(body).unwrap()) + .map(|body| { + Response::builder() + .status(200) + .header("Content-Type", "text/plain") + .body(body) + .unwrap() + }) .unwrap_or_else(|e| { Response::builder() .status(500) diff --git a/beacon_node/http_metrics/tests/tests.rs b/beacon_node/http_metrics/tests/tests.rs index b3e02d4cb6f..89fde323746 100644 --- a/beacon_node/http_metrics/tests/tests.rs +++ b/beacon_node/http_metrics/tests/tests.rs @@ -1,6 +1,7 @@ use beacon_chain::test_utils::EphemeralHarnessType; use environment::null_logger; use http_metrics::Config; +use reqwest::header::HeaderValue; use reqwest::StatusCode; use std::net::{IpAddr, Ipv4Addr}; use std::sync::Arc; @@ -45,7 +46,13 @@ async fn returns_200_ok() { listening_socket.port() ); - assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK); + let response = reqwest::get(&url).await.unwrap(); + + assert_eq!(response.status(), StatusCode::OK); + assert_eq!( + response.headers().get("Content-Type").unwrap(), + &HeaderValue::from_str("text/plain").unwrap() + ); } .await } diff --git a/validator_client/src/http_metrics/mod.rs b/validator_client/src/http_metrics/mod.rs index c30d6034471..31337491e88 100644 --- a/validator_client/src/http_metrics/mod.rs +++ b/validator_client/src/http_metrics/mod.rs @@ -121,7 +121,13 @@ pub fn serve( .and_then(|ctx: Arc>| async move { Ok::<_, warp::Rejection>( metrics::gather_prometheus_metrics(&ctx) - .map(|body| Response::builder().status(200).body(body).unwrap()) + .map(|body| { + Response::builder() + .status(200) + .header("Content-Type", "text/plain") + .body(body) + .unwrap() + }) .unwrap_or_else(|e| { Response::builder() .status(500) From caa6190d4a468ef74d647f668ac04e4accf293d5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 28 Feb 2023 02:20:51 +0000 Subject: [PATCH 194/263] Use consensus-spec-tests `v1.3.0-rc.3` (#4021) ## Issue Addressed NA ## Proposed Changes Updates our `ef_tests` to use: https://github.com/ethereum/consensus-specs/releases/tag/v1.3.0-rc.3 This required: - Skipping a `merkle_proof_validity` test (see #4022) - Account for the `eip4844` tests changing name to `deneb` - My IDE did some Python linting during this change. It seemed simple and nice so I left it there. 
## Additional Info NA --- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 9 ++++++--- testing/ef_tests/src/handler.rs | 5 +++++ 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 1feba41c86f..fc3dea6e2f5 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.3.0-rc.1 +TESTS_TAG := v1.3.0-rc.3 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index f8ddc0a9f23..b52d1552244 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -41,8 +41,8 @@ "tests/.*/.*/ssz_static/LightClientFinalityUpdate", # LightClientHeader "tests/.*/.*/ssz_static/LightClientHeader", - # Eip4844 tests are disabled for now. - "tests/.*/eip4844", + # Deneb (previously known as eip4844) tests are disabled for now. + "tests/.*/deneb", # One of the EF researchers likes to pack the tarballs on a Mac ".*\.DS_Store.*", # More Mac weirdness. @@ -55,9 +55,11 @@ "bls12-381-tests/hash_to_G2" ] + def normalize_path(path): return path.split("consensus-spec-tests/")[1] + # Determine the list of filenames which were accessed during tests. passed = set() for line in open(accessed_files_filename, 'r').readlines(): @@ -90,4 +92,5 @@ def normalize_path(path): # Exit with an error if there were any files missed. assert len(missed) == 0, "{} missed files".format(len(missed)) -print("Accessed {} files ({} intentionally excluded)".format(accessed_files, excluded_files)) +print("Accessed {} files ({} intentionally excluded)".format( + accessed_files, excluded_files)) diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 07db7cd2a1d..c066bdafa48 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -650,6 +650,11 @@ impl Handler for MerkleProofValidityHandler { fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { fork_name != ForkName::Base + // Test is skipped due to some changes in the Capella light client + // spec. + // + // https://github.com/sigp/lighthouse/issues/4022 + && fork_name != ForkName::Capella } } From 0155455990dc464558573ee9e5c19da5c2c4124d Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 28 Feb 2023 02:20:52 +0000 Subject: [PATCH 195/263] Docs for Siren (#4023) This adds some documentation for the Siren app into the Lighthouse book. 
Co-authored-by: Mavrik --- .gitignore | 1 + book/src/SUMMARY.md | 5 ++ book/src/imgs/ui-account-earnings.png | Bin 0 -> 886925 bytes book/src/imgs/ui-balance-modal.png | Bin 0 -> 44421 bytes book/src/imgs/ui-configuration.png | Bin 0 -> 110294 bytes book/src/imgs/ui-dashboard.png | Bin 0 -> 1453496 bytes book/src/imgs/ui-device.png | Bin 0 -> 57810 bytes book/src/imgs/ui-hardware.png | Bin 0 -> 73137 bytes book/src/imgs/ui-settings.png | Bin 0 -> 353862 bytes book/src/imgs/ui-validator-balance1.png | Bin 0 -> 67314 bytes book/src/imgs/ui-validator-balance2.png | Bin 0 -> 90980 bytes book/src/imgs/ui-validator-management.png | Bin 0 -> 391996 bytes book/src/imgs/ui-validator-modal.png | Bin 0 -> 341438 bytes book/src/imgs/ui-validator-table.png | Bin 0 -> 127175 bytes book/src/imgs/ui.png | Bin 0 -> 372824 bytes book/src/lighthouse-ui.md | 33 +++++++ book/src/ui-configuration.md | 47 ++++++++++ book/src/ui-faqs.md | 13 +++ book/src/ui-installation.md | 103 ++++++++++++++++++++++ book/src/ui-usage.md | 61 +++++++++++++ 20 files changed, 263 insertions(+) create mode 100644 book/src/imgs/ui-account-earnings.png create mode 100644 book/src/imgs/ui-balance-modal.png create mode 100644 book/src/imgs/ui-configuration.png create mode 100644 book/src/imgs/ui-dashboard.png create mode 100644 book/src/imgs/ui-device.png create mode 100644 book/src/imgs/ui-hardware.png create mode 100644 book/src/imgs/ui-settings.png create mode 100644 book/src/imgs/ui-validator-balance1.png create mode 100644 book/src/imgs/ui-validator-balance2.png create mode 100644 book/src/imgs/ui-validator-management.png create mode 100644 book/src/imgs/ui-validator-modal.png create mode 100644 book/src/imgs/ui-validator-table.png create mode 100644 book/src/imgs/ui.png create mode 100644 book/src/lighthouse-ui.md create mode 100644 book/src/ui-configuration.md create mode 100644 book/src/ui-faqs.md create mode 100644 book/src/ui-installation.md create mode 100644 book/src/ui-usage.md diff --git a/.gitignore b/.gitignore index ae9f83c46dd..1b7e5dbb88b 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ genesis.ssz # IntelliJ /*.iml +.idea \ No newline at end of file diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 470407ebee9..7def1821dd2 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -33,6 +33,11 @@ * [Authorization Header](./api-vc-auth-header.md) * [Signature Header](./api-vc-sig-header.md) * [Prometheus Metrics](./advanced_metrics.md) +* [Lighthouse UI (Siren)](./lighthouse-ui.md) + * [Installation](./ui-installation.md) + * [Configuration](./ui-configuration.md) + * [Usage](./ui-usage.md) + * [FAQs](./ui-faqs.md) * [Advanced Usage](./advanced.md) * [Checkpoint Sync](./checkpoint-sync.md) * [Custom Data Directories](./advanced-datadir.md) diff --git a/book/src/imgs/ui-account-earnings.png b/book/src/imgs/ui-account-earnings.png new file mode 100644 index 0000000000000000000000000000000000000000..69e9456035635e947ddfc97771f646f07ee4e588 GIT binary patch literal 886925 zcmZU(1y~$E_Xdg-DHLdNr-c@WEflw<#frPbQlPlIE)*zk#T|+l+r{189TsPG;UYZ_Y`uf}8{v1}O#t0s@xgS5YMdgcpqn2*{M^sLwS-#T1GN z2(N%}`zAER7Hlz6QssqiHDj5vFTL{}MJue3A2W=|vQ&uoODW#ggcU z_lRGD*}2^`5Mj=Ys=h&C8uF!fChS@w!es2P*C=JOQ4)rMH$ArRww9hA9kzfb z!)e;62vzIkQS>;fXm9m2=p(SM^6Bz(#wFzukz#$gN>Sa(t$l(>C@8RhW2HS=A45cJ zOw|u^wVn>1pm}ZUM7D^+S?`8d2yD;a385o^x(I?X5#LI=RWSHdGX!9n2J8F22E?v0 z^FpH6Uhyt5od-u6eO|!+5seW@GmIpBiMz2Yw!r)+a}K90`xo77q~GF!Gju?2T%0w> zppwi;bb|sRnsv0a;ubUGR~Gg>njIQKL`e)N=oq4Isb)_mypJ`T+mZnW8tr+}EZD(u 
[GIT binary patch data for the Siren UI screenshots (book/src/imgs/*.png) omitted — the base85-encoded image bytes carry no readable content; the added files are listed in the diffstat above.]

9FF=!X_{c~^*DyL-1GT*q(B-n|G1>DZVeTyII`^^P?HO0h9bdPb&ymhC**}F8 zf$4~G6z!(Tt6ySXiq{1jT-DhHuD9xM10H;s->yJiQ`fDu)zT=WDe~ocKqpQ|>F1UB zO=*xe$meI0^hM{{`)v-Eo$9`#sdNL*ZxrCbEuZCrYiiYRSU;~h{n`6A-*0l6dUDSg zCbifgAOsfDrW~t)*6!)>>2eg!PS(rU3`-A(*Ff7fbSZob%kC=wc7(~hpNHQ2gg4Wu z_hjnHh%z^^4@FpaWzI4mp*c+5z4s(-l1$Gx{$2st!hvS^a(eOl&1dI>eh0oFH-Kdb z;N#*s_W6YSZ*XQ+lU(bWyZioQ9z6;@xa}S28c)#;Vn26gR!hH2`cv=X>bg5WJ8&m; zf1}tstPKMX7@I^>jjlwrwV`<}3%K}ay7WOgZQyNi>DZ+)A5rZLVHMrl##yp?O9M+< zmmIn1Gfh8CJ}r7zo$rvR=bm)o&=ti+nlKsUFGmzK5p_DrvD*aeR8}r-hFA-n8%~Hq za}mWq8;@~{oV>Vf9Uf%&U5-IZvR~Dqb+&&Qcu~q2Sg+^)8qOT{! z=?o@Wc`H)S!vK(WL&0SEeLS}p2N-IOgFECpAKo2v8Z7Oj?ii>xs3=-Vc!YkNseuCY!h|!uZZ&GoI&d-D>hesiuM2U z6=#3RsP(?kTS4whx%HR(lN}TMu1}B$wjAn}8i03p3<^zE)sC-GR$5}NAb9_i>+&-W z%eQ@DK%4}Qli-8^*G585e_n&YUxF0%Q+MD$c2eTqsqWFLk|zYoSC!6uRp~mtwb~Ui zGCkw@zbuufPB~X7OjF@k4$yq{LejiHpBmF+u^?aVKAh@lM?|iWMcqE1^Q5P3LHOAH zRl(bF{1i*tr7QU^1F^ekO5&oo^lh5+CTHxx)?ebq+y`^0A($t*uX9tS#>dQSq6R1X z)H2aO#6SD+be`#^LIGGab0Iw-4B-DLwI$~nSq!zF67~l zc8VZkB&xgp8>XbHt}E$b@mc6S(eT^WB@pg!=>Gfg{R>0W3LWwB0kM6iUF#iwXt@d} zzfU~C`!KM6^7%|qwOt8p{?IpTc?b|9@2wv;FV6JnPNo|T+E3Bp$adU#nBAK!_i563 z8M+N1Z7iLY_62>yncaz`Pn_ic=fw%9P27GMgEY6eY8NuW?2`~K66ab>ROKK+<{zFS zw=(f2^*$vhn0Ed&2FRKdDVJ6=rYUr{zZLY(uJ%celTFQWwSAt4@q&Yj4Bh!)b~k|E z80{Q<*I0&1@RsyuPv=Xf&K)hDN2VIW+fQ#;hm(nVkU0N=Zm)ve%6`rc+OK|pyL<1_ z)V+oQtTzVWn0{m-!2fehtQdUt@E75;v`|~)I`ezA&k73DSfEXO=AyegaADGW8%JM$ zljp23D#C>?mYFw~_ih#9dICEBiU#VIt~_(!M7zwRC*)Uv&Xq7ZNbkxT@gD`(b#-!0 zgko>aw5pg4Be5;VZJ+G#__%7@pz z_!XPOPuE(l8plEXjtAQlOFE~ z#947QS-%aTUPIiU_-0bRv{1N|JyMJORj&PnIC!MsHlpNcigh&e`2c4nj#khi0r#ju zpdizX2)Bzx#n+!EweU(Y;E>P9fyi@Bc8WVztLOJueJDrYuUInT67+|2U?DzM@Y+x@srde?P}|1FG!&u6(b zMp^o|BdFKgTkWJMc)Qbd0Xe69gfS}Aa$@Z!^HbG<5h){GYus0{GwOk3PS$|^K41An z05VIi2^Z<>?4L^ZUwX8pf04}k6EqH$3R@=dcc6($;4z5uQ7ptyeDAA_FUPc+OMVKu zBTmb&=q0{qPyHH!#6*|0quONX%{Sd!eczU7VWt-!=l4bZ0m(5u6t3S{QN|n2s>zi{ zfBXbnG#cneolnzE>B_llxM3c++CS%EJxzWtn`8rDJD(`@C#h%cNqZn<$sx0LZNUvm zw8y+@?aP&LmX!FcSv&rTrfZXa0c0eL>tw$6Qc|}z*UHeH*5ec0Ne7bZ0mD|#mG+n{SLu9i?N z=fI%}?n4djO6b}{oxM5VSmR{-h@&`em^z|jjXZQvfIh5GYm_My~-1tl;WK0F1XuR@1r`1 zL~5?uWe!v5kCk+?or*tKBgFR0nvf4n4a=yI3N&U9)nu+4tDw7$WHOJ_3pJ%Ell`H& zV3%`3A!#%QLH!zuEqiyN1Wxb{DIO#4XC>9P2%2u3E>~1N25(pL54vxO+H-cf%QZbL zBL5CDm|1ka`taaDXS$&yRUHqjq&kMVaZ4YQZDlCSZN;NyxwkWEit#q9;U|)e2XX3V zp}-B^RUGBa$p{kXCiR(5_hd+JcQJ9N(0bU!gB<)jE7qw<(G3aw&iDQ9EJIme+~RutuGwX9-&lyB`|kQOo%I-r?HkRnv~H%_>0Zdc zDr=#7=CjU(h17*EfnGa99+jG`{IbE{+NUObMTeR8>GS1 z3>YYac|9iFOblX?zljUv7$?|KSv?wm#FST(p69jVKBuEWOCfWFHQ-F79;4kqJtf3*F2z~M?BR`JJZcHn-$ zm(C_@Z_5OYhTo}cIM{22(8@d7sb}t96N*lB#^C(qganRY-Ijvm2=nugkNWPONWAUZ zei`L)2P(J}hsVvbJF-%1H)G-5-)2WKDc)SRmexC{;-BMH%k`t5N{yiu2ET!fV7nGK z6YKt(6m!Wp$QmHTi&H$0;9-|e>nIJV6CBN zoyz9yRW9O?3qMYa+5sW7w2I%KQg^APhea+!VeA z*|!QzJh0j~$9HvCR-alyu4n*fIE3Bnu1mU>(hUg_#5#;O8T*PFKr7b4-S)YEOqK16 zUoak$g*pXn>*k6VDVRHaMK?mnnGQbE1z%6z#BI-max>I4y=ufSLP~VA?}c7>k0Mo~ z(+!F}WfCLz&)@6xyygtX_chWf()L9apIJE=Ixl}g{)|nHtH(As zLnVTApycA+I>s5*V~(&`yN8cYVw|gzp*`@z!0NJloS_&R(ADR(Y zf*iYwa0l%FoSYPQw^!x$(R}x2?6K9Gfi7Be_RkPlQlPQ_Bde_dZceMUwhzDY4CT%I z2>Qv&>7X^8{Egxw@V@W0Kucn;KIYmBcVb?nc(nNTdYo143C0$8v7E%v=ZgghwMKx; zK5C5I=rq^+gY?^&qQ-5>ChV_Tib5*Rmvc4b|(aTEZ zm**;~)cR-kJ%z*5C%a`M5L5Aa|K{ZWI(Q@ADFx7vbY68Q{}7%!IS<8c$;-c<(eUY`)WE z7-@wYrJ*ey(^2bcK=0pq#8&d6Gxt>>tHDQ!u8q!n7j?P%X3W+5QySdl1iHa*cmxyn zyFZn$!IT}~(@f=5#;NWQwC9@bPrXp6YZMp5P1ZGvy`6SqITDq1AG_7fZS} zU;(m_F+G#!qUri3dB4xkt7M!gdhVNK&oHp<)yD$B5nHuB0oSFuuiODDTjphdVG(bL zH^U5)9I}(qv|5Ux=@PtrbNc!;Dmw z?}--ynd!1{>kh|!vgsp|DHF)r39NBJEbsACmxUN4eY9{D7(401A>$-+z=a^<-0_*z 
z&n+2|`r0OxBZC~~@DyW;gDoCA=-Lw;&cz~T^|cY{Y_H^-MCI)a*^cjy7;~gto2i^@ zE@y~b-&yZY4hx%XXFK1U|M~fGWCK@HCBpHD9?n~Dnwqr!?GIbVPDz(nwesGV%_d@X z%`{?WWf#2h+ouU7>|qNmeG;XQ7%84(PTzI;^3U@@Xtr3}*2H5dW5|2U;Hng^E1q-B zN{)8)5GRI*i_~r0ciiE1?ssZ{NHUzcsVF+`O#r*lBsZkJex((3RT`4}c=-;=0{mm% z)qo86O-zC0K|1X|C68~JXIWLx`%}bEoS#fdQ}wjY5?k5a ztN1D5_sFBjH!GO%)X6Uw^4X)3++Wkw9cFQj;~sQbj>v%7hSWSOl)9Af#qw8fZ z8|jK>*LTi2eBF2Z&E6TVYB-N9z4`D%559AI+|FPX_*fH@7c zT@-}08f$KE6{5m={Z4w3BBn}!=g}LSm&N%e{Da?w?$<=EQwFydnNtEIiu2O46-XrW zMr!U`juzrt3i*FR9<#O>sivZ}?kr+WBq!u#e~%#-u$XjbJT;l>JmP#~!tfV%h&Qs# z8?|5sQC0z`L1^Ei9cwN)Yz2#XRiU|9ZcgbS&lz2mc!g&CCV@LS_mk4oT#|WXq-<%U{8e*z7E^eEHFapduH5$$&kgw?H{^SwRZvM~gE(x%{l@~e z)8@#_wPwmJqry&v7BobtGZUf?*TQQ1xs$Q9-XBy8P+Og)?A+vfp#6$OE<1cRm;id5 z2my3;4>RFCe>I*^Ag&l=Yc5H9JEG%mhMhPM8x|NflTogiWP0wwbGS9G-A})xVebKu zY;lMJxMw(gh^Ub*0r~-jSbbQ9A5DAM`qeKV$=5vV*cZxYuvCyd$KJaH5a~G*A2@B%zj6`|#MsmeuwO@rZy7k{lGs;mr z7zE1JXNqB1c*C*1e3vEM`>pm0fB}ZNj-m+3TX)%(g*sp8Vo90n0=*#t^TJZrkfL zVjP%zj*nX0u8If4!1KG4m<)gP{lHpQ3)&!VtMQ{u z`*y~Orf#%8#7ldPw=+mU*g1a8WL) zx_5UtG)#@TBd1LDE%`5KXu?>3=nHN&yUc0|{UEEZ*phu`SvJiiowdtd@7y-uQ|1H2 zO}0OJ*QBWQ<~Sa&p`tvD#Co&)05`EX3M;ubsC4}z1t+(NvH}Dz2{^YG?CqOLa}9DkuZN7kG2=drtl=L}GY`3Y#>l7X^K zbiX2NjV;x9xOT!WT|er#-J#A| zsBg705RfMp9LcMnF>AEu{XN0$jBQeK=*{Nm)bX|CR*Bn1Nu^#ohrGJ~g#4=?>270x zpYtA?>+#Vsa=U1dTh^CsvbyP8Sf+v>c->pSQGR*AtGznOKgkea-^lz!?jSWKjjx!4 z1YtDODi87|(S(5f^95lQeBZBfX_kx!;1jKo8H{R4eu7qdBIk`Jj?aI)Hq2>N<@1df-0j%LD{D z{*6c-DTfQ-O(~2+l5u39cQoe5v&sLjSs(WX4Ki@Fo;|kB+{I_}yVnzm@Tzo^A(A@_ zi%Y!`=`+nWcX*f!*o17W9Vt0FJ>JbPu2c@f!g&zExB+C5bAt=@G<|QjYvMaHg^Cz? zQq@a|N7`uxUbDo6&d`3J{iShI7P2R>HBCKZ2+jN{JFZDMaebe*H2p1;L5@YQvM~zd zsXtudyey>B$lDa38d1|vqo7xirp+YR$BJTdnt>03a_atP@2KQwR5NlcHH+0r=BVP`V2B(Sjx<+ac2}o;sVM08Q*$**_+%>W^+D@wY zDtg>$o^W?LK$hxXcq)Zd0ybRU7%blr4r%G@YX+@Oije^C);$<-jBhilmSuSO7Q^;_ zH)arftTp9paZ>kcjf255mxQ8%9ljbh3b3QoC)prEV|uGSz5+Uin-!P$qO-#F%B77& z&ZjAi9~hlqc(ut*V2^Qjej1^$>+l9i_+> zpttQfp`J9^o@aVvCVlOyRuY3ht-q7E+@!p>0d79FMqefmdSTEo0v!IB8M9jk$zU#O zrV`Rbp-wzEUIv1s@%7HjR|@Zs;JUIq{U&K=ha#AMSBkE{=nt%!=GwB!_b3B*T6m;j7C!=^ujS++ZhlJ3S=w0c^fIL)K(`b*f}&)COkxw(lStAofs( zC1q2qU4=35pjNspTbTG&%d>9|R8XMhb>7mX618-fRBJQuEM8}GWz3@>Mp%xa>|t$6 zy~OGeREQNfMU>s2nJ2As4$DQwN5uf$v!s~}%cS*orbfWG8u|}9i z?laKa*%P`_K6VHo>9&et*!-xhTDf6VAPo~VBYXRH1WO=j1fx2hP=rcS|Mxz={q42E zJV}Z4r>0ggsQ9Trs?RcxJdCNU0Iut`|+S>lr@EYkw1YQLQg#J|K;R+Ryb>Fnz8qn*!g_`;OH=G+(HZHTFE*(_y&zjPX^n<*TJ_?3|zag0{82ViO5r0q=u6h{fe)_~yj}EY; z_4;l;&uQE;Cq5-hzET^*{>G8L%P|VU@_*eAR@4Iz_zYC@w0C|Uxe9vWg%&fB1XYzQ zvF#0bnxXu}zSaDXstOe(nSrXtPcn(Ej8s3@+t0ms zUo_{QLPlC<|6c6R*8Hat7Yy~4HuRgL4tjBalL2 zlig!6zto=|5)Y4W6OuyW#zfUMv$R#|s&1EWLfEA}un@F=LYL>+v-mOZh2YD%AvYdu zFVBYZ_$*Fd^bVLismDBS3nA|JFv~NH@+HyjYw5A7bTY4<8iVhOP@F0}h@fO#Xme~9 zqv@5t$`ZSZ`{MaFKh`c+Q>4+o*E-t0(2Hf8s)|#HL0@zD*(-!Fb*bt!u9%f$SWX z!s*{JP}=0<)%@VQu$Lc8wu?9)in(f$gIss;k}8$>R+K%IQuDdY$C$LOm)!rgg==49 zTd|O6c`*I0(?&QP!?`6%YlL%g#aSsO58x_*QK2iNHoFD4Ld``OhNX7SbK5M!{yr!} z)`Je_%s}5~vk}#19=bypW^&fS?_V{nZfC*W(OQ@KdyIVXu@H%ZuVf7~Zu2&c{7w`; zU=yA|V;#Gj@6!znvhu_!@EgB2neqe<>FEk&ux{)id5%AcgK|F+T)Yy9d+{w-d`32! 
zw%qoUOc{}SPX+woM{|XcYX@wFDB!b6_GgL~n=hq>xURI;Qy z6?a>;Ka2%hi?|`h)kwvEBm{euZWdYwWc(Zy6}BbXyW2&+nqTKEg~&fu6a?pR&WO2^ z&F0};bM_!wl0_@H>X~F$#!T-gxLK;+lU}S4+Q({9Zs*D1w^)M$^=@Jg%1rYnuhF3t zRCk)~3?136tSydfy=N4~4L1qDO)~IER-gqH21($x=?AlYm5(Vtjmvze85y%y)~b3~Kuj8l-*f6~ku~JoG|~xsFkr zepZhIAqVlvPU0L(6i+E3%M_&aK^Zt+qhzXe-gFz##H;#n1x!d^N~!Yd4z&HOJUX#fN#N^;t$t%UiD__~gaz<=`Fv%-oLz z#_B9U<&@2}sCPqiRJna~pRL}MqI?Y!rtarwM;Z5tH>+lrgLsb`?m2h+Y1G|%1bzIO z+S3PHrDcepC;-ZzCcw|+}j8QP8DWCjQ#2=; z(qPC2!`}D%E?vv3*#dlHqyl@#y3-~Qw}Ce0xr+hm)Wzfj?SJti9hqm6E8cd zMf&TWm8O9=LeH1C_1Aytfb|#@{ru@sAo&faG0>H35`pZR%l^7$YRAUrrS|z&KmQh8 zs5dOz*mW3Pb8{E0m*YV;YOi$REcV7H@ZfLozphxx)eErvEkmrTzJef6iM|5LI?Rm7 z!`6UdnH6%Xj)SI_RkAwX+HUL?T_9bR+73)G)VzTAw_`-fTjQ#|tg|}Ee#3PaFejer z+*_MW%DH{F)ZSZO&b7nwHM?C$P19}Pwq(s z?Yo}Pe=*j+se<*Z5);bu-bQY4u7P(y?nIW*PEf9!_E>z<;!^Z%u3ToOR+GJvpsMSB ztKRb8X{t|xFDM|+%{>SF5@4jQvF*Eg1`m4#lX)zL3!Bc5xnH30x2*aI^M^trv-XXi2BT9kVYSe1h>d@ytZKqehpbT$g1gYl+jipliqN_Iw~#Faf{2vm2;DdJp9G5evGBN?YqQz>xd2)#axuwEqVs`^RSn;5BSY4gwD4+qBmuvV_{IXPweG>n%VI$rv_*1&HH zjMfY~_e`D})jn8} zIbr)dNL^q>j@E z+ca-^qa-SifndOpNDUNY4d9m}pzAuLQ6BdV$XQ(dxy zhdrA&iW91@UYHJ=fSJH{r`cS|_Siie_3~(WNxFF>%X{7wMI_6&QSld7M?B)W2qpoy z*^w*AEkwgIOx2pL#-50)Ye~b{NNc|%wypI)H4wBi;h78&`nUONp%2C?&8_y5((b^+ zLaUF6JPTf#*0|Hw%AnAzbEQzQ!yn<``*_VRLAAO?$qFqCQHA>6=9h&7CT! za3}KOFJ?I>uTFHeQarOs>bpGOS$2s&C_ZmDUkk(G`Prv<7$vhEPL;No`H-m1b^45t zK+ZBjdfw(>#+FAXDnU+idW3lzNYIY(3dWLNs^0xyeEi-wKku)V4U@#uIzON#i6$b8 zeA%3DUxRwl6CEGc`%`@V!O97ub?6}HHV0>>%F`lOJC#q{4NFj;(A$E`E3L8;(sbOi zSQw&Cuu$vSq-E1vqx~!I?()stw*Eg`(zF={N@`HrERT!+Z?q(I;yE4(zPJiKZ?*$bLxB) zZ%U`|sQCj4J=Df*)a278mW1R~ts00RK)^oChSH+?V@YnVaW(zF;l92!1YVuJx(V4z zS5zwS^j2Kh-e1E(?2;96^Feoq2v1+$n#I)ZETl;mZ5}%Hhw(dF6JIo|tE##pUaT_A zu-iwIR40b(Q0$wbo4=DyvURN=Z;PuAoDvs@52x{+?85g6U3z+celsfc)LBkp)ZIZT z^VeCN-%H}HP^@JDIGh_RseZsdA+flslph$bbVT-ed9sdn9yi|KZ9Hn_0aTG!-76j} zQ2&`|kP5XSwBw1*m|f#^{4jQ3_xfi7_E%ZH&r6GLF@B?i(aX8zQs^U(HQ3Hfr~sW- zL|YY;IRPuE79bY>)%OMgxpEDbrOjx!_cYTx_;aN7b=ZBy#6cQq^*tzXGhM>-c!XT* zzTlq0K&j2q=*6Mm?dl`wX0YjKnzpD&nIHdk+8PlM31 z@AzwI9e0fpqy&jxeFbARDcX2?4BC9`X<{)`gh{DC|2}*tnQy=j4PxTd>ew=;db%gKM;a!%;pd%&CnvH&?XBXr7 z!eFv5)fAMqkY-YMyW%1&Z>NwJ2Uw_2tG zHs6j07JU9Zb2KWvJA^SGdeYZ7Ci)S*ZtM@x(oi(nUMVb@OfhSd=MW=OMaL{|vr#HjU^ok7aVVxWvx- zhgv9N#X;gRjRNUd?I;r_^Scd!h=}pW-F#keE9#4RZ%$-rzc1-hxL9?g z%)FfQdQrB~uL^HWFcQ;=fYZUcSVSD3 zEqPWnCVYAbQw#AHb?WmTlZW?Hv}^j!v4YS2p9{SWmL+5$_k6zD2fl`X9j4a3ZFg(>`!c~cMbH7SBx2^jI2;L{)PGfP-~IEg;%D}Ej=J(R<86c5&MpX+Ix zVe-js^|RQL2HBWiEbX^}aJG{pewFOm;5PQ<2$=EeSywQ;U}`v)g=Ob`$$de0aSY8t zkc+bXtzEmMY3mKkp3)6lDEC5y#!-Pc(N8vx;)(`J8IGzznHhr(mZlLWcl$A`y;@bYYT#@kZtnQQrjny7!@AbFlF{?5Ox zC59U3x*#kgYPq@z-#Peu!2$YpJ`D;qwClC8UvvT2<@^3tK@=vtO^Ed^13#!cY5=m{ z90ESgB90VD7M7wt?6sd~P8 z3<)tWvtWqKkguN1^jRp*fDs&_t9&2dsd%(2ZGhz)YI~lu!i%HU6GkN7ZI&D`vIg(-cX|auUV+N$ z9U`jpZh7sCD7lkWsc9}FET)cGZgWmeeE0WmObA<8(=#GXd#O79K@OimZ+RWonjk>% zo2~19n*OzSs4GdH|0ay!=+ewz zs0jc$$-U&Qm?@APp1*>Q7FVuE*xMW=u1x3FQ)rf+)oP;~?)g!O0$Zj8Vc3U>Tdl%V zn1CcqPz6;2L~0~oIwwIBRamnp>>K0a2y+}33I+8P^B%Tc+2!xDzAwok~LrFY`=PyBz2``bOIf`l=bLgj2g zf1)U_RmQH>nQ$uO6+9<->KCN#JotI*xluU&kjf8rGsdA&G~^dcWd#e$H{AR~{2iup zwr`CjOy8$cmX9l%8wyC46W~f~KSljslmE5&qQ?4b(88dmS-!Z>;uGEs@R&89^gEXl zn)UFCqEY#X?_rat0nIR*P!c>E%E_TEmn%18Q_r;2m?!DaUD%8WQQx0gid!ox_4RQm z@7+o=_y9)*iOV{_?g)xZ?%rFH1K-!7WT`a`Fby?B7jyB<@wW}_%P{sY^-QQKcw(-e zeQwKN*^l<~?03*wbdWDq>B=0OgSAn$!$Uh-seAPOH1<6pZ(kpVYi*^PQ8Dz5_P)m5 z%M}1_@qIt5qhS}LsmvW=d8)LZTS6=Hkn{`EP${krze@vwB0&4nHdMKaUVxS5V*Dnl z12%P4e5F>A|DIdxgsyK=FZbxUX^eGc*yo(ED_87j_Esd7saT-*OxpQhq@$DlpGa4D zz|(90?xFkaO0slRccEe@R0fP0S8<9~w#l4FL>19S?szZ2$Xs?J$)K1862uO~dor>! 
zlXZ7nDxoM9Dv@i~yqHdo_#c4S<#X7Z2>eapN>z1$sC_;q(*)0AF`9Y%=2u4-)xWgN zqobj4qwk$&J-gNdJ{B)aw)f*OoQjp zDRzXdtoU4q0(_VfJu0(nUlRr6V7-BE0vx@PKamD>=%EGgQ(bOM5@W4=E}3(E)h_8_ z8gX)7n?_9&knfpiH;m@4r>`xxDLANnqgE#=tiQ@IV)_o>RX{}=^^K8j^zOAtk3KHEQW{Kf)jEFOoaiOwE=7Y&=U z);-@u&RB@f3llsV*%yz_;o5QW#rkO;wGU|zUV{tM&Nw>bagk0fg*qfHsCsSem+ej; z3VwEh`9V??)_IBS>r~$H_5c3AhoU%h7HWN}`5_M_2Q(7dQ?>mio&3{^$)jlKX&qe) za8y8BZp;W2wl8GT+IJL!60gto+>UoWF1;Vwvsp*DsW7JQm4VNH@tWXEr3&2S67ssZ ztnGS4ozb2t^)f#<49quY1xDm6C;(!#S|5K)bhcS-NE*}*c77!Q4a?<>FG!_H)uOxV z-O+l#7$V*WX9!|Ed;*eA-?x9KTkcJP-e_9l_%ERM&PL7sczS}9RbHj{)-*x}1qP@O zkm7aQ-o?=#KJQ*YN|@=A)m1p2Bgr|JEY7(H5Q?3z(#@;BpHlvVqv2@OuyJZ>$ zuTL*l0weC|6bAZL!Sd6@`d?F~=qWA>qN>xz$Szki>7raPSFzB`m(1kDth~doXip1@ zv($3+>Y#zV(87WV^-OWjglo!ro;_SNs4cX?>cm;${2$DF8Dp_u+M) z$=-sqvTHj+sPNw_D4O10t32rE^emor%Zdg-$nnMJw7PzGXeB%iY_A1qg*&d>3fd9O z*e%?qIn+-`(Q`GFD5fDx4@g$8v3$$R7MA^3Wo=Jh`D@cn+kEa>AG%%ie4xfly@k?v zwpr>x)l&qL+mfot!z^!UEsu3mG_HWO%i~dbNw4$EZRikRZt>+u>zlj#%Dyce^6w|Q zJ~x=vkf{BW_TM}++Z2d~GK{SRdSU!kHGX>~T`BT{5A<>(k0uDm#rl~Qpr*GjIwC@! zqMDpbqT6g(XkXnhFIzWGG?2P(uscr@(X{;E2;UH+2e2}PJr>A$Hu(mV zQIj|fzV>~(^EiIRqo^&g43*0$<6B|{EqO+3l32fpP>ajW0maksx2|vYETM#mAHxt= zZfTui53&1FKCzF6p2OnU4k5SbC{XFHCfNG`mmqFhWWTfpq8hmB8iH>yM^ScNu)+tF z>|#haQ9efNxDX-mF{AaydG0fm1A%HmMuY%73vWZ14eB z!|oetet+h{*0~izI3e@4@oV$k??HC-bs-@A4eZPWYb{+pOW(Rzy zZckG?*@vF-DW;hN!`5Wi1%b#{u;hN()e<1e@^YW`1YL?tuzeN0rGr9|n&9&}#8s)c zV0M1FL_4AB?a&)$dK|sA`W#}b5?SXGtCSbU)(F!5-i~tINmVDyMAsJoz$u)c2Y=;NsH%)k_wsewzLha)h^mW- z=f4BKV$ql^-14R`J9g!z?7fG+3rlEL)t_oX zYG}w#Ak?Ca@FYYcaGvi^2MS!T&*r61lbN%4hwE{Iw=-vKUyyxp^z4254j?&QYoxsh zs5}y`Cp#ERuj~_Dm)`P?ia2^S?A-2E_Py5G3mfU@UYG9?UDo)HZ>vAg;1Z+~skiB( zWeNfBM~~8+MNGc;#E@HnZXQ|&e*1*p&4Lkzj)uOU&(eN$6m>f$LQJFADVD*DM}$2OR(bT@vZtZmIXSuIIQB?nTLEeO9~22G z#4@+AroZUj;RTh5fPx=;K9C~OwADvMDM*y2Ul%Tq$RdA1i$qiRXiV~1-sUajO1TeuKxxt&=5;s58eCc*_b zkeip^eo#7=I7*^kI?O+c(#*C#76+DkSC~HEB0}43E(!?te26G(o?fP&ADT9dtY1Iu z@d_$dd&gIMA-`<6|AuLl*2Xit&-+qcLX%K)qtYAAl&qbshMX-$nNem)p2@&-T;J7TfEPP%APHuQiC^x}^ z&Yw(+<$&8k`<&rMFM3xCKlf<-dv!cu#e(VU+cuw;Dh<^Y~Nt+tIU{ zEMA^k+BTycT@|&wFRwibNcH_R_vAaoSK}R=`e_}@?hH12L-1C^gr~>35c=&R6({Q^ zDjJX|YZ0olt|i($*Fz*}IJa^~@N7ufrVm?V)M2OFN4*YjtuGs1x%vM`_XFDNzTw;c zIaPV3^r>}%*n((e7X$m{7t^yYJJo$0yc`;UpScPN+fo0|J=W;4_%z#yO+Rwnnc#){ zTaGA&V|5fG>eP4oJ>qYzhk3B?w|QlSs)L^8WOgzGH5=ZVcqdV3Kz6_D-*}u8YMSvnPX38Gd5<&7W zKHCL6qBml&Yy_l_3XILH3=Kvvu0st=K(E$^POn<%DwmrFxk1O)CT^8wNBfEcBzLIq zSxd?Ax}f@x$@aa|ot^uqi2Dx#n_`eTsqt&@@G~?b=m&ueD#e+W9&jb|Kg0Ox6w!Svak( z@z3Y4Zx{sM@^@5*3}3t(>MVW!AJX0{tm&@n_ErHA5F#SdK}AG*?@dLd38+Xf5fJIU zmwzwtWR#UN7>_F&9(h5E(aKG^0-ms52f!%%i*>KhZl=c5dfGPNxmHNzWsdM9)R z7wFW?!hG0z9yhZXvji!$=k;N}>mSpaZCTCY9sxVP050s4LU%=lYf~LJIpVHFa??VB zc6C)2s+U%KR5MYhX`;PGy$Kdz;2Us7=A7GQou&k~u%b0CHwT?T*mSK~9=!msh*SlBzT%gvN9VVg z8td)eeOnewkg-2E6;g)-!o)V&blhM*G{}w0b9@3YDU3rIOh0>)DHzJ&H5R z8Xdh861UF-iWi>(Ua(k%kY1`EkQLH?PF<>|#5O6U$+z~%>IE9>dd_@i{N)d=6lRbr z&T11qc27E6ZtMM@jU1o8AYk^zPjj!|mb||Li8?T$F@Qh<_8*8Dd6F4uZ(K%e5~e(! 
zfA1BIRaBe;M$g=k6W|+|`U3QaN z&y*>517TBVi7nud$^&bF)cERk-SpQ8Ru8@Flfv4+_m9OBS?XsvO?-Q{DK#UgOEHNx zVg98;WLoae^t2>--O{%28GOzy7)#p>gp8g|vx|(|=>dDhBR&@$UZe^51)g`*o>?i3 zpt3Pp@bMx7G885D2duK6zw22)3|jqLyOoDu8Fk+mL;P~e#?k&UDw%VF(gIbYHB+-r z#?<+N>%`yQzF>J6Ra)cGzr)ie4?5{?@ta7NJM@LpYBL4X{6FSc zTR>JF&-`<^qBThjsH^fysV0z)78+33P;|-Wn=|clb&M?VJ;^19x9VE)!TlRB^G7?kvW zu$vH9SkN;d+wyDDBKUEqwC`lhrr$)D8`SOn1uhl|3B=URh@#^&S0mI&*NQnbma@# zrVulkEiGMrP7Rd!6zW+XFjPM0}A(JG+8gydykwh z!)D5w-~3L9N2>{X1_Xqt$pR?2hS;!oUuMiyj;6AIr`h0|dk~JmFYEa{b}WuA5>osQ z)KBqXlu{XOYD-Ic4J!O7{vnj#Wc<$BZ``v-d_<2&s*R6NItFgZ7z5Z7?bohkyTt4H zfLU`M-YWhb)zn8f(+R?UErHw2^M&kb)09%G=w!mkrZ?B)_Co41@63>Y3^n@~{OYkU zWx67Zl5Z~d6aBM=V}CLsO(HT$g!mf&$||AAY{EViwhUnw0k&O4E5*Up=I&)B0SGc+ z<|@riuG>EJ4OBivBxkwQMeJ?dY~$yDf#)9o0MEOV6+1Um)okJxMsd0M!<#=W+d2~g z=?vwuU1eUg??9EJBo;QYGJ9W0_P~yH2xAtpab1iRR06yP5XJ3WCQX?}v{Kg8Dv;_F^24;E@BA@SF)u9kd*n z^Ahnw(lE{I^T*4JqyRf?=~F%5vZjxI9Y2wJtVIOcv7U!q%vJLVIlIGW8I#E$Rg)RsHK-b~s)A zLSNiFB_3+4kq-y3?m2KQsZr>DT??Vn`0|o~)B)!4trI<{GcaIFGO*Y&l)IZ@*|ob8 z?#~e^B2@lhax3uW!QpSiPztDPggXI?Bl?$Cp2EWqzA{dRYK5%k zm=LyL#;PGkPw#z=l|xUsZ;g_QOTMXW+1WXXRGLLxFSZ;}ZC@Bw-tbu#fvDdE7RQ*4v9IBIjPtCXUc(f>mUf|T$07G(fy4;2)!!)Lb zO?N1*Ju0tsWXOCL$>@qg+Rtg_K**=JX1Uv&qx-WsZ{MwLc8sAqnkpWtm3V-f5iZ;H zlc0*<@y)#rAtk&!cVj)eY#O45`cQy|bl_9>#o7VMq;+h;d}#()LDqP?og4z+tC3EpY0*E7U)H?M10<0&~WE&hu0uQf@Rx|{xD7@h7UHi%f| zH6f?WIE^Iz7`0{}xS>;T(%&aI)Axo>8h47;v z2Wn<1f@0K83Z=LIkcp`E`#kxjM`gLltMhq~+wn5e>WM-~E=)-#QWDcJMIfe5_)xVD^lfE4%N=d&(wZHAtQ_$9-Lj z8QZ7rRP>&;c~Lw++_k&NnB@JDw)~F%{OTtYma_J3B^Ecs6W8)IC1HxjImS92S3QC+ zi*!s-GpyddF?1E$q>z|{QvM}Q^|In=>=8mL@{(jrP|#NQo@*WLnM^m}(t1?D?|evA zaotRAwCD2P0irl!%9JW&+H!YuTayNqA&*Xye<|aSfnB}yM8(hh>KdA*M_M3 z`Md7E&=009D9W6!rlR7LUmCY+ZBi|JT44bro_yO1f$r8{7Az=Sfg{t$V)PcX>b>z7 zltb)vS#C;(2Xz!a6I~QqE&ThKr8&_?$6tek;%^pd+K;+VlD%~N%x`*aOF`et?2lGH zXFn8HuSn!?Z#CJ5QTgqQO?B@S=|_lJhmHkog$-NW1)^86;~G$GsE&DWjbml&e0se^ z%Gv`>mCL<$;H*dXnZ%YdtM`)%%&ZubF%!e8&PxkwsVUHs|MBED$+3pbYiJ|hW!sL^ zf6_U$&H1egwB@o8cDA(mo$^3p=`M4~{6g~ z3+#O47R-^qkUXoCBb6E^8xCKuQmvIuJtJ&96U$qMQO*h(-CcY34UJ2PHy*P5pd6`s z%J@ir)kP9`FSNd9x9*On6!rRGPB8P$4+vuV@*lUfDC;A0tJ$+vso9jk8{b@GY3AC} z>U^Vj;g-)?1UU$%SC_2o9cs-FHGET}xiUayU1M1pg@T(Y?zQJC+8Y+{`qJrfh|{g= z{}=;CEO~(ZAi79d0EfX*Uge+1l0)B6BHuoNJ=$*G`XrBj`s=*xVZ}S1uuFubzwYm1I zWYQJf?;bO>e_G-^xfVC;LhRR$Ha)ZIC#uU7G*!*qt8S}Ak$4?=OgipS1-5F^ty!E^ z4a19Rlpf#M`6e-MbT>i1N-t$`ZyEW^j@vC=OQobu_`AT!X3T7Mx>BHv(i{(H0#cUo z?w`W({MQXaqHxelO?fj~;%5<(H+uy1r>-7ZdS{_~lU%gSCuxtXOw8W!UnDEuCj9df zOF;dF12GG*-_=nTFM(|uo(;$A3FAj~z1v&Lf;{3Bmp={gs@Z;HcAg`f)9nt^1Y-bJ zI3Op)*K{qKADsaQ8%bTu%i}1jYWN>(LbLF0dzGZ z)>|WIg<@AYhHF@s1s;;+I2<>KNp7yrSQn)XdagbVxpQNQ+WOs0^7ee!*OF72zCO)}sh(qDK2;kYmw%3_= zx}BOD0tiy{#dZnU{_tAxcbat`Z(wj7Q5)O|S!GNa^J>RjolFhtI{olkJTD@=pkor3 zgiIX|90$Aa?ju(6tShpP7vN*Pn7JDEZfm$6J-K1W1SEj7&9X*}JgtM*U-b*ta_V*3 zC+G2Z^wb-KfEn6BpS(rXD?^*6I-ykW>iDgxo)L-1S*SXv0ttsH1v7-i3damtDr!}a zQJJuakTjcikTV0?%|J<8Lj(#Fh9pf8@;jM;Eqmu~0}q@db{}c^Ls>=-?B+4+2MyUX zTv^r%(ewEfLwWbYqcbR;kB~tqb23+E7C-~|wjyn26H$IzHohe_URWRN)8yfjTkxMj z#hmP2|1#h?A$Y}X7Do@$K8_0w#}QH%6bWpIw0GIVU14w803L|?3vXDSa>{*&Dyl|} zFI1lhlJwrxIeRGQi-%{P2Dh{_wyc_#LTIBOY5K04^xP(?RMfZm`OU7v<#E}| zAUn?X?(pKGS)0nLI;6CTY4a=Hqn8XQBi0d81z~<=MddP?#t4XwrrC27zP439%TynZ z_d|&^62rksh^}HT9%;5mFu zuF)F+mb&3`fnuS1NbaYh!A3Nxr0#u3v znf-dsd1NV`owrBl0#Y5243%-TvR+$66j6%P!0BxL(vE**`7&5d97s0tJWC785B z=^b|Uo{~J!qZlzmJ;(KzRh)1aUw$K#oKHY~!dtBI9g|`sa2Y`n4Gi>4!fXi+KY=Fs z$rJJ|)s{T7k|pA435f(5a>Ws7OOfh$pat_|aR`nJ?*2w6v8HEO=ARehIr5x5v26UA ztAOZM?`Onk`wQr_QLjYBaGEG9fdJtKCUO&BQ93KXS%m=$7HaHOcCUkm@ZcPFIyF148GcVU^a@3kxsm~X^H9*y%oFaA1sH>}qB^}BR&{~XHih13kM 
zzvXZlDUc>xCa{+OO@mVMu#bIi=Tfeu&wbhCrZ*`}RjQ{XM> z!cBqmzX)c|tF;3%<;N^+n_RuPPFCX9eiTQQQGU}4y$4u_!%H)G3Vtcjq5Db%fW1NK;Ld@j#4xP%uMw@rtJ67w9yfkt>XZYdHF5;onRtB#!lTGx;S&4vsFWy58 z*r}}w8$5g+iHyQyHXWUleEKB(H_?@c*C+kbmJulBfb(oa(`zTw=1wO&w~UaZJ}v(; zB(QaFE`_s|fi6eh^fUotu(%7Vf02#TkWXJzGoLjC> zGrrT%Xo=m^x@bj81$wr;l$B+>%+xY!$7X}B{w&@Oy2jxzxZu5@9c8VPuL2ic5F_vz zHd|`N$nlltb@+9l{~{e98#q$rK=Mo$W8L@w53|<3#AJ8d~iy z*NBI;JECY!=R)KG&%^^z%211@v+DzO1q=kghTc;+=L8-1+mX%R^;tzXe2wR7N%E~8 zi*Q+GQquT9`#}8{m}zMqCmhrhG!wTq*wnhPK?rJnmSy+(plH+5DxzY@;2FxF-|v@Q z6G`@38Wz$;gx@oo`|iF4y3M`o=RM{2C*&aG=(XRR{8&O)H+$$V$@8Bpbc>|2-FZ66 zr>cR{$Yr+qVx@+->6c@4Q!WXyfT?FsQ{?d!LmI8bK@@Rug)g(^N4g8X$t$?7y)tRt zSH8IM!5P-G9%+S{m3{neNf6g;w(S4GEB;PEnavc^wW9g%dhHHRHaClGKP!C5KNw!y z#(Sl5aQ;@h>*WhT@eTA3sU(U*R8swC*c_D;6Uup@c8C<- zw0%77V6TX}Fntxx-_3mwJbyljn;s8Rz?2*)-NrX^y>*;FNjlu2xW%V`MaDAW=Draj0A5#5QF9T^K;e&7hlRFS-Fj1WHzZc=7zOGjp{Y;hj@AOLcDL;5TD{_gQ+< z(*)a`@*O7<^OyoS*yXSPfku{hJo02M-b?vhH0bqBrp_-7>MraOA`Lva3ym|yww!IDX$V zn?CQzRQuLV23(+hQ-H}$j*7feojX$^Z+|#L%<;4zeN~tUiWZ-8R2C1)3URxW#5G1+ zcT2>hW8`C*g;?T4H?H%K9_c{k6Sux)Pq}7(GI!Onml7m@JUvuIG^p-w#~1Ja_S(NN znPnn`*li@y)n;77Kyx90`4NavB#@9&@mNrE7d$zbVlt-jd$-xW=2c7#-arad&vu?( z!RF=aWxK3N**a>wJax+yOtPKf+5cvq>fEKH(C|d2c)-1G5!th6VpU*GR255^ydPsGni5j;=!hoAYt!qzCj__Gu!wjjGQ%2=-4E|_iU#nB+y($@OpXOt^U141*K z=DiR;^eBmuO*^07ZnPSHdFg^QneIAk)5-pNBJiRB*|rqeGFw6)fP{FX(UFIjdhOE> zVjr|+pI6~8(RE8F$T$Dim!KpQ{CP%u05(ahX$g0{Eh4`{kpjK+5XgMbiP}^+bmD^^ zN)eN{MLz`I~a z;=D0I$vQy{fr!hiN@S=&K&P&$#VSembr&)eh~*fk^w2lFs6^(=?*Rm|PqBe3C>_`b zNz&FQbBsfztr$P=r2S-)8WzrrBv`{{5)9O_3*lK7gZ4L127wPR+XJgxXN=kzasacJ z-3nVx@HpIg+-2B^#rH>F^}g9$91ZSgZoGNk@ODhVM|o?q3E|`=R+F1j6}klWB!0>Q zx*HrDA1&HAZ=WWcwUM#Vlzjl7#u}K<^`;l02!{x+z%12E!)dis0#4&>PA_s0b)2eC z;nTwSe(~?zYFCq+Pwb^~`S}X?YgK2jcFD3qh(w1GqTcL!o;J0~yjVV@CVsPGELRy# z?s^9~LRaGQX1y@0-Hu4LLHdcr+a`j1c53uajC%C^Od~GD{B}{C!2sO4*#CEbB)|6< zQ6ngo51Nx2nMy^Wu|1+A8n4{<3zdklFR<9)Gu|f4A2N5nIbwM62)w~wy-8L-JQ}VR zJJpm#A;yd`QxeU#EGTvlZQ0Jx4f~AB8S`a~Koyp;!3A*#9B;lavIAwI#-( z*hdWCR8cC}qkWFHlawKk{toHWCYdhO+WHPQZb+k32g}c>mYisRTJ)cRUwl2Pg|>9<8-$W8PtEx zqOKE3(~R*xU(J%<>3Wth&lW}L-Ovgt($;Y=aF3^QvGg#fnFigGwPXO;3!Cs{=uaZ8 zZUlSGBYypkpS&Ui0Ix~jRDjKO5bTB;v`YpqB**vkwe4f_Rg^_*qK_mhL@g}oIxC6q z*VK`hS88FqfOQ2Ab| z9j!x=mgzzw3y5KuTliL@L1?)2!7(P>`#D9~g#L_2%%{AEw34Z?;NX^&2JBbhEX6|K zQb~}E80t-F(D+xnWAhCX<6@({eXx;OCBt0$LfY9C1LD@ggG~*ojgbeh$CWpD-I-Xi z47#oC2(mHaj_*P1KJij_EV15>jHsj5hb@~!9|AWdwWRK+-ZJe4Y$(6d>v>Ps;A-4* z){a!hcLSPVs>PD~r+8hT&Khl*k=9RV@22Z|VLP*&_G}(Ay4-o%{8vI6XMXo)rQv8& zxI1^on^`smbhkn)VK%gNcxKAhid)yhPsfTS%daXbeDUeTW9htVKLfG((BHC$Zt_mp zsl)Q5_ehg1Ef9J<1$u;N|BM}kUS%Pu#Cd#=QvBg)n_>Jn29Jl=iz)bBq^eWhf;-y= z)5Wu04*aquya;r_$q(`2bMP}fP{^aT2|TKi_w*TWm_A4f#f3oqEwCMDSA;#%fPy5F&$Sg^X71n$kgbv09&whEe1D& z9vgAU3;|j%f{--|M-aZH^NOWP@sY1%D_2litV@EVqqo6nU1egq)7@=}_lb!S(X!+A z7awl{iT~J+5lwlI53;mIH>4AT@;MM|pd_=(_@?ZKav1f!TM z>klK;xYJd1VxT2)w$UM3}kd!GfPLeNj!Sy=DV*KJViv1{WO<(A>|{fQHwJ@NY7 zib}NXvAMfj7%;o(2MMH9bhF|XA>-YYq1t^{zK%vi|#6_<BHE5V(_NLF#N>m zDEygzXLR&C+#86Ua`G&!eb1ax*?Yt7PH*)m#476+v>9x1g`~A1$w;rs-N7lvtNgU88E|s7DdOM&!}nar_I45a_Rhjf$h! 
zMk5zg$f=#+oj>iK(phf25AxCnl6RmAUCB zJm26EV-9gO+Jd2@8NvX~+}doW7U;YB`Dgj8cxs??dE-|T`x$v;E-ExAd(FEtfg|m& z2zchr57w8JWWS_S12cC--%*y|k@QsFXkc+okQ;UZSHI60VB%daa9u(lS;=qsz&Qh^ zJQFa7cyP910ABf8)8v;}lbsG7Q~D*yTa0QlJnJo5l#1?)uB&A>e==1~b=j6A6>ARi-LmH~{f&a5shPe9vjOnU>@t*&`dcdOMa)K+7k?bW+6a z_#Va$k4;I=_C*f!DO>>qJblKr!Hunha|#|lXrn7aS)e&j-aT11`VGLcu(jh9@JHdd ztKbXo`bdsh4>6|jrORf4ww&Wpq>ci12U!HfmLfMzPmke7ZKv8P4Zbk#WX`rz2!W^W z;C2Cr1UUO(4zbVoEim~ahNu#{9@GjOdIb*jSt-)Qd_Ui?^7#+=HXnXu4Jy@Pu)p>@wHEUV?TQ3wW^!2-?S$<)j{GtSHI7rt0 zbo+K|&iG!wn3m6%Yd7C7tjkTHxi`D*EOOTS?1If!M#{2&fmA-_u?Y{Rl;E(Ap5z)L z0uwu_ORRf6QQ6@^`H6Y}FwY<+$zlJldRbC^cpnOV4}m07&W=(!J;JaBdpE+iv?WMP z?px#Byy7#$WCxOwdCIKMT5>NmAao#Rlf}*u%TyZ*3NeUMY0M*wdF6u7o~WJs7snCn zv4<~)2h4<<3!HSCX49pZ9Pv-&F2AIEWUYA3G`OztUUNs)3e!c1?Hjm%uDb6;wmg8z z4U3h%=Rg*?Ugq>@fsqKyn{h03tgoCJ=02HPZ3`X9=2-Ztzhe&2(v?Gn(3pR}aVC4u ze>&VLl~(Z}@CL<3p{|dkc}xOpCE4;;zGefq^)`u|>wqS79{Z&;Ok;8)A`lMx3dcBXZg++=USh9`sgv_L}8YMQ#Ep#c4tC*7Jd%*2UgHesse=_pX`sw`f@KV1Ab* znfS&e=t=qH^nd~BTKfbdf_%9^+svJ%vgTGOqjAl__xI0)N&;U?@Jv*ia0*H<7QYYA z(|C?ux;)}c9?3eZK28~u-- zE^kW2IK!GWBMP1ay$b?hZmg^iT$Xy2(QC78r=s5lOs>f|1 z?k0BLa*c!h_oc9@7i9 zxhBb^gOp(99=W8OkKw!{hY^Lg)4~ z#eWT*Xw~W?H1Bzlpn&Ff=)fy%>sewlK07)%QhviUn4*;6q*tA1b3Yv)pCttz6M_S= zk#u}`v>()kJ?Se#&)&IjqVP0@$TW@K&|gNg9x2U+nmt?zZLc5%8(lk@fKjhW(2lOS zy*Q|?4<4|tqw6@9=$*Q335(*K9Q@X|Uc#j*0iXYTL@7kWjbX6R6(X-o)+qIlQsI6| zQz8M)GKV6W_Ku?H1n`sz}y^?}7E@hffXXB_F1`ZM-4ydOlU zy!Abb_@PmpHaqu*e)9XeSAMX=6U8mA!WC~fo%nAt=Vr^$$z>LljXiF!S6bQpDF58Z zbofGZ`8}nP|3c34&E9eudgoAwX6rFM?u>k?j0W4&@8Y#adgDfH{9^6yw`;m(nEIj{ z_~fKy!AB-JKp3sl~=R7kLUCjn`ufvq>M{db~H|6KCoJzvexm00}!3IF;PMUGN}G z4l=GRG|B&L72FY%WSTsVAH10En+Xq#%gGlOquQ$dHpw~B{TiWs=}!IILebL{ax52q z_9TfH6fi*h7pq6mYHB&kXAj~&$5qwc6<-8vQd;*X%N~n0;ar_nF2kvr(X8eH1+Kf= za^b%N9|C_@kL`>qe}I76vi7`D8a^2XCbWM(%JF(Fp#(Sd-D54z%gqvfa~$ z3A~dJ;{$!rJUQ@)bB#$dSp*DT2HAia&OGydCjqIXtVX6d8ZE|GUO#2ToQrNRPK%)) zJ7H=VGt$Y=qaQuAm@HFEqi}cXe7`oubuNTCwp&W)3CRf4EDJ#m5r-0Y^F*5IzYS>%v>cfh7 z2dYCrp}FHm+{*|3txfuzpd;$wR{s6RDNfi}xTm2-N>Q#Q zc0$Yh8l4#js!wK*Ye|3D$o0AiUk?eyqBrG{7Ac5I2%sJlrZ&lSo4m6@;4+Vn?-1{| zxFBKD1DZ?(THwCyuVId|F=V7`>(gJ{ooup?RkQ2-PkQApy?Mbdx98aKy$TLKed^I8}-Eks2@h-re?g&u+oqrHR%5sF< z(Eh+$&gHTa)=ymbdG|RgDWjfoNkwp4Ti$;MWQ}@%{yxGd#~MUTCB{L)`k{g}$E;VM_2Dz)vCeA&E0E6t1y7knQ?w zD>MZ`{Cw(yzWT4lWR|3$#}}@#iw>RANKMKI<)WQ6KL_QAo`Az$3?xWQv&xuO{28cQ zuoyk}1rb57=dG)WJ(6iDN#vd;F?lkAr2UI@az6Q>$nImQTJFEef!^|etiVAZKCIQ0 zP>La`QbZ9}mPSFH6NGUI6d}l!s#0U$ZI*gmIZxY$Ed-Xiqt&twdhYmEK(0ph z$ec?g&@LJGpQ_Et_xH2kN^7mpi#zi}H(<8m2?<2@fw4IoPPS`b>nliMp!BLPTXyM$ zK#q42x}xu?bl*)Xvf{R!fdzNT-KoD1?mWZvy@~;o4_DetqwJI_UJRVeUM2zgKiAEO zK@EKdgBs5jrK-DFvK>)o06vmjR-?0#SNNf@40jmAV{uC1*^eI14Vb;=fU9&OdxlNo z;W2Pqre6~jZgdYpsEZKR;cwZvOg}T>DRyFF5&Rr0JP^Z z27-+|G?qUSx&HjoMA8DuvUFgQya}43NItt}na$uii=%s*jHCmecA#;?xL~AR`!)|h z+9ARO99GX9co7%4iWUmI00?OL&x(6DAk|`HuOZjl0A4n{Dq8~%p$D}!C*baAu@AHp zOAjv(1G~^^fh%swfJ=6$UDKh40IBH@(7u3dW9vEmIv1mKS+?l4KheeY#i{0);Scmw zBCrFU0><0GMHaCoBHP3PDwj#1O$pE$dOQ2FmC&@{-3p=GG>h(7F9q}%cdQE@&UUem zoHNA?b7C>uih-M?0>JY;=!9rYsMYn(nGx&QFlx_fzkcnPN1|XVXB*atcj@X7SY3tZ zl;0cX!>{_8n%b+KRrk3CbX}bZ1Rg^IhR{rb$08{bfwLKY+T*Gt%W-Y1y(vpqdkXRW z()ko`v>r-Nc2AU&W(t>I1SgXD8cA2Y&9GS1C+qIfQIl&r?4D^8wd_otoZ_64qN6SmzO32aUkEizg(mGQ_R$B8!q{HQqaEess6AiI+x9*lIRwr_R3RQ{Q<*SeUVl zi`JuWYUl^)1nd-I1 zY@uVIzpp$lK6r4pX%i95AW9%n;Fbe03#&DL2f;ZUI&gd;cy_*@R^@AVW!{fiHO`jf zj7~DzJyks>cC*n;%a&vt(t$1Y+YDqE9Q5ZU%rm5E$9~e^F7Jxo{pD=T-dlU=OX1zr zH6&TD43W^YeaipvI+Xr!FzKr4pglulJ3JBRp|}$vL77^BphfjpuZ-s@Hz)4NHQHX= zqK)|5@RfsSOKTFXhiImyeYv`oYZ*Q#bfHt8Q&A-!1hj9L!gd}M2yMmr;gRSo1za2X 
z-j-dlk@V~bMP>3a>=I_1Ao(F)f!WBk!;9TyAVEXD_p`b}ARWL*C&=)?E#;KJV<)JS z&&-*B_$^rlx15|hiI4V0sd=Bjx>x2{wU4*(9b%aR#?b>-@@_u8z$*+q5Id~UCcAIr z)PxIXyE;OW25ga@Mx_V<1NM>>bobgh^hItv4sNsy$X{`38G(<(ueZ>4OWQ108k5@K zi(g2*->Co}ew)*@$sYj+R{6u#@qL0`ww$TjI2(gF@8#LmhIR)VN$LSs=&;15GcJ1F z)Nu$fO*MGVzEGU)llnF>g?D;hpR+NEfINTU;s%RJa|!6k6Ynr1YVuc%kmUfhKXbZe zU|`nK1_asl8;=D29-!<~?~$gwOEhH1`{Kjb=#bUhG^}(>iPA`z*hFscFAHzSrK!o* zhI~@M6>U(~gs^;-JLZ7jLKhr+;dyEB^6{t@(fvl{Uu}#k$*oy_Tg=d4tS(YdTu+l`dm% zlNOd={pZ6FX+T9WZO7QB6s?`cy~l)9%yV~*vcWr4$m_7{7@+@$BB~c$BTB!c_+CAT z{8%#VzilVt*Yc{FAEqW8!1umvPJclx({Q&lNX%(Kz{ofA(bqJfd3k-N2f*c4VWKMX zaSklN1aT^Ug96|WeMa4{_*K-B0TLl!%e#b2-v>NNg8f&MoHL$p0Z-GTL2qrMg*Bz5 zjxtj>^ZzJ$2bgP)C=2DFreud@{biN+thRcA+^tN#qwXJT6Wo6q+3=UZqe`1&{?@sm z-11ihLWR9#mX-cQ;jQF$4K^-9vo}+4j)%@li^0}c#IE8w5X=6nK_%9&1@FKGD<}KG zXPf3UzK)d>KiF^F9HXE&%4sU~!=Unu#;N8SoFI!5vYCYp|o)P||` zauq&5(8#i}rZosEtavxh$h}JVqAY#}$}M1u#0{bgK*QlDtPR5{p?m?mbIDVNXXl`H zDT{(*qag>!-Q%2@Ty_8MV5@|B-R6n$$d}IxRj+a#JHVfo>Cw@4cpRV4!IZ~DL%zpx zs*_-DkHAoUp7t2yp}xQ`>w6xhpHf|6Ixm=;oywb@r%FX8nS_|t+bji{D+D}u7}!R={%sF_ zSI-FiRvBz0s|*mI5m3+$kC(u{R>sO44-l6A05vfJMn+frz{>^6<4TA3=O(3lRrW`e z3W)xtRU=HTEYeQ>!7q2);i>m026|Yppf4sGPM|T7*$RJ~!5*N^^L62GT1OQYh5KT! z?0hvybt0#ztvA{R#-;fTuDp~WU-!K#bK5E~Uw4}ijQ7vD6x>#fi^52ay2)G(ik!%m zT*zTHB_UL}^rH5%d%OXV=L|qY?>c@!k9V#BkG+V)N)s*L4m`}}@aTx#IGFchg3_CW zS2p>n1)IdnlxBwB27agW4i`~*Y^I&Rc-_D5Mf8+|yl_{=7A^hGKXbx4-Is%UsjAzb zQ_?*)sf~hZYDr&KT-5OeMy%7mLF?Mec+ZG4PcxPUwqjd9B2#YKupfNY z49}=1<~1CcN7}zMJ=AWW#kB>3gpm36*pG59a>FSLJ6f}LdqSXdlD=`EtSoxvMnj%% z2R~e-VCIj>64dH66GL5gu@3w8I(>D`6{Za?ylI5H*FFg7lh z{rs=O;#a*TUe*5HEZFPaxms_Qz&;1S}idhR`4N+@1;clUpVRDy`r}4|r^hF&>Xj-4pAI!a3 z_$3s0XG78F^r)LITDo!ju%qcDCb#>u!V4EMu(F!FbGd9kCY1)nJi=#mXOpL9Q>G_{ zcO!3C7LZ7A4xe%|J6=}R45yr0Z^&BI3rh9ni%0Vua%A(5mc#nLKs%m@tv&NT8UI9{ zE$TtCnhIH&RH zlFlUgcS%IYL+E50^L(1cvfM_tm}v|toy$*HpTpDN&^sPEQ5huwNVjX#TsvE*Jsfa~q0 zf3@*q{XkeCq2qpe@Q+706PzwUztisQPs8H<^qCZWpBl3~H$P;wilGfE%7{>%{T|Y+ zW)!Mj<%8r6h)^i=PxZl`DMYMF-ih zQqy=@;8*)~d(jUf)nO5!cN=k#7?W6i4tIip@0&s_zCeX^JzX*z*e={n4JfRaP*FO% zN9sYGV1qldG4kMcRiN%wwO8t`D1gH2dur0GZHSwB=O3CmTfU0x)S9I{%CKJ?1e+jq z=t=>)o=2I9?8i6u2Y5;VaY-h7dW1`JPMmK+apJa43Kjo{%{Xr&e_c<@Om$Pok!RQM z$w`U~#Lh-SxY-2qWMSNOe#uV0`&mGna>llVMZ&z^lM)xy5Ky;yiiNY^GL5SuO@V+0 zh>U4e(umF{aISWXQ_=IhD3YhV6nDUXvi9Z)96*k^g<3yQba$*>yM$(ESfC-f! 
z??jRLab(5uEbAm(7d&x&Jj@eC+<2apluv77{rB_1*Qs;NDm5rHA>%2?F3*X1iWH%K zdo#WHuUD+(^dO^QVQ!kK^TGqY>*sC1a&0J+VzuAC$B0#}>c!LQ zt5S+qn)b> z!dHr50YR8--}~j~(9vaFAI@BSbN7u*@CwxI2{ws${KwN8J%E2^!-rT)7(y< zV6TEZQL`CGkNmY+TRJ0xdpOt4t+EAnm9od>WbN}G{*2pdSs&*}*U_K(Z>L%bKm32~ zRR7VP|C~ckx%`FIm);x4_3cko{a9Cwl;>=XRbSZox_65CO`fEk2@mtKdmz^$O@nym zx_GE$-5zxZIGuj8*qvG2gv;7kw~HVr~)4&bkr;oI2@z_WLlNSY+xh;U?eG(sY=AXS=~uMI`{|<{^L-mysC7y$QTn*T zQAu#5ZD0WH`yft#Rp9f58iC-2bCDS8RAByR9l9U|U5}$&&^AD)eU@#Hngqu>#S_N+ z0}eKs5R%>%nR`1k4S_>X{+0>_9Jx;Tw82!;rjjgX&rJm;%Psr8TXr=&>?)^ZP+5%L z?1+v-P*nEt{Hl)$OnHYV@{rSxwWG=>_UM-zeDRBBlVugftpqkt0Uwj5rwKlhyNdiS z#x$Y2V6G9#z?%rSDk2m8G8?TDyAEt=R5cfD;0Q0(Ib;oQaAA6%OMUtM-b|6X1j?W7?I>(TpE=fTOA#O<+E8x7G zn|Q9+;VM3tP_o*wBQwD~Rg-y(^?`JQsY&KM{ zxJN_yXA`_8w17}W03kDLes2%KjJ6g}dlz~ai9*&O2oo^T+L>J${A0UG3k86^Ud+O- z)mia)L8!i&7W}MVQ^eBr-(3JX+W(Luo=8^~uw0KeJSp@uThGp0JlT9Roek4AC2-j4 zSCLv)d@`NtT*-9Ao3Ea*@dp(-mx?4W_?HpXY?tXS*#mcp%*IPsIJU)5=+K2FuI&Ok z+5FO_^SzV9{vEpkRD-gdsk1-A0BSqu)rsYPplgN9zl`k#_ZW^0^Xq7)OMA7hy?>tJ zO*M{IDr;T7phSAClmL|Dx*`3I^~!QDfJ@9!5W^rua<41RYrZQl-Th0kauEikJ$Eju z4K_|*SK~H=`Qkp!>_m@PZ}&TjketMLgkfyQJ!n|woShOC{eHa@HiFeN(Pv-%8pRU+ zdin?#Gqz*Fo1cqf zJ82aWxxw~Dlt{i{Y-xfB@9arq*3-;Qh#IzR!n zk0&k30E?g%mx8L{g4^dR)cdFYD4UK*-;`-2S?_Bs{)6U%XEh1I80IRv{&Yg~($Bi{ zVWS=Wf|1S2q$=P53!AMZYNAL!v>=RBe6 zPE2hR71Jjl>6(N-v5INS*O71?yhn(7Y`723h2`~!3e&wgoK*;_9Q;Q_WT^oF9KyqR`u4 z-e4zPB8i#BfhBW+64o4Toc2calk8^4xo}3V0ySd(&q?<7c6gMhHoR1P3Rq-;LwJsu zZM5l+Clix6($A*%lkIzwo-s>%L)}Cj*ER}PWnOU6wT!CC`;U8k4c8vyN8@zuUukOG z({*K!f}7Pdhc$MVRmSc+kB1epk1)_iytg;WYI5ksmXX-!9kX6Uy;UX=O(x~IjJUU&%G#cd#ZM+3NRHXhjEf@5H_|Fm#E!`U)g#290hxti zls>d3HTF2hru^xx*P?v6!_%EgD3%YIh9JuV*oV3)7ZS_4+@%E{qcGhW9>nro<4%4_ zy|ZWXERiJ$Uu?Wp#av2(xF zlr{n;D7nuD{>C$vD%EA+hfw(qk_uh~4mdesIoTRC{YBZ`&F%~h#9YQ+1>k862Q`Tr z_Bd4yo@^%UYzY#&4@1U76k=a#FOYjTajoyzcU!EvEmpl<**jDE9#E&%Mb72vbXQVk z@M$x6XnjvoNOM3sE+$GzO)NlQux($Z- zX|X)q(aBWM_W*syn;W7eX4;Tnq2{-OLB|V!diY(+#e5H{4g5?x$^jig7Srd}D4!_Y z>7I zM7OP&AU&hyOG$EO^)g%;lc#P&WKrgZ=5l^r>Zg$VD8ZORNy~6g0Wk4?D~wZT?|qB# z8-3~#;?%Lk##hW?LkU6!dW}2hJ8uP?^j0;=yI2XwP2vajh7uh=k~(GVQKY`-J*plx z>DWI(PepZ}SFQ*v;efLx>1(SxL5_Ca#N%mYZ__(Az z;n}a0E&U`@E105kh@W2H%=W+UmP^o1I&bYi{9s-01+!bme^A{J0?e5$nIb?Pk#;UZ zPyPOX&{Jsi@gs3?qLvYxCvaF)+5%c8Nngc-J=ELGV`zKll;2OvLZYPR{3oP zC7Jy}u?XIkr=22*t)}uz+}Jx9kE7Pmh@?HqVcC+2-LJ8T^B|O+*aAE(Es31sw8f`R zqxiagd;JHpGcxS}14fp^PtCg|6;#ANzk%(SsHrE1P}uBVm#)wVs1b9a-uVw-V|>7|zVI5pj?hIbM1fWPqg^&BgY%DOs`3H;qmn)t#3tX}RX3_oX$ed@!!>HIar z^bF~#23hE?e8#vEu`C@t%hJZG#a|vzEc7?#?{jE+Vihh-;V*8HP-n647_93^t#7b= z67(kN7dWg>7r3Nmd-|gfTBRcaxUDcj>8Np(FNVd#U%Y{$c0pqwRU#k3KzO@jiFq{UQvB`VG@x3q7DYy89dxW z9ppuw2dV8`%u@y!lT{Cz%8@SCKPdP(YzStsv;PDyyI6?|k}-J2*zt=cof2spFH^l7 zen5?(hv|yIaHt6Fr@_d<#h=*xP<<%jlW3E-XtuE2ei%1jrNOm@TBSKcI86>6HHC zit4sqGW)TYX#Q=ZPdlsix$ath?QC=3>E#G1OXixRt&?sx#t5$dCOMI3D0A=>f^;_2 z@&DGe8{xg3>J6>AtoaQzyL>D9v)hZTrnznvfV)=0^s((Edz+50XzM*Xo4Zm=?k7AW z^Y$w#Ro8H8R&D*G^{qh=MgqqabZ=-Gle?m;<3Vd%KEjlgi;>)kei1XVv^#(sjGpN+ zMb#x@%v;E{peyK^I!)y&!MH~VM5k~?fE<5%ces*zFf#qL;FRJ%kaMn3mi74Ipmc- zL5}lrLG^C0)m`7?ALftmP#*Bnw;sT;ERq~^Zx)rHwv=}Lwsk|$+DahChEQ}`L3k$U zC-lP(x!(EZX-w;C7fTem!E5a&Z~#rkZ@{Y4<(W<9>GIc0|8+jf0&Mz^t68TqU@{(x zEg{2p&KK$x=S?u%BG>v8hs~*?8a4Vug2=ybop%uT4j;$1CBnumwK5_8{uE=(Vbi(P zp;wWy@9>`^yz~d8CH7e+hU$iP*!v2n`Mz0nFP^c17v!u3$v}FE_jj>&sEzP~7XYo+d114LR>2%Q)vc}}sI|LR6d3pS z#X2O6?G{jciIPZR2vGu}zLbrv1E%Bn?Y{8ys46&-65#MmpnABH+x7p=i$Q!YsumV2`S>&Ad_BcV`vufADGU}?S~CB!P?-P?e^#q6kcQ}~59XX`C@6%bDyydV}eqq!`faFy^-(fmU&R}RwW&^^jm6P@ z8hb?>F1zCgG-uKZm>)y&__sm zj`1W3#Q<}50@s4H+>a%Os=$<}U0 
zAH{#3Zhf9z2LJPRBW2VG@=xxp=Qfx$G<6HqC=VcIHHp(KOFSBN&W%|dFBPqWgyu&_ z3LV^UCCV(A5%&s^51J}@E%>zS9J~c5&bt1tsc?GmP{@EW4jasg#zc#--G6jVUmAS% zFZ+e-Go)UK{TKV)NwYdBjKVS78@pZFWzt2YLiaqy`ZTD@4;F#QjRPS$1pKX8`2dD% zto+CNi~NwF6b#;uf~^ISYKY%QVxh7XqHp=5@q3+rlh%IDU$hQ@hIZA+#<$k+KEJtg zw`Zm=-ugT^z(+0z^)|Lz{{ebDnA!G9i{ha@Dpb7m6~NZ^u2V zz`y)x#9&4l(IOofRXH;d<54T!fO*FCW7;h#1bZ150 zx6i4eyV|t&Q*&%yzi6pOT00SQ$$hug_&;;0@@bTphttmcw`DI;YahJEtYLWTCeG4I zY}PNIJA;2O%&6Rq{9@p33M0stS&0B2(ehi8eiHg*n2%9=%ts=8g)5`KA0Qws8d9Nc zb*(D^i^agTjb9PozIMpc%$5sH!I-=5YI-#v4}qZgzRo8HbtCIh3w{u&=-YE)>kvY7 z($xHgh9Za8AtQLk5Wq(De5Rj$e(VgnE0on-u;g&59xMei~+ zbYT+7b8vWgnn90|lRLPFg~MOyKG+X6#GK$X@536Vvk5g_V()BfiR@5O+L>UG4H^TC zu;JamekGYO?8^=%X~b29-#t*-qn@vWPE zgSE9=0Uf1pruNbn1C#WxQkKnfts>3pkX$atQLr>czP~P$a{Nnzt6s4=3M-a>zc)d>-epwVgi1F83OP_S0qbmvMBQvG@#?@+(Wi@o4&iY) z!PA|hpJ_JR4N2Zes*!O}+Za!wv4P-(FF|nKkZ7@Ju* zYZ#ino&Zu5`VM!8H~nZ~*xnFxPeBsDmda$l_VuNb)ezTnk}6XTU4lz0!L&$bxG&MS z_%F#^%1UO#_+}#aACiG@;rzk{7una}`tr(01Pa2uh#d6Qzz;>{sBIMU@6SrB_Bx>z z#~v*Odrey8&3=bouMQo_KjI z)+LO@{>=-YQEmYlaI=l959z@A-)I#tL!dm#dCGe@cv^zh=h8*rhsBw6HQ02n6s7v! z3ZL`iWZg3g+Rvy=yOa(IWLg^tD14*!zt|6f;_i%O2*10olOQ@AHedeGc#-H$TYr*0 zh+2ctSo=#`(H~+wHb?&B+qhNw)bNB*SZ*+*?KGAr=dCdg{-FY#@bO7*m9E^Zi{WeE zXtNum*;qC%kFNQg=j*YzX%5mLGGPJ{&tQzb;Z6Z?bx<@4${M^HUqXyEDJEC8d{Fr> zaW5+=wtk1xy#`gL%M=CNKa7>`3kQZbYg)=Ht^&#LAM=Ll$J+_TtK0?8Ui#f_6}!ge}JM;neLD)D}HX zjTKde;;CQGZ8&L*lTIgwT1&BTgsDk=?b2hGx3)%b*zMV6)H%`=`REja8@5ac90c}JA)XHfQ3t?u>rGk$hs`QwhgDl8MrJd(N7smYPn?1&g zd@?yQ9XI=~<5;YL9xrF}`D&_J9z0CgSiGnI`Y6eo`TT0ShEiJsh`T-mBRyt*F3vUs z+~m&vPx=11AzPG)RfK*zr{spgOA=Z=`wq*m(k<~y3q;=JcdwxqS64?I@)Aw`ZN*4m zoP}1kn$4BUx)g=`1dCKn&$-(Wtep+Df3?hun6Z*=HjcTOaIylfA_64WLff^~3U$mt zgDptM478P2tJ^)NVxw&r#aYcP#@(;}E(zc*du(W?Z`n|HWhPcxrH;SyFiys`;rOXh zs!8APia|6n#EZ)*`a<@0WXxR|kpDyiDqX&M_RfE3RN-&nRS?pFN_S`ufpZf?_=KB-W8j;gmIFd4ki6-W$ z7vr&d{oE2tn03I~?KqCB7ry>#X#|s_UQF%2txNHIS0S`8{o4IpGPr!&Wv>;V;tI{j zQ<{6Oe>&+L1s(K+%s?%xi5?<{(|@_sf2%v7ngV zpS=ug4=h_Zcm~>Fif)uY{LA*8dpF;$50Hp)m}b=UwTFfF;dk2YqxSNqe4QJgjQ$W? z&|J`={-!9fuMgF>=(Hqtz@C%&9}OlQ>~d#w?Q6pS5=)v9>uNS$e|3qx z=rk@r#K&wWX97G>Gb1df)RKVTJvvPGsY98X@cYmy@rAm9!+;-ql~! zl(%KthxzRitR^umhZ`Urp6W>wT=rs=dQxaDaQ;17pcC0S58bOQ+Yqh?+^Mo(CMGv! 
z-HPhz<7Et^X|kDR;a@17|CA|P#M69mT*lMzP138Qo!Abr_`6^iai4f!cNy@z<)Dze z?fQky9tQrOCGRX+8V{;r{%PJ@!5xD?c%EOL-sO9Irc0W;AAYu5qVlRKtL{+hnebK; z>6Wm8zjkI8!?HCcOcaj*O1g_N+=Ne-P%qMe{ZnL;QXETa&SLvh)?44~0>p7eD=%oE z)b&)EyNlU@gEBiwvn4o?D~N$f4>{Aq9FtFotF&_AM*8jWd-0ri!Ti!<<|%at3<)ge z??sXJdr;Qzl71iRZ1k|x3E>MoWIjgDGrv0<57HUPH%tU2jCg65q2KKMLBCeXQRpJG zq5V^mze+0)H;SE=e~+Q1wE*E8K=2`;PH)L8%PlLc&3#@m-yi1>F1`nx!uG0)S*KcY zyCbu9%TQlsuWsj@gt_;1@An^kZdvC}RaB$BXl2;vTI;flD5<6g%a6?JbQgAVhO`!T zX0$Apu0FMF69fn}FtN1MrjS5Km7gXh8(^`%@nF{vMTLlfYj?#(6+#AC;fbEb0@s)v z^OM{bY9g?TxJo)+FY8^gL|S+HQ9ph+Ic45u#7zsl*q94iJj%j3O_41xZm$&@;#F-P zZvXT}%eOi{oX}E;{FO4-N68}M|o5&GbO270Ll zn|oJ>kIqKzh$;Zhd{g`3=i!_aVm7^_F={_#`&O}b2^%pEwNlvq2#XuS4ETa^g1Pyo zNJG$^U7_&vl&#@awqe`NrP0i5v2QdkFP9;DmN`M9HxC~d4rR$*IHT*9)L`EGLoW?E z!yGba!XDmqy6>c@SOHA+!EsldFr0aT%+e&QcSc{HvY!WdW3}QAWBpmZU9rO)l?24t zN*8uVGsCyObz*}l+onPyek9BhTG4?Zz9i&HsDJe^{S>+(IEG>P*J#flZ=@oMacJf3F8-c=DPv0R2!^>c;M@y zrXz)#BF&We5o8`C_c#KPmxO}k*Tk8jA zhi$%x3^7>A1PKky?pwwEIR3a5Owe^SiQ=;u;V8p7&)Q6+R%AiiAwYzOm&m1F?h79cQvT=$)A8(4v-6q0_W8I}6 z$D%2$$J#n+pIismYI1km^M=~iyIDG>t z1E3+i&Dh~d?hqIQ{JGkNnREKz~)k zYnB%8l}y-x5?>2PPo4+_Fkjj^V7lWeP)q&tj=${)9dqu&eJngm!2rZ41$yX|b%xcx)11EBMPx z(7|<B7rh0EQWJY$sON z1IOw&l$A%(kY@#q`kCyPC7#3Fqg#?~;RY2I6VY{!48Hj0L+)-l;L1G_piA+`Tc=ez zcH5+6PC4r6(?~)1?D(8)rFx8HVfTRB8LykRx=)J{NeM==qoEAb&R1I$Sy(&tdC5e` zvQPKpAM(Jp(PmkQEl=;uK@E1tRGzAAH5C-uHV$|O<2CFf;1}_@s+%T~ku>!!QemJh zr2`I!p39DB!dqv*&u+dS>w8mq^PZLix`!|*%uozS3hfo&Nu7S*{qzeQQiDPg8>DQn z*`t=Z_4RZN0ydZ~?fh^^7AV_Cm(t^K0Kdzxu=KGqN&j@Z0=VBsLs4=`pK&-~JrQvJ zsn#*=qhHxNux5_uxmX1;Tp_ZE8<{`JB5<15YC+q2TW?-(l^q)#|#02ekjuJ%sDKWz31Z1cp%Y{&$P zvRj|A^$SZ`gTw?L=qo#te&ig>ieXY>#?Z!&LRIjwmb^nENa%!A$V)&XPS0A=oT&~1 zR&^r*{5_2ltB=*YvXb%ne#!Ou8BIusd1v$dGmer`^JxuFwi(xrA3LK2zXC+>5-Eo2 zsU8o>QOR=84M|40yF@Or?v{4gN8hoBERjV zWX<}={XvDYy(NnWt;L)N!vp$E8(!ZfQ{DFR+g-b8S|8cQWC0ADx&#gVv+<$63XOxg zU9GlXD)j;ruxL#icZ^rj{+Li}3J%p5LH=^ELQsL>*TVxo3?Hwf11{7-7FFB+xvD@p z==Z^nrZ!SPTQTW%O!3O<@Ekc`QrpE_ORi=jyY8}_9|Gbr(={teep`emye6CWJNdBnKCHFEG0Zro99ut3OnRGAk2lzvwy1r#u` zO@G`fZMoJ6ZK<%Ri?`OB8{`4H&1`JKg8!Ck4u%U{2Sl*6%-1M%92EidZx6QCX~(31 z@-H}SHgDW1187L+PZuCumqT`?rg^s75=nrN^HTVGbD2~E9-}<32a4of$Giv-%ggSn zzbQbb`(wzo?8@ns-$KP9P1zgWD}~5akj)(hRA}w`Y~RVP#rrAT)EVGK;BE|WKW3UGOZx$o=3EnkzzLvdycUrUap!ZzfJcN#n%6hc1J8k*B3)mY4&qZVp8|IXVLE1IJ=3D1NVny z<$E@Xy_T1sOI9h~0VqwC@gpJ>ULtezYf|)d;Fmm0y~ucBFrt5;Vi$a|K4nA6rlhb4 zu(xH(ON|c7Bduv-T+eS-mpn*ru{v@ zqX2|KtUjmu)=YhH3%&lZzB8cZhUVf>{N;2xA^-^dX0c-RjxassX?$_teCW5LE+RFPujt@@` z?%#$gHAB)5$M`EL#XhubHl=AuKZm-qjiB?=Ly9 zfQ9)^R3EaJ8B?h8N{!>09-*Wurqi$G2IDVES<~S;+NX4gy&#=U^mfC%&B?SV)~1;z zBm<41%;_)#+&xph1sP1n7nrR#ZgC9T>@2VnJC}sGxNayT#C26YyA;|TkMVyI@X~2n~!QX zeT$()@fS7uUnb_<-MwSiI}5;J$u|LV9o)ChfehX*S$n@Zh|sPBZtrG&tPAB;>wy^Z z;5v;YGo_6To8;|C0@j*T{`$LmF9Q{Yx0(;%pe}FQoav|^6j`Xxy@xFT$_ezVb$G!I zJfKvTeKabPf!ePNRBtp|Y?sW(w+70e#_2@LTeK9HRGz;&jOSSlR`}SI4w6PPU7pow z4vY#_Yz@KeICEBlG?^N^okl8rKW%NWQ{{x3!Cz(XETrJgh_W5Ny5_>OT;F$a8}xG# zDXeyN5&|son@LPR>l{c1wpyYkcL8r-H8n8Te&_kQM|tvxK1ieY_LZ&K1cL^;?B7Da zG8fMC0-H^_br^PEef@ltX2v6~aYj?ZZCG zOtO8k<9$Oki;{Lb*e&Ku`Q}rk>Y`PB)d4bxO-!BNE3vmEKf$*pX2p{~A82wCa+Htf zlEmNXJwB_)-_ejiR4(q?BI8>o94*YSv@EMQ|H6S|Cty){gb;LL7%(_3$eTd`A9~Rr z2DK8_mG)BEmNWdBge*6Vf8ppI_+%dg0#+ zgyMXdMX+R|lk^iEI;tZaU*mrh<1#Q()Wwf{V<)&%Zzv(RnBnt#7GO2STvbw)2oN08 zKwJpyJTNzVEb`UIrsKje(eOl+8z|NIkzMz>gjdy{s@NR7P3tA-!pD#x0Znc?uxnlG zKZck{AZ=wn7o68dyDR`_wy{xQaNv``^$|&eZ=9gNLzrJRpJ=FwD0Cv&Y?ayuMe#|{ zfM)GH<+%s`_NTCkYB;IkB)8M=DnC1G)?@w*Iso-Aa&_vgLfO@)scR0HnKOIUasY;zgAsSKm%O!TS z)f>5QeWirouvA)W;T*N^L?{cljzTm-831gM4nRNI>rg2Aw0w 
za$dqTjMd(2@o3C?r?!|2zoGsQWwzP_Eus#*<#H6t-Y_6>jOWiVn`Dd|G=J!x zCfTLK;zp!U7mnCxJ(Ivp^K#`R85dn68l=kgpQKYVgXF-acid;jjFuZ2HOj9FPy8KK z6D~RL)t>W;){|Uz$@@$8D94hW(m%~a*cTKRr9K{bENFj&Iwijp&K^Q9jv)QbcwBrm z?58|7j3SH}O**&7`-050|5I*ugwG(P^hmloSg11I&}cTuX}tYsKj5=d0&B%S`NJM? z?ZuCnaV=g`qFWk!*((|rbzpULyVc|Cv!65GUqsHr-fNV0tap7n>QlEbmgE?4(`nS^ zix;%JA8??Z?_+ff&pv*Ipe2{N_9@^4l+%27=08D{trqP26k03iAsm@z(VGBfA4s)e zIMoW|1EOEr;a@ko`3h6N){Zw@#>+e&QoAztomaAV2s^nX(?auxitpKaAwHWMp}`0D z&iI=~k~vM=RT3mv#PSQ;)1Y*)Uob%+mu`$QZomI@-;>36;}08NKBZp-qh4Ne@a;CW z4K)>O49W5}%d1?D-Q775WT9kExXSGP#*+mM^`5LZHD@{1mz2&gSf!hU+x3octlC@x zSfW;^J|XgoL+bxqFHLd3T&;gvBUShLlJ^=DqYkl`8!)@tr&2`; z^5y$(M+%B0uh_rXJrrJ7$h$o%774H9xP4lnH9sb7nle&J(J$NmhKBVBdae2O`sE|p zy=Tp+LKEn#HNq4H^=q^P3fB6?pKP0W9D)`M!dz%}r=`5(+3sV(UlXZO!{_JZ+U*nP zD#g0PxmH8Zp{^HJ`bbX39UsbMB=tTvD&4d_QSZkI7dXsyqxwi>6ntleWAhL|RT5E1v z`iMlA->`fw2DlIQ!D6C<`o_waqlEp<_nrzb?Y6$~npK5Y(bLBW+|^uX_nag*xa|Am zn@=ge(HPrEY%0yS6g_U~kDiVZ9VZ5hVceehYT0jg9pa%;-O%Q(c0 zXL7L$q*5WfETPRZJIumS&T^qr3yxUlWN9W*fg>9SMno27(b}5c-|XLfe!dW9&7L9h zNYYI1JIrr042xF^Ta^BLlP%Iq$?80=?ClFl<;#BsTOxw;Wjn*&Frps}5yn6gcGPNj zLO?|qa6G2UEoPVgW_@uqPG1rS5_{5zmGh~n_*o{KzEUyA>FZ9qsE19mr%`L1x>NL{n^!>j4k*-_Og35S zl{geEVz>8C3;r&XH%Fm6p`FO+d7@uFXyYY#$@1SKSQDQcjQ=Tub^dk3&3Ee|7Pu4@ z8d_SlvDbQYZ*JfNS!9_9@HMk+9FJF#Q_nK!%@yOM`PG{o#VSpT{-L6%=5ppG9E(@* z3k$QFnc%z|1Zfq0IoYLic#!Y zhRWxrYx{^UZ{^YAgvnQ?$uwh}Tc#v~d>t1R`-GSln_=oV7!L9`j3XCE9eaL7I@_k9F3*pP+Xs{f3FO(q4B%#(%SbN2Kt&r z<>c;_FMtnfMfsA=b~FN=T48+|qi$1?+@6$3op2$#=SjBB^Ff9OEYB+%%tE2jEd3JQ zhhH}-Y!SXu@-_S%eF2mZhgBFB$R7E!YdsT1D;mpHg8%z(;RQ-}(pEj{`6Iac1!VeC@0iv~@k z`j=Hcs>s!sOpSKvL*?y(p6{JJz8R{c9^G9j?vCJ4@uEV(O5Y#x!Bpjp0 zvM)}R+0aeZ9<@3R3UXExER^+{9cP*w;fzsvC_%A7@TR=wte*e|ZNBs$V&r!yER02C zK$zYDm*Mx`7H-b7{EmyPR$*jP|Bdr?inc+nvjl2NOQSE!zwt(KY~0yA$x=W5EwMyb z)*7S=*7#WX6KF5*be{G#&+ga;-E<-88y>S;jEo3gR~>&^(hs@xK&SAJ?sAbakqjzl z>vvNhN)Ea_eg-@;Mi`oQAa&8&;G5yz~% zj=}!sq|ne6ekj00Q1}tUNb~Sy?%Xb5Tp)`(kN3>*F0fwA3sv>S#| z^2$s-SJ!nG44+svZMf5(-DRMT#|s-P$Icm60mRQ%&FDYKOYZDV^DyZw0i<+EoXqQV zjeq%xf{uyW%z4?%4lHYZf_Kxow72)2E{Ba8s&JAtr`?o}&X-yn^*tuJq1Ey4$X4)& zuc*B*f&!30S>?BSbKG|2VDm0^s$GX8v(`J-B0!PK{65*>!-V$;8x+wIru`Wh`$W`OWP>hvtJU4YsY3E!=E z$pt!th5n7*-E|B6fm-Z?OpD^}uZtf^%y`0QIY7hLf8(K2X>YgTLFPTSj@fv zN54ui=;o+|%l>aX#p%Gz zumBKy*-Xq{8hqQKmVksI7I5Ork_dUWV%pA1^&k)s?=_!t)O_=1>UkPYX5`WSKr4dR zdO|Y;5%xI}^|`rz*HK$YwA1qUW#aMTz8B;RLW9JOoM#+89ZZ3#e8$VTO#K~w$*1=$#QT&8{y&Xj}8{|7Xua2)eq;p z65SD(66xM3pU?e;s;L$LLuISttW#>N~JUMc$Gdnr;=n)8)F(?E)3F zO5y(g$2?qaLnL?$lBxTS6TZV)V&pP$XoAVyWd9Bu$~a%c1*!QWk;=&{8^ke(;{6X^ zgD&9~XoWm5lYiJZsN0ypH{yQUTf7#JdNAcB!jOvB{Dx$MBuD)T3eiQ6r;hKj^0B8i zyUCY}(imxlVq74A>iSF7=1k^R^aNoMv? zIJ22a?w>KCJmWi&t2lPBX?d(Z5;~w6@NLK~9B=on*fQCJoU+F^<5~Mu2A%($>)<)~gi{dW-URYtmg*H{$vmAd%u2ac!Bn*92AS&WQeq|AifuU%0RBh$JzNg^+fInxju z{Z;EXdXKx$tE*Sq5Et>i2Q+#zR?$H84Qxmt-;FVu7agee3&`%wFqRpR><|^T@dz|M zlMsztNuq3~lJ(%Rz*+`w^`AJ64b}zbd>#)Y9I3rDsCbd zkoB&8b3hYiu&xIhr*37M{6aRy$dJ0huokFu{PRkNK1HzuPf~ zfKVY+=Raoj4^Aahv@V!!5k@RBTIk%Hqmn~3&*^|^dIOK0Br6X{;%>pnftN70DRmWU zbF~@qb55Z%;H4ZBgH6tPQTvFM$5r9c+>BAh)dlOx$lXc`iPN2j@~t(7fKUANW9@+H z&6bhkKtV|!GW|w@9UX3``=*PSoy>`U9M-WFu?W?plbHC!ImLnOG;af`UNy_dpB~|! zw_0aTUUPos5ti8cUfjC?-13BnVQBN_b>~|A&!9QOgN=`tyM$Pts2p~I z?W?epB%6}bG(2f}@4)--Uaub*i@$1<-BLdX+AJ%S3O9c4jHq|nn2_;e0GK+Vc1Vbf z4r^}xT&Bn^=IMsfBG%O9h}tg}wK+nA+f}DERiw1R9OQXJc4!mTto83t<i z{(V!9aYUl{Wzt!4yhoq=6@S1z(Fbl-B@Wz3PscY5!dJPRWFw~o-~E~I?&o(mFYR*# z+*}9``bT_T7b_WHKG332c$;q&3lmq2(5xL+9ChlRg*LzmCQbBz)QoBfAPVm+dmmI? 
z2l>qDCjYw&fPkPTbVzt&QrITXp{8E@FIBrX>4Xn=R-0IuNv&jtdCLuV>m$50*F33L zXE3hjN%A=S)hS&E`@hvZ>4tG>4g|x5gA_jH>iM`T=Z3o%V`OPS;f-a{;n6n8QJCxJy2E)NGr7&Tl)P8U%kRH4{ByWNFf6OgdRvEG)w`_9E9Jyfdqh{3)hXd8dy)HiDjv@))>`*cxF!e4 zN#K;6PGbG)d>PjSgSf2ym1HySm2o@jocsO`p|DRzfQIo(?QT{#f5~Jl#%qyCj*7#Z zU$)y#RktX0&U@)v>V)RP)_(>`brZzwHAU>6vU&BS#{{$3Jfg;St&5sWmtSqI{2vXL z%#SwEoL!dT{Hx9rljm87ZKomEwy`%RP4{V@!9k06q5FQ?A0;|NG1lOpGkf0z;6^dw zg=Em1XKoxJb!-@N7ltnYfSz4rb9 z+Z_cUi9dYYn}smIm993i{(KBy$dzcwDtfkF>XYmSjeIF=KK$%Ye`k=d zLve8Vy36kHt_Y4!WonsxG2v<&K&TYi&8*8*?jB+-L0|TscFyhY9)b_Bw1UKDKxb>> zgF%U30mt&);CIRG=>yn@sbyGlxR9G9RF=0)OHtdTV)U%?C=oA7)F- zP5!jb3=48u9631!JfTl*gyP2vJR~eB(?}{kM-Sr+{bWH3^}oB1|J~8P9X}zen<3D? zz;qq+Q1pskriIJe8_)53i%Ac8k{_%hVqq-S__$OmZNBk5f}G3W_fq11dznx^u@+Gp zlxY?I*TsER!V)*bDq5V$q=>ei9U<1dk>?5oj@upQM4MJ_&r3Y)5ev0R?*Yw!ljuzR z8vCs!<8&l+a`OQ zUO=R%GYuR%Ie>4$46%_sqsm#(nFS1vQQ?W`Vv=t{Rhl%7a=AQ1DbN(O@PhL0)w5Zf zOYkkcOUXZR<_oQOSQAp*$d~#v%mNY9RX3XiOVAfNSN^GC-_^1)YZ|r@=Xdq&y@$I0 zM8l!=3q}A;v!`U~to4fU!g{4^Eku|k{@fac(Zvaz*%?6j3OU^k>A-pKkMEYnn0mQ1 zU-l=CX!>}z>Xh*9Oxf+v()&lFs<8tT?#2eBDoeG{D7JNiFwo0;W-W%h+#GWd@6#y4 zHiXXtrn?w7?+cGgYhEQ3Y$aqrdC*?=W-Pd!R&P+^LP$!eZ&5`)<4Y|R%5`0BfO(uJ_=WdJYJ$Or=-t8l~aW^_w^GhTOBV32Y2v%wkSbPJF%?HDx7my$-TX0}$&?&P5?Mx)#JTx2iwQEU9nDdLv#tUn9rgxjZ ztnof$!mM~kRAv6hqh~DSE30IYImJfCcD{89Dug5QK1opS@Q;FI4gj6c+wzIGIs10& zVjYLBkH=qQN{kt84HmX`UwvBV8^Mp92xuNp1QbiD{h4azR1@}bD}r7prXWR?pd}E1 z<&Lfi6=P>0V~n&LMnU&mECr%p(8Z8qzK_jz>=nWsw8u}({KBMT+oFA`F=e6~Ygbsp z4fb=AL7LBpoKRkqzPscXpx43(uoP)h zZkg#L`VaPt>r!(tKV??0ltp!&h1*(Gm{ewIDIdhjF`3M2`CRddBJ_}K_uc8fUs7K| z!*vYQWvc8>dYKXnxpJ}!(ejE#eLNbSGDY>h-e#W%(vFAbDDdiRRYvk0;__5I9+gUO zeSE=km3Ljc>=uMP>E3Si~c5v?O|G1DLVjrpZ6V(&+NH zN63D;5mvEsiVO>D&AYW_R21p%b}s$?XtreYuHZw$})X!zP(kj`` zz$V(WM~t=4210*pbuSb9O*_lS@yYE+%Rfeq7)^mB_~7w;rDXDB7;Otwf%GC;Dj8JF zc=Usri9)1ZBi#bv$QXYS4GQ56^wCG@c0o0=kK(pui&y1A_k<=Zo-KP%$}a500mIdz zl4tFGX+5+>!T5qK2Tx1!GWr#DCwUA=YDB-5J8=d?Q_Q0HznPc zPtEWkhc`mw7r~iJ^)kKKneL7Zg1(8wd|uz|K2GG#H_1ujxYug1v=bb*)==KZriR=;Pb?G_#Ra%RhlEeYjA8MA01*eI&L)mgb=%j zVJ)?*Tgoq0mC_jmb@6jmN$Qt(W;&Ni3jKfC@U2<7?PA`K~O{7Cuh8eIFItN$A6L(o? 
zB-$D!;%~R%C;_%mrFcD*dlCTW@D`5;A#)*jO^{QoW0x%I%tmgw+FkZt)7z5^3b=Gz z_&(}BV#e~=;HJ9q&=}XrH73emy?HM%ilU=>TON^euR`JPzCqBQ#xxo?rw6bvl%Sct zFT!oNx~6D%wt-m;*LVKnqW25^Zr;v*(NO#$OpgUFRWLj*%ewJS7#PAj)TGdmds&m5 zLUReZ_sQB!J+s}|Np4IRC=Cr(BvbSkAs>eys2f4yB{*>*$|N_oP{D>_YZ7S#`RM)P zLUxnxgs@FOZYJIAs5mt7y0o7{0c*_Dxo4GA+i>-(8=dCN_R>S~axbTbjK6>1;vp6B z*j}e#0!6>f4bR~pT)E`m?FL63P13#y(xAMu3Vo^7zEyM*XM(*fr|sN@pp!!+6$p-^ z{eaZdE+Pfr14T=Li&?uL-=O zB5F*uq-Fl~PK#<>>~-(4Z=#ro!Kgg{Xkkcv8v4L0a+8>-YOjDnioQ6a=W55MAQKh3 zRdhS5-tL%})NR^vb^VD^0Gxr2#2#POdy_Rh6aW$9-C9)_@ad|W+m?Kvr74UCEtWMV zn@AfX5Gacg);LQ`koVMwVeUPeqGSVyxJ;Y4OhlE6_9}%2y(+inWdT?rs7*1AswfyR zeXZ;DZTX}e?W|Eh?NHw&x&9VIk!oHnKMkU6wX+h04My}xM01`pT8KJ-rT*sb|2o^W zgP9{T$aCVyIwMJc5rD^qW)yn9FIs4$#hN*JluC=}z`E`JO#i1%#c=){sr~YIi#s<7 z7Kf=Xe&rk|eOG(Thd5l{EWmAA&r6UN1;dfkD$gzp_7PLb%F#Lacjd>7-($EW`C=6| zB4GYJ4bTg_f_^64Mb5d+|0zr!17Ud>WV`Mw`T8AhQP`{Aoou}ozqT6()1pt^xI~!Z zAURke%y2u-|7nrximRNB$ldXL$sT^&F37ueGpW45Z8w`2E%rTSzWbXHh8GbpoCbYg z@<)Rx1MKl9+*zMyz3e*i<*<3u(@b)C&uj8#?1QP_t%G5|SCmLQN*)}KRrDZMuyK0^ zK^MkNTpzAW7fZ)!%ha-yGn9GpmJl@00V|4I7-6`dw%!E;^G0jX6oXri$Em4oKczKJ zk8rG(v|ZOxk`Hvx>L%J6Y)cB&@NH|5O&AFkP>$|)@}j;`z1k*)4rgmd@qOm`v%t&F zv}#|IsmGWJ&_2~&ZQwOQBlDQSj%4@gbefAFQC2h*rRcC}=>Z4iCj=4rG3{9+Wo6Ob zwa`b~Je688$AcTt@RrO{l z>l4_6BNb^`eTh#y5mrb_qh7;L^K9$ppy30mNODjKF@HWTyObAW-(TWCLtj^dL?eI4 z0dcTtRoQ&e;Jy>eVvj_8sL$^RN3A0Hi97^noIe@&^5F@z)NQtA=grrSeUj6>(Epj0dUD&A zgQhuj0Ks3K4+=NeP z;b0U*m674#*!PH=RuP-42lBhYQQUhj^(U@74N=o=btJcidylbo!?Morgc~@D0MA%j zeyuQ36s6r-DP6M?y`uHtdR}0`;LUUPP+bH6HZXGjsy^n+Pv$q>iE`{c@7$7l%Fiu~ zqtV7L8NEp^IYiuc&?8+=C*pmQ%Fm`-nt*;$R?7iqzBO`Sb~;DN$}*5B_nzy;1r_?F@AGH`kJoY29dl^I*L z^n26Hl!_psG)(CR{$! zB3fH1jC^~o+yTwi;rn}hNW*|9jMv_Ga;7R_R{vxX0R<>xe8yoO7julm|Gt4P2S3WmRGfC z%v1i1cKSF^_yLpSSmZD0Lo@?r>AzAWFiv+1O#}LdV`YXo2T7Us&^T?QlC}ojxT$R^SMy5Am^@qI2iK*HP~iexCq` zj!v=lv6K`jKWa51U+tNEYwjOWba)NJ?CyLUe}1g>b(n;$wfAZ2u|n6)BdL9vm$6G! zuqJlvbktq4gSZy{ht>ieo+HTrLu+I_>D;uJ^qD9NPLz@N?_kdcjM6>)0}85AH?A9@ zCTC2?U4nVemQD_^W%GzOQFJ;wXs$=XCCy{P$R%8}qYJLbAHxi7QD_&iG|TIac*~8T z9s5#G@X|U<)kh{ivi_0Mcw_Wmy1UZ-*>v0IuhGQ6WCy$rIu9 zVBXjS5A^8(&VR`@0^k(?9I}79i6x^O2Yxk`D2nUnT|OUcs?IpdksKs=07>h5Avk%j6W&?$Dss_G%Nzs! 
zH6CCeetrWd5eOp+xMZQgj3zQLXN(I~Ubv?3gd$JYbU|>4?txS$hE8a8$PQWUu=1yq zPi<;R_}}}=sazQG__kZqxq)*ybu8+7z~5A()Un2E?L>oCyec)su?m8Pq5I+HVM2`T z=%ZFw^U#-2jkNspe%Q%cz(L#4?`Ad{*8vVpRR;-mY zu>)+rFx{cR6`8;9auGt)3|YDuO(*3lg+|qCc#=KvsMH*tm%ykl^T*r@mQShE@X5y5 zMQB{g2+blQX=fVuAA;Sgdt&Xtk(XN!4on6NZkjS_YhMSqZe8RimTl<_+2beE;<>4= zLFyt+g?Xql2eIw^iTy7(0S`#sT0~bYB*kN=v4PzCzEjdE8SD$cm_r6H7B`cWz-e-O zAj3Xc>3p~QQ=h5W4<|$7fj`q-{d?F2taOfYHDi*(NKDQ{h7^Zr!^G>>M{!h<*#2Dc zD|Fs6Lo^23U3^a~CcyccclyDrxevmB}k0wQzWN==FwsoR;HM1&o zu!u68V}YVA`x9^605X9<{o$-EL8*s8^6@Z)Fg z>&ECbXzLK(Kh-Ud3rsBgVW%VjQJqx6rnML5C5Z7M>Gz`dpjui*lE8Q9DjV4aR8ty0 z+jmc1FZ9h2aCnGO!!f0W4Mv~J(w79VB{7$$_(Fg29tilxgYs+#04r*r?C z7m1hr$&qy4ng_8UljynsrU7loM6F21`eWUbFXu81K-d4(3~dX85~r8;+Heke?x*fa ztUZ6^llu}4p-GeT`hiD2RfL0+vcVT0l}N-SSb%3@SBXTelgFE(BdI=TOY2VGYZ*@J zyW;xmw;}<4m536ma@{5`3tO@*_})AF5k7H6tn&sceEvyO==B(8U57~JoJW>czduK{ z0>A;L={=0*7a+E~DTu^;4sZH>9RjpwK5dvd}t*FXEqPN(H%(g_0wsWo!QOtvzk z`<9ee-RiapN?UFLpW9Cu6LOCimE}5C(9OxaT}ZtQEtbn!(3KxmlD^&1gr{F8KmWtd z{nus48wTA#jtPEyJ5Pw71wG08G$US{oS208Ta6X+_RjKD>J8SkVA9$LP2L$zi{wSj zESJKSl$!VeU6M;Y!!uz+uWr0;pgC(LqSPQQHO4vMc-_mFVm`=ZoK4+YFnfJy zSlo116@tt_0uBb>C7gxvuO#E$DXf-Z8|8!E`r}+0=9q>m(B(vM)sUl?`X%3=|A{=I zx07+NynLq;U(-mEc2J)=7H0BsnF!cZSDuxdmp(aIIS${Sn6_>VEO}PoU0~jlQJC1IGusROo|uES^~(bcXWHVOoLi^7S- zFSE$y$^+A%%GTbGStIx-f=F;Yecf+fSu6N<18B_Si-GHZH~(R;sG2b%^u*7z*sl*o zT9*@3qf(VqKV+v>#G2gI(8(rln01Y*dcgqGD~*<*Mq+E~sm9er$E>_G-Mv4Y>rGIV zC1#+mah5HX)41b0^tiw;4o^>0OJR0AYdCiR0G~KgP6dO&XZX-*M`u=_7uCmmX|Ap{ z)~j*M*vx)O`M071qsI!iQX-(!v^1cLmC3C_FZN#y!nXB5cJe4(kL5*DNH@kG`5Ve! zg)9d@TU^D-uz9}3wRssMzM%E$lwHD#%SV)bA~@={dRLd2$wI z>j#==N;F<`BV8b(bbYU2D*9@Ze=SehR8dcQMQZR~DsGf1SqpxI@jro3l6FxKOP?v~ z{eSg53!Jsz4Kjn(0(XLSBNm}G6(HV_MZGHo&$L5Ba+lbpe3svQ48PBZlQeQ4-0SK0 ze@3e%L4y%GJj(|3prqEp7f=&L|HRxb`0)uAXumqo`Zb%!EnYkVwxi@8czN*I+*t_i zDo8@MO|a_QU#|R-bM4v4B$k@o62X zTcxOpKB`?B2vIlo>OKNW6SPL{(~#~7oY0&`?NC3%(rKPAoRIWw#ZAfW{>}lp9WejG zw}mO|amPT3>Q=mX(73VAQV@GbtG|Lz-vg(rnn;F3Z=r>ZR^?xO>r85?lWlegi37b2 zOF2=DY-eoR!`zql;!S+vlF}0A5SN;eg&g4ZkdarNY&};y z;<|OpibME;IcE!a>7v+;2k3oou70uIwROGuTMWz|upU58uDBm4bEp5%lilbLc0VDu z&WIP*-oL9$wqi|w;yW69M!icDv$1x|*DJt`77V`*s+(i1@T!PI)v0-1p}J7v z3!eUa8U|p`*BZMg;u(W_bFx&j4YsBG*7(ZkF7uJ%&o)y6+dEckr)`2vw9dpDu3LJ2 zv{6c`Ap<_}upP70J9S5QS@+#0v0F2rm>sj6&tG>LiUEz%X)SU7LG|L9O=~|!l~0TE zJ|79X>%+oNALF2DOJT#JM-J2GF8_za+Wz4%hCuLtSEQaF++o@xsf@Gkqo++R^6(14 zfOW>Po+Ys%g(aI3v-p{Cwsm>mWkRh*$3T1)g|3pylLn^E-~_w2*bn6o?v_K~RdcSD zElpu3?|ob0#9z9^4EsMSjn+)x&QfQpl{h~87CWmPv~8|-#psaobdOrgHp94_Nb{gG zL_k`p<5t8;_OySQ@3ZMZ#9af+)wXwxSf3i`nzUT*D3Y)N?)wYHc0e zLaVfl(+cb^<9ij@zn%z|A@`_KI?ZlFZkuLZ>}(AfN?b)RN4v|8nS_X|p}7$oYlgFO zM|F%>o5l8v#RHu#E@Lvs|JxG@5{ll2Tb>o3_LrRdXfk{{d{9(6!#Axp4DU40GOmAQ zEAB%Ta3<|^qjj4xrHQF>S<~$X632QX)EK$^NJ*-LA6annE?N&Z@FO*EN=B&G8vFwI z?7U916|@Ej-O_C5-QWy5X_Z7`$Vh^afE7OeDBxKuoo)2RQt_w0`hCGxUZ}Tf@=%FO zWa0f#rRdk(l4&_oid?sSjddOxaO&?A#|l~tgaGotG71xykWZVSq$5^=n`RGOid z-JjP4_T{&?7VmsC9tWR_wgwHvILv_XE-Vb&zWQ~$SS0PpaO)qMOby{Jze8 zljIN8IZ#DNOw5@I!d6{dN??!?BFt51#83YWeOp=xcdxO4D4zstLcVE*w{RzGwp|CT z;8sr52f4$s{%{d$cJR@%!oQXn=WE~i+Q{R{fMwSTrN5JygELd|(0$7wa{JQ9o_&J! 
zU|2ii<8z%HU#!E?M*&vt23oCEU}`m4GBPieuE?PpbLU_nB=r z_z-ey`;>=z65HU(kI21Q>Y_zekB!8#2}d1C6{?#cBFBw2H;9 zr`GR0Pao`6eBJl&sbfPOTDjXUOMLg*;!o(9ceig|k6`7{Uz^F;ZWSKvpT6rg^Su|r z-7-NW3+FU7HEjcCN140tZJiM1B>iWVJ;B(qb>)0fdwasowo{^0KwBiTX0rI9p}q&_ zIQIC#gUslF!tnV(1Jhz3PDz+OU<-_5;GYCMKHv_lJ(fLtsY}S+n}$_42JoY~$o}y2 z*EC?@Qzfyv9JhV)9D{9*6iKGtBx`d2m5lh)p-kSAAd*%wPjoHr!5>3ywYf->8_< z(J1Z+r>MP{2$AQb&j6^v#$hobHIfN%GSIw_(f(OgSm2;;lTJg!Ua+ zxe98n#Zj690Dt8{TZKW0fOsj;Y9dRH+J&8UF`97olL2)cD)OgbdK~K~KCeON`p%;s z%&6`y*Ge|rcf$R3sM{Sm0&-QB38lIXl7i@cTv}P25e(T?PMnRM_s=XP0KAZKWFq$h zPjq5Sr|yl?wAXlr#`10=m$sQ;GkH5B9{Sa012;MydQXW?p{{ikA~+@=nTj<>6#=|(w@9Yxse&7z z7+MD8gJ{chev2Ocgj$A8yM8@ZOqkbHif&M}NCx{oq@yO`f0HBcMx8GjILerhS+6!D zXijcfvKGvv{B|vR1+gb-nX3j>L~|;%;NIdgFR_kq1E|&Y=3#6HCZM#793-#e!1z^Q z4#KPKP47yyprJW_Ke8mW-A|nkdI33ufH@ZDYK;N%dUr(VWtp~-Fjx0%oOOBPx-A!p zPDw?J4N}?a=pZA@;ohC`r})T>f}9g z(=t$n_7NE&c>FTZid;PbEGdOPj8^1gF-RHLiAFAn0 zm*4$Dd$I8T=F84c<9Apw5ne$N+-Y?A8ss__$!$i+&mWPW$t%90OCV$4ucO{h$$Sic zVQicFaef3NbESI1#PDSvkErgq2}RnjOcUHeZO?V$rakR9oB`9iZ?K4H_5|FEPNj|l zgX8+wsU7YBqOQOR?Hm34oIye#0H{;(rn<(N2JRA2OZ7T9BmOdz{l}vLa&F@=D)u!7 z?F#G8Csa=4wh~(#o6KKtOg1X)3NM_|UNdnlXbL2!>Lv?FXxV$Q`ev~t3kStr_y!P< z=+897{%*7k)^#RbC<;?-QWPf4NF{EcaCyYrQ<2%C64siv4_}rSvrb@EM_1p?8xGIS z4S~*|h9mrW!4ohm@<>z9M0uMpvOc>DxQE_Z1`%t_gDvb!Z}RSk%pmWKagRJaFbYZ} zAYRsO7AG_Yhz(qnOz&LbS&{!h+{?2er;JCRB{Kom*uGUWYUDSOBJ)UIv{b#q_W4g7 zjcH>;cAQk%$JD&|D^_@K(S=Cg`!BK+%bI?)S$u-S?P#!--9b{s#@4KtF5=ZDVdmPM zw@2RVnuuW++dsI+aRQ$KR-2{+%5JQo&a7Z{B|1#39A6sB)^XK41>xT8@ak{(3|Yy# z*9r>ff^M1u6UMEH2I@L4sHzSDED%u@>r}0o0JfiSosv5V4u^f1l!_{aAysa>SXjgY zfry$ve>C8feq1nm-%Lrqwp3foIwzMLS)IX)pc36(iYcz&F&^xDZ@?7&W}{U28#75* z-YTIwO@;H<393+y0RIyfd1rbJB`iZfXUsdT^eek+L5W_BX+NbsW)a5PB1rpT2wMc~ z`{1E8?SWYHxd9#4y7!h;I|6=uHQn<2BdQ3bz;qHu8}N#Yf27_-+Oq6KtuC)zaELrH zs7njpd3a*{ScP=Xn|HGy7A;H$3T(_$6MjyqEA-AzBG}fuD~2@2K|sN=dz)O%pQ;$<80>lqlA9KWUfGdA$31I#-F|DxPPTAz zZWo@a;I*g(SJEr#SaBtb3BUnfW=itTb~Jyy!`YYoynTL1@k3EJFx?QO;dkHtF7Zlw z46`ja;mf`8<>0|ktQfU(I+sRa`BmS2YJ~A!O>d-{tAQoN5)T!#@BlA6*={JpIh?Hu zbcowh0`Wk{iTio>C(z~W+)KfJ!t>gTw{H4=0T4XHr{?x!vTL_%Cbdw2DF5F`5$5kC z9K8Sv6Xp3LESRe0139wws9_rUW!0m#oEBmpH_2b;fK{MuPR>lvAR9wx?GKLzZQ+4%@@Oa+8xulzMm0 zcaZ1RJ()xl@wl?9H161pOTJ#N*;8KV98grA${4SbD){u0f~Xud`5P5T*<@7S5^`Q; zjqOye%K&~GT< zd+wB>FKnsPG&tIA-=+BREV8X%)BBX!3mH++zQS~)WayA^KvnNQeZ`m! 
GIT binary patch literal data (base85-encoded, not human-readable)
zF+aR`YNh!GM1T9Eipg*pyu84O{qui~$bcwf693sn!GpF91cKxqGq~oS1Gm47kubk(hpbfwyBunIY#V0k;wogfKm4cY)w=zf zN&{B5#`d8$0QDQ5bBg~`j_>?FhxBENy4EIKsc+XGQ?=UI$;(<0HF8wK&G z6d>$T_v>sW0_#X#CXRa6v}iy)LLJ%rHhuNf2X=S9MfhhMw5W$Nth6h!;SiCDz>39>6mM}6VLFKVUPxq!o}k;GrC*pQ-5sty!I`J>yV3i z3DJh>pDEvdn0W~%?SfzMFEy%_4}_S3-FMibzC(J)E8l z3;!sHI|&HChNMjr3cKu}`WOBOR)sU{n|};-T*25g){q%zH!!ntTu}SO=4$PurvTK= z)Yr4KMxL7Lt8*AFqYemvBJCF~Vyiu0kS5b;0O5pdO!D5{f(FgPP+ki9`$yf38t_S*s*<*M6Hzv$uju#=unZNP*ukH2<7uYP5CI5TJfiB= zr9L^OLgo*o3x68vz)~%A&OVlQF)8+HY#aGfMY9-vez^3D3eMVcDXooOGgas~QF18Q zyXGGav=qJ)J~-WOglI4A=a-zaz*L3BBV7jnL#J24m#HNo|j#b$hnRvGrwy$h!ePLh%TyHDe-!F zM%6>tZvWsl5uiz(A$KhN3Aqm@H~PYqyU~cDic*gEp8U%-2R$b>GS$$YyMQbJb ze}cgEdv`-1d^~ta7*UzvL7vW3JNIaJhahk%{fUcWKu&^Ae92?jTf$f9Y(eX}p&6d| ztg%)-KLHSt<>?=FXc$qKSN`#@2#dQWU*ZFu!_5RaY-%z75dJp8u2RY}?zWG*zoeW= z689T-lt~0Wf)ggGPCP)`g9m2T5LwyHD3PpRT|Xy#(a|8)Vr`tqO&HQ{p#K5+g0Mkn zOmF#6r?aN60Jc06R7P-oUM0^!t9C62SvU`f)zXT02_gLs4KUv|zrZ1l$UTo*@l*6C z;p(sSO&SsUNvC+t2idMY1hq(;k)IRx=tpWy3ygXWPG?rf>X0;cRL6xF9VumMGOX@V zxi@D&zvhA5cC2D^mZxPJOgcl-Z;}`}z2IniF6Hr_b)u>wveQBt)DhKUL&V$`ZcCnD ziFk*|Y%@hWz~uEi3@M#n*MR$=uf!RJCT;$-ZH}@6&C{_gw@Kfh5|qQgFp(n4%}Nvq zoW>F#n?gau-BeuIYdkJ@vj=+qF^YuRg3DbJM#ozN!7|><&q78wI=2UP2S-WzWlP5i9y;1TZy~4@zTgs%Fi|JE3tS!xX_vq{GO>Xh+Vy1EnXT*qy*0r*@scIs9G?W&pUW zWf+8%I=$@C%Ned|`XOL6s!uXM< zw@y~KnUap?U!nhFProREUVvCMJT{HxT7rllMWZGWCFbkRnK|hREoSu{uq9Um<<3{# zMlJLF?o##n6`zZ4ui$A~cBPw8wXW~bE89Kui7kAqm&oiKs|>S@UwuYu$ocmMF(SQ8=k&;YVq@H z(M>Wz4jY#Tu1%RiIEC#O(7TwhR4_)^ zu+V!)W5ZfQrMLAQl|)G5H@fpqDTQ2|nvIPA{xYA93|8WioSVrLzuO$vKDDgueN9PmXDu`@JMvDj}BL?}x6N=XA z!SmfwMKuOo5QB|2&(7qb2Y($cGi{j0TO)X9bGcgm04M96nAn9elvw5;q1C0%JCu88 z^Bt0fX2cvA&NXOjXNy;>>L2ABvVQ9UX3(QvOIgodt2G>4)LR}WzGS`C^SVwKFXsZ6 z%unKSCz?KuO^V=Dwr6G%;X5M->-ge= z`b(o~jM!3+2iS&a*#`=GPN*8Qi91qTI(U@5=xDa8feDur-3C<(`K924{2f)4FMUQPe{CF0= z1$5weTKmLG5*N0+2=#;XSuVcMxE?O$W_{(bQ7TpTR$$bd@Ne?#mX)z?aDu^UuX*i3 z6Jr1pagPo~QD^y8N3I;_%+5^MvCq74-mOSLOkIkE;r6DPWX`Soyyc^NYx8|<*VTw zJEY4W?TvqU9?le?z4h5!xKvHj+uih+v`$r2a2RXcQ|$5S0oQulV;omXiQ(SuQr*&D zlRnVd{Uf7Cs}pjGS34}0y74J5YrFBtZT;9~W@WmRN_AMQJatpWv)`35>IM2WTI~EJ zzd~44TxvQ#G0uNjmF^%eRxbIvBOYP1qwd||y?5qrUFPnk_hVQX4c4iPx1g2KCh-bI zS()%yAecMfY6Tys@*DaF`utIqDh*e4aa1A@Y%fEQzth|iF{l``qMz>fGqX*_v< zk2XE8v-cFOqnJ-ep?&2R0}I{dGIWIG{>A~xI-cYoO!2wqJ284o-QcfN+1?5gC|-3x z+G{iX*HASW1Q*Y_|D=}G?lp3f(VJw>W7oE5E*Ie9!6t(fEP%(`G7TrcdOgn_B~zugEP<0<&n>VcmV zWCgZgZZEMeEkx9I%~n`bG3H~qo5tTUnq{_7h%Dkh2psZuFNR0H|9y?yla3R+`yQr? z?V7||r5*}?+N1ACBJ}De3Yh7#D$pre5m4|-IubPl#(zq2d8h{Uo;@_=^%?3dKH5h? 
za)C5im-a$ILR2v0nNe-{Kc((Tuq%8j1?#hw1ae?Uq35nOIHLdc}!s1fvyfU3!xd5Jp?@ zpwXh_i^3eq?6if!G?zofvOAMR-}7LN{s;CJc7rAM^i>UJy+yGy8W)3liV>IU_m6K>^_07jIW(2(r3xu+$NZQpp?+0cO+U5!6 z=<(SAgQm|=V02UiuLob$>Ma!p1ztqBZshM;Bp2?#TRafsrLxp{7R}Lr;Pq5zl5NpO zUHPzEOOPfZRIo&5?KXkQmLEbd=9Yq=`{PJM`RB#Y$|1H0T2A}Fs)dhXimvSiOhW1F z8`Xpy7(L7z(NGG-!1TI^O2WvF;fCi4tY4furrr0ZdGwm1gR|ddNdbB_9%Tlm1HE|N zf_@;63@2Y1Ca&rK;Lvz&Nnc=)4-P8Zn455j1})AACeA&Ee|ncO9Nam5C`|=f@gdBX z{KOa@iNO@knIy!i>BzT3-)~cbu2i(p49p@Go$e9^`-ol0d85ine|%~EB(boA>5I(cNHPqg~LjFg%=l^U~S{q&8UH*fkAZmLMU+*(T(M+Yoooa~6 zb{PI;aLuyTb~j1Y4`vcsvP>NpK8vNo@9%(}{k_hGAVhaH#(_&KALM1h#3x~V6WP*-s-V7eUQFGZo117nOGlFvXn@K~np7ZK; ze`_!<7XLp{_TMAi_T{fr zLjtaBc<@Q-VH*_ij;wB&(#UVJ7jA$0JM5fPRK{5Um(BPdax$4c_ch178)72~biJ@I zn$pF0&iD+6m|TtO<7Nogb-`dT=)Obw3pmJb%?R_Ob(OS3?2gsm@V-vc{B3ebF>g=p z><5yJ6N9W^k$yfAsuD64(dB`-wXB;nF_J8dSMyTycEK(-BYn4b0qsz2mL{p3{?eXL zr{`qNBt4CrbQb>XO)6uw7izte+IQiAD80{pNz4pA>n7Z5hhuj~cYP6Y4Xk_#3*hO7 zl2{K*W&I2iV;~2t)xknm8eWJNsA7zd4(0-rI&5}4e&P9_Uk7`(sbVDANZ4qcfpO_0 zl1ah~8^n0nxV4!hwBUB{VgU$0ZxoWt-1I7UICV1^uqJjD>%!R?{Y(SrxBN=aySVdD z!uYaIeBkU>hzRCid0ArI$!!}dsAS}=aoZV7%aNViB_-z_a&dQjXAJ)$ayIgXBb3K- z-%lsx6crdyxTfc>X&eG{cv~_n3EGuUs~e+nd^?N0v+&4T&U9A8h0X2FET7Hy zrqh9aYvbE&Q;s=J&Dbs+2-ktX`+U!ivlolfsT4r=w8nalrTm+DNn>fgn>?|{hxgnh z=hV558*Ir#(ipE)d+x!jG0QWqIZAMdk8X^h&9oI-)agJM(EMH)7bZoP(_)?1kaA4J zSrES9^?Bi2V%ivQjquR_|0HA)VX@UD7sOimZDn%(wrOpKvWJF5KU)*k^)>x?>zFNK zN1`+sKATqY{qsxC??+?(@my58M5p7c(wPp=qZIi- zKU4Tm{z>6PZ(TM{Yzq!7Q%JrGq?y_iImOiXaJn7j?pV(X;%A;oPwoZcAn2Wc2r%=| z5wRBx!GBsbyy^c0CSV>7kg>*68{5M)^VxVRyB~U4%%6&|PyG($WU^!jrS4Xk<(7`z zLZw+PANBtO6PBxu$yiOJH|cCao;N?<^=NIV@b&gEIR(8V8!r?VR3b=ReoDr}F0j+I z`|B~cimSny`=FFJ1LdL!5m@5};0ln`Z%HY2F4zL&h^DX_Kcjd(k}?a6klr!?cmMFTVG)GbQ{ zl~N4q(E^#J@h{z%a$>ne<})Ul>e1hEi~ff;{DuUu-*~}XEr;u!ZC(c%_%#+=M)xNX zf~EfQCOV~gc~c46ygYQjnNKO3VxLaYJD3ovnlBkLa}Gcm+$oQvol@^7!EKQBNKr%4BEATsJF?eM4|-Ec zZs1nxXVQ`_q0bTnKaUhFrjV}%$XJ|1J(X(WH@{Aoo1(+${&9AgL|>3d9zbK$*ly`( zRx^cc9IFk*@(+;9XHRM1Wt(3IO&F!k9b9_F3-_ZprrLDqn>oCO%wdAoGHp7Bu0fa{ zwR}T0|3x#k{!ZnA1^uKk*WCT9&Zp--RqPQocC>lt<${2G^S!ADk$zUIx%Funqpc9p z$g7AyLf&q}>y9o%?X;_>)Ej`DN!`NipL5ll6kF=3h+e}0UWL1c`=4q-qmb{B-%96fRSr z;jABa_`lx#7o0QnNrF)L4g^UQc!@-n@>g_(m|;0P17zZL&CK>l{|j1Um}zvt7R2q* zaWVYqMJ200NydO!{?-wOo8R>y;lPLP5TUqkn^`C_vVLgo4c4OK#!!Sno4g>YtE>`m z&kpB`(weycor2B@qJMcN)cr5a!gn|%ZH-N{^enu&D5xGAcw+nos#MnhD<-4@9CsV}_;ZvgMq$BssI$Qf5fm|PfsHuy;paiQ4!h|&f5 z%Xj(NaD}Ill?oA|m0rQb`LKqopM}>p$96hKvEg3dHfa-GRrD}b`5zLf#_@7#2sQi5 zP&si(;q?BAw?ac{jyRvUe;BL%kZtN=OAZ~e?%|U#HH-b4xv`O+UXsUS98yT$-lYnx zpkpaDb0(&Ff+%Bse)$@L70}|ry|a@(1z6^m@nE1B4Hc%OUm1Cmw#+`7I`<-zUII`- z@W!xvDHD-SRcyU)*rFTyEK%@-MeLTby_wVjCksZiNVN_Qu!0rzk$(&Q;%8**^8@%* z9CxUKjf7?5C_B5$LJe>9kG$kqkvU_Er4VWplu!Ug8TM|OW@{UDGsOyj-Zy3;nLi1z7~9dX;GB9ch*KG0T+ z%4%J4lek_#Ay>M(-}7(VcUm{9pU}&aL{hefT&02t(pmDR}` zCnLwQ>WP!`ZAdo`hX~htg0-0Og&DfTCd9*KCE4I*sh6e5({dKeM zPUY65K|841&zZNTCsj2}YCTnwhaI_;eACZ&B*50XqG%$)Em-vTJJ_hp2Ik&f(fclG|<8a}( zaTme|{IPsHSKBDcfDZi@_Wru~don_2o$a@Bo)ktwDoCc})ct#%r7M9`M_LLk-eDbDv2w_7Cztf;mN-eG9bLt9N zN>48px|%UTR^K(`8(CllHE_cYa?D z;W&lQXtrslw0z%Bf5&Oq)o6cb)$K|VCnZ;<*>&CjwV?A)2a_@P6K%AFUDT-a9vSoC z^Z3CUW#Iq#R;~@T`CKNV3BKj|@5dbOF!>=BA>6N|=vt3`Koh@MEP@FjFBMV8J^*Rz zn^0e9{(=Y(bascv*sI{31=Yp8_5Vws7DW<#O?Ytgewxq!j@2@x;desnI=?=PmGvkqF9n^QxVm1=fJkLhNvNCUs7y*8( z`TE=*X9uLu!B++dOK{b*0AJ+ zcVZtH=l;6!Y@@PVC!9L*^kLeTxoU_m2{YclKPhcsRU%n?SSdHfAb|C-Z2BV1oeaiX zclVWx0N!jqigFh^S``qV4R%#;6JuZf*2?d7ntc9YimMkVY1o(z_?Co^kPQJnStJ6t zoxIr3@Q?)C@6M%(=2A^TMo^T!i)N;IphIfY=E!SQHx11m#9kae-(j+FLgqsEqJBCZ zEXhIH!XxZG77cxp(3wnSYrB2Pz9{EIuemN(*xlIDlQ_Go4o=zhK;8o2=$}pZ3pIH9 
z5cjd(t8B&u`L-576gFN3I5$H#sloXDkm9HKJlAePLt9|p=uE4c?pwN&LvC&!`O)Nl zuZt1+9O7 zEWs<(TXN$%9|mH<&!nr~>q4SL87IyyPm)j4UAC`u8a7v)61roD5T#yPtrK(vnOE&l zX1dLKC9!^?SdmcEdTvjN(wZk&;g^x-Wl5|Qu>X|*;9a9o{*yG5sZDIlIm1+t!Rk;t zN0`}E_BIp}1TBf$OxDQpFJsX5c1)dMmt@}CBG^fdtlP=BU+sFj279L zTi?9X)?L%#k)h;kS#|e#FR|B=oZzKl*|e)o&)2W+@;CL(vLM-7AER%{!RsG*8504# znDk>UUnXOY)Mqq;33I$M%HEMY+h+@$bn00f$U&pMpt_C5nDj zI^iG2dVnH=%oARpP+#qBSs?O$#2m6MIZrxW?JqaqN{d~G&B5*EKKUpgb~-gji}U-e zh({55w(cmv7d_W|>7N0vmkgo9|J4Pg(=VP$f=C?iCp0?e7Ja@np!Ds(a%c+%^ISQQ zJHM+svdyF)D+rRWI}J$Tk1F`eR+&`(`z^GcmtWN&;#soONg>D>06yMq_ScFxWW0Tg zBzC-Q@lluLlHG_uR)r0ETM5TNUc!wJTrL(SMd4k+f z{l4s?p5@H4Z`FO$Kz{eW>ApO+@qkdx#xg{!*#7Bzk(#W@egpI@AlUyR5qqAl0Zl^` zydqXZMgrUSlh27|+ujSV<4xL<)6PTaC!l+u{l&F;^Z82ThzDef7=bf!Ltxv<&0b#^ zx`>qZ;*tNhv(dfL0Ncs@T@%*&czfWTv7>>jG8t-&li1pUu5TfPw$CTEE6Z*8A6-ZS z*M%)Rp2HhJDzXO1!I zLn{5xHJ?=FtOE)`N2xr}WNQ*x_~|E_nZAc&nYKx-b8dpFm7=i z3XS#ZPcT|o`h+#cWAZjM{GO1nW(>TOEoeE{OmwzyYCRkx1pjP`h-m0`Dpn{kC;%gn z>0dRgw?tFa!(M2URcZK@ZfjC1Z5o}T_r3&pcc1h>@qFRf@&3YoardjNzX08L_p+yi zpL%1>l^c}*Sa@$z4+Z!KM`TpK^>a=B&(P>=)Z;0k79JEYDkM}kpB3&q0*NGrHC(1z zG^ZW81*B!S`Fu?7eh~ghz{Cyk#2}SI#a(xONw0O?rQxw!8uHW8-tOe6l0o@lam2)F z-fp9?p#@8dcbV2z9MZdy&k+L3XPxbGt{6m6_k9uTWLz#gfp{u_#N;27)K6Q?x6)DHxlrCl6LX1J+!QruV*qMJ zdE+m1;MT*6hm%Uq*ASRxbEo2AJ%v%-7Nd)era#{g%%g2J%;x{j(Gt@K=Va?LCQ(W0pV@g{+B$ zw!hyKAKr1NcL!&yA?|R>AaOkk8Nij<%a2*=!n;vyFzePe#GM*s=G4o9{4Q9m#j#-> z+f0}vP-Sg5QX0yKC&mz3aG$Vy`0^@oOiF&(-Z!8iFYL z5}uYr*}!2wAgDw5V>1Nr_VQyVQrzYO7JY}h_xFGdwK#m1n~OK{vX<`K6>qfEdFkSI z^#64Mz-M!>+!(G%c_K96oR`0tjU;@|WIbSM6PT@S9LY&st{&!%6*Lg4e<4UyZ1>MZ zWgwvG?=sh65qf02w4%i`G1i(5t(ZG=1=%k*@BOVbv!~ZRy&dGR@%Kw&w=j%1n9jzG z3o^J4^~)b)09&Kgvyd{hh>B?;6%X1&gdzRUGb0~%f`tL@?pFQJ!L?@9Ucr`0;QO;o zqdocH+(7xSke=C)s?b95sDIdQ1@6mpQ8iV&32idZ6L(`=u99?2b5n6fC z&@UmYG4HekT#9LQGWw(5p}leUgp#wQ+T z(Vu6Hz1%oz+s{&UvpN9nOm}@WnI)|~zLFMGlg43rghS8D#Cig@0>((*dV;m~-uG^a zfabY3>|ZFZ$lD*fVS#P%@zX5wU5j8WuBnBo78~hP^RMH-r0JRL9C)19_chf^bn@{N zV@VN@o8%YKV6*sRGnse7+tx*e|W$n8PS7QCV%& z$?;d8`pXv0`T18PT?&$Jo*$qYLABaKTYjedv`iosI7kQ3WL3%Y+#)7%{UBTGj=`ol zOgU>RgdkVO*E=X9di8e3Ehy=TB`?}y8hK}E@m)D9mj4OD$0wscLdypNGC;6KHF9-o zFoZve0^F5XvQ5I+M6JEIf6s5tvk8~tx~+ajj<%~R+#i`4TH~}FJnA8JpmiKqW&;!1 zY*#*rxZ$lXTQFhp63OrGx9kGZjS}Yj2BtUB^a!YJ{8bIcxPW1f_)%?NRKUO~`zFH#b4e-_rqRN?8=Q9Gb0|k$Gfw)V0y2|F?KyIV|f|IDA=$!S$HswJ!UC zX4X->I^QNbmMJ=8t^Bkst$F@-3q602IbzwGLe}4+iczM<=!#%tpTo0ngCUO4J^PTk zg-SH{tb}G~lzEU2KpTEmup63-H@P?Fg#4jlGs^DrIhG&3F}&H4}o3lrvgV znpqIma0XmIxqY}#&92Yx`*FGQPyy=;?ED!=ZjFgI8MEBxI(6(O<%Yx-EDywNCp^fD zvMyUD=BQH1Q{caqyv%vs8Rxezm{L``D=V=(i=uZF&+jHStL%i``z;1_%Hfk{1x+jh#%rAC{x#Q@#3D9@YzD->QUzU(}A`i63tx51N4>JbL5 zW=H!Lv**6rs|%!psY#yNFT$%FUHnZ=BblEYTdXN(K)53>D9`o-LXOI0lH}L)-rh!B z=vcZbLEAIn>uwOqna;i6Knu>s1VZo&TM}0P+N)LS?ftmbSdN~gaeH83QbHgS0NM>= zKIgBxQO4wNXj!6W@^Aa1N>xg#=`P|*8TYCWq=m*m1IWh)OQOny^7i7}qL(Fo|7N3N zBx|pAocm*u)NLF1`)7sjvmP+kL)p=n_M%$;*NdBmZvv2uYj?{~&aTqrz82s6xi`=O zi|CTezV-(B#NHpR94EhauN+%G_*#%7&a4Mv{Np%RE-0F4B*0GK>*`yY3*`gla!D_h z;{ZWy?@q9GxR=(<^W{R5eK`-M?ZN&FM*(oxq9I^?wloufc779f#8)Nnu=^vt zUa;zQgk;o9zO317WBK*gi5<=3(hGsNg18)Y+v7CwaWaXIBn8eBSsiQ}LxPr#EVatJ zKQ`JP71>b^%lr)lN@>EuoF|OVSkEP^&G`NI@^LC`F=fW!gW7QuWp%n9BV+%( zvQ^}AdxVp-yPCcFtji0o`Il}^3|_rNotGn@i@#4ft&Tf5MG6*46ZpM+?|=E2>!Uzr z=$ju8Nfc0z%W;qXI=bQacCkEtt#L-3#AXM|2Ch+*O{xMy46mN-YDr5=Ir6u2spRVmQweXC9TFPHa@mRYB z&^jKNY??c(CU5(>^Uag#oAhPZDR(PQr(((Rq98)Cqyn;h5t4}?;Ss?xUEY#}pC3Ll zq0yjvU-WdBzq2Cin@0T`mm*c}^H{u|^l!4Xs6K_Opwo>R1|Wb7)vmbRPu(ul@J+-{ zTwq9vM5=SYrD24#0$8 z$n4dC;j7Fh+{S_*ufnfif!J0v-}{>V>Ozl4aQw?!muXW$9~T|hOp!jj@IzB}c4qll 
zyt4Q8y&ZIXk$L&|;^~sl(TaCR=^?-2*)}x2*fEB0k+$vpM2HvxIlYz*;Zchf_x@05 zXnl>zCt}O9Wj>AE*!}aar}ZD%mwB6YrANAJ5DE-sZvW*JCd_o?C&NCS3tfn^G4tC{ z**s(=i?%MY>Y#vfWY@}hym8wwnkB0Z%J!Tf3)XV{qE6 zL|2(%?50JiGqLj$n;y;=1+!uC`F^%$)W&n2vSHmc;Yx>tHTIdTl@Ufh`4LC}&p{>E z6zOmwjmh#CE-|p!Kkb)r@H?}i1W$2Z;9)eE&dxzJQv7}heIn_@+w%$ zGTDZOYRjrcyk?q{M_5VM1PY)wII~Lc;S+Kj3|@qoCOG5{6NK+;br2}~uBC%}hoYm( z>gacrDa%E3EB0pcF)O)`%3H@D^rbQbH}j|?j8HR_tG(lZDto9jpBVjV&Pk^Z5RJ7g zi2K+*JTOH!-QWf#pK$ay#}Uruy)K-UuOXcc-BHAim6yC;K`9?J*I$1Ez$Hu#U<2;F z`u;ZkOdw!#D6;kH7aqwo02d8O5#Q~s?ziu8l_%YY4Uw~*qKj!LK}a!q&{xrg<(Wxf z6MD~L-+q;p>@vH6-W327_vrP@vES<{^||$PPP5R@+QEcNP`SU?n-vfbx&5SE{!~nQ zt87;N{B|qA{w}~WlkxdZ*dC9N3}W6jwKdIS3M*Nw#+E;R{$gTWKp8)DLf0M?=S)Rb z5YJ}C_U#z~wa=rSSG!h)P8-jqrfqjFd3>!O@0wkhXSuz2?$8B%h}sUzN@cc4S_il> zOr#g^q-xlJ%jsMT1zAy03BS?!DFb3qg2-EKYWdpKYkrPe3KpN>;knJZB#)SqO@Bwd*g zxVu5@GS3iBQ;rl>W^_B$BmuKtDj}Y7eQce7bo8&gofPD{R&!jraYIa+W0i#GB4-&X zVk!iCk>7L0M>#j;&51mfA1;`e+2vTzzIi;WTt{P+hNlZK`Qj6-ZB-ume8W%3Du?2< zJs`ag6xT8ZrmH8+6FvA8oY6$X%+inSWxYH1(YB7W1z&vKQ`%wI3%8B!?clVL4qu0e zs{{uc&Nn&~OS-Yx=V)U}P7jdr=-;Meii}UxR9BvjQYB=&Zhlnq_qlgHhiI+`!c?>R2Al$a}z)cha8>O@`dd9u@5 zE&bVp+Rl^C?j-B`ntK1e*4ecS|L);V#@jk=)n?H=G`AEgGEe7%8*NqD!XGNz=M)dog9|e@Y7?^uNhkkD-D+A8268#KK5VMeE3DP!HRA%Q*GxqoJLa>Mstj z!~-p