Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Bridge: added force_set_pallet_state call to pallet-bridge-grandpa #4465

Merged
merged 7 commits into from
May 21, 2024
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions bridges/modules/grandpa/src/benchmarking.rs
Original file line number Diff line number Diff line change
Expand Up @@ -138,5 +138,19 @@ benchmarks_instance_pallet! {
assert!(!<ImportedHeaders<T, I>>::contains_key(genesis_header.hash()));
}

// Benchmark for the `force_set_pallet_state` call. Worst case: the new authority
// set is at its maximal size (`MAX_AUTHORITIES_COUNT`).
force_set_pallet_state {
// arbitrary new set id — the call stores it verbatim
let set_id = 100;
// build the largest accepted authority set; `accounts` is a benchmarking helper
// (presumably generating deterministic test account ids — defined elsewhere)
let authorities = accounts(T::BridgedChain::MAX_AUTHORITIES_COUNT as u16)
.iter()
.map(|id| (AuthorityId::from(*id), 1))
.collect::<Vec<_>>();
let (header, _) = prepare_benchmark_data::<T, I>(1, 1);
// remember the hash so `verify` can check it became the best finalized header
let expected_hash = header.hash();
}: force_set_pallet_state(RawOrigin::Root, set_id, authorities, Box::new(header))
verify {
// the forced header must now be the best finalized one...
assert_eq!(<BestFinalized<T, I>>::get().unwrap().1, expected_hash);
// ...and the forced set id must be the current one
assert_eq!(<CurrentAuthoritySet<T, I>>::get().set_id, set_id);
}

impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime)
}
178 changes: 160 additions & 18 deletions bridges/modules/grandpa/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ use bp_header_chain::{
};
use bp_runtime::{BlockNumberOf, HashOf, HasherOf, HeaderId, HeaderOf, OwnedBridgeModule};
use frame_support::{dispatch::PostDispatchInfo, ensure, DefaultNoBound};
use sp_consensus_grandpa::SetId;
use sp_consensus_grandpa::{AuthorityList, SetId};
use sp_runtime::{
traits::{Header as HeaderT, Zero},
SaturatedConversion,
Expand Down Expand Up @@ -360,6 +360,42 @@ pub mod pallet {

Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee })
}

/// Forcibly overwrite the current authority set and the best finalized bridged
/// header with the given values, performing (almost) no validation.
///
/// The call only fails when:
///
/// - the origin is neither root nor the pallet owner;
///
/// - the new authority set exceeds the maximal allowed size.
///
/// Nothing else is verified. Headers imported earlier remain in storage and
/// stay accessible after this call.
#[pallet::call_index(5)]
#[pallet::weight(T::WeightInfo::force_set_pallet_state())]
pub fn force_set_pallet_state(
origin: OriginFor<T>,
new_current_set_id: SetId,
new_authorities: AuthorityList,
new_best_header: Box<BridgedHeader<T, I>>,
) -> DispatchResult {
Self::ensure_owner_or_root(origin)?;

// Persist the replacement authority set. The only possible failure is an
// oversized set (`TooManyAuthoritiesInSet`).
let old_set_id = CurrentAuthoritySet::<T, I>::get().set_id;
save_authorities_set::<T, I>(old_set_id, new_current_set_id, new_authorities)?;

// Record the new best header. It is allowed to be older than the header the
// pallet currently considers best — nothing breaks, except that previously
// imported headers may still be used to prove things.
let best_header_hash = new_best_header.hash();
insert_header::<T, I>(*new_best_header, best_header_hash);

Ok(())
}
}

/// Number of free header submissions that we may yet accept in the current block.
Expand Down Expand Up @@ -592,33 +628,45 @@ pub mod pallet {
// GRANDPA only includes a `delay` for forced changes, so this isn't valid.
ensure!(change.delay == Zero::zero(), <Error<T, I>>::UnsupportedScheduledChange);

// TODO [#788]: Stop manually increasing the `set_id` here.
let next_authorities = StoredAuthoritySet::<T, I> {
authorities: change
.next_authorities
.try_into()
.map_err(|_| Error::<T, I>::TooManyAuthoritiesInSet)?,
set_id: current_set_id + 1,
};

// Since our header schedules a change and we know the delay is 0, it must also enact
// the change.
<CurrentAuthoritySet<T, I>>::put(&next_authorities);

log::info!(
target: LOG_TARGET,
"Transitioned from authority set {} to {}! New authorities are: {:?}",
// TODO [#788]: Stop manually increasing the `set_id` here.
return save_authorities_set::<T, I>(
current_set_id,
current_set_id + 1,
next_authorities,
change.next_authorities,
);

return Ok(Some(next_authorities.into()))
};

Ok(None)
}

/// Save new authorities set.
pub(crate) fn save_authorities_set<T: Config<I>, I: 'static>(
old_current_set_id: SetId,
new_current_set_id: SetId,
new_authorities: AuthorityList,
) -> Result<Option<AuthoritySet>, DispatchError> {
let next_authorities = StoredAuthoritySet::<T, I> {
authorities: new_authorities
.try_into()
.map_err(|_| Error::<T, I>::TooManyAuthoritiesInSet)?,
set_id: new_current_set_id,
};

<CurrentAuthoritySet<T, I>>::put(&next_authorities);

log::info!(
target: LOG_TARGET,
"Transitioned from authority set {} to {}! New authorities are: {:?}",
old_current_set_id,
new_current_set_id,
next_authorities,
);

Ok(Some(next_authorities.into()))
}

/// Verify a GRANDPA justification (finality proof) for a given header.
///
/// Will use the GRANDPA current authorities known to the pallet.
Expand Down Expand Up @@ -1700,4 +1748,98 @@ mod tests {
assert_eq!(FreeHeadersRemaining::<TestRuntime, ()>::get(), Some(0));
})
}

// End-to-end test for `force_set_pallet_state`: origin checks, authority-set
// size limit, forced (re)import of headers — including a header older than the
// current best — and the effect of the duplicated import on ring-buffer pruning.
#[test]
fn force_set_pallet_state_works() {
run_test(|| {
let header25 = test_header(25);
let header50 = test_header(50);
let ok_new_set_id = 100;
let ok_new_authorities = authority_list();
// NOTE(review): intentionally the same value as `ok_new_set_id` — only the
// authority list makes this case "bad" (it is one entry over the limit)
let bad_new_set_id = 100;
let bad_new_authorities: Vec<_> = std::iter::repeat((ALICE.into(), 1))
.take(MAX_BRIDGED_AUTHORITIES as usize + 1)
.collect();

// initialize and import several headers
initialize_substrate_bridge();
assert_ok!(submit_finality_proof(30));

// wrong origin => error
assert_noop!(
Pallet::<TestRuntime>::force_set_pallet_state(
RuntimeOrigin::signed(1),
ok_new_set_id,
ok_new_authorities.clone(),
Box::new(header50.clone()),
),
DispatchError::BadOrigin,
);

// too many authorities in the set => error
assert_noop!(
Pallet::<TestRuntime>::force_set_pallet_state(
RuntimeOrigin::root(),
bad_new_set_id,
bad_new_authorities.clone(),
Box::new(header50.clone()),
),
Error::<TestRuntime>::TooManyAuthoritiesInSet,
);

// force import header 50 => ok
assert_ok!(Pallet::<TestRuntime>::force_set_pallet_state(
RuntimeOrigin::root(),
ok_new_set_id,
ok_new_authorities.clone(),
Box::new(header50.clone()),
),);

// force import header 25 after 50 => ok (the call deliberately allows moving
// the best header backwards)
assert_ok!(Pallet::<TestRuntime>::force_set_pallet_state(
RuntimeOrigin::root(),
ok_new_set_id,
ok_new_authorities.clone(),
Box::new(header25.clone()),
),);

// regular imports still reject headers older than the (now #25) best header,
// but accept better ones signed by the forced authority set
assert_noop!(submit_finality_proof(20), Error::<TestRuntime>::OldHeader);
assert_ok!(submit_finality_proof_with_set_id(26, ok_new_set_id));

// we can even reimport header #50. It **will cause** some issues during pruning
// (see below)
assert_ok!(submit_finality_proof_with_set_id(50, ok_new_set_id));

// and all headers are available. Even though there are 4 headers, the ring
// buffer thinks that there are 5, because we've imported header #50 twice
assert!(GrandpaChainHeaders::<TestRuntime, ()>::finalized_header_state_root(
test_header(30).hash()
)
.is_some());
assert!(GrandpaChainHeaders::<TestRuntime, ()>::finalized_header_state_root(
test_header(50).hash()
)
.is_some());
assert!(GrandpaChainHeaders::<TestRuntime, ()>::finalized_header_state_root(
test_header(25).hash()
)
.is_some());
assert!(GrandpaChainHeaders::<TestRuntime, ()>::finalized_header_state_root(
test_header(26).hash()
)
.is_some());

// next header import will prune header 30
assert_ok!(submit_finality_proof_with_set_id(70, ok_new_set_id));
// next header import will prune header 50
assert_ok!(submit_finality_proof_with_set_id(80, ok_new_set_id));
// next header import will prune header 25
assert_ok!(submit_finality_proof_with_set_id(90, ok_new_set_id));
// next header import will prune header 26
assert_ok!(submit_finality_proof_with_set_id(100, ok_new_set_id));
// next header import will prune header 50 again. But it is fine
assert_ok!(submit_finality_proof_with_set_id(110, ok_new_set_id));
});
}
}
49 changes: 49 additions & 0 deletions bridges/modules/grandpa/src/weights.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ use sp_std::marker::PhantomData;
/// Weight functions needed for pallet_bridge_grandpa.
pub trait WeightInfo {
fn submit_finality_proof(p: u32, v: u32) -> Weight;
fn force_set_pallet_state() -> Weight;
}

/// Weights for `pallet_bridge_grandpa` that are generated using one of the Bridge testnets.
Expand Down Expand Up @@ -109,6 +110,30 @@ impl<T: frame_system::Config> WeightInfo for BridgeWeight<T> {
.saturating_add(T::DbWeight::get().reads(6_u64))
.saturating_add(T::DbWeight::get().writes(6_u64))
}

/// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:1)
/// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`:
/// Some(50250), added: 50745, mode: `MaxEncodedLen`)
/// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1)
/// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`:
/// Some(4), added: 499, mode: `MaxEncodedLen`) Storage: `BridgeWestendGrandpa::ImportedHashes`
/// (r:1 w:1) Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024),
/// `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`)
/// Storage: `BridgeWestendGrandpa::BestFinalized` (r:0 w:1)
/// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36),
/// added: 531, mode: `MaxEncodedLen`) Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0
/// w:2) Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`:
/// Some(68), added: 1553, mode: `MaxEncodedLen`)
fn force_set_pallet_state() -> Weight {
// Proof Size summary in bytes:
// Measured: `452`
// Estimated: `51735`
// Minimum execution time: 62_232_000 picoseconds.
Weight::from_parts(78_755_000, 0)
.saturating_add(Weight::from_parts(0, 51735))
.saturating_add(RocksDbWeight::get().reads(3))
.saturating_add(RocksDbWeight::get().writes(6))
}
}

// For backwards compatibility and tests
Expand Down Expand Up @@ -164,4 +189,28 @@ impl WeightInfo for () {
.saturating_add(RocksDbWeight::get().reads(6_u64))
.saturating_add(RocksDbWeight::get().writes(6_u64))
}

// Fallback weights (for tests / backwards compatibility). `RocksDbWeight` is the
// appropriate hard-coded DB weight for this `()` implementation, which has no
// runtime `T::DbWeight` to consult.
/// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:1)
/// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`:
/// Some(50250), added: 50745, mode: `MaxEncodedLen`)
/// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1)
/// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`:
/// Some(4), added: 499, mode: `MaxEncodedLen`) Storage: `BridgeWestendGrandpa::ImportedHashes`
/// (r:1 w:1) Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024),
/// `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`)
/// Storage: `BridgeWestendGrandpa::BestFinalized` (r:0 w:1)
/// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36),
/// added: 531, mode: `MaxEncodedLen`) Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0
/// w:2) Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`:
/// Some(68), added: 1553, mode: `MaxEncodedLen`)
fn force_set_pallet_state() -> Weight {
// Proof Size summary in bytes:
// Measured: `452`
// Estimated: `51735`
// Minimum execution time: 62_232_000 picoseconds.
Weight::from_parts(78_755_000, 0)
.saturating_add(Weight::from_parts(0, 51735))
// 3 reads / 6 writes — matches the storage-access summary above
.saturating_add(RocksDbWeight::get().reads(3))
.saturating_add(RocksDbWeight::get().writes(6))
}
}
Loading
Loading