From 1da8a6b88f530b467394a9ead8f3042ebdc50a36 Mon Sep 17 00:00:00 2001 From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Date: Wed, 20 Mar 2024 20:41:43 +0100 Subject: [PATCH 01/17] Enable PoV reclaim on `rococo-parachain` (#3765) This PR proposes enabling PoV reclaim on the `rococo-parachain` testchain to streamline testing and development of high-TPS use cases. --- Cargo.lock | 1 + cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml | 2 ++ cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs | 1 + 3 files changed, 4 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 5401dc5ecfb0..bdbf6ddac268 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14973,6 +14973,7 @@ dependencies = [ "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index 577ed749167f..790f38d94f50 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -56,6 +56,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-ping = { path = "../../../pallets/ping", default-features = false } cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } parachains-common = { path = "../../../common", default-features = false } testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } @@ -75,6 +76,7 @@ std = [ "cumulus-ping/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 1b7efa6f400a..b3bea4d4e654 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -653,6 +653,7 @@ pub type SignedExtra = ( frame_system::CheckNonce<Runtime>, frame_system::CheckWeight<Runtime>, pallet_transaction_payment::ChargeTransactionPayment<Runtime>, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>, ); /// Unchecked extrinsic type as expected by this runtime.
pub type UncheckedExtrinsic = From 93b1abb280cf9d6c64f00b66c3593c2b04d50a9e Mon Sep 17 00:00:00 2001 From: gupnik Date: Thu, 21 Mar 2024 08:41:51 +0530 Subject: [PATCH 02/17] Migrates Westend to Runtime V2 (#3754) Step in https://github.com/paritytech/polkadot-sdk/issues/3688 --- polkadot/runtime/westend/Cargo.toml | 2 +- polkadot/runtime/westend/src/lib.rs | 321 +++++++++++++++++----------- prdoc/pr_3754.prdoc | 13 ++ 3 files changed, 211 insertions(+), 125 deletions(-) create mode 100644 prdoc/pr_3754.prdoc diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index 4180828bcfb1..fcead1dd0b53 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -45,7 +45,7 @@ sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", def frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false } frame-executive = { path = "../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["tuples-96"] } +frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["experimental", "tuples-96"] } frame-system = { path = "../../../substrate/frame/system", default-features = false } frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } westend-runtime-constants = { package = "westend-runtime-constants", path = "constants", default-features = false } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index f1d8841989c4..62821cae7e49 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -17,7 +17,7 @@ //! The Westend runtime. This can be compiled with `#[no_std]`, ready for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit. +// `#[frame_support::runtime]!` does a lot of recursion and requires us to increase the limit. #![recursion_limit = "512"] use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; @@ -27,7 +27,7 @@ use beefy_primitives::{ }; use frame_election_provider_support::{bounds::ElectionBoundsBuilder, onchain, SequentialPhragmen}; use frame_support::{ - construct_runtime, derive_impl, + derive_impl, genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ @@ -1414,128 +1414,201 @@ impl pallet_asset_rate::Config for Runtime { type BenchmarkHelper = runtime_common::impls::benchmarks::AssetRateArguments; } -construct_runtime! { - pub enum Runtime - { - // Basic stuff; balances is uncallable initially. - System: frame_system = 0, - - // Babe must be before session. - Babe: pallet_babe = 1, - - Timestamp: pallet_timestamp = 2, - Indices: pallet_indices = 3, - Balances: pallet_balances = 4, - TransactionPayment: pallet_transaction_payment = 26, - - // Consensus support. - // Authorship must be before session in order to note author in the correct session and era. - Authorship: pallet_authorship = 5, - Staking: pallet_staking = 6, - Offences: pallet_offences = 7, - Historical: session_historical = 27, - - Session: pallet_session = 8, - Grandpa: pallet_grandpa = 10, - AuthorityDiscovery: pallet_authority_discovery = 12, - - // Utility module. - Utility: pallet_utility = 16, - - // Less simple identity module. 
- Identity: pallet_identity = 17, - - // Social recovery module. - Recovery: pallet_recovery = 18, - - // Vesting. Usable initially, but removed once all vesting is finished. - Vesting: pallet_vesting = 19, - - // System scheduler. - Scheduler: pallet_scheduler = 20, - - // Preimage registrar. - Preimage: pallet_preimage = 28, - - // Sudo. - Sudo: pallet_sudo = 21, - - // Proxy module. Late addition. - Proxy: pallet_proxy = 22, - - // Multisig module. Late addition. - Multisig: pallet_multisig = 23, - - // Election pallet. Only works with staking, but placed here to maintain indices. - ElectionProviderMultiPhase: pallet_election_provider_multi_phase = 24, - - // Provides a semi-sorted list of nominators for staking. - VoterList: pallet_bags_list::<Instance1> = 25, - - // Nomination pools for staking. - NominationPools: pallet_nomination_pools = 29, - - // Fast unstake pallet: extension to staking. - FastUnstake: pallet_fast_unstake = 30, - - // OpenGov - ConvictionVoting: pallet_conviction_voting = 31, - Referenda: pallet_referenda = 32, - Origins: pallet_custom_origins = 35, - Whitelist: pallet_whitelist = 36, - - // Treasury - Treasury: pallet_treasury = 37, - - // Parachains pallets. Start indices at 40 to leave room. - ParachainsOrigin: parachains_origin = 41, - Configuration: parachains_configuration = 42, - ParasShared: parachains_shared = 43, - ParaInclusion: parachains_inclusion = 44, - ParaInherent: parachains_paras_inherent = 45, - ParaScheduler: parachains_scheduler = 46, - Paras: parachains_paras = 47, - Initializer: parachains_initializer = 48, - Dmp: parachains_dmp = 49, - // RIP Ump 50 - Hrmp: parachains_hrmp = 51, - ParaSessionInfo: parachains_session_info = 52, - ParasDisputes: parachains_disputes = 53, - ParasSlashing: parachains_slashing = 54, - OnDemandAssignmentProvider: parachains_assigner_on_demand = 56, - CoretimeAssignmentProvider: parachains_assigner_coretime = 57, - - // Parachain Onboarding Pallets. Start indices at 60 to leave room. - Registrar: paras_registrar = 60, - Slots: slots = 61, - ParasSudoWrapper: paras_sudo_wrapper = 62, - Auctions: auctions = 63, - Crowdloan: crowdloan = 64, - AssignedSlots: assigned_slots = 65, - Coretime: coretime = 66, - - // Pallet for sending XCM. - XcmPallet: pallet_xcm = 99, - - // Generalized message queue - MessageQueue: pallet_message_queue = 100, - - // Asset rate. - AssetRate: pallet_asset_rate = 101, - - // Root testing pallet. - RootTesting: pallet_root_testing = 102, - - // BEEFY Bridges support. - Beefy: pallet_beefy = 200, - // MMR leaf construction must be after session in order to have a leaf's next_auth_set - // refer to block. See issue polkadot-fellows/runtimes#160 for details. - Mmr: pallet_mmr = 201, - BeefyMmrLeaf: pallet_beefy_mmr = 202, - - // Pallet for migrating Identity to a parachain. To be removed post-migration. - IdentityMigrator: identity_migrator = 248, - } +#[frame_support::runtime(legacy_ordering)] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + // Basic stuff; balances is uncallable initially. + #[runtime::pallet_index(0)] + pub type System = frame_system; + + // Babe must be before session.
+ #[runtime::pallet_index(1)] + pub type Babe = pallet_babe; + + #[runtime::pallet_index(2)] + pub type Timestamp = pallet_timestamp; + #[runtime::pallet_index(3)] + pub type Indices = pallet_indices; + #[runtime::pallet_index(4)] + pub type Balances = pallet_balances; + #[runtime::pallet_index(26)] + pub type TransactionPayment = pallet_transaction_payment; + + // Consensus support. + // Authorship must be before session in order to note author in the correct session and era. + #[runtime::pallet_index(5)] + pub type Authorship = pallet_authorship; + #[runtime::pallet_index(6)] + pub type Staking = pallet_staking; + #[runtime::pallet_index(7)] + pub type Offences = pallet_offences; + #[runtime::pallet_index(27)] + pub type Historical = session_historical; + + #[runtime::pallet_index(8)] + pub type Session = pallet_session; + #[runtime::pallet_index(10)] + pub type Grandpa = pallet_grandpa; + #[runtime::pallet_index(12)] + pub type AuthorityDiscovery = pallet_authority_discovery; + + // Utility module. + #[runtime::pallet_index(16)] + pub type Utility = pallet_utility; + + // Less simple identity module. + #[runtime::pallet_index(17)] + pub type Identity = pallet_identity; + + // Social recovery module. + #[runtime::pallet_index(18)] + pub type Recovery = pallet_recovery; + + // Vesting. Usable initially, but removed once all vesting is finished. + #[runtime::pallet_index(19)] + pub type Vesting = pallet_vesting; + + // System scheduler. + #[runtime::pallet_index(20)] + pub type Scheduler = pallet_scheduler; + + // Preimage registrar. + #[runtime::pallet_index(28)] + pub type Preimage = pallet_preimage; + + // Sudo. + #[runtime::pallet_index(21)] + pub type Sudo = pallet_sudo; + + // Proxy module. Late addition. + #[runtime::pallet_index(22)] + pub type Proxy = pallet_proxy; + + // Multisig module. Late addition. + #[runtime::pallet_index(23)] + pub type Multisig = pallet_multisig; + + // Election pallet. Only works with staking, but placed here to maintain indices. + #[runtime::pallet_index(24)] + pub type ElectionProviderMultiPhase = pallet_election_provider_multi_phase; + + // Provides a semi-sorted list of nominators for staking. + #[runtime::pallet_index(25)] + pub type VoterList = pallet_bags_list<Instance1>; + + // Nomination pools for staking. + #[runtime::pallet_index(29)] + pub type NominationPools = pallet_nomination_pools; + + // Fast unstake pallet: extension to staking. + #[runtime::pallet_index(30)] + pub type FastUnstake = pallet_fast_unstake; + + // OpenGov + #[runtime::pallet_index(31)] + pub type ConvictionVoting = pallet_conviction_voting; + #[runtime::pallet_index(32)] + pub type Referenda = pallet_referenda; + #[runtime::pallet_index(35)] + pub type Origins = pallet_custom_origins; + #[runtime::pallet_index(36)] + pub type Whitelist = pallet_whitelist; + + // Treasury + #[runtime::pallet_index(37)] + pub type Treasury = pallet_treasury; + + // Parachains pallets. Start indices at 40 to leave room.
+ #[runtime::pallet_index(41)] + pub type ParachainsOrigin = parachains_origin; + #[runtime::pallet_index(42)] + pub type Configuration = parachains_configuration; + #[runtime::pallet_index(43)] + pub type ParasShared = parachains_shared; + #[runtime::pallet_index(44)] + pub type ParaInclusion = parachains_inclusion; + #[runtime::pallet_index(45)] + pub type ParaInherent = parachains_paras_inherent; + #[runtime::pallet_index(46)] + pub type ParaScheduler = parachains_scheduler; + #[runtime::pallet_index(47)] + pub type Paras = parachains_paras; + #[runtime::pallet_index(48)] + pub type Initializer = parachains_initializer; + #[runtime::pallet_index(49)] + pub type Dmp = parachains_dmp; + // RIP Ump 50 + #[runtime::pallet_index(51)] + pub type Hrmp = parachains_hrmp; + #[runtime::pallet_index(52)] + pub type ParaSessionInfo = parachains_session_info; + #[runtime::pallet_index(53)] + pub type ParasDisputes = parachains_disputes; + #[runtime::pallet_index(54)] + pub type ParasSlashing = parachains_slashing; + #[runtime::pallet_index(56)] + pub type OnDemandAssignmentProvider = parachains_assigner_on_demand; + #[runtime::pallet_index(57)] + pub type CoretimeAssignmentProvider = parachains_assigner_coretime; + + // Parachain Onboarding Pallets. Start indices at 60 to leave room. + #[runtime::pallet_index(60)] + pub type Registrar = paras_registrar; + #[runtime::pallet_index(61)] + pub type Slots = slots; + #[runtime::pallet_index(62)] + pub type ParasSudoWrapper = paras_sudo_wrapper; + #[runtime::pallet_index(63)] + pub type Auctions = auctions; + #[runtime::pallet_index(64)] + pub type Crowdloan = crowdloan; + #[runtime::pallet_index(65)] + pub type AssignedSlots = assigned_slots; + #[runtime::pallet_index(66)] + pub type Coretime = coretime; + + // Pallet for sending XCM. + #[runtime::pallet_index(99)] + pub type XcmPallet = pallet_xcm; + + // Generalized message queue + #[runtime::pallet_index(100)] + pub type MessageQueue = pallet_message_queue; + + // Asset rate. + #[runtime::pallet_index(101)] + pub type AssetRate = pallet_asset_rate; + + // Root testing pallet. + #[runtime::pallet_index(102)] + pub type RootTesting = pallet_root_testing; + + // BEEFY Bridges support. + #[runtime::pallet_index(200)] + pub type Beefy = pallet_beefy; + // MMR leaf construction must be after session in order to have a leaf's next_auth_set + // refer to block. See issue polkadot-fellows/runtimes#160 for details. + #[runtime::pallet_index(201)] + pub type Mmr = pallet_mmr; + #[runtime::pallet_index(202)] + pub type BeefyMmrLeaf = pallet_beefy_mmr; + + // Pallet for migrating Identity to a parachain. To be removed post-migration. + #[runtime::pallet_index(248)] + pub type IdentityMigrator = identity_migrator; } /// The address format for describing accounts. 
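For readers new to the declaration style, the following condensed sketch (not part of the diff; the pallet list and derive set are abbreviated from the full Westend declaration above) shows the shape a runtime takes after this migration:

```rust
// Condensed sketch of the Runtime V2 style applied by this patch. The real
// Westend declaration above lists every pallet and derived type explicitly.
#[frame_support::runtime(legacy_ordering)]
mod runtime {
	#[runtime::runtime]
	#[runtime::derive(RuntimeCall, RuntimeEvent, RuntimeError, RuntimeOrigin)]
	pub struct Runtime;

	// Explicit indices are carried over from the old declaration so that
	// call/event encoding stays byte-for-byte identical.
	#[runtime::pallet_index(0)]
	pub type System = frame_system;
	#[runtime::pallet_index(4)]
	pub type Balances = pallet_balances;
}
```

The `legacy_ordering` argument, together with the explicit `#[runtime::pallet_index(..)]` attributes, is what preserves the encoding of the old `construct_runtime!` block across the migration.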
diff --git a/prdoc/pr_3754.prdoc b/prdoc/1.9.0/pr_3754.prdoc new file mode 100644 index 000000000000..94ea6d566088 --- /dev/null +++ b/prdoc/pr_3754.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Migrates Westend to Runtime V2 + +doc: + - audience: Runtime Dev + description: | + This PR migrates Westend from `construct_runtime` to Runtime V2 + as introduced in https://github.com/paritytech/polkadot-sdk/pull/1378 + +crates: + - name: westend-runtime From 7b6b061e32f65b92574deaf8c199855e2c59d4c6 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Thu, 21 Mar 2024 10:00:10 +0100 Subject: [PATCH 03/17] [Backport] version bumps and prdocs reordering 1.9.0 (#3758) This PR backports: - node version bump - `spec_version` bump - reordering of the `prdocs` to the appropriate folder from the `1.9.0` release branch --- .../parachains/runtimes/assets/asset-hub-rococo/src/lib.rs | 4 ++-- .../parachains/runtimes/assets/asset-hub-westend/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../runtimes/collectives/collectives-westend/src/lib.rs | 2 +- .../parachains/runtimes/contracts/contracts-rococo/src/lib.rs | 2 +- .../parachains/runtimes/coretime/coretime-rococo/src/lib.rs | 2 +- .../parachains/runtimes/coretime/coretime-westend/src/lib.rs | 2 +- .../parachains/runtimes/glutton/glutton-westend/src/lib.rs | 2 +- cumulus/parachains/runtimes/people/people-rococo/src/lib.rs | 2 +- cumulus/parachains/runtimes/people/people-westend/src/lib.rs | 2 +- .../parachains/runtimes/testing/rococo-parachain/src/lib.rs | 2 +- polkadot/node/primitives/src/lib.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- prdoc/{ => 1.9.0}/pr_1378.prdoc | 0 prdoc/{ => 1.9.0}/pr_1554.prdoc | 0 prdoc/{ => 1.9.0}/pr_1781.prdoc | 0 prdoc/{ => 1.9.0}/pr_2393.prdoc | 0 prdoc/{ => 1.9.0}/pr_3002.prdoc | 0 prdoc/{ => 1.9.0}/pr_3187.prdoc | 0 prdoc/{ => 1.9.0}/pr_3231.prdoc | 0 prdoc/{ => 1.9.0}/pr_3233.prdoc | 0 prdoc/{ => 1.9.0}/pr_3324.prdoc | 0 prdoc/{ => 1.9.0}/pr_3371.prdoc | 0 prdoc/{ => 1.9.0}/pr_3377.prdoc | 0 prdoc/{ => 1.9.0}/pr_3378.prdoc | 0 prdoc/{ => 1.9.0}/pr_3403.prdoc | 0 prdoc/{ => 1.9.0}/pr_3411.prdoc | 0 prdoc/{ => 1.9.0}/pr_3412.prdoc | 0 prdoc/{ => 1.9.0}/pr_3447.prdoc | 0 prdoc/{ => 1.9.0}/pr_3453.prdoc | 0 prdoc/{ => 1.9.0}/pr_3454.prdoc | 0 prdoc/{ => 1.9.0}/pr_3456.prdoc | 0 prdoc/{ => 1.9.0}/pr_3460.prdoc | 0 prdoc/{ => 1.9.0}/pr_3491.prdoc | 0 prdoc/{ => 1.9.0}/pr_3504.prdoc | 0 prdoc/{ => 1.9.0}/pr_3505.prdoc | 0 prdoc/{ => 1.9.0}/pr_3510.prdoc | 0 prdoc/{ => 1.9.0}/pr_3513.prdoc | 0 prdoc/{ => 1.9.0}/pr_3523.prdoc | 0 prdoc/{ => 1.9.0}/pr_3532.prdoc | 0 prdoc/{ => 1.9.0}/pr_3540.prdoc | 0 prdoc/{ => 1.9.0}/pr_3574.prdoc | 0 prdoc/{ => 1.9.0}/pr_3589.prdoc | 0 prdoc/{ => 1.9.0}/pr_3606.prdoc | 0 prdoc/{ => 1.9.0}/pr_3636.prdoc | 0 prdoc/{ => 1.9.0}/pr_3639.prdoc | 0 prdoc/{ => 1.9.0}/pr_3643.prdoc | 0 prdoc/{ => 1.9.0}/pr_3665.prdoc | 0 50 files changed, 16 insertions(+), 16 deletions(-) rename prdoc/{ => 1.9.0}/pr_1378.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_1554.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_1781.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_2393.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3002.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3187.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3231.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3233.prdoc (100%)
rename prdoc/{ => 1.9.0}/pr_3324.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3371.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3377.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3378.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3403.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3411.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3412.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3447.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3453.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3454.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3456.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3460.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3491.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3504.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3505.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3510.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3513.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3523.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3532.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3540.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3574.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3589.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3606.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3636.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3639.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3643.prdoc (100%) rename prdoc/{ => 1.9.0}/pr_3665.prdoc (100%) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index d3d8a495ea3f..689d8d56c48b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -113,7 +113,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 711fadff92a0..48106b5f302d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -110,7 +110,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index bbafcd3c7ddd..bf7483179f29 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -202,7 +202,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 4, diff --git 
a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 11ab9aecc617..9bdea6b9a7dd 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -176,7 +176,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-westend"), impl_name: create_runtime_str!("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 4, diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 55269283e081..d3f588bf25ff 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -117,7 +117,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("collectives-westend"), impl_name: create_runtime_str!("collectives-westend"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 17bb45f6b1b8..e1586c7d9b29 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -133,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 26add2ee9424..86eb5cdfcaf5 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -133,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-rococo"), impl_name: create_runtime_str!("coretime-rococo"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 76732c3fccc7..c31e474cc2f1 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -133,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-westend"), impl_name: create_runtime_str!("coretime-westend"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index fe18f48fbb3c..cee17cdc7b05 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ 
b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -100,7 +100,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("glutton-westend"), impl_name: create_runtime_str!("glutton-westend"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 2bb5641b4af0..ad369583f07f 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-rococo"), impl_name: create_runtime_str!("people-rococo"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index b81f7a9c9695..c76611ad2a3a 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-westend"), impl_name: create_runtime_str!("people-westend"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index b3bea4d4e654..c60061419817 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -107,7 +107,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("test-parachain"), impl_name: create_runtime_str!("test-parachain"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index d295c21cce1d..b6556e0be56b 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -58,7 +58,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. 
-pub const NODE_VERSION: &'static str = "1.8.0"; +pub const NODE_VERSION: &'static str = "1.9.0"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index a773eeb5cbdb..c9f5d81d286a 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -149,7 +149,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 24, diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 62821cae7e49..83d47508c7c7 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -150,7 +150,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 24, diff --git a/prdoc/pr_1378.prdoc b/prdoc/1.9.0/pr_1378.prdoc similarity index 100% rename from prdoc/pr_1378.prdoc rename to prdoc/1.9.0/pr_1378.prdoc diff --git a/prdoc/pr_1554.prdoc b/prdoc/1.9.0/pr_1554.prdoc similarity index 100% rename from prdoc/pr_1554.prdoc rename to prdoc/1.9.0/pr_1554.prdoc diff --git a/prdoc/pr_1781.prdoc b/prdoc/1.9.0/pr_1781.prdoc similarity index 100% rename from prdoc/pr_1781.prdoc rename to prdoc/1.9.0/pr_1781.prdoc diff --git a/prdoc/pr_2393.prdoc b/prdoc/1.9.0/pr_2393.prdoc similarity index 100% rename from prdoc/pr_2393.prdoc rename to prdoc/1.9.0/pr_2393.prdoc diff --git a/prdoc/pr_3002.prdoc b/prdoc/1.9.0/pr_3002.prdoc similarity index 100% rename from prdoc/pr_3002.prdoc rename to prdoc/1.9.0/pr_3002.prdoc diff --git a/prdoc/pr_3187.prdoc b/prdoc/1.9.0/pr_3187.prdoc similarity index 100% rename from prdoc/pr_3187.prdoc rename to prdoc/1.9.0/pr_3187.prdoc diff --git a/prdoc/pr_3231.prdoc b/prdoc/1.9.0/pr_3231.prdoc similarity index 100% rename from prdoc/pr_3231.prdoc rename to prdoc/1.9.0/pr_3231.prdoc diff --git a/prdoc/pr_3233.prdoc b/prdoc/1.9.0/pr_3233.prdoc similarity index 100% rename from prdoc/pr_3233.prdoc rename to prdoc/1.9.0/pr_3233.prdoc diff --git a/prdoc/pr_3324.prdoc b/prdoc/1.9.0/pr_3324.prdoc similarity index 100% rename from prdoc/pr_3324.prdoc rename to prdoc/1.9.0/pr_3324.prdoc diff --git a/prdoc/pr_3371.prdoc b/prdoc/1.9.0/pr_3371.prdoc similarity index 100% rename from prdoc/pr_3371.prdoc rename to prdoc/1.9.0/pr_3371.prdoc diff --git a/prdoc/pr_3377.prdoc b/prdoc/1.9.0/pr_3377.prdoc similarity index 100% rename from prdoc/pr_3377.prdoc rename to prdoc/1.9.0/pr_3377.prdoc diff --git a/prdoc/pr_3378.prdoc b/prdoc/1.9.0/pr_3378.prdoc similarity index 100% rename from prdoc/pr_3378.prdoc rename to prdoc/1.9.0/pr_3378.prdoc diff --git a/prdoc/pr_3403.prdoc b/prdoc/1.9.0/pr_3403.prdoc similarity index 100% rename from prdoc/pr_3403.prdoc rename to prdoc/1.9.0/pr_3403.prdoc diff --git a/prdoc/pr_3411.prdoc b/prdoc/1.9.0/pr_3411.prdoc similarity index 100% rename from prdoc/pr_3411.prdoc rename to prdoc/1.9.0/pr_3411.prdoc diff --git a/prdoc/pr_3412.prdoc b/prdoc/1.9.0/pr_3412.prdoc similarity index 100% rename from prdoc/pr_3412.prdoc rename to prdoc/1.9.0/pr_3412.prdoc diff --git a/prdoc/pr_3447.prdoc 
b/prdoc/1.9.0/pr_3447.prdoc similarity index 100% rename from prdoc/pr_3447.prdoc rename to prdoc/1.9.0/pr_3447.prdoc diff --git a/prdoc/pr_3453.prdoc b/prdoc/1.9.0/pr_3453.prdoc similarity index 100% rename from prdoc/pr_3453.prdoc rename to prdoc/1.9.0/pr_3453.prdoc diff --git a/prdoc/pr_3454.prdoc b/prdoc/1.9.0/pr_3454.prdoc similarity index 100% rename from prdoc/pr_3454.prdoc rename to prdoc/1.9.0/pr_3454.prdoc diff --git a/prdoc/pr_3456.prdoc b/prdoc/1.9.0/pr_3456.prdoc similarity index 100% rename from prdoc/pr_3456.prdoc rename to prdoc/1.9.0/pr_3456.prdoc diff --git a/prdoc/pr_3460.prdoc b/prdoc/1.9.0/pr_3460.prdoc similarity index 100% rename from prdoc/pr_3460.prdoc rename to prdoc/1.9.0/pr_3460.prdoc diff --git a/prdoc/pr_3491.prdoc b/prdoc/1.9.0/pr_3491.prdoc similarity index 100% rename from prdoc/pr_3491.prdoc rename to prdoc/1.9.0/pr_3491.prdoc diff --git a/prdoc/pr_3504.prdoc b/prdoc/1.9.0/pr_3504.prdoc similarity index 100% rename from prdoc/pr_3504.prdoc rename to prdoc/1.9.0/pr_3504.prdoc diff --git a/prdoc/pr_3505.prdoc b/prdoc/1.9.0/pr_3505.prdoc similarity index 100% rename from prdoc/pr_3505.prdoc rename to prdoc/1.9.0/pr_3505.prdoc diff --git a/prdoc/pr_3510.prdoc b/prdoc/1.9.0/pr_3510.prdoc similarity index 100% rename from prdoc/pr_3510.prdoc rename to prdoc/1.9.0/pr_3510.prdoc diff --git a/prdoc/pr_3513.prdoc b/prdoc/1.9.0/pr_3513.prdoc similarity index 100% rename from prdoc/pr_3513.prdoc rename to prdoc/1.9.0/pr_3513.prdoc diff --git a/prdoc/pr_3523.prdoc b/prdoc/1.9.0/pr_3523.prdoc similarity index 100% rename from prdoc/pr_3523.prdoc rename to prdoc/1.9.0/pr_3523.prdoc diff --git a/prdoc/pr_3532.prdoc b/prdoc/1.9.0/pr_3532.prdoc similarity index 100% rename from prdoc/pr_3532.prdoc rename to prdoc/1.9.0/pr_3532.prdoc diff --git a/prdoc/pr_3540.prdoc b/prdoc/1.9.0/pr_3540.prdoc similarity index 100% rename from prdoc/pr_3540.prdoc rename to prdoc/1.9.0/pr_3540.prdoc diff --git a/prdoc/pr_3574.prdoc b/prdoc/1.9.0/pr_3574.prdoc similarity index 100% rename from prdoc/pr_3574.prdoc rename to prdoc/1.9.0/pr_3574.prdoc diff --git a/prdoc/pr_3589.prdoc b/prdoc/1.9.0/pr_3589.prdoc similarity index 100% rename from prdoc/pr_3589.prdoc rename to prdoc/1.9.0/pr_3589.prdoc diff --git a/prdoc/pr_3606.prdoc b/prdoc/1.9.0/pr_3606.prdoc similarity index 100% rename from prdoc/pr_3606.prdoc rename to prdoc/1.9.0/pr_3606.prdoc diff --git a/prdoc/pr_3636.prdoc b/prdoc/1.9.0/pr_3636.prdoc similarity index 100% rename from prdoc/pr_3636.prdoc rename to prdoc/1.9.0/pr_3636.prdoc diff --git a/prdoc/pr_3639.prdoc b/prdoc/1.9.0/pr_3639.prdoc similarity index 100% rename from prdoc/pr_3639.prdoc rename to prdoc/1.9.0/pr_3639.prdoc diff --git a/prdoc/pr_3643.prdoc b/prdoc/1.9.0/pr_3643.prdoc similarity index 100% rename from prdoc/pr_3643.prdoc rename to prdoc/1.9.0/pr_3643.prdoc diff --git a/prdoc/pr_3665.prdoc b/prdoc/1.9.0/pr_3665.prdoc similarity index 100% rename from prdoc/pr_3665.prdoc rename to prdoc/1.9.0/pr_3665.prdoc From 75074952a859f90213ea25257b71ec2189dbcfc1 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Thu, 21 Mar 2024 10:00:40 +0100 Subject: [PATCH 04/17] [Backport] Reformat release notes generation (#3759) This PR backports small reformatting of the release notes templates. 
--- scripts/release/build-changelogs.sh | 1 + scripts/release/templates/audience.md.tera | 6 ++---- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/scripts/release/build-changelogs.sh b/scripts/release/build-changelogs.sh index a9275f45a50c..cbfb7ad0e48f 100755 --- a/scripts/release/build-changelogs.sh +++ b/scripts/release/build-changelogs.sh @@ -48,6 +48,7 @@ for audience in "${audiences[@]}"; do echo "Processing audience: $audience ($audience_id)" export TARGET_AUDIENCE=$audience tera -t "${TEMPLATE_AUDIENCE}" --env --env-key env "${CONTEXT_JSON}" > "$OUTPUT/relnote_${audience_id}.md" + cat "$OUTPUT/relnote_${audience_id}.md" >> "$OUTPUT/relnote_combined.md" done # Show the files diff --git a/scripts/release/templates/audience.md.tera b/scripts/release/templates/audience.md.tera index dc507053dd5a..0b47850e3a37 100644 --- a/scripts/release/templates/audience.md.tera +++ b/scripts/release/templates/audience.md.tera @@ -1,11 +1,9 @@ -## Release {{ env.PRODUCT }} {{ env.VERSION }} - -Changelog for `{{ env.TARGET_AUDIENCE }}`. +### Changelog for `{{ env.TARGET_AUDIENCE }}` {% for file in prdoc -%} -#### PR #{{file.doc_filename.number}}: {{ file.content.title }} {% for doc_item in file.content.doc %} {%- if doc_item.audience == env.TARGET_AUDIENCE %} +#### [#{{file.doc_filename.number}}]: {{ file.content.title }} {{ doc_item.description }} {% endif -%} From 4842faf65d3628586d304fbcb6cb19b17b4a629c Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Thu, 21 Mar 2024 12:10:45 +0200 Subject: [PATCH 05/17] Elastic scaling: runtime dependency tracking and enactment (#3479) Changes needed to implement the runtime part of elastic scaling: https://github.com/paritytech/polkadot-sdk/issues/3131, https://github.com/paritytech/polkadot-sdk/issues/3132, https://github.com/paritytech/polkadot-sdk/issues/3202 Also fixes https://github.com/paritytech/polkadot-sdk/issues/3675 TODOs: - [x] storage migration - [x] optimise process_candidates from O(N^2) - [x] drop backable candidates which form cycles - [x] fix unit tests - [x] add more unit tests - [x] check the runtime APIs which use the pending availability storage. We need to expose all of them, see https://github.com/paritytech/polkadot-sdk/issues/3576 - [x] optimise the candidate selection. we're currently picking randomly until we satisfy the weight limit. 
we need to be smart about not breaking candidate chains while being fair to all paras - https://github.com/paritytech/polkadot-sdk/pull/3573 Relies on the changes made in https://github.com/paritytech/polkadot-sdk/pull/3233 in terms of the inclusion policy and the candidate ordering --------- Signed-off-by: alindima Co-authored-by: command-bot <> Co-authored-by: eskimor --- .../src/runtime/inclusion.md | 11 +- .../src/runtime/parainherent.md | 2 +- .../src/runtime/scheduler.md | 1 - polkadot/runtime/parachains/src/builder.rs | 109 +- .../parachains/src/inclusion/migration.rs | 317 ++++ .../runtime/parachains/src/inclusion/mod.rs | 803 ++++----- .../runtime/parachains/src/inclusion/tests.rs | 1241 ++++++++----- .../src/paras_inherent/benchmarking.rs | 8 - .../parachains/src/paras_inherent/mod.rs | 599 +++--- .../parachains/src/paras_inherent/tests.rs | 1604 +++++++++++++++-- .../parachains/src/runtime_api_impl/v7.rs | 84 +- polkadot/runtime/parachains/src/scheduler.rs | 10 - polkadot/runtime/parachains/src/util.rs | 19 +- polkadot/runtime/rococo/src/lib.rs | 2 + .../runtime_parachains_paras_inherent.rs | 421 +++-- polkadot/runtime/westend/src/lib.rs | 1 + .../runtime_parachains_paras_inherent.rs | 583 +++--- prdoc/pr_3479.prdoc | 8 + 18 files changed, 4075 insertions(+), 1748 deletions(-) create mode 100644 polkadot/runtime/parachains/src/inclusion/migration.rs create mode 100644 prdoc/pr_3479.prdoc diff --git a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md index f6a32a01d502..fd74f33253b7 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md @@ -147,15 +147,16 @@ All failed checks should lead to an unrecoverable error making the block invalid // return a vector of cleaned-up core IDs. } ``` -* `force_enact(ParaId)`: Forcibly enact the candidate with the given ID as though it had been deemed available by - bitfields. Is a no-op if there is no candidate pending availability for this para-id. This should generally not be - used but it is useful during execution of Runtime APIs, where the changes to the state are expected to be discarded - directly after. +* `force_enact(ParaId)`: Forcibly enact the pending candidates of the given paraid as though they had been deemed + available by bitfields. Is a no-op if there is no candidate pending availability for this para-id. + If there are multiple candidates pending availability for this para-id, it will enact all of + them. This should generally not be used but it is useful during execution of Runtime APIs, + where the changes to the state are expected to be discarded directly after. * `candidate_pending_availability(ParaId) -> Option<CommittedCandidateReceipt>`: returns the `CommittedCandidateReceipt` pending availability for the para provided, if any. * `pending_availability(ParaId) -> Option<CandidatePendingAvailability>`: returns the metadata around the candidate pending availability for the para, if any. -* `collect_disputed(disputed: Vec<CandidateHash>) -> Vec<CoreIndex>`: Sweeps through all paras pending availability. If +* `free_disputed(disputed: Vec<CandidateHash>) -> Vec<CoreIndex>`: Sweeps through all paras pending availability. If the candidate hash is one of the disputed candidates, then clean up the corresponding storage for that candidate and the commitments. Return a vector of cleaned-up core IDs.
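As a reading aid, here is an illustrative sketch of the sweep described by the `free_disputed` bullet above. The types are simplified stand-ins, not the actual pallet code, and the suffix-dropping encodes the elastic-scaling assumption that candidates later in a para's pending chain depend on earlier ones:

```rust
use std::collections::{BTreeSet, HashMap, VecDeque};

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct CandidateHash(u64);
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct CoreIndex(u32);
struct Pending {
    hash: CandidateHash,
    core: CoreIndex,
}

/// Drop every pending candidate whose hash is disputed, plus everything
/// after it in the same para's chain (those build on a dead branch), and
/// return the cores they occupied.
fn free_disputed(
    pending: &mut HashMap<u32, VecDeque<Pending>>,
    disputed: &BTreeSet<CandidateHash>,
) -> Vec<CoreIndex> {
    let mut freed = Vec::new();
    for queue in pending.values_mut() {
        if let Some(pos) = queue.iter().position(|c| disputed.contains(&c.hash)) {
            freed.extend(queue.drain(pos..).map(|c| c.core));
        }
    }
    freed.sort();
    freed
}
```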
diff --git a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md index 5419ddae83d4..1345f0eea95e 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md @@ -17,7 +17,7 @@ There are a couple of important notes to the operations in this inherent as they this fork. 1. When disputes are initiated, we remove the block from pending availability. This allows us to roll back chains to the block before blocks are included as opposed to backing. It's important to do this before processing bitfields. -1. `Inclusion::collect_disputed` is kind of expensive so it's important to gate this on whether there are actually any +1. `Inclusion::free_disputed` is kind of expensive so it's important to gate this on whether there are actually any new disputes. Which should be never. 1. And we don't accept parablocks that have open disputes or disputes that have concluded against the candidate. It's important to import dispute statements before backing, but this is already the case as disputes are imported before diff --git a/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md b/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md index 32a7fe652dbc..04b221a83e58 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md @@ -285,7 +285,6 @@ No finalization routine runs for this module. - This clears them from `Scheduled` and marks each corresponding `core` in the `AvailabilityCores` as occupied. - Since both the availability cores and the newly-occupied cores lists are sorted ascending, this method can be implemented efficiently. -- `core_para(CoreIndex) -> ParaId`: return the currently-scheduled or occupied ParaId for the given core. - `group_validators(GroupIndex) -> Option<Vec<ValidatorId>>`: return all validators in a given group, if the group index is valid for this session. - `availability_timeout_predicate() -> Option<impl Fn(BlockNumber) -> bool>`: returns an optional predicate diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 05f6845f3b7c..73617010f6de 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -40,7 +40,7 @@ use sp_runtime::{ RuntimeAppPublic, }; use sp_std::{ - collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque}, prelude::Vec, vec, }; @@ -104,6 +104,8 @@ pub(crate) struct BenchBuilder<T: paras_inherent::Config> { code_upgrade: Option<u32>, /// Specifies whether the claimqueue should be filled. fill_claimqueue: bool, + /// Cores which should not be available when being populated with pending candidates. + unavailable_cores: Vec<u32>, _phantom: sp_std::marker::PhantomData<T>, } @@ -133,6 +135,7 @@ impl<T: paras_inherent::Config> BenchBuilder<T> { elastic_paras: Default::default(), code_upgrade: None, fill_claimqueue: true, + unavailable_cores: vec![], _phantom: sp_std::marker::PhantomData::<T>, } } @@ -149,6 +152,12 @@ self } + /// Set the cores which should not be available when being populated with pending candidates. + pub(crate) fn set_unavailable_cores(mut self, unavailable_cores: Vec<u32>) -> Self { + self.unavailable_cores = unavailable_cores; + self + } + /// Set a map from para id seed to number of validity votes.
pub(crate) fn set_backed_and_concluding_paras( mut self, @@ -159,7 +168,6 @@ } /// Set a map from para id seed to number of cores assigned to it. - #[cfg(feature = "runtime-benchmarks")] pub(crate) fn set_elastic_paras(mut self, elastic_paras: BTreeMap<u32, u8>) -> Self { self.elastic_paras = elastic_paras; self @@ -284,11 +292,13 @@ impl<T: paras_inherent::Config> BenchBuilder<T> { core_idx: CoreIndex, candidate_hash: CandidateHash, availability_votes: BitVec<u8, BitOrderLsb0>, + commitments: CandidateCommitments, ) -> inclusion::CandidatePendingAvailability<T::Hash, BlockNumberFor<T>> { inclusion::CandidatePendingAvailability::<T::Hash, BlockNumberFor<T>>::new( core_idx, // core candidate_hash, // hash Self::candidate_descriptor_mock(), // candidate descriptor + commitments, // commitments availability_votes, // availability votes Default::default(), // backers Zero::zero(), // relay parent @@ -309,12 +319,6 @@ impl<T: paras_inherent::Config> BenchBuilder<T> { availability_votes: BitVec<u8, BitOrderLsb0>, candidate_hash: CandidateHash, ) { - let candidate_availability = Self::candidate_availability_mock( - group_idx, - core_idx, - candidate_hash, - availability_votes, - ); let commitments = CandidateCommitments::<u32> { upward_messages: Default::default(), horizontal_messages: Default::default(), @@ -323,16 +327,29 @@ impl<T: paras_inherent::Config> BenchBuilder<T> { processed_downward_messages: 0, hrmp_watermark: 0u32.into(), }; - inclusion::PendingAvailability::<T>::insert(para_id, candidate_availability); - inclusion::PendingAvailabilityCommitments::<T>::insert(&para_id, commitments); + let candidate_availability = Self::candidate_availability_mock( + group_idx, + core_idx, + candidate_hash, + availability_votes, + commitments, + ); + inclusion::PendingAvailability::<T>::mutate(para_id, |maybe_candidates| { + if let Some(candidates) = maybe_candidates { + candidates.push_back(candidate_availability); + } else { + *maybe_candidates = + Some([candidate_availability].into_iter().collect::<VecDeque<_>>()); + } + }); } /// Create an `AvailabilityBitfield` where `concluding` is a map where each key is a core index /// that is concluding and `cores` is the total number of cores in the system. - fn availability_bitvec(concluding: &BTreeMap<u32, u32>, cores: usize) -> AvailabilityBitfield { + fn availability_bitvec(concluding_cores: &BTreeSet<u32>, cores: usize) -> AvailabilityBitfield { let mut bitfields = bitvec::bitvec![u8, bitvec::order::Lsb0; 0; 0]; for i in 0..cores { - if concluding.get(&(i as u32)).is_some() { + if concluding_cores.contains(&(i as u32)) { bitfields.push(true); } else { bitfields.push(false) } @@ -356,13 +373,13 @@ } } - /// Register `cores` count of parachains. + /// Register `n_paras` count of parachains. /// /// Note that this must be called at least 2 sessions before the target session as there is a /// n+2 session delay for the scheduled actions to take effect. - fn setup_para_ids(cores: usize) { + fn setup_para_ids(n_paras: usize) { // make sure parachains exist prior to session change.
- for i in 0..cores { + for i in 0..n_paras { let para_id = ParaId::from(i as u32); let validation_code = mock_validation_code(); @@ -472,24 +489,8 @@ impl<T: paras_inherent::Config> BenchBuilder<T> { let validators = self.validators.as_ref().expect("must have some validators prior to calling"); - let availability_bitvec = Self::availability_bitvec(concluding_paras, total_cores); - - let bitfields: Vec<UncheckedSigned<AvailabilityBitfield>> = validators - .iter() - .enumerate() - .map(|(i, public)| { - let unchecked_signed = UncheckedSigned::<AvailabilityBitfield>::benchmark_sign( - public, - availability_bitvec.clone(), - &self.signing_context(), - ValidatorIndex(i as u32), - ); - - unchecked_signed - }) - .collect(); - let mut current_core_idx = 0u32; + let mut concluding_cores = BTreeSet::new(); for (seed, _) in concluding_paras.iter() { // make sure the candidates that will be concluding are marked as pending availability. @@ -505,13 +506,34 @@ para_id, core_idx, group_idx, - Self::validator_availability_votes_yes(validators.len()), + // No validators have made this candidate available yet. + bitvec::bitvec![u8, bitvec::order::Lsb0; 0; validators.len()], CandidateHash(H256::from(byte32_slice_from(current_core_idx))), ); + if !self.unavailable_cores.contains(&current_core_idx) { + concluding_cores.insert(current_core_idx); + } current_core_idx += 1; } } + let availability_bitvec = Self::availability_bitvec(&concluding_cores, total_cores); + + let bitfields: Vec<UncheckedSigned<AvailabilityBitfield>> = validators + .iter() + .enumerate() + .map(|(i, public)| { + let unchecked_signed = UncheckedSigned::<AvailabilityBitfield>::benchmark_sign( + public, + availability_bitvec.clone(), + &self.signing_context(), + ValidatorIndex(i as u32), + ); + + unchecked_signed + }) + .collect(); + bitfields } @@ -522,7 +544,7 @@ /// validity votes. fn create_backed_candidates( &self, - cores_with_backed_candidates: &BTreeMap<u32, u32>, + paras_with_backed_candidates: &BTreeMap<u32, u32>, elastic_paras: &BTreeMap<u32, u8>, includes_code_upgrade: Option<u32>, ) -> Vec<BackedCandidate<T::Hash>> { @@ -531,7 +553,7 @@ let config = configuration::Pallet::<T>::config(); let mut current_core_idx = 0u32; - cores_with_backed_candidates + paras_with_backed_candidates .iter() .flat_map(|(seed, num_votes)| { assert!(*num_votes <= validators.len() as u32); @@ -760,7 +782,7 @@ // NOTE: there is an n+2 session delay for these actions to take effect. // We are currently in Session 0, so these changes will take effect in Session 2. - Self::setup_para_ids(used_cores); + Self::setup_para_ids(used_cores - extra_cores); configuration::ActiveConfig::<T>::mutate(|c| { c.scheduler_params.num_cores = used_cores as u32; }); @@ -782,11 +804,11 @@ let disputes = builder.create_disputes( builder.backed_and_concluding_paras.len() as u32, - used_cores as u32, + (used_cores - extra_cores) as u32, builder.dispute_sessions.as_slice(), ); let mut disputed_cores = (builder.backed_and_concluding_paras.len() as u32.. - used_cores as u32) + ((used_cores - extra_cores) as u32)) .into_iter() .map(|idx| (idx, 0)) .collect::<BTreeMap<_, _>>(); @@ -794,7 +816,7 @@ let mut all_cores = builder.backed_and_concluding_paras.clone(); all_cores.append(&mut disputed_cores); - assert_eq!(inclusion::PendingAvailability::<T>::iter().count(), used_cores as usize,); + assert_eq!(inclusion::PendingAvailability::<T>::iter().count(), used_cores - extra_cores); // Mark all the used cores as occupied.
We expect that there are // `backed_and_concluding_paras` that are pending availability and that there are @@ -831,7 +853,7 @@ impl BenchBuilder { .keys() .flat_map(|para_id| { (0..elastic_paras.get(¶_id).cloned().unwrap_or(1)) - .map(|_para_local_core_idx| { + .filter_map(|_para_local_core_idx| { let ttl = configuration::Pallet::::config().scheduler_params.ttl; // Load an assignment into provider so that one is present to pop let assignment = @@ -844,8 +866,13 @@ impl BenchBuilder { CoreIndex(core_idx), [ParasEntry::new(assignment, now + ttl)].into(), ); + let res = if builder.unavailable_cores.contains(&core_idx) { + None + } else { + Some(entry) + }; core_idx += 1; - entry + res }) .collect::>)>>() }) diff --git a/polkadot/runtime/parachains/src/inclusion/migration.rs b/polkadot/runtime/parachains/src/inclusion/migration.rs new file mode 100644 index 000000000000..1e63b209f4e7 --- /dev/null +++ b/polkadot/runtime/parachains/src/inclusion/migration.rs @@ -0,0 +1,317 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +pub use v1::MigrateToV1; + +pub mod v0 { + use crate::inclusion::{Config, Pallet}; + use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; + use frame_support::{storage_alias, Twox64Concat}; + use frame_system::pallet_prelude::BlockNumberFor; + use parity_scale_codec::{Decode, Encode}; + use primitives::{ + AvailabilityBitfield, CandidateCommitments, CandidateDescriptor, CandidateHash, CoreIndex, + GroupIndex, Id as ParaId, ValidatorIndex, + }; + use scale_info::TypeInfo; + + #[derive(Encode, Decode, PartialEq, TypeInfo, Clone, Debug)] + pub struct CandidatePendingAvailability { + pub core: CoreIndex, + pub hash: CandidateHash, + pub descriptor: CandidateDescriptor, + pub availability_votes: BitVec, + pub backers: BitVec, + pub relay_parent_number: N, + pub backed_in_number: N, + pub backing_group: GroupIndex, + } + + #[derive(Encode, Decode, TypeInfo, Debug, PartialEq)] + pub struct AvailabilityBitfieldRecord { + pub bitfield: AvailabilityBitfield, + pub submitted_at: N, + } + + #[storage_alias] + pub type PendingAvailability = StorageMap< + Pallet, + Twox64Concat, + ParaId, + CandidatePendingAvailability<::Hash, BlockNumberFor>, + >; + + #[storage_alias] + pub type PendingAvailabilityCommitments = + StorageMap, Twox64Concat, ParaId, CandidateCommitments>; + + #[storage_alias] + pub type AvailabilityBitfields = StorageMap< + Pallet, + Twox64Concat, + ValidatorIndex, + AvailabilityBitfieldRecord>, + >; +} + +mod v1 { + use super::v0::{ + AvailabilityBitfields, PendingAvailability as V0PendingAvailability, + PendingAvailabilityCommitments as V0PendingAvailabilityCommitments, + }; + use crate::inclusion::{ + CandidatePendingAvailability as V1CandidatePendingAvailability, Config, Pallet, + PendingAvailability as V1PendingAvailability, + }; + use frame_support::{traits::OnRuntimeUpgrade, weights::Weight}; + use sp_core::Get; + use sp_std::{collections::vec_deque::VecDeque, vec::Vec}; + + #[cfg(feature = "try-runtime")] + use frame_support::{ + 
ensure, + traits::{GetStorageVersion, StorageVersion}, + }; + #[cfg(feature = "try-runtime")] + use parity_scale_codec::{Decode, Encode}; + + pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); + + impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + log::trace!(target: crate::inclusion::LOG_TARGET, "Running pre_upgrade() for inclusion MigrateToV1"); + let candidates_before_upgrade = V0PendingAvailability::::iter().count(); + let commitments_before_upgrade = V0PendingAvailabilityCommitments::::iter().count(); + + if candidates_before_upgrade != commitments_before_upgrade { + log::warn!( + target: crate::inclusion::LOG_TARGET, + "Number of pending candidates differ from the number of pending commitments. {} vs {}", + candidates_before_upgrade, + commitments_before_upgrade + ); + } + + Ok((candidates_before_upgrade as u32).encode()) + } + + fn on_runtime_upgrade() -> Weight { + let mut weight: Weight = Weight::zero(); + + let v0_candidates: Vec<_> = V0PendingAvailability::::drain().collect(); + + for (para_id, candidate) in v0_candidates { + let commitments = V0PendingAvailabilityCommitments::::take(para_id); + // One write for each removal (one candidate and one commitment). + weight = weight.saturating_add(T::DbWeight::get().writes(2)); + + if let Some(commitments) = commitments { + let mut per_para = VecDeque::new(); + per_para.push_back(V1CandidatePendingAvailability { + core: candidate.core, + hash: candidate.hash, + descriptor: candidate.descriptor, + availability_votes: candidate.availability_votes, + backers: candidate.backers, + relay_parent_number: candidate.relay_parent_number, + backed_in_number: candidate.backed_in_number, + backing_group: candidate.backing_group, + commitments, + }); + V1PendingAvailability::::insert(para_id, per_para); + + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + } + + // should've already been drained by the above for loop, but as a sanity check, in case + // there are more commitments than candidates. + // V0PendingAvailabilityCommitments should not contain too many keys so removing + // everything at once should be safe + let res = V0PendingAvailabilityCommitments::::clear(u32::MAX, None); + weight = weight.saturating_add( + T::DbWeight::get().reads_writes(res.loops as u64, res.backend as u64), + ); + + // AvailabilityBitfields should not contain too many keys so removing everything at once + // should be safe. + let res = AvailabilityBitfields::::clear(u32::MAX, None); + weight = weight.saturating_add( + T::DbWeight::get().reads_writes(res.loops as u64, res.backend as u64), + ); + + weight + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + log::trace!(target: crate::inclusion::LOG_TARGET, "Running post_upgrade() for inclusion MigrateToV1"); + ensure!( + Pallet::::on_chain_storage_version() >= StorageVersion::new(1), + "Storage version should be >= 1 after the migration" + ); + + let candidates_before_upgrade = + u32::decode(&mut &state[..]).expect("Was properly encoded") as usize; + let candidates_after_upgrade = V1PendingAvailability::::iter().fold( + 0usize, + |mut acc, (_paraid, para_candidates)| { + acc += para_candidates.len(); + acc + }, + ); + + ensure!( + candidates_before_upgrade == candidates_after_upgrade, + "Number of pending candidates should be the same as the one before the upgrade." 
+ ); + ensure!( + V0PendingAvailability::::iter().next() == None, + "Pending availability candidates storage v0 should have been removed" + ); + ensure!( + V0PendingAvailabilityCommitments::::iter().next() == None, + "Pending availability commitments storage should have been removed" + ); + ensure!( + AvailabilityBitfields::::iter().next() == None, + "Availability bitfields storage should have been removed" + ); + + Ok(()) + } + } + + /// Migrate to v1 inclusion module storage. + /// - merges the `PendingAvailabilityCommitments` into the `CandidatePendingAvailability` + /// storage + /// - removes the `AvailabilityBitfields` storage, which was never read. + pub type MigrateToV1 = frame_support::migrations::VersionedMigration< + 0, + 1, + VersionUncheckedMigrateToV1, + Pallet, + ::DbWeight, + >; +} + +#[cfg(test)] +mod tests { + use super::{v1::VersionUncheckedMigrateToV1, *}; + use crate::{ + inclusion::{ + CandidatePendingAvailability as V1CandidatePendingAvailability, + PendingAvailability as V1PendingAvailability, *, + }, + mock::{new_test_ext, MockGenesisConfig, Test}, + }; + use frame_support::traits::OnRuntimeUpgrade; + use primitives::{AvailabilityBitfield, Id as ParaId}; + use test_helpers::{dummy_candidate_commitments, dummy_candidate_descriptor, dummy_hash}; + + #[test] + fn migrate_to_v1() { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + // No data to migrate. + assert_eq!( + as OnRuntimeUpgrade>::on_runtime_upgrade(), + Weight::zero() + ); + assert!(V1PendingAvailability::::iter().next().is_none()); + + let mut expected = vec![]; + + for i in 1..5 { + let descriptor = dummy_candidate_descriptor(dummy_hash()); + v0::PendingAvailability::::insert( + ParaId::from(i), + v0::CandidatePendingAvailability { + core: CoreIndex(i), + descriptor: descriptor.clone(), + relay_parent_number: i, + hash: CandidateHash(dummy_hash()), + availability_votes: Default::default(), + backed_in_number: i, + backers: Default::default(), + backing_group: GroupIndex(i), + }, + ); + v0::PendingAvailabilityCommitments::::insert( + ParaId::from(i), + dummy_candidate_commitments(HeadData(vec![i as _])), + ); + + v0::AvailabilityBitfields::::insert( + ValidatorIndex(i), + v0::AvailabilityBitfieldRecord { + bitfield: AvailabilityBitfield(Default::default()), + submitted_at: i, + }, + ); + + expected.push(( + ParaId::from(i), + [V1CandidatePendingAvailability { + core: CoreIndex(i), + descriptor, + relay_parent_number: i, + hash: CandidateHash(dummy_hash()), + availability_votes: Default::default(), + backed_in_number: i, + backers: Default::default(), + backing_group: GroupIndex(i), + commitments: dummy_candidate_commitments(HeadData(vec![i as _])), + }] + .into_iter() + .collect::>(), + )); + } + // add some wrong data also, candidates without commitments or commitments without + // candidates. + v0::PendingAvailability::::insert( + ParaId::from(6), + v0::CandidatePendingAvailability { + core: CoreIndex(6), + descriptor: dummy_candidate_descriptor(dummy_hash()), + relay_parent_number: 6, + hash: CandidateHash(dummy_hash()), + availability_votes: Default::default(), + backed_in_number: 6, + backers: Default::default(), + backing_group: GroupIndex(6), + }, + ); + v0::PendingAvailabilityCommitments::::insert( + ParaId::from(7), + dummy_candidate_commitments(HeadData(vec![7 as _])), + ); + + // For tests, db weight is zero. 
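+ // (That is why the `on_runtime_upgrade` call below is expected to return `Weight::zero()` even though the migration performs storage reads and writes.)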
+ assert_eq!( + as OnRuntimeUpgrade>::on_runtime_upgrade(), + Weight::zero() + ); + + assert_eq!(v0::PendingAvailabilityCommitments::::iter().next(), None); + assert_eq!(v0::PendingAvailability::::iter().next(), None); + assert_eq!(v0::AvailabilityBitfields::::iter().next(), None); + + let mut actual = V1PendingAvailability::::iter().collect::>(); + actual.sort_by(|(id1, _), (id2, _)| id1.cmp(id2)); + expected.sort_by(|(id1, _), (id2, _)| id1.cmp(id2)); + + assert_eq!(actual, expected); + }); + } +} diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 16e2e93b5617..e77f8d15b40d 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -23,31 +23,35 @@ use crate::{ configuration::{self, HostConfiguration}, disputes, dmp, hrmp, paras::{self, SetGoAhead}, - scheduler::{self, AvailabilityTimeoutStatus}, + scheduler, shared::{self, AllowedRelayParentsTracker}, + util::make_persisted_validation_data_with_parent, }; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::{ defensive, pallet_prelude::*, - traits::{Defensive, EnqueueMessage, Footprint, QueueFootprint}, + traits::{EnqueueMessage, Footprint, QueueFootprint}, BoundedSlice, }; use frame_system::pallet_prelude::*; use pallet_message_queue::OnQueueChanged; use parity_scale_codec::{Decode, Encode}; use primitives::{ - effective_minimum_backing_votes, supermajority_threshold, well_known_keys, - AvailabilityBitfield, BackedCandidate, CandidateCommitments, CandidateDescriptor, - CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, GroupIndex, Hash, - HeadData, Id as ParaId, SignedAvailabilityBitfields, SigningContext, UpwardMessage, - ValidatorId, ValidatorIndex, ValidityAttestation, + effective_minimum_backing_votes, supermajority_threshold, well_known_keys, BackedCandidate, + CandidateCommitments, CandidateDescriptor, CandidateHash, CandidateReceipt, + CommittedCandidateReceipt, CoreIndex, GroupIndex, Hash, HeadData, Id as ParaId, + SignedAvailabilityBitfields, SigningContext, UpwardMessage, ValidatorId, ValidatorIndex, + ValidityAttestation, }; use scale_info::TypeInfo; use sp_runtime::{traits::One, DispatchError, SaturatedConversion, Saturating}; #[cfg(feature = "std")] use sp_std::fmt; -use sp_std::{collections::btree_set::BTreeSet, prelude::*}; +use sp_std::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque}, + prelude::*, +}; pub use pallet::*; @@ -57,6 +61,8 @@ pub(crate) mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +pub mod migration; + pub trait WeightInfo { fn receive_upward_messages(i: u32) -> Weight; } @@ -80,20 +86,8 @@ impl WeightInfo for () { /// `configuration` pallet to check these values before setting. pub const MAX_UPWARD_MESSAGE_SIZE_BOUND: u32 = 128 * 1024; -/// A bitfield signed by a validator indicating that it is keeping its piece of the erasure-coding -/// for any backed candidates referred to by a `1` bit available. -/// -/// The bitfield's signature should be checked at the point of submission. Afterwards it can be -/// dropped. -#[derive(Encode, Decode, TypeInfo)] -#[cfg_attr(test, derive(Debug))] -pub struct AvailabilityBitfieldRecord { - bitfield: AvailabilityBitfield, // one bit per core. - submitted_at: N, // for accounting, as meaning of bits may change over time. -} - /// A backed candidate pending availability. 
-#[derive(Encode, Decode, PartialEq, TypeInfo)] +#[derive(Encode, Decode, PartialEq, TypeInfo, Clone)] #[cfg_attr(test, derive(Debug))] pub struct CandidatePendingAvailability { /// The availability core this is assigned to. @@ -102,6 +96,8 @@ pub struct CandidatePendingAvailability { hash: CandidateHash, /// The candidate descriptor. descriptor: CandidateDescriptor, + /// The candidate commitments. + commitments: CandidateCommitments, /// The received availability votes. One bit per validator. availability_votes: BitVec, /// The backers of the candidate pending availability. @@ -121,8 +117,11 @@ impl CandidatePendingAvailability { } /// Get the relay-chain block number this was backed in. - pub(crate) fn backed_in_number(&self) -> &N { - &self.backed_in_number + pub(crate) fn backed_in_number(&self) -> N + where + N: Clone, + { + self.backed_in_number.clone() } /// Get the core index. @@ -140,6 +139,11 @@ impl CandidatePendingAvailability { &self.descriptor } + /// Get the candidate commitments. + pub(crate) fn candidate_commitments(&self) -> &CandidateCommitments { + &self.commitments + } + /// Get the candidate's relay parent's number. pub(crate) fn relay_parent_number(&self) -> N where @@ -148,11 +152,22 @@ impl CandidatePendingAvailability { self.relay_parent_number.clone() } + /// Get the candidate backing group. + pub(crate) fn backing_group(&self) -> GroupIndex { + self.backing_group + } + + /// Get the candidate's backers. + pub(crate) fn backers(&self) -> &BitVec { + &self.backers + } + #[cfg(any(feature = "runtime-benchmarks", test))] pub(crate) fn new( core: CoreIndex, hash: CandidateHash, descriptor: CandidateDescriptor, + commitments: CandidateCommitments, availability_votes: BitVec, backers: BitVec, relay_parent_number: N, @@ -163,6 +178,7 @@ impl CandidatePendingAvailability { core, hash, descriptor, + commitments, availability_votes, backers, relay_parent_number, @@ -253,8 +269,10 @@ pub type MaxUmpMessageLenOf = pub mod pallet { use super::*; + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] @@ -297,30 +315,10 @@ pub mod pallet { #[pallet::error] pub enum Error { - /// Validator indices are out of order or contains duplicates. - UnsortedOrDuplicateValidatorIndices, - /// Dispute statement sets are out of order or contain duplicates. - UnsortedOrDuplicateDisputeStatementSet, - /// Backed candidates are out of order (core index) or contain duplicates. - UnsortedOrDuplicateBackedCandidates, - /// A different relay parent was provided compared to the on-chain stored one. - UnexpectedRelayParent, - /// Availability bitfield has unexpected size. - WrongBitfieldSize, - /// Bitfield consists of zeros only. - BitfieldAllZeros, - /// Multiple bitfields submitted by same validator or validators out of order by index. - BitfieldDuplicateOrUnordered, /// Validator index out of bounds. ValidatorIndexOutOfBounds, - /// Invalid signature - InvalidBitfieldSignature, /// Candidate submitted but para not scheduled. UnscheduledCandidate, - /// Candidate scheduled despite pending candidate already existing for the para. - CandidateScheduledBeforeParaFree, - /// Scheduled cores out of order. - ScheduledOutOfOrder, /// Head data exceeds the configured maximum. HeadDataTooLarge, /// Code upgrade prematurely. 
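A minimal sketch of how a relay-chain runtime could schedule the new inclusion migration on upgrade; the `Runtime`, `Block` and `AllPalletsWithSystem` names and the `runtime_parachains` crate alias are illustrative assumptions, not part of this diff:

pub mod migrations {
    use super::*;

    /// Unreleased migrations, applied on the next runtime upgrade.
    pub type Unreleased = (
        // Merges pending-availability commitments into the candidate records
        // and drops the unused availability bitfields (see the migration module above).
        runtime_parachains::inclusion::migration::MigrateToV1<Runtime>,
    );
}

/// Executive: dispatches to the pallets and runs the `Unreleased`
/// migrations on runtime upgrade.
pub type Executive = frame_executive::Executive<
    Runtime,
    Block,
    frame_system::ChainContext<Runtime>,
    Runtime,
    AllPalletsWithSystem,
    migrations::Unreleased,
>;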
@@ -356,31 +354,22 @@ pub mod pallet { /// The `para_head` hash in the candidate descriptor doesn't match the hash of the actual /// para head in the commitments. ParaHeadMismatch, - /// A bitfield that references a freed core, - /// either intentionally or as part of a concluded - /// invalid dispute. - BitfieldReferencesFreedCore, } - /// The latest bitfield for each validator, referred to by their index in the validator set. - #[pallet::storage] - pub(crate) type AvailabilityBitfields = - StorageMap<_, Twox64Concat, ValidatorIndex, AvailabilityBitfieldRecord>>; - - /// Candidates pending availability by `ParaId`. + /// Candidates pending availability by `ParaId`. They form a chain starting from the latest + /// included head of the para. + /// Use a different prefix post-migration to v1, since the v0 `PendingAvailability` storage + /// would otherwise have the exact same prefix which could cause undefined behaviour when doing + /// the migration. #[pallet::storage] + #[pallet::storage_prefix = "V1"] pub(crate) type PendingAvailability = StorageMap< _, Twox64Concat, ParaId, - CandidatePendingAvailability>, + VecDeque>>, >; - /// The commitments of candidates pending availability, by `ParaId`. - #[pallet::storage] - pub(crate) type PendingAvailabilityCommitments = - StorageMap<_, Twox64Concat, ParaId, CandidateCommitments>; - #[pallet::call] impl Pallet {} } @@ -469,9 +458,7 @@ impl Pallet { ) { // unlike most drain methods, drained elements are not cleared on `Drop` of the iterator // and require consumption. - for _ in >::drain() {} for _ in >::drain() {} - for _ in >::drain() {} Self::cleanup_outgoing_ump_dispatch_queues(outgoing_paras); } @@ -490,27 +477,18 @@ impl Pallet { /// /// Bitfields are expected to have been sanitized already. E.g. via `sanitize_bitfields`! /// - /// Updates storage items `PendingAvailability` and `AvailabilityBitfields`. + /// Updates storage items `PendingAvailability`. /// /// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became /// available, and cores free. - pub(crate) fn update_pending_availability_and_get_freed_cores( - expected_bits: usize, + pub(crate) fn update_pending_availability_and_get_freed_cores( validators: &[ValidatorId], signed_bitfields: SignedAvailabilityBitfields, - core_lookup: F, - ) -> Vec<(CoreIndex, CandidateHash)> - where - F: Fn(CoreIndex) -> Option, - { - let mut assigned_paras_record = (0..expected_bits) - .map(|bit_index| core_lookup(CoreIndex::from(bit_index as u32))) - .map(|opt_para_id| { - opt_para_id.map(|para_id| (para_id, PendingAvailability::::get(¶_id))) - }) - .collect::>(); + ) -> Vec<(CoreIndex, CandidateHash)> { + let threshold = availability_threshold(validators.len()); + + let mut votes_per_core: BTreeMap> = BTreeMap::new(); - let now = >::block_number(); for (checked_bitfield, validator_index) in signed_bitfields.into_iter().map(|signed_bitfield| { let validator_idx = signed_bitfield.validator_index(); @@ -518,310 +496,275 @@ impl Pallet { (checked_bitfield, validator_idx) }) { for (bit_idx, _) in checked_bitfield.0.iter().enumerate().filter(|(_, is_av)| **is_av) { - let pending_availability = if let Some((_, pending_availability)) = - assigned_paras_record[bit_idx].as_mut() - { - pending_availability - } else { - // For honest validators, this happens in case of unoccupied cores, - // which in turn happens in case of a disputed candidate. 
- // A malicious one might include arbitrary indices, but they are represented - by `None` values and will be sorted out in the next if case. - continue - }; - - // defensive check - this is constructed by loading the availability bitfield - // record, which is always `Some` if the core is occupied - that's why we're here. - let validator_index = validator_index.0 as usize; - if let Some(mut bit) = - pending_availability.as_mut().and_then(|candidate_pending_availability| { - candidate_pending_availability.availability_votes.get_mut(validator_index) - }) { - *bit = true; - } + let core_index = CoreIndex(bit_idx as u32); + votes_per_core + .entry(core_index) + .or_insert_with(|| BTreeSet::new()) + .insert(validator_index); } - - let record = - AvailabilityBitfieldRecord { bitfield: checked_bitfield, submitted_at: now }; - - >::insert(&validator_index, record); } - let threshold = availability_threshold(validators.len()); + let mut freed_cores = vec![]; + + let pending_paraids: Vec<_> = >::iter_keys().collect(); + for paraid in pending_paraids { + >::mutate(paraid, |candidates| { + if let Some(candidates) = candidates { + let mut last_enacted_index: Option = None; + + for (candidate_index, candidate) in candidates.iter_mut().enumerate() { + if let Some(validator_indices) = votes_per_core.remove(&candidate.core) { + for validator_index in validator_indices.iter() { + // defensive check - the bitfields were sanitized already, so + // the validator index is always within the bounds of the + // availability votes bitvec. + if let Some(mut bit) = + candidate.availability_votes.get_mut(validator_index.0 as usize) + { + *bit = true; + } + } + } + + // We check for the candidate's availability even if we didn't get any new + // bitfields for its core, as it may have already been available at a + // previous block but wasn't enacted due to its predecessors not being + // available. + if candidate.availability_votes.count_ones() >= threshold { + // We can only enact a candidate if we've enacted all of its + // predecessors already.
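+ // (For example, with pending candidates [A, B, C] where A and C have + // reached the availability threshold but B has not, only A can be + // enacted; C must wait for its predecessor B to become available.)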
+ let can_enact = if candidate_index == 0 { + last_enacted_index == None + } else { + let prev_candidate_index = usize::try_from(candidate_index - 1) + .expect("Previous `if` would have caught a 0 candidate index."); + matches!(last_enacted_index, Some(old_index) if old_index == prev_candidate_index) + }; + + if can_enact { + last_enacted_index = Some(candidate_index); + } + } + } - let mut freed_cores = Vec::with_capacity(expected_bits); - for (para_id, pending_availability) in assigned_paras_record - .into_iter() - .flatten() - .filter_map(|(id, p)| p.map(|p| (id, p))) - { - if pending_availability.availability_votes.count_ones() >= threshold { - >::remove(¶_id); - let commitments = match PendingAvailabilityCommitments::::take(¶_id) { - Some(commitments) => commitments, - None => { - log::warn!( - target: LOG_TARGET, - "Inclusion::process_bitfields: PendingAvailability and PendingAvailabilityCommitments - are out of sync, did someone mess with the storage?", - ); - continue - }, - }; - - let receipt = CommittedCandidateReceipt { - descriptor: pending_availability.descriptor, - commitments, - }; - let _weight = Self::enact_candidate( - pending_availability.relay_parent_number, - receipt, - pending_availability.backers, - pending_availability.availability_votes, - pending_availability.core, - pending_availability.backing_group, - ); - - freed_cores.push((pending_availability.core, pending_availability.hash)); - } else { - >::insert(¶_id, &pending_availability); - } + // Trim the pending availability candidates storage and enact candidates of this + // para now. + if let Some(last_enacted_index) = last_enacted_index { + let evicted_candidates = candidates.drain(0..=last_enacted_index); + for candidate in evicted_candidates { + freed_cores.push((candidate.core, candidate.hash)); + + let receipt = CommittedCandidateReceipt { + descriptor: candidate.descriptor, + commitments: candidate.commitments, + }; + let _weight = Self::enact_candidate( + candidate.relay_parent_number, + receipt, + candidate.backers, + candidate.availability_votes, + candidate.core, + candidate.backing_group, + ); + } + } + } + }); } freed_cores } - /// Process candidates that have been backed. Provide the relay storage root, a set of - /// candidates and scheduled cores. + /// Process candidates that have been backed. Provide a set of + /// candidates along with their scheduled cores. /// - /// Both should be sorted ascending by core index, and the candidates should be a subset of - /// scheduled cores. If these conditions are not met, the execution of the function fails. + /// Candidates of the same paraid should be sorted according to their dependency order (they + /// should form a chain). If this condition is not met, this function will return an error. + /// (This really should not happen here, if the candidates were properly sanitised in + /// paras_inherent). pub(crate) fn process_candidates( allowed_relay_parents: &AllowedRelayParentsTracker>, - candidates: Vec<(BackedCandidate, CoreIndex)>, + candidates: &BTreeMap, CoreIndex)>>, group_validators: GV, core_index_enabled: bool, ) -> Result, DispatchError> where GV: Fn(GroupIndex) -> Option>, { - let now = >::block_number(); - if candidates.is_empty() { return Ok(ProcessedCandidates::default()) } - let minimum_backing_votes = configuration::Pallet::::config().minimum_backing_votes; + let now = >::block_number(); let validators = shared::Pallet::::active_validator_keys(); // Collect candidate receipts with backers. 
let mut candidate_receipt_with_backing_validator_indices = Vec::with_capacity(candidates.len()); + let mut core_indices = Vec::with_capacity(candidates.len()); - // Do all checks before writing storage. - let core_indices_and_backers = { - let mut core_indices_and_backers = Vec::with_capacity(candidates.len()); - let mut last_core = None; - - let mut check_assignment_in_order = |core_idx| -> DispatchResult { - ensure!( - last_core.map_or(true, |core| core_idx > core), - Error::::ScheduledOutOfOrder, - ); - - last_core = Some(core_idx); - Ok(()) + for (para_id, para_candidates) in candidates { + let mut latest_head_data = match Self::para_latest_head_data(para_id) { + None => { + defensive!("Latest included head data for paraid {:?} is None", para_id); + continue + }, + Some(latest_head_data) => latest_head_data, }; - // We combine an outer loop over candidates with an inner loop over the scheduled, - // where each iteration of the outer loop picks up at the position - // in scheduled just after the past iteration left off. - // - // If the candidates appear in the same order as they appear in `scheduled`, - // then they should always be found. If the end of `scheduled` is reached, - // then the candidate was either not scheduled or out-of-order. - // - // In the meantime, we do certain sanity checks on the candidates and on the scheduled - // list. - for (candidate_idx, (backed_candidate, core_index)) in candidates.iter().enumerate() { - let relay_parent_hash = backed_candidate.descriptor().relay_parent; - let para_id = backed_candidate.descriptor().para_id; - - let prev_context = >::para_most_recent_context(para_id); - - let check_ctx = CandidateCheckContext::::new(prev_context); - let signing_context = SigningContext { - parent_hash: relay_parent_hash, - session_index: shared::Pallet::::session_index(), - }; - - let relay_parent_number = match check_ctx.verify_backed_candidate( + for (candidate, core) in para_candidates.iter() { + let candidate_hash = candidate.candidate().hash(); + + let check_ctx = CandidateCheckContext::::new(None); + let relay_parent_number = check_ctx.verify_backed_candidate( &allowed_relay_parents, - candidate_idx, - backed_candidate.candidate(), - )? { - Err(FailedToCreatePVD) => { - log::debug!( - target: LOG_TARGET, - "Failed to create PVD for candidate {}", - candidate_idx, - ); - // We don't want to error out here because it will - // brick the relay-chain. So we return early without - // doing anything. - return Ok(ProcessedCandidates::default()) - }, - Ok(rpn) => rpn, - }; - - let (validator_indices, _) = - backed_candidate.validator_indices_and_core_index(core_index_enabled); - - log::debug!( - target: LOG_TARGET, - "Candidate {:?} on {:?}, - core_index_enabled = {}", - backed_candidate.hash(), - core_index, - core_index_enabled - ); - - check_assignment_in_order(core_index)?; - - let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; - - ensure!( - >::get(¶_id).is_none() && - >::get(¶_id).is_none(), - Error::::CandidateScheduledBeforeParaFree, - ); - - // The candidate based upon relay parent `N` should be backed by a group - // assigned to core at block `N + 1`. Thus, `relay_parent_number + 1` - // will always land in the current session. + candidate.candidate(), + latest_head_data.clone(), + )?; + + // The candidate based upon relay parent `N` should be backed by a + // group assigned to core at block `N + 1`. Thus, + // `relay_parent_number + 1` will always land in the current + // session. 
let group_idx = >::group_assigned_to_core( - *core_index, + *core, relay_parent_number + One::one(), ) .ok_or_else(|| { log::warn!( target: LOG_TARGET, - "Failed to compute group index for candidate {}", - candidate_idx + "Failed to compute group index for candidate {:?}", + candidate_hash ); Error::::InvalidAssignment })?; let group_vals = group_validators(group_idx).ok_or_else(|| Error::::InvalidGroupIndex)?; - // check the signatures in the backing and that it is a majority. - { - let maybe_amount_validated = primitives::check_candidate_backing( - backed_candidate.candidate().hash(), - backed_candidate.validity_votes(), - validator_indices, - &signing_context, - group_vals.len(), - |intra_group_vi| { - group_vals - .get(intra_group_vi) - .and_then(|vi| validators.get(vi.0 as usize)) - .map(|v| v.clone()) - }, - ); - - match maybe_amount_validated { - Ok(amount_validated) => ensure!( - amount_validated >= - effective_minimum_backing_votes( - group_vals.len(), - minimum_backing_votes - ), - Error::::InsufficientBacking, - ), - Err(()) => { - Err(Error::::InvalidBacking)?; - }, - } - - let mut backer_idx_and_attestation = - Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity( - validator_indices.count_ones(), - ); - let candidate_receipt = backed_candidate.receipt(); - - for ((bit_idx, _), attestation) in validator_indices - .iter() - .enumerate() - .filter(|(_, signed)| **signed) - .zip(backed_candidate.validity_votes().iter().cloned()) - { - let val_idx = - group_vals.get(bit_idx).expect("this query succeeded above; qed"); - backer_idx_and_attestation.push((*val_idx, attestation)); - - backers.set(val_idx.0 as _, true); + // Check backing vote count and validity. + let (backers, backer_idx_and_attestation) = Self::check_backing_votes( + candidate, + &validators, + group_vals, + core_index_enabled, + )?; + + // Found a valid candidate. + latest_head_data = candidate.candidate().commitments.head_data.clone(); + candidate_receipt_with_backing_validator_indices + .push((candidate.receipt(), backer_idx_and_attestation)); + core_indices.push((*core, *para_id)); + + // Update storage now + >::mutate(¶_id, |pending_availability| { + let new_candidate = CandidatePendingAvailability { + core: *core, + hash: candidate_hash, + descriptor: candidate.candidate().descriptor.clone(), + commitments: candidate.candidate().commitments.clone(), + // initialize all availability votes to 0. + availability_votes: bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()], + relay_parent_number, + backers: backers.to_bitvec(), + backed_in_number: now, + backing_group: group_idx, + }; + + if let Some(pending_availability) = pending_availability { + pending_availability.push_back(new_candidate); + } else { + *pending_availability = + Some([new_candidate].into_iter().collect::>()) } - candidate_receipt_with_backing_validator_indices - .push((candidate_receipt, backer_idx_and_attestation)); - } + }); - core_indices_and_backers.push(( - (*core_index, para_id), - backers, + // Deposit backed event. + Self::deposit_event(Event::::CandidateBacked( + candidate.candidate().to_plain(), + candidate.candidate().commitments.head_data.clone(), + *core, group_idx, - relay_parent_number, )); } + } - core_indices_and_backers - }; + Ok(ProcessedCandidates:: { + core_indices, + candidate_receipt_with_backing_validator_indices, + }) + } - // one more sweep for actually writing to storage. 
- let core_indices = core_indices_and_backers.iter().map(|(c, ..)| *c).collect(); - for ((candidate, _), (core, backers, group, relay_parent_number)) in - candidates.into_iter().zip(core_indices_and_backers) - { - let para_id = candidate.descriptor().para_id; + // Get the latest backed output head data of this para. + pub(crate) fn para_latest_head_data(para_id: &ParaId) -> Option { + match >::get(para_id).and_then(|pending_candidates| { + pending_candidates.back().map(|x| x.commitments.head_data.clone()) + }) { + Some(head_data) => Some(head_data), + None => >::para_head(para_id), + } + } - // initialize all availability votes to 0. - let availability_votes: BitVec = - bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; + fn check_backing_votes( + backed_candidate: &BackedCandidate, + validators: &[ValidatorId], + group_vals: Vec, + core_index_enabled: bool, + ) -> Result<(BitVec, Vec<(ValidatorIndex, ValidityAttestation)>), Error> { + let minimum_backing_votes = configuration::Pallet::::config().minimum_backing_votes; - Self::deposit_event(Event::::CandidateBacked( - candidate.candidate().to_plain(), - candidate.candidate().commitments.head_data.clone(), - core.0, - group, - )); + let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; + let signing_context = SigningContext { + parent_hash: backed_candidate.descriptor().relay_parent, + session_index: shared::Pallet::::session_index(), + }; - let candidate_hash = candidate.candidate().hash(); + let (validator_indices, _) = + backed_candidate.validator_indices_and_core_index(core_index_enabled); + + // check the signatures in the backing and that it is a majority. + let maybe_amount_validated = primitives::check_candidate_backing( + backed_candidate.candidate().hash(), + backed_candidate.validity_votes(), + validator_indices, + &signing_context, + group_vals.len(), + |intra_group_vi| { + group_vals + .get(intra_group_vi) + .and_then(|vi| validators.get(vi.0 as usize)) + .map(|v| v.clone()) + }, + ); - let (descriptor, commitments) = ( - candidate.candidate().descriptor.clone(), - candidate.candidate().commitments.clone(), - ); + match maybe_amount_validated { + Ok(amount_validated) => ensure!( + amount_validated >= + effective_minimum_backing_votes(group_vals.len(), minimum_backing_votes), + Error::::InsufficientBacking, + ), + Err(()) => { + Err(Error::::InvalidBacking)?; + }, + } - >::insert( - ¶_id, - CandidatePendingAvailability { - core: core.0, - hash: candidate_hash, - descriptor, - availability_votes, - relay_parent_number, - backers: backers.to_bitvec(), - backed_in_number: now, - backing_group: group, - }, + let mut backer_idx_and_attestation = + Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity( + validator_indices.count_ones(), ); - >::insert(¶_id, commitments); + + for ((bit_idx, _), attestation) in validator_indices + .iter() + .enumerate() + .filter(|(_, signed)| **signed) + .zip(backed_candidate.validity_votes().iter().cloned()) + { + let val_idx = group_vals.get(bit_idx).expect("this query succeeded above; qed"); + backer_idx_and_attestation.push((*val_idx, attestation)); + + backers.set(val_idx.0 as _, true); } - Ok(ProcessedCandidates:: { - core_indices, - candidate_receipt_with_backing_validator_indices, - }) + Ok((backers, backer_idx_and_attestation)) } /// Run the acceptance criteria checks on the given candidate commitments. @@ -1028,110 +971,155 @@ impl Pallet { weight } - /// Cleans up all paras pending availability that the predicate returns true for. 
- /// - /// The predicate accepts the index of the core and the block number the core has been occupied - /// since (i.e. the block number the candidate was backed at in this fork of the relay chain). + /// Cleans up all timed out candidates as well as their descendant candidates. /// /// Returns a vector of cleaned-up core IDs. - pub(crate) fn collect_pending( - pred: impl Fn(BlockNumberFor) -> AvailabilityTimeoutStatus>, - ) -> Vec { - let mut cleaned_up_ids = Vec::new(); - let mut cleaned_up_cores = Vec::new(); - - for (para_id, pending_record) in >::iter() { - if pred(pending_record.backed_in_number).timed_out { - cleaned_up_ids.push(para_id); - cleaned_up_cores.push(pending_record.core); - } - } - - for para_id in cleaned_up_ids { - let pending = >::take(¶_id); - let commitments = >::take(¶_id); - - if let (Some(pending), Some(commitments)) = (pending, commitments) { - // defensive: this should always be true. - let candidate = CandidateReceipt { - descriptor: pending.descriptor, - commitments_hash: commitments.hash(), - }; + pub(crate) fn free_timedout() -> Vec { + let timeout_pred = >::availability_timeout_predicate(); + + let timed_out: Vec<_> = Self::free_failed_cores( + |candidate| timeout_pred(candidate.backed_in_number).timed_out, + None, + ) + .collect(); + + let mut timed_out_cores = Vec::with_capacity(timed_out.len()); + for candidate in timed_out.iter() { + timed_out_cores.push(candidate.core); + + let receipt = CandidateReceipt { + descriptor: candidate.descriptor.clone(), + commitments_hash: candidate.commitments.hash(), + }; - Self::deposit_event(Event::::CandidateTimedOut( - candidate, - commitments.head_data, - pending.core, - )); - } + Self::deposit_event(Event::::CandidateTimedOut( + receipt, + candidate.commitments.head_data.clone(), + candidate.core, + )); } - cleaned_up_cores + timed_out_cores } - /// Cleans up all paras pending availability that are in the given list of disputed candidates. + /// Cleans up all cores pending availability occupied by one of the disputed candidates or which + /// are descendants of a disputed candidate. /// - /// Returns a vector of cleaned-up core IDs. - pub(crate) fn collect_disputed(disputed: &BTreeSet) -> Vec { - let mut cleaned_up_ids = Vec::new(); - let mut cleaned_up_cores = Vec::new(); - - for (para_id, pending_record) in >::iter() { - if disputed.contains(&pending_record.hash) { - cleaned_up_ids.push(para_id); - cleaned_up_cores.push(pending_record.core); + /// Returns a vector of cleaned-up core IDs, along with the evicted candidate hashes. + pub(crate) fn free_disputed( + disputed: &BTreeSet, + ) -> Vec<(CoreIndex, CandidateHash)> { + Self::free_failed_cores( + |candidate| disputed.contains(&candidate.hash), + Some(disputed.len()), + ) + .map(|candidate| (candidate.core, candidate.hash)) + .collect() + } + + // Clean up cores whose candidates are deemed as failed by the predicate. `pred` returns true if + // a candidate is considered failed. + // A failed candidate also frees all subsequent cores which hold descendants of said candidate. + fn free_failed_cores< + P: Fn(&CandidatePendingAvailability>) -> bool, + >( + pred: P, + capacity_hint: Option, + ) -> impl Iterator>> { + let mut earliest_dropped_indices: BTreeMap = BTreeMap::new(); + + for (para_id, pending_candidates) in >::iter() { + // We assume that pending candidates are stored in dependency order. So we need to store + // the earliest dropped candidate. All others that follow will get freed as well. 
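+ // (For example, if a para has pending candidates [A, B, C] and the + // predicate matches B, then B and its descendant C are freed, while A + // stays in place.)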
+ let mut earliest_dropped_idx = None; + for (index, candidate) in pending_candidates.iter().enumerate() { + if pred(candidate) { + earliest_dropped_idx = Some(index); + // Since we're looping the candidates in dependency order, we've found the + // earliest failed index for this paraid. + break; + } + } + + if let Some(earliest_dropped_idx) = earliest_dropped_idx { + earliest_dropped_indices.insert(para_id, earliest_dropped_idx); } } - for para_id in cleaned_up_ids { - let _ = >::take(¶_id); - let _ = >::take(¶_id); + let mut cleaned_up_cores = + if let Some(capacity) = capacity_hint { Vec::with_capacity(capacity) } else { vec![] }; + + for (para_id, earliest_dropped_idx) in earliest_dropped_indices { + // Do cleanups and record the cleaned up cores + >::mutate(¶_id, |record| { + if let Some(record) = record { + let cleaned_up = record.drain(earliest_dropped_idx..); + cleaned_up_cores.extend(cleaned_up); + } + }); } - cleaned_up_cores + cleaned_up_cores.into_iter() } - /// Forcibly enact the candidate with the given ID as though it had been deemed available - /// by bitfields. + /// Forcibly enact the pending candidates of the given paraid as though they had been deemed + /// available by bitfields. /// /// Is a no-op if there is no candidate pending availability for this para-id. - /// This should generally not be used but it is useful during execution of Runtime APIs, + /// If there are multiple candidates pending availability for this para-id, it will enact all of + /// them. This should generally not be used but it is useful during execution of Runtime APIs, /// where the changes to the state are expected to be discarded directly after. pub(crate) fn force_enact(para: ParaId) { - let pending = >::take(¶); - let commitments = >::take(¶); - - if let (Some(pending), Some(commitments)) = (pending, commitments) { - let candidate = - CommittedCandidateReceipt { descriptor: pending.descriptor, commitments }; - - Self::enact_candidate( - pending.relay_parent_number, - candidate, - pending.backers, - pending.availability_votes, - pending.core, - pending.backing_group, - ); - } + >::mutate(¶, |candidates| { + if let Some(candidates) = candidates { + for candidate in candidates.drain(..) { + let receipt = CommittedCandidateReceipt { + descriptor: candidate.descriptor, + commitments: candidate.commitments, + }; + + Self::enact_candidate( + candidate.relay_parent_number, + receipt, + candidate.backers, + candidate.availability_votes, + candidate.core, + candidate.backing_group, + ); + } + } + }); } - /// Returns the `CommittedCandidateReceipt` pending availability for the para provided, if any. + /// Returns the first `CommittedCandidateReceipt` pending availability for the para provided, if + /// any. pub(crate) fn candidate_pending_availability( para: ParaId, ) -> Option> { - >::get(¶) - .map(|p| p.descriptor) - .and_then(|d| >::get(¶).map(move |c| (d, c))) - .map(|(d, c)| CommittedCandidateReceipt { descriptor: d, commitments: c }) + >::get(¶).and_then(|p| { + p.get(0).map(|p| CommittedCandidateReceipt { + descriptor: p.descriptor.clone(), + commitments: p.commitments.clone(), + }) + }) } - /// Returns the metadata around the candidate pending availability for the + /// Returns the metadata around the first candidate pending availability for the /// para provided, if any. 
pub(crate) fn pending_availability( para: ParaId, + ) -> Option>> { + >::get(¶).and_then(|p| p.get(0).cloned()) + } + + /// Returns the metadata around the candidate pending availability occupying the supplied core, + /// if any. + pub(crate) fn pending_availability_with_core( + para: ParaId, + core: CoreIndex, ) -> Option>> { >::get(¶) + .and_then(|p| p.iter().find(|c| c.core == core).cloned()) } } @@ -1182,10 +1170,6 @@ pub(crate) struct CandidateCheckContext { prev_context: Option>, } -/// An error indicating that creating Persisted Validation Data failed -/// while checking a candidate's validity. -pub(crate) struct FailedToCreatePVD; - impl CandidateCheckContext { pub(crate) fn new(prev_context: Option>) -> Self { Self { config: >::config(), prev_context } @@ -1203,9 +1187,9 @@ impl CandidateCheckContext { pub(crate) fn verify_backed_candidate( &self, allowed_relay_parents: &AllowedRelayParentsTracker>, - candidate_idx: usize, backed_candidate_receipt: &CommittedCandidateReceipt<::Hash>, - ) -> Result, FailedToCreatePVD>, Error> { + parent_head_data: HeadData, + ) -> Result, Error> { let para_id = backed_candidate_receipt.descriptor().para_id; let relay_parent = backed_candidate_receipt.descriptor().relay_parent; @@ -1218,16 +1202,11 @@ impl CandidateCheckContext { }; { - let persisted_validation_data = match crate::util::make_persisted_validation_data::( - para_id, + let persisted_validation_data = make_persisted_validation_data_with_parent::( relay_parent_number, relay_parent_storage_root, - ) - .defensive_proof("the para is registered") - { - Some(l) => l, - None => return Ok(Err(FailedToCreatePVD)), - }; + parent_head_data, + ); let expected = persisted_validation_data.hash(); @@ -1268,13 +1247,13 @@ impl CandidateCheckContext { ) { log::debug!( target: LOG_TARGET, - "Validation outputs checking during inclusion of a candidate {} for parachain `{}` failed", - candidate_idx, + "Validation outputs checking during inclusion of a candidate {:?} for parachain `{}` failed", + backed_candidate_receipt.hash(), u32::from(para_id), ); Err(err.strip_into_dispatch_err::())?; }; - Ok(Ok(relay_parent_number)) + Ok(relay_parent_number) } /// Check the given outputs after candidate validation on whether it passes the acceptance diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 3fe7d7f0c7d4..5ab3a13324d2 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -27,7 +27,7 @@ use crate::{ shared::AllowedRelayParentsTracker, }; use primitives::{ - effective_minimum_backing_votes, SignedAvailabilityBitfields, + effective_minimum_backing_votes, AvailabilityBitfield, SignedAvailabilityBitfields, UncheckedSignedAvailabilityBitfields, }; @@ -360,81 +360,288 @@ fn simple_sanitize_bitfields( } /// Process a set of already sanitized bitfields. 
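/// They are forwarded to `update_pending_availability_and_get_freed_cores`; the helper returns the cores freed as a result, together with the hashes of the candidates that became available.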
pub(crate) fn process_bitfields( - expected_bits: usize, signed_bitfields: SignedAvailabilityBitfields, - core_lookup: impl Fn(CoreIndex) -> Option, ) -> Vec<(CoreIndex, CandidateHash)> { let validators = shared::Pallet::::active_validator_keys(); - ParaInclusion::update_pending_availability_and_get_freed_cores::<_>( - expected_bits, + ParaInclusion::update_pending_availability_and_get_freed_cores( &validators[..], signed_bitfields, - core_lookup, ) } #[test] -fn collect_pending_cleans_up_pending() { +fn free_timedout() { let chain_a = ParaId::from(1_u32); let chain_b = ParaId::from(2_u32); - let thread_a = ParaId::from(3_u32); + let chain_c = ParaId::from(3_u32); + let chain_d = ParaId::from(4_u32); + let chain_e = ParaId::from(5_u32); + let chain_f = ParaId::from(6_u32); + let thread_a = ParaId::from(7_u32); let paras = vec![ (chain_a, ParaKind::Parachain), (chain_b, ParaKind::Parachain), + (chain_c, ParaKind::Parachain), + (chain_d, ParaKind::Parachain), + (chain_e, ParaKind::Parachain), + (chain_f, ParaKind::Parachain), (thread_a, ParaKind::Parathread), ]; let mut config = genesis_config(paras); config.configuration.config.scheduler_params.group_rotation_frequency = 3; new_test_ext(config).execute_with(|| { - let default_candidate = TestCandidateBuilder::default().build(); - >::insert( - chain_a, + let timed_out_cores = ParaInclusion::free_timedout(); + assert!(timed_out_cores.is_empty()); + + let make_candidate = |core_index: u32, timed_out: bool| { + let default_candidate = TestCandidateBuilder::default().build(); + let backed_in_number = if timed_out { 0 } else { 5 }; + CandidatePendingAvailability { - core: CoreIndex::from(0), + core: CoreIndex::from(core_index), hash: default_candidate.hash(), descriptor: default_candidate.descriptor.clone(), availability_votes: default_availability_votes(), relay_parent_number: 0, - backed_in_number: 0, + backed_in_number, backers: default_backing_bitfield(), - backing_group: GroupIndex::from(0), - }, - ); - PendingAvailabilityCommitments::::insert( + backing_group: GroupIndex::from(core_index), + commitments: default_candidate.commitments.clone(), + } + }; + + >::insert( chain_a, - default_candidate.commitments.clone(), + [make_candidate(0, true)].into_iter().collect::>(), ); >::insert( &chain_b, + [make_candidate(1, false)].into_iter().collect::>(), + ); + + // 2 chained candidates. The first one is timed out. The other will be evicted also. + let mut c_candidates = VecDeque::new(); + c_candidates.push_back(make_candidate(2, true)); + c_candidates.push_back(make_candidate(3, false)); + + >::insert(&chain_c, c_candidates); + + // 2 chained candidates. All are timed out. + let mut d_candidates = VecDeque::new(); + d_candidates.push_back(make_candidate(4, true)); + d_candidates.push_back(make_candidate(5, true)); + + >::insert(&chain_d, d_candidates); + + // 3 chained candidates. The second one is timed out. The first one will remain in place. + // With the current time out predicate this scenario is impossible. But this is not a + // concern for this module. + let mut e_candidates = VecDeque::new(); + e_candidates.push_back(make_candidate(6, false)); + e_candidates.push_back(make_candidate(7, true)); + e_candidates.push_back(make_candidate(8, false)); + + >::insert(&chain_e, e_candidates); + + // 3 chained candidates, none are timed out. 
+ let mut f_candidates = VecDeque::new(); + f_candidates.push_back(make_candidate(9, false)); + f_candidates.push_back(make_candidate(10, false)); + f_candidates.push_back(make_candidate(11, false)); + + >::insert(&chain_f, f_candidates); + + run_to_block(5, |_| None); + + assert_eq!(>::get(&chain_a).unwrap().len(), 1); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert_eq!(>::get(&chain_c).unwrap().len(), 2); + assert_eq!(>::get(&chain_d).unwrap().len(), 2); + assert_eq!(>::get(&chain_e).unwrap().len(), 3); + assert_eq!(>::get(&chain_f).unwrap().len(), 3); + + let timed_out_cores = ParaInclusion::free_timedout(); + + assert_eq!( + timed_out_cores, + vec![ + CoreIndex(0), + CoreIndex(2), + CoreIndex(3), + CoreIndex(4), + CoreIndex(5), + CoreIndex(7), + CoreIndex(8), + ] + ); + + assert!(>::get(&chain_a).unwrap().is_empty()); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert!(>::get(&chain_c).unwrap().is_empty()); + assert!(>::get(&chain_d).unwrap().is_empty()); + assert_eq!( + >::get(&chain_e) + .unwrap() + .into_iter() + .map(|c| c.core) + .collect::>(), + vec![CoreIndex(6)] + ); + assert_eq!( + >::get(&chain_f) + .unwrap() + .into_iter() + .map(|c| c.core) + .collect::>(), + vec![CoreIndex(9), CoreIndex(10), CoreIndex(11)] + ); + }); +} + +#[test] +fn free_disputed() { + let chain_a = ParaId::from(1_u32); + let chain_b = ParaId::from(2_u32); + let chain_c = ParaId::from(3_u32); + let chain_d = ParaId::from(4_u32); + let chain_e = ParaId::from(5_u32); + let chain_f = ParaId::from(6_u32); + let thread_a = ParaId::from(7_u32); + + let paras = vec![ + (chain_a, ParaKind::Parachain), + (chain_b, ParaKind::Parachain), + (chain_c, ParaKind::Parachain), + (chain_d, ParaKind::Parachain), + (chain_e, ParaKind::Parachain), + (chain_f, ParaKind::Parachain), + (thread_a, ParaKind::Parathread), + ]; + let mut config = genesis_config(paras); + config.configuration.config.scheduler_params.group_rotation_frequency = 3; + new_test_ext(config).execute_with(|| { + let disputed_cores = ParaInclusion::free_disputed(&BTreeSet::new()); + assert!(disputed_cores.is_empty()); + + let disputed_cores = ParaInclusion::free_disputed( + &[CandidateHash::default()].into_iter().collect::>(), + ); + assert!(disputed_cores.is_empty()); + + let make_candidate = |core_index: u32| { + let default_candidate = TestCandidateBuilder::default().build(); + CandidatePendingAvailability { - core: CoreIndex::from(1), - hash: default_candidate.hash(), - descriptor: default_candidate.descriptor, + core: CoreIndex::from(core_index), + hash: CandidateHash(Hash::from_low_u64_be(core_index as _)), + descriptor: default_candidate.descriptor.clone(), availability_votes: default_availability_votes(), relay_parent_number: 0, - backed_in_number: 5, + backed_in_number: 0, backers: default_backing_bitfield(), - backing_group: GroupIndex::from(1), - }, + backing_group: GroupIndex::from(core_index), + commitments: default_candidate.commitments.clone(), + } + }; + + // Disputed + >::insert( + chain_a, + [make_candidate(0)].into_iter().collect::>(), + ); + + // Not disputed. + >::insert( + &chain_b, + [make_candidate(1)].into_iter().collect::>(), ); - PendingAvailabilityCommitments::::insert(chain_b, default_candidate.commitments); + + // 2 chained candidates. The first one is disputed. The other will be evicted also. + let mut c_candidates = VecDeque::new(); + c_candidates.push_back(make_candidate(2)); + c_candidates.push_back(make_candidate(3)); + + >::insert(&chain_c, c_candidates); + + // 2 chained candidates. All are disputed. 
+ let mut d_candidates = VecDeque::new(); + d_candidates.push_back(make_candidate(4)); + d_candidates.push_back(make_candidate(5)); + + >::insert(&chain_d, d_candidates); + + // 3 chained candidates. The second one is disputed. The first one will remain in place. + let mut e_candidates = VecDeque::new(); + e_candidates.push_back(make_candidate(6)); + e_candidates.push_back(make_candidate(7)); + e_candidates.push_back(make_candidate(8)); + + >::insert(&chain_e, e_candidates); + + // 3 chained candidates, none are disputed. + let mut f_candidates = VecDeque::new(); + f_candidates.push_back(make_candidate(9)); + f_candidates.push_back(make_candidate(10)); + f_candidates.push_back(make_candidate(11)); + + >::insert(&chain_f, f_candidates); run_to_block(5, |_| None); - assert!(>::get(&chain_a).is_some()); - assert!(>::get(&chain_b).is_some()); - assert!(>::get(&chain_a).is_some()); - assert!(>::get(&chain_b).is_some()); + assert_eq!(>::get(&chain_a).unwrap().len(), 1); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert_eq!(>::get(&chain_c).unwrap().len(), 2); + assert_eq!(>::get(&chain_d).unwrap().len(), 2); + assert_eq!(>::get(&chain_e).unwrap().len(), 3); + assert_eq!(>::get(&chain_f).unwrap().len(), 3); + + let disputed_candidates = [ + CandidateHash(Hash::from_low_u64_be(0)), + CandidateHash(Hash::from_low_u64_be(2)), + CandidateHash(Hash::from_low_u64_be(4)), + CandidateHash(Hash::from_low_u64_be(5)), + CandidateHash(Hash::from_low_u64_be(7)), + ] + .into_iter() + .collect::>(); + let disputed_cores = ParaInclusion::free_disputed(&disputed_candidates); - ParaInclusion::collect_pending(Scheduler::availability_timeout_predicate()); + assert_eq!( + disputed_cores.into_iter().map(|(core, _)| core).collect::>(), + vec![ + CoreIndex(0), + CoreIndex(2), + CoreIndex(3), + CoreIndex(4), + CoreIndex(5), + CoreIndex(7), + CoreIndex(8), + ] + ); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_some()); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_some()); + assert!(>::get(&chain_a).unwrap().is_empty()); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert!(>::get(&chain_c).unwrap().is_empty()); + assert!(>::get(&chain_d).unwrap().is_empty()); + assert_eq!( + >::get(&chain_e) + .unwrap() + .into_iter() + .map(|c| c.core) + .collect::>(), + vec![CoreIndex(6)] + ); + assert_eq!( + >::get(&chain_f) + .unwrap() + .into_iter() + .map(|c| c.core) + .collect::>(), + vec![CoreIndex(9), CoreIndex(10), CoreIndex(11)] + ); }); } @@ -474,14 +681,6 @@ fn bitfield_checks() { let signing_context = SigningContext { parent_hash: System::parent_hash(), session_index: 5 }; - let core_lookup = |core| match core { - core if core == CoreIndex::from(0) => Some(chain_a), - core if core == CoreIndex::from(1) => Some(chain_b), - core if core == CoreIndex::from(2) => Some(thread_a), - core if core == CoreIndex::from(3) => None, // for the expected_cores() + 1 test below. 
- _ => panic!("out of bounds for testing"), - }; - // too many bits in bitfield { let mut bare_bitfield = default_bitfield(); @@ -550,7 +749,7 @@ fn bitfield_checks() { ); assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let x = process_bitfields(checked_bitfields); assert!(x.is_empty(), "No core should be freed."); } @@ -571,7 +770,7 @@ fn bitfield_checks() { ); assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let x = process_bitfields(checked_bitfields); assert!(x.is_empty(), "No core should be freed."); } @@ -579,12 +778,10 @@ fn bitfield_checks() { { let mut bare_bitfield = default_bitfield(); - assert_eq!(core_lookup(CoreIndex::from(0)), Some(chain_a)); - let default_candidate = TestCandidateBuilder::default().build(); >::insert( chain_a, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(0), hash: default_candidate.hash(), descriptor: default_candidate.descriptor, @@ -593,9 +790,11 @@ fn bitfield_checks() { backed_in_number: 0, backers: default_backing_bitfield(), backing_group: GroupIndex::from(0), - }, + commitments: default_candidate.commitments, + }] + .into_iter() + .collect::>(), ); - PendingAvailabilityCommitments::::insert(chain_a, default_candidate.commitments); *bare_bitfield.0.get_mut(0).unwrap() = true; let signed = sign_bitfield( @@ -613,53 +812,10 @@ fn bitfield_checks() { ); assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let x = process_bitfields(checked_bitfields); assert!(x.is_empty(), "No core should be freed."); >::remove(chain_a); - PendingAvailabilityCommitments::::remove(chain_a); - } - - // bitfield signed with pending bit signed, but no commitments. 
- { - let mut bare_bitfield = default_bitfield(); - - assert_eq!(core_lookup(CoreIndex::from(0)), Some(chain_a)); - - let default_candidate = TestCandidateBuilder::default().build(); - >::insert( - chain_a, - CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: default_candidate.hash(), - descriptor: default_candidate.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: 0, - backed_in_number: 0, - backers: default_backing_bitfield(), - backing_group: GroupIndex::from(0), - }, - ); - - *bare_bitfield.0.get_mut(0).unwrap() = true; - let signed = sign_bitfield( - &keystore, - &validators[0], - ValidatorIndex(0), - bare_bitfield, - &signing_context, - ); - - let checked_bitfields = simple_sanitize_bitfields( - vec![signed.into()], - DisputedBitfield::zeros(expected_bits()), - expected_bits(), - ); - assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); - // no core is freed - assert!(x.is_empty(), "No core should be freed."); } }); } @@ -673,13 +829,17 @@ fn availability_threshold_is_supermajority() { #[test] fn supermajority_bitfields_trigger_availability() { - let chain_a = ParaId::from(1_u32); - let chain_b = ParaId::from(2_u32); - let thread_a = ParaId::from(3_u32); + let chain_a = ParaId::from(0_u32); + let chain_b = ParaId::from(1_u32); + let chain_c = ParaId::from(2_u32); + let chain_d = ParaId::from(3_u32); + let thread_a = ParaId::from(4_u32); let paras = vec![ (chain_a, ParaKind::Parachain), (chain_b, ParaKind::Parachain), + (chain_c, ParaKind::Parachain), + (chain_d, ParaKind::Parachain), (thread_a, ParaKind::Parathread), ]; let validators = vec![ @@ -688,6 +848,8 @@ fn supermajority_bitfields_trigger_availability() { Sr25519Keyring::Charlie, Sr25519Keyring::Dave, Sr25519Keyring::Ferdie, + Sr25519Keyring::One, + Sr25519Keyring::Two, ]; let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory()); for validator in validators.iter() { @@ -707,13 +869,7 @@ fn supermajority_bitfields_trigger_availability() { let signing_context = SigningContext { parent_hash: System::parent_hash(), session_index: 5 }; - let core_lookup = |core| match core { - core if core == CoreIndex::from(0) => Some(chain_a), - core if core == CoreIndex::from(1) => Some(chain_b), - core if core == CoreIndex::from(2) => Some(thread_a), - _ => panic!("Core out of bounds for 2 parachains and 1 parathread core."), - }; - + // Chain A only has one candidate pending availability. It will be made available now. let candidate_a = TestCandidateBuilder { para_id: chain_a, head_data: vec![1, 2, 3, 4].into(), @@ -723,7 +879,7 @@ fn supermajority_bitfields_trigger_availability() { >::insert( chain_a, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(0), hash: candidate_a.hash(), descriptor: candidate_a.clone().descriptor, @@ -732,10 +888,13 @@ fn supermajority_bitfields_trigger_availability() { backed_in_number: 0, backers: backing_bitfield(&[3, 4]), backing_group: GroupIndex::from(0), - }, + commitments: candidate_a.clone().commitments, + }] + .into_iter() + .collect::>(), ); - PendingAvailabilityCommitments::::insert(chain_a, candidate_a.clone().commitments); + // Chain B only has one candidate pending availability. It won't be made available now. 
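+ // (It will only gather 4 of the 5 availability votes required by the threshold.)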
let candidate_b = TestCandidateBuilder { para_id: chain_b, head_data: vec![5, 6, 7, 8].into(), ..Default::default() } .build(); >::insert( chain_b, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(1), hash: candidate_b.hash(), descriptor: candidate_b.descriptor, availability_votes: default_availability_votes(), relay_parent_number: 0, backed_in_number: 0, backers: backing_bitfield(&[0, 2]), backing_group: GroupIndex::from(1), - }, + commitments: candidate_b.commitments, + }] + .into_iter() + .collect::>(), ); - PendingAvailabilityCommitments::::insert(chain_b, candidate_b.commitments); - // this bitfield signals that a and b are available. - let a_and_b_available = { - let mut bare_bitfield = default_bitfield(); - *bare_bitfield.0.get_mut(0).unwrap() = true; - *bare_bitfield.0.get_mut(1).unwrap() = true; + // Chain C has three candidates pending availability. The first and third candidates will be + // made available. Only the first candidate will be evicted from the core and enacted. + let candidate_c_1 = TestCandidateBuilder { + para_id: chain_c, + head_data: vec![7, 8].into(), + ..Default::default() + } + .build(); + let candidate_c_2 = TestCandidateBuilder { + para_id: chain_c, + head_data: vec![9, 10].into(), + ..Default::default() + } + .build(); + let candidate_c_3 = TestCandidateBuilder { + para_id: chain_c, + head_data: vec![11, 12].into(), + ..Default::default() + } + .build(); - bare_bitfield - }; + let mut c_candidates = VecDeque::new(); + c_candidates.push_back(CandidatePendingAvailability { + core: CoreIndex::from(2), + hash: candidate_c_1.hash(), + descriptor: candidate_c_1.descriptor.clone(), + availability_votes: default_availability_votes(), + relay_parent_number: 0, + backed_in_number: 0, + backers: backing_bitfield(&[1]), + backing_group: GroupIndex::from(2), + commitments: candidate_c_1.commitments.clone(), + }); + c_candidates.push_back(CandidatePendingAvailability { + core: CoreIndex::from(3), + hash: candidate_c_2.hash(), + descriptor: candidate_c_2.descriptor.clone(), + availability_votes: default_availability_votes(), + relay_parent_number: 0, + backed_in_number: 0, + backers: backing_bitfield(&[5]), + backing_group: GroupIndex::from(3), + commitments: candidate_c_2.commitments.clone(), + }); + c_candidates.push_back(CandidatePendingAvailability { + core: CoreIndex::from(4), + hash: candidate_c_3.hash(), + descriptor: candidate_c_3.descriptor.clone(), + availability_votes: default_availability_votes(), + relay_parent_number: 0, + backed_in_number: 0, + backers: backing_bitfield(&[6]), + backing_group: GroupIndex::from(4), + commitments: candidate_c_3.commitments.clone(), + }); - // this bitfield signals that only a is available. - let a_available = { + >::insert(chain_c, c_candidates); + + // this bitfield signals that all pending candidates (a, b, c1, c2 and c3) are available.
+ let all_available = { let mut bare_bitfield = default_bitfield(); - *bare_bitfield.0.get_mut(0).unwrap() = true; + for bit in 0..=4 { + *bare_bitfield.0.get_mut(bit).unwrap() = true; + } bare_bitfield }; let threshold = availability_threshold(validators.len()); - // 4 of 5 first value >= 2/3 - assert_eq!(threshold, 4); + // 5 out of 7 is the first value >= 2/3 + assert_eq!(threshold, 5); let signed_bitfields = validators .iter() .enumerate() .filter_map(|(i, key)| { - let to_sign = if i < 3 { - a_and_b_available.clone() - } else if i < 4 { - a_available.clone() + let to_sign = if i < 4 { + all_available.clone() + } else if i < 5 { + // this bitfield signals that only a, c1 and c3 are available. + let mut bare_bitfield = default_bitfield(); + *bare_bitfield.0.get_mut(0).unwrap() = true; + *bare_bitfield.0.get_mut(2).unwrap() = true; + *bare_bitfield.0.get_mut(4).unwrap() = true; + + bare_bitfield } else { // sign nothing. return None @@ -814,46 +1032,129 @@ fn supermajority_bitfields_trigger_availability() { ); assert_eq!(checked_bitfields.len(), old_len, "No bitfields should have been filtered!"); - // only chain A's core is freed. - let v = process_bitfields(expected_bits(), checked_bitfields, core_lookup); - assert_eq!(vec![(CoreIndex(0), candidate_a.hash())], v); - - // chain A had 4 signing off, which is >= threshold. - // chain B has 3 signing off, which is < threshold. - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_some()); - assert_eq!(>::get(&chain_b).unwrap().availability_votes, { - // check that votes from first 3 were tracked. + // only chain A's core and candidate C1's core are freed. + let v = process_bitfields(checked_bitfields); + assert_eq!( + vec![(CoreIndex(2), candidate_c_1.hash()), (CoreIndex(0), candidate_a.hash())], + v + ); + let votes = |bits: &[usize]| { let mut votes = default_availability_votes(); - *votes.get_mut(0).unwrap() = true; - *votes.get_mut(1).unwrap() = true; - *votes.get_mut(2).unwrap() = true; + for bit in bits { + *votes.get_mut(*bit).unwrap() = true; + } votes - }); + }; - // and check that chain head was enacted. + assert!(>::get(&chain_a).unwrap().is_empty()); + assert_eq!( + >::get(&chain_b) + .unwrap() + .pop_front() + .unwrap() + .availability_votes, + votes(&[0, 1, 2, 3]) + ); + let mut pending_c = >::get(&chain_c).unwrap(); + assert_eq!(pending_c.pop_front().unwrap().availability_votes, votes(&[0, 1, 2, 3])); + assert_eq!(pending_c.pop_front().unwrap().availability_votes, votes(&[0, 1, 2, 3, 4])); + assert!(pending_c.is_empty()); + + // and check the chain heads. assert_eq!(Paras::para_head(&chain_a), Some(vec![1, 2, 3, 4].into())); + assert_ne!(Paras::para_head(&chain_b), Some(vec![5, 6, 7, 8].into())); + assert_eq!(Paras::para_head(&chain_c), Some(vec![7, 8].into())); // Check that rewards are applied.
{ let rewards = crate::mock::availability_rewards(); - assert_eq!(rewards.len(), 4); - assert_eq!(rewards.get(&ValidatorIndex(0)).unwrap(), &1); - assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &1); - assert_eq!(rewards.get(&ValidatorIndex(2)).unwrap(), &1); + assert_eq!(rewards.len(), 5); + assert_eq!(rewards.get(&ValidatorIndex(0)).unwrap(), &2); + assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &2); + assert_eq!(rewards.get(&ValidatorIndex(2)).unwrap(), &2); + assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &2); + assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &2); + } + + { + let rewards = crate::mock::backing_rewards(); + + assert_eq!(rewards.len(), 3); assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &1); + } + + // Add a new bitfield which will make candidate C2 available also. This will also evict and + // enact C3. + let signed_bitfields = vec![sign_bitfield( + &keystore, + &validators[5], + ValidatorIndex(5), + { + let mut bare_bitfield = default_bitfield(); + *bare_bitfield.0.get_mut(3).unwrap() = true; + bare_bitfield + }, + &signing_context, + ) + .into()]; + + let old_len = signed_bitfields.len(); + let checked_bitfields = simple_sanitize_bitfields( + signed_bitfields, + DisputedBitfield::zeros(expected_bits()), + expected_bits(), + ); + assert_eq!(checked_bitfields.len(), old_len, "No bitfields should have been filtered!"); + + let v = process_bitfields(checked_bitfields); + assert_eq!( + vec![(CoreIndex(3), candidate_c_2.hash()), (CoreIndex(4), candidate_c_3.hash())], + v + ); + + assert!(>::get(&chain_a).unwrap().is_empty()); + assert_eq!( + >::get(&chain_b) + .unwrap() + .pop_front() + .unwrap() + .availability_votes, + votes(&[0, 1, 2, 3]) + ); + assert!(>::get(&chain_c).unwrap().is_empty()); + + // and check the chain heads. + assert_eq!(Paras::para_head(&chain_a), Some(vec![1, 2, 3, 4].into())); + assert_ne!(Paras::para_head(&chain_b), Some(vec![5, 6, 7, 8].into())); + assert_eq!(Paras::para_head(&chain_c), Some(vec![11, 12].into())); + + // Check that rewards are applied.
+ { + let rewards = crate::mock::availability_rewards(); + + assert_eq!(rewards.len(), 6); + assert_eq!(rewards.get(&ValidatorIndex(0)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(2)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &3); + assert_eq!(rewards.get(&ValidatorIndex(5)).unwrap(), &1); } { let rewards = crate::mock::backing_rewards(); - assert_eq!(rewards.len(), 2); + assert_eq!(rewards.len(), 5); assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &1); assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(5)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(6)).unwrap(), &1); } }); } @@ -878,6 +1179,7 @@ fn candidate_checks() { Sr25519Keyring::Charlie, Sr25519Keyring::Dave, Sr25519Keyring::Ferdie, + Sr25519Keyring::One, ]; let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory()); for validator in validators.iter() { @@ -904,7 +1206,8 @@ fn candidate_checks() { group_index if group_index == GroupIndex::from(0) => Some(vec![0, 1]), group_index if group_index == GroupIndex::from(1) => Some(vec![2, 3]), group_index if group_index == GroupIndex::from(2) => Some(vec![4]), - _ => panic!("Group index out of bounds for 2 parachains and 1 parathread core"), + group_index if group_index == GroupIndex::from(3) => Some(vec![5]), + _ => panic!("Group index out of bounds"), } .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) }; @@ -914,12 +1217,12 @@ fn candidate_checks() { vec![ValidatorIndex(0), ValidatorIndex(1)], vec![ValidatorIndex(2), ValidatorIndex(3)], vec![ValidatorIndex(4)], + vec![ValidatorIndex(5)], ]; Scheduler::set_validator_groups(validator_groups); let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); let chain_a_assignment = (chain_a, CoreIndex::from(0)); - let chain_b_assignment = (chain_b, CoreIndex::from(1)); let thread_a_assignment = (thread_a, CoreIndex::from(2)); @@ -929,14 +1232,14 @@ fn candidate_checks() { assert_eq!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![], + &BTreeMap::new(), &group_validators, false ), Ok(ProcessedCandidates::default()) ); - // candidates out of order. + // Check candidate ordering { let mut candidate_a = TestCandidateBuilder { para_id: chain_a, @@ -947,19 +1250,37 @@ fn candidate_checks() { ..Default::default() } .build(); - let mut candidate_b = TestCandidateBuilder { + let mut candidate_b_1 = TestCandidateBuilder { para_id: chain_b, relay_parent: System::parent_hash(), pov_hash: Hash::repeat_byte(2), persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![1, 2, 3]), ..Default::default() } .build(); - collator_sign_candidate(Sr25519Keyring::One, &mut candidate_a); + // Make candidate b2 a child of b1. 
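+			// "Child" here means that b2's persisted validation data commits to b1's output
+			// head data as its parent head. Assuming the
+			// `make_persisted_validation_data_with_parent` test helper simply fills the
+			// standard `PersistedValidationData` fields, the hash pinned below is roughly:
+			//
+			//     let pvd = PersistedValidationData {
+			//         parent_head: candidate_b_1.commitments.head_data.clone(),
+			//         relay_parent_number: RELAY_PARENT_NUM,
+			//         relay_parent_storage_root: Default::default(),
+			//         max_pov_size: u32::MAX, // illustrative; the helper picks the real value
+			//     };
+			//     let expected_hash = pvd.hash();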
+ let mut candidate_b_2 = TestCandidateBuilder { + para_id: chain_b, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(3), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::( + RELAY_PARENT_NUM, + Default::default(), + candidate_b_1.commitments.head_data.clone(), + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![5, 6, 7]), + ..Default::default() + } + .build(); - collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_a); + collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b_1); + collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b_2); let backed_a = back_candidate( candidate_a, @@ -971,8 +1292,18 @@ fn candidate_checks() { None, ); - let backed_b = back_candidate( - candidate_b, + let backed_b_1 = back_candidate( + candidate_b_1.clone(), + &validators, + group_validators(GroupIndex::from(2)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + let backed_b_2 = back_candidate( + candidate_b_2, &validators, group_validators(GroupIndex::from(1)).unwrap().as_ref(), &keystore, @@ -981,15 +1312,82 @@ fn candidate_checks() { None, ); - // out-of-order manifests as unscheduled. + // candidates are required to be sorted in dependency order. assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_b, chain_b_assignment.1), (backed_a, chain_a_assignment.1)], + &vec![( + chain_b, + vec![ + (backed_b_2.clone(), CoreIndex(1)), + (backed_b_1.clone(), CoreIndex(2)) + ] + ),] + .into_iter() + .collect(), &group_validators, false ), - Error::::ScheduledOutOfOrder + Error::::ValidationDataHashMismatch + ); + + // candidates are no longer required to be sorted by core index. + ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![ + ( + chain_b, + vec![ + (backed_b_1.clone(), CoreIndex(2)), + (backed_b_2.clone(), CoreIndex(1)), + ], + ), + (chain_a_assignment.0, vec![(backed_a.clone(), chain_a_assignment.1)]), + ] + .into_iter() + .collect(), + &group_validators, + false, + ) + .unwrap(); + + // candidate does not build on top of the latest unincluded head + + let mut candidate_b_3 = TestCandidateBuilder { + para_id: chain_b, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(4), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::( + RELAY_PARENT_NUM, + Default::default(), + candidate_b_1.commitments.head_data.clone(), + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![8, 9]), + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b_3); + + let backed_b_3 = back_candidate( + candidate_b_3, + &validators, + group_validators(GroupIndex::from(3)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + assert_noop!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![(chain_b, vec![(backed_b_3, CoreIndex(3))])].into_iter().collect(), + &group_validators, + false + ), + Error::::ValidationDataHashMismatch ); } @@ -1006,8 +1404,9 @@ fn candidate_checks() { .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + // Insufficient backing. 
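+		// A candidate needs at least `effective_minimum_backing_votes(group_len,
+		// minimum_backing_votes)` validity votes to pass `process_candidates`. If that
+		// helper follows the usual "configured minimum, capped by the group size" rule,
+		// it is effectively:
+		//
+		//     fn effective_min(group_len: usize, configured_minimum: u32) -> usize {
+		//         core::cmp::min(group_len, configured_minimum as usize)
+		//     }
+		//
+		// The candidate backed below deliberately collects fewer votes than that bar.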
let backed = back_candidate( - candidate, + candidate.clone(), &validators, group_validators(GroupIndex::from(0)).unwrap().as_ref(), &keystore, @@ -1019,12 +1418,37 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), Error::::InsufficientBacking ); + + // Wrong backing group. + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(1)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + assert_noop!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), + &group_validators, + false + ), + Error::::InvalidBacking + ); } // one of candidates is not based on allowed relay parent. @@ -1078,7 +1502,12 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_b, chain_b_assignment.1), (backed_a, chain_a_assignment.1)], + &vec![ + (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), + (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]) + ] + .into_iter() + .collect(), &group_validators, false ), @@ -1117,7 +1546,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, thread_a_assignment.1)], + &vec![(thread_a_assignment.0, vec![(backed, thread_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1125,100 +1556,6 @@ fn candidate_checks() { ); } - // para occupied - reject. - { - let mut candidate = TestCandidateBuilder { - para_id: chain_a, - relay_parent: System::parent_hash(), - pov_hash: Hash::repeat_byte(1), - persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), - hrmp_watermark: RELAY_PARENT_NUM, - ..Default::default() - } - .build(); - - collator_sign_candidate(Sr25519Keyring::One, &mut candidate); - - let backed = back_candidate( - candidate, - &validators, - group_validators(GroupIndex::from(0)).unwrap().as_ref(), - &keystore, - &signing_context, - BackingKind::Threshold, - None, - ); - - let candidate = TestCandidateBuilder::default().build(); - >::insert( - &chain_a, - CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate.hash(), - descriptor: candidate.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: 3, - backed_in_number: 4, - backers: default_backing_bitfield(), - backing_group: GroupIndex::from(0), - }, - ); - >::insert(&chain_a, candidate.commitments); - - assert_noop!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], - &group_validators, - false - ), - Error::::CandidateScheduledBeforeParaFree - ); - - >::remove(&chain_a); - >::remove(&chain_a); - } - - // messed up commitments storage - do not panic - reject. 
- { - let mut candidate = TestCandidateBuilder { - para_id: chain_a, - relay_parent: System::parent_hash(), - pov_hash: Hash::repeat_byte(1), - persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), - hrmp_watermark: RELAY_PARENT_NUM, - ..Default::default() - } - .build(); - - collator_sign_candidate(Sr25519Keyring::One, &mut candidate); - - // this is not supposed to happen - >::insert(&chain_a, candidate.commitments.clone()); - - let backed = back_candidate( - candidate, - &validators, - group_validators(GroupIndex::from(0)).unwrap().as_ref(), - &keystore, - &signing_context, - BackingKind::Threshold, - None, - ); - - assert_noop!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], - &group_validators, - false - ), - Error::::CandidateScheduledBeforeParaFree - ); - - >::remove(&chain_a); - } - // interfering code upgrade - reject { let mut candidate = TestCandidateBuilder { @@ -1260,7 +1597,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1292,14 +1631,16 @@ fn candidate_checks() { None, ); - assert_eq!( + assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, - false + false, ), - Err(Error::::ValidationDataHashMismatch.into()), + Error::::ValidationDataHashMismatch ); } @@ -1331,7 +1672,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1367,7 +1710,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1506,16 +1851,21 @@ fn backing_works() { ); let backed_candidates = vec![ - (backed_a.clone(), chain_a_assignment.1), - (backed_b.clone(), chain_b_assignment.1), - (backed_c, thread_a_assignment.1), - ]; + (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]), + (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), + (thread_a_assignment.0, vec![(backed_c, thread_a_assignment.1)]), + ] + .into_iter() + .collect::>(); + let get_backing_group_idx = { // the order defines the group implicitly for this test case let backed_candidates_with_groups = backed_candidates - .iter() + .values() .enumerate() - .map(|(idx, (backed_candidate, _))| (backed_candidate.hash(), GroupIndex(idx as _))) + .map(|(idx, backed_candidates)| { + (backed_candidates.iter().next().unwrap().0.hash(), GroupIndex(idx as _)) + }) .collect::>(); move |candidate_hash_x: CandidateHash| -> Option { @@ -1534,7 +1884,7 @@ fn backing_works() { candidate_receipt_with_backing_validator_indices, } = ParaInclusion::process_candidates( &allowed_relay_parents, - backed_candidates.clone(), + &backed_candidates, &group_validators, false, ) @@ -1555,7 +1905,8 @@ fn backing_works() { CandidateHash, (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), >::new(); - backed_candidates.into_iter().for_each(|(backed_candidate, 
_)| { + backed_candidates.values().for_each(|backed_candidates| { + let backed_candidate = backed_candidates.iter().next().unwrap().0.clone(); let candidate_receipt_with_backers = intermediate .entry(backed_candidate.hash()) .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); @@ -1606,20 +1957,21 @@ fn backing_works() { }; assert_eq!( >::get(&chain_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate_a.hash(), - descriptor: candidate_a.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(0), - }) - ); - assert_eq!( - >::get(&chain_a), - Some(candidate_a.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(0), + hash: candidate_a.hash(), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(0), + commitments: candidate_a.commitments, + }] + .into_iter() + .collect::>() + ) ); let backers = { @@ -1631,38 +1983,40 @@ fn backing_works() { }; assert_eq!( >::get(&chain_b), - Some(CandidatePendingAvailability { - core: CoreIndex::from(1), - hash: candidate_b.hash(), - descriptor: candidate_b.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(1), - }) - ); - assert_eq!( - >::get(&chain_b), - Some(candidate_b.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(1), + hash: candidate_b.hash(), + descriptor: candidate_b.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(1), + commitments: candidate_b.commitments, + }] + .into_iter() + .collect::>() + ) ); assert_eq!( >::get(&thread_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(2), - hash: candidate_c.hash(), - descriptor: candidate_c.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers: backing_bitfield(&[4]), - backing_group: GroupIndex::from(2), - }) - ); - assert_eq!( - >::get(&thread_a), - Some(candidate_c.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(2), + hash: candidate_c.hash(), + descriptor: candidate_c.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[4]), + backing_group: GroupIndex::from(2), + commitments: candidate_c.commitments + }] + .into_iter() + .collect::>() + ) ); }); } @@ -1750,11 +2104,17 @@ fn backing_works_with_elastic_scaling_mvp() { .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate_b_1); + // Make candidate b2 a child of b1. 
let mut candidate_b_2 = TestCandidateBuilder { para_id: chain_b, relay_parent: System::parent_hash(), pov_hash: Hash::repeat_byte(3), - persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::( + RELAY_PARENT_NUM, + Default::default(), + candidate_b_1.commitments.head_data.clone(), + ) + .hash(), hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() } @@ -1791,18 +2151,25 @@ fn backing_works_with_elastic_scaling_mvp() { Some(CoreIndex(2)), ); - let backed_candidates = vec![ - (backed_a.clone(), CoreIndex(0)), - (backed_b_1.clone(), CoreIndex(1)), - (backed_b_2.clone(), CoreIndex(2)), - ]; + let mut backed_candidates = BTreeMap::new(); + backed_candidates.insert(chain_a, vec![(backed_a, CoreIndex(0))]); + backed_candidates + .insert(chain_b, vec![(backed_b_1, CoreIndex(1)), (backed_b_2, CoreIndex(2))]); + let get_backing_group_idx = { // the order defines the group implicitly for this test case let backed_candidates_with_groups = backed_candidates - .iter() + .values() .enumerate() - .map(|(idx, (backed_candidate, _))| (backed_candidate.hash(), GroupIndex(idx as _))) - .collect::>(); + .map(|(idx, backed_candidates)| { + backed_candidates + .iter() + .enumerate() + .map(|(i, c)| (c.0.hash(), GroupIndex((idx + i) as _))) + .collect() + }) + .collect::>>() + .concat(); move |candidate_hash_x: CandidateHash| -> Option { backed_candidates_with_groups.iter().find_map(|(candidate_hash, grp)| { @@ -1820,14 +2187,13 @@ fn backing_works_with_elastic_scaling_mvp() { candidate_receipt_with_backing_validator_indices, } = ParaInclusion::process_candidates( &allowed_relay_parents, - backed_candidates.clone(), + &backed_candidates, &group_validators, true, ) .expect("candidates scheduled, in order, and backed"); - // Both b candidates will be backed. However, only one will be recorded on-chain and proceed - // with being made available. + // Both b candidates will be backed. 
assert_eq!( occupied_cores, vec![ @@ -1842,26 +2208,29 @@ fn backing_works_with_elastic_scaling_mvp() { CandidateHash, (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), >::new(); - backed_candidates.into_iter().for_each(|(backed_candidate, _)| { - let candidate_receipt_with_backers = expected - .entry(backed_candidate.hash()) - .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); - let (validator_indices, _maybe_core_index) = - backed_candidate.validator_indices_and_core_index(true); - assert_eq!(backed_candidate.validity_votes().len(), validator_indices.count_ones()); - candidate_receipt_with_backers.1.extend( - validator_indices - .iter() - .enumerate() - .filter(|(_, signed)| **signed) - .zip(backed_candidate.validity_votes().iter().cloned()) - .filter_map(|((validator_index_within_group, _), attestation)| { - let grp_idx = get_backing_group_idx(backed_candidate.hash()).unwrap(); - group_validators(grp_idx).map(|validator_indices| { - (validator_indices[validator_index_within_group], attestation) - }) - }), - ); + backed_candidates.values().for_each(|backed_candidates| { + for backed_candidate in backed_candidates { + let backed_candidate = backed_candidate.0.clone(); + let candidate_receipt_with_backers = expected + .entry(backed_candidate.hash()) + .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); + let (validator_indices, _maybe_core_index) = + backed_candidate.validator_indices_and_core_index(true); + assert_eq!(backed_candidate.validity_votes().len(), validator_indices.count_ones()); + candidate_receipt_with_backers.1.extend( + validator_indices + .iter() + .enumerate() + .filter(|(_, signed)| **signed) + .zip(backed_candidate.validity_votes().iter().cloned()) + .filter_map(|((validator_index_within_group, _), attestation)| { + let grp_idx = get_backing_group_idx(backed_candidate.hash()).unwrap(); + group_validators(grp_idx).map(|validator_indices| { + (validator_indices[validator_index_within_group], attestation) + }) + }), + ); + } }); assert_eq!( @@ -1881,39 +2250,54 @@ fn backing_works_with_elastic_scaling_mvp() { }; assert_eq!( >::get(&chain_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate_a.hash(), - descriptor: candidate_a.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(0), - }) - ); - assert_eq!( - >::get(&chain_a), - Some(candidate_a.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(0), + hash: candidate_a.hash(), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(0), + commitments: candidate_a.commitments + }] + .into_iter() + .collect::>() + ) ); - // Only one candidate for b will be recorded on chain. + // Both candidates of b will be recorded on chain. 
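+		// The storage shape is what makes this possible: the commitments now live inside
+		// `CandidatePendingAvailability` and each para keeps an ordered queue of pending
+		// candidates instead of a single entry, conceptually:
+		//
+		//     // before: one candidate per para, commitments stored separately
+		//     //   PendingAvailability:            ParaId -> CandidatePendingAvailability
+		//     //   PendingAvailabilityCommitments: ParaId -> CandidateCommitments
+		//     // after: a FIFO of candidates per para, commitments inlined
+		//     //   PendingAvailability: ParaId -> VecDeque<CandidatePendingAvailability>
+		//
+		// so chain B's two candidates can sit in the queue in dependency order.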
assert_eq!( >::get(&chain_b), - Some(CandidatePendingAvailability { - core: CoreIndex::from(2), - hash: candidate_b_2.hash(), - descriptor: candidate_b_2.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers: backing_bitfield(&[4]), - backing_group: GroupIndex::from(2), - }) - ); - assert_eq!( - >::get(&chain_b), - Some(candidate_b_2.commitments), + Some( + [ + CandidatePendingAvailability { + core: CoreIndex::from(1), + hash: candidate_b_1.hash(), + descriptor: candidate_b_1.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[2, 3]), + backing_group: GroupIndex::from(1), + commitments: candidate_b_1.commitments + }, + CandidatePendingAvailability { + core: CoreIndex::from(2), + hash: candidate_b_2.hash(), + descriptor: candidate_b_2.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[4]), + backing_group: GroupIndex::from(2), + commitments: candidate_b_2.commitments + } + ] + .into_iter() + .collect::>() + ) ); }); } @@ -1998,8 +2382,10 @@ fn can_include_candidate_with_ok_code_upgrade() { let ProcessedCandidates { core_indices: occupied_cores, .. } = ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_a, chain_a_assignment.1)], - &group_validators, + &vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])] + .into_iter() + .collect::>(), + group_validators, false, ) .expect("candidates scheduled, in order, and backed"); @@ -2015,20 +2401,21 @@ fn can_include_candidate_with_ok_code_upgrade() { }; assert_eq!( >::get(&chain_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate_a.hash(), - descriptor: candidate_a.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(0), - }) - ); - assert_eq!( - >::get(&chain_a), - Some(candidate_a.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(0), + hash: candidate_a.hash(), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(0), + commitments: candidate_a.commitments + }] + .into_iter() + .collect::>() + ) ); }); } @@ -2209,14 +2596,16 @@ fn check_allowed_relay_parents() { ); let backed_candidates = vec![ - (backed_a, chain_a_assignment.1), - (backed_b, chain_b_assignment.1), - (backed_c, thread_a_assignment.1), - ]; + (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]), + (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), + (thread_a_assignment.0, vec![(backed_c, thread_a_assignment.1)]), + ] + .into_iter() + .collect::>(); ParaInclusion::process_candidates( &allowed_relay_parents, - backed_candidates.clone(), + &backed_candidates, &group_validators, false, ) @@ -2264,25 +2653,10 @@ fn session_change_wipes() { run_to_block(10, |_| None); - >::insert( - &ValidatorIndex(0), - AvailabilityBitfieldRecord { bitfield: default_bitfield(), submitted_at: 9 }, - ); - - >::insert( - &ValidatorIndex(1), - AvailabilityBitfieldRecord { 
bitfield: default_bitfield(), submitted_at: 9 }, - ); - - >::insert( - &ValidatorIndex(4), - AvailabilityBitfieldRecord { bitfield: default_bitfield(), submitted_at: 9 }, - ); - let candidate = TestCandidateBuilder::default().build(); >::insert( &chain_a, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(0), hash: candidate.hash(), descriptor: candidate.descriptor.clone(), @@ -2291,13 +2665,15 @@ fn session_change_wipes() { backed_in_number: 6, backers: default_backing_bitfield(), backing_group: GroupIndex::from(0), - }, + commitments: candidate.commitments.clone(), + }] + .into_iter() + .collect::>(), ); - >::insert(&chain_a, candidate.commitments.clone()); >::insert( &chain_b, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(1), hash: candidate.hash(), descriptor: candidate.descriptor, @@ -2306,22 +2682,18 @@ fn session_change_wipes() { backed_in_number: 7, backers: default_backing_bitfield(), backing_group: GroupIndex::from(1), - }, + commitments: candidate.commitments, + }] + .into_iter() + .collect::>(), ); - >::insert(&chain_b, candidate.commitments); run_to_block(11, |_| None); assert_eq!(shared::Pallet::::session_index(), 5); - assert!(>::get(&ValidatorIndex(0)).is_some()); - assert!(>::get(&ValidatorIndex(1)).is_some()); - assert!(>::get(&ValidatorIndex(4)).is_some()); - assert!(>::get(&chain_a).is_some()); assert!(>::get(&chain_b).is_some()); - assert!(>::get(&chain_a).is_some()); - assert!(>::get(&chain_b).is_some()); run_to_block(12, |n| match n { 12 => Some(SessionChangeNotification { @@ -2337,18 +2709,7 @@ fn session_change_wipes() { assert_eq!(shared::Pallet::::session_index(), 6); - assert!(>::get(&ValidatorIndex(0)).is_none()); - assert!(>::get(&ValidatorIndex(1)).is_none()); - assert!(>::get(&ValidatorIndex(4)).is_none()); - - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_none()); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_none()); - - assert!(>::iter().collect::>().is_empty()); assert!(>::iter().collect::>().is_empty()); - assert!(>::iter().collect::>().is_empty()); }); } @@ -2420,11 +2781,6 @@ fn para_upgrade_delay_scheduled_from_inclusion() { ]]; Scheduler::set_validator_groups(validator_groups); - let core_lookup = |core| match core { - core if core == CoreIndex::from(0) => Some(chain_a), - _ => None, - }; - let allowed_relay_parents = default_allowed_relay_parent_tracker(); let chain_a_assignment = (chain_a, CoreIndex::from(0)); @@ -2453,7 +2809,9 @@ fn para_upgrade_delay_scheduled_from_inclusion() { let ProcessedCandidates { core_indices: occupied_cores, .. 
} = ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_a, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])] + .into_iter() + .collect::>(), &group_validators, false, ) @@ -2488,11 +2846,10 @@ fn para_upgrade_delay_scheduled_from_inclusion() { expected_bits(), ); - let v = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let v = process_bitfields(checked_bitfields); assert_eq!(vec![(CoreIndex(0), candidate_a.hash())], v); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_a).is_none()); + assert!(>::get(&chain_a).unwrap().is_empty()); let active_vote_state = paras::Pallet::::active_vote_state(&new_validation_code_hash) .expect("prechecking must be initiated"); diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs index f3365945758c..1b07acffb154 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -145,10 +145,6 @@ benchmarks! { assert_eq!(backing_validators.1.len(), votes); } - assert_eq!( - inclusion::PendingAvailabilityCommitments::::iter().count(), - cores_with_backed.len() - ); assert_eq!( inclusion::PendingAvailability::::iter().count(), cores_with_backed.len() @@ -209,10 +205,6 @@ benchmarks! { ); } - assert_eq!( - inclusion::PendingAvailabilityCommitments::::iter().count(), - cores_with_backed.len() - ); assert_eq!( inclusion::PendingAvailability::::iter().count(), cores_with_backed.len() diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 723a15bdba7a..02ddfd0accab 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -24,8 +24,7 @@ use crate::{ configuration, disputes::DisputesHandler, - inclusion, - inclusion::CandidateCheckContext, + inclusion::{self, CandidateCheckContext}, initializer, metrics::METRICS, paras, @@ -35,6 +34,7 @@ use crate::{ }; use bitvec::prelude::BitVec; use frame_support::{ + defensive, dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, inherent::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}, pallet_prelude::*, @@ -45,7 +45,7 @@ use pallet_babe::{self, ParentBlockRandomness}; use primitives::{ effective_minimum_backing_votes, vstaging::node_features::FeatureIndex, BackedCandidate, CandidateHash, CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, - CoreIndex, DisputeStatementSet, InherentData as ParachainsInherentData, + CoreIndex, DisputeStatementSet, HeadData, InherentData as ParachainsInherentData, MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, SignedAvailabilityBitfields, SigningContext, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, ValidatorId, ValidatorIndex, ValidityAttestation, PARACHAINS_INHERENT_IDENTIFIER, @@ -134,18 +134,11 @@ pub mod pallet { /// The hash of the submitted parent header doesn't correspond to the saved block hash of /// the parent. InvalidParentHeader, - /// Disputed candidate that was concluded invalid. - CandidateConcludedInvalid, /// The data given to the inherent will result in an overweight block. InherentOverweight, - /// The ordering of dispute statements was invalid. - DisputeStatementsUnsortedOrDuplicates, - /// A dispute statement was invalid. 
- DisputeInvalid, - /// A candidate was backed by a disabled validator - BackedByDisabled, - /// A candidate was backed even though the paraid was not scheduled. - BackedOnUnscheduledCore, + /// A candidate was filtered during inherent execution. This should have only been done + /// during creation. + CandidatesFilteredDuringExecution, /// Too many candidates supplied. UnscheduledCandidate, } @@ -235,35 +228,6 @@ pub mod pallet { } } - /// Collect all freed cores based on storage data. (i.e. append cores freed from timeouts to - /// the given `freed_concluded`). - /// - /// The parameter `freed_concluded` contains all core indicies that became - /// free due to candidate that became available. - pub(crate) fn collect_all_freed_cores( - freed_concluded: I, - ) -> BTreeMap - where - I: core::iter::IntoIterator, - T: Config, - { - // Handle timeouts for any availability core work. - let freed_timeout = if >::availability_timeout_check_required() { - let pred = >::availability_timeout_predicate(); - >::collect_pending(pred) - } else { - Vec::new() - }; - - // Schedule paras again, given freed cores, and reasons for freeing. - let freed = freed_concluded - .into_iter() - .map(|(c, _hash)| (c, FreedReason::Concluded)) - .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))) - .collect::>(); - freed - } - #[pallet::call] impl Pallet { /// Enter the paras inherent. This will process bitfields and backed candidates. @@ -319,7 +283,7 @@ impl Pallet { /// Process inherent data. /// /// The given inherent data is processed and state is altered accordingly. If any data could - /// not be applied (inconsitencies, weight limit, ...) it is removed. + /// not be applied (inconsistencies, weight limit, ...) it is removed. /// /// When called from `create_inherent` the `context` must be set to /// `ProcessInherentDataContext::ProvideInherent` so it guarantees the invariant that inherent @@ -526,7 +490,7 @@ impl Pallet { // Contains the disputes that are concluded in the current session only, // since these are the only ones that are relevant for the occupied cores - // and lightens the load on `collect_disputed` significantly. + // and lightens the load on `free_disputed` significantly. // Cores can't be occupied with candidates of the previous sessions, and only // things with new votes can have just concluded. We only need to collect // cores with disputes that conclude just now, because disputes that @@ -542,21 +506,17 @@ impl Pallet { .map(|(_session, candidate)| candidate) .collect::>(); - let freed_disputed: BTreeMap = - >::collect_disputed(¤t_concluded_invalid_disputes) + // Get the cores freed as a result of concluded invalid candidates. + let (freed_disputed, concluded_invalid_hashes): (Vec, BTreeSet) = + >::free_disputed(¤t_concluded_invalid_disputes) .into_iter() - .map(|core| (core, FreedReason::Concluded)) - .collect(); + .unzip(); // Create a bit index from the set of core indices where each index corresponds to // a core index that was freed due to a dispute. // // I.e. 010100 would indicate, the candidates on Core 1 and 3 would be disputed. 
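+		// For example (a sketch, with a hypothetical six-core chain):
+		//
+		//     let freed_disputed = [CoreIndex(1), CoreIndex(3)];
+		//     let bitfield = create_disputed_bitfield(6, freed_disputed.iter());
+		//     // bits 1 and 3 are now set: 010100 when read from bit 0 upwards.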
- let disputed_bitfield = create_disputed_bitfield(expected_bits, freed_disputed.keys()); - - if !freed_disputed.is_empty() { - >::free_cores_and_fill_claimqueue(freed_disputed.clone(), now); - } + let disputed_bitfield = create_disputed_bitfield(expected_bits, freed_disputed.iter()); let bitfields = sanitize_bitfields::( bitfields, @@ -571,11 +531,9 @@ impl Pallet { // Process new availability bitfields, yielding any availability cores whose // work has now concluded. let freed_concluded = - >::update_pending_availability_and_get_freed_cores::<_>( - expected_bits, + >::update_pending_availability_and_get_freed_cores( &validator_public[..], bitfields.clone(), - >::core_para, ); // Inform the disputes module of all included candidates. @@ -585,8 +543,24 @@ impl Pallet { METRICS.on_candidates_included(freed_concluded.len() as u64); - let freed = collect_all_freed_cores::(freed_concluded.iter().cloned()); + // Get the timed out candidates + let freed_timeout = if >::availability_timeout_check_required() { + >::free_timedout() + } else { + Vec::new() + }; + + if !freed_timeout.is_empty() { + log::debug!(target: LOG_TARGET, "Evicted timed out cores: {:?}", freed_timeout); + } + // We'll schedule paras again, given freed cores, and reasons for freeing. + let freed = freed_concluded + .into_iter() + .map(|(c, _hash)| (c, FreedReason::Concluded)) + .chain(freed_disputed.into_iter().map(|core| (core, FreedReason::Concluded))) + .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))) + .collect::>(); >::free_cores_and_fill_claimqueue(freed, now); METRICS.on_candidates_processed_total(backed_candidates.len() as u64); @@ -605,55 +579,28 @@ impl Pallet { scheduled.entry(para_id).or_default().insert(core_idx); } - let SanitizedBackedCandidates { - backed_candidates_with_core, - votes_from_disabled_were_dropped, - dropped_unscheduled_candidates, - } = sanitize_backed_candidates::( + let initial_candidate_count = backed_candidates.len(); + let backed_candidates_with_core = sanitize_backed_candidates::( backed_candidates, &allowed_relay_parents, - |candidate_idx: usize, - backed_candidate: &BackedCandidate<::Hash>| - -> bool { - let para_id = backed_candidate.descriptor().para_id; - let prev_context = >::para_most_recent_context(para_id); - let check_ctx = CandidateCheckContext::::new(prev_context); - - // never include a concluded-invalid candidate - current_concluded_invalid_disputes.contains(&backed_candidate.hash()) || - // Instead of checking the candidates with code upgrades twice - // move the checking up here and skip it in the training wheels fallback. - // That way we avoid possible duplicate checks while assuring all - // backed candidates fine to pass on. - // - // NOTE: this is the only place where we check the relay-parent. - check_ctx - .verify_backed_candidate(&allowed_relay_parents, candidate_idx, backed_candidate.candidate()) - .is_err() - }, + concluded_invalid_hashes, scheduled, core_index_enabled, ); + let count = count_backed_candidates(&backed_candidates_with_core); - ensure!( - backed_candidates_with_core.len() <= total_scheduled_cores, - Error::::UnscheduledCandidate - ); - - METRICS.on_candidates_sanitized(backed_candidates_with_core.len() as u64); + ensure!(count <= total_scheduled_cores, Error::::UnscheduledCandidate); - // In `Enter` context (invoked during execution) there should be no backing votes from - // disabled validators because they should have been filtered out during inherent data - // preparation (`ProvideInherent` context). Abort in such cases. 
- if context == ProcessInherentDataContext::Enter { - ensure!(!votes_from_disabled_were_dropped, Error::::BackedByDisabled); - } + METRICS.on_candidates_sanitized(count as u64); - // In `Enter` context (invoked during execution) we shouldn't have filtered any candidates - // due to a para not being scheduled. They have been filtered during inherent data - // preparation (`ProvideInherent` context). Abort in such cases. + // In `Enter` context (invoked during execution) no more candidates should be filtered, + // because they have already been filtered during the `ProvideInherent` context. Abort in such + // cases. if context == ProcessInherentDataContext::Enter { - ensure!(!dropped_unscheduled_candidates, Error::::BackedOnUnscheduledCore); + ensure!( + initial_candidate_count == count, + Error::::CandidatesFilteredDuringExecution + ); } // Process backed candidates according to scheduled cores. @@ -662,7 +609,7 @@ impl Pallet { candidate_receipt_with_backing_validator_indices, } = >::process_candidates( &allowed_relay_parents, - backed_candidates_with_core.clone(), + &backed_candidates_with_core, >::group_validators, core_index_enabled, )?; @@ -683,10 +630,13 @@ impl Pallet { let processed = ParachainsInherentData { bitfields, - backed_candidates: backed_candidates_with_core - .into_iter() - .map(|(candidate, _)| candidate) - .collect(), + backed_candidates: backed_candidates_with_core.into_iter().fold( + Vec::with_capacity(count), + |mut acc, (_id, candidates)| { + acc.extend(candidates.into_iter().map(|(c, _)| c)); + acc + }, + ), disputes, parent_header, }; @@ -986,83 +936,86 @@ pub(crate) fn sanitize_bitfields( bitfields } -// Result from `sanitize_backed_candidates` -#[derive(Debug, PartialEq)] -struct SanitizedBackedCandidates { - // Sanitized backed candidates along with the assigned core. The `Vec` is sorted according to - // the occupied core index. - backed_candidates_with_core: Vec<(BackedCandidate, CoreIndex)>, - // Set to true if any votes from disabled validators were dropped from the input. - votes_from_disabled_were_dropped: bool, - // Set to true if any candidates were dropped due to filtering done in - // `map_candidates_to_cores` - dropped_unscheduled_candidates: bool, -} - +/// Performs various filtering on the backed candidates inherent data. +/// Must maintain the invariant that the returned candidate collection contains the candidates +/// sorted in dependency order for each para. When doing any filtering, we must therefore drop any +/// subsequent candidates after the filtered one. +/// /// Filter out: +/// 1. any candidates which don't form a chain with the other candidates of the paraid (this +/// includes candidates that would form a chain but are supplied in the wrong order). +/// 2. any candidates that have a concluded invalid dispute or that are descendants of a concluded +/// invalid candidate. +/// 3. any unscheduled candidates, as well as candidates whose paraid has multiple cores assigned /// but have no injected core index. +/// 4. all backing votes from disabled validators +/// 5.
any candidates that end up with less than `effective_minimum_backing_votes` backing votes /// -/// `scheduled` follows the same naming scheme as provided in the -/// guide: Currently `free` but might become `occupied`. -/// For the filtering here the relevant part is only the current `free` -/// state. -/// -/// `candidate_has_concluded_invalid_dispute` must return `true` if the candidate -/// is disputed, false otherwise. The passed `usize` is the candidate index. -/// -/// Returns struct `SanitizedBackedCandidates` where `backed_candidates` are sorted according to the -/// occupied core index. -fn sanitize_backed_candidates< - T: crate::inclusion::Config, - F: FnMut(usize, &BackedCandidate) -> bool, ->( - mut backed_candidates: Vec>, +/// Returns the scheduled +/// backed candidates which passed filtering, mapped by para id and in the right dependency order. fn sanitize_backed_candidates( + backed_candidates: Vec>, allowed_relay_parents: &AllowedRelayParentsTracker>, - mut candidate_has_concluded_invalid_dispute_or_is_invalid: F, + concluded_invalid_with_descendants: BTreeSet, scheduled: BTreeMap>, core_index_enabled: bool, -) -> SanitizedBackedCandidates { - // Remove any candidates that were concluded invalid. - // This does not assume sorting. - backed_candidates.indexed_retain(move |candidate_idx, backed_candidate| { - !candidate_has_concluded_invalid_dispute_or_is_invalid(candidate_idx, backed_candidate) +) -> BTreeMap, CoreIndex)>> { + // Map the candidates to the right paraids, while making sure that the order between candidates + // of the same para is preserved. + let mut candidates_per_para: BTreeMap> = BTreeMap::new(); + for candidate in backed_candidates { + candidates_per_para + .entry(candidate.descriptor().para_id) + .or_default() + .push(candidate); + } + + // Check that candidates pertaining to the same para form a chain. Drop the ones that + // don't, along with the rest of candidates which follow them in the input vector. + filter_unchained_candidates::(&mut candidates_per_para, allowed_relay_parents); + + // Remove any candidates that were concluded invalid, as well as any descendants of + // concluded invalid candidates. + retain_candidates::(&mut candidates_per_para, |_, candidate| { + let keep = !concluded_invalid_with_descendants.contains(&candidate.candidate().hash()); + + if !keep { + log::debug!( + target: LOG_TARGET, + "Found backed candidate {:?} which was concluded invalid or is a descendant of a concluded invalid candidate, for paraid {:?}.", + candidate.candidate().hash(), + candidate.descriptor().para_id + ); + } + keep }); - let initial_candidate_count = backed_candidates.len(); - // Map candidates to scheduled cores. Filter out any unscheduled candidates. + // Map candidates to scheduled cores. Filter out any unscheduled candidates along with their + // descendants. let mut backed_candidates_with_core = map_candidates_to_cores::( &allowed_relay_parents, scheduled, core_index_enabled, - backed_candidates, + candidates_per_para, ); - let dropped_unscheduled_candidates = - initial_candidate_count != backed_candidates_with_core.len(); - - // Filter out backing statements from disabled validators - let votes_from_disabled_were_dropped = filter_backed_statements_from_disabled_validators::( + // Filter out backing statements from disabled validators. If that leaves a candidate with + // fewer backing votes than required, drop that candidate as well.
As with all the other filtering + // operations above, we also drop the descendants of any dropped candidate. filter_backed_statements_from_disabled_validators::( &mut backed_candidates_with_core, &allowed_relay_parents, core_index_enabled, ); - // Sort the `Vec` last, once there is a guarantee that these - // `BackedCandidates` references the expected relay chain parent, - // but more importantly are scheduled for a free core. - // This both avoids extra work for obviously invalid candidates, - // but also allows this to be done in place. - backed_candidates_with_core.sort_by(|(_x, core_x), (_y, core_y)| core_x.cmp(&core_y)); - - SanitizedBackedCandidates { - dropped_unscheduled_candidates, - votes_from_disabled_were_dropped, - backed_candidates_with_core, - } + backed_candidates_with_core +} + +fn count_backed_candidates(backed_candidates: &BTreeMap>) -> usize { + backed_candidates.iter().fold(0, |mut count, (_id, candidates)| { + count += candidates.len(); + count + }) } /// Derive entropy from babe provided per block randomness. @@ -1146,48 +1099,82 @@ fn limit_and_sanitize_disputes< } } -// Filters statements from disabled validators in `BackedCandidate`, non-scheduled candidates and -// few more sanity checks. Returns `true` if at least one statement is removed and `false` -// otherwise. -fn filter_backed_statements_from_disabled_validators( - backed_candidates_with_core: &mut Vec<( - BackedCandidate<::Hash>, - CoreIndex, - )>, +// Helper function for filtering candidates which don't pass the given predicate. When the first +// candidate that fails the predicate is found, all the other candidates that follow are dropped. +fn retain_candidates< + T: inclusion::Config + paras::Config + inclusion::Config, + F: FnMut(ParaId, &mut C) -> bool, + C, +>( + candidates_per_para: &mut BTreeMap>, + mut pred: F, +) { + for (para_id, candidates) in candidates_per_para.iter_mut() { + let mut latest_valid_idx = None; + + for (idx, candidate) in candidates.iter_mut().enumerate() { + if pred(*para_id, candidate) { + // Found a valid candidate. + latest_valid_idx = Some(idx); + } else { + break + } + } + + if let Some(latest_valid_idx) = latest_valid_idx { + candidates.truncate(latest_valid_idx + 1); + } else { + candidates.clear(); + } + } + + candidates_per_para.retain(|_, c| !c.is_empty()); +} + +// Filters statements from disabled validators in `BackedCandidate` and does a few more sanity +// checks. +fn filter_backed_statements_from_disabled_validators< + T: shared::Config + scheduler::Config + inclusion::Config, +>( + backed_candidates_with_core: &mut BTreeMap< + ParaId, + Vec<(BackedCandidate<::Hash>, CoreIndex)>, + >, allowed_relay_parents: &AllowedRelayParentsTracker>, core_index_enabled: bool, -) -> bool { +) { let disabled_validators = BTreeSet::<_>::from_iter(shared::Pallet::::disabled_validators().into_iter()); if disabled_validators.is_empty() { // No disabled validators - nothing to do - return false + return } - let backed_len_before = backed_candidates_with_core.len(); - - // Flag which will be returned. Set to `true` if at least one vote is filtered. - let mut filtered = false; - let minimum_backing_votes = configuration::Pallet::::config().minimum_backing_votes; // Process all backed candidates. `validator_indices` in `BackedCandidates` are indices within // the validator group assigned to the parachain. To obtain this group we need: // 1. Core index assigned to the parachain which has produced the candidate // 2.
The relay chain block number of the candidate - backed_candidates_with_core.retain_mut(|(bc, core_idx)| { - let (validator_indices, maybe_core_index) = bc.validator_indices_and_core_index(core_index_enabled); + retain_candidates::(backed_candidates_with_core, |para_id, (bc, core_idx)| { + let (validator_indices, maybe_core_index) = + bc.validator_indices_and_core_index(core_index_enabled); let mut validator_indices = BitVec::<_>::from(validator_indices); - // Get relay parent block number of the candidate. We need this to get the group index assigned to this core at this block number - let relay_parent_block_number = match allowed_relay_parents - .acquire_info(bc.descriptor().relay_parent, None) { + // Get relay parent block number of the candidate. We need this to get the group index + // assigned to this core at this block number + let relay_parent_block_number = + match allowed_relay_parents.acquire_info(bc.descriptor().relay_parent, None) { Some((_, block_num)) => block_num, None => { - log::debug!(target: LOG_TARGET, "Relay parent {:?} for candidate is not in the allowed relay parents. Dropping the candidate.", bc.descriptor().relay_parent); + log::debug!( + target: LOG_TARGET, + "Relay parent {:?} for candidate is not in the allowed relay parents. Dropping the candidate.", + bc.descriptor().relay_parent + ); return false - } + }, }; // Get the group index for the core @@ -1208,12 +1195,15 @@ fn filter_backed_statements_from_disabled_validators { log::debug!(target: LOG_TARGET, "Can't get the validators from group {:?}. Dropping the candidate.", group_idx); return false - } + }, }; // Bitmask with the disabled indices within the validator group - let disabled_indices = BitVec::::from_iter(validator_group.iter().map(|idx| disabled_validators.contains(idx))); - // The indices of statements from disabled validators in `BackedCandidate`. We have to drop these. + let disabled_indices = BitVec::::from_iter( + validator_group.iter().map(|idx| disabled_validators.contains(idx)), + ); + // The indices of statements from disabled validators in `BackedCandidate`. We have to drop + // these. let indices_to_drop = disabled_indices.clone() & &validator_indices; // Apply the bitmask to drop the disabled validator from `validator_indices` validator_indices &= !disabled_indices; @@ -1225,62 +1215,218 @@ fn filter_backed_statements_from_disabled_validators 0 { - filtered = true; - } - // By filtering votes we might render the candidate invalid and cause a failure in // [`process_candidates`]. To avoid this we have to perform a sanity check here. If there // are not enough backing votes after filtering we will remove the whole candidate. - if bc.validity_votes().len() < effective_minimum_backing_votes( - validator_group.len(), - minimum_backing_votes - ) { + if bc.validity_votes().len() < + effective_minimum_backing_votes(validator_group.len(), minimum_backing_votes) + { + log::debug!( + target: LOG_TARGET, + "Dropping candidate {:?} of paraid {:?} because it was left with too few backing votes after votes from disabled validators were filtered.", + bc.candidate().hash(), + para_id + ); + return false } true }); +} + +// Check that candidates pertaining to the same para form a chain. Drop the ones that +// don't, along with the rest of candidates which follow them in the input vector. +// In the process, duplicated candidates will also be dropped (even if they form a valid cycle; +// cycles are not allowed if they entail backing duplicated candidates). 
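+// The chain rule itself can be sketched as follows (illustrative pseudo-helpers:
+// `parent_head_of` stands for the parent head a candidate commits to in its persisted
+// validation data, `output_head_of` for its `commitments.head_data`):
+//
+//     fn forms_chain(latest_included: &HeadData, chain: &[BackedCandidate]) -> bool {
+//         let mut expected = latest_included.clone();
+//         for candidate in chain {
+//             if parent_head_of(candidate) != expected {
+//                 return false
+//             }
+//             expected = output_head_of(candidate);
+//         }
+//         true
+//     }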
+fn filter_unchained_candidates( + candidates: &mut BTreeMap>>, + allowed_relay_parents: &AllowedRelayParentsTracker>, +) { + let mut para_latest_head_data: BTreeMap = BTreeMap::new(); + for para_id in candidates.keys() { + let latest_head_data = match >::para_latest_head_data(¶_id) { + None => { + defensive!("Latest included head data for paraid {:?} is None", para_id); + continue + }, + Some(latest_head_data) => latest_head_data, + }; + para_latest_head_data.insert(*para_id, latest_head_data); + } + + let mut para_visited_candidates: BTreeMap> = BTreeMap::new(); + + retain_candidates::(candidates, |para_id, candidate| { + let Some(latest_head_data) = para_latest_head_data.get(¶_id) else { return false }; + let candidate_hash = candidate.candidate().hash(); + + let visited_candidates = + para_visited_candidates.entry(para_id).or_insert_with(|| BTreeSet::new()); + if visited_candidates.contains(&candidate_hash) { + log::debug!( + target: LOG_TARGET, + "Found duplicate candidates for paraid {:?}. Dropping the candidates with hash {:?}", + para_id, + candidate_hash + ); + + // If we got a duplicate candidate, stop. + return false + } else { + visited_candidates.insert(candidate_hash); + } + + let prev_context = >::para_most_recent_context(para_id); + let check_ctx = CandidateCheckContext::::new(prev_context); + + let res = match check_ctx.verify_backed_candidate( + &allowed_relay_parents, + candidate.candidate(), + latest_head_data.clone(), + ) { + Ok(_) => true, + Err(err) => { + log::debug!( + target: LOG_TARGET, + "Backed candidate verification for candidate {:?} of paraid {:?} failed with {:?}", + candidate_hash, + para_id, + err + ); + false + }, + }; + + if res { + para_latest_head_data + .insert(para_id, candidate.candidate().commitments.head_data.clone()); + } - // Also return `true` if a whole candidate was dropped from the set - filtered || backed_len_before != backed_candidates_with_core.len() + res + }); } /// Map candidates to scheduled cores. -/// If the para only has one scheduled core and no `CoreIndex` is injected, map the candidate to the +/// If the para only has one scheduled core and one candidate supplied, map the candidate to the /// single core. If the para has multiple cores scheduled, only map the candidates which have a /// proper core injected. Filter out the rest. /// Also returns whether or not we dropped any candidates. +/// When dropping a candidate of a para, we must drop all subsequent candidates from that para +/// (because they form a chain). fn map_candidates_to_cores( allowed_relay_parents: &AllowedRelayParentsTracker>, mut scheduled: BTreeMap>, core_index_enabled: bool, - candidates: Vec>, -) -> Vec<(BackedCandidate, CoreIndex)> { - let mut backed_candidates_with_core = Vec::with_capacity(candidates.len()); - - // We keep a candidate if the parachain has only one core assigned or if - // a core index is provided by block author and it's indeed scheduled. 
- for backed_candidate in candidates { - let maybe_injected_core_index = get_injected_core_index::( - allowed_relay_parents, - &backed_candidate, - core_index_enabled, - ); + candidates: BTreeMap>>, +) -> BTreeMap, CoreIndex)>> { + let mut backed_candidates_with_core = BTreeMap::new(); + + for (para_id, backed_candidates) in candidates.into_iter() { + if backed_candidates.len() == 0 { + defensive!("Backed candidates for paraid {} is empty.", para_id); + continue + } - let scheduled_cores = scheduled.get_mut(&backed_candidate.descriptor().para_id); - // Candidates without scheduled cores are silently filtered out. + let scheduled_cores = scheduled.get_mut(&para_id); + + // ParaIds without scheduled cores are silently filtered out. if let Some(scheduled_cores) = scheduled_cores { - if let Some(core_idx) = maybe_injected_core_index { - if scheduled_cores.contains(&core_idx) { - scheduled_cores.remove(&core_idx); - backed_candidates_with_core.push((backed_candidate, core_idx)); + if scheduled_cores.len() == 0 { + log::debug!( + target: LOG_TARGET, + "Paraid: {:?} has no scheduled cores but {} candidates were supplied.", + para_id, + backed_candidates.len() + ); + + // Non-elastic scaling case. One core per para. + } else if scheduled_cores.len() == 1 && !core_index_enabled { + backed_candidates_with_core.insert( + para_id, + vec![( + // We need the first one here, as we assume candidates of a para are in + // dependency order. + backed_candidates.into_iter().next().expect("Length is at least 1"), + scheduled_cores.pop_first().expect("Length is 1"), + )], + ); + continue; + + // Elastic scaling case. We only allow candidates which have the right core + // indices injected. + } else if scheduled_cores.len() >= 1 && core_index_enabled { + // We must preserve the dependency order given in the input. + let mut temp_backed_candidates = Vec::with_capacity(scheduled_cores.len()); + + for candidate in backed_candidates { + if scheduled_cores.len() == 0 { + // We've got candidates for all of this para's assigned cores. Move on to + // the next para. + log::debug!( + target: LOG_TARGET, + "Found enough candidates for paraid: {:?}.", + candidate.descriptor().para_id + ); + break; + } + let maybe_injected_core_index: Option = + get_injected_core_index::(allowed_relay_parents, &candidate); + + if let Some(core_index) = maybe_injected_core_index { + if scheduled_cores.remove(&core_index) { + temp_backed_candidates.push((candidate, core_index)); + } else { + // if we got a candidate for a core index which is not scheduled, stop + // the work for this para. the already processed candidate chain in + // temp_backed_candidates is still fine though. + log::debug!( + target: LOG_TARGET, + "Found a backed candidate {:?} with injected core index {}, which is not scheduled for paraid {:?}.", + candidate.candidate().hash(), + core_index.0, + candidate.descriptor().para_id + ); + + break; + } + } else { + // if we got a candidate which does not contain its core index, stop the + // work for this para. the already processed candidate chain in + // temp_backed_candidates is still fine though.
+ + log::debug!( + target: LOG_TARGET, + "Found a backed candidate {:?} with no injected core index, for paraid {:?} which has multiple scheduled cores.", + candidate.candidate().hash(), + candidate.descriptor().para_id + ); + + break; + } } - } else if scheduled_cores.len() == 1 { - backed_candidates_with_core - .push((backed_candidate, scheduled_cores.pop_first().expect("Length is 1"))); + + if !temp_backed_candidates.is_empty() { + backed_candidates_with_core + .entry(para_id) + .or_insert_with(|| vec![]) + .extend(temp_backed_candidates); + } + } else { + log::warn!( + target: LOG_TARGET, + "Found a paraid {:?} which has multiple scheduled cores but ElasticScalingMVP feature is not enabled: {:?}", + para_id, + scheduled_cores + ); } + } else { + log::debug!( + target: LOG_TARGET, + "Paraid: {:?} has no scheduled cores but {} candidates were supplied.", + para_id, + backed_candidates.len() + ); } } @@ -1290,13 +1436,11 @@ fn map_candidates_to_cores( allowed_relay_parents: &AllowedRelayParentsTracker>, candidate: &BackedCandidate, - core_index_enabled: bool, ) -> Option { // After stripping the 8 bit extensions, the `validator_indices` field length is expected // to be equal to backing group size. If these don't match, the `CoreIndex` is badly encoded, // or not supported. - let (validator_indices, maybe_core_idx) = - candidate.validator_indices_and_core_index(core_index_enabled); + let (validator_indices, maybe_core_idx) = candidate.validator_indices_and_core_index(true); let Some(core_idx) = maybe_core_idx else { return None }; @@ -1306,7 +1450,7 @@ fn get_injected_core_index { log::debug!( target: LOG_TARGET, - "Relay parent {:?} for candidate {:?} is not in the allowed relay parents. Dropping the candidate.", + "Relay parent {:?} for candidate {:?} is not in the allowed relay parents.", candidate.descriptor().relay_parent, candidate.candidate().hash(), ); @@ -1323,9 +1467,8 @@ fn get_injected_core_index { log::debug!( target: LOG_TARGET, - "Can't get the group index for core idx {:?}. Dropping the candidate {:?}.", + "Can't get the group index for core idx {:?}.", core_idx, - candidate.candidate().hash(), ); return None }, @@ -1339,6 +1482,14 @@ fn get_injected_core_index MockGenesisConfig { + MockGenesisConfig { + configuration: configuration::GenesisConfig { + config: HostConfiguration { + max_head_data_size: 0b100000, + scheduler_params: SchedulerParams { + group_rotation_frequency: u32::MAX, + ..Default::default() + }, + ..Default::default() + }, + }, + ..Default::default() + } +} + // In order to facilitate benchmarks as tests we have a benchmark feature gated `WeightInfo` impl // that uses 0 for all the weights. Because all the weights are 0, the tests that rely on // weights for limiting data will fail, so we don't run them when using the benchmark feature. 
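// The `validator_indices_and_core_index` calls above build on the convention that
// the block author may append an 8-bit core index to the backing validator bitfield,
// which the runtime strips back off before checking the votes. A conceptual sketch
// of such a split; the bit order and exact layout here are illustrative assumptions,
// not the precise on-wire encoding:

fn split_core_index(bits: &[bool]) -> (&[bool], Option<u8>) {
    // Without the extension, the whole field is validator bits.
    if bits.len() < 8 {
        return (bits, None);
    }
    let (validator_bits, core_bits) = bits.split_at(bits.len() - 8);
    // Fold the trailing eight bits into a core index.
    let core_index = core_bits.iter().fold(0u8, |acc, &b| (acc << 1) | b as u8);
    (validator_bits, Some(core_index))
}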
#[cfg(not(feature = "runtime-benchmarks"))] mod enter { - use super::{inclusion::tests::TestCandidateBuilder, *}; use crate::{ builder::{Bench, BenchBuilder}, - mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test}, + mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, RuntimeOrigin, Test}, scheduler::{ common::{Assignment, AssignmentProvider}, ParasEntry, }, + session_info, }; use assert_matches::assert_matches; + use core::panic; use frame_support::assert_ok; use frame_system::limits; - use primitives::vstaging::SchedulerParams; + use primitives::{vstaging::SchedulerParams, AvailabilityBitfield, UncheckedSigned}; use sp_runtime::Perbill; use sp_std::collections::btree_map::BTreeMap; @@ -45,6 +68,8 @@ mod enter { num_validators_per_core: u32, code_upgrade: Option, fill_claimqueue: bool, + elastic_paras: BTreeMap, + unavailable_cores: Vec, } fn make_inherent_data( @@ -55,25 +80,39 @@ mod enter { num_validators_per_core, code_upgrade, fill_claimqueue, + elastic_paras, + unavailable_cores, }: TestConfig, ) -> Bench { + let extra_cores = elastic_paras + .values() + .map(|count| *count as usize) + .sum::() + .saturating_sub(elastic_paras.len() as usize); + let total_cores = dispute_sessions.len() + backed_and_concluding.len() + extra_cores; + let builder = BenchBuilder::::new() - .set_max_validators( - (dispute_sessions.len() + backed_and_concluding.len()) as u32 * - num_validators_per_core, - ) + .set_max_validators((total_cores) as u32 * num_validators_per_core) + .set_elastic_paras(elastic_paras.clone()) .set_max_validators_per_core(num_validators_per_core) .set_dispute_statements(dispute_statements) - .set_backed_and_concluding_paras(backed_and_concluding) + .set_backed_and_concluding_paras(backed_and_concluding.clone()) .set_dispute_sessions(&dispute_sessions[..]) - .set_fill_claimqueue(fill_claimqueue); + .set_fill_claimqueue(fill_claimqueue) + .set_unavailable_cores(unavailable_cores); // Setup some assignments as needed: mock_assigner::Pallet::::set_core_count(builder.max_cores()); - for core_index in 0..builder.max_cores() { - // Core index == para_id in this case - mock_assigner::Pallet::::add_test_assignment(Assignment::Bulk(core_index.into())); - } + + (0..(builder.max_cores() as usize - extra_cores)).for_each(|para_id| { + (0..elastic_paras.get(&(para_id as u32)).cloned().unwrap_or(1)).for_each( + |_para_local_core_idx| { + mock_assigner::Pallet::::add_test_assignment(Assignment::Bulk( + para_id.into(), + )); + }, + ); + }); if let Some(code_size) = code_upgrade { builder.set_code_upgrade(code_size).build() @@ -104,6 +143,8 @@ mod enter { num_validators_per_core: 1, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); // We expect the scenario to have cores 0 & 1 with pending availability. The backed @@ -145,6 +186,305 @@ mod enter { Pallet::::on_chain_votes().unwrap().session, 2 ); + + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(0)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(0)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(1)] + ); + }); + } + + #[test] + fn include_backed_candidates_elastic_scaling() { + // ParaId 0 has one pending candidate on core 0. + // ParaId 1 has one pending candidate on core 1. + // ParaId 2 has three pending candidates on cores 2, 3 and 4. 
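// (For the setup below, `make_inherent_data` computes `extra_cores` as the sum of the
// elastic paras' core counts minus the number of elastic paras, i.e. 3 - 1 = 2, so
// `total_cores` = 0 dispute cores + 3 backed paras + 2 extra = 5, matching the 5
// bitfields and 5 backed candidates asserted on below.)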
+ // All of them are being made available in this block. Propose 5 more candidates (one for + // each core) and check that they're successfully backed and the old ones enacted. + let config = default_config(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + new_test_ext(config).execute_with(|| { + // Set the elastic scaling MVP feature. + >::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::ElasticScalingMVP as u8, + true, + ) + .unwrap(); + + let dispute_statements = BTreeMap::new(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + backed_and_concluding.insert(2, 1); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 1, + code_upgrade: None, + fill_claimqueue: false, + elastic_paras: [(2, 3)].into_iter().collect(), + unavailable_cores: vec![], + }); + + let expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (5 validators) + assert_eq!(expected_para_inherent_data.bitfields.len(), 5); + // * 1 backed candidate per core (5 cores) + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 5); + // * 0 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 0); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + + // The current schedule is empty prior to calling `create_inherent_enter`. + assert!(>::claimqueue_is_empty()); + + assert!(Pallet::::on_chain_votes().is_none()); + + // Nothing is filtered out (including the backed candidates.) + assert_eq!( + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), + expected_para_inherent_data + ); + + assert_eq!( + // The length of this vec is equal to the number of candidates, so we know our 5 + // backed candidates did not get filtered out + Pallet::::on_chain_votes().unwrap().backing_validators_per_candidate.len(), + 5 + ); + + assert_eq!( + // The session of the on chain votes should equal the current session, which is 2 + Pallet::::on_chain_votes().unwrap().session, + 2 + ); + + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(0)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(0)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(1)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(2)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(2), CoreIndex(3), CoreIndex(4)] + ); + }); + + // ParaId 0 has one pending candidate on core 0. + // ParaId 1 has one pending candidate on core 1. + // ParaId 2 has 4 pending candidates on cores 2, 3, 4 and 5. + // Cores 1, 2 and 3 are being made available in this block. Propose 6 more candidates (one + // for each core) and check that the right ones are successfully backed and the old ones + // enacted. + let config = default_config(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + new_test_ext(config).execute_with(|| { + // Set the elastic scaling MVP feature. 
+ >::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::ElasticScalingMVP as u8, + true, + ) + .unwrap(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + backed_and_concluding.insert(2, 1); + + // Modify the availability bitfields so that cores 0, 4 and 5 are not being made + // available. + let unavailable_cores = vec![0, 4, 5]; + + let scenario = make_inherent_data(TestConfig { + dispute_statements: BTreeMap::new(), + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 1, + code_upgrade: None, + fill_claimqueue: true, + elastic_paras: [(2, 4)].into_iter().collect(), + unavailable_cores: unavailable_cores.clone(), + }); + + let mut expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (6 validators) + assert_eq!(expected_para_inherent_data.bitfields.len(), 6); + // * 1 backed candidate per core (6 cores) + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 6); + // * 0 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 0); + assert!(Pallet::::on_chain_votes().is_none()); + + expected_para_inherent_data.backed_candidates = expected_para_inherent_data + .backed_candidates + .into_iter() + .filter(|candidate| { + let (_, Some(core_index)) = candidate.validator_indices_and_core_index(true) + else { + panic!("Core index must have been injected"); + }; + !unavailable_cores.contains(&core_index.0) + }) + .collect(); + + let mut inherent_data = InherentData::new(); + inherent_data.put_data(PARACHAINS_INHERENT_IDENTIFIER, &scenario.data).unwrap(); + + assert!(!>::claimqueue_is_empty()); + + // The right candidates have been filtered out (the ones for cores 0,4,5) + assert_eq!( + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), + expected_para_inherent_data + ); + + // 3 candidates have been backed (for cores 1,2 and 3) + assert_eq!( + Pallet::::on_chain_votes().unwrap().backing_validators_per_candidate.len(), + 3 + ); + + assert_eq!( + // The session of the on chain votes should equal the current session, which is 2 + Pallet::::on_chain_votes().unwrap().session, + 2 + ); + + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(1)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(2)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(4), CoreIndex(5), CoreIndex(2), CoreIndex(3)] + ); + + let expected_heads = (0..=2) + .map(|id| { + inclusion::PendingAvailability::::get(ParaId::from(id)) + .unwrap() + .back() + .unwrap() + .candidate_commitments() + .head_data + .clone() + }) + .collect::>(); + + // Now just make all candidates available. 
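// (Each of the six validators signs an all-ones bitfield of width 6 below, attesting
// availability for every occupied core; with one validator per core, this pushes
// every pending candidate past the availability threshold at once.)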
+ let mut data = scenario.data.clone(); + let validators = session_info::Pallet::::session_info(2).unwrap().validators; + let signing_context = SigningContext { + parent_hash: BenchBuilder::::header(4).hash(), + session_index: 2, + }; + + data.backed_candidates.clear(); + + data.bitfields.iter_mut().enumerate().for_each(|(i, bitfield)| { + let unchecked_signed = UncheckedSigned::::benchmark_sign( + validators.get(ValidatorIndex(i as u32)).unwrap(), + bitvec::bitvec![u8, bitvec::order::Lsb0; 1; 6].into(), + &signing_context, + ValidatorIndex(i as u32), + ); + *bitfield = unchecked_signed; + }); + let mut inherent_data = InherentData::new(); + inherent_data.put_data(PARACHAINS_INHERENT_IDENTIFIER, &data).unwrap(); + + // Nothing has been filtered out. + assert_eq!( + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), + data + ); + + // No more candidates have been backed + assert!(Pallet::::on_chain_votes() + .unwrap() + .backing_validators_per_candidate + .is_empty()); + + // No more pending availability candidates + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(0)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(2)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![] + ); + + // Paras have the right on-chain heads now + expected_heads.into_iter().enumerate().for_each(|(id, head)| { + assert_eq!( + paras::Pallet::::para_head(ParaId::from(id as u32)).unwrap(), + head + ); + }); }); } @@ -255,6 +595,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -326,6 +668,8 @@ mod enter { num_validators_per_core: 6, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -395,6 +739,8 @@ mod enter { num_validators_per_core: 4, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -480,6 +826,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -565,6 +913,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -649,6 +999,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -754,6 +1106,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -820,6 +1174,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = 
scenario.data.clone(); @@ -884,6 +1240,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -985,6 +1343,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let mut para_inherent_data = scenario.data.clone(); @@ -1072,6 +1432,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -1119,7 +1481,7 @@ mod sanitizers { inclusion::tests::{ back_candidate, collator_sign_candidate, BackingKind, TestCandidateBuilder, }, - mock::{new_test_ext, MockGenesisConfig}, + mock::new_test_ext, }; use bitvec::order::Lsb0; use primitives::{ @@ -1375,9 +1737,11 @@ mod sanitizers { mod candidates { use crate::{ - mock::set_disabled_validators, + mock::{set_disabled_validators, RuntimeOrigin}, scheduler::{common::Assignment, ParasEntry}, + util::{make_persisted_validation_data, make_persisted_validation_data_with_parent}, }; + use primitives::ValidationCode; use sp_std::collections::vec_deque::VecDeque; use super::*; @@ -1385,13 +1749,14 @@ mod sanitizers { // Backed candidates and scheduled parachains used for `sanitize_backed_candidates` testing struct TestData { backed_candidates: Vec, - all_backed_candidates_with_core: Vec<(BackedCandidate, CoreIndex)>, + expected_backed_candidates_with_core: + BTreeMap>, scheduled_paras: BTreeMap>, } - // Generate test data for the candidates and assert that the evnironment is set as expected + // Generate test data for the candidates and assert that the environment is set as expected // (check the comments for details) - fn get_test_data(core_index_enabled: bool) -> TestData { + fn get_test_data_one_core_per_para(core_index_enabled: bool) -> TestData { const RELAY_PARENT_NUM: u32 = 3; // Add the relay parent to `shared` pallet. Otherwise some code (e.g. filtering backing @@ -1467,6 +1832,24 @@ mod sanitizers { ), ])); + // Set the on-chain included head data for paras. + paras::Pallet::::set_current_head(ParaId::from(1), HeadData(vec![1])); + paras::Pallet::::set_current_head(ParaId::from(2), HeadData(vec![2])); + + // Set the current_code_hash + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(1), + ValidationCode(vec![1]), + ) + .unwrap(); + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(2), + ValidationCode(vec![2]), + ) + .unwrap(); + // Callback used for backing candidates let group_validators = |group_index: GroupIndex| { match group_index { @@ -1486,8 +1869,15 @@ mod sanitizers { para_id: ParaId::from(idx1), relay_parent, pov_hash: Hash::repeat_byte(idx1 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(idx1), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![idx1 as u8]), ..Default::default() } .build(); @@ -1523,36 +1913,34 @@ mod sanitizers { ] ); - let all_backed_candidates_with_core = backed_candidates - .iter() - .map(|candidate| { - // Only one entry for this test data. 
- ( - candidate.clone(), - scheduled - .get(&candidate.descriptor().para_id) - .unwrap() - .first() - .copied() - .unwrap(), - ) - }) - .collect(); + let mut expected_backed_candidates_with_core = BTreeMap::new(); + + for candidate in backed_candidates.iter() { + let para_id = candidate.descriptor().para_id; + + expected_backed_candidates_with_core.entry(para_id).or_insert(vec![]).push(( + candidate.clone(), + scheduled.get(¶_id).unwrap().first().copied().unwrap(), + )); + } TestData { backed_candidates, scheduled_paras: scheduled, - all_backed_candidates_with_core, + expected_backed_candidates_with_core, } } - // Generate test data for the candidates and assert that the evnironment is set as expected + // Generate test data for the candidates and assert that the environment is set as expected // (check the comments for details) // Para 1 scheduled on core 0 and core 1. Two candidates are supplied. // Para 2 scheduled on cores 2 and 3. One candidate supplied. // Para 3 scheduled on core 4. One candidate supplied. // Para 4 scheduled on core 5. Two candidates supplied. // Para 5 scheduled on core 6. No candidates supplied. + // Para 6 is not scheduled. One candidate supplied. + // Para 7 is scheduled on core 7 and 8, but the candidate contains the wrong core index. + // Para 8 is scheduled on core 9, but the candidate contains the wrong core index. fn get_test_data_multiple_cores_per_para(core_index_enabled: bool) -> TestData { const RELAY_PARENT_NUM: u32 = 3; @@ -1581,6 +1969,7 @@ mod sanitizers { keyring::Sr25519Keyring::Eve, keyring::Sr25519Keyring::Ferdie, keyring::Sr25519Keyring::One, + keyring::Sr25519Keyring::Two, ]; for validator in validators.iter() { Keystore::sr25519_generate_new( @@ -1605,6 +1994,7 @@ mod sanitizers { vec![ValidatorIndex(4)], vec![ValidatorIndex(5)], vec![ValidatorIndex(6)], + vec![ValidatorIndex(7)], ]); // Update scheduler's claimqueue with the parachains @@ -1658,8 +2048,40 @@ mod sanitizers { RELAY_PARENT_NUM, )]), ), + ( + CoreIndex::from(7), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 7.into(), core_index: CoreIndex(7) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(8), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 7.into(), core_index: CoreIndex(8) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(9), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 8.into(), core_index: CoreIndex(9) }, + RELAY_PARENT_NUM, + )]), + ), ])); + // Set the on-chain included head data and current code hash. 
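// (Seeding the heads and validation code below matters because backed-candidate
// verification recomputes the expected persisted validation data from chain state
// and compares its hash, along with the current code hash, against the candidate
// descriptor. For reference, `PersistedValidationData` carries `parent_head`,
// `relay_parent_number`, `relay_parent_storage_root` and `max_pov_size`.)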
+ for id in 1..=8u32 { + paras::Pallet::::set_current_head(ParaId::from(id), HeadData(vec![id as u8])); + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(id), + ValidationCode(vec![id as u8]), + ) + .unwrap(); + } + // Callback used for backing candidates let group_validators = |group_index: GroupIndex| { match group_index { @@ -1670,6 +2092,7 @@ mod sanitizers { group_index if group_index == GroupIndex::from(4) => Some(vec![4]), group_index if group_index == GroupIndex::from(5) => Some(vec![5]), group_index if group_index == GroupIndex::from(6) => Some(vec![6]), + group_index if group_index == GroupIndex::from(7) => Some(vec![7]), _ => panic!("Group index out of bounds"), } @@ -1677,7 +2100,7 @@ mod sanitizers { }; let mut backed_candidates = vec![]; - let mut all_backed_candidates_with_core = vec![]; + let mut expected_backed_candidates_with_core = BTreeMap::new(); // Para 1 { @@ -1685,14 +2108,23 @@ mod sanitizers { para_id: ParaId::from(1), relay_parent, pov_hash: Hash::repeat_byte(1 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(1), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![1, 1]), + validation_code: ValidationCode(vec![1]), ..Default::default() } .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + let prev_candidate = candidate.clone(); let backed: BackedCandidate = back_candidate( candidate, &validators, @@ -1704,15 +2136,26 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core.push((backed, CoreIndex(0))); + expected_backed_candidates_with_core + .entry(ParaId::from(1)) + .or_insert(vec![]) + .push((backed, CoreIndex(0))); } let mut candidate = TestCandidateBuilder { para_id: ParaId::from(1), relay_parent, pov_hash: Hash::repeat_byte(2 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![1]), ..Default::default() } .build(); @@ -1730,7 +2173,10 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core.push((backed, CoreIndex(1))); + expected_backed_candidates_with_core + .entry(ParaId::from(1)) + .or_insert(vec![]) + .push((backed, CoreIndex(1))); } } @@ -1740,8 +2186,15 @@ mod sanitizers { para_id: ParaId::from(2), relay_parent, pov_hash: Hash::repeat_byte(3 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(2), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), ..Default::default() } .build(); @@ -1759,7 +2212,10 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core.push((backed, CoreIndex(2))); + expected_backed_candidates_with_core + .entry(ParaId::from(2)) + .or_insert(vec![]) + .push((backed, CoreIndex(2))); } } @@ -1769,8 +2225,15 @@ mod sanitizers { para_id: ParaId::from(3), relay_parent, pov_hash: Hash::repeat_byte(4 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + 
persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(3), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![3]), ..Default::default() } .build(); @@ -1787,7 +2250,10 @@ mod sanitizers { core_index_enabled.then_some(CoreIndex(4 as u32)), ); backed_candidates.push(backed.clone()); - all_backed_candidates_with_core.push((backed, CoreIndex(4))); + expected_backed_candidates_with_core + .entry(ParaId::from(3)) + .or_insert(vec![]) + .push((backed, CoreIndex(4))); } // Para 4 @@ -1796,14 +2262,22 @@ mod sanitizers { para_id: ParaId::from(4), relay_parent, pov_hash: Hash::repeat_byte(5 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(4), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![4]), ..Default::default() } .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + let prev_candidate = candidate.clone(); let backed = back_candidate( candidate, &validators, @@ -1811,17 +2285,28 @@ mod sanitizers { &keystore, &signing_context, BackingKind::Threshold, - None, + core_index_enabled.then_some(CoreIndex(5 as u32)), ); backed_candidates.push(backed.clone()); - all_backed_candidates_with_core.push((backed, CoreIndex(5))); + expected_backed_candidates_with_core + .entry(ParaId::from(4)) + .or_insert(vec![]) + .push((backed, CoreIndex(5))); let mut candidate = TestCandidateBuilder { para_id: ParaId::from(4), relay_parent, pov_hash: Hash::repeat_byte(6 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![4]), ..Default::default() } .build(); @@ -1842,25 +2327,635 @@ mod sanitizers { // No candidate for para 5. - // State sanity checks - assert_eq!( - >::scheduled_paras().collect::>(), - vec![ - (CoreIndex(0), ParaId::from(1)), - (CoreIndex(1), ParaId::from(1)), - (CoreIndex(2), ParaId::from(2)), - (CoreIndex(3), ParaId::from(2)), - (CoreIndex(4), ParaId::from(3)), - (CoreIndex(5), ParaId::from(4)), - (CoreIndex(6), ParaId::from(5)), - ] - ); - let mut scheduled: BTreeMap> = BTreeMap::new(); - for (core_idx, para_id) in >::scheduled_paras() { - scheduled.entry(para_id).or_default().insert(core_idx); + // Para 6. + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(6), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(6), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![6]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(6 as u32)), + ); + backed_candidates.push(backed.clone()); } - assert_eq!( + // Para 7. 
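// (Paras 7 and 8 below are the negative cases announced above: para 7's candidate
// injects core index 6, which is scheduled for para 5, and para 8's candidate
// injects core index 7 instead of its scheduled core 9, so with the core-index
// extension enabled both must be filtered out.)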
+ { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(7), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(7), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![7]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(6 as u32)), + ); + backed_candidates.push(backed.clone()); + } + + // Para 8. + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(8), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(8), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![8]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(7 as u32)), + ); + backed_candidates.push(backed.clone()); + if !core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(8)) + .or_insert(vec![]) + .push((backed, CoreIndex(9))); + } + } + + // State sanity checks + assert_eq!( + >::scheduled_paras().collect::>(), + vec![ + (CoreIndex(0), ParaId::from(1)), + (CoreIndex(1), ParaId::from(1)), + (CoreIndex(2), ParaId::from(2)), + (CoreIndex(3), ParaId::from(2)), + (CoreIndex(4), ParaId::from(3)), + (CoreIndex(5), ParaId::from(4)), + (CoreIndex(6), ParaId::from(5)), + (CoreIndex(7), ParaId::from(7)), + (CoreIndex(8), ParaId::from(7)), + (CoreIndex(9), ParaId::from(8)), + ] + ); + let mut scheduled: BTreeMap> = BTreeMap::new(); + for (core_idx, para_id) in >::scheduled_paras() { + scheduled.entry(para_id).or_default().insert(core_idx); + } + + assert_eq!( + shared::Pallet::::active_validator_indices(), + vec![ + ValidatorIndex(0), + ValidatorIndex(1), + ValidatorIndex(2), + ValidatorIndex(3), + ValidatorIndex(4), + ValidatorIndex(5), + ValidatorIndex(6), + ValidatorIndex(7), + ] + ); + + TestData { + backed_candidates, + scheduled_paras: scheduled, + expected_backed_candidates_with_core, + } + } + + // Para 1 scheduled on core 0 and core 1. Two candidates are supplied. They form a chain but + // in the wrong order. + // Para 2 scheduled on core 2, core 3 and core 4. Three candidates are supplied. The second + // one is not part of the chain. + // Para 3 scheduled on core 5 and 6. Two candidates are supplied and they all form a chain. + // Para 4 scheduled on core 7 and 8. Duplicated candidates. + fn get_test_data_for_order_checks(core_index_enabled: bool) -> TestData { + const RELAY_PARENT_NUM: u32 = 3; + + // Add the relay parent to `shared` pallet. Otherwise some code (e.g. 
filtering backing + // votes) won't behave correctly + shared::Pallet::::add_allowed_relay_parent( + default_header().hash(), + Default::default(), + RELAY_PARENT_NUM, + 1, + ); + + let header = default_header(); + let relay_parent = header.hash(); + let session_index = SessionIndex::from(0_u32); + + let keystore = LocalKeystore::in_memory(); + let keystore = Arc::new(keystore) as KeystorePtr; + let signing_context = SigningContext { parent_hash: relay_parent, session_index }; + + let validators = vec![ + keyring::Sr25519Keyring::Alice, + keyring::Sr25519Keyring::Bob, + keyring::Sr25519Keyring::Charlie, + keyring::Sr25519Keyring::Dave, + keyring::Sr25519Keyring::Eve, + keyring::Sr25519Keyring::Ferdie, + keyring::Sr25519Keyring::One, + keyring::Sr25519Keyring::Two, + keyring::Sr25519Keyring::AliceStash, + ]; + for validator in validators.iter() { + Keystore::sr25519_generate_new( + &*keystore, + PARACHAIN_KEY_TYPE_ID, + Some(&validator.to_seed()), + ) + .unwrap(); + } + + // Set active validators in `shared` pallet + let validator_ids = + validators.iter().map(|v| v.public().into()).collect::>(); + shared::Pallet::::set_active_validators_ascending(validator_ids); + + // Set the validator groups in `scheduler` + scheduler::Pallet::::set_validator_groups(vec![ + vec![ValidatorIndex(0)], + vec![ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3)], + vec![ValidatorIndex(4)], + vec![ValidatorIndex(5)], + vec![ValidatorIndex(6)], + vec![ValidatorIndex(7)], + vec![ValidatorIndex(8)], + ]); + + // Update scheduler's claimqueue with the parachains + scheduler::Pallet::::set_claimqueue(BTreeMap::from([ + ( + CoreIndex::from(0), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(1), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(2), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(2) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(3), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(4), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(4) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(5), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(5) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(6), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(6) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(7), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(7) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(8), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(8) }, + RELAY_PARENT_NUM, + )]), + ), + ])); + + // Set the on-chain included head data and current code hash. 
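// (As in the previous fixture, the loop below seeds heads and code for each para. In
// this order-checks data, para 1's two chained candidates are pushed in reverse
// order, so the first supplied one fails verification against the on-chain head and
// the whole chain is dropped; para 2's second candidate builds on the on-chain head
// instead of extending the first candidate, so it and its descendant are dropped
// while the first is kept; para 4 supplies the same candidate twice, and the
// duplicate is dropped by the visited-candidates check.)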
+ for id in 1..=4u32 { + paras::Pallet::::set_current_head(ParaId::from(id), HeadData(vec![id as u8])); + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(id), + ValidationCode(vec![id as u8]), + ) + .unwrap(); + } + + // Callback used for backing candidates + let group_validators = |group_index: GroupIndex| { + match group_index { + group_index if group_index == GroupIndex::from(0) => Some(vec![0]), + group_index if group_index == GroupIndex::from(1) => Some(vec![1]), + group_index if group_index == GroupIndex::from(2) => Some(vec![2]), + group_index if group_index == GroupIndex::from(3) => Some(vec![3]), + group_index if group_index == GroupIndex::from(4) => Some(vec![4]), + group_index if group_index == GroupIndex::from(5) => Some(vec![5]), + group_index if group_index == GroupIndex::from(6) => Some(vec![6]), + group_index if group_index == GroupIndex::from(7) => Some(vec![7]), + group_index if group_index == GroupIndex::from(8) => Some(vec![8]), + + _ => panic!("Group index out of bounds"), + } + .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) + }; + + let mut backed_candidates = vec![]; + let mut expected_backed_candidates_with_core = BTreeMap::new(); + + // Para 1 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(1), + relay_parent, + pov_hash: Hash::repeat_byte(1 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(1), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![1, 1]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![1]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let prev_candidate = candidate.clone(); + let prev_backed: BackedCandidate = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(0 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(0 as u32)), + ); + + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(1), + relay_parent, + pov_hash: Hash::repeat_byte(2 as u8), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![1]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(1 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(1 as u32)), + ); + backed_candidates.push(backed.clone()); + backed_candidates.push(prev_backed.clone()); + } + + // Para 2. 
+ { + let mut candidate_1 = TestCandidateBuilder { + para_id: ParaId::from(2), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(2), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![2, 2]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_1); + + let backed_1: BackedCandidate = back_candidate( + candidate_1, + &validators, + group_validators(GroupIndex::from(2 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(2 as u32)), + ); + + backed_candidates.push(backed_1.clone()); + if core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(2)) + .or_insert(vec![]) + .push((backed_1, CoreIndex(2))); + } + + let mut candidate_2 = TestCandidateBuilder { + para_id: ParaId::from(2), + relay_parent, + pov_hash: Hash::repeat_byte(4 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(2), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), + head_data: HeadData(vec![3, 3]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_2); + + let backed_2 = back_candidate( + candidate_2.clone(), + &validators, + group_validators(GroupIndex::from(3 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(3 as u32)), + ); + backed_candidates.push(backed_2.clone()); + + let mut candidate_3 = TestCandidateBuilder { + para_id: ParaId::from(2), + relay_parent, + pov_hash: Hash::repeat_byte(5 as u8), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + candidate_2.commitments.head_data, + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_3); + + let backed_3 = back_candidate( + candidate_3, + &validators, + group_validators(GroupIndex::from(4 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(4 as u32)), + ); + backed_candidates.push(backed_3.clone()); + } + + // Para 3 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(3), + relay_parent, + pov_hash: Hash::repeat_byte(6 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(3), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![3, 3]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![3]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let prev_candidate = candidate.clone(); + let backed: BackedCandidate = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(5 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(5 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(3)) + 
.or_insert(vec![]) + .push((backed, CoreIndex(5))); + } + + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(3), + relay_parent, + pov_hash: Hash::repeat_byte(6 as u8), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![3]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(6 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(3)) + .or_insert(vec![]) + .push((backed, CoreIndex(6))); + } + } + + // Para 4 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(4), + relay_parent, + pov_hash: Hash::repeat_byte(8 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(4), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![4]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![4]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed: BackedCandidate = back_candidate( + candidate.clone(), + &validators, + group_validators(GroupIndex::from(7 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(7 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(4)) + .or_insert(vec![]) + .push((backed, CoreIndex(7))); + } + + let backed: BackedCandidate = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(7 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(8 as u32)), + ); + backed_candidates.push(backed.clone()); + } + + // State sanity checks + assert_eq!( + >::scheduled_paras().collect::>(), + vec![ + (CoreIndex(0), ParaId::from(1)), + (CoreIndex(1), ParaId::from(1)), + (CoreIndex(2), ParaId::from(2)), + (CoreIndex(3), ParaId::from(2)), + (CoreIndex(4), ParaId::from(2)), + (CoreIndex(5), ParaId::from(3)), + (CoreIndex(6), ParaId::from(3)), + (CoreIndex(7), ParaId::from(4)), + (CoreIndex(8), ParaId::from(4)), + ] + ); + let mut scheduled: BTreeMap> = BTreeMap::new(); + for (core_idx, para_id) in >::scheduled_paras() { + scheduled.entry(para_id).or_default().insert(core_idx); + } + + assert_eq!( shared::Pallet::::active_validator_indices(), vec![ ValidatorIndex(0), @@ -1870,43 +2965,38 @@ mod sanitizers { ValidatorIndex(4), ValidatorIndex(5), ValidatorIndex(6), + ValidatorIndex(7), + ValidatorIndex(8), ] ); TestData { backed_candidates, scheduled_paras: scheduled, - all_backed_candidates_with_core, + expected_backed_candidates_with_core, } } #[rstest] #[case(false)] #[case(true)] - fn happy_path(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + fn happy_path_one_core_per_para(#[case] core_index_enabled: bool) { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, - 
all_backed_candidates_with_core, + expected_backed_candidates_with_core, scheduled_paras: scheduled, - } = get_test_data(core_index_enabled); - - let has_concluded_invalid = - |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; + } = get_test_data_one_core_per_para(core_index_enabled); assert_eq!( - sanitize_backed_candidates::( + sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + BTreeSet::new(), scheduled, core_index_enabled ), - SanitizedBackedCandidates { - backed_candidates_with_core: all_backed_candidates_with_core, - votes_from_disabled_were_dropped: false, - dropped_unscheduled_candidates: false - } + expected_backed_candidates_with_core, ); }); } @@ -1915,29 +3005,46 @@ mod sanitizers { #[case(false)] #[case(true)] fn test_with_multiple_cores_per_para(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, - all_backed_candidates_with_core: expected_all_backed_candidates_with_core, + expected_backed_candidates_with_core, scheduled_paras: scheduled, } = get_test_data_multiple_cores_per_para(core_index_enabled); - let has_concluded_invalid = - |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; - assert_eq!( - sanitize_backed_candidates::( + sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + BTreeSet::new(), scheduled, core_index_enabled ), - SanitizedBackedCandidates { - backed_candidates_with_core: expected_all_backed_candidates_with_core, - votes_from_disabled_were_dropped: false, - dropped_unscheduled_candidates: true - } + expected_backed_candidates_with_core, + ); + }); + } + + #[rstest] + #[case(false)] + #[case(true)] + fn test_candidate_ordering(#[case] core_index_enabled: bool) { + new_test_ext(default_config()).execute_with(|| { + let TestData { + backed_candidates, + scheduled_paras: scheduled, + expected_backed_candidates_with_core, + } = get_test_data_for_order_checks(core_index_enabled); + + assert_eq!( + sanitize_backed_candidates::( + backed_candidates.clone(), + &>::allowed_relay_parents(), + BTreeSet::new(), + scheduled, + core_index_enabled, + ), + expected_backed_candidates_with_core ); }); } @@ -1952,31 +3059,23 @@ mod sanitizers { #[case] core_index_enabled: bool, #[case] multiple_cores_per_para: bool, ) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, .. 
} = if multiple_cores_per_para { get_test_data_multiple_cores_per_para(core_index_enabled) } else { - get_test_data(core_index_enabled) + get_test_data_one_core_per_para(core_index_enabled) }; let scheduled = BTreeMap::new(); - let has_concluded_invalid = - |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; - - let SanitizedBackedCandidates { - backed_candidates_with_core: sanitized_backed_candidates, - votes_from_disabled_were_dropped, - dropped_unscheduled_candidates, - } = sanitize_backed_candidates::( + + let sanitized_backed_candidates = sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + BTreeSet::new(), scheduled, core_index_enabled, ); assert!(sanitized_backed_candidates.is_empty()); - assert!(!votes_from_disabled_were_dropped); - assert!(dropped_unscheduled_candidates); }); } @@ -1984,14 +3083,16 @@ mod sanitizers { #[rstest] #[case(false)] #[case(true)] - fn invalid_are_filtered_out(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + fn concluded_invalid_are_filtered_out_single_core_per_para( + #[case] core_index_enabled: bool, + ) { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, scheduled_paras: scheduled, .. } = - get_test_data(core_index_enabled); + get_test_data_one_core_per_para(core_index_enabled); // mark every second one as concluded invalid let set = { - let mut set = std::collections::HashSet::new(); + let mut set = std::collections::BTreeSet::new(); for (idx, backed_candidate) in backed_candidates.iter().enumerate() { if idx & 0x01 == 0 { set.insert(backed_candidate.hash()); @@ -1999,23 +3100,98 @@ mod sanitizers { } set }; - let has_concluded_invalid = - |_idx: usize, candidate: &BackedCandidate| set.contains(&candidate.hash()); - let SanitizedBackedCandidates { - backed_candidates_with_core: sanitized_backed_candidates, - votes_from_disabled_were_dropped, - dropped_unscheduled_candidates, - } = sanitize_backed_candidates::( + let sanitized_backed_candidates: BTreeMap< + ParaId, + Vec<(BackedCandidate<_>, CoreIndex)>, + > = sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + set, scheduled, core_index_enabled, ); assert_eq!(sanitized_backed_candidates.len(), backed_candidates.len() / 2); - assert!(!votes_from_disabled_were_dropped); - assert!(!dropped_unscheduled_candidates); + }); + } + + // candidates that have concluded as invalid are filtered out, as well as their descendants. + #[test] + fn concluded_invalid_are_filtered_out_multiple_cores_per_para() { + // Mark the first candidate of paraid 1 as invalid. Its descendant should also + // be dropped. Also mark the candidate of paraid 3 as invalid. + new_test_ext(default_config()).execute_with(|| { + let TestData { + backed_candidates, + scheduled_paras: scheduled, + mut expected_backed_candidates_with_core, + .. 
+ } = get_test_data_multiple_cores_per_para(true); + + let mut invalid_set = std::collections::BTreeSet::new(); + + for (idx, backed_candidate) in backed_candidates.iter().enumerate() { + if backed_candidate.descriptor().para_id == ParaId::from(1) && idx == 0 { + invalid_set.insert(backed_candidate.hash()); + } else if backed_candidate.descriptor().para_id == ParaId::from(3) { + invalid_set.insert(backed_candidate.hash()); + } + } + let sanitized_backed_candidates: BTreeMap< + ParaId, + Vec<(BackedCandidate<_>, CoreIndex)>, + > = sanitize_backed_candidates::( + backed_candidates.clone(), + &>::allowed_relay_parents(), + invalid_set, + scheduled, + true, + ); + + // We'll be left with candidates from paraid 2 and 4. + + expected_backed_candidates_with_core.remove(&ParaId::from(1)).unwrap(); + expected_backed_candidates_with_core.remove(&ParaId::from(3)).unwrap(); + + assert_eq!(sanitized_backed_candidates, expected_backed_candidates_with_core); + }); + + // Mark the second candidate of paraid 1 as invalid. Its predecessor should be left + // in place. + new_test_ext(default_config()).execute_with(|| { + let TestData { + backed_candidates, + scheduled_paras: scheduled, + mut expected_backed_candidates_with_core, + .. + } = get_test_data_multiple_cores_per_para(true); + + let mut invalid_set = std::collections::BTreeSet::new(); + + for (idx, backed_candidate) in backed_candidates.iter().enumerate() { + if backed_candidate.descriptor().para_id == ParaId::from(1) && idx == 1 { + invalid_set.insert(backed_candidate.hash()); + } + } + let sanitized_backed_candidates: BTreeMap< + ParaId, + Vec<(BackedCandidate<_>, CoreIndex)>, + > = sanitize_backed_candidates::( + backed_candidates.clone(), + &>::allowed_relay_parents(), + invalid_set, + scheduled, + true, + ); + + // Only the second candidate of paraid 1 should be removed. + expected_backed_candidates_with_core + .get_mut(&ParaId::from(1)) + .unwrap() + .remove(1); + + // We'll be left with candidates from paraid 1, 2, 3 and 4. + assert_eq!(sanitized_backed_candidates, expected_backed_candidates_with_core); + }); } @@ -2023,34 +3199,35 @@ mod sanitizers { #[case(false)] #[case(true)] fn disabled_non_signing_validator_doesnt_get_filtered(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut all_backed_candidates_with_core, .. } = - get_test_data(core_index_enabled); + new_test_ext(default_config()).execute_with(|| { + let TestData { mut expected_backed_candidates_with_core, .. } = + get_test_data_one_core_per_para(core_index_enabled); // Disable Eve set_disabled_validators(vec![4]); - let before = all_backed_candidates_with_core.clone(); + let before = expected_backed_candidates_with_core.clone(); // Eve is disabled but no backing statement is signed by it so nothing should be // filtered - assert!(!filter_backed_statements_from_disabled_validators::( - &mut all_backed_candidates_with_core, + filter_backed_statements_from_disabled_validators::( + &mut expected_backed_candidates_with_core, &>::allowed_relay_parents(), - core_index_enabled - )); - assert_eq!(all_backed_candidates_with_core, before); + core_index_enabled, + ); + assert_eq!(expected_backed_candidates_with_core, before); }); } + #[rstest] #[case(false)] #[case(true)] fn drop_statements_from_disabled_without_dropping_candidate( #[case] core_index_enabled: bool, ) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut all_backed_candidates_with_core, ..
@@ -2023,34 +3199,35 @@ mod sanitizers {
 #[case(false)]
 #[case(true)]
 fn disabled_non_signing_validator_doesnt_get_filtered(#[case] core_index_enabled: bool) {
- new_test_ext(MockGenesisConfig::default()).execute_with(|| {
- let TestData { mut all_backed_candidates_with_core, .. } =
- get_test_data(core_index_enabled);
+ new_test_ext(default_config()).execute_with(|| {
+ let TestData { mut expected_backed_candidates_with_core, .. } =
+ get_test_data_one_core_per_para(core_index_enabled);

 // Disable Eve
 set_disabled_validators(vec![4]);

 let before = expected_backed_candidates_with_core.clone();

 // Eve is disabled but no backing statement is signed by it so nothing should be
 // filtered
- assert!(!filter_backed_statements_from_disabled_validators::<Test>(
- &mut all_backed_candidates_with_core,
+ filter_backed_statements_from_disabled_validators::<Test>(
+ &mut expected_backed_candidates_with_core,
 &<shared::Pallet<Test>>::allowed_relay_parents(),
- core_index_enabled
- ));
- assert_eq!(all_backed_candidates_with_core, before);
+ core_index_enabled,
+ );
+ assert_eq!(expected_backed_candidates_with_core, before);
 });
 }

+
 #[rstest]
 #[case(false)]
 #[case(true)]
 fn drop_statements_from_disabled_without_dropping_candidate(
 #[case] core_index_enabled: bool,
 ) {
- new_test_ext(MockGenesisConfig::default()).execute_with(|| {
- let TestData { mut all_backed_candidates_with_core, .. } =
- get_test_data(core_index_enabled);
+ new_test_ext(default_config()).execute_with(|| {
+ let TestData { mut expected_backed_candidates_with_core, .. } =
+ get_test_data_one_core_per_para(core_index_enabled);

 // Disable Alice
 set_disabled_validators(vec![0]);
@@ -2064,11 +3241,22 @@ mod sanitizers {
 // Verify the initial state is as expected
 assert_eq!(
- all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(),
+ expected_backed_candidates_with_core
+ .get(&ParaId::from(1))
+ .unwrap()
+ .iter()
+ .next()
+ .unwrap()
+ .0
+ .validity_votes()
+ .len(),
 2
 );
- let (validator_indices, maybe_core_index) = all_backed_candidates_with_core
- .get(0)
+ let (validator_indices, maybe_core_index) = expected_backed_candidates_with_core
+ .get(&ParaId::from(1))
+ .unwrap()
+ .iter()
+ .next()
 .unwrap()
 .0
 .validator_indices_and_core_index(core_index_enabled);
@@ -2080,16 +3268,28 @@ mod sanitizers {
 assert_eq!(validator_indices.get(0).unwrap(), true);
 assert_eq!(validator_indices.get(1).unwrap(), true);

- let untouched = all_backed_candidates_with_core.get(1).unwrap().0.clone();
+ let untouched = expected_backed_candidates_with_core
+ .get(&ParaId::from(2))
+ .unwrap()
+ .iter()
+ .next()
+ .unwrap()
+ .0
+ .clone();

- assert!(filter_backed_statements_from_disabled_validators::<Test>(
- &mut all_backed_candidates_with_core,
+ let before = expected_backed_candidates_with_core.clone();
+ filter_backed_statements_from_disabled_validators::<Test>(
+ &mut expected_backed_candidates_with_core,
 &<shared::Pallet<Test>>::allowed_relay_parents(),
- core_index_enabled
- ));
+ core_index_enabled,
+ );
+ assert_eq!(before.len(), expected_backed_candidates_with_core.len());

- let (validator_indices, maybe_core_index) = all_backed_candidates_with_core
- .get(0)
+ let (validator_indices, maybe_core_index) = expected_backed_candidates_with_core
+ .get(&ParaId::from(1))
+ .unwrap()
+ .iter()
+ .next()
 .unwrap()
 .0
 .validator_indices_and_core_index(core_index_enabled);
@@ -2100,47 +3300,137 @@ mod sanitizers {
 }

 // there should still be two backed candidates
- assert_eq!(all_backed_candidates_with_core.len(), 2);
+ assert_eq!(expected_backed_candidates_with_core.len(), 2);
 // but the first one should have only one validity vote
 assert_eq!(
- all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(),
+ expected_backed_candidates_with_core
+ .get(&ParaId::from(1))
+ .unwrap()
+ .iter()
+ .next()
+ .unwrap()
+ .0
+ .validity_votes()
+ .len(),
 1
 );
 // Validator 0 vote should be dropped, validator 1 - retained
 assert_eq!(validator_indices.get(0).unwrap(), false);
 assert_eq!(validator_indices.get(1).unwrap(), true);
 // the second candidate shouldn't be modified
- assert_eq!(all_backed_candidates_with_core.get(1).unwrap().0, untouched);
+ assert_eq!(
+ expected_backed_candidates_with_core
+ .get(&ParaId::from(2))
+ .unwrap()
+ .iter()
+ .next()
+ .unwrap()
+ .0,
+ untouched
+ );
 });
 }

 #[rstest]
 #[case(false)]
 #[case(true)]
- fn drop_candidate_if_all_statements_are_from_disabled(#[case] core_index_enabled: bool) {
- new_test_ext(MockGenesisConfig::default()).execute_with(|| {
- let TestData { mut all_backed_candidates_with_core, .. } =
- get_test_data(core_index_enabled);
+ fn drop_candidate_if_all_statements_are_from_disabled_single_core_per_para(
+ #[case] core_index_enabled: bool,
+ ) {
+ new_test_ext(default_config()).execute_with(|| {
+ let TestData { mut expected_backed_candidates_with_core, .. } =
+ get_test_data_one_core_per_para(core_index_enabled);

 // Disable Alice and Bob
 set_disabled_validators(vec![0, 1]);

 // Verify the initial state is as expected
 assert_eq!(
- all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(),
+ expected_backed_candidates_with_core
+ .get(&ParaId::from(1))
+ .unwrap()
+ .iter()
+ .next()
+ .unwrap()
+ .0
+ .validity_votes()
+ .len(),
 2
 );

- let untouched = all_backed_candidates_with_core.get(1).unwrap().0.clone();
+ let untouched = expected_backed_candidates_with_core
+ .get(&ParaId::from(2))
+ .unwrap()
+ .iter()
+ .next()
+ .unwrap()
+ .0
+ .clone();

- assert!(filter_backed_statements_from_disabled_validators::<Test>(
- &mut all_backed_candidates_with_core,
+ filter_backed_statements_from_disabled_validators::<Test>(
+ &mut expected_backed_candidates_with_core,
 &<shared::Pallet<Test>>::allowed_relay_parents(),
- core_index_enabled
- ));
+ core_index_enabled,
+ );
+
+ assert_eq!(expected_backed_candidates_with_core.len(), 1);
+ assert_eq!(
+ expected_backed_candidates_with_core
+ .get(&ParaId::from(2))
+ .unwrap()
+ .iter()
+ .next()
+ .unwrap()
+ .0,
+ untouched
+ );
+ assert_eq!(expected_backed_candidates_with_core.get(&ParaId::from(1)), None);
+ });
+ }
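The single-core case above pins down the two-step rule for disabled validators: backing statements signed by disabled validators are stripped from each candidate first, and only a candidate left with no validity votes at all is dropped. A self-contained sketch of that invariant, with a hypothetical `Backed` type standing in for the runtime's backed-candidate structure:

    use std::collections::BTreeSet;

    // Illustrative model: a backed candidate reduced to the set of validator
    // indices that signed backing statements for it.
    struct Backed {
        votes: Vec<usize>,
    }

    // Drop votes signed by disabled validators, then drop any candidate that
    // has no backing votes left, mirroring what the tests assert.
    fn filter_disabled(candidates: &mut Vec<Backed>, disabled: &BTreeSet<usize>) {
        for candidate in candidates.iter_mut() {
            candidate.votes.retain(|v| !disabled.contains(v));
        }
        candidates.retain(|candidate| !candidate.votes.is_empty());
    }

    fn main() {
        let mut candidates =
            vec![Backed { votes: vec![0, 1] }, Backed { votes: vec![2, 3] }];
        let disabled = BTreeSet::from([0, 1]);
        filter_disabled(&mut candidates, &disabled);
        // The candidate backed only by validators 0 and 1 is gone.
        assert_eq!(candidates.len(), 1);
        assert_eq!(candidates[0].votes, vec![2, 3]);
    }

The multiple-cores case that follows adds the chaining dimension on top of this: dropping an earlier candidate of a para also drops the candidates built on it.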
+
+ #[test]
+ fn drop_candidate_if_all_statements_are_from_disabled_multiple_cores_per_para() {
+ // Disable Bob, only the second candidate of paraid 1 should be removed.
+ new_test_ext(default_config()).execute_with(|| {
+ let TestData { mut expected_backed_candidates_with_core, .. } =
+ get_test_data_multiple_cores_per_para(true);

- assert_eq!(all_backed_candidates_with_core.len(), 1);
- assert_eq!(all_backed_candidates_with_core.get(0).unwrap().0, untouched);
+ set_disabled_validators(vec![1]);
+
+ let mut untouched = expected_backed_candidates_with_core.clone();
+
+ filter_backed_statements_from_disabled_validators::<Test>(
+ &mut expected_backed_candidates_with_core,
+ &<shared::Pallet<Test>>::allowed_relay_parents(),
+ true,
+ );
+
+ untouched.get_mut(&ParaId::from(1)).unwrap().remove(1);
+
+ assert_eq!(expected_backed_candidates_with_core, untouched);
 });
+
+ // Disable Alice or disable both Alice and Bob, all candidates of paraid 1 should be
+ // removed.
+ for disabled in [vec![0], vec![0, 1]] {
+ new_test_ext(default_config()).execute_with(|| {
+ let TestData { mut expected_backed_candidates_with_core, .. } =
+ get_test_data_multiple_cores_per_para(true);
+
+ set_disabled_validators(disabled);
+
+ let mut untouched = expected_backed_candidates_with_core.clone();
+
+ filter_backed_statements_from_disabled_validators::<Test>(
+ &mut expected_backed_candidates_with_core,
+ &<shared::Pallet<Test>>::allowed_relay_parents(),
+ true,
+ );
+
+ untouched.remove(&ParaId::from(1)).unwrap();
+
+ assert_eq!(expected_backed_candidates_with_core, untouched);
+ });
+ }
 }
 }
}
diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs
index 1bbd4dfb716f..171f3f746a82 100644
--- a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs
+++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs
@@ -22,6 +22,7 @@ use crate::{
 scheduler::{self, CoreOccupied},
 session_info, shared,
 };
+use frame_support::traits::{GetStorageVersion, StorageVersion};
 use frame_system::pallet_prelude::*;
 use primitives::{
 async_backing::{
@@ -92,16 +93,41 @@ pub fn availability_cores<T: initializer::Config>() -> Vec<CoreState<T::Hash, BlockNumberFor<T>>> {
- let pending_availability =
- <inclusion::Pallet<T>>::pending_availability(entry.para_id())
- .expect("Occupied core always has pending availability; qed");
-
- let backed_in_number = *pending_availability.backed_in_number();
+ // Due to https://github.com/paritytech/polkadot-sdk/issues/64, using the new storage types would cause
+ // this runtime API to panic. We explicitly handle the storage for version 0 to
+ // prevent that. When removing the inclusion v0 -> v1 migration, this bit of code
+ // can also be removed.
+ let pending_availability = if <inclusion::Pallet<T>>::on_chain_storage_version() ==
+ StorageVersion::new(0)
+ {
+ inclusion::migration::v0::PendingAvailability::<T>::get(entry.para_id())
+ .expect("Occupied core always has pending availability; qed")
+ } else {
+ let candidate = <inclusion::Pallet<T>>::pending_availability_with_core(
+ entry.para_id(),
+ CoreIndex(i as u32),
+ )
+ .expect("Occupied core always has pending availability; qed");
+
+ // Translate to the old candidate format, as we don't need the commitments now.
+ inclusion::migration::v0::CandidatePendingAvailability {
+ core: candidate.core_occupied(),
+ hash: candidate.candidate_hash(),
+ descriptor: candidate.candidate_descriptor().clone(),
+ availability_votes: candidate.availability_votes().clone(),
+ backers: candidate.backers().clone(),
+ relay_parent_number: candidate.relay_parent_number(),
+ backed_in_number: candidate.backed_in_number(),
+ backing_group: candidate.backing_group(),
+ }
+ };
+
+ let backed_in_number = pending_availability.backed_in_number;

 // Use the same block number for determining the responsible group as what the
 // backing subsystem would use when it calls validator_groups api.
 let backing_group_allocation_time =
- pending_availability.relay_parent_number() + One::one();
+ pending_availability.relay_parent_number + One::one();

 CoreState::Occupied(OccupiedCore {
 next_up_on_available: <scheduler::Pallet<T>>::next_up_on_available(CoreIndex(
 i as u32,
 )),
@@ -111,13 +137,13 @@ pub fn availability_cores<T: initializer::Config>() -> Vec<CoreState<T::Hash, BlockNumberFor<T>>> {
 next_up_on_time_out: <scheduler::Pallet<T>>::next_up_on_time_out(CoreIndex(
 i as u32,
 )),
- availability: pending_availability.availability_votes().clone(),
+ availability: pending_availability.availability_votes.clone(),
 group_responsible: group_responsible_for(
 backing_group_allocation_time,
- pending_availability.core_occupied(),
+ pending_availability.core,
 ),
- candidate_hash: pending_availability.candidate_hash(),
- candidate_descriptor: pending_availability.candidate_descriptor().clone(),
+ candidate_hash: pending_availability.hash,
+ candidate_descriptor: pending_availability.descriptor,
 })
 },
 CoreOccupied::Free => {
@@ -200,8 +226,8 @@ pub fn assumed_validation_data<T: initializer::Config>(
 };

 let persisted_validation_data = make_validation_data().or_else(|| {
- // Try again with force enacting the core. This check only makes sense if
- // the core is occupied.
+ // Try again with force enacting the pending candidates. This check only makes sense if
+ // there are any pending candidates.
 <inclusion::Pallet<T>>::pending_availability(para_id).and_then(|_| {
 <inclusion::Pallet<T>>::force_enact(para_id);
 make_validation_data()
@@ -465,27 +491,23 @@ pub fn backing_state<T: initializer::Config>(
 };

 let pending_availability = {
- // Note: the API deals with a `Vec` as it is future-proof for cases
- // where there may be multiple candidates pending availability at a time.
- // But at the moment only one candidate can be pending availability per
- // parachain.
 crate::inclusion::PendingAvailability::<T>::get(&para_id)
- .and_then(|pending| {
- let commitments =
- crate::inclusion::PendingAvailabilityCommitments::<T>::get(&para_id);
- commitments.map(move |c| (pending, c))
- })
- .map(|(pending, commitments)| {
- CandidatePendingAvailability {
- candidate_hash: pending.candidate_hash(),
- descriptor: pending.candidate_descriptor().clone(),
- commitments,
- relay_parent_number: pending.relay_parent_number(),
- max_pov_size: constraints.max_pov_size, // assume always same in session.
- }
+ .map(|pending_candidates| {
+ pending_candidates
+ .into_iter()
+ .map(|candidate| {
+ CandidatePendingAvailability {
+ candidate_hash: candidate.candidate_hash(),
+ descriptor: candidate.candidate_descriptor().clone(),
+ commitments: candidate.candidate_commitments().clone(),
+ relay_parent_number: candidate.relay_parent_number(),
+ max_pov_size: constraints.max_pov_size, /* assume always same in
+ * session. */
+ }
+ })
+ .collect()
 })
- .into_iter()
- .collect()
+ .unwrap_or_else(|| vec![])
 };

 Some(BackingState { constraints, pending_availability })
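The `backing_state` change above is the API-visible side of the inclusion migration: the per-para `PendingAvailability` storage now holds a whole queue of pending candidates instead of at most one, so the response maps over every element, and an absent entry becomes an empty list. A rough, self-contained equivalent of that transformation, with simplified stand-in types (`Pending`, `ApiCandidate`) rather than the real primitives:

    // Simplified mirrors of the runtime structures, for illustration only.
    struct Pending {
        hash: u64,
        relay_parent_number: u32,
    }

    struct ApiCandidate {
        candidate_hash: u64,
        relay_parent_number: u32,
        max_pov_size: u32,
    }

    // Previously at most one candidate was translated; now the whole per-para
    // queue is, and a missing storage entry yields an empty Vec.
    fn pending_availability(queue: Option<Vec<Pending>>, max_pov_size: u32) -> Vec<ApiCandidate> {
        queue
            .map(|q| {
                q.into_iter()
                    .map(|c| ApiCandidate {
                        candidate_hash: c.hash,
                        relay_parent_number: c.relay_parent_number,
                        max_pov_size, // assumed constant within a session
                    })
                    .collect()
            })
            .unwrap_or_default()
    }

    fn main() {
        assert!(pending_availability(None, 5 * 1024 * 1024).is_empty());
        let out = pending_availability(
            Some(vec![Pending { hash: 1, relay_parent_number: 7 }]),
            5 * 1024 * 1024,
        );
        assert_eq!(out.len(), 1);
        assert_eq!(out[0].candidate_hash, 1);
    }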
diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs
index f231864a85e7..25840d9707dc 100644
--- a/polkadot/runtime/parachains/src/scheduler.rs
+++ b/polkadot/runtime/parachains/src/scheduler.rs
@@ -398,16 +398,6 @@ impl<T: Config> Pallet<T> {
 });
 }

- /// Get the para (chain or thread) ID assigned to a particular core or index, if any. Core
- /// indices out of bounds will return `None`, as will indices of unassigned cores.
- pub(crate) fn core_para(core_index: CoreIndex) -> Option<ParaId> {
- let cores = AvailabilityCores::<T>::get();
- match cores.get(core_index.0 as usize) {
- None | Some(CoreOccupied::Free) => None,
- Some(CoreOccupied::Paras(entry)) => Some(entry.para_id()),
- }
- }
-
 /// Get the validators in the given group, if the group index is valid for this session.
 pub(crate) fn group_validators(group_index: GroupIndex) -> Option<Vec<ValidatorId>> {
 ValidatorGroups::<T>::get().get(group_index.0 as usize).map(|g| g.clone())
diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs
index aa07ef080055..493a9d055efd 100644
--- a/polkadot/runtime/parachains/src/util.rs
+++ b/polkadot/runtime/parachains/src/util.rs
@@ -18,7 +18,7 @@
 //! on all modules.

 use frame_system::pallet_prelude::BlockNumberFor;
-use primitives::{Id as ParaId, PersistedValidationData, ValidatorIndex};
+use primitives::{HeadData, Id as ParaId, PersistedValidationData, ValidatorIndex};
 use sp_std::{collections::btree_set::BTreeSet, vec::Vec};

 use crate::{configuration, hrmp, paras};
@@ -42,6 +42,23 @@ pub fn make_persisted_validation_data<T: configuration::Config + paras::Config>
 })
 }

+/// Make the persisted validation data for a particular parachain, a specified relay-parent, its
+/// storage root and parent head data.
+pub fn make_persisted_validation_data_with_parent<T: configuration::Config>(
+ relay_parent_number: BlockNumberFor<T>,
+ relay_parent_storage_root: T::Hash,
+ parent_head: HeadData,
+) -> PersistedValidationData<T::Hash, BlockNumberFor<T>> {
+ let config = <configuration::Pallet<T>>::config();
+
+ PersistedValidationData {
+ parent_head,
+ relay_parent_number,
+ relay_parent_storage_root,
+ max_pov_size: config.max_pov_size,
+ }
+}
+
 /// Take an active subset of a set containing all validators.
 ///
 /// First item in pair will be all items in set have indices found in the `active` indices set (in
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index c9f5d81d286a..90824a2f6f08 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -1663,6 +1663,8 @@ pub mod migrations {

 // permanent
 pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
+
+ parachains_inclusion::migration::MigrateToV1<Runtime>,
 );
 }
diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs
index a102d1903b2f..c250c86665be 100644
--- a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs
+++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs
@@ -13,161 +13,322 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

+
 //! Autogenerated weights for `runtime_parachains::paras_inherent`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2021-11-20, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 128
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/release/polkadot +// target/production/polkadot // benchmark -// --chain=rococo-dev +// pallet // --steps=50 // --repeat=20 -// --pallet=runtime_parachains::paras_inherent // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs -// --header=./file_header.txt +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=runtime_parachains::paras_inherent +// --chain=rococo-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `runtime_parachains::paras_inherent`. pub struct WeightInfo(PhantomData); impl runtime_parachains::paras_inherent::WeightInfo for WeightInfo { - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParaSessionInfo Sessions (r:1 w:0) - // Storage: ParasDisputes Disputes (r:1 w:1) - // Storage: ParasDisputes Included (r:1 w:1) - // Storage: ParasDisputes SpamSlots (r:1 w:1) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: 
`MaxEncodedLen`) + /// Storage: `ParaSessionInfo::Sessions` (r:1 w:0) + /// Proof: `ParaSessionInfo::Sessions` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:1) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::BackersOnDisputes` (r:1 w:1) + /// Proof: `ParasDisputes::BackersOnDisputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:1 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `v` is `[10, 200]`. 
fn enter_variable_disputes(v: u32, ) -> Weight { - Weight::from_parts(352_590_000 as u64, 0) - // Standard Error: 13_000 - .saturating_add(Weight::from_parts(49_254_000 as u64, 0).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().reads(24 as u64)) - .saturating_add(T::DbWeight::get().writes(16 as u64)) + // Proof Size summary in bytes: + // Measured: `67785` + // Estimated: `73725 + v * (23 ±0)` + // Minimum execution time: 949_716_000 picoseconds. + Weight::from_parts(482_361_515, 0) + .saturating_add(Weight::from_parts(0, 73725)) + // Standard Error: 17_471 + .saturating_add(Weight::from_parts(50_100_764, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(25)) + .saturating_add(T::DbWeight::get().writes(15)) + .saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into())) } - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParasDisputes Disputes (r:1 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInclusion AvailabilityBitfields (r:0 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: ParasDisputes Included (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// 
Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_bitfields() -> Weight { - Weight::from_parts(299_878_000 as u64, 0) - .saturating_add(T::DbWeight::get().reads(21 as u64)) - .saturating_add(T::DbWeight::get().writes(15 as u64)) + // Proof Size summary in bytes: + // Measured: `42757` + // Estimated: `48697` + // Minimum execution time: 437_627_000 picoseconds. 
+ Weight::from_parts(460_975_000, 0) + .saturating_add(Weight::from_parts(0, 48697)) + .saturating_add(T::DbWeight::get().reads(23)) + .saturating_add(T::DbWeight::get().writes(15)) } - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParasDisputes Disputes (r:2 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Paras PastCodeMeta (r:1 w:0) - // Storage: Paras CurrentCodeHash (r:1 w:0) - // Storage: Ump RelayDispatchQueueSize (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: ParasDisputes Included (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) - fn enter_backed_candidates_variable(_v: u32) -> Weight { - Weight::from_parts(442_472_000 as u64, 0) - .saturating_add(T::DbWeight::get().reads(25 as u64)) - .saturating_add(T::DbWeight::get().writes(14 as u64)) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: 
`Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `v` is `[101, 200]`. + fn enter_backed_candidates_variable(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `42829` + // Estimated: `48769` + // Minimum execution time: 1_305_254_000 picoseconds. 
+ Weight::from_parts(1_347_160_667, 0) + .saturating_add(Weight::from_parts(0, 48769)) + // Standard Error: 22_128 + .saturating_add(Weight::from_parts(57_229, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(26)) + .saturating_add(T::DbWeight::get().writes(15)) } - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParasDisputes Disputes (r:2 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Paras PastCodeMeta (r:1 w:0) - // Storage: Paras CurrentCodeHash (r:1 w:0) - // Storage: Ump RelayDispatchQueueSize (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: ParasDisputes Included (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: 
`Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeHash` (r:1 w:0) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_backed_candidate_code_upgrade() -> Weight { - Weight::from_parts(36_903_411_000 as u64, 0) - .saturating_add(T::DbWeight::get().reads(25 as u64)) - .saturating_add(T::DbWeight::get().writes(14 as u64)) + // Proof Size summary in bytes: + // Measured: `42842` + // Estimated: `48782` + // Minimum execution time: 38_637_547_000 picoseconds. 
+ Weight::from_parts(41_447_412_000, 0)
+ .saturating_add(Weight::from_parts(0, 48782))
+ .saturating_add(T::DbWeight::get().reads(28))
+ .saturating_add(T::DbWeight::get().writes(15))
 }
 }
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 83d47508c7c7..664044b713ee 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -1781,6 +1781,7 @@ pub mod migrations {
 crate::xcm_config::XcmRouter,
 GetLegacyLeaseImpl,
 >,
+ parachains_inclusion::migration::MigrateToV1<Runtime>,
 );
 }
diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs
index 0dd64f054d00..aa99ac9438c4 100644
--- a/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs
+++ b/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs
@@ -16,11 +16,11 @@
 //! Autogenerated weights for `runtime_parachains::paras_inherent`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024

 // Executed Command:
 // target/production/polkadot
@@ -29,14 +29,13 @@
 // --steps=50
 // --repeat=20
 // --extrinsic=*
-// --execution=wasm
 // --wasm-execution=compiled
 // --heap-pages=4096
-// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
 // --pallet=runtime_parachains::paras_inherent
 // --chain=westend-dev
-// --header=./file_header.txt
-// --output=./runtime/westend/src/weights/
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/westend/src/weights/

 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
 #![allow(unused_imports)]
@@ -49,297 +48,311 @@ use core::marker::PhantomData;

 /// Weight functions for `runtime_parachains::paras_inherent`.
pub struct WeightInfo(PhantomData); impl runtime_parachains::paras_inherent::WeightInfo for WeightInfo { - /// Storage: ParaInherent Included (r:1 w:1) - /// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler AvailabilityCores (r:1 w:1) - /// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Babe AuthorVrfRandomness (r:1 w:0) - /// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) - /// Storage: ParaSessionInfo Sessions (r:1 w:0) - /// Proof Skipped: ParaSessionInfo Sessions (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Disputes (r:1 w:1) - /// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes BackersOnDisputes (r:1 w:1) - /// Proof Skipped: ParasDisputes BackersOnDisputes (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Included (r:1 w:1) - /// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaSessionInfo AccountKeys (r:1 w:0) - /// Proof Skipped: ParaSessionInfo AccountKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking ActiveEra (r:1 w:0) - /// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen) - /// Storage: Staking ErasRewardPoints (r:1 w:1) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaInherent OnChainVotes (r:1 w:1) - /// Proof Skipped: ParaInherent OnChainVotes (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasDisputes Frozen (r:1 w:0) - /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailability (r:2 w:1) - /// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - /// Proof Skipped: ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:1) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelDigests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades 
(max_values: None, max_size: None, mode: Measured) - /// Storage: ParaScheduler SessionStartBlock (r:1 w:0) - /// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ParathreadQueue (r:1 w:1) - /// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler Scheduled (r:1 w:1) - /// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ValidatorGroups (r:1 w:0) - /// Proof Skipped: ParaScheduler ValidatorGroups (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpWatermarks (r:0 w:1) - /// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaSessionInfo::Sessions` (r:1 w:0) + /// Proof: `ParaSessionInfo::Sessions` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:1) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::BackersOnDisputes` (r:1 w:1) + /// Proof: `ParasDisputes::BackersOnDisputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:1 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: 
`ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `v` is `[10, 200]`. fn enter_variable_disputes(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `50518` - // Estimated: `56458 + v * (23 ±0)` - // Minimum execution time: 998_338_000 picoseconds. - Weight::from_parts(468_412_001, 0) - .saturating_add(Weight::from_parts(0, 56458)) - // Standard Error: 20_559 - .saturating_add(Weight::from_parts(56_965_025, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(27)) - .saturating_add(T::DbWeight::get().writes(15)) + // Measured: `67518` + // Estimated: `73458 + v * (23 ±0)` + // Minimum execution time: 844_022_000 picoseconds. 
+ Weight::from_parts(456_682_337, 0) + .saturating_add(Weight::from_parts(0, 73458)) + // Standard Error: 16_403 + .saturating_add(Weight::from_parts(41_871_245, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(28)) + .saturating_add(T::DbWeight::get().writes(16)) .saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into())) } - /// Storage: ParaInherent Included (r:1 w:1) - /// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler AvailabilityCores (r:1 w:1) - /// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Babe AuthorVrfRandomness (r:1 w:0) - /// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) - /// Storage: ParaInherent OnChainVotes (r:1 w:1) - /// Proof Skipped: ParaInherent OnChainVotes (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasDisputes Frozen (r:1 w:0) - /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailability (r:2 w:1) - /// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - /// Proof Skipped: ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaSessionInfo AccountKeys (r:1 w:0) - /// Proof Skipped: ParaSessionInfo AccountKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking ActiveEra (r:1 w:0) - /// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen) - /// Storage: Staking ErasRewardPoints (r:1 w:1) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:1) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelDigests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Disputes (r:1 w:0) - /// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaScheduler SessionStartBlock (r:1 w:0) - /// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: 
Measured) - /// Storage: ParaScheduler ParathreadQueue (r:1 w:1) - /// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler Scheduled (r:1 w:1) - /// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ValidatorGroups (r:1 w:0) - /// Proof Skipped: ParaScheduler ValidatorGroups (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion AvailabilityBitfields (r:0 w:1) - /// Proof Skipped: ParaInclusion AvailabilityBitfields (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Included (r:0 w:1) - /// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpWatermarks (r:0 w:1) - /// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_bitfields() -> Weight { // Proof Size summary in bytes: - // Measured: `42352` - // Estimated: `48292` - // Minimum execution time: 457_404_000 picoseconds. - Weight::from_parts(485_416_000, 0) - .saturating_add(Weight::from_parts(0, 48292)) - .saturating_add(T::DbWeight::get().reads(25)) + // Measured: `43196` + // Estimated: `49136` + // Minimum execution time: 438_637_000 picoseconds. 
+ Weight::from_parts(458_342_000, 0) + .saturating_add(Weight::from_parts(0, 49136)) + .saturating_add(T::DbWeight::get().reads(26)) .saturating_add(T::DbWeight::get().writes(16)) } - /// Storage: ParaInherent Included (r:1 w:1) - /// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler AvailabilityCores (r:1 w:1) - /// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Babe AuthorVrfRandomness (r:1 w:0) - /// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) - /// Storage: ParaInherent OnChainVotes (r:1 w:1) - /// Proof Skipped: ParaInherent OnChainVotes (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasDisputes Frozen (r:1 w:0) - /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailability (r:2 w:1) - /// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - /// Proof Skipped: ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaSessionInfo AccountKeys (r:1 w:0) - /// Proof Skipped: ParaSessionInfo AccountKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking ActiveEra (r:1 w:0) - /// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen) - /// Storage: Staking ErasRewardPoints (r:1 w:1) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:1) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelDigests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Disputes (r:1 w:0) - /// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaScheduler SessionStartBlock (r:1 w:0) - /// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ParathreadQueue (r:1 w:1) - /// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured) - /// 
Storage: ParaScheduler Scheduled (r:1 w:1) - /// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ValidatorGroups (r:1 w:0) - /// Proof Skipped: ParaScheduler ValidatorGroups (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:1 w:0) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: ParasDisputes Included (r:0 w:1) - /// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpWatermarks (r:0 w:1) - /// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) 
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `v` is `[101, 200]`. fn enter_backed_candidates_variable(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `42387` - // Estimated: `48327` - // Minimum execution time: 6_864_029_000 picoseconds. - Weight::from_parts(1_237_704_892, 0) - .saturating_add(Weight::from_parts(0, 48327)) - // Standard Error: 33_413 - .saturating_add(Weight::from_parts(56_199_819, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(28)) - .saturating_add(T::DbWeight::get().writes(15)) + // Measured: `43269` + // Estimated: `49209` + // Minimum execution time: 5_955_361_000 picoseconds. 
+ Weight::from_parts(1_285_398_956, 0) + .saturating_add(Weight::from_parts(0, 49209)) + // Standard Error: 57_369 + .saturating_add(Weight::from_parts(47_073_853, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(29)) + .saturating_add(T::DbWeight::get().writes(16)) } - /// Storage: ParaInherent Included (r:1 w:1) - /// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler AvailabilityCores (r:1 w:1) - /// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Babe AuthorVrfRandomness (r:1 w:0) - /// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) - /// Storage: ParaInherent OnChainVotes (r:1 w:1) - /// Proof Skipped: ParaInherent OnChainVotes (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasDisputes Frozen (r:1 w:0) - /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailability (r:2 w:1) - /// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - /// Proof Skipped: ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaSessionInfo AccountKeys (r:1 w:0) - /// Proof Skipped: ParaSessionInfo AccountKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking ActiveEra (r:1 w:0) - /// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen) - /// Storage: Staking ErasRewardPoints (r:1 w:1) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:1) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelDigests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Disputes (r:1 w:0) - /// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaScheduler SessionStartBlock (r:1 w:0) - /// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ParathreadQueue (r:1 w:1) - 
/// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler Scheduled (r:1 w:1) - /// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ValidatorGroups (r:1 w:0) - /// Proof Skipped: ParaScheduler ValidatorGroups (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:1 w:0) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeHash (r:1 w:0) - /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeRestrictionSignal (r:1 w:0) - /// Proof Skipped: Paras UpgradeRestrictionSignal (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: ParasDisputes Included (r:0 w:1) - /// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpWatermarks (r:0 w:1) - /// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: 
Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeHash` (r:1 w:0) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: 
None, `max_size`: None, mode: `Measured`)
 	fn enter_backed_candidate_code_upgrade() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `42414`
-		//  Estimated: `48354`
-		// Minimum execution time: 43_320_529_000 picoseconds.
-		Weight::from_parts(45_622_613_000, 0)
-			.saturating_add(Weight::from_parts(0, 48354))
-			.saturating_add(T::DbWeight::get().reads(30))
-			.saturating_add(T::DbWeight::get().writes(15))
+		//  Measured:  `43282`
+		//  Estimated: `49222`
+		// Minimum execution time: 42_128_606_000 picoseconds.
+		Weight::from_parts(42_822_806_000, 0)
+			.saturating_add(Weight::from_parts(0, 49222))
+			.saturating_add(T::DbWeight::get().reads(31))
+			.saturating_add(T::DbWeight::get().writes(16))
 	}
 }

diff --git a/prdoc/pr_3479.prdoc b/prdoc/pr_3479.prdoc
new file mode 100644
index 000000000000..1e44ce5646b9
--- /dev/null
+++ b/prdoc/pr_3479.prdoc
@@ -0,0 +1,8 @@
+title: "Elastic scaling: runtime dependency tracking and enactment"
+
+doc:
+  - audience: Node Dev
+    description: |
+      Adds support in the inclusion and paras_inherent runtime modules for backing and including multiple candidates of the same para if they form a chain.
+
+crates: [ ]

From 64a707a4824ad12eb9ab715966dbd0d2b0efaf01 Mon Sep 17 00:00:00 2001
From: ordian
Date: Thu, 21 Mar 2024 12:22:16 +0100
Subject: [PATCH 06/17] approval-voting: remove some inefficiencies on startup (#3747)

Small refactoring to reduce the algorithmic complexity of the initial message distribution in approval voting after a sync from O(n_candidates ^ 2) to O(n_candidates).

---
 polkadot/node/core/approval-voting/src/lib.rs | 274 +++++++++---------
 1 file changed, 129 insertions(+), 145 deletions(-)

diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs
index 8cc16a6e1ec1..1d7ab3eee217 100644
--- a/polkadot/node/core/approval-voting/src/lib.rs
+++ b/polkadot/node/core/approval-voting/src/lib.rs
@@ -55,9 +55,9 @@ use polkadot_node_subsystem_util::{
 };
 use polkadot_primitives::{
 	vstaging::{ApprovalVoteMultipleCandidates, ApprovalVotingParams},
-	BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, DisputeStatement, ExecutorParams,
-	GroupIndex, Hash, PvfExecKind, SessionIndex, SessionInfo, ValidDisputeStatementKind,
-	ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature,
+	BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, CoreIndex, DisputeStatement,
+	ExecutorParams, GroupIndex, Hash, PvfExecKind, SessionIndex, SessionInfo,
+	ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature,
 };
 use sc_keystore::LocalKeystore;
 use sp_application_crypto::Pair;
@@ -1285,10 +1285,10 @@ fn cores_to_candidate_indices(
 	// Map from core index to candidate index.
 	for claimed_core_index in core_indices.iter_ones() {
-		if let Some(candidate_index) = block_entry
+		// Candidates are sorted by core index.
+		if let Ok(candidate_index) = block_entry
 			.candidates()
-			.iter()
-			.position(|(core_index, _)| core_index.0 == claimed_core_index as u32)
+			.binary_search_by_key(&(claimed_core_index as u32), |(core_index, _)| core_index.0)
 		{
 			candidate_indices.push(candidate_index as _);
 		}
@@ -1297,6 +1297,21 @@
 	CandidateBitfield::try_from(candidate_indices)
 }

+// Returns the claimed core bitfield from the assignment cert and the core index
+// from the block entry.
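As an aside on the `cores_to_candidate_indices` hunk above: the speed-up comes from `block_entry.candidates()` being kept sorted by core index, which turns each per-core lookup into a binary search. A minimal, self-contained sketch of that pattern, with the subsystem's types simplified to plain integer tuples rather than the real `CoreIndex`/`CandidateHash` types:

```rust
/// Map claimed core indices to candidate indices, assuming `candidates` holds
/// `(core_index, candidate_hash)` pairs sorted by core index — the invariant
/// the patch notes with "Candidates are sorted by core index".
fn cores_to_candidate_indices(claimed_cores: &[u32], candidates: &[(u32, u64)]) -> Vec<u32> {
    let mut indices = Vec::new();
    for &core in claimed_cores {
        // O(log n) per claimed core, replacing the previous O(n) `position` scan.
        if let Ok(idx) = candidates.binary_search_by_key(&core, |(core_index, _)| *core_index) {
            indices.push(idx as u32);
        }
    }
    indices
}

fn main() {
    let candidates = [(0_u32, 0xaa_u64), (2, 0xbb), (5, 0xcc)];
    assert_eq!(cores_to_candidate_indices(&[2, 5], &candidates), vec![1, 2]);
    // A claimed core with no matching candidate is simply skipped.
    assert!(cores_to_candidate_indices(&[3], &candidates).is_empty());
}
```

Strictly, each lookup is O(log n), so the startup pass drops from O(n²) to O(n log n); the commit message rounds this to O(n_candidates).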
+fn get_core_indices_on_startup( + assignment: &AssignmentCertKindV2, + block_entry_core_index: CoreIndex, +) -> CoreBitfield { + match &assignment { + AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => core_bitfield.clone(), + AssignmentCertKindV2::RelayVRFModulo { sample: _ } => + CoreBitfield::try_from(vec![block_entry_core_index]).expect("Not an empty vec; qed"), + AssignmentCertKindV2::RelayVRFDelay { core_index } => + CoreBitfield::try_from(vec![*core_index]).expect("Not an empty vec; qed"), + } +} + // Returns the claimed core bitfield from the assignment cert, the candidate hash and a // `BlockEntry`. Can fail only for VRF Delay assignments for which we cannot find the candidate hash // in the block entry which indicates a bug or corrupted storage. @@ -1367,7 +1382,7 @@ async fn distribution_messages_for_activation( session: block_entry.session(), }); let mut signatures_queued = HashSet::new(); - for (_, candidate_hash) in block_entry.candidates() { + for (core_index, candidate_hash) in block_entry.candidates() { let _candidate_span = distribution_message_span.child("candidate").with_candidate(*candidate_hash); let candidate_entry = match db.load_candidate_entry(&candidate_hash)? { @@ -1389,152 +1404,121 @@ async fn distribution_messages_for_activation( match approval_entry.local_statements() { (None, None) | (None, Some(_)) => {}, // second is impossible case. (Some(assignment), None) => { - if let Some(claimed_core_indices) = get_assignment_core_indices( - &assignment.cert().kind, - &candidate_hash, - &block_entry, - ) { - if block_entry.has_candidates_pending_signature() { - delayed_approvals_timers.maybe_arm_timer( - state.clock.tick_now(), - state.clock.as_ref(), - block_entry.block_hash(), - assignment.validator_index(), - ) - } + let claimed_core_indices = + get_core_indices_on_startup(&assignment.cert().kind, *core_index); + + if block_entry.has_candidates_pending_signature() { + delayed_approvals_timers.maybe_arm_timer( + state.clock.tick_now(), + state.clock.as_ref(), + block_entry.block_hash(), + assignment.validator_index(), + ) + } - match cores_to_candidate_indices( - &claimed_core_indices, - &block_entry, - ) { - Ok(bitfield) => { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?candidate_entry.candidate_receipt().hash(), - ?block_hash, - "Discovered, triggered assignment, not approved yet", - ); - - let indirect_cert = IndirectAssignmentCertV2 { - block_hash, - validator: assignment.validator_index(), - cert: assignment.cert().clone(), - }; - messages.push( - ApprovalDistributionMessage::DistributeAssignment( - indirect_cert.clone(), - bitfield.clone(), - ), - ); - - if !block_entry - .candidate_is_pending_signature(*candidate_hash) - { - let ExtendedSessionInfo { ref executor_params, .. } = - match get_extended_session_info( - session_info_provider, - ctx.sender(), - block_entry.block_hash(), - block_entry.session(), - ) - .await - { - Some(i) => i, - None => continue, - }; - - actions.push(Action::LaunchApproval { - claimed_candidate_indices: bitfield, - candidate_hash: candidate_entry - .candidate_receipt() - .hash(), - indirect_cert, - assignment_tranche: assignment.tranche(), - relay_block_hash: block_hash, - session: block_entry.session(), - executor_params: executor_params.clone(), - candidate: candidate_entry - .candidate_receipt() - .clone(), - backing_group: approval_entry.backing_group(), - distribute_assignment: false, - }); - } - }, - Err(err) => { - // Should never happen. 
If we fail here it means the - // assignment is null (no cores claimed). - gum::warn!( - target: LOG_TARGET, - ?block_hash, - ?candidate_hash, - ?err, - "Failed to create assignment bitfield", - ); - }, - } - } else { - gum::warn!( - target: LOG_TARGET, - ?block_hash, - ?candidate_hash, - "Cannot get assignment claimed core indices", - ); + match cores_to_candidate_indices(&claimed_core_indices, &block_entry) { + Ok(bitfield) => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?candidate_entry.candidate_receipt().hash(), + ?block_hash, + "Discovered, triggered assignment, not approved yet", + ); + + let indirect_cert = IndirectAssignmentCertV2 { + block_hash, + validator: assignment.validator_index(), + cert: assignment.cert().clone(), + }; + messages.push( + ApprovalDistributionMessage::DistributeAssignment( + indirect_cert.clone(), + bitfield.clone(), + ), + ); + + if !block_entry.candidate_is_pending_signature(*candidate_hash) + { + let ExtendedSessionInfo { ref executor_params, .. } = + match get_extended_session_info( + session_info_provider, + ctx.sender(), + block_entry.block_hash(), + block_entry.session(), + ) + .await + { + Some(i) => i, + None => continue, + }; + + actions.push(Action::LaunchApproval { + claimed_candidate_indices: bitfield, + candidate_hash: candidate_entry + .candidate_receipt() + .hash(), + indirect_cert, + assignment_tranche: assignment.tranche(), + relay_block_hash: block_hash, + session: block_entry.session(), + executor_params: executor_params.clone(), + candidate: candidate_entry.candidate_receipt().clone(), + backing_group: approval_entry.backing_group(), + distribute_assignment: false, + }); + } + }, + Err(err) => { + // Should never happen. If we fail here it means the + // assignment is null (no cores claimed). + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + ?err, + "Failed to create assignment bitfield", + ); + }, } }, (Some(assignment), Some(approval_sig)) => { - if let Some(claimed_core_indices) = get_assignment_core_indices( - &assignment.cert().kind, - &candidate_hash, - &block_entry, - ) { - match cores_to_candidate_indices( - &claimed_core_indices, - &block_entry, - ) { - Ok(bitfield) => messages.push( - ApprovalDistributionMessage::DistributeAssignment( - IndirectAssignmentCertV2 { - block_hash, - validator: assignment.validator_index(), - cert: assignment.cert().clone(), - }, - bitfield, - ), - ), - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?block_hash, - ?candidate_hash, - ?err, - "Failed to create assignment bitfield", - ); - // If we didn't send assignment, we don't send approval. 
-						continue
-					},
-				}
-				if signatures_queued
-					.insert(approval_sig.signed_candidates_indices.clone())
-				{
-					messages.push(ApprovalDistributionMessage::DistributeApproval(
-						IndirectSignedApprovalVoteV2 {
+							let claimed_core_indices =
+								get_core_indices_on_startup(&assignment.cert().kind, *core_index);
+							match cores_to_candidate_indices(&claimed_core_indices, &block_entry) {
+								Ok(bitfield) => messages.push(
+									ApprovalDistributionMessage::DistributeAssignment(
+										IndirectAssignmentCertV2 {
 											block_hash,
-							candidate_indices: approval_sig
-								.signed_candidates_indices,
 											validator: assignment.validator_index(),
-							signature: approval_sig.signature,
+											cert: assignment.cert().clone(),
 										},
-					))
-				};
-			} else {
-				gum::warn!(
-					target: LOG_TARGET,
-					?block_hash,
-					?candidate_hash,
-					"Cannot get assignment claimed core indices",
-				);
+										bitfield,
+									),
+								),
+								Err(err) => {
+									gum::warn!(
+										target: LOG_TARGET,
+										?block_hash,
+										?candidate_hash,
+										?err,
+										"Failed to create assignment bitfield",
+									);
+									// If we didn't send assignment, we don't send approval.
+									continue
+								},
 							}
+							if signatures_queued
+								.insert(approval_sig.signed_candidates_indices.clone())
+							{
+								messages.push(ApprovalDistributionMessage::DistributeApproval(
+									IndirectSignedApprovalVoteV2 {
+										block_hash,
+										candidate_indices: approval_sig.signed_candidates_indices,
+										validator: assignment.validator_index(),
+										signature: approval_sig.signature,
+									},
+								))
+							};
 						},
 					}
 				},

From 9922fd39437cea5797564145bfd1adb989fd57d1 Mon Sep 17 00:00:00 2001
From: kvalerio <24193167+kevin-valerio@users.noreply.github.com>
Date: Thu, 21 Mar 2024 13:18:41 +0100
Subject: [PATCH 07/17] there's a typo (#3779)

There was a typo, so now, there's no more typo.

Co-authored-by: Liam Aharon

---
 substrate/primitives/runtime/src/multiaddress.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/substrate/primitives/runtime/src/multiaddress.rs b/substrate/primitives/runtime/src/multiaddress.rs
index 89b0a3bcf8cc..c435606312e4 100644
--- a/substrate/primitives/runtime/src/multiaddress.rs
+++ b/substrate/primitives/runtime/src/multiaddress.rs
@@ -32,7 +32,7 @@ pub enum MultiAddress<AccountId, AccountIndex> {
 	Raw(Vec<u8>),
 	/// It's a 32 byte representation.
 	Address32([u8; 32]),
-	/// Its a 20 byte representation.
+	/// It's a 20 byte representation.
 	Address20([u8; 20]),
 }

From 46ba85500ffc77fa8e267c5f38b2c213550d68fa Mon Sep 17 00:00:00 2001
From: Tsvetomir Dimitrov
Date: Thu, 21 Mar 2024 14:46:55 +0200
Subject: [PATCH 08/17] Fix toml formatting (#3782)

Make taplo happy

---
 substrate/primitives/arithmetic/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml
index 64c1025b5858..120edd06a660 100644
--- a/substrate/primitives/arithmetic/Cargo.toml
+++ b/substrate/primitives/arithmetic/Cargo.toml
@@ -43,7 +43,7 @@ std = [
 	"scale-info/std",
 	"serde/std",
 	"sp-crypto-hashing/std",
-	"sp-std/std",
+	"sp-std/std",
 ]

 # Serde support without relying on std features.
serde = ["dep:serde", "scale-info/serde"] From 01d65f6b995021bfe7be184581895dc374285f06 Mon Sep 17 00:00:00 2001 From: Alejandro Martinez Andres <11448715+al3mart@users.noreply.github.com> Date: Thu, 21 Mar 2024 17:49:23 +0100 Subject: [PATCH 09/17] Revert `SendXcmOrigin` in Rococo & Westend (#2571) Based on issue [#2512](https://github.com/paritytech/polkadot-sdk/issues/2512), it seems that some ecosystem teams are using these networks to set up their staging environments and test certain use cases, some of them involving sending XCMs from the relay with origins not allowed in the current configuration. This change reverts the configuration of `SendXcmOrigin`. --------- Co-authored-by: Adrian Catangiu --- polkadot/runtime/rococo/src/xcm_config.rs | 8 +++----- polkadot/runtime/westend/src/xcm_config.rs | 4 +++- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 328879715de3..c7063bd7ad61 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -259,11 +259,9 @@ pub type LocalPalletOriginToLocation = ( impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; - // We only allow the root, fellows and the staking admin to send messages. - // This is basically safe to enable for everyone (safe the possibility of someone spamming the - // parachain if they're willing to pay the KSM to send from the Relay-chain), but it's useless - // until we bring in XCM v3 which will make `DescendOrigin` a bit more useful. - type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; + // Note that this configuration of `SendXcmOrigin` is different from the one present in + // production. + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmRouter = XcmRouter; // Anyone can execute XCM messages locally. type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 73127cb1efd6..400843c58356 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -272,7 +272,9 @@ pub type LocalPalletOriginToLocation = ( impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; + // Note that this configuration of `SendXcmOrigin` is different from the one present in + // production. + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmRouter = XcmRouter; // Anyone can execute XCM messages locally... type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; From ea5f4e9a4d78234eb68bbb9130a24b0598c7893a Mon Sep 17 00:00:00 2001 From: "Will | Paradox | ParaNodes.io" <79228812+paradox-tt@users.noreply.github.com> Date: Fri, 22 Mar 2024 00:07:24 +0000 Subject: [PATCH 10/17] Adding LF's bootnodes to relay and system chains (#3514) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Good day, I'm seeking to add the following bootnodes for Kusama and Polkadot's relay and system chains. The following commands can be used to test connectivity. All node keys are backed up. 
Polkadot: ``` polkadot --chain polkadot --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/boot-polkadot.luckyfriday.io/tcp/443/wss/p2p/12D3KooWAdyiVAaeGdtBt6vn5zVetwA4z4qfm9Fi2QCSykN1wTBJ" --no-hardware-benchmarks ``` Assethub-Polkadot: ``` polkadot-parachain --chain asset-hub-polkadot --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/boot-polkadot-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWDR9M7CjV1xdjCRbRwkFn1E7sjMaL4oYxGyDWxuLrFc2J" --no-hardware-benchmarks ``` Bridgehub-Polkadot: ``` polkadot-parachain --chain bridge-hub-polkadot --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/boot-polkadot-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWKf3mBXHjLbwtPqv1BdbQuwbFNcQQYxASS7iQ25264AXH" --no-hardware-benchmarks ``` Collectives-Polkadot ``` polkadot-parachain --chain collectives-polkadot --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/boot-polkadot-collectives.luckyfriday.io/tcp/443/wss/p2p/12D3KooWCzifnPooTt4kvTnXT7FTKTymVL7xn7DURQLsS2AKpf6w" --no-hardware-benchmarks ``` Kusama: ``` polkadot --chain kusama --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/boot-kusama.luckyfriday.io/tcp/443/wss/p2p/12D3KooWS1Lu6DmK8YHSvkErpxpcXmk14vG6y4KVEFEkd9g62PP8" --no-hardware-benchmarks ``` Assethub-Kusama: ``` polkadot-parachain --chain asset-hub-kusama --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/boot-kusama-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWSwaeFs6FNgpgh54fdoxSDAA4nJNaPE3PAcse2GRrG7b3" --no-hardware-benchmarks ``` Bridgehub-Kusama: ``` polkadot-parachain --chain bridge-hub-kusama --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/boot-kusama-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWQybw6AFmAvrFfwUQnNxUpS12RovapD6oorh2mAJr4xyd" --no-hardware-benchmarks ``` Co-authored-by: Bastian Köcher --- cumulus/parachains/chain-specs/asset-hub-kusama.json | 3 ++- cumulus/parachains/chain-specs/asset-hub-polkadot.json | 3 ++- cumulus/parachains/chain-specs/bridge-hub-kusama.json | 3 ++- cumulus/parachains/chain-specs/bridge-hub-polkadot.json | 3 ++- cumulus/parachains/chain-specs/collectives-polkadot.json | 3 ++- polkadot/node/service/chain-specs/kusama.json | 1 + polkadot/node/service/chain-specs/polkadot.json | 3 ++- 7 files changed, 13 insertions(+), 6 deletions(-) diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json index 654275eade81..66a705a40869 100644 --- a/cumulus/parachains/chain-specs/asset-hub-kusama.json +++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json @@ -27,7 +27,8 @@ "/dns/mine14.rotko.net/tcp/34524/ws/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", "/dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30511/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU", - "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30513/wss/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU" + "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30513/wss/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU", + "/dns/boot-kusama-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWSwaeFs6FNgpgh54fdoxSDAA4nJNaPE3PAcse2GRrG7b3" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index 
454060d2a87a..16caa52ba913 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -27,7 +27,8 @@ "/dns/mint14.rotko.net/tcp/34514/ws/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", "/dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30508/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R", - "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30510/wss/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R" + "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30510/wss/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R", + "/dns/boot-polkadot-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWDR9M7CjV1xdjCRbRwkFn1E7sjMaL4oYxGyDWxuLrFc2J" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index 90b70b05016d..6644ea41ab74 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -27,7 +27,8 @@ "/dns/kbr13.rotko.net/tcp/34553/ws/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", "/dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30520/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV", - "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30522/wss/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV" + "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30522/wss/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV", + "/dns/boot-kusama-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWQybw6AFmAvrFfwUQnNxUpS12RovapD6oorh2mAJr4xyd" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index a9444b89e1e2..c51c5eff89b8 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -23,7 +23,8 @@ "/dns/pbr13.rotko.net/tcp/34543/ws/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", "/dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30517/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", - "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30519/wss/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB" + "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30519/wss/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", + "/dns/boot-polkadot-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWKf3mBXHjLbwtPqv1BdbQuwbFNcQQYxASS7iQ25264AXH" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index 259669cf37a0..ce80e21ae625 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -27,7 +27,8 @@ "/dns/pch13.rotko.net/tcp/34573/ws/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", 
"/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30526/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", - "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff" + "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", + "/dns/boot-polkadot-collectives.luckyfriday.io/tcp/443/wss/p2p/12D3KooWCzifnPooTt4kvTnXT7FTKTymVL7xn7DURQLsS2AKpf6w" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/polkadot/node/service/chain-specs/kusama.json b/polkadot/node/service/chain-specs/kusama.json index fd60cb8b6c1d..490b39ee6969 100644 --- a/polkadot/node/service/chain-specs/kusama.json +++ b/polkadot/node/service/chain-specs/kusama.json @@ -35,6 +35,7 @@ "/dns/ksm14.rotko.net/tcp/35224/wss/p2p/12D3KooWAa5THTw8HPfnhEei23HdL8P9McBXdozG2oTtMMksjZkK", "/dns/ksm14.rotko.net/tcp/33224/p2p/12D3KooWAa5THTw8HPfnhEei23HdL8P9McBXdozG2oTtMMksjZkK", "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30333/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT", + "/dns/boot-kusama.luckyfriday.io/tcp/443/wss/p2p/12D3KooWS1Lu6DmK8YHSvkErpxpcXmk14vG6y4KVEFEkd9g62PP8", "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30334/wss/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT" ], "telemetryEndpoints": [ diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json index c235cdd09d8b..5f8d88102d7e 100644 --- a/polkadot/node/service/chain-specs/polkadot.json +++ b/polkadot/node/service/chain-specs/polkadot.json @@ -36,7 +36,8 @@ "/dns/dot14.rotko.net/tcp/35214/wss/p2p/12D3KooWPyEvPEXghnMC67Gff6PuZiSvfx3fmziKiPZcGStZ5xff", "/dns/dot14.rotko.net/tcp/33214/p2p/12D3KooWPyEvPEXghnMC67Gff6PuZiSvfx3fmziKiPZcGStZ5xff", "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30333/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ", - "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30334/wss/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ" + "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30334/wss/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ", + "/dns/boot-polkadot.luckyfriday.io/tcp/443/wss/p2p/12D3KooWAdyiVAaeGdtBt6vn5zVetwA4z4qfm9Fi2QCSykN1wTBJ" ], "telemetryEndpoints": [ [ From 3410dfb3929462da88be2da813f121d8b1cf46b3 Mon Sep 17 00:00:00 2001 From: Clara van Staden Date: Fri, 22 Mar 2024 11:25:28 +0200 Subject: [PATCH 11/17] Snowbridge Beacon header age check (#3727) ## Bug Explanation Adds a check that prevents finalized headers with a gap larger than the sync committee period being imported, which could cause execution headers in the gap being unprovable. The current version of the Ethereum client checks that there is a header at least every sync committee, but it doesn't check that the headers are within a sync period of each other. For example: Header 100 (sync committee period 1) Header 9000 (sync committee period 2) (8900 blocks apart) These headers are in adjacent sync committees, but more than the sync committee period (8192 blocks) apart. The reason we need a header every 8192 slots at least, is the header is used to prove messages within the last 8192 blocks. If we import header 9000, and we receive a message to be verified at header 200, the `block_roots` field of header 9000 won't contain the header in order to do the ancestry check. ## Environment While running in Rococo, this edge case was discovered after the relayer was offline for a few days. 
## Environment

While running in Rococo, this edge case was discovered after the relayer was offline for a few days. It is unlikely, though not impossible, that this will happen again, so it should be backported to polkadot-sdk 1.7.0 (so that [polkadot-fellows/runtimes](https://github.com/polkadot-fellows/runtimes) can be updated with the fix). Our Ethereum client has been operational on Rococo for the past few months, and this has been the only major issue discovered so far.

### Unrelated Change

An unrelated nit: removes a leftover file that should have been deleted when the `parachain` directory was removed.

---------

Co-authored-by: claravanstaden
---
 .../pallets/ethereum-client/src/lib.rs        |  15 +
 .../pallets/ethereum-client/src/tests.rs      |  57 +++-
 .../ethereum-beacon-client/src/mock.rs        | 259 ------------------
 3 files changed, 71 insertions(+), 260 deletions(-)
 delete mode 100644 bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs

diff --git a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
index a54d4a05ac58..fc2ab2fbb588 100644
--- a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
+++ b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
@@ -130,6 +130,10 @@ pub mod pallet {
 		InvalidExecutionHeaderProof,
 		InvalidAncestryMerkleProof,
 		InvalidBlockRootsRootMerkleProof,
+		/// The gap between the finalized headers is larger than the sync committee period,
+		/// rendering execution headers unprovable using ancestry proofs (blocks root size is
+		/// the same as the sync committee period slots).
+		InvalidFinalizedHeaderGap,
 		HeaderNotFinalized,
 		BlockBodyHashTreeRootFailed,
 		HeaderHashTreeRootFailed,
@@ -398,6 +402,17 @@ pub mod pallet {
 			Error::<T>::IrrelevantUpdate
 		);

+		// Verify the finalized header gap between the current finalized header and new imported
+		// header is not larger than the sync committee period, otherwise we cannot do
+		// ancestry proofs for execution headers in the gap.
+		ensure!(
+			latest_finalized_state
+				.slot
+				.saturating_add(config::SLOTS_PER_HISTORICAL_ROOT as u64) >=
+				update.finalized_header.slot,
+			Error::<T>::InvalidFinalizedHeaderGap
+		);
+
 		// Verify that the `finality_branch`, if present, confirms `finalized_header` to match
 		// the finalized checkpoint root saved in the state of `attested_header`.
 		let finalized_block_root: H256 = update
diff --git a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs
index 50b6a25c3428..4a7b7b458869 100644
--- a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs
+++ b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs
@@ -15,7 +15,7 @@ use crate::mock::{

 pub use crate::mock::*;

-use crate::config::{EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SLOTS_PER_EPOCH};
+use crate::config::{EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SLOTS_PER_EPOCH, SLOTS_PER_HISTORICAL_ROOT};
 use frame_support::{assert_err, assert_noop, assert_ok};
 use hex_literal::hex;
 use primitives::{
@@ -884,6 +884,61 @@ fn submit_execution_header_not_finalized() {
 	});
 }

+/// Check that a gap of more than 8192 slots between finalized headers is not allowed.
+#[test]
+fn submit_finalized_header_update_with_too_large_gap() {
+	let checkpoint = Box::new(load_checkpoint_update_fixture());
+	let update = Box::new(load_sync_committee_update_fixture());
+	let mut next_update = Box::new(load_next_sync_committee_update_fixture());
+
+	// Adds 8193 slots, so that the next update is still in the next sync committee, but the
+	// gap between the finalized headers is more than 8192 slots.
+ let slot_with_large_gap = checkpoint.header.slot + SLOTS_PER_HISTORICAL_ROOT as u64 + 1; + + next_update.finalized_header.slot = slot_with_large_gap; + // Adding some slots to the attested header and signature slot since they need to be ahead + // of the finalized header. + next_update.attested_header.slot = slot_with_large_gap + 33; + next_update.signature_slot = slot_with_large_gap + 43; + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), update.clone())); + assert!(>::exists()); + assert_err!( + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), next_update.clone()), + Error::::InvalidFinalizedHeaderGap + ); + }); +} + +/// Check that a gap of 8192 slots between finalized headers is allowed. +#[test] +fn submit_finalized_header_update_with_gap_at_limit() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let update = Box::new(load_sync_committee_update_fixture()); + let mut next_update = Box::new(load_next_sync_committee_update_fixture()); + + next_update.finalized_header.slot = checkpoint.header.slot + SLOTS_PER_HISTORICAL_ROOT as u64; + // Adding some slots to the attested header and signature slot since they need to be ahead + // of the finalized header. + next_update.attested_header.slot = + checkpoint.header.slot + SLOTS_PER_HISTORICAL_ROOT as u64 + 33; + next_update.signature_slot = checkpoint.header.slot + SLOTS_PER_HISTORICAL_ROOT as u64 + 43; + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), update.clone())); + assert!(>::exists()); + assert_err!( + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), next_update.clone()), + // The test should pass the InvalidFinalizedHeaderGap check, and will fail at the + // next check, the merkle proof, because we changed the next_update slots. + Error::::InvalidHeaderMerkleProof + ); + }); +} + /* IMPLS */ #[test] diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs deleted file mode 100644 index d2cca373e92b..000000000000 --- a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs +++ /dev/null @@ -1,259 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -use crate as ethereum_beacon_client; -use frame_support::parameter_types; -use pallet_timestamp; -use primitives::{Fork, ForkVersions}; -use sp_core::H256; -use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; - -#[cfg(not(feature = "beacon-spec-mainnet"))] -pub mod minimal { - use super::*; - - use crate::config; - use frame_support::derive_impl; - use hex_literal::hex; - use primitives::CompactExecutionHeader; - use snowbridge_core::inbound::{Log, Proof}; - use sp_runtime::BuildStorage; - use std::{fs::File, path::PathBuf}; - - type Block = frame_system::mocking::MockBlock; - - frame_support::construct_runtime!( - pub enum Test { - System: frame_system::{Pallet, Call, Storage, Event}, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, - EthereumBeaconClient: ethereum_beacon_client::{Pallet, Call, Storage, Event}, - } - ); - - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const SS58Prefix: u8 = 42; - } - - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] - impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type RuntimeTask = RuntimeTask; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type PalletInfo = PalletInfo; - type SS58Prefix = SS58Prefix; - type Nonce = u64; - type Block = Block; - } - - impl pallet_timestamp::Config for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = (); - type WeightInfo = (); - } - - parameter_types! { - pub const ExecutionHeadersPruneThreshold: u32 = 10; - pub const ChainForkVersions: ForkVersions = ForkVersions{ - genesis: Fork { - version: [0, 0, 0, 1], // 0x00000001 - epoch: 0, - }, - altair: Fork { - version: [1, 0, 0, 1], // 0x01000001 - epoch: 0, - }, - bellatrix: Fork { - version: [2, 0, 0, 1], // 0x02000001 - epoch: 0, - }, - capella: Fork { - version: [3, 0, 0, 1], // 0x03000001 - epoch: 0, - }, - }; - } - - impl ethereum_beacon_client::Config for Test { - type RuntimeEvent = RuntimeEvent; - type ForkVersions = ChainForkVersions; - type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; - type WeightInfo = (); - } - - // Build genesis storage according to the mock runtime. - pub fn new_tester() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000)); - ext - } - - fn load_fixture(basename: &str) -> Result - where - T: for<'de> serde::Deserialize<'de>, - { - let filepath: PathBuf = - [env!("CARGO_MANIFEST_DIR"), "tests", "fixtures", basename].iter().collect(); - serde_json::from_reader(File::open(filepath).unwrap()) - } - - pub fn load_execution_header_update_fixture() -> primitives::ExecutionHeaderUpdate { - load_fixture("execution-header-update.minimal.json").unwrap() - } - - pub fn load_checkpoint_update_fixture( - ) -> primitives::CheckpointUpdate<{ config::SYNC_COMMITTEE_SIZE }> { - load_fixture("initial-checkpoint.minimal.json").unwrap() - } - - pub fn load_sync_committee_update_fixture( - ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { - load_fixture("sync-committee-update.minimal.json").unwrap() - } - - pub fn load_finalized_header_update_fixture( - ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { - load_fixture("finalized-header-update.minimal.json").unwrap() - } - - pub fn load_next_sync_committee_update_fixture( - ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { - load_fixture("next-sync-committee-update.minimal.json").unwrap() - } - - pub fn load_next_finalized_header_update_fixture( - ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { - load_fixture("next-finalized-header-update.minimal.json").unwrap() - } - - pub fn get_message_verification_payload() -> (Log, Proof) { - ( - Log { - address: hex!("ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0").into(), - topics: vec![ - hex!("1b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ad").into(), - 
hex!("00000000000000000000000000000000000000000000000000000000000003e8").into(), - hex!("0000000000000000000000000000000000000000000000000000000000000001").into(), - ], - data: hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000").into(), - }, - Proof { - block_hash: hex!("05aaa60b0f27cce9e71909508527264b77ee14da7b5bf915fcc4e32715333213").into(), - tx_index: 0, - data: (vec![ - hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb").to_vec(), - hex!("d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c185510").to_vec(), - hex!("b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646").to_vec(), - ], vec![ - hex!("f90131a0b601337b3aa10a671caa724eba641e759399979856141d3aea6b6b4ac59b889ba00c7d5dd48be9060221a02fb8fa213860b4c50d47046c8fa65ffaba5737d569e0a094601b62a1086cd9c9cb71a7ebff9e718f3217fd6e837efe4246733c0a196f63a06a4b0dd0aefc37b3c77828c8f07d1b7a2455ceb5dbfd3c77d7d6aeeddc2f7e8ca0d6e8e23142cdd8ec219e1f5d8b56aa18e456702b195deeaa210327284d42ade4a08a313d4c87023005d1ab631bbfe3f5de1e405d0e66d0bef3e033f1e5711b5521a0bf09a5d9a48b10ade82b8d6a5362a15921c8b5228a3487479b467db97411d82fa0f95cccae2a7c572ef3c566503e30bac2b2feb2d2f26eebf6d870dcf7f8cf59cea0d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c1855108080808080808080").to_vec(), - hex!("f851a0b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646a060a634b9280e3a23fb63375e7bbdd9ab07fd379ab6a67e2312bbc112195fa358808080808080808080808080808080").to_vec(), - hex!("f9030820b9030402f90300018301d6e2b9010000000000000800000000000020040008000000000000000000000000400000008000000000000000000000000000000000000000000000000000000000042010000000001000000000000000000000000000000000040000000000000000000000000000000000000000000000008000000000000000002000000000000000000000000200000000000000200000000000100000000040000001000200008000000000000200000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000f901f5f87a942ffa5ecdbe006d30397c7636d3e015eee251369ff842a0c965575a00553e094ca7c5d14f02e107c258dda06867cbf9e0e69f80e71bbcc1a000000000000000000000000000000000000000000000000000000000000003e8a000000000000000000000000000000000000000000000000000000000000003e8f9011c94ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0f863a01b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ada000000000000000000000000000000000000000000000000000000000000003e8a00000000000000000000000000000000000000000000000000000000000000001b8a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000f858948cf6147918a5cbb672703f879f385036f8793a24e1a01449abf21e49fd025f33495e77f7b1461caefdd3d4bb646424a3f445c4576a5ba0000000000000000000000000440edffa1352b13227e8ee646f3ea37456dec701").to_vec(), - ]), - } - ) - } - - pub fn get_message_verification_header() -> CompactExecutionHeader { - CompactExecutionHeader { - parent_hash: hex!("04a7f6ab8282203562c62f38b0ab41d32aaebe2c7ea687702b463148a6429e04") - .into(), - 
block_number: 55,
- state_root: hex!("894d968712976d613519f973a317cb0781c7b039c89f27ea2b7ca193f7befdb3")
- .into(),
- receipts_root: hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb")
- .into(),
- }
- }
-}
-
-#[cfg(feature = "beacon-spec-mainnet")]
-pub mod mainnet {
- use super::*;
- use frame_support::derive_impl;
-
- type Block = frame_system::mocking::MockBlock;
- use sp_runtime::BuildStorage;
-
- frame_support::construct_runtime!(
- pub enum Test {
- System: frame_system::{Pallet, Call, Storage, Event},
- Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent},
- EthereumBeaconClient: ethereum_beacon_client::{Pallet, Call, Storage, Event},
- }
- );
-
- parameter_types! {
- pub const BlockHashCount: u64 = 250;
- pub const SS58Prefix: u8 = 42;
- }
-
- #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
- impl frame_system::Config for Test {
- type BaseCallFilter = frame_support::traits::Everything;
- type RuntimeOrigin = RuntimeOrigin;
- type RuntimeCall = RuntimeCall;
- type RuntimeTask = RuntimeTask;
- type Hash = H256;
- type Hashing = BlakeTwo256;
- type AccountId = u64;
- type Lookup = IdentityLookup;
- type RuntimeEvent = RuntimeEvent;
- type BlockHashCount = BlockHashCount;
- type PalletInfo = PalletInfo;
- type SS58Prefix = SS58Prefix;
- type Nonce = u64;
- type Block = Block;
- }
-
- impl pallet_timestamp::Config for Test {
- type Moment = u64;
- type OnTimestampSet = ();
- type MinimumPeriod = ();
- type WeightInfo = ();
- }
-
- parameter_types! {
- pub const ChainForkVersions: ForkVersions = ForkVersions{
- genesis: Fork {
- version: [0, 0, 16, 32], // 0x00001020
- epoch: 0,
- },
- altair: Fork {
- version: [1, 0, 16, 32], // 0x01001020
- epoch: 36660,
- },
- bellatrix: Fork {
- version: [2, 0, 16, 32], // 0x02001020
- epoch: 112260,
- },
- capella: Fork {
- version: [3, 0, 16, 32], // 0x03001020
- epoch: 162304,
- },
- };
- pub const ExecutionHeadersPruneThreshold: u32 = 10;
- }
-
- impl ethereum_beacon_client::Config for Test {
- type RuntimeEvent = RuntimeEvent;
- type ForkVersions = ChainForkVersions;
- type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold;
- type WeightInfo = ();
- }
-
- // Build genesis storage according to the mock runtime.
- pub fn new_tester() -> sp_io::TestExternalities {
- let t = frame_system::GenesisConfig::::default().build_storage().unwrap();
- let mut ext = sp_io::TestExternalities::new(t);
- let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000));
- ext
- }
-}

From 22d5b80d4486f2e9d53bf8ad1eb3efa11a4136af Mon Sep 17 00:00:00 2001
From: Vincent Geddes
Date: Fri, 22 Mar 2024 13:52:51 +0200
Subject: [PATCH 12/17] Add a linear fee multiplier (#127) (#3790)

Bridging fees are calculated using a static ETH/DOT exchange rate that can deviate significantly from the real-world exchange rate. We therefore need to add a safety margin to the fee so that users almost always cover the cost of relaying.

# FAQ

> Why introduce a `multiplier` parameter instead of configuring an exchange rate which already has a safety factor applied?

When converting from ETH to DOT, we need to _divide_ the multiplier by the exchange rate, and to convert from DOT to ETH we need to _multiply_ the multiplier by the exchange rate.

> Other input parameters to the fee calculation can also deviate from real-world values. These include substrate weights, gas prices, and so on. Why does the multiplier introduced here not adjust those?
A single scalar multiplier won't be able to accommodate the different volatilities efficiently. For example, gas prices are much more volatile than exchange rates, and substrate weights hardly ever change. So the pricing config relating to weights and gas prices should already have some appropriate safety margin pre-applied.

# Detailed Changes:

* Added `multiplier` field to `PricingParameters`
* Outbound-queue fee is multiplied by `multiplier`
* This `multiplier` is synced to the Ethereum side
* Improved Runtime API for calculating outbound-queue fees. This API makes it much easier to configure parts of the system in preparation for launch.
* Improved and clarified code documentation

Upstreamed from https://github.com/Snowfork/polkadot-sdk/pull/127

---------

Co-authored-by: Clara van Staden
Co-authored-by: Adrian Catangiu
---
 .../pallets/inbound-queue/src/mock.rs | 3 +-
 .../outbound-queue/runtime-api/src/lib.rs | 11 +++--
 .../pallets/outbound-queue/src/api.rs | 18 +++---
 .../pallets/outbound-queue/src/lib.rs | 32 +++++++++----
 .../pallets/outbound-queue/src/mock.rs | 3 +-
 .../pallets/outbound-queue/src/test.rs | 46 +++++++++++--------
 bridges/snowbridge/pallets/system/src/lib.rs | 2 +
 bridges/snowbridge/pallets/system/src/mock.rs | 3 +-
 .../primitives/core/src/outbound.rs | 13 ++++--
 .../snowbridge/primitives/core/src/pricing.rs | 5 ++
 .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 11 +++--
 11 files changed, 97 insertions(+), 50 deletions(-)

diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
index 086b27cb8280..39e9532ed321 100644
--- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
+++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
@@ -173,7 +173,8 @@ parameter_types! {
 pub Parameters: PricingParameters = PricingParameters {
 exchange_rate: FixedU128::from_rational(1, 400),
 fee_per_gas: gwei(20),
- rewards: Rewards { local: DOT, remote: meth(1) }
+ rewards: Rewards { local: DOT, remote: meth(1) },
+ multiplier: FixedU128::from_rational(1, 1),
 };
 }
 
diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs
index 51f46a7b49c8..e6ddaa439352 100644
--- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs
+++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs
@@ -3,7 +3,10 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
 use frame_support::traits::tokens::Balance as BalanceT;
-use snowbridge_core::outbound::Message;
+use snowbridge_core::{
+ outbound::{Command, Fee},
+ PricingParameters,
+};
 use snowbridge_outbound_queue_merkle_tree::MerkleProof;
 
 sp_api::decl_runtime_apis! {
@@ -11,10 +14,10 @@ sp_api::decl_runtime_apis! {
 {
 /// Generate a merkle proof for a committed message identified by `leaf_index`.
/// The merkle root is stored in the block header as a - /// `\[`sp_runtime::generic::DigestItem::Other`\]` + /// `sp_runtime::generic::DigestItem::Other` fn prove_message(leaf_index: u64) -> Option; - /// Calculate the delivery fee for `message` - fn calculate_fee(message: Message) -> Option; + /// Calculate the delivery fee for `command` + fn calculate_fee(command: Command, parameters: Option>) -> Fee; } } diff --git a/bridges/snowbridge/pallets/outbound-queue/src/api.rs b/bridges/snowbridge/pallets/outbound-queue/src/api.rs index 44d63f1e2d23..b904819b1b18 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/api.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/api.rs @@ -4,8 +4,12 @@ use crate::{Config, MessageLeaves}; use frame_support::storage::StorageStreamIter; -use snowbridge_core::outbound::{Message, SendMessage}; +use snowbridge_core::{ + outbound::{Command, Fee, GasMeter}, + PricingParameters, +}; use snowbridge_outbound_queue_merkle_tree::{merkle_proof, MerkleProof}; +use sp_core::Get; pub fn prove_message(leaf_index: u64) -> Option where @@ -19,12 +23,14 @@ where Some(proof) } -pub fn calculate_fee(message: Message) -> Option +pub fn calculate_fee( + command: Command, + parameters: Option>, +) -> Fee where T: Config, { - match crate::Pallet::::validate(&message) { - Ok((_, fees)) => Some(fees.total()), - _ => None, - } + let gas_used_at_most = T::GasMeter::maximum_gas_used_at_most(&command); + let parameters = parameters.unwrap_or(T::PricingParameters::get()); + crate::Pallet::::calculate_fee(gas_used_at_most, parameters) } diff --git a/bridges/snowbridge/pallets/outbound-queue/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue/src/lib.rs index 9e949a4791a8..9b9dbe854a5e 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/lib.rs @@ -47,24 +47,37 @@ //! consume on Ethereum. Using this upper bound, a final fee can be calculated. //! //! The fee calculation also requires the following parameters: -//! * ETH/DOT exchange rate -//! * Ether fee per unit of gas +//! * Average ETH/DOT exchange rate over some period +//! * Max fee per unit of gas that bridge is willing to refund relayers for //! //! By design, it is expected that governance should manually update these //! parameters every few weeks using the `set_pricing_parameters` extrinsic in the //! system pallet. //! +//! This is an interim measure. Once ETH/DOT liquidity pools are available in the Polkadot network, +//! we'll use them as a source of pricing info, subject to certain safeguards. +//! //! ## Fee Computation Function //! //! ```text //! LocalFee(Message) = WeightToFee(ProcessMessageWeight(Message)) -//! RemoteFee(Message) = MaxGasRequired(Message) * FeePerGas + Reward -//! Fee(Message) = LocalFee(Message) + (RemoteFee(Message) / Ratio("ETH/DOT")) +//! RemoteFee(Message) = MaxGasRequired(Message) * Params.MaxFeePerGas + Params.Reward +//! RemoteFeeAdjusted(Message) = Params.Multiplier * (RemoteFee(Message) / Params.Ratio("ETH/DOT")) +//! Fee(Message) = LocalFee(Message) + RemoteFeeAdjusted(Message) //! ``` //! -//! By design, the computed fee is always going to conservative, to cover worst-case -//! costs of dispatch on Ethereum. In future iterations of the design, we will optimize -//! this, or provide a mechanism to asynchronously refund a portion of collected fees. +//! By design, the computed fee includes a safety factor (the `Multiplier`) to cover +//! unfavourable fluctuations in the ETH/DOT exchange rate. +//! +//! 
## Fee Settlement +//! +//! On the remote side, in the gateway contract, the relayer accrues +//! +//! ```text +//! Min(GasPrice, Message.MaxFeePerGas) * GasUsed() + Message.Reward +//! ``` +//! Or in plain english, relayers are refunded for gas consumption, using a +//! price that is a minimum of the actual gas price, or `Message.MaxFeePerGas`. //! //! # Extrinsics //! @@ -106,7 +119,7 @@ pub use snowbridge_outbound_queue_merkle_tree::MerkleProof; use sp_core::{H256, U256}; use sp_runtime::{ traits::{CheckedDiv, Hash}, - DigestItem, + DigestItem, Saturating, }; use sp_std::prelude::*; pub use types::{CommittedMessage, ProcessMessageOriginOf}; @@ -366,8 +379,9 @@ pub mod pallet { // downcast to u128 let fee: u128 = fee.try_into().defensive_unwrap_or(u128::MAX); - // convert to local currency + // multiply by multiplier and convert to local currency let fee = FixedU128::from_inner(fee) + .saturating_mul(params.multiplier) .checked_div(¶ms.exchange_rate) .expect("exchange rate is not zero; qed") .into_inner(); diff --git a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs index 850b13dcf310..67877a05c79a 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs @@ -77,7 +77,8 @@ parameter_types! { pub Parameters: PricingParameters = PricingParameters { exchange_rate: FixedU128::from_rational(1, 400), fee_per_gas: gwei(20), - rewards: Rewards { local: DOT, remote: meth(1) } + rewards: Rewards { local: DOT, remote: meth(1) }, + multiplier: FixedU128::from_rational(4, 3), }; } diff --git a/bridges/snowbridge/pallets/outbound-queue/src/test.rs b/bridges/snowbridge/pallets/outbound-queue/src/test.rs index 8ed4a318d68e..4e9ea36e24bc 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/test.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/test.rs @@ -268,28 +268,34 @@ fn encode_digest_item() { } #[test] -fn validate_messages_with_fees() { +fn test_calculate_fees_with_unit_multiplier() { new_tester().execute_with(|| { - let message = mock_message(1000); - let (_, fee) = OutboundQueue::validate(&message).unwrap(); + let gas_used: u64 = 250000; + let price_params: PricingParameters<::Balance> = PricingParameters { + exchange_rate: FixedU128::from_rational(1, 400), + fee_per_gas: 10000_u32.into(), + rewards: Rewards { local: 1_u32.into(), remote: 1_u32.into() }, + multiplier: FixedU128::from_rational(1, 1), + }; + let fee = OutboundQueue::calculate_fee(gas_used, price_params); assert_eq!(fee.local, 698000000); - assert_eq!(fee.remote, 2680000000000); + assert_eq!(fee.remote, 1000000); }); } #[test] -fn test_calculate_fees() { +fn test_calculate_fees_with_multiplier() { new_tester().execute_with(|| { let gas_used: u64 = 250000; - let illegal_price_params: PricingParameters<::Balance> = - PricingParameters { - exchange_rate: FixedU128::from_rational(1, 400), - fee_per_gas: 10000_u32.into(), - rewards: Rewards { local: 1_u32.into(), remote: 1_u32.into() }, - }; - let fee = OutboundQueue::calculate_fee(gas_used, illegal_price_params); + let price_params: PricingParameters<::Balance> = PricingParameters { + exchange_rate: FixedU128::from_rational(1, 400), + fee_per_gas: 10000_u32.into(), + rewards: Rewards { local: 1_u32.into(), remote: 1_u32.into() }, + multiplier: FixedU128::from_rational(4, 3), + }; + let fee = OutboundQueue::calculate_fee(gas_used, price_params); assert_eq!(fee.local, 698000000); - assert_eq!(fee.remote, 1000000); + assert_eq!(fee.remote, 1333333); 
}); } @@ -297,13 +303,13 @@ fn test_calculate_fees() { fn test_calculate_fees_with_valid_exchange_rate_but_remote_fee_calculated_as_zero() { new_tester().execute_with(|| { let gas_used: u64 = 250000; - let illegal_price_params: PricingParameters<::Balance> = - PricingParameters { - exchange_rate: FixedU128::from_rational(1, 1), - fee_per_gas: 1_u32.into(), - rewards: Rewards { local: 1_u32.into(), remote: 1_u32.into() }, - }; - let fee = OutboundQueue::calculate_fee(gas_used, illegal_price_params.clone()); + let price_params: PricingParameters<::Balance> = PricingParameters { + exchange_rate: FixedU128::from_rational(1, 1), + fee_per_gas: 1_u32.into(), + rewards: Rewards { local: 1_u32.into(), remote: 1_u32.into() }, + multiplier: FixedU128::from_rational(1, 1), + }; + let fee = OutboundQueue::calculate_fee(gas_used, price_params.clone()); assert_eq!(fee.local, 698000000); // Though none zero pricing params the remote fee calculated here is invalid // which should be avoided diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 6e5ceb5e9b1d..39c73e3630e7 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -159,6 +159,7 @@ pub mod pallet { type DefaultPricingParameters: Get>; /// Cost of delivering a message from Ethereum + #[pallet::constant] type InboundDeliveryCost: Get>; type WeightInfo: WeightInfo; @@ -334,6 +335,7 @@ pub mod pallet { let command = Command::SetPricingParameters { exchange_rate: params.exchange_rate.into(), delivery_cost: T::InboundDeliveryCost::get().saturated_into::(), + multiplier: params.multiplier.into(), }; Self::send(PRIMARY_GOVERNANCE_CHANNEL, command, PaysFee::::No)?; diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index a711eab5d3d4..0312456c9823 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -193,7 +193,8 @@ parameter_types! 
{ pub Parameters: PricingParameters = PricingParameters { exchange_rate: FixedU128::from_rational(1, 400), fee_per_gas: gwei(20), - rewards: Rewards { local: DOT, remote: meth(1) } + rewards: Rewards { local: DOT, remote: meth(1) }, + multiplier: FixedU128::from_rational(4, 3) }; pub const InboundDeliveryCost: u128 = 1_000_000_000; diff --git a/bridges/snowbridge/primitives/core/src/outbound.rs b/bridges/snowbridge/primitives/core/src/outbound.rs index bce123878d3a..0ba0fdb61089 100644 --- a/bridges/snowbridge/primitives/core/src/outbound.rs +++ b/bridges/snowbridge/primitives/core/src/outbound.rs @@ -136,6 +136,8 @@ mod v1 { exchange_rate: UD60x18, // Cost of delivering a message from Ethereum to BridgeHub, in ROC/KSM/DOT delivery_cost: u128, + // Fee multiplier + multiplier: UD60x18, }, } @@ -203,10 +205,11 @@ mod v1 { Token::Uint(U256::from(*transfer_asset_xcm)), Token::Uint(*register_token), ])]), - Command::SetPricingParameters { exchange_rate, delivery_cost } => + Command::SetPricingParameters { exchange_rate, delivery_cost, multiplier } => ethabi::encode(&[Token::Tuple(vec![ Token::Uint(exchange_rate.clone().into_inner()), Token::Uint(U256::from(*delivery_cost)), + Token::Uint(multiplier.clone().into_inner()), ])]), } } @@ -273,7 +276,8 @@ mod v1 { } } -#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(PartialEq))] /// Fee for delivering message pub struct Fee where @@ -346,12 +350,13 @@ pub trait GasMeter { /// the command within the message const MAXIMUM_BASE_GAS: u64; + /// Total gas consumed at most, including verification & dispatch fn maximum_gas_used_at_most(command: &Command) -> u64 { Self::MAXIMUM_BASE_GAS + Self::maximum_dispatch_gas_used_at_most(command) } - /// Measures the maximum amount of gas a command payload will require to dispatch, AFTER - /// validation & verification. + /// Measures the maximum amount of gas a command payload will require to *dispatch*, NOT + /// including validation & verification. 
fn maximum_dispatch_gas_used_at_most(command: &Command) -> u64;
 }
 
diff --git a/bridges/snowbridge/primitives/core/src/pricing.rs b/bridges/snowbridge/primitives/core/src/pricing.rs
index 33aeda6d15c4..0f392c7ad4bd 100644
--- a/bridges/snowbridge/primitives/core/src/pricing.rs
+++ b/bridges/snowbridge/primitives/core/src/pricing.rs
@@ -13,6 +13,8 @@ pub struct PricingParameters {
 pub rewards: Rewards,
 /// Ether (wei) fee per gas unit
 pub fee_per_gas: U256,
+ /// Fee multiplier
+ pub multiplier: FixedU128,
 }
 
 #[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
@@ -43,6 +45,9 @@ where
 if self.rewards.remote.is_zero() {
 return Err(InvalidPricingParameters)
 }
+ if self.multiplier == FixedU128::zero() {
+ return Err(InvalidPricingParameters)
+ }
 Ok(())
 }
 }
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
index bf7483179f29..3980fa0d501a 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
@@ -38,7 +38,9 @@ pub mod xcm_config;
 use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases;
 use snowbridge_beacon_primitives::{Fork, ForkVersions};
 use snowbridge_core::{
- gwei, meth, outbound::Message, AgentId, AllowSiblingsOnly, PricingParameters, Rewards,
+ gwei, meth,
+ outbound::{Command, Fee},
+ AgentId, AllowSiblingsOnly, PricingParameters, Rewards,
 };
 use snowbridge_router_primitives::inbound::MessageToXcm;
 use sp_api::impl_runtime_apis;
@@ -503,7 +505,8 @@ parameter_types! {
 pub Parameters: PricingParameters = PricingParameters {
 exchange_rate: FixedU128::from_rational(1, 400),
 fee_per_gas: gwei(20),
- rewards: Rewards { local: 1 * UNITS, remote: meth(1) }
+ rewards: Rewards { local: 1 * UNITS, remote: meth(1) },
+ multiplier: FixedU128::from_rational(1, 1),
 };
 }
 
@@ -1022,8 +1025,8 @@ impl_runtime_apis! {
 snowbridge_pallet_outbound_queue::api::prove_message::(leaf_index)
 }
 
- fn calculate_fee(message: Message) -> Option {
- snowbridge_pallet_outbound_queue::api::calculate_fee::(message)
+ fn calculate_fee(command: Command, parameters: Option>) -> Fee {
+ snowbridge_pallet_outbound_queue::api::calculate_fee::(command, parameters)
 }
 }

From 9d2963c29d9b7ea949851a166e0cb2792fc66fff Mon Sep 17 00:00:00 2001
From: Dmitry Markin
Date: Fri, 22 Mar 2024 14:18:03 +0200
Subject: [PATCH 13/17] Make public addresses go first in authority discovery
 DHT records (#3757)

Make sure public addresses explicitly set by the operator go first in the authority discovery DHT records. Also update `Discovery` behavior to eliminate duplicates in the returned addresses.

This PR should improve the situation with https://github.com/paritytech/polkadot-sdk/issues/3519.

Obsoletes https://github.com/paritytech/polkadot-sdk/pull/3657.
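For node operators, the practical effect is that addresses supplied via the node's public address configuration become the first entries in the published record. A minimal sketch of wiring the new field into the worker configuration (field names are taken from this patch; the helper function and the surrounding service plumbing are illustrative only):

```rust
use sc_authority_discovery::WorkerConfig;
use sc_network::Multiaddr;

// Hypothetical helper: `public_addresses` would come from the node
// configuration (`config.network.public_addresses`), as in the service
// changes below.
fn make_worker_config(public_addresses: Vec<Multiaddr>) -> WorkerConfig {
    WorkerConfig {
        // Operator-set addresses are published first in the DHT record.
        public_addresses,
        // Require that authority discovery records are signed.
        strict_record_validation: true,
        ..Default::default()
    }
}
```

Externally discovered addresses are appended after the operator-set ones, with duplicates removed.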
--- Cargo.lock | 1 + .../relay-chain-minimal-node/src/lib.rs | 2 + polkadot/node/service/src/lib.rs | 2 + substrate/bin/node/cli/src/service.rs | 2 + .../client/authority-discovery/Cargo.toml | 1 + .../client/authority-discovery/src/lib.rs | 5 + .../client/authority-discovery/src/worker.rs | 92 ++++++++++++++----- substrate/client/network/Cargo.toml | 2 +- substrate/client/network/src/discovery.rs | 19 +++- 9 files changed, 99 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bdbf6ddac268..074b657e767b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15555,6 +15555,7 @@ dependencies = [ "futures-timer", "ip_network", "libp2p", + "linked_hash_set", "log", "multihash 0.18.1", "multihash-codetable", diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index 4bccca59fe3e..6aea043713d8 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -55,6 +55,7 @@ fn build_authority_discovery_service( prometheus_registry: Option, ) -> AuthorityDiscoveryService { let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; + let auth_disc_public_addresses = config.network.public_addresses.clone(); let authority_discovery_role = sc_authority_discovery::Role::Discover; let dht_event_stream = network.event_stream("authority-discovery").filter_map(|e| async move { match e { @@ -65,6 +66,7 @@ fn build_authority_discovery_service( let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config( sc_authority_discovery::WorkerConfig { publish_non_global_ips: auth_disc_publish_non_global_ips, + public_addresses: auth_disc_public_addresses, // Require that authority discovery records are signed. strict_record_validation: true, ..Default::default() diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 83a0afc077e7..4f4ede537055 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -807,6 +807,7 @@ pub fn new_full( let shared_voter_state = rpc_setup; let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; + let auth_disc_public_addresses = config.network.public_addresses.clone(); let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); @@ -1061,6 +1062,7 @@ pub fn new_full( let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config( sc_authority_discovery::WorkerConfig { publish_non_global_ips: auth_disc_publish_non_global_ips, + public_addresses: auth_disc_public_addresses, // Require that authority discovery records are signed. 
strict_record_validation: true, ..Default::default() diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 8f2aba6b44cd..e4b425e6f963 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -422,6 +422,7 @@ pub fn new_full_base( let shared_voter_state = rpc_setup; let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; + let auth_disc_public_addresses = config.network.public_addresses.clone(); let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); @@ -610,6 +611,7 @@ pub fn new_full_base( sc_authority_discovery::new_worker_and_service_with_config( sc_authority_discovery::WorkerConfig { publish_non_global_ips: auth_disc_publish_non_global_ips, + public_addresses: auth_disc_public_addresses, ..Default::default() }, client.clone(), diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml index cdd4052f0b09..26580064b3c4 100644 --- a/substrate/client/authority-discovery/Cargo.toml +++ b/substrate/client/authority-discovery/Cargo.toml @@ -29,6 +29,7 @@ multihash = { version = "0.18.1", default-features = false, features = [ "sha2", "std", ] } +linked_hash_set = "0.1.4" log = { workspace = true, default-features = true } prost = "0.12" rand = "0.8.5" diff --git a/substrate/client/authority-discovery/src/lib.rs b/substrate/client/authority-discovery/src/lib.rs index 6bb12804cada..281188de1432 100644 --- a/substrate/client/authority-discovery/src/lib.rs +++ b/substrate/client/authority-discovery/src/lib.rs @@ -80,6 +80,10 @@ pub struct WorkerConfig { /// Defaults to `true` to avoid the surprise factor. pub publish_non_global_ips: bool, + /// Public addresses set by the node operator to always publish first in the authority + /// discovery DHT record. + pub public_addresses: Vec, + /// Reject authority discovery records that are not signed by their network identity (PeerId) /// /// Defaults to `false` to provide compatibility with old versions @@ -104,6 +108,7 @@ impl Default for WorkerConfig { // `authority_discovery_dht_event_received`. max_query_interval: Duration::from_secs(10 * 60), publish_non_global_ips: true, + public_addresses: Vec::new(), strict_record_validation: false, } } diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index 9bccb96ff378..b77f0241ec2f 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -35,6 +35,7 @@ use addr_cache::AddrCache; use codec::{Decode, Encode}; use ip_network::IpNetwork; use libp2p::{core::multiaddr, identity::PublicKey, multihash::Multihash, Multiaddr, PeerId}; +use linked_hash_set::LinkedHashSet; use multihash_codetable::{Code, MultihashDigest}; use log::{debug, error, log_enabled}; @@ -120,14 +121,22 @@ pub struct Worker { /// Interval to be proactive, publishing own addresses. publish_interval: ExpIncInterval, + /// Pro-actively publish our own addresses at this interval, if the keys in the keystore /// have changed. publish_if_changed_interval: ExpIncInterval, + /// List of keys onto which addresses have been published at the latest publication. /// Used to check whether they have changed. latest_published_keys: HashSet, + /// Same value as in the configuration. 
publish_non_global_ips: bool, + + /// Public addresses set by the node operator to always publish first in the authority + /// discovery DHT record. + public_addresses: LinkedHashSet, + /// Same value as in the configuration. strict_record_validation: bool, @@ -136,6 +145,7 @@ pub struct Worker { /// Queue of throttled lookups pending to be passed to the network. pending_lookups: Vec, + /// Set of in-flight lookups. in_flight_lookups: HashMap, @@ -224,6 +234,29 @@ where None => None, }; + let public_addresses = { + let local_peer_id: Multihash = network.local_peer_id().into(); + + config + .public_addresses + .into_iter() + .map(|mut address| { + if let Some(multiaddr::Protocol::P2p(peer_id)) = address.iter().last() { + if peer_id != local_peer_id { + error!( + target: LOG_TARGET, + "Discarding invalid local peer ID in public address {address}.", + ); + } + // Always discard `/p2p/...` protocol for proper address comparison (local + // peer id will be added before publishing). + address.pop(); + } + address + }) + .collect() + }; + Worker { from_service: from_service.fuse(), client, @@ -233,6 +266,7 @@ where publish_if_changed_interval, latest_published_keys: HashSet::new(), publish_non_global_ips: config.publish_non_global_ips, + public_addresses, strict_record_validation: config.strict_record_validation, query_interval, pending_lookups: Vec::new(), @@ -304,32 +338,48 @@ where } fn addresses_to_publish(&self) -> impl Iterator { - let peer_id: Multihash = self.network.local_peer_id().into(); let publish_non_global_ips = self.publish_non_global_ips; - let addresses = self.network.external_addresses().into_iter().filter(move |a| { - if publish_non_global_ips { - return true - } + let addresses = self + .public_addresses + .clone() + .into_iter() + .chain(self.network.external_addresses().into_iter().filter_map(|mut address| { + // Make sure the reported external address does not contain `/p2p/...` protocol. + if let Some(multiaddr::Protocol::P2p(_)) = address.iter().last() { + address.pop(); + } - a.iter().all(|p| match p { - // The `ip_network` library is used because its `is_global()` method is stable, - // while `is_global()` in the standard library currently isn't. - multiaddr::Protocol::Ip4(ip) if !IpNetwork::from(ip).is_global() => false, - multiaddr::Protocol::Ip6(ip) if !IpNetwork::from(ip).is_global() => false, - _ => true, + if self.public_addresses.contains(&address) { + // Already added above. + None + } else { + Some(address) + } + })) + .filter(move |address| { + if publish_non_global_ips { + return true + } + + address.iter().all(|protocol| match protocol { + // The `ip_network` library is used because its `is_global()` method is stable, + // while `is_global()` in the standard library currently isn't. + multiaddr::Protocol::Ip4(ip) if !IpNetwork::from(ip).is_global() => false, + multiaddr::Protocol::Ip6(ip) if !IpNetwork::from(ip).is_global() => false, + _ => true, + }) }) - }); + .collect::>(); - debug!(target: LOG_TARGET, "Authority DHT record peer_id='{:?}' addresses='{:?}'", peer_id, addresses.clone().collect::>()); + let peer_id = self.network.local_peer_id(); + debug!( + target: LOG_TARGET, + "Authority DHT record peer_id='{peer_id}' addresses='{addresses:?}'", + ); - // The address must include the peer id if not already set. - addresses.map(move |a| { - if a.iter().any(|p| matches!(p, multiaddr::Protocol::P2p(_))) { - a - } else { - a.with(multiaddr::Protocol::P2p(peer_id)) - } - }) + // The address must include the peer id. 
+ let peer_id: Multihash = peer_id.into();
+ addresses.into_iter().map(move |a| a.with(multiaddr::Protocol::P2p(peer_id)))
 }
 
 /// Publish own public addresses.
diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml
index cbf74440dc1a..c6f17647166c 100644
--- a/substrate/client/network/Cargo.toml
+++ b/substrate/client/network/Cargo.toml
@@ -29,7 +29,7 @@ futures = "0.3.21"
 futures-timer = "3.0.2"
 ip_network = "0.4.1"
 libp2p = { version = "0.51.4", features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", "request-response", "tcp", "tokio", "websocket", "yamux"] }
-linked_hash_set = "0.1.3"
+linked_hash_set = "0.1.4"
 log = { workspace = true, default-features = true }
 mockall = "0.11.3"
 parking_lot = "0.12.1"
diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs
index 77c26266aac4..4e2121c5540d 100644
--- a/substrate/client/network/src/discovery.rs
+++ b/substrate/client/network/src/discovery.rs
@@ -72,6 +72,7 @@ use libp2p::{
 },
 PeerId,
 };
+use linked_hash_set::LinkedHashSet;
 use log::{debug, info, trace, warn};
 use sp_core::hexdisplay::HexDisplay;
 use std::{
@@ -550,14 +551,20 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 ) -> Result, ConnectionDenied> {
 let Some(peer_id) = maybe_peer else { return Ok(Vec::new()) };
 
- let mut list = self
+ // Collect addresses into [`LinkedHashSet`] to eliminate duplicate entries preserving the
+ // order of addresses. Give priority to `permanent_addresses` (used with reserved nodes) and
+ // `ephemeral_addresses` (used for addresses discovered from other sources, like authority
+ // discovery DHT records).
+ let mut list: LinkedHashSet<_> = self
 .permanent_addresses
 .iter()
 .filter_map(|(p, a)| (*p == peer_id).then_some(a.clone()))
- .collect::>();
+ .collect();
 
 if let Some(ephemeral_addresses) = self.ephemeral_addresses.get(&peer_id) {
- list.extend(ephemeral_addresses.clone());
+ ephemeral_addresses.iter().for_each(|address| {
+ list.insert_if_absent(address.clone());
+ });
 }
 
 {
@@ -583,12 +590,14 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 });
 }
 
- list.extend(list_to_filter);
+ list_to_filter.into_iter().for_each(|address| {
+ list.insert_if_absent(address);
+ });
 }
 
 trace!(target: "sub-libp2p", "Addresses of {:?}: {:?}", peer_id, list);
 
- Ok(list)
+ Ok(list.into_iter().collect())
 }
 
 fn on_swarm_event(&mut self, event: FromSwarm) {

From 2f59e9efa8142d02ee4893a1383debd3b6209019 Mon Sep 17 00:00:00 2001
From: PG Herveou
Date: Fri, 22 Mar 2024 19:45:26 +0100
Subject: [PATCH 14/17] XCM remove extra QueryId types from traits (#3763)

We do not need to make these traits generic over the `QueryId` type; we can just use the `QueryId` alias everywhere.
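In practice, downstream code can now use the concrete `QueryId` alias (a `u64` in `xcm::latest`) instead of threading an associated type through every signature. A hypothetical before/after fragment:

```rust
use xcm::latest::QueryId;

// Before: signatures had to name the associated type, e.g.
// fn remember<T: QueryHandler>(id: <T as QueryHandler>::QueryId) { /* ... */ }

// After: the alias is used directly, independent of the handler type.
fn remember(id: QueryId) {
    // e.g. stash the pending query id for matching the response later
    let _pending = id;
}
```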
{ } impl QueryHandler for Pallet { - type QueryId = u64; type BlockNumber = BlockNumberFor; type Error = XcmError; type UniversalLocation = T::UniversalLocation; @@ -1488,7 +1487,7 @@ impl QueryHandler for Pallet { responder: impl Into, timeout: BlockNumberFor, match_querier: impl Into, - ) -> Self::QueryId { + ) -> QueryId { Self::do_new_query(responder, None, timeout, match_querier) } @@ -1498,7 +1497,7 @@ impl QueryHandler for Pallet { message: &mut Xcm<()>, responder: impl Into, timeout: Self::BlockNumber, - ) -> Result { + ) -> Result { let responder = responder.into(); let destination = Self::UniversalLocation::get() .invert_target(&responder) @@ -1511,7 +1510,7 @@ impl QueryHandler for Pallet { } /// Removes response when ready and emits [Event::ResponseTaken] event. - fn take_response(query_id: Self::QueryId) -> QueryResponseStatus { + fn take_response(query_id: QueryId) -> QueryResponseStatus { match Queries::::get(query_id) { Some(QueryStatus::Ready { response, at }) => match response.try_into() { Ok(response) => { @@ -1528,7 +1527,7 @@ impl QueryHandler for Pallet { } #[cfg(feature = "runtime-benchmarks")] - fn expect_response(id: Self::QueryId, response: Response) { + fn expect_response(id: QueryId, response: Response) { let response = response.into(); Queries::::insert( id, diff --git a/polkadot/xcm/xcm-builder/src/controller.rs b/polkadot/xcm/xcm-builder/src/controller.rs index ba2b1fb44b8e..04b19eaa5870 100644 --- a/polkadot/xcm/xcm-builder/src/controller.rs +++ b/polkadot/xcm/xcm-builder/src/controller.rs @@ -132,7 +132,7 @@ pub trait QueryController: QueryHandler { origin: Origin, timeout: Timeout, match_querier: VersionedLocation, - ) -> Result; + ) -> Result; } impl ExecuteController for () { @@ -186,7 +186,7 @@ impl QueryController for () { _origin: Origin, _timeout: Timeout, _match_querier: VersionedLocation, - ) -> Result { + ) -> Result { Ok(Default::default()) } } diff --git a/polkadot/xcm/xcm-builder/src/pay.rs b/polkadot/xcm/xcm-builder/src/pay.rs index 6b466483cfad..35b624b04153 100644 --- a/polkadot/xcm/xcm-builder/src/pay.rs +++ b/polkadot/xcm/xcm-builder/src/pay.rs @@ -88,7 +88,7 @@ impl< type Beneficiary = Beneficiary; type AssetKind = AssetKind; type Balance = u128; - type Id = Querier::QueryId; + type Id = QueryId; type Error = xcm::latest::Error; fn pay( diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index 4bf347ea7713..3d03ab054248 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -38,7 +38,7 @@ pub use sp_std::{ collections::{btree_map::BTreeMap, btree_set::BTreeSet}, fmt::Debug, }; -pub use xcm::latest::{prelude::*, Weight}; +pub use xcm::latest::{prelude::*, QueryId, Weight}; use xcm_executor::traits::{Properties, QueryHandler, QueryResponseStatus}; pub use xcm_executor::{ traits::{ @@ -414,7 +414,6 @@ pub struct TestQueryHandler(core::marker::PhantomData<(T, BlockN impl QueryHandler for TestQueryHandler { - type QueryId = u64; type BlockNumber = BlockNumber; type Error = XcmError; type UniversalLocation = T::UniversalLocation; @@ -423,7 +422,7 @@ impl QueryHandler responder: impl Into, _timeout: Self::BlockNumber, _match_querier: impl Into, - ) -> Self::QueryId { + ) -> QueryId { let query_id = 1; expect_response(query_id, responder.into()); query_id @@ -433,7 +432,7 @@ impl QueryHandler message: &mut Xcm<()>, responder: impl Into, timeout: Self::BlockNumber, - ) -> Result { + ) -> Result { let responder = responder.into(); let destination = 
Self::UniversalLocation::get() .invert_target(&responder) @@ -445,7 +444,7 @@ impl QueryHandler Ok(query_id) } - fn take_response(query_id: Self::QueryId) -> QueryResponseStatus { + fn take_response(query_id: QueryId) -> QueryResponseStatus { QUERIES .with(|q| { q.borrow().get(&query_id).and_then(|v| match v { @@ -460,7 +459,7 @@ impl QueryHandler } #[cfg(feature = "runtime-benchmarks")] - fn expect_response(_id: Self::QueryId, _response: xcm::latest::Response) { + fn expect_response(_id: QueryId, _response: xcm::latest::Response) { // Unnecessary since it's only a test implementation } } diff --git a/polkadot/xcm/xcm-executor/src/traits/on_response.rs b/polkadot/xcm/xcm-executor/src/traits/on_response.rs index 952bd2d0040a..1049bacdca5f 100644 --- a/polkadot/xcm/xcm-executor/src/traits/on_response.rs +++ b/polkadot/xcm/xcm-executor/src/traits/on_response.rs @@ -16,11 +16,8 @@ use crate::{Junctions::Here, Xcm}; use core::result; -use frame_support::{ - pallet_prelude::{Get, TypeInfo}, - parameter_types, -}; -use parity_scale_codec::{Decode, Encode, FullCodec, MaxEncodedLen}; +use frame_support::{pallet_prelude::Get, parameter_types}; +use parity_scale_codec::{Decode, Encode}; use sp_arithmetic::traits::Zero; use sp_std::fmt::Debug; use xcm::latest::{ @@ -115,15 +112,6 @@ pub enum QueryResponseStatus { /// Provides methods to expect responses from XCMs and query their status. pub trait QueryHandler { - type QueryId: From - + FullCodec - + MaxEncodedLen - + TypeInfo - + Clone - + Eq - + PartialEq - + Debug - + Copy; type BlockNumber: Zero + Encode; type Error; type UniversalLocation: Get; @@ -151,14 +139,14 @@ pub trait QueryHandler { message: &mut Xcm<()>, responder: impl Into, timeout: Self::BlockNumber, - ) -> result::Result; + ) -> result::Result; /// Attempt to remove and return the response of query with ID `query_id`. - fn take_response(id: Self::QueryId) -> QueryResponseStatus; + fn take_response(id: QueryId) -> QueryResponseStatus; /// Makes sure to expect a response with the given id. #[cfg(feature = "runtime-benchmarks")] - fn expect_response(id: Self::QueryId, response: Response); + fn expect_response(id: QueryId, response: Response); } parameter_types! { @@ -168,17 +156,16 @@ parameter_types! 
{
 impl QueryHandler for () {
 type BlockNumber = u64;
 type Error = ();
- type QueryId = u64;
 type UniversalLocation = UniversalLocation;
 
- fn take_response(_query_id: Self::QueryId) -> QueryResponseStatus {
+ fn take_response(_query_id: QueryId) -> QueryResponseStatus {
 QueryResponseStatus::NotFound
 }
 fn new_query(
 _responder: impl Into,
 _timeout: Self::BlockNumber,
 _match_querier: impl Into,
- ) -> Self::QueryId {
+ ) -> QueryId {
 0u64
 }
 
@@ -186,10 +173,10 @@ impl QueryHandler for () {
 _message: &mut Xcm<()>,
 _responder: impl Into,
 _timeout: Self::BlockNumber,
- ) -> Result {
+ ) -> Result {
 Err(())
 }
 
 #[cfg(feature = "runtime-benchmarks")]
- fn expect_response(_id: Self::QueryId, _response: crate::Response) {}
+ fn expect_response(_id: QueryId, _response: crate::Response) {}
 }

From 9a04ebbfb0b77f2dcdc46db6a1a2b455881edd03 Mon Sep 17 00:00:00 2001
From: girazoki
Date: Fri, 22 Mar 2024 19:48:15 +0100
Subject: [PATCH 15/17] [pallet-xcm] fix transport fees for remote reserve transfers (#3792)

Currently `transfer_assets` from pallet-xcm covers four different transfer types:
- `LocalReserve`
- `DestinationReserve`
- `Teleport`
- `RemoteReserve`

For the first three, the local execution and the remote message sending are separated, and fees are deducted in pallet-xcm itself: https://github.com/paritytech/polkadot-sdk/blob/3410dfb3929462da88be2da813f121d8b1cf46b3/polkadot/xcm/pallet-xcm/src/lib.rs#L1758.

For the fourth case, `RemoteReserve`, pallet-xcm still relies on the xcm-executor itself to send the message (through the `InitiateReserveWithdraw` instruction). In this case, if delivery fees need to be charged, it is not possible to do so because the `jit_withdraw` mode has not been set.

This PR proposes to keep using `InitiateReserveWithdraw` but to prepend a `SetFeesMode { jit_withdraw: true }` instruction to make sure delivery fees can be paid. A test case covering the aforementioned scenario is also added.

---------

Co-authored-by: Adrian Catangiu
---
 .../emulated/common/src/macros.rs | 2 +-
 .../tests/assets/asset-hub-rococo/src/lib.rs | 8 +-
 .../src/tests/reserve_transfer.rs | 73 ++------
 .../asset-hub-rococo/src/tests/teleport.rs | 14 +-
 .../tests/assets/asset-hub-westend/src/lib.rs | 8 +-
 .../src/tests/reserve_transfer.rs | 71 ++------
 .../asset-hub-westend/src/tests/teleport.rs | 14 +-
 .../people-rococo/src/tests/teleport.rs | 6 +-
 .../people-westend/src/tests/teleport.rs | 6 +-
 .../assets/test-utils/src/test_cases.rs | 4 +-
 .../assets/test-utils/src/xcm_helpers.rs | 5 +-
 .../runtimes/testing/penpal/src/lib.rs | 19 +-
 .../runtimes/testing/penpal/src/xcm_config.rs | 19 +-
 polkadot/xcm/pallet-xcm/src/lib.rs | 1 +
 polkadot/xcm/pallet-xcm/src/mock.rs | 15 +-
 .../pallet-xcm/src/tests/assets_transfer.rs | 171 ++++++++++++++++++
 prdoc/pr_3792.prdoc | 19 ++
 17 files changed, 291 insertions(+), 164 deletions(-)
 create mode 100644 prdoc/pr_3792.prdoc

diff --git a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs
index 6951de6faa72..6f6bbe41e01b 100644
--- a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs
+++ b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs
@@ -115,7 +115,7 @@ macro_rules!
test_parachain_is_trusted_teleporter { let para_receiver_balance_after = <$receiver_para as $crate::macros::Chain>::account_data_of(receiver.clone()).free; let delivery_fees = <$sender_para>::execute_with(|| { - $crate::macros::asset_test_utils::xcm_helpers::transfer_assets_delivery_fees::< + $crate::macros::asset_test_utils::xcm_helpers::teleport_assets_delivery_fees::< <$sender_xcm_config as xcm_executor::Config>::XcmSender, >($assets.clone(), fee_asset_item, weight_limit.clone(), beneficiary, para_destination) }); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index 21d858f1fe51..a5a4914e21d8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -63,17 +63,13 @@ mod imports { // Runtimes pub use asset_hub_rococo_runtime::xcm_config::{ - TokenLocation as RelayLocation, UniversalLocation as AssetHubRococoUniversalLocation, - XcmConfig as AssetHubRococoXcmConfig, + TokenLocation as RelayLocation, XcmConfig as AssetHubRococoXcmConfig, }; pub use penpal_runtime::xcm_config::{ LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, - UniversalLocation as PenpalUniversalLocation, XcmConfig as PenpalRococoXcmConfig, - }; - pub use rococo_runtime::xcm_config::{ - UniversalLocation as RococoUniversalLocation, XcmConfig as RococoXcmConfig, }; + pub use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig; pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 705c9613b647..0a5956dedfdf 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -524,7 +524,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let destination = Rococo::child_location_of(PenpalA::para_id()); let sender = RococoSender::get(); let amount_to_send: Balance = ROCOCO_ED * 1000; - let assets: Assets = (Here, amount_to_send).into(); // Init values fot Parachain let relay_native_asset_location = @@ -552,15 +551,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { test.set_dispatchable::(relay_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = Rococo::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &RococoUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ -568,8 +558,8 @@ fn reserve_transfer_native_asset_from_relay_to_para() { >::balance(relay_native_asset_location.into(), &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus delivery fees + 
assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -595,7 +585,7 @@ fn reserve_transfer_native_asset_from_para_to_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for Relay @@ -634,15 +624,6 @@ fn reserve_transfer_native_asset_from_para_to_relay() { test.set_dispatchable::(para_to_relay_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -650,8 +631,8 @@ fn reserve_transfer_native_asset_from_para_to_relay() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -705,16 +686,6 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = AssetHubRococo::execute_with(|| { - let reanchored_assets = assets - .reanchored(&destination, &AssetHubRococoUniversalLocation::get()) - .unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ -722,8 +693,8 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { >::balance(system_para_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's assets is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's assets increased by `amount_to_send - delivery_fees - bought_execution`; @@ -738,7 +709,7 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { // Init values for Parachain let destination = PenpalA::sibling_location_of(AssetHubRococo::para_id()); let sender = PenpalASender::get(); - let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; + let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 10000; let assets: Assets = (Parent, amount_to_send).into(); let system_para_native_asset_location = v3::Location::try_from(RelayLocation::get()).expect("conversion works"); @@ -749,7 +720,7 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { 
::RuntimeOrigin::signed(asset_owner), system_para_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for System Parachain @@ -788,15 +759,6 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { test.set_dispatchable::(para_to_system_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -804,8 +766,8 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -1084,7 +1046,7 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // fund the Parachain Origin's SA on Relay Chain with the native tokens held in reserve @@ -1118,13 +1080,6 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { test.set_dispatchable::(para_to_para_through_relay_limited_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -1135,8 +1090,8 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { >::balance(relay_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_assets_after > receiver_assets_before); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs index 0cc5ddb9f64d..4432999aa955 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs @@ -322,7 +322,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ 
-369,7 +369,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -410,7 +410,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -445,7 +445,7 @@ fn teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -492,7 +492,7 @@ fn teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -530,7 +530,7 @@ fn teleport_native_assets_from_system_para_to_relay_fails() { test.assert(); let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -593,7 +593,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { ::RuntimeOrigin::signed(asset_owner.clone()), system_para_native_asset_location, sender.clone(), - fee_amount_to_send, + fee_amount_to_send * 2, ); // No need to create the asset (only mint) as it exists in genesis. 
PenpalA::mint_asset( diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index 3f899d1dbdbc..c9f5fe0647e1 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -67,17 +67,13 @@ mod imports { // Runtimes pub use asset_hub_westend_runtime::xcm_config::{ - UniversalLocation as AssetHubWestendUniversalLocation, WestendLocation as RelayLocation, - XcmConfig as AssetHubWestendXcmConfig, + WestendLocation as RelayLocation, XcmConfig as AssetHubWestendXcmConfig, }; pub use penpal_runtime::xcm_config::{ LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, - UniversalLocation as PenpalUniversalLocation, XcmConfig as PenpalWestendXcmConfig, - }; - pub use westend_runtime::xcm_config::{ - UniversalLocation as WestendUniversalLocation, XcmConfig as WestendXcmConfig, }; + pub use westend_runtime::xcm_config::XcmConfig as WestendXcmConfig; pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 8c836132b546..64ad15ca3121 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -524,7 +524,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let destination = Westend::child_location_of(PenpalA::para_id()); let sender = WestendSender::get(); let amount_to_send: Balance = WESTEND_ED * 1000; - let assets: Assets = (Here, amount_to_send).into(); // Init values fot Parachain let relay_native_asset_location = @@ -552,15 +551,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { test.set_dispatchable::(relay_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = Westend::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &WestendUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ -568,8 +558,8 @@ fn reserve_transfer_native_asset_from_relay_to_para() { >::balance(relay_native_asset_location.into(), &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -595,7 +585,7 @@ fn reserve_transfer_native_asset_from_para_to_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for 
Relay @@ -634,15 +624,6 @@ fn reserve_transfer_native_asset_from_para_to_relay() { test.set_dispatchable::(para_to_relay_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -650,8 +631,8 @@ fn reserve_transfer_native_asset_from_para_to_relay() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -705,16 +686,6 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = AssetHubWestend::execute_with(|| { - let reanchored_assets = assets - .reanchored(&destination, &AssetHubWestendUniversalLocation::get()) - .unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ -722,8 +693,8 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { >::balance(system_para_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's assets is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's assets increased by `amount_to_send - delivery_fees - bought_execution`; @@ -749,7 +720,7 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { ::RuntimeOrigin::signed(asset_owner), system_para_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for System Parachain @@ -789,15 +760,6 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { test.set_dispatchable::(para_to_system_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -805,8 +767,8 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - 
delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -1086,7 +1048,7 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // fund the Parachain Origin's SA on Relay Chain with the native tokens held in reserve @@ -1120,13 +1082,6 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { test.set_dispatchable::(para_to_para_through_relay_limited_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -1137,8 +1092,8 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { >::balance(relay_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_assets_after > receiver_assets_before); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index 61f547fe7c5e..aba05ea4322c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -322,7 +322,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -369,7 +369,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -410,7 +410,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -445,7 +445,7 @@ fn teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + 
xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -492,7 +492,7 @@ fn teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -530,7 +530,7 @@ fn teleport_native_assets_from_system_para_to_relay_fails() { test.assert(); let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -593,7 +593,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { ::RuntimeOrigin::signed(asset_owner.clone()), system_para_native_asset_location, sender.clone(), - fee_amount_to_send, + fee_amount_to_send * 2, ); // No need to create the asset (only mint) as it exists in genesis. PenpalA::mint_asset( diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs index 3abe5c6cf66a..350d87d638ab 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs @@ -155,7 +155,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -204,7 +204,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -248,7 +248,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs index eef35a99a833..8697477ba769 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs @@ -155,7 +155,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + 
xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -204,7 +204,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -248,7 +248,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index 53e10956bd0d..2f2624d8e523 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -186,7 +186,7 @@ pub fn teleports_for_native_asset_works< // Mint funds into account to ensure it has enough balance to pay delivery fees let delivery_fees = - xcm_helpers::transfer_assets_delivery_fees::( + xcm_helpers::teleport_assets_delivery_fees::( (native_asset_id.clone(), native_asset_to_teleport_away.into()).into(), 0, Unlimited, @@ -579,7 +579,7 @@ pub fn teleports_for_foreign_assets_works< // Make sure the target account has enough native asset to pay for delivery fees let delivery_fees = - xcm_helpers::transfer_assets_delivery_fees::( + xcm_helpers::teleport_assets_delivery_fees::( (foreign_asset_id_location_latest.clone(), asset_to_teleport_away).into(), 0, Unlimited, diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs b/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs index f509a3a8acaa..ca0e81fae42e 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs @@ -18,11 +18,10 @@ use xcm::latest::prelude::*; -/// Returns the delivery fees amount for pallet xcm's `teleport_assets` and -/// `reserve_transfer_assets` extrinsics. +/// Returns the delivery fees amount for pallet xcm's `teleport_assets` extrinsics. /// Because it returns only a `u128`, it assumes delivery fees are only paid /// in one asset and that asset is known. 
-pub fn transfer_assets_delivery_fees( +pub fn teleport_assets_delivery_fees( assets: Assets, fee_asset_item: u32, weight_limit: WeightLimit, diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 1e6d485d148d..1d404feac3db 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -57,7 +57,6 @@ use parachains_common::{ impls::{AssetsToBlockAuthor, NonZeroIssuance}, message_queue::{NarrowOriginToSibling, ParaIdToSibling}, }; -use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use smallvec::smallvec; use sp_api::impl_runtime_apis; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -85,7 +84,7 @@ use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; // XCM Imports use parachains_common::{AccountId, Signature}; -use xcm::latest::prelude::BodyId; +use xcm::latest::prelude::{AssetId as AssetLocationId, BodyId}; /// Balance of an account. pub type Balance = u128; @@ -545,6 +544,20 @@ impl pallet_message_queue::Config for Runtime { impl cumulus_pallet_aura_ext::Config for Runtime {} +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetLocationId = AssetLocationId(xcm_config::RelayLocation::get()); + /// The base fee for the message delivery fees (3 CENTS). + pub const BaseDeliveryFee: u128 = (1_000_000_000_000u128 / 100).saturating_mul(3); +} + +pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice< + FeeAssetId, + BaseDeliveryFee, + TransactionByteFee, + XcmpQueue, +>; + impl cumulus_pallet_xcmp_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ChannelInfo = ParachainSystem; @@ -555,7 +568,7 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = (); - type PriceForSiblingDelivery = NoPriceForMessageDelivery; + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } parameter_types! 
{ diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index d83a877c2f89..639bfd95834b 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -28,6 +28,7 @@ use super::{ ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, WeightToFee, XcmpQueue, }; +use crate::{BaseDeliveryFee, FeeAssetId, TransactionByteFee}; use core::marker::PhantomData; use frame_support::{ parameter_types, @@ -36,10 +37,10 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; -use parachains_common::xcm_config::AssetFeeAsExistentialDepositMultiplier; +use parachains_common::{xcm_config::AssetFeeAsExistentialDepositMultiplier, TREASURY_PALLET_ID}; use polkadot_parachain_primitives::primitives::Sibling; -use polkadot_runtime_common::impls::ToAuthor; -use sp_runtime::traits::ConvertInto; +use polkadot_runtime_common::{impls::ToAuthor, xcm_sender::ExponentialPrice}; +use sp_runtime::traits::{AccountIdConversion, ConvertInto}; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, @@ -49,6 +50,7 @@ use xcm_builder::{ SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, StartsWith, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::{traits::JustTry, XcmExecutor}; @@ -59,6 +61,7 @@ parameter_types! { pub const RelayNetwork: Option = None; pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = [Parachain(ParachainInfo::parachain_id().into())].into(); + pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); } /// Type for specifying how a `Location` can be converted into an `AccountId`. This is used @@ -331,7 +334,10 @@ impl xcm_executor::Config for XcmConfig { type MaxAssetsIntoHolding = MaxAssetsIntoHolding; type AssetLocker = (); type AssetExchanger = (); - type FeeManager = (); + type FeeManager = XcmFeeManagerFromComponents< + (), + XcmFeeToAccount, + >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; @@ -355,11 +361,14 @@ pub type ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger = /// No local origins on this chain are allowed to dispatch XCM sends/executions. pub type LocalOriginToLocation = SignedToAccountId32; +pub type PriceForParentDelivery = + ExponentialPrice; + /// The means for routing XCM messages which are not for local execution into the right message /// queues. pub type XcmRouter = WithUniqueTopic<( // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, + cumulus_primitives_utility::ParentAsUmp, // ..and XCMP to communicate with the sibling chains. 
XcmpQueue, )>; diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 0761b375dfbf..8a9e5288f2e1 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -1990,6 +1990,7 @@ impl Pallet { ]); Ok(Xcm(vec![ WithdrawAsset(assets.into()), + SetFeesMode { jit_withdraw: true }, InitiateReserveWithdraw { assets: Wild(AllCounted(max_assets)), reserve, diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index b29562fc833b..2cc228476ba8 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -361,6 +361,10 @@ parameter_types! { 0, [Parachain(FOREIGN_ASSET_RESERVE_PARA_ID)] ); + pub PaidParaForeignReserveLocation: Location = Location::new( + 0, + [Parachain(Para3000::get())] + ); pub ForeignAsset: Asset = Asset { fun: Fungible(10), id: AssetId(Location::new( @@ -368,6 +372,13 @@ parameter_types! { [Parachain(FOREIGN_ASSET_RESERVE_PARA_ID), FOREIGN_ASSET_INNER_JUNCTION], )), }; + pub PaidParaForeignAsset: Asset = Asset { + fun: Fungible(10), + id: AssetId(Location::new( + 0, + [Parachain(Para3000::get())], + )), + }; pub UsdcReserveLocation: Location = Location::new( 0, [Parachain(USDC_RESERVE_PARA_ID)] @@ -450,6 +461,8 @@ parameter_types! { pub TrustedFilteredTeleport: (AssetFilter, Location) = (FilteredTeleportAsset::get().into(), FilteredTeleportLocation::get()); pub TeleportUsdtToForeign: (AssetFilter, Location) = (Usdt::get().into(), ForeignReserveLocation::get()); pub TrustedForeign: (AssetFilter, Location) = (ForeignAsset::get().into(), ForeignReserveLocation::get()); + pub TrustedPaidParaForeign: (AssetFilter, Location) = (PaidParaForeignAsset::get().into(), PaidParaForeignReserveLocation::get()); + pub TrustedUsdc: (AssetFilter, Location) = (Usdc::get().into(), UsdcReserveLocation::get()); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; @@ -483,7 +496,7 @@ impl xcm_executor::Config for XcmConfig { type XcmSender = XcmRouter; type AssetTransactor = AssetTransactors; type OriginConverter = LocalOriginConverter; - type IsReserve = (Case, Case); + type IsReserve = (Case, Case, Case); type IsTeleporter = ( Case, Case, diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs index 27be5cce1458..7752d1355cda 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -2604,3 +2604,174 @@ fn teleport_assets_using_destination_reserve_fee_disallowed() { expected_result, ); } + +/// Test `tested_call` transferring single asset using remote reserve. +/// +/// Transferring Para3000 asset (`Para3000` reserve) to +/// `OTHER_PARA_ID` (no teleport trust), therefore triggering remote reserve. +/// Using the same asset (Para3000 reserve) for fees. +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. +/// +/// Verifies that XCM router fees (`SendXcm::validate` -> `Assets`) are withdrawn from the correct +/// user account and deposited to the correct target account (`XcmFeesTargetAccount`). +/// Verifies `expected_result`. 
+fn remote_asset_reserve_and_remote_fee_reserve_paid_call( + tested_call: Call, + expected_result: DispatchResult, +) where + Call: FnOnce( + OriginFor, + Box, + Box, + Box, + u32, + WeightLimit, + ) -> DispatchResult, +{ + let weight = BaseXcmWeight::get() * 3; + let user_account = AccountId::from(XCM_FEES_NOT_WAIVED_USER_ACCOUNT); + let xcm_router_fee_amount = Para3000PaymentAmount::get(); + let paid_para_id = Para3000::get(); + let balances = vec![ + (user_account.clone(), INITIAL_BALANCE), + (ParaId::from(paid_para_id).into_account_truncating(), INITIAL_BALANCE), + (XcmFeesTargetAccount::get(), INITIAL_BALANCE), + ]; + let beneficiary: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset BLA + let foreign_initial_amount = 142; + let (reserve_location, _, foreign_asset_id_location) = set_up_foreign_asset( + paid_para_id, + None, + user_account.clone(), + foreign_initial_amount, + true, + ); + + // transfer destination is another chain that is not the reserve location + // the goal is to trigger the remoteReserve case + let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); + + let transferred_asset: Assets = (foreign_asset_id_location.clone(), SEND_AMOUNT).into(); + + // balance checks before the transfer + assert_eq!( + AssetsPallet::balance(foreign_asset_id_location.clone(), user_account.clone()), + foreign_initial_amount + ); + assert_eq!(Balances::free_balance(user_account.clone()), INITIAL_BALANCE); + + // do the transfer + let result = tested_call( + RuntimeOrigin::signed(user_account.clone()), + Box::new(dest.clone().into()), + Box::new(beneficiary.clone().into()), + Box::new(transferred_asset.into()), + 0 as u32, + Unlimited, + ); + assert_eq!(result, expected_result); + if expected_result.is_err() { + // short-circuit here for tests where we expect failure + return; + } + + let mut last_events = last_events(7).into_iter(); + // asset events + // forceCreate + last_events.next().unwrap(); + // mint tokens + last_events.next().unwrap(); + // burn tokens + last_events.next().unwrap(); + // balance events + // burn delivery fee + last_events.next().unwrap(); + // mint delivery fee + last_events.next().unwrap(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { + outcome: Outcome::Complete { used: weight } + }) + ); + + // user account spent (transferred) amount + assert_eq!( + AssetsPallet::balance(foreign_asset_id_location.clone(), user_account.clone()), + foreign_initial_amount - SEND_AMOUNT + ); + + // user account spent delivery fees + assert_eq!(Balances::free_balance(user_account), INITIAL_BALANCE - xcm_router_fee_amount); + + // XcmFeesTargetAccount is where the xcm_router_fee_amount should land + assert_eq!( + Balances::free_balance(XcmFeesTargetAccount::get()), + INITIAL_BALANCE + xcm_router_fee_amount + ); + + // Verify total and active issuance of foreign BLA have decreased (burned on + // reserve-withdraw) + let expected_issuance = foreign_initial_amount - SEND_AMOUNT; + assert_eq!( + AssetsPallet::total_issuance(foreign_asset_id_location.clone()), + expected_issuance + ); + assert_eq!( + AssetsPallet::active_issuance(foreign_asset_id_location.clone()), + expected_issuance + ); + + let context = UniversalLocation::get(); + let foreign_id_location_reanchored = + foreign_asset_id_location.reanchored(&dest, &context).unwrap(); + let dest_reanchored = dest.reanchored(&reserve_location, 
&context).unwrap(); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + reserve_location, + // `assets` are burned on source and withdrawn from SA in remote reserve chain + Xcm(vec![ + WithdrawAsset((Location::here(), SEND_AMOUNT).into()), + ClearOrigin, + buy_execution((Location::here(), SEND_AMOUNT / 2)), + DepositReserveAsset { + assets: Wild(AllCounted(1)), + // final destination is `dest` as seen by `reserve` + dest: dest_reanchored, + // message sent onward to `dest` + xcm: Xcm(vec![ + buy_execution((foreign_id_location_reanchored, SEND_AMOUNT / 2)), + DepositAsset { assets: AllCounted(1).into(), beneficiary } + ]) + } + ]) + )] + ); + }); +} +/// Test `transfer_assets` with remote asset reserve and remote fee reserve. +#[test] +fn transfer_assets_with_remote_asset_reserve_and_remote_asset_fee_reserve_paid_works() { + let expected_result = Ok(()); + remote_asset_reserve_and_remote_fee_reserve_paid_call( + XcmPallet::transfer_assets, + expected_result, + ); +} +/// Test `limited_reserve_transfer_assets` with remote asset reserve and remote fee reserve. +#[test] +fn limited_reserve_transfer_assets_with_remote_asset_reserve_and_remote_asset_fee_reserve_paid_works( +) { + let expected_result = Ok(()); + remote_asset_reserve_and_remote_fee_reserve_paid_call( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} diff --git a/prdoc/pr_3792.prdoc b/prdoc/pr_3792.prdoc new file mode 100644 index 000000000000..cbcdc29a9c6e --- /dev/null +++ b/prdoc/pr_3792.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[pallet-xcm] fix transport fees for remote reserve transfers" + +doc: + - audience: Runtime Dev + description: | + This PR fixes `pallet_xcm::transfer_assets` and + `pallet_xcm::limited_reserve_transfer_assets` extrinsics for transfers + that need to go through remote reserves. The fix is adding a + `SetFeesMode { jit_withdraw: true }` instruction before local execution of + `InitiateReserveWithdraw` so that delivery fees are correctly charged by + the xcm-executor. Without this change, a runtime that has implemented + delivery fees would not be able to execute remote reserve transfers using + these extrinsics. + +crates: + - name: pallet-xcm From 19490abae08570ce87c3cae57542e928aa9a149d Mon Sep 17 00:00:00 2001 From: eskimor Date: Sat, 23 Mar 2024 06:46:15 +0100 Subject: [PATCH 16/17] Fix xcm config for coretime. (#3768) Fixes https://github.com/paritytech/polkadot-sdk/issues/3762 . 
--------- Co-authored-by: eskimor Co-authored-by: Adrian Catangiu --- polkadot/runtime/westend/src/xcm_config.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 400843c58356..96d2a124ff9a 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -41,10 +41,11 @@ use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, ChildParachainConvertsVia, DescribeAllTerminal, DescribeFamily, FrameTransactionalProcessor, - FungibleAdapter, HashedDescription, IsConcrete, MintLocation, OriginToPluralityVoice, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, - XcmFeeManagerFromComponents, XcmFeeToAccount, + FungibleAdapter, HashedDescription, IsChildSystemParachain, IsConcrete, MintLocation, + OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -142,13 +143,12 @@ impl Contains for OnlyParachains { } } -pub struct CollectivesOrFellows; -impl Contains for CollectivesOrFellows { +pub struct Fellows; +impl Contains for Fellows { fn contains(location: &Location) -> bool { matches!( location.unpack(), - (0, [Parachain(COLLECTIVES_ID)]) | - (0, [Parachain(COLLECTIVES_ID), Plurality { id: BodyId::Technical, .. }]) + (0, [Parachain(COLLECTIVES_ID), Plurality { id: BodyId::Technical, .. }]) ) } } @@ -172,8 +172,8 @@ pub type Barrier = TrailingSetTopicAsId<( AllowTopLevelPaidExecutionFrom, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, - // Collectives and Fellows plurality get free execution. - AllowExplicitUnpaidExecutionFrom, + // Messages from system parachains or the Fellows plurality need not pay for execution. + AllowExplicitUnpaidExecutionFrom<(IsChildSystemParachain, Fellows)>, ), UniversalLocation, ConstU32<8>, From 463ccb8f9028ebf5f7fc53d7e835c21f43172253 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 24 Mar 2024 15:21:18 +0100 Subject: [PATCH 17/17] pallet-aura: Expose `SlotDuration` as constant (#3732) --- substrate/frame/aura/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/substrate/frame/aura/src/lib.rs b/substrate/frame/aura/src/lib.rs index 997f51ba428f..3ca1444aaae9 100644 --- a/substrate/frame/aura/src/lib.rs +++ b/substrate/frame/aura/src/lib.rs @@ -114,6 +114,7 @@ pub mod pallet { /// The effective value of this type should not change while the chain is running. /// /// For backwards compatibility either use [`MinimumPeriodTimesTwo`] or a const. + #[pallet::constant] type SlotDuration: Get<::Moment>; }
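Annotating the associated type with `#[pallet::constant]` publishes its value in the pallet's runtime metadata, so clients and tooling can read the slot duration without a runtime-API round trip. A minimal sketch of a runtime configuration satisfying the bound, assuming a 6-second slot and the stock sr25519 Aura authority type (the concrete values are illustrative, not taken from this patch):

    use frame_support::traits::{ConstBool, ConstU32, ConstU64};
    use sp_consensus_aura::sr25519::AuthorityId as AuraId;

    impl pallet_aura::Config for Runtime {
        type AuthorityId = AuraId;
        type DisabledValidators = ();
        type MaxAuthorities = ConstU32<32>;
        type AllowMultipleBlocksPerSlot = ConstBool<false>;
        // Surfaced in metadata as the `SlotDuration` constant because the
        // trait item is annotated with `#[pallet::constant]`.
        type SlotDuration = ConstU64<6_000>;
    }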