From 71fca5e7d31a2b5e24d95c996435c6ed67dfb87e Mon Sep 17 00:00:00 2001 From: remz <> Date: Fri, 14 Oct 2022 13:54:27 +0200 Subject: [PATCH 1/3] + Ported consensus transition logic to the new substrate convention. Block 14555555 will be the switch. It should happen after the runtime upgrade. --- Cargo.lock | 221 +++++- node/cli/Cargo.toml | 5 +- node/cli/src/service.rs | 16 +- node/consensus-transition/aura/Cargo.toml | 49 ++ .../aura/src/import_queue.rs | 441 ++++++++++++ node/consensus-transition/aura/src/lib.rs | 630 ++++++++++++++++++ .../manual-seal/Cargo.toml | 53 ++ .../manual-seal/src/consensus.rs | 46 ++ .../manual-seal/src/consensus/aura.rs | 98 +++ .../manual-seal/src/consensus/babe.rs | 314 +++++++++ .../manual-seal/src/consensus/timestamp.rs | 164 +++++ .../manual-seal/src/error.rs | 113 ++++ .../manual-seal/src/finalize_block.rs | 59 ++ .../manual-seal/src/lib.rs | 601 +++++++++++++++++ .../manual-seal/src/rpc.rs | 170 +++++ .../manual-seal/src/seal_block.rs | 166 +++++ node/rpc/Cargo.toml | 4 +- 17 files changed, 3109 insertions(+), 41 deletions(-) create mode 100644 node/consensus-transition/aura/Cargo.toml create mode 100644 node/consensus-transition/aura/src/import_queue.rs create mode 100644 node/consensus-transition/aura/src/lib.rs create mode 100644 node/consensus-transition/manual-seal/Cargo.toml create mode 100644 node/consensus-transition/manual-seal/src/consensus.rs create mode 100644 node/consensus-transition/manual-seal/src/consensus/aura.rs create mode 100644 node/consensus-transition/manual-seal/src/consensus/babe.rs create mode 100644 node/consensus-transition/manual-seal/src/consensus/timestamp.rs create mode 100644 node/consensus-transition/manual-seal/src/error.rs create mode 100644 node/consensus-transition/manual-seal/src/finalize_block.rs create mode 100644 node/consensus-transition/manual-seal/src/lib.rs create mode 100644 node/consensus-transition/manual-seal/src/rpc.rs create mode 100644 
node/consensus-transition/manual-seal/src/seal_block.rs diff --git a/Cargo.lock b/Cargo.lock index edae5080..ab94dfda 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -403,6 +403,20 @@ dependencies = [ "serde", ] +[[package]] +name = "beefy-primitives" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.19#174735ea1bb5fc4513519c45181d8df63d86f613" +dependencies = [ + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-application-crypto", + "sp-core", + "sp-runtime", + "sp-std", +] + [[package]] name = "bimap" version = "0.6.2" @@ -1539,7 +1553,7 @@ dependencies = [ "sc-client-api", "sc-client-db", "sc-consensus", - "sc-consensus-aura 0.10.0-dev (git+https://github.com/edgeware-network/edg-aura)", + "sc-consensus-aura", "sc-consensus-epochs", "sc-consensus-slots", "sc-finality-grandpa", @@ -1673,7 +1687,7 @@ dependencies = [ "pallet-transaction-payment-rpc", "pallet-transaction-payment-rpc-runtime-api", "sc-client-api", - "sc-consensus-aura 0.10.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.19)", + "sc-consensus-aura", "sc-consensus-epochs", "sc-consensus-manual-seal", "sc-finality-grandpa", @@ -5238,6 +5252,30 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-babe" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.19#174735ea1bb5fc4513519c45181d8df63d86f613" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-authorship", + "pallet-session", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-application-crypto", + "sp-consensus-babe", + "sp-consensus-vrf", + "sp-io", + "sp-runtime", + "sp-session", + "sp-staking", + "sp-std", +] + [[package]] name = "pallet-bags-list" version = "4.0.0-dev" @@ -7266,45 +7304,20 @@ dependencies = [ [[package]] name = "sc-consensus-aura" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.19#174735ea1bb5fc4513519c45181d8df63d86f613" -dependencies = [ - "async-trait", - "futures 0.3.21", - "log", - "parity-scale-codec", - "sc-block-builder", - "sc-client-api", - "sc-consensus", - "sc-consensus-slots", - "sc-telemetry", - "sp-api", - "sp-application-crypto", - "sp-block-builder", - "sp-blockchain", - "sp-consensus", - "sp-consensus-aura", - "sp-consensus-slots", - "sp-core", - "sp-inherents", - "sp-keystore", - "sp-runtime", - "substrate-prometheus-endpoint", - "thiserror", -] - -[[package]] -name = "sc-consensus-aura" -version = "0.10.0-dev" -source = "git+https://github.com/edgeware-network/edg-aura#c92b1fab4ea61232841710f7ac275cdb963a51c2" dependencies = [ "async-trait", "futures 0.3.21", "log", "parity-scale-codec", + "parking_lot 0.12.1", "sc-block-builder", "sc-client-api", "sc-consensus", "sc-consensus-slots", + "sc-keystore", + "sc-network", + "sc-network-test", + "sc-service", "sc-telemetry", "sp-api", "sp-application-crypto", @@ -7315,9 +7328,14 @@ dependencies = [ "sp-consensus-slots", "sp-core", "sp-inherents", + "sp-keyring", "sp-keystore", "sp-runtime", + "sp-timestamp", + "sp-tracing", "substrate-prometheus-endpoint", + "substrate-test-runtime-client", + "tempfile", "thiserror", ] @@ -7380,7 +7398,6 @@ dependencies = [ [[package]] name = "sc-consensus-manual-seal" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.19#174735ea1bb5fc4513519c45181d8df63d86f613" dependencies = [ "assert_matches", "async-trait", @@ -7390,9 +7407,10 @@ dependencies = [ "jsonrpc-derive", "log", "parity-scale-codec", + "sc-basic-authorship", "sc-client-api", "sc-consensus", - "sc-consensus-aura 0.10.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.19)", + "sc-consensus-aura", "sc-consensus-babe", "sc-consensus-epochs", "sc-transaction-pool", @@ -7410,7 +7428,10 @@ dependencies = [ "sp-runtime", "sp-timestamp", 
"substrate-prometheus-endpoint", + "substrate-test-runtime-client", + "substrate-test-runtime-transaction-pool", "thiserror", + "tokio 1.19.2", ] [[package]] @@ -7678,6 +7699,34 @@ dependencies = [ "tracing", ] +[[package]] +name = "sc-network-test" +version = "0.8.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.19#174735ea1bb5fc4513519c45181d8df63d86f613" +dependencies = [ + "async-std", + "async-trait", + "futures 0.3.21", + "futures-timer", + "libp2p", + "log", + "parking_lot 0.12.1", + "rand 0.7.3", + "sc-block-builder", + "sc-client-api", + "sc-consensus", + "sc-network", + "sc-service", + "sp-blockchain", + "sp-consensus", + "sp-consensus-babe", + "sp-core", + "sp-runtime", + "sp-tracing", + "substrate-test-runtime", + "substrate-test-runtime-client", +] + [[package]] name = "sc-offchain" version = "4.0.0-dev" @@ -9303,6 +9352,110 @@ dependencies = [ "tokio 1.19.2", ] +[[package]] +name = "substrate-test-client" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.19#174735ea1bb5fc4513519c45181d8df63d86f613" +dependencies = [ + "async-trait", + "futures 0.3.21", + "hex 0.4.3", + "parity-scale-codec", + "sc-client-api", + "sc-client-db", + "sc-consensus", + "sc-executor", + "sc-offchain", + "sc-service", + "serde", + "serde_json", + "sp-blockchain", + "sp-consensus", + "sp-core", + "sp-keyring", + "sp-keystore", + "sp-runtime", + "sp-state-machine", +] + +[[package]] +name = "substrate-test-runtime" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.19#174735ea1bb5fc4513519c45181d8df63d86f613" +dependencies = [ + "beefy-primitives", + "cfg-if 1.0.0", + "frame-support", + "frame-system", + "frame-system-rpc-runtime-api", + "log", + "memory-db", + "pallet-babe", + "pallet-timestamp", + "parity-scale-codec", + "parity-util-mem", + "sc-service", + "scale-info", + "serde", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + 
"sp-consensus-aura", + "sp-consensus-babe", + "sp-core", + "sp-externalities", + "sp-finality-grandpa", + "sp-inherents", + "sp-io", + "sp-keyring", + "sp-offchain", + "sp-runtime", + "sp-runtime-interface", + "sp-session", + "sp-state-machine", + "sp-std", + "sp-transaction-pool", + "sp-trie", + "sp-version", + "substrate-wasm-builder", + "trie-db", +] + +[[package]] +name = "substrate-test-runtime-client" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.19#174735ea1bb5fc4513519c45181d8df63d86f613" +dependencies = [ + "futures 0.3.21", + "parity-scale-codec", + "sc-block-builder", + "sc-client-api", + "sc-consensus", + "sp-api", + "sp-blockchain", + "sp-consensus", + "sp-core", + "sp-runtime", + "substrate-test-client", + "substrate-test-runtime", +] + +[[package]] +name = "substrate-test-runtime-transaction-pool" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.19#174735ea1bb5fc4513519c45181d8df63d86f613" +dependencies = [ + "futures 0.3.21", + "parity-scale-codec", + "parking_lot 0.12.1", + "sc-transaction-pool", + "sc-transaction-pool-api", + "sp-blockchain", + "sp-runtime", + "substrate-test-runtime-client", + "thiserror", +] + [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" diff --git a/node/cli/Cargo.toml b/node/cli/Cargo.toml index 32dfa804..8f7806d6 100644 --- a/node/cli/Cargo.toml +++ b/node/cli/Cargo.toml @@ -64,7 +64,7 @@ sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "pol sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } -sc-consensus-aura = { git = 
"https://github.com/edgeware-network/edg-aura", default-features = false } +sc-consensus-aura = { path = "../consensus-transition/aura", default-features = false } sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } sc-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } sc-client-db = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } @@ -179,3 +179,6 @@ runtime-benchmarks = [ fast-runtime = [ "edgeware-runtime/fast-runtime", ] +beresheet-runtime = [ + "edgeware-runtime/beresheet-runtime", +] diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index dd1edc3d..a1d2f308 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -20,7 +20,7 @@ use crate::Cli; use edgeware_cli_opt::{EthApi as EthApiCmd, RpcConfig}; -use edgeware_primitives::Block; +use edgeware_primitives::{Block, BlockNumber}; use fc_db::DatabaseSource; use edgeware_runtime::RuntimeApi; // use maplit::hashmap; @@ -29,7 +29,7 @@ use fc_consensus::FrontierBlockImport; use sc_client_api::BlockBackend; use fc_rpc_core::types::{FeeHistoryCache, FilterPool}; use futures::prelude::*; -use sc_consensus_aura::{self, ImportQueueParams, SlotProportion, StartAuraParams}; +use sc_consensus_aura::{self, CompatibilityMode, ImportQueueParams, SlotProportion, StartAuraParams}; use sc_network::{Event, NetworkService}; use sc_service::{config::{Configuration, /*PrometheusConfig*/}, error::Error as ServiceError, RpcHandlers,BasePath, ChainSpec, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle}; @@ -264,6 +264,10 @@ pub fn new_partial( can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), check_for_equivocation: Default::default(), telemetry: telemetry.as_ref().map(|x| x.handle()), + #[cfg(feature = 
"beresheet-runtime")] + compatibility_mode: CompatibilityMode::UseInitializeBlock { until: BlockNumber::from(1888u32) }, + #[cfg(not(feature = "beresheet-runtime"))] + compatibility_mode: CompatibilityMode::UseInitializeBlock { until: BlockNumber::from(14_555_555u32) }, } )?; @@ -307,7 +311,7 @@ pub struct NewFullBase { /// Creates a full service from the configuration. pub fn new_full_base(mut config: Configuration, - cli: &Cli, + cli: &Cli, rpc_config: RpcConfig ) -> Result { let sc_service::PartialComponents { @@ -468,7 +472,7 @@ pub fn new_full_base(mut config: Configuration, fee_history_cache_limit: fee_history_cache_limit, overrides: overrides.clone(), block_data_cache: block_data_cache.clone(), - command_sink: None, + command_sink: None, }; #[allow(unused_mut)] let mut io = edgeware_rpc::create_full(deps, subscription_task_executor.clone()); @@ -549,6 +553,10 @@ pub fn new_full_base(mut config: Configuration, block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), max_block_proposal_slot_portion: None, telemetry: telemetry.as_ref().map(|x| x.handle()), + #[cfg(feature = "beresheet-runtime")] + compatibility_mode: CompatibilityMode::UseInitializeBlock { until: BlockNumber::from(1888u32) }, + #[cfg(not(feature = "beresheet-runtime"))] + compatibility_mode: CompatibilityMode::UseInitializeBlock { until: BlockNumber::from(14_555_555u32) }, }, )?; diff --git a/node/consensus-transition/aura/Cargo.toml b/node/consensus-transition/aura/Cargo.toml new file mode 100644 index 00000000..587fb855 --- /dev/null +++ b/node/consensus-transition/aura/Cargo.toml @@ -0,0 +1,49 @@ +[package] +name = "sc-consensus-aura" +version = "0.10.0-dev" +authors = ["Parity Technologies "] +description = "Aura consensus algorithm for substrate" +edition = "2021" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +readme = "README.md" + +[package.metadata.docs.rs] +targets = 
["x86_64-unknown-linux-gnu"] + +[dependencies] +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sc-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +codec = { package = "parity-scale-codec", version = "3.0.0" } +sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sp-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +thiserror = "1.0" +futures = "0.3.21" +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +log = "0.4.8" +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sc-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sc-service = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +prometheus-endpoint = { package = 
"substrate-prometheus-endpoint", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +async-trait = "0.1.50" + +[dev-dependencies] +sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +sc-network-test = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +substrate-test-runtime-client = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19" } +tempfile = "3.1.0" +parking_lot = "0.12.0" diff --git a/node/consensus-transition/aura/src/import_queue.rs b/node/consensus-transition/aura/src/import_queue.rs new file mode 100644 index 00000000..d5002ceb --- /dev/null +++ b/node/consensus-transition/aura/src/import_queue.rs @@ -0,0 +1,441 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! 
Module implementing the logic for verifying and importing AuRa blocks. + +use crate::{ + aura_err, authorities, find_pre_digest, slot_author, AuthorityId, CompatibilityMode, Error, +}; +use codec::{Codec, Decode, Encode}; +use log::{debug, info, trace}; +use prometheus_endpoint::Registry; +use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider}; +use sc_consensus::{ + block_import::{BlockImport, BlockImportParams, ForkChoiceStrategy}, + import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue, Verifier}, +}; +use sc_consensus_slots::{check_equivocation, CheckedHeader, InherentDataProviderExt}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::{well_known_cache_keys::Id as CacheKeyId, HeaderBackend}; +use sp_consensus::{CanAuthorWith, Error as ConsensusError}; +use sp_consensus_aura::{digests::CompatibleDigestItem, inherents::AuraInherentData, AuraApi}; +use sp_consensus_slots::Slot; +use sp_core::{crypto::Pair, ExecutionContext}; +use sp_inherents::{CreateInherentDataProviders, InherentDataProvider as _}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header, NumberFor}, + DigestItem, +}; +use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; + +/// check a header has been signed by the right key. If the slot is too far in the future, an error +/// will be returned. If it's successful, returns the pre-header and the digest item +/// containing the seal. +/// +/// This digest item will always return `Some` when used with `as_aura_seal`. +fn check_header( + client: &C, + slot_now: Slot, + mut header: B::Header, + hash: B::Hash, + authorities: &[AuthorityId

], + check_for_equivocation: CheckForEquivocation, +) -> Result, Error> +where + P::Signature: Codec, + C: sc_client_api::backend::AuxStore, + P::Public: Encode + Decode + PartialEq + Clone, +{ + let seal = header.digest_mut().pop().ok_or(Error::HeaderUnsealed(hash))?; + + let sig = seal.as_aura_seal().ok_or_else(|| aura_err(Error::HeaderBadSeal(hash)))?; + + let slot = find_pre_digest::(&header)?; + + if slot > slot_now { + header.digest_mut().push(seal); + Ok(CheckedHeader::Deferred(header, slot)) + } else { + // check the signature is valid under the expected authority and + // chain state. + let expected_author = + slot_author::

(slot, authorities).ok_or(Error::SlotAuthorNotFound)?; + + let pre_hash = header.hash(); + + if P::verify(&sig, pre_hash.as_ref(), expected_author) { + if check_for_equivocation.check_for_equivocation() { + if let Some(equivocation_proof) = + check_equivocation(client, slot_now, slot, &header, expected_author) + .map_err(Error::Client)? + { + info!( + target: "aura", + "Slot author is equivocating at slot {} with headers {:?} and {:?}", + slot, + equivocation_proof.first_header.hash(), + equivocation_proof.second_header.hash(), + ); + } + } + + Ok(CheckedHeader::Checked(header, (slot, seal))) + } else { + Err(Error::BadSignature(hash)) + } + } +} + +/// A verifier for Aura blocks. +pub struct AuraVerifier { + client: Arc, + phantom: PhantomData

, + create_inherent_data_providers: CIDP, + can_author_with: CAW, + check_for_equivocation: CheckForEquivocation, + telemetry: Option, + compatibility_mode: CompatibilityMode, +} + +impl AuraVerifier { + pub(crate) fn new( + client: Arc, + create_inherent_data_providers: CIDP, + can_author_with: CAW, + check_for_equivocation: CheckForEquivocation, + telemetry: Option, + compatibility_mode: CompatibilityMode, + ) -> Self { + Self { + client, + create_inherent_data_providers, + can_author_with, + check_for_equivocation, + telemetry, + compatibility_mode, + phantom: PhantomData, + } + } +} + +impl AuraVerifier +where + P: Send + Sync + 'static, + CAW: Send + Sync + 'static, + CIDP: Send, +{ + async fn check_inherents( + &self, + block: B, + block_id: BlockId, + inherent_data: sp_inherents::InherentData, + create_inherent_data_providers: CIDP::InherentDataProviders, + execution_context: ExecutionContext, + ) -> Result<(), Error> + where + C: ProvideRuntimeApi, + C::Api: BlockBuilderApi, + CAW: CanAuthorWith, + CIDP: CreateInherentDataProviders, + { + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "aura", + "Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + + let inherent_res = self + .client + .runtime_api() + .check_inherents_with_context(&block_id, execution_context, block, inherent_data) + .map_err(|e| Error::Client(e.into()))?; + + if !inherent_res.ok() { + for (i, e) in inherent_res.into_errors() { + match create_inherent_data_providers.try_handle_error(&i, &e).await { + Some(res) => res.map_err(Error::Inherent)?, + None => return Err(Error::UnknownInherentError(i)), + } + } + } + + Ok(()) + } +} + +#[async_trait::async_trait] +impl Verifier for AuraVerifier> +where + C: ProvideRuntimeApi + Send + Sync + sc_client_api::backend::AuxStore + BlockOf, + C::Api: BlockBuilderApi + AuraApi> + ApiExt, + P: Pair + Send + Sync + 'static, + P::Public: Send + Sync + Hash + Eq + 
Clone + Decode + Encode + Debug + 'static, + P::Signature: Encode + Decode, + CAW: CanAuthorWith + Send + Sync + 'static, + CIDP: CreateInherentDataProviders + Send + Sync, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, +{ + async fn verify( + &mut self, + mut block: BlockImportParams, + ) -> Result<(BlockImportParams, Option)>>), String> { + let hash = block.header.hash(); + let parent_hash = *block.header.parent_hash(); + let authorities = authorities( + self.client.as_ref(), + parent_hash, + *block.header.number(), + &self.compatibility_mode, + ) + .map_err(|e| format!("Could not fetch authorities at {:?}: {}", parent_hash, e))?; + + let create_inherent_data_providers = self + .create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .await + .map_err(|e| Error::::Client(sp_blockchain::Error::Application(e)))?; + + let mut inherent_data = create_inherent_data_providers + .create_inherent_data() + .map_err(Error::::Inherent)?; + + let slot_now = create_inherent_data_providers.slot(); + + // we add one to allow for some small drift. + // FIXME #1019 in the future, alter this queue to allow deferring of + // headers + let checked_header = check_header::( + &self.client, + slot_now + 1, + block.header, + hash, + &authorities[..], + self.check_for_equivocation, + ) + .map_err(|e| e.to_string())?; + match checked_header { + CheckedHeader::Checked(pre_header, (slot, seal)) => { + // if the body is passed through, we need to use the runtime + // to check that the internally-set timestamp in the inherents + // actually matches the slot set in the seal. + if let Some(inner_body) = block.body.take() { + let new_block = B::new(pre_header.clone(), inner_body); + + inherent_data.aura_replace_inherent_data(slot); + + // skip the inherents verification if the runtime API is old. + if self + .client + .runtime_api() + .has_api_with::, _>( + &BlockId::Hash(parent_hash), + |v| v >= 2, + ) + .map_err(|e| e.to_string())? 
+ { + self.check_inherents( + new_block.clone(), + BlockId::Hash(parent_hash), + inherent_data, + create_inherent_data_providers, + block.origin.into(), + ) + .await + .map_err(|e| e.to_string())?; + } + + let (_, inner_body) = new_block.deconstruct(); + block.body = Some(inner_body); + } + + trace!(target: "aura", "Checked {:?}; importing.", pre_header); + telemetry!( + self.telemetry; + CONSENSUS_TRACE; + "aura.checked_and_importing"; + "pre_header" => ?pre_header, + ); + + block.header = pre_header; + block.post_digests.push(seal); + block.fork_choice = Some(ForkChoiceStrategy::LongestChain); + block.post_hash = Some(hash); + + Ok((block, None)) + }, + CheckedHeader::Deferred(a, b) => { + debug!(target: "aura", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "aura.header_too_far_in_future"; + "hash" => ?hash, + "a" => ?a, + "b" => ?b, + ); + Err(format!("Header {:?} rejected: too far in the future", hash)) + }, + } + } +} + +/// Should we check for equivocation of a block author? +#[derive(Debug, Clone, Copy)] +pub enum CheckForEquivocation { + /// Yes, check for equivocation. + /// + /// This is the default setting for this. + Yes, + /// No, don't check for equivocation. + No, +} + +impl CheckForEquivocation { + /// Should we check for equivocation? + fn check_for_equivocation(self) -> bool { + matches!(self, Self::Yes) + } +} + +impl Default for CheckForEquivocation { + fn default() -> Self { + Self::Yes + } +} + +/// Parameters of [`import_queue`]. +pub struct ImportQueueParams<'a, Block: BlockT, I, C, S, CAW, CIDP> { + /// The block import to use. + pub block_import: I, + /// The justification import. + pub justification_import: Option>, + /// The client to interact with the chain. + pub client: Arc, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, + /// The spawner to spawn background tasks. + pub spawner: &'a S, + /// The prometheus registry. 
+ pub registry: Option<&'a Registry>, + /// Can we author with the current node? + pub can_author_with: CAW, + /// Should we check for equivocation? + pub check_for_equivocation: CheckForEquivocation, + /// Telemetry instance used to report telemetry metrics. + pub telemetry: Option, + /// Compatibility mode that should be used. + /// + /// If in doubt, use `Default::default()`. + pub compatibility_mode: CompatibilityMode>, +} + +/// Start an import queue for the Aura consensus algorithm. +pub fn import_queue( + ImportQueueParams { + block_import, + justification_import, + client, + create_inherent_data_providers, + spawner, + registry, + can_author_with, + check_for_equivocation, + telemetry, + compatibility_mode, + }: ImportQueueParams, +) -> Result, sp_consensus::Error> +where + Block: BlockT, + C::Api: BlockBuilderApi + AuraApi> + ApiExt, + C: 'static + + ProvideRuntimeApi + + BlockOf + + Send + + Sync + + AuxStore + + UsageProvider + + HeaderBackend, + I: BlockImport> + + Send + + Sync + + 'static, + P: Pair + Send + Sync + 'static, + P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, + P::Signature: Encode + Decode, + S: sp_core::traits::SpawnEssentialNamed, + CAW: CanAuthorWith + Send + Sync + 'static, + CIDP: CreateInherentDataProviders + Sync + Send + 'static, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, +{ + let verifier = build_verifier::(BuildVerifierParams { + client, + create_inherent_data_providers, + can_author_with, + check_for_equivocation, + telemetry, + compatibility_mode, + }); + + Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry)) +} + +/// Parameters of [`build_verifier`]. +pub struct BuildVerifierParams { + /// The client to interact with the chain. + pub client: Arc, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, + /// Can we author with the current node? 
+ pub can_author_with: CAW, + /// Should we check for equivocation? + pub check_for_equivocation: CheckForEquivocation, + /// Telemetry instance used to report telemetry metrics. + pub telemetry: Option, + /// Compatibility mode that should be used. + /// + /// If in doubt, use `Default::default()`. + pub compatibility_mode: CompatibilityMode, +} + +/// Build the [`AuraVerifier`] +pub fn build_verifier( + BuildVerifierParams { + client, + create_inherent_data_providers, + can_author_with, + check_for_equivocation, + telemetry, + compatibility_mode, + }: BuildVerifierParams, +) -> AuraVerifier { + AuraVerifier::<_, P, _, _, _>::new( + client, + create_inherent_data_providers, + can_author_with, + check_for_equivocation, + telemetry, + compatibility_mode, + ) +} \ No newline at end of file diff --git a/node/consensus-transition/aura/src/lib.rs b/node/consensus-transition/aura/src/lib.rs new file mode 100644 index 00000000..25742700 --- /dev/null +++ b/node/consensus-transition/aura/src/lib.rs @@ -0,0 +1,630 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Aura (Authority-round) consensus in substrate. +//! +//! Aura works by having a list of authorities A who are expected to roughly +//! agree on the current time. 
Time is divided up into discrete slots of t +//! seconds each. For each slot s, the author of that slot is A[s % |A|]. +//! +//! The author is allowed to issue one block but not more during that slot, +//! and it will be built upon the longest valid chain that has been seen. +//! +//! Blocks from future steps will be either deferred or rejected depending on how +//! far in the future they are. +//! +//! NOTE: Aura itself is designed to be generic over the crypto used. +#![forbid(missing_docs, unsafe_code)] +use std::{fmt::Debug, hash::Hash, marker::PhantomData, pin::Pin, sync::Arc}; + +use futures::prelude::*; +use log::{debug, trace}; + +use codec::{Codec, Decode, Encode}; + +use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider}; +use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction}; +use sc_consensus_slots::{ + BackoffAuthoringBlocksStrategy, InherentDataProviderExt, SimpleSlotWorkerToSlotWorker, + SlotInfo, StorageChanges, +}; +use sc_telemetry::TelemetryHandle; +use sp_api::{Core, ProvideRuntimeApi}; +use sp_application_crypto::{AppKey, AppPublic}; +use sp_blockchain::{HeaderBackend, Result as CResult}; +use sp_consensus::{ + BlockOrigin, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, +}; +use sp_consensus_slots::Slot; +use sp_core::crypto::{ByteArray, Pair, Public}; +use sp_inherents::CreateInherentDataProviders; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header, Member, NumberFor, Zero}, + DigestItem, +}; + +mod import_queue; + +pub use import_queue::{ + build_verifier, import_queue, AuraVerifier, BuildVerifierParams, CheckForEquivocation, + ImportQueueParams, +}; +pub use sc_consensus_slots::SlotProportion; +pub use sp_consensus::SyncOracle; +pub use sp_consensus_aura::{ + digests::CompatibleDigestItem, + inherents::{InherentDataProvider, InherentType as AuraInherent, INHERENT_IDENTIFIER}, + AuraApi, 
ConsensusLog, SlotDuration, AURA_ENGINE_ID, +}; + +type AuthorityId

=

::Public; + +/// Run `AURA` in a compatibility mode. +/// +/// This is required for when the chain was launched and later there +/// was a consensus breaking change. +#[derive(Debug, Clone)] +pub enum CompatibilityMode { + /// Don't use any compatibility mode. + None, + /// Call `initialize_block` before doing any runtime calls. + /// + /// The node would execute `initialize_block` before fetchting the authorities + /// from the runtime. This behaviour changed in: + /// + /// By calling `initialize_block` before fetching the authorities, on a block that + /// would enact a new validator set, the block would already be build/sealed by an + /// authority of the new set. A block that enacts a new set, should not be sealed/build + /// by an authority of the new set. This isn't done anymore. However, to make new nodes + /// being able to sync the old chain this compatibility mode exists. + UseInitializeBlock { + /// The block number until this compatibility mode should be executed. The first runtime + /// call in the context (importing it/building it) of the `until` block should disable the + /// compatibility mode. This number should be of a block in the future! It should be a + /// block number on that all nodes have upgraded to a release that runs with the + /// compatibility mode. After this block there will be a hard fork when the authority set + /// changes, between the old nodes (running with `initialize_block`) and the new nodes. + until: N, + }, +} + +impl Default for CompatibilityMode { + fn default() -> Self { + Self::None + } +} + +/// Get the slot duration for Aura. +pub fn slot_duration(client: &C) -> CResult +where + A: Codec, + B: BlockT, + C: AuxStore + ProvideRuntimeApi + UsageProvider, + C::Api: AuraApi, +{ + let best_block_id = BlockId::Hash(client.usage_info().chain.best_hash); + client.runtime_api().slot_duration(&best_block_id).map_err(|err| err.into()) +} + +/// Get slot author for given block along with authorities. 
+fn slot_author(slot: Slot, authorities: &[AuthorityId

]) -> Option<&AuthorityId

> { + if authorities.is_empty() { + return None + } + + let idx = *slot % (authorities.len() as u64); + assert!( + idx <= usize::MAX as u64, + "It is impossible to have a vector with length beyond the address space; qed", + ); + + let current_author = authorities.get(idx as usize).expect( + "authorities not empty; index constrained to list length;this is a valid index; qed", + ); + + Some(current_author) +} + +/// Parameters of [`start_aura`]. +pub struct StartAuraParams { + /// The duration of a slot. + pub slot_duration: SlotDuration, + /// The client to interact with the chain. + pub client: Arc, + /// A select chain implementation to select the best block. + pub select_chain: SC, + /// The block import. + pub block_import: I, + /// The proposer factory to build proposer instances. + pub proposer_factory: PF, + /// The sync oracle that can give us the current sync status. + pub sync_oracle: SO, + /// Hook into the sync module to control the justification sync process. + pub justification_sync_link: L, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, + /// Should we force the authoring of blocks? + pub force_authoring: bool, + /// The backoff strategy when we miss slots. + pub backoff_authoring_blocks: Option, + /// The keystore used by the node. + pub keystore: SyncCryptoStorePtr, + /// Can we author a block with this node? + pub can_author_with: CAW, + /// The proportion of the slot dedicated to proposing. + /// + /// The block proposing will be limited to this proportion of the slot from the starting of the + /// slot. However, the proposing can still take longer when there is some lenience factor + /// applied, because there were no blocks produced for some slots. + pub block_proposal_slot_portion: SlotProportion, + /// The maximum proportion of the slot dedicated to proposing with any lenience factor applied + /// due to no blocks being produced. 
+ pub max_block_proposal_slot_portion: Option, + /// Telemetry instance used to report telemetry metrics. + pub telemetry: Option, + /// Compatibility mode that should be used. + /// + /// If in doubt, use `Default::default()`. + pub compatibility_mode: CompatibilityMode, +} + +/// Start the aura worker. The returned future should be run in a futures executor. +pub fn start_aura( + StartAuraParams { + slot_duration, + client, + select_chain, + block_import, + proposer_factory, + sync_oracle, + justification_sync_link, + create_inherent_data_providers, + force_authoring, + backoff_authoring_blocks, + keystore, + can_author_with, + block_proposal_slot_portion, + max_block_proposal_slot_portion, + telemetry, + compatibility_mode, + }: StartAuraParams>, +) -> Result, sp_consensus::Error> +where + P: Pair + Send + Sync, + P::Public: AppPublic + Hash + Member + Encode + Decode, + P::Signature: TryFrom> + Hash + Member + Encode + Decode, + B: BlockT, + C: ProvideRuntimeApi + BlockOf + AuxStore + HeaderBackend + Send + Sync, + C::Api: AuraApi>, + SC: SelectChain, + I: BlockImport> + Send + Sync + 'static, + PF: Environment + Send + Sync + 'static, + PF::Proposer: Proposer>, + SO: SyncOracle + Send + Sync + Clone, + L: sc_consensus::JustificationSyncLink, + CIDP: CreateInherentDataProviders + Send, + CIDP::InherentDataProviders: InherentDataProviderExt + Send, + BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, + CAW: CanAuthorWith + Send, + Error: std::error::Error + Send + From + 'static, +{ + let worker = build_aura_worker::(BuildAuraWorkerParams { + client, + block_import, + proposer_factory, + keystore, + sync_oracle: sync_oracle.clone(), + justification_sync_link, + force_authoring, + backoff_authoring_blocks, + telemetry, + block_proposal_slot_portion, + max_block_proposal_slot_portion, + compatibility_mode, + }); + + Ok(sc_consensus_slots::start_slot_worker( + slot_duration, + select_chain, + worker, + sync_oracle, + create_inherent_data_providers, + 
can_author_with, + )) +} + +/// Parameters of [`build_aura_worker`]. +pub struct BuildAuraWorkerParams { + /// The client to interact with the chain. + pub client: Arc, + /// The block import. + pub block_import: I, + /// The proposer factory to build proposer instances. + pub proposer_factory: PF, + /// The sync oracle that can give us the current sync status. + pub sync_oracle: SO, + /// Hook into the sync module to control the justification sync process. + pub justification_sync_link: L, + /// Should we force the authoring of blocks? + pub force_authoring: bool, + /// The backoff strategy when we miss slots. + pub backoff_authoring_blocks: Option, + /// The keystore used by the node. + pub keystore: SyncCryptoStorePtr, + /// The proportion of the slot dedicated to proposing. + /// + /// The block proposing will be limited to this proportion of the slot from the starting of the + /// slot. However, the proposing can still take longer when there is some lenience factor + /// applied, because there were no blocks produced for some slots. + pub block_proposal_slot_portion: SlotProportion, + /// The maximum proportion of the slot dedicated to proposing with any lenience factor applied + /// due to no blocks being produced. + pub max_block_proposal_slot_portion: Option, + /// Telemetry instance used to report telemetry metrics. + pub telemetry: Option, + /// Compatibility mode that should be used. + /// + /// If in doubt, use `Default::default()`. + pub compatibility_mode: CompatibilityMode, +} + +/// Build the aura worker. +/// +/// The caller is responsible for running this worker, otherwise it will do nothing. 
+pub fn build_aura_worker( + BuildAuraWorkerParams { + client, + block_import, + proposer_factory, + sync_oracle, + justification_sync_link, + backoff_authoring_blocks, + keystore, + block_proposal_slot_portion, + max_block_proposal_slot_portion, + telemetry, + force_authoring, + compatibility_mode, + }: BuildAuraWorkerParams>, +) -> impl sc_consensus_slots::SlotWorker>::Proof> + +where + B: BlockT, + C: ProvideRuntimeApi + BlockOf + AuxStore + HeaderBackend + Send + Sync, + C::Api: AuraApi>, + PF: Environment + Send + Sync + 'static, + PF::Proposer: Proposer>, + P: Pair + Send + Sync, + P::Public: AppPublic + Hash + Member + Encode + Decode, + P::Signature: TryFrom> + Hash + Member + Encode + Decode, + I: BlockImport> + Send + Sync + 'static, + Error: std::error::Error + Send + From + 'static, + SO: SyncOracle + Send + Sync + Clone, + L: sc_consensus::JustificationSyncLink, + BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, +{ + SimpleSlotWorkerToSlotWorker(AuraWorker { + client, + block_import, + env: proposer_factory, + keystore, + sync_oracle, + justification_sync_link, + force_authoring, + backoff_authoring_blocks, + telemetry, + block_proposal_slot_portion, + max_block_proposal_slot_portion, + compatibility_mode, + _key_type: PhantomData::

, + }) +} + +struct AuraWorker { + client: Arc, + block_import: I, + env: E, + keystore: SyncCryptoStorePtr, + sync_oracle: SO, + justification_sync_link: L, + force_authoring: bool, + backoff_authoring_blocks: Option, + block_proposal_slot_portion: SlotProportion, + max_block_proposal_slot_portion: Option, + telemetry: Option, + compatibility_mode: CompatibilityMode, + _key_type: PhantomData

, +} + +#[async_trait::async_trait] +impl sc_consensus_slots::SimpleSlotWorker + for AuraWorker> +where + B: BlockT, + C: ProvideRuntimeApi + BlockOf + HeaderBackend + Sync, + C::Api: AuraApi>, + E: Environment + Send + Sync, + E::Proposer: Proposer>, + I: BlockImport> + Send + Sync + 'static, + P: Pair + Send + Sync, + P::Public: AppPublic + Public + Member + Encode + Decode + Hash, + P::Signature: TryFrom> + Member + Encode + Decode + Hash + Debug, + SO: SyncOracle + Send + Clone + Sync, + L: sc_consensus::JustificationSyncLink, + BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, + Error: std::error::Error + Send + From + 'static, +{ + type BlockImport = I; + type SyncOracle = SO; + type JustificationSyncLink = L; + type CreateProposer = + Pin> + Send + 'static>>; + type Proposer = E::Proposer; + type Claim = P::Public; + type EpochData = Vec>; + + fn logging_target(&self) -> &'static str { + "aura" + } + + fn block_import(&mut self) -> &mut Self::BlockImport { + &mut self.block_import + } + + fn epoch_data( + &self, + header: &B::Header, + _slot: Slot, + ) -> Result { + authorities( + self.client.as_ref(), + header.hash(), + *header.number() + 1u32.into(), + &self.compatibility_mode, + ) + } + + fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option { + Some(epoch_data.len()) + } + + async fn claim_slot( + &self, + _header: &B::Header, + slot: Slot, + epoch_data: &Self::EpochData, + ) -> Option { + let expected_author = slot_author::

(slot, epoch_data); + expected_author.and_then(|p| { + if SyncCryptoStore::has_keys( + &*self.keystore, + &[(p.to_raw_vec(), sp_application_crypto::key_types::AURA)], + ) { + Some(p.clone()) + } else { + None + } + }) + } + + fn pre_digest_data(&self, slot: Slot, _claim: &Self::Claim) -> Vec { + vec![>::aura_pre_digest(slot)] + } + + async fn block_import_params( + &self, + header: B::Header, + header_hash: &B::Hash, + body: Vec, + storage_changes: StorageChanges<>::Transaction, B>, + public: Self::Claim, + _epoch: Self::EpochData, + ) -> Result< + sc_consensus::BlockImportParams>::Transaction>, + sp_consensus::Error, + > { + // sign the pre-sealed hash of the block and then + // add it to a digest item. + let public_type_pair = public.to_public_crypto_pair(); + let public = public.to_raw_vec(); + let signature = SyncCryptoStore::sign_with( + &*self.keystore, + as AppKey>::ID, + &public_type_pair, + header_hash.as_ref(), + ) + .map_err(|e| sp_consensus::Error::CannotSign(public.clone(), e.to_string()))? 
+ .ok_or_else(|| { + sp_consensus::Error::CannotSign( + public.clone(), + "Could not find key in keystore.".into(), + ) + })?; + let signature = signature + .clone() + .try_into() + .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; + + let signature_digest_item = + >::aura_seal(signature); + + let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); + import_block.post_digests.push(signature_digest_item); + import_block.body = Some(body); + import_block.state_action = + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); + import_block.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + Ok(import_block) + } + + fn force_authoring(&self) -> bool { + self.force_authoring + } + + fn should_backoff(&self, slot: Slot, chain_head: &B::Header) -> bool { + if let Some(ref strategy) = self.backoff_authoring_blocks { + if let Ok(chain_head_slot) = find_pre_digest::(chain_head) { + return strategy.should_backoff( + *chain_head.number(), + chain_head_slot, + self.client.info().finalized_number, + slot, + self.logging_target(), + ) + } + } + false + } + + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { + &mut self.sync_oracle + } + + fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink { + &mut self.justification_sync_link + } + + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { + self.env + .init(block) + .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))) + .boxed() + } + + fn telemetry(&self) -> Option { + self.telemetry.clone() + } + + fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> std::time::Duration { + let parent_slot = find_pre_digest::(&slot_info.chain_head).ok(); + + sc_consensus_slots::proposing_remaining_duration( + parent_slot, + slot_info, + &self.block_proposal_slot_portion, + self.max_block_proposal_slot_portion.as_ref(), + sc_consensus_slots::SlotLenienceType::Exponential, + self.logging_target(), + ) + } +} + 
+fn aura_err(error: Error) -> Error { + debug!(target: "aura", "{}", error); + error +} + +/// Aura Errors +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Multiple Aura pre-runtime headers + #[error("Multiple Aura pre-runtime headers")] + MultipleHeaders, + /// No Aura pre-runtime digest found + #[error("No Aura pre-runtime digest found")] + NoDigestFound, + /// Header is unsealed + #[error("Header {0:?} is unsealed")] + HeaderUnsealed(B::Hash), + /// Header has a bad seal + #[error("Header {0:?} has a bad seal")] + HeaderBadSeal(B::Hash), + /// Slot Author not found + #[error("Slot Author not found")] + SlotAuthorNotFound, + /// Bad signature + #[error("Bad signature on {0:?}")] + BadSignature(B::Hash), + /// Client Error + #[error(transparent)] + Client(sp_blockchain::Error), + /// Unknown inherent error for identifier + #[error("Unknown inherent error for identifier: {}", String::from_utf8_lossy(.0))] + UnknownInherentError(sp_inherents::InherentIdentifier), + /// Inherents Error + #[error("Inherent error: {0}")] + Inherent(sp_inherents::Error), +} + +impl From> for String { + fn from(error: Error) -> String { + error.to_string() + } +} + +/// Get pre-digests from the header +pub fn find_pre_digest(header: &B::Header) -> Result> { + if header.number().is_zero() { + return Ok(0.into()) + } + + let mut pre_digest: Option = None; + for log in header.digest().logs() { + trace!(target: "aura", "Checking log {:?}", log); + match (CompatibleDigestItem::::as_aura_pre_digest(log), pre_digest.is_some()) { + (Some(_), true) => return Err(aura_err(Error::MultipleHeaders)), + (None, _) => trace!(target: "aura", "Ignoring digest not meant for us"), + (s, false) => pre_digest = s, + } + } + pre_digest.ok_or_else(|| aura_err(Error::NoDigestFound)) +} + +fn authorities( + client: &C, + parent_hash: B::Hash, + context_block_number: NumberFor, + compatibility_mode: &CompatibilityMode>, +) -> Result, ConsensusError> +where + A: Codec + Debug, + B: BlockT, + C: 
ProvideRuntimeApi, + C::Api: AuraApi, +{ + let runtime_api = client.runtime_api(); + + match compatibility_mode { + CompatibilityMode::None => {}, + // Use `initialize_block` until we hit the block that should disable the mode. + CompatibilityMode::UseInitializeBlock { until } => + if *until > context_block_number { + runtime_api + .initialize_block( + &BlockId::Hash(parent_hash), + &B::Header::new( + context_block_number, + Default::default(), + Default::default(), + parent_hash, + Default::default(), + ), + ) + .map_err(|_| sp_consensus::Error::InvalidAuthoritiesSet)?; + }, + } + + runtime_api + .authorities(&BlockId::Hash(parent_hash)) + .ok() + .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) +} + diff --git a/node/consensus-transition/manual-seal/Cargo.toml b/node/consensus-transition/manual-seal/Cargo.toml new file mode 100644 index 00000000..663b9cd7 --- /dev/null +++ b/node/consensus-transition/manual-seal/Cargo.toml @@ -0,0 +1,53 @@ +[package] +name = "sc-consensus-manual-seal" +version = "0.10.0-dev" +authors = ["Parity Technologies "] +description = "Manual sealing engine for Substrate" +edition = "2021" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +thiserror = "1.0" +futures = "0.3.21" +jsonrpc-core = "18.0.0" +jsonrpc-core-client = "18.0.0" +jsonrpc-derive = "18.0.0" +log = "0.4.8" +codec = { package = "parity-scale-codec", version = "3.0.0" } +serde = { version = "1.0", features = ["derive"] } +assert_matches = "1.3.0" +async-trait = "0.1.50" + +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sc-consensus-babe = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sc-consensus-aura = { path = "../aura", default-features = false } +sc-consensus-epochs = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } + +sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sp-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } + 
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } + +[dev-dependencies] +tokio = { version = "1.17.0", features = ["rt-multi-thread", "macros"] } +sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +substrate-test-runtime-client = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +substrate-test-runtime-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } diff --git a/node/consensus-transition/manual-seal/src/consensus.rs b/node/consensus-transition/manual-seal/src/consensus.rs new file mode 100644 index 00000000..dfd3730f --- /dev/null +++ b/node/consensus-transition/manual-seal/src/consensus.rs @@ -0,0 +1,46 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Extensions for manual seal to produce blocks valid for any runtime. 
+use super::Error; + +use sc_consensus::BlockImportParams; +use sp_inherents::InherentData; +use sp_runtime::{traits::Block as BlockT, Digest}; + +pub mod aura; +pub mod babe; +pub mod timestamp; + +/// Consensus data provider, manual seal uses this trait object for authoring blocks valid +/// for any runtime. +pub trait ConsensusDataProvider: Send + Sync { + /// Block import transaction type + type Transaction; + + /// Attempt to create a consensus digest. + fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result; + + /// set up the neccessary import params. + fn append_block_import( + &self, + parent: &B::Header, + params: &mut BlockImportParams, + inherents: &InherentData, + ) -> Result<(), Error>; +} diff --git a/node/consensus-transition/manual-seal/src/consensus/aura.rs b/node/consensus-transition/manual-seal/src/consensus/aura.rs new file mode 100644 index 00000000..7b5d6720 --- /dev/null +++ b/node/consensus-transition/manual-seal/src/consensus/aura.rs @@ -0,0 +1,98 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Aura consensus data provider, This allows manual seal author blocks that are valid for +//! runtimes that expect the aura-specific digests. 
+ +use crate::{ConsensusDataProvider, Error}; +use sc_client_api::{AuxStore, UsageProvider}; +use sc_consensus::BlockImportParams; +use sp_api::{ProvideRuntimeApi, TransactionFor}; +use sp_blockchain::{HeaderBackend, HeaderMetadata}; +use sp_consensus_aura::{ + digests::CompatibleDigestItem, + sr25519::{AuthorityId, AuthoritySignature}, + AuraApi, Slot, SlotDuration, +}; +use sp_inherents::InherentData; +use sp_runtime::{traits::Block as BlockT, Digest, DigestItem}; +use sp_timestamp::TimestampInherentData; +use std::{marker::PhantomData, sync::Arc}; + +/// Consensus data provider for Aura. +pub struct AuraConsensusDataProvider { + // slot duration + slot_duration: SlotDuration, + // phantom data for required generics + _phantom: PhantomData<(B, C)>, +} + +impl AuraConsensusDataProvider +where + B: BlockT, + C: AuxStore + ProvideRuntimeApi + UsageProvider, + C::Api: AuraApi, +{ + /// Creates a new instance of the [`AuraConsensusDataProvider`], requires that `client` + /// implements [`sp_consensus_aura::AuraApi`] + pub fn new(client: Arc) -> Self { + let slot_duration = sc_consensus_aura::slot_duration(&*client) + .expect("slot_duration is always present; qed."); + + Self { slot_duration, _phantom: PhantomData } + } +} + +impl ConsensusDataProvider for AuraConsensusDataProvider +where + B: BlockT, + C: AuxStore + + HeaderBackend + + HeaderMetadata + + UsageProvider + + ProvideRuntimeApi, + C::Api: AuraApi, +{ + type Transaction = TransactionFor; + + fn create_digest( + &self, + _parent: &B::Header, + inherents: &InherentData, + ) -> Result { + let timestamp = + inherents.timestamp_inherent_data()?.expect("Timestamp is always present; qed"); + + // we always calculate the new slot number based on the current time-stamp and the slot + // duration. 
+ let digest_item = >::aura_pre_digest( + Slot::from_timestamp(timestamp, self.slot_duration), + ); + + Ok(Digest { logs: vec![digest_item] }) + } + + fn append_block_import( + &self, + _parent: &B::Header, + _params: &mut BlockImportParams, + _inherents: &InherentData, + ) -> Result<(), Error> { + Ok(()) + } +} diff --git a/node/consensus-transition/manual-seal/src/consensus/babe.rs b/node/consensus-transition/manual-seal/src/consensus/babe.rs new file mode 100644 index 00000000..53cc58df --- /dev/null +++ b/node/consensus-transition/manual-seal/src/consensus/babe.rs @@ -0,0 +1,314 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! BABE consensus data provider, This allows manual seal author blocks that are valid for runtimes +//! that expect babe-specific digests. 
+ +use super::ConsensusDataProvider; +use crate::Error; +use codec::Encode; +use sc_client_api::{AuxStore, UsageProvider}; +use sc_consensus_babe::{ + authorship, find_pre_digest, BabeIntermediate, CompatibleDigestItem, Config, Epoch, + INTERMEDIATE_KEY, +}; +use sc_consensus_epochs::{ + descendent_query, EpochHeader, SharedEpochChanges, ViableEpochDescriptor, +}; +use sp_keystore::SyncCryptoStorePtr; +use std::{borrow::Cow, sync::Arc}; + +use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier}; +use sp_api::{ProvideRuntimeApi, TransactionFor}; +use sp_blockchain::{HeaderBackend, HeaderMetadata}; +use sp_consensus::CacheKeyId; +use sp_consensus_babe::{ + digests::{NextEpochDescriptor, PreDigest, SecondaryPlainPreDigest}, + inherents::BabeInherentData, + AuthorityId, BabeApi, BabeAuthorityWeight, ConsensusLog, BABE_ENGINE_ID, +}; +use sp_consensus_slots::Slot; +use sp_inherents::InherentData; +use sp_runtime::{ + generic::{BlockId, Digest}, + traits::{Block as BlockT, Header}, + DigestItem, +}; +use sp_timestamp::TimestampInherentData; + +/// Provides BABE-compatible predigests and BlockImportParams. +/// Intended for use with BABE runtimes. +pub struct BabeConsensusDataProvider { + /// shared reference to keystore + keystore: SyncCryptoStorePtr, + + /// Shared reference to the client. + client: Arc, + + /// Shared epoch changes + epoch_changes: SharedEpochChanges, + + /// BABE config, gotten from the runtime. + config: Config, + + /// Authorities to be used for this babe chain. + authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, +} + +/// Verifier to be used for babe chains +pub struct BabeVerifier { + /// Shared epoch changes + epoch_changes: SharedEpochChanges, + + /// Shared reference to the client. 
+ client: Arc, +} + +impl BabeVerifier { + /// create a nrew verifier + pub fn new(epoch_changes: SharedEpochChanges, client: Arc) -> BabeVerifier { + BabeVerifier { epoch_changes, client } + } +} + +/// The verifier for the manual seal engine; instantly finalizes. +#[async_trait::async_trait] +impl Verifier for BabeVerifier +where + B: BlockT, + C: HeaderBackend + HeaderMetadata, +{ + async fn verify( + &mut self, + mut import_params: BlockImportParams, + ) -> Result<(BlockImportParams, Option)>>), String> { + import_params.finalized = false; + import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + let pre_digest = find_pre_digest::(&import_params.header)?; + + let parent_hash = import_params.header.parent_hash(); + let parent = self + .client + .header(BlockId::Hash(*parent_hash)) + .ok() + .flatten() + .ok_or_else(|| format!("header for block {} not found", parent_hash))?; + let epoch_changes = self.epoch_changes.shared_data(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + parent.number().clone(), + pre_digest.slot(), + ) + .map_err(|e| format!("failed to fetch epoch_descriptor: {}", e))? 
+ .ok_or_else(|| format!("{}", sp_consensus::Error::InvalidAuthoritiesSet))?; + // drop the lock + drop(epoch_changes); + + import_params.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, + ); + + Ok((import_params, None)) + } +} + +impl BabeConsensusDataProvider +where + B: BlockT, + C: AuxStore + + HeaderBackend + + ProvideRuntimeApi + + HeaderMetadata + + UsageProvider, + C::Api: BabeApi, +{ + pub fn new( + client: Arc, + keystore: SyncCryptoStorePtr, + epoch_changes: SharedEpochChanges, + authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + ) -> Result { + if authorities.is_empty() { + return Err(Error::StringError("Cannot supply empty authority set!".into())) + } + + let config = Config::get(&*client)?; + + Ok(Self { config, client, keystore, epoch_changes, authorities }) + } + + fn epoch(&self, parent: &B::Header, slot: Slot) -> Result { + let epoch_changes = self.epoch_changes.shared_data(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + parent.number().clone(), + slot, + ) + .map_err(|e| Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)))? + .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; + + let epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| { + Epoch::genesis(self.config.genesis_config(), slot) + }) + .ok_or_else(|| { + log::info!(target: "babe", "create_digest: no viable_epoch :("); + sp_consensus::Error::InvalidAuthoritiesSet + })?; + + Ok(epoch.as_ref().clone()) + } +} + +impl ConsensusDataProvider for BabeConsensusDataProvider +where + B: BlockT, + C: AuxStore + + HeaderBackend + + HeaderMetadata + + UsageProvider + + ProvideRuntimeApi, + C::Api: BabeApi, +{ + type Transaction = TransactionFor; + + fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result { + let slot = inherents + .babe_inherent_data()? 
+ .ok_or_else(|| Error::StringError("No babe inherent data".into()))?; + let epoch = self.epoch(parent, slot)?; + + // this is a dev node environment, we should always be able to claim a slot. + let logs = if let Some((predigest, _)) = + authorship::claim_slot(slot, &epoch, &self.keystore) + { + vec![::babe_pre_digest(predigest)] + } else { + // well we couldn't claim a slot because this is an existing chain and we're not in the + // authorities. we need to tell BabeBlockImport that the epoch has changed, and we put + // ourselves in the authorities. + let predigest = + PreDigest::SecondaryPlain(SecondaryPlainPreDigest { slot, authority_index: 0_u32 }); + + let mut epoch_changes = self.epoch_changes.shared_data(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + parent.number().clone(), + slot, + ) + .map_err(|e| { + Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)) + })? + .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; + + match epoch_descriptor { + ViableEpochDescriptor::Signaled(identifier, _epoch_header) => { + let epoch_mut = epoch_changes + .epoch_mut(&identifier) + .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; + + // mutate the current epoch + epoch_mut.authorities = self.authorities.clone(); + + let next_epoch = ConsensusLog::NextEpochData(NextEpochDescriptor { + authorities: self.authorities.clone(), + // copy the old randomness + randomness: epoch_mut.randomness.clone(), + }); + + vec![ + DigestItem::PreRuntime(BABE_ENGINE_ID, predigest.encode()), + DigestItem::Consensus(BABE_ENGINE_ID, next_epoch.encode()), + ] + }, + ViableEpochDescriptor::UnimportedGenesis(_) => { + // since this is the genesis, secondary predigest works for now. 
+ vec![DigestItem::PreRuntime(BABE_ENGINE_ID, predigest.encode())] + }, + } + }; + + Ok(Digest { logs }) + } + + fn append_block_import( + &self, + parent: &B::Header, + params: &mut BlockImportParams, + inherents: &InherentData, + ) -> Result<(), Error> { + let slot = inherents + .babe_inherent_data()? + .ok_or_else(|| Error::StringError("No babe inherent data".into()))?; + let epoch_changes = self.epoch_changes.shared_data(); + let mut epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + parent.number().clone(), + slot, + ) + .map_err(|e| Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)))? + .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; + // drop the lock + drop(epoch_changes); + // a quick check to see if we're in the authorities + let epoch = self.epoch(parent, slot)?; + let (authority, _) = self.authorities.first().expect("authorities is non-emptyp; qed"); + let has_authority = epoch.authorities.iter().any(|(id, _)| *id == *authority); + + if !has_authority { + log::info!(target: "manual-seal", "authority not found"); + let timestamp = inherents + .timestamp_inherent_data()? 
+ .ok_or_else(|| Error::StringError("No timestamp inherent data".into()))?; + + let slot = Slot::from_timestamp(timestamp, self.config.slot_duration()); + + // manually hard code epoch descriptor + epoch_descriptor = match epoch_descriptor { + ViableEpochDescriptor::Signaled(identifier, _header) => + ViableEpochDescriptor::Signaled( + identifier, + EpochHeader { + start_slot: slot, + end_slot: (*slot * self.config.genesis_config().epoch_length).into(), + }, + ), + _ => unreachable!( + "we're not in the authorities, so this isn't the genesis epoch; qed" + ), + }; + } + + params.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, + ); + + Ok(()) + } +} diff --git a/node/consensus-transition/manual-seal/src/consensus/timestamp.rs b/node/consensus-transition/manual-seal/src/consensus/timestamp.rs new file mode 100644 index 00000000..e7f4e709 --- /dev/null +++ b/node/consensus-transition/manual-seal/src/consensus/timestamp.rs @@ -0,0 +1,164 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Mocked timestamp inherent, allows for manual seal to create blocks for runtimes +//! that expect this inherent. 
+ +use crate::Error; +use sc_client_api::{AuxStore, UsageProvider}; +use sp_api::ProvideRuntimeApi; +use sp_blockchain::HeaderBackend; +use sp_consensus_aura::{ + sr25519::{AuthorityId, AuthoritySignature}, + AuraApi, +}; +use sp_consensus_babe::BabeApi; +use sp_consensus_slots::{Slot, SlotDuration}; +use sp_inherents::{InherentData, InherentDataProvider, InherentIdentifier}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Zero}, +}; +use sp_timestamp::{InherentType, INHERENT_IDENTIFIER}; +use std::{ + sync::{atomic, Arc}, + time::SystemTime, +}; + +/// Provide duration since unix epoch in millisecond for timestamp inherent. +/// Mocks the timestamp inherent to always produce a valid timestamp for the next slot. +/// +/// This works by either fetching the `slot_number` from the most recent header and dividing +/// that value by `slot_duration` in order to fork chains that expect this inherent. +/// +/// It produces timestamp inherents that are increaed by `slot_duraation` whenever +/// `provide_inherent_data` is called. +pub struct SlotTimestampProvider { + // holds the unix millisecnd timestamp for the most recent block + unix_millis: atomic::AtomicU64, + // configured slot_duration in the runtime + slot_duration: SlotDuration, +} + +impl SlotTimestampProvider { + /// Create a new mocked time stamp provider, for babe. + pub fn new_babe(client: Arc) -> Result + where + B: BlockT, + C: AuxStore + HeaderBackend + ProvideRuntimeApi + UsageProvider, + C::Api: BabeApi, + { + let slot_duration = sc_consensus_babe::Config::get(&*client)?.slot_duration(); + + let time = Self::with_header(&client, slot_duration, |header| { + let slot_number = *sc_consensus_babe::find_pre_digest::(&header) + .map_err(|err| format!("{}", err))? 
+ .slot(); + Ok(slot_number) + })?; + + Ok(Self { unix_millis: atomic::AtomicU64::new(time), slot_duration }) + } + + /// Create a new mocked time stamp provider, for aura + pub fn new_aura(client: Arc) -> Result + where + B: BlockT, + C: AuxStore + HeaderBackend + ProvideRuntimeApi + UsageProvider, + C::Api: AuraApi, + { + let slot_duration = sc_consensus_aura::slot_duration(&*client)?; + + let time = Self::with_header(&client, slot_duration, |header| { + let slot_number = *sc_consensus_aura::find_pre_digest::(&header) + .map_err(|err| format!("{}", err))?; + Ok(slot_number) + })?; + + Ok(Self { unix_millis: atomic::AtomicU64::new(time), slot_duration }) + } + + fn with_header( + client: &Arc, + slot_duration: SlotDuration, + func: F, + ) -> Result + where + B: BlockT, + C: AuxStore + HeaderBackend + UsageProvider, + F: Fn(B::Header) -> Result, + { + let info = client.info(); + + // looks like this isn't the first block, rehydrate the fake time. + // otherwise we'd be producing blocks for older slots. + let time = if info.best_number != Zero::zero() { + let header = client + .header(BlockId::Hash(info.best_hash))? + .ok_or_else(|| "best header not found in the db!".to_string())?; + let slot = func(header)?; + // add the slot duration so there's no collision of slots + (slot * slot_duration.as_millis() as u64) + slot_duration.as_millis() as u64 + } else { + // this is the first block, use the correct time. + let now = SystemTime::now(); + now.duration_since(SystemTime::UNIX_EPOCH) + .map_err(|err| Error::StringError(format!("{}", err)))? + .as_millis() as u64 + }; + + Ok(time) + } + + /// Get the current slot number + pub fn slot(&self) -> Slot { + Slot::from_timestamp( + self.unix_millis.load(atomic::Ordering::SeqCst).into(), + self.slot_duration, + ) + } + + /// Gets the current time stamp. 
+ pub fn timestamp(&self) -> sp_timestamp::Timestamp { + sp_timestamp::Timestamp::new(self.unix_millis.load(atomic::Ordering::SeqCst)) + } +} + +#[async_trait::async_trait] +impl InherentDataProvider for SlotTimestampProvider { + fn provide_inherent_data( + &self, + inherent_data: &mut InherentData, + ) -> Result<(), sp_inherents::Error> { + // we update the time here. + let new_time: InherentType = self + .unix_millis + .fetch_add(self.slot_duration.as_millis() as u64, atomic::Ordering::SeqCst) + .into(); + inherent_data.put_data(INHERENT_IDENTIFIER, &new_time)?; + Ok(()) + } + + async fn try_handle_error( + &self, + _: &InherentIdentifier, + _: &[u8], + ) -> Option> { + None + } +} diff --git a/node/consensus-transition/manual-seal/src/error.rs b/node/consensus-transition/manual-seal/src/error.rs new file mode 100644 index 00000000..7c321120 --- /dev/null +++ b/node/consensus-transition/manual-seal/src/error.rs @@ -0,0 +1,113 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! A manual sealing engine: the engine listens for rpc calls to seal blocks and create forks. +//! This is suitable for a testing environment. 
+ +use futures::channel::{mpsc::SendError, oneshot}; +use sc_consensus::ImportResult; +use sp_blockchain::Error as BlockchainError; +use sp_consensus::Error as ConsensusError; +use sp_inherents::Error as InherentsError; + +/// Error code for rpc +mod codes { + pub const SERVER_SHUTTING_DOWN: i64 = 10_000; + pub const BLOCK_IMPORT_FAILED: i64 = 11_000; + pub const EMPTY_TRANSACTION_POOL: i64 = 12_000; + pub const BLOCK_NOT_FOUND: i64 = 13_000; + pub const CONSENSUS_ERROR: i64 = 14_000; + pub const INHERENTS_ERROR: i64 = 15_000; + pub const BLOCKCHAIN_ERROR: i64 = 16_000; + pub const UNKNOWN_ERROR: i64 = 20_000; +} + +/// errors encountered by background block authorship task +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// An error occurred while importing the block + #[error("Block import failed: {0:?}")] + BlockImportError(ImportResult), + /// Transaction pool is empty, cannot create a block + #[error( + "Transaction pool is empty, set create_empty to true, if you want to create empty blocks" + )] + EmptyTransactionPool, + /// encountered during creation of Proposer. + #[error("Consensus Error: {0}")] + ConsensusError(#[from] ConsensusError), + /// Failed to create Inherents data + #[error("Inherents Error: {0}")] + InherentError(#[from] InherentsError), + /// error encountered during finalization + #[error("Finalization Error: {0}")] + BlockchainError(#[from] BlockchainError), + /// Supplied parent_hash doesn't exist in chain + #[error("Supplied parent_hash: {0} doesn't exist in chain")] + BlockNotFound(String), + /// Some string error + #[error("{0}")] + StringError(String), + /// send error + #[error("Consensus process is terminating")] + Canceled(#[from] oneshot::Canceled), + /// send error + #[error("Consensus process is terminating")] + SendError(#[from] SendError), + /// Some other error. 
+ #[error("Other error: {0}")] + Other(#[from] Box), +} + +impl From for Error { + fn from(err: ImportResult) -> Self { + Error::BlockImportError(err) + } +} + +impl From for Error { + fn from(s: String) -> Self { + Error::StringError(s) + } +} + +impl Error { + fn to_code(&self) -> i64 { + use Error::*; + match self { + BlockImportError(_) => codes::BLOCK_IMPORT_FAILED, + BlockNotFound(_) => codes::BLOCK_NOT_FOUND, + EmptyTransactionPool => codes::EMPTY_TRANSACTION_POOL, + ConsensusError(_) => codes::CONSENSUS_ERROR, + InherentError(_) => codes::INHERENTS_ERROR, + BlockchainError(_) => codes::BLOCKCHAIN_ERROR, + SendError(_) | Canceled(_) => codes::SERVER_SHUTTING_DOWN, + _ => codes::UNKNOWN_ERROR, + } + } +} + +impl From for jsonrpc_core::Error { + fn from(error: Error) -> Self { + jsonrpc_core::Error { + code: jsonrpc_core::ErrorCode::ServerError(error.to_code()), + message: format!("{}", error), + data: None, + } + } +} diff --git a/node/consensus-transition/manual-seal/src/finalize_block.rs b/node/consensus-transition/manual-seal/src/finalize_block.rs new file mode 100644 index 00000000..d134ce77 --- /dev/null +++ b/node/consensus-transition/manual-seal/src/finalize_block.rs @@ -0,0 +1,59 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. 
If not, see <https://www.gnu.org/licenses/>.
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! A manual sealing engine: the engine listens for rpc calls to seal blocks and create forks. +//! This is suitable for a testing environment. + +use futures::prelude::*; +use prometheus_endpoint::Registry; +use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; +use sc_consensus::{ + block_import::{BlockImport, BlockImportParams, ForkChoiceStrategy}, + import_queue::{BasicQueue, BoxBlockImport, Verifier}, +}; +use sp_blockchain::HeaderBackend; +use sp_consensus::{CacheKeyId, Environment, Proposer, SelectChain}; +use sp_inherents::CreateInherentDataProviders; +use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use std::{marker::PhantomData, sync::Arc}; + +mod error; +mod finalize_block; +mod seal_block; + +pub mod consensus; +pub mod rpc; + +pub use self::{ + consensus::ConsensusDataProvider, + error::Error, + finalize_block::{finalize_block, FinalizeBlockParams}, + rpc::{CreatedBlock, EngineCommand}, + seal_block::{seal_block, SealBlockParams, MAX_PROPOSAL_DURATION}, +}; +use sc_transaction_pool_api::TransactionPool; +use sp_api::{ProvideRuntimeApi, TransactionFor}; + +/// The `ConsensusEngineId` of Manual Seal. +pub const MANUAL_SEAL_ENGINE_ID: ConsensusEngineId = [b'm', b'a', b'n', b'l']; + +/// The verifier for the manual seal engine; instantly finalizes. 
+struct ManualSealVerifier; + +#[async_trait::async_trait] +impl Verifier for ManualSealVerifier { + async fn verify( + &mut self, + mut block: BlockImportParams, + ) -> Result<(BlockImportParams, Option)>>), String> { + block.finalized = false; + block.fork_choice = Some(ForkChoiceStrategy::LongestChain); + Ok((block, None)) + } +} + +/// Instantiate the import queue for the manual seal consensus engine. +pub fn import_queue( + block_import: BoxBlockImport, + spawner: &impl sp_core::traits::SpawnEssentialNamed, + registry: Option<&Registry>, +) -> BasicQueue +where + Block: BlockT, + Transaction: Send + Sync + 'static, +{ + BasicQueue::new(ManualSealVerifier, block_import, None, spawner, registry) +} + +/// Params required to start the instant sealing authorship task. +pub struct ManualSealParams, TP, SC, CS, CIDP> { + /// Block import instance for well. importing blocks. + pub block_import: BI, + + /// The environment we are producing blocks for. + pub env: E, + + /// Client instance + pub client: Arc, + + /// Shared reference to the transaction pool. + pub pool: Arc, + + /// Stream, Basically the receiving end of a channel for sending + /// commands to the authorship task. + pub commands_stream: CS, + + /// SelectChain strategy. + pub select_chain: SC, + + /// Digest provider for inclusion in blocks. + pub consensus_data_provider: + Option>>>, + + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, +} + +/// Params required to start the manual sealing authorship task. +pub struct InstantSealParams, TP, SC, CIDP> { + /// Block import instance for well. importing blocks. + pub block_import: BI, + + /// The environment we are producing blocks for. + pub env: E, + + /// Client instance + pub client: Arc, + + /// Shared reference to the transaction pool. + pub pool: Arc, + + /// SelectChain strategy. + pub select_chain: SC, + + /// Digest provider for inclusion in blocks. 
+ pub consensus_data_provider: + Option>>>, + + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, +} + +/// Creates the background authorship task for the manual seal engine. +pub async fn run_manual_seal( + ManualSealParams { + mut block_import, + mut env, + client, + pool, + mut commands_stream, + select_chain, + consensus_data_provider, + create_inherent_data_providers, + }: ManualSealParams, +) where + B: BlockT + 'static, + BI: BlockImport> + + Send + + Sync + + 'static, + C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, + CB: ClientBackend + 'static, + E: Environment + 'static, + E::Proposer: Proposer>, + CS: Stream::Hash>> + Unpin + 'static, + SC: SelectChain + 'static, + TransactionFor: 'static, + TP: TransactionPool, + CIDP: CreateInherentDataProviders, +{ + while let Some(command) = commands_stream.next().await { + match command { + EngineCommand::SealNewBlock { create_empty, finalize, parent_hash, sender } => { + seal_block(SealBlockParams { + sender, + parent_hash, + finalize, + create_empty, + env: &mut env, + select_chain: &select_chain, + block_import: &mut block_import, + consensus_data_provider: consensus_data_provider.as_deref(), + pool: pool.clone(), + client: client.clone(), + create_inherent_data_providers: &create_inherent_data_providers, + }) + .await; + }, + EngineCommand::FinalizeBlock { hash, sender, justification } => { + let justification = justification.map(|j| (MANUAL_SEAL_ENGINE_ID, j)); + finalize_block(FinalizeBlockParams { + hash, + sender, + justification, + finalizer: client.clone(), + _phantom: PhantomData, + }) + .await + }, + } + } +} + +/// runs the background authorship task for the instant seal engine. +/// instant-seal creates a new block for every transaction imported into +/// the transaction pool. 
+pub async fn run_instant_seal( + InstantSealParams { + block_import, + env, + client, + pool, + select_chain, + consensus_data_provider, + create_inherent_data_providers, + }: InstantSealParams, +) where + B: BlockT + 'static, + BI: BlockImport> + + Send + + Sync + + 'static, + C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, + CB: ClientBackend + 'static, + E: Environment + 'static, + E::Proposer: Proposer>, + SC: SelectChain + 'static, + TransactionFor: 'static, + TP: TransactionPool, + CIDP: CreateInherentDataProviders, +{ + // instant-seal creates blocks as soon as transactions are imported + // into the transaction pool. + let commands_stream = pool.import_notification_stream().map(|_| EngineCommand::SealNewBlock { + create_empty: false, + finalize: false, + parent_hash: None, + sender: None, + }); + + run_manual_seal(ManualSealParams { + block_import, + env, + client, + pool, + commands_stream, + select_chain, + consensus_data_provider, + create_inherent_data_providers, + }) + .await +} + +#[cfg(test)] +mod tests { + use super::*; + use sc_basic_authorship::ProposerFactory; + use sc_client_api::BlockBackend; + use sc_consensus::ImportedAux; + use sc_transaction_pool::{BasicPool, Options, RevalidationType}; + use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionSource}; + use sp_inherents::InherentData; + use sp_runtime::generic::{BlockId, Digest, DigestItem}; + use substrate_test_runtime_client::{ + AccountKeyring::*, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, + }; + use substrate_test_runtime_transaction_pool::{uxt, TestApi}; + + fn api() -> Arc { + Arc::new(TestApi::empty()) + } + + const SOURCE: TransactionSource = TransactionSource::External; + + struct TestDigestProvider { + _client: Arc, + } + impl ConsensusDataProvider for TestDigestProvider + where + B: BlockT, + C: ProvideRuntimeApi + Send + Sync, + { + type Transaction = TransactionFor; + + fn create_digest( + &self, + _parent: 
&B::Header, + _inherents: &InherentData, + ) -> Result { + Ok(Digest { logs: vec![] }) + } + + fn append_block_import( + &self, + _parent: &B::Header, + params: &mut BlockImportParams, + _inherents: &InherentData, + ) -> Result<(), Error> { + params.post_digests.push(DigestItem::Other(vec![1])); + Ok(()) + } + } + + #[tokio::test] + async fn instant_seal() { + let builder = TestClientBuilder::new(); + let (client, select_chain) = builder.build_with_longest_chain(); + let client = Arc::new(client); + let spawner = sp_core::testing::TaskExecutor::new(); + let pool = Arc::new(BasicPool::with_revalidation_type( + Options::default(), + true.into(), + api(), + None, + RevalidationType::Full, + spawner.clone(), + 0, + )); + let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); + // this test checks that blocks are created as soon as transactions are imported into the + // pool. + let (sender, receiver) = futures::channel::oneshot::channel(); + let mut sender = Arc::new(Some(sender)); + let commands_stream = + pool.pool().validated_pool().import_notification_stream().map(move |_| { + // we're only going to submit one tx so this fn will only be called once. + let mut_sender = Arc::get_mut(&mut sender).unwrap(); + let sender = std::mem::take(mut_sender); + EngineCommand::SealNewBlock { + create_empty: false, + finalize: true, + parent_hash: None, + sender, + } + }); + let future = run_manual_seal(ManualSealParams { + block_import: client.clone(), + env, + client: client.clone(), + pool: pool.clone(), + commands_stream, + select_chain, + create_inherent_data_providers: |_, _| async { Ok(()) }, + consensus_data_provider: None, + }); + std::thread::spawn(|| { + let rt = tokio::runtime::Runtime::new().unwrap(); + // spawn the background authorship task + rt.block_on(future); + }); + // submit a transaction to pool. 
+ let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; + // assert that it was successfully imported + assert!(result.is_ok()); + // assert that the background task returns ok + let created_block = receiver.await.unwrap().unwrap(); + assert_eq!( + created_block, + CreatedBlock { + hash: created_block.hash.clone(), + aux: ImportedAux { + header_only: false, + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + is_new_best: true, + } + } + ); + // assert that there's a new block in the db. + assert!(client.header(&BlockId::Number(1)).unwrap().is_some()) + } + + #[tokio::test] + async fn manual_seal_and_finalization() { + let builder = TestClientBuilder::new(); + let (client, select_chain) = builder.build_with_longest_chain(); + let client = Arc::new(client); + let spawner = sp_core::testing::TaskExecutor::new(); + let pool = Arc::new(BasicPool::with_revalidation_type( + Options::default(), + true.into(), + api(), + None, + RevalidationType::Full, + spawner.clone(), + 0, + )); + let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); + // this test checks that blocks are created as soon as an engine command is sent over the + // stream. + let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); + let future = run_manual_seal(ManualSealParams { + block_import: client.clone(), + env, + client: client.clone(), + pool: pool.clone(), + commands_stream, + select_chain, + consensus_data_provider: None, + create_inherent_data_providers: |_, _| async { Ok(()) }, + }); + std::thread::spawn(|| { + let rt = tokio::runtime::Runtime::new().unwrap(); + // spawn the background authorship task + rt.block_on(future); + }); + // submit a transaction to pool. 
+ let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; + // assert that it was successfully imported + assert!(result.is_ok()); + let (tx, rx) = futures::channel::oneshot::channel(); + sink.send(EngineCommand::SealNewBlock { + parent_hash: None, + sender: Some(tx), + create_empty: false, + finalize: false, + }) + .await + .unwrap(); + let created_block = rx.await.unwrap().unwrap(); + + // assert that the background task returns ok + assert_eq!( + created_block, + CreatedBlock { + hash: created_block.hash.clone(), + aux: ImportedAux { + header_only: false, + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + is_new_best: true, + } + } + ); + // assert that there's a new block in the db. + let header = client.header(&BlockId::Number(1)).unwrap().unwrap(); + let (tx, rx) = futures::channel::oneshot::channel(); + sink.send(EngineCommand::FinalizeBlock { + sender: Some(tx), + hash: header.hash(), + justification: None, + }) + .await + .unwrap(); + // check that the background task returns ok: + rx.await.unwrap().unwrap(); + } + + #[tokio::test] + async fn manual_seal_fork_blocks() { + let builder = TestClientBuilder::new(); + let (client, select_chain) = builder.build_with_longest_chain(); + let client = Arc::new(client); + let pool_api = api(); + let spawner = sp_core::testing::TaskExecutor::new(); + let pool = Arc::new(BasicPool::with_revalidation_type( + Options::default(), + true.into(), + pool_api.clone(), + None, + RevalidationType::Full, + spawner.clone(), + 0, + )); + let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); + // this test checks that blocks are created as soon as an engine command is sent over the + // stream. 
+ let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); + let future = run_manual_seal(ManualSealParams { + block_import: client.clone(), + env, + client: client.clone(), + pool: pool.clone(), + commands_stream, + select_chain, + consensus_data_provider: None, + create_inherent_data_providers: |_, _| async { Ok(()) }, + }); + std::thread::spawn(|| { + let rt = tokio::runtime::Runtime::new().unwrap(); + // spawn the background authorship task + rt.block_on(future); + }); + // submit a transaction to pool. + let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; + // assert that it was successfully imported + assert!(result.is_ok()); + + let (tx, rx) = futures::channel::oneshot::channel(); + sink.send(EngineCommand::SealNewBlock { + parent_hash: None, + sender: Some(tx), + create_empty: false, + finalize: false, + }) + .await + .unwrap(); + let created_block = rx.await.unwrap().unwrap(); + pool_api.increment_nonce(Alice.into()); + + // assert that the background task returns ok + assert_eq!( + created_block, + CreatedBlock { + hash: created_block.hash.clone(), + aux: ImportedAux { + header_only: false, + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + is_new_best: true + } + } + ); + let block = client.block(&BlockId::Number(1)).unwrap().unwrap().block; + pool_api.add_block(block, true); + assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 1)).await.is_ok()); + + let header = client.header(&BlockId::Number(1)).expect("db error").expect("imported above"); + pool.maintain(sc_transaction_pool_api::ChainEvent::NewBestBlock { + hash: header.hash(), + tree_route: None, + }) + .await; + + let (tx1, rx1) = futures::channel::oneshot::channel(); + assert!(sink + .send(EngineCommand::SealNewBlock { + parent_hash: Some(created_block.hash), + sender: Some(tx1), + create_empty: false, + finalize: false, + }) + .await + .is_ok()); + 
assert_matches::assert_matches!(rx1.await.expect("should be no error receiving"), Ok(_)); + let block = client.block(&BlockId::Number(2)).unwrap().unwrap().block; + pool_api.add_block(block, true); + pool_api.increment_nonce(Alice.into()); + + assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Bob, 0)).await.is_ok()); + let (tx2, rx2) = futures::channel::oneshot::channel(); + assert!(sink + .send(EngineCommand::SealNewBlock { + parent_hash: Some(created_block.hash), + sender: Some(tx2), + create_empty: false, + finalize: false, + }) + .await + .is_ok()); + let imported = rx2.await.unwrap().unwrap(); + // assert that fork block is in the db + assert!(client.header(&BlockId::Hash(imported.hash)).unwrap().is_some()) + } + + #[tokio::test] + async fn manual_seal_post_hash() { + let builder = TestClientBuilder::new(); + let (client, select_chain) = builder.build_with_longest_chain(); + let client = Arc::new(client); + let spawner = sp_core::testing::TaskExecutor::new(); + let pool = Arc::new(BasicPool::with_revalidation_type( + Options::default(), + true.into(), + api(), + None, + RevalidationType::Full, + spawner.clone(), + 0, + )); + let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); + + let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); + let future = run_manual_seal(ManualSealParams { + block_import: client.clone(), + env, + client: client.clone(), + pool: pool.clone(), + commands_stream, + select_chain, + // use a provider that pushes some post digest data + consensus_data_provider: Some(Box::new(TestDigestProvider { _client: client.clone() })), + create_inherent_data_providers: |_, _| async { Ok(()) }, + }); + std::thread::spawn(|| { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(future); + }); + let (tx, rx) = futures::channel::oneshot::channel(); + sink.send(EngineCommand::SealNewBlock { + parent_hash: None, + sender: Some(tx), + create_empty: true, + finalize: false, + }) + 
.await + .unwrap(); + let created_block = rx.await.unwrap().unwrap(); + + // assert that the background task returned the actual header hash + let header = client.header(&BlockId::Number(1)).unwrap().unwrap(); + assert_eq!(header.hash(), created_block.hash); + } +} diff --git a/node/consensus-transition/manual-seal/src/rpc.rs b/node/consensus-transition/manual-seal/src/rpc.rs new file mode 100644 index 00000000..4a8dcbc0 --- /dev/null +++ b/node/consensus-transition/manual-seal/src/rpc.rs @@ -0,0 +1,170 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! RPC interface for the `ManualSeal` Engine. + +pub use self::gen_client::Client as ManualSealClient; +use futures::{ + channel::{mpsc, oneshot}, + FutureExt, SinkExt, TryFutureExt, +}; +use jsonrpc_core::Error; +use jsonrpc_derive::rpc; +use sc_consensus::ImportedAux; +use serde::{Deserialize, Serialize}; +use sp_runtime::EncodedJustification; + +/// Future's type for jsonrpc +type FutureResult = jsonrpc_core::BoxFuture>; +/// sender passed to the authorship task to report errors or successes. +pub type Sender = Option>>; + +/// Message sent to the background authorship task, usually by RPC. 
+pub enum EngineCommand { + /// Tells the engine to propose a new block + /// + /// if create_empty == true, it will create empty blocks if there are no transactions + /// in the transaction pool. + /// + /// if finalize == true, the block will be instantly finalized. + SealNewBlock { + /// if true, empty blocks(without extrinsics) will be created. + /// otherwise, will return Error::EmptyTransactionPool. + create_empty: bool, + /// instantly finalize this block? + finalize: bool, + /// specify the parent hash of the about-to-created block + parent_hash: Option, + /// sender to report errors/success to the rpc. + sender: Sender>, + }, + /// Tells the engine to finalize the block with the supplied hash + FinalizeBlock { + /// hash of the block + hash: Hash, + /// sender to report errors/success to the rpc. + sender: Sender<()>, + /// finalization justification + justification: Option, + }, +} + +/// RPC trait that provides methods for interacting with the manual-seal authorship task over rpc. +#[rpc] +pub trait ManualSealApi { + /// Instructs the manual-seal authorship task to create a new block + #[rpc(name = "engine_createBlock")] + fn create_block( + &self, + create_empty: bool, + finalize: bool, + parent_hash: Option, + ) -> FutureResult>; + + /// Instructs the manual-seal authorship task to finalize a block + #[rpc(name = "engine_finalizeBlock")] + fn finalize_block( + &self, + hash: Hash, + justification: Option, + ) -> FutureResult; +} + +/// A struct that implements the [`ManualSealApi`]. +pub struct ManualSeal { + import_block_channel: mpsc::Sender>, +} + +/// return type of `engine_createBlock` +#[derive(Debug, Deserialize, Serialize, PartialEq, Eq)] +pub struct CreatedBlock { + /// hash of the created block. + pub hash: Hash, + /// some extra details about the import operation + pub aux: ImportedAux, +} + +impl ManualSeal { + /// Create new `ManualSeal` with the given reference to the client. 
+ pub fn new(import_block_channel: mpsc::Sender>) -> Self { + Self { import_block_channel } + } +} + +impl ManualSealApi for ManualSeal { + fn create_block( + &self, + create_empty: bool, + finalize: bool, + parent_hash: Option, + ) -> FutureResult> { + let mut sink = self.import_block_channel.clone(); + async move { + let (sender, receiver) = oneshot::channel(); + let command = EngineCommand::SealNewBlock { + create_empty, + finalize, + parent_hash, + sender: Some(sender), + }; + sink.send(command).await?; + receiver.await? + } + .map_err(Error::from) + .boxed() + } + + fn finalize_block( + &self, + hash: Hash, + justification: Option, + ) -> FutureResult { + let mut sink = self.import_block_channel.clone(); + async move { + let (sender, receiver) = oneshot::channel(); + sink.send(EngineCommand::FinalizeBlock { hash, sender: Some(sender), justification }) + .await?; + + receiver.await?.map(|_| true) + } + .map_err(Error::from) + .boxed() + } +} + +/// report any errors or successes encountered by the authorship task back +/// to the rpc +pub fn send_result( + sender: &mut Sender, + result: std::result::Result, +) { + if let Some(sender) = sender.take() { + if let Err(err) = sender.send(result) { + match err { + Ok(value) => log::warn!("Server is shutting down: {:?}", value), + Err(error) => log::warn!("Server is shutting down with error: {}", error), + } + } + } else { + // instant seal doesn't report errors over rpc, simply log them. + match result { + Ok(r) => log::info!("Instant Seal success: {:?}", r), + Err(e) => log::error!("Instant Seal encountered an error: {}", e), + } + } +} diff --git a/node/consensus-transition/manual-seal/src/seal_block.rs b/node/consensus-transition/manual-seal/src/seal_block.rs new file mode 100644 index 00000000..202b54fe --- /dev/null +++ b/node/consensus-transition/manual-seal/src/seal_block.rs @@ -0,0 +1,166 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Block sealing utilities + +use crate::{rpc, ConsensusDataProvider, CreatedBlock, Error}; +use futures::prelude::*; +use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction}; +use sc_transaction_pool_api::TransactionPool; +use sp_api::{ProvideRuntimeApi, TransactionFor}; +use sp_blockchain::HeaderBackend; +use sp_consensus::{self, BlockOrigin, Environment, Proposer, SelectChain}; +use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, +}; +use std::{collections::HashMap, sync::Arc, time::Duration}; + +/// max duration for creating a proposal in secs +pub const MAX_PROPOSAL_DURATION: u64 = 10; + +/// params for sealing a new block +pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP, CIDP> { + /// if true, empty blocks(without extrinsics) will be created. + /// otherwise, will return Error::EmptyTransactionPool. + pub create_empty: bool, + /// instantly finalize this block? + pub finalize: bool, + /// specify the parent hash of the about-to-created block + pub parent_hash: Option<::Hash>, + /// sender to report errors/success to the rpc. 
+ pub sender: rpc::Sender::Hash>>, + /// transaction pool + pub pool: Arc, + /// header backend + pub client: Arc, + /// Environment trait object for creating a proposer + pub env: &'a mut E, + /// SelectChain object + pub select_chain: &'a SC, + /// Digest provider for inclusion in blocks. + pub consensus_data_provider: + Option<&'a dyn ConsensusDataProvider>>, + /// block import object + pub block_import: &'a mut BI, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: &'a CIDP, +} + +/// seals a new block with the given params +pub async fn seal_block( + SealBlockParams { + create_empty, + finalize, + pool, + parent_hash, + client, + select_chain, + block_import, + env, + create_inherent_data_providers, + consensus_data_provider: digest_provider, + mut sender, + }: SealBlockParams<'_, B, BI, SC, C, E, TP, CIDP>, +) where + B: BlockT, + BI: BlockImport> + + Send + + Sync + + 'static, + C: HeaderBackend + ProvideRuntimeApi, + E: Environment, + E::Proposer: Proposer>, + TP: TransactionPool, + SC: SelectChain, + TransactionFor: 'static, + CIDP: CreateInherentDataProviders, +{ + let future = async { + if pool.status().ready == 0 && !create_empty { + return Err(Error::EmptyTransactionPool) + } + + // get the header to build this new block on. + // use the parent_hash supplied via `EngineCommand` + // or fetch the best_block. + let parent = match parent_hash { + Some(hash) => client + .header(BlockId::Hash(hash))? 
+ .ok_or_else(|| Error::BlockNotFound(format!("{}", hash)))?, + None => select_chain.best_chain().await?, + }; + + let inherent_data_providers = create_inherent_data_providers + .create_inherent_data_providers(parent.hash(), ()) + .await + .map_err(|e| Error::Other(e))?; + + let inherent_data = inherent_data_providers.create_inherent_data()?; + + let proposer = env.init(&parent).map_err(|err| Error::StringError(err.to_string())).await?; + let inherents_len = inherent_data.len(); + + let digest = if let Some(digest_provider) = digest_provider { + digest_provider.create_digest(&parent, &inherent_data)? + } else { + Default::default() + }; + + let proposal = proposer + .propose( + inherent_data.clone(), + digest, + Duration::from_secs(MAX_PROPOSAL_DURATION), + None, + ) + .map_err(|err| Error::StringError(err.to_string())) + .await?; + + if proposal.block.extrinsics().len() == inherents_len && !create_empty { + return Err(Error::EmptyTransactionPool) + } + + let (header, body) = proposal.block.deconstruct(); + let mut params = BlockImportParams::new(BlockOrigin::Own, header.clone()); + params.body = Some(body); + params.finalized = finalize; + params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + params.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes( + proposal.storage_changes, + )); + + if let Some(digest_provider) = digest_provider { + digest_provider.append_block_import(&parent, &mut params, &inherent_data)?; + } + + // Make sure we return the same post-hash that will be calculated when importing the block + // This is important in case the digest_provider added any signature, seal, ect. + let mut post_header = header.clone(); + post_header.digest_mut().logs.extend(params.post_digests.iter().cloned()); + + match block_import.import_block(params, HashMap::new()).await? 
{ + ImportResult::Imported(aux) => + Ok(CreatedBlock { hash: ::Header::hash(&post_header), aux }), + other => Err(other.into()), + } + }; + + rpc::send_result(&mut sender, future.await) +} diff --git a/node/rpc/Cargo.toml b/node/rpc/Cargo.toml index 9454cfe0..074c5a37 100644 --- a/node/rpc/Cargo.toml +++ b/node/rpc/Cargo.toml @@ -15,8 +15,8 @@ pallet-contracts-rpc = { git = "https://github.com/paritytech/substrate", branch pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } -sc-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } -sc-consensus-manual-seal = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } +sc-consensus-aura = { path = "../consensus-transition/aura", default-features = false } +sc-consensus-manual-seal = { path = "../consensus-transition/manual-seal", default-features = false } sc-consensus-epochs = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.19", default-features = false } From 815b51cc901d7226ad3ae2f9fef1227828391699 Mon Sep 17 00:00:00 2001 From: remz <> Date: Fri, 14 Oct 2022 22:02:50 +0200 Subject: [PATCH 2/3] Amended the testnet block trigger to span two eras with the old consensus before switching to the new one. 
--- node/cli/src/service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index a1d2f308..56df0389 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -265,7 +265,7 @@ pub fn new_partial( check_for_equivocation: Default::default(), telemetry: telemetry.as_ref().map(|x| x.handle()), #[cfg(feature = "beresheet-runtime")] - compatibility_mode: CompatibilityMode::UseInitializeBlock { until: BlockNumber::from(1888u32) }, + compatibility_mode: CompatibilityMode::UseInitializeBlock { until: BlockNumber::from(8888u32) }, #[cfg(not(feature = "beresheet-runtime"))] compatibility_mode: CompatibilityMode::UseInitializeBlock { until: BlockNumber::from(14_555_555u32) }, } @@ -554,7 +554,7 @@ pub fn new_full_base(mut config: Configuration, max_block_proposal_slot_portion: None, telemetry: telemetry.as_ref().map(|x| x.handle()), #[cfg(feature = "beresheet-runtime")] - compatibility_mode: CompatibilityMode::UseInitializeBlock { until: BlockNumber::from(1888u32) }, + compatibility_mode: CompatibilityMode::UseInitializeBlock { until: BlockNumber::from(8888u32) }, #[cfg(not(feature = "beresheet-runtime"))] compatibility_mode: CompatibilityMode::UseInitializeBlock { until: BlockNumber::from(14_555_555u32) }, }, From 36e20f5f9e3e35ae29629b8a735e563b193cb834 Mon Sep 17 00:00:00 2001 From: remz <> Date: Mon, 17 Oct 2022 10:11:21 +0200 Subject: [PATCH 3/3] + Incremented the implementation version. --- node/cli/src/command.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs index 0b02772b..3aad764f 100644 --- a/node/cli/src/command.rs +++ b/node/cli/src/command.rs @@ -25,7 +25,7 @@ impl SubstrateCli for Cli { } fn impl_version() -> String { - "4.0.0".into() + "4.0.1".into() } fn description() -> String {